hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69988fc4324f019975be77a8b788675fca57a812 | 59 | py | Python | sample_responses.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | sample_responses.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | sample_responses.py | dloucasfx/collectd-openstack | 05a0e4657ff28a9c2cccb13b61144c702ec766cc | [
"Apache-2.0"
] | null | null | null | nova_metrics = {}
cinder_metrics = {}
neutron_metrics = {}
| 14.75 | 20 | 0.694915 |
b97cfdf7e99e7bbf11c9dff49225ac516589bc63 | 4,889 | py | Python | project/project/apps/user/tests/tests_view_ObtainTemporaryAuthToken.py | RignonNoel/django-init | 4f00ec5f9ad8083a8dea5483c4e43712fceeba7a | [
"MIT"
] | null | null | null | project/project/apps/user/tests/tests_view_ObtainTemporaryAuthToken.py | RignonNoel/django-init | 4f00ec5f9ad8083a8dea5483c4e43712fceeba7a | [
"MIT"
] | null | null | null | project/project/apps/user/tests/tests_view_ObtainTemporaryAuthToken.py | RignonNoel/django-init | 4f00ec5f9ad8083a8dea5483c4e43712fceeba7a | [
"MIT"
] | 1 | 2019-11-20T17:24:33.000Z | 2019-11-20T17:24:33.000Z | import json
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from project.factories import UserFactory
from ..models import TemporaryToken
User = get_user_model()
class ObtainTemporaryAuthTokenTests(APITestCase):
def setUp(self):
self.client = APIClient()
self.user = UserFactory()
self.user.set_password('Test123!')
self.user.save()
self.url = reverse('token_api')
def test_authenticate_email(self):
"""
Ensure we can authenticate on the platform.
"""
data = {
'email': self.user.email,
'password': 'Test123!'
}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = TemporaryToken.objects.get(
user__email=self.user.email,
)
self.assertContains(response, token)
def test_authenticate(self):
"""
Ensure we can authenticate on the platform using a email.
"""
data = {
'email': self.user.email,
'password': 'Test123!'
}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = TemporaryToken.objects.get(
user__email=self.user.email,
)
self.assertContains(response, token)
def test_authenticate_expired_token(self):
"""
Ensure we can authenticate on the platform when token is expired.
"""
data = {
'email': self.user.email,
'password': 'Test123!'
}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
token_old = TemporaryToken.objects.get(
user__email=self.user.email,
)
token_old.expire()
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
token_new = TemporaryToken.objects.get(
user__email=self.user.email,
)
self.assertNotContains(response, token_old)
self.assertContains(response, token_new)
def test_authenticate_bad_password(self):
"""
Ensure we can't authenticate with a wrong password'
"""
data = {
'email': self.user.email,
'password': 'test123!' # No caps on the first letter
}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
tokens = TemporaryToken.objects.filter(
user__email=self.user.email
).count()
self.assertEqual(0, tokens)
def test_authenticate_bad_email(self):
"""
Ensure we can't authenticate with a wrong email
"""
data = {
'email': 'Jon@john', # Forget the `h` in `John`
'password': 'Test123!'
}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
tokens = TemporaryToken.objects.filter(
user__email=self.user.email
).count()
self.assertEqual(0, tokens)
def test_authenticate_inactive(self):
"""
Ensure we can't authenticate if user is inactive
"""
data = {
'email': self.user.email,
'password': 'Test123!'
}
User.objects.filter(id=self.user.id).update(is_active=False)
response = self.client.post(self.url, data, format='json')
content = {
"non_field_errors": [
"Unable to log in with provided credentials."
]
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
tokens = TemporaryToken.objects.filter(
user__email=self.user.email
).count()
self.assertEqual(0, tokens)
def test_authenticate_missing_parameter(self):
"""
Ensure we can't authenticate if "email" or "password" are not
provided.
"""
response = self.client.post(self.url, {}, format='json')
content = {
'password': ['This field is required.'],
'email': ['This field is required.']
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
tokens = TemporaryToken.objects.filter(
user__email=self.user.email
).count()
self.assertEqual(0, tokens)
| 29.630303 | 75 | 0.6036 |
3c4008f23b5d80be86572d2f22577aa3f5fea8cb | 2,467 | py | Python | tests/test_pdf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 23 | 2017-05-25T01:08:58.000Z | 2019-06-22T19:35:50.000Z | tests/test_pdf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 153 | 2020-10-07T13:42:08.000Z | 2022-03-18T08:11:37.000Z | tests/test_pdf.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 9 | 2020-10-22T08:54:20.000Z | 2022-02-01T10:23:22.000Z | # -*- coding: utf-8 -*-
from .support import TestCase
class PDFIngestorTest(TestCase):
def test_match(self):
fixture_path, entity = self.fixture("readme.pdf")
self.manager.ingest(fixture_path, entity)
self.assertEqual(entity.first("mimeType"), "application/pdf")
def test_match_empty(self):
fixture_path, entity = self.fixture("empty.pdf")
self.manager.ingest(fixture_path, entity)
self.assertNotEqual(entity.first("mimeType"), "application/pdf")
def test_ingest_binary_mode(self):
fixture_path, entity = self.fixture("readme.pdf")
self.manager.ingest(fixture_path, entity)
# 2 doc fragments, 1 page
self.assertEqual(len(self.get_emitted()), 2)
self.assertIn(
"Ingestors extract useful information" " in a structured standard format",
self.manager.entities[0].first("bodyText"),
)
entities = list(self.manager.dataset.iterate(entity_id=entity.id))
self.assertEqual(len(entities), 1)
text = entities[0].first("indexText")
self.assertIn("Ingestors extract useful information", text)
def test_ingest_noisy_fixture(self):
fixture_path, entity = self.fixture("500 pages.pdf")
self.manager.ingest(fixture_path, entity)
self.assertEqual(len(self.get_emitted()), 501)
self.assertEqual(
self.manager.entities[0].first("bodyText"), "Hello, World! \nHello, World!"
)
self.assertEqual(entity.schema.name, "Pages")
def test_ingest_complex_fixture(self):
fixture_path, entity = self.fixture("very_complex_math_book.pdf")
self.manager.ingest(fixture_path, entity)
self.assertEqual(len(self.get_emitted()), 589)
self.assertIn(
"ALGEBRA \nABSTRACT AND CONCRETE \nE DITION 2.6",
self.manager.entities[0].first("bodyText"),
)
self.assertTrue(
any(
"A Note to the Reader" in x
for x in self.manager.dataset.get(entity_id=entity.id).get("indexText")
)
)
def test_ingest_unicode_fixture(self):
fixture_path, entity = self.fixture("udhr_ger.pdf")
self.manager.ingest(fixture_path, entity)
self.assertEqual(len(self.get_emitted()), 7)
self.assertIn(
u"Würde und der gleichen und unveräußerlichen",
self.manager.entities[0].first("bodyText"),
)
| 37.378788 | 87 | 0.637211 |
8376d75ccbbe886861c090d45fe0cbff50851593 | 11,953 | py | Python | homeassistant/components/doorbird/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 1 | 2021-03-23T07:20:03.000Z | 2021-03-23T07:20:03.000Z | homeassistant/components/doorbird/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 51 | 2020-08-03T07:30:44.000Z | 2022-03-22T06:02:42.000Z | homeassistant/components/doorbird/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 2 | 2021-03-22T21:42:48.000Z | 2021-04-12T12:26:39.000Z | """Support for DoorBird devices."""
import asyncio
import logging
import urllib
from urllib.error import HTTPError
from aiohttp import web
from doorbirdpy import DoorBird
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICES,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.util import dt as dt_util, slugify
from .const import (
CONF_EVENTS,
DOMAIN,
DOOR_STATION,
DOOR_STATION_EVENT_ENTITY_IDS,
DOOR_STATION_INFO,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .util import get_doorstation_by_token
_LOGGER = logging.getLogger(__name__)
API_URL = f"/api/{DOMAIN}"
CONF_CUSTOM_URL = "hass_url_override"
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_CUSTOM_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_SCHEMA])}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the DoorBird component."""
hass.data.setdefault(DOMAIN, {})
# Provide an endpoint for the doorstations to call to trigger events
hass.http.register_view(DoorBirdRequestView)
if DOMAIN in config and CONF_DEVICES in config[DOMAIN]:
for index, doorstation_config in enumerate(config[DOMAIN][CONF_DEVICES]):
if CONF_NAME not in doorstation_config:
doorstation_config[CONF_NAME] = f"DoorBird {index + 1}"
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=doorstation_config
)
)
def _reset_device_favorites_handler(event):
"""Handle clearing favorites on device."""
token = event.data.get("token")
if token is None:
return
doorstation = get_doorstation_by_token(hass, token)
if doorstation is None:
_LOGGER.error("Device not found for provided token")
return
# Clear webhooks
favorites = doorstation.device.favorites()
for favorite_type in favorites:
for favorite_id in favorites[favorite_type]:
doorstation.device.delete_favorite(favorite_type, favorite_id)
hass.bus.async_listen(RESET_DEVICE_FAVORITES, _reset_device_favorites_handler)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DoorBird from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
doorstation_config = entry.data
doorstation_options = entry.options
config_entry_id = entry.entry_id
device_ip = doorstation_config[CONF_HOST]
username = doorstation_config[CONF_USERNAME]
password = doorstation_config[CONF_PASSWORD]
device = DoorBird(device_ip, username, password)
try:
status, info = await hass.async_add_executor_job(_init_doorbird_device, device)
except urllib.error.HTTPError as err:
if err.code == HTTP_UNAUTHORIZED:
_LOGGER.error(
"Authorization rejected by DoorBird for %s@%s", username, device_ip
)
return False
raise ConfigEntryNotReady from err
except OSError as oserr:
_LOGGER.error("Failed to setup doorbird at %s: %s", device_ip, oserr)
raise ConfigEntryNotReady from oserr
if not status[0]:
_LOGGER.error(
"Could not connect to DoorBird as %s@%s: Error %s",
username,
device_ip,
str(status[1]),
)
raise ConfigEntryNotReady
token = doorstation_config.get(CONF_TOKEN, config_entry_id)
custom_url = doorstation_config.get(CONF_CUSTOM_URL)
name = doorstation_config.get(CONF_NAME)
events = doorstation_options.get(CONF_EVENTS, [])
doorstation = ConfiguredDoorBird(device, name, custom_url, token)
doorstation.update_events(events)
# Subscribe to doorbell or motion events
if not await _async_register_events(hass, doorstation):
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(_update_listener)
hass.data[DOMAIN][config_entry_id] = {
DOOR_STATION: doorstation,
DOOR_STATION_INFO: info,
UNDO_UPDATE_LISTENER: undo_listener,
}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
def _init_doorbird_device(device):
return device.ready(), device.info()
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_register_events(hass, doorstation):
try:
await hass.async_add_executor_job(doorstation.register_events, hass)
except HTTPError:
hass.components.persistent_notification.async_create(
"Doorbird configuration failed. Please verify that API "
"Operator permission is enabled for the Doorbird user. "
"A restart will be required once permissions have been "
"verified.",
title="Doorbird Configuration Failure",
notification_id="doorbird_schedule_error",
)
return False
return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
config_entry_id = entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation.update_events(entry.options[CONF_EVENTS])
# Subscribe to doorbell or motion events
await _async_register_events(hass, doorstation)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
modified = False
for importable_option in [CONF_EVENTS]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
class ConfiguredDoorBird:
"""Attach additional information to pass along with configured device."""
def __init__(self, device, name, custom_url, token):
"""Initialize configured device."""
self._name = name
self._device = device
self._custom_url = custom_url
self.events = None
self.doorstation_events = None
self._token = token
def update_events(self, events):
"""Update the doorbird events."""
self.events = events
self.doorstation_events = [self._get_event_name(event) for event in self.events]
@property
def name(self):
"""Get custom device name."""
return self._name
@property
def device(self):
"""Get the configured device."""
return self._device
@property
def custom_url(self):
"""Get custom url for device."""
return self._custom_url
@property
def token(self):
"""Get token for device."""
return self._token
def register_events(self, hass):
"""Register events on device."""
# Get the URL of this server
hass_url = get_url(hass)
# Override url if another is specified in the configuration
if self.custom_url is not None:
hass_url = self.custom_url
for event in self.doorstation_events:
self._register_event(hass_url, event)
_LOGGER.info("Successfully registered URL for %s on %s", event, self.name)
@property
def slug(self):
"""Get device slug."""
return slugify(self._name)
def _get_event_name(self, event):
return f"{self.slug}_{event}"
def _register_event(self, hass_url, event):
"""Add a schedule entry in the device for a sensor."""
url = f"{hass_url}{API_URL}/{event}?token={self._token}"
# Register HA URL as webhook if not already, then get the ID
if not self.webhook_is_registered(url):
self.device.change_favorite("http", f"Home Assistant ({event})", url)
fav_id = self.get_webhook_id(url)
if not fav_id:
_LOGGER.warning(
'Could not find favorite for URL "%s". ' 'Skipping sensor "%s"',
url,
event,
)
return
def webhook_is_registered(self, url, favs=None) -> bool:
"""Return whether the given URL is registered as a device favorite."""
return self.get_webhook_id(url, favs) is not None
def get_webhook_id(self, url, favs=None) -> str or None:
"""
Return the device favorite ID for the given URL.
The favorite must exist or there will be problems.
"""
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return None
for fav_id in favs["http"]:
if favs["http"][fav_id]["value"] == url:
return fav_id
return None
def get_event_data(self):
"""Get data to pass along with HA event."""
return {
"timestamp": dt_util.utcnow().isoformat(),
"live_video_url": self._device.live_video_url,
"live_image_url": self._device.live_image_url,
"rtsp_live_video_url": self._device.rtsp_live_video_url,
"html5_viewer_url": self._device.html5_viewer_url,
}
class DoorBirdRequestView(HomeAssistantView):
"""Provide a page for the device to call."""
requires_auth = False
url = API_URL
name = API_URL[1:].replace("/", ":")
extra_urls = [API_URL + "/{event}"]
async def get(self, request, event):
"""Respond to requests from the device."""
hass = request.app["hass"]
token = request.query.get("token")
device = get_doorstation_by_token(hass, token)
if device is None:
return web.Response(
status=HTTP_UNAUTHORIZED, text="Invalid token provided."
)
if device:
event_data = device.get_event_data()
else:
event_data = {}
if event == "clear":
hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})
message = f"HTTP Favorites cleared for {device.slug}"
return web.Response(status=HTTP_OK, text=message)
event_data[ATTR_ENTITY_ID] = hass.data[DOMAIN][
DOOR_STATION_EVENT_ENTITY_IDS
].get(event)
hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)
return web.Response(status=HTTP_OK, text="OK")
| 30.727506 | 88 | 0.655233 |
470bf6a7a511db5d8feffe9622827e10faeec666 | 15,944 | py | Python | tests/end_to_end/helpers/assertions.py | chalalaz/pipelinewise | 9e4a7b655c20b925a8080f27a6f147a6b357b888 | [
"Apache-2.0"
] | 452 | 2019-09-02T09:40:08.000Z | 2022-03-27T17:57:10.000Z | tests/end_to_end/helpers/assertions.py | chalalaz/pipelinewise | 9e4a7b655c20b925a8080f27a6f147a6b357b888 | [
"Apache-2.0"
] | 301 | 2019-09-05T18:17:30.000Z | 2022-03-21T22:01:46.000Z | tests/end_to_end/helpers/assertions.py | chalalaz/pipelinewise | 9e4a7b655c20b925a8080f27a6f147a6b357b888 | [
"Apache-2.0"
] | 118 | 2019-09-07T17:28:04.000Z | 2022-03-30T03:23:42.000Z | import glob
import os
import re
from typing import List, Set, Union
from pathlib import Path
from . import tasks
from . import db
def assert_run_tap_success(tap, target, sync_engines, profiling=False):
"""Run a specific tap and make sure that it's using the correct sync engine,
finished successfully and state file created with the right content"""
command = f'pipelinewise run_tap --tap {tap} --target {target}'
if profiling:
command = f'{command} --profiler'
[return_code, stdout, stderr] = tasks.run_command(command)
for sync_engine in sync_engines:
log_file = tasks.find_run_tap_log_file(stdout, sync_engine)
assert_command_success(return_code, stdout, stderr, log_file)
assert_state_file_valid(target, tap, log_file)
if profiling:
assert_profiling_stats_files_created(
stdout, 'run_tap', sync_engines, tap, target
)
def assert_resync_tables_success(tap, target, profiling=False):
"""Resync a specific tap and make sure that it's using the correct sync engine,
finished successfully and state file created with the right content"""
command = f'pipelinewise sync_tables --tap {tap} --target {target}'
if profiling:
command = f'{command} --profiler'
[return_code, stdout, stderr] = tasks.run_command(command)
log_file = tasks.find_run_tap_log_file(stdout, 'fastsync')
assert_command_success(return_code, stdout, stderr, log_file)
assert_state_file_valid(target, tap, log_file)
if profiling:
assert_profiling_stats_files_created(
stdout, 'sync_tables', ['fastsync'], tap, target
)
def assert_command_success(return_code, stdout, stderr, log_path=None):
"""Assert helper function to check if command finished successfully.
In case of failure it logs stdout, stderr and content of the failed command log
if exists"""
if return_code != 0 or stderr != '':
failed_log = ''
failed_log_path = f'{log_path}.failed'
# Load failed log file if exists
if os.path.isfile(failed_log_path):
with open(failed_log_path, 'r', encoding='utf-8') as file:
failed_log = file.read()
print(f'STDOUT: {stdout}\nSTDERR: {stderr}\nFAILED LOG: {failed_log}')
assert False
# check success log file if log path defined
success_log_path = f'{log_path}.success'
if log_path and not os.path.isfile(success_log_path):
assert False
else:
assert True
def assert_state_file_valid(target_name, tap_name, log_path=None):
"""Assert helper function to check if state file exists for
a certain tap for a certain target"""
state_file = Path(
f'{Path.home()}/.pipelinewise/{target_name}/{tap_name}/state.json'
).resolve()
assert os.path.isfile(state_file)
# Check if state file content equals to last emitted state in log
if log_path:
success_log_path = f'{log_path}.success'
state_in_log = None
with open(success_log_path, 'r', encoding='utf-8') as log_f:
state_log_pattern = re.search(
r'\nINFO STATE emitted from target: (.+\n)',
'\n'.join(log_f.readlines()),
)
if state_log_pattern:
state_in_log = state_log_pattern.groups()[-1]
# If the emitted state message exists in the log then compare it to the actual state file
if state_in_log:
with open(state_file, 'r', encoding='utf-8') as state_f:
assert state_in_log == ''.join(state_f.readlines())
def assert_cols_in_table(
query_runner_fn: callable, table_schema: str, table_name: str, columns: List[str]
):
"""Fetches the given table's columns from information_schema and
tests if every given column is in the result
:param query_runner_fn: method to run queries
:param table_schema: search table in this schema
:param table_name: table with the columns
:param columns: list of columns to check if there are in the table's columns
"""
funcs = _map_tap_to_target_functions(None, query_runner_fn)
sql_get_columns_for_table_fn = funcs.get(
'target_sql_get_table_cols_fn', db.sql_get_columns_for_table
)
sql = sql_get_columns_for_table_fn(table_schema, table_name)
result = query_runner_fn(sql)
cols = [res[0] for res in result]
try:
assert all(col in cols for col in columns)
except AssertionError as ex:
ex.args += (
'Error',
columns,
f'One ore more columns not found in target table {table_name}',
)
raise
def _run_sql(query_runner_fn: callable, sql_query: str) -> List:
"""Run an SQL query by a query runner function"""
return list(query_runner_fn(sql_query))
def _map_tap_to_target_functions(
tap_query_runner_fn: callable, target_query_runner_fn: callable
) -> dict:
"""Takes two query runner methods and creates a map with the compatible database
specific functions that required to run assertions.
:param tap_query_runner_fn: method to run queries in the first connection
:param target_query_runner_fn: method to run queries in the second connection
:return: Dictionary of the functions to use for the tap-target pair
"""
f_map = {
# tap-mysql specific attributes and functions
'run_query_tap_mysql': {
'source_schemas': ['mysql_source_db'],
'target_schemas': ['ppw_e2e_tap_mysql'],
'source_sql_get_cols_fn': db.sql_get_columns_mysql,
'source_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_mysql,
},
# tap-postgres specific attributes and functions
'run_query_tap_postgres': {
'source_schemas': ['public', 'public2'],
'target_schemas': ['ppw_e2e_tap_postgres', 'ppw_e2e_tap_postgres_public2'],
'source_sql_get_cols_fn': db.sql_get_columns_postgres,
'source_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_postgres,
},
# target-postgres specific attributes and functions
'run_query_target_postgres': {
'target_sql_get_cols_fn': db.sql_get_columns_postgres,
'target_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_postgres,
},
# target-snowflake specific attributes and functions
'run_query_target_snowflake': {
'target_sql_get_cols_fn': db.sql_get_columns_snowflake,
'target_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_snowflake,
},
# target-bigquery specific attributes and functions
'run_query_target_bigquery': {
'target_sql_get_cols_fn': db.sql_get_columns_bigquery,
'target_sql_get_table_cols_fn': db.sql_get_columns_for_table_bigquery,
'target_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_bigquery,
'target_sql_safe_name_fn': db.safe_name_bigquery,
},
# target-redshift specific attributes and functions
'run_query_target_redshift': {
'target_sql_get_cols_fn': db.sql_get_columns_redshift,
'target_sql_dynamic_row_count_fn': db.sql_dynamic_row_count_redshift,
},
}
# Merge the keys into one dict by tap and target query runner names
if tap_query_runner_fn:
return {
**f_map[tap_query_runner_fn.__name__],
**f_map[target_query_runner_fn.__name__],
}
return {**f_map[target_query_runner_fn.__name__]}
def assert_row_counts_equal(
tap_query_runner_fn: callable, target_query_runner_fn: callable
) -> None:
"""Takes two query runner methods, counts the row numbers in every table in both the
source and target databases and tests if the row counts are matching.
:param tap_query_runner_fn: method to run queries in the first connection
:param target_query_runner_fn: method to run queries in the second connection"""
# Generate a map of source and target specific functions
funcs = _map_tap_to_target_functions(tap_query_runner_fn, target_query_runner_fn)
# Get source and target schemas
source_schemas = funcs['source_schemas']
target_schemas = funcs['target_schemas']
# Generate a dynamic SQLs to count rows in source and target databases
source_dynamic_sql_row_count = funcs['source_sql_dynamic_row_count_fn'](
source_schemas
)
target_dynamic_sql_row_count = funcs['target_sql_dynamic_row_count_fn'](
target_schemas
)
# Count rows
source_sql_row_count = _run_sql(tap_query_runner_fn, source_dynamic_sql_row_count)[
0
][0]
target_sql_row_count = _run_sql(
target_query_runner_fn, target_dynamic_sql_row_count
)[0][0]
# Run the generated SQLs
row_counts_in_source = _run_sql(tap_query_runner_fn, source_sql_row_count)
row_counts_in_target = _run_sql(target_query_runner_fn, target_sql_row_count)
# Some sources and targets can't be compared directly (e.g. BigQuery doesn't accept spaces in table names)
# we fix that by renaming the source tables to names that the target would accept
if 'target_sql_safe_name_fn' in funcs:
row_counts_in_source = [
(funcs['target_sql_safe_name_fn'](table), row_count)
for (table, row_count) in row_counts_in_source
]
# Compare the two dataset
assert row_counts_in_target == row_counts_in_source
# pylint: disable=too-many-locals
def assert_all_columns_exist(
tap_query_runner_fn: callable,
target_query_runner_fn: callable,
column_type_mapper_fn: callable = None,
ignore_cols: Union[Set, List] = None,
) -> None:
"""Takes two query runner methods, gets the columns list for every table in both the
source and target database and tests if every column in source exists in the target database.
Some taps have unsupported column types and these are not part of the schemas published to the target thus
target table doesn't have such columns.
:param tap_query_runner_fn: method to run queries in the first connection
:param target_query_runner_fn: method to run queries in the second connection
:param column_type_mapper_fn: method to convert source to target column types
:param ignore_cols: List or set of columns to ignore if we know target table won't have them"""
# Generate a map of source and target specific functions
funcs = _map_tap_to_target_functions(tap_query_runner_fn, target_query_runner_fn)
# Get source and target schemas
source_schemas = funcs['source_schemas']
target_schemas = funcs['target_schemas']
# Generate SQLs to get columns from source and target databases
source_sql_get_cols = funcs['source_sql_get_cols_fn'](source_schemas)
target_sql_get_cols = funcs['target_sql_get_cols_fn'](target_schemas)
# Run the generated SQLs
source_table_cols = _run_sql(tap_query_runner_fn, source_sql_get_cols)
target_table_cols = _run_sql(target_query_runner_fn, target_sql_get_cols)
def _cols_list_to_dict(cols: List) -> dict:
"""
Converts list of columns with char separators to dictionary
:param cols: list of ':' separated strings using the format of
column_name:column_type:column_type_extra
:return: Dictionary of columns where key is the column_name
"""
cols_dict = {}
for col in cols:
col_props = col.split(':')
cols_dict[col_props[0]] = {'type': col_props[1], 'type_extra': col_props[2]}
return cols_dict
# Compare the two dataset
for table_cols in source_table_cols:
table_to_check = table_cols[0].lower()
# Some sources and targets can't be compared directly (e.g. BigQuery doesn't accept spaces in table names)
# we fix that by renaming the source tables to names that the target would accept
if 'target_sql_safe_name_fn' in funcs:
table_to_check = funcs['target_sql_safe_name_fn'](table_to_check)
source_cols = table_cols[1].lower().split(';')
try:
target_cols = (
next(t[1] for t in target_table_cols if t[0].lower() == table_to_check)
.lower()
.split(';')
)
except StopIteration as ex:
ex.args += ('Error', f'{table_to_check} table not found in target')
raise
source_cols_dict = _cols_list_to_dict(source_cols)
target_cols_dict = _cols_list_to_dict(target_cols)
print(target_cols_dict)
for col_name, col_props in source_cols_dict.items():
# Check if column exists in the target table
if ignore_cols and col_name in ignore_cols:
continue
try:
assert col_name in target_cols_dict
except AssertionError as ex:
ex.args += (
'Error',
f'{col_name} column not found in target table {table_to_check}',
)
raise
# Check if column type is expected in the target table, if mapper function provided
if column_type_mapper_fn:
try:
target_col = target_cols_dict[col_name]
exp_col_type = (
column_type_mapper_fn(
col_props['type'], col_props['type_extra']
)
.replace(' NULL', '')
.lower()
)
act_col_type = target_col['type'].lower()
assert act_col_type == exp_col_type
except AssertionError as ex:
ex.args += (
'Error',
f'{col_name} column type is not as expected. '
f'Expected: {exp_col_type} '
f'Actual: {act_col_type}',
)
raise
def assert_date_column_naive_in_target(
    target_query_runner_fn, column_name, full_table_name
):
    """
    Assert that every value in a timestamp column is timezone-naive.

    Args:
        target_query_runner_fn: callable that runs a SQL query against the
            target and returns the result rows
        column_name: name of the timestamp-typed column to inspect
        full_table_name: fully qualified name of the table to query
    """
    rows = target_query_runner_fn(f'SELECT {column_name} FROM {full_table_name};')
    for row in rows:
        value = row[0]
        # NULLs are acceptable; only actual timestamps must lack tz info.
        if value is None:
            continue
        assert value.tzinfo is None
def assert_profiling_stats_files_created(
    stdout: str,
    command: str,
    sync_engines: List = None,
    tap: Union[str, List[str]] = None,
    target: str = None,
):
    """
    Assert that the expected profiling pstat files exist on disk.

    Args:
        stdout: ppw command stdout
        command: ppw command name
        sync_engines: for run_tap/sync_tables, the sync engines used
            ('fastsync' and/or 'singer')
        tap: for run_tap/sync_tables, the tap ID (or a list of tap IDs)
        target: for run_tap/sync_tables, the target ID
    """
    # The profiling directory is only discoverable from the command output.
    profiler_dir = tasks.find_profiling_folder(stdout)

    # Collect the pstat file names relative to the profiling directory.
    prefix_length = len(f'{profiler_dir}/')
    pstat_files = {
        path[prefix_length:] for path in glob.iglob(f'{profiler_dir}/*.pstat')
    }

    assert f'pipelinewise_{command}.pstat' in pstat_files

    if sync_engines is not None:
        if 'fastsync' in sync_engines:
            assert f'fastsync_{tap}_{target}.pstat' in pstat_files
        if 'singer' in sync_engines:
            # NOTE(review): if *tap* is a list here, the list repr is formatted
            # into the file name — presumably callers only pass a list for the
            # multi-tap case handled below; verify against call sites.
            assert f'tap_{tap}.pstat' in pstat_files
            assert f'target_{target}.pstat' in pstat_files

    if isinstance(tap, list):
        for single_tap in tap:
            assert f'tap_{single_tap}.pstat' in pstat_files
| 39.270936 | 114 | 0.668653 |
eb5cd37199a7182140c7e2a53bd5798110a399a1 | 379 | py | Python | encapsulation/exercise/04_restaurant/project/beverage/beverage.py | Galchov/python-oop | 1bf7c51ac2c605bae11b08df7edd4341e20a1b39 | [
"MIT"
] | null | null | null | encapsulation/exercise/04_restaurant/project/beverage/beverage.py | Galchov/python-oop | 1bf7c51ac2c605bae11b08df7edd4341e20a1b39 | [
"MIT"
] | null | null | null | encapsulation/exercise/04_restaurant/project/beverage/beverage.py | Galchov/python-oop | 1bf7c51ac2c605bae11b08df7edd4341e20a1b39 | [
"MIT"
] | null | null | null | from project.product import Product
class Beverage(Product):
def __init__(self, name: str, price: float, milliliters: float):
super().__init__(name, price)
self.milliliters = milliliters
@property
def milliliters(self):
return self.__milliliters
@milliliters.setter
def milliliters(self, value):
self.__milliliters = value
| 23.6875 | 68 | 0.680739 |
311caab1b1ef856ca5fb77d888355f4c2bb4e00e | 683 | py | Python | var/spack/repos/builtin/packages/r-dichromat/package.py | renjithravindrankannath/spack | 043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-dichromat/package.py | renjithravindrankannath/spack | 043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-dichromat/package.py | renjithravindrankannath/spack | 043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RDichromat(RPackage):
"""Color Schemes for Dichromats.
Collapse red-green or green-blue distinctions to simulate the effects of
different types of color-blindness."""
cran = "dichromat"
version('2.0-0.1', sha256='a10578e9ad8a581bd8fe0d8a8370051f3cdcf12c7d282f3af2a18dacda566081')
version('2.0-0', sha256='31151eaf36f70bdc1172da5ff5088ee51cc0a3db4ead59c7c38c25316d580dd1')
depends_on('r@2.10:', type=('build', 'run'))
| 32.52381 | 97 | 0.754026 |
4ab3769f69c35f245f2d5794a75ad2a3c329560d | 707 | py | Python | MyMusicApp/users/migrations/0003_auto_20190914_0036.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | MyMusicApp/users/migrations/0003_auto_20190914_0036.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | MyMusicApp/users/migrations/0003_auto_20190914_0036.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-09-13 23:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_profile_bio'),
]
operations = [
migrations.AddField(
model_name='profile',
name='facebook',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='instagram',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='twitter',
field=models.URLField(blank=True, null=True),
),
]
| 24.37931 | 57 | 0.55587 |
6297d544b32119e102e5bfd8cdf75ee7aa04d94f | 395 | py | Python | src/tryjandgo/wsgi.py | adonnelly759/todo | 7c63b2717c07b8e295a55ea4e1359052d67e966a | [
"bzip2-1.0.6"
] | null | null | null | src/tryjandgo/wsgi.py | adonnelly759/todo | 7c63b2717c07b8e295a55ea4e1359052d67e966a | [
"bzip2-1.0.6"
] | null | null | null | src/tryjandgo/wsgi.py | adonnelly759/todo | 7c63b2717c07b8e295a55ea4e1359052d67e966a | [
"bzip2-1.0.6"
] | null | null | null | """
WSGI config for tryjandgo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tryjandgo.settings')
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
db3758e97c80142f0a7ef7595fede1fd6c9fb5db | 14,908 | py | Python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_workspace_managed_sql_server_blob_auditing_policies_operations.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_workspace_managed_sql_server_blob_auditing_policies_operations.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | null | null | null | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_workspace_managed_sql_server_blob_auditing_policies_operations.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._workspace_managed_sql_server_blob_auditing_policies_operations import build_create_or_update_request_initial, build_get_request, build_list_by_workspace_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see file header) — edits here are
# lost when the code is regenerated.
class WorkspaceManagedSqlServerBlobAuditingPoliciesOperations:
    """WorkspaceManagedSqlServerBlobAuditingPoliciesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.synapse.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        blob_auditing_policy_name: Union[str, "_models.BlobAuditingPolicyName"],
        **kwargs: Any
    ) -> "_models.ServerBlobAuditingPolicy":
        """Get server's blob auditing policy.
        Get a workspace managed sql server's blob auditing policy.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param blob_auditing_policy_name: The name of the blob auditing policy.
        :type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServerBlobAuditingPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.ServerBlobAuditingPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerBlobAuditingPolicy"]
        # Map common auth/resource status codes to SDK exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            blob_auditing_policy_name=blob_auditing_policy_name,
            template_url=self.get.metadata['url'],
        )
        # Adapt the protocol request for this client's pipeline and resolve
        # the URL template against the client's base URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ServerBlobAuditingPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/auditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        blob_auditing_policy_name: Union[str, "_models.BlobAuditingPolicyName"],
        parameters: "_models.ServerBlobAuditingPolicy",
        **kwargs: Any
    ) -> Optional["_models.ServerBlobAuditingPolicy"]:
        # Initial request of the long-running create-or-update operation;
        # polling is handled by begin_create_or_update below.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ServerBlobAuditingPolicy"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'ServerBlobAuditingPolicy')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            blob_auditing_policy_name=blob_auditing_policy_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 response carries a body; 202 means the request was
        # accepted and the operation continues server-side.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ServerBlobAuditingPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/auditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        blob_auditing_policy_name: Union[str, "_models.BlobAuditingPolicyName"],
        parameters: "_models.ServerBlobAuditingPolicy",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ServerBlobAuditingPolicy"]:
        """Create or Update server's blob auditing policy.
        Create or Update a workspace managed sql server's blob auditing policy.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param blob_auditing_policy_name: The name of the blob auditing policy.
        :type blob_auditing_policy_name: str or ~azure.mgmt.synapse.models.BlobAuditingPolicyName
        :param parameters: Properties of extended blob auditing policy.
        :type parameters: ~azure.mgmt.synapse.models.ServerBlobAuditingPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ServerBlobAuditingPolicy or the
         result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.ServerBlobAuditingPolicy]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerBlobAuditingPolicy"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # A continuation token means the LRO was already started elsewhere;
        # in that case the initial request is skipped and polling resumes.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                blob_auditing_policy_name=blob_auditing_policy_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserializes the final response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('ServerBlobAuditingPolicy', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> default ARM polling; False -> no polling;
        # anything else is treated as a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/auditingSettings/{blobAuditingPolicyName}'}  # type: ignore
    @distributed_trace
    def list_by_workspace(
        self,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ServerBlobAuditingPolicyListResult"]:
        """List workspace server's blob auditing policies.
        List workspace managed sql server's blob auditing policies.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ServerBlobAuditingPolicyListResult or the result
         of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.ServerBlobAuditingPolicyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerBlobAuditingPolicyListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Paging callbacks handed to AsyncItemPaged below: prepare_request
        # builds the first/next page request, extract_data pulls the items
        # and the next-page link out of a response.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=self.list_by_workspace.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ServerBlobAuditingPolicyListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/auditingSettings'}  # type: ignore
| 48.090323 | 231 | 0.691105 |
8a61d100b564fd24ddeba3f4dd8c2cca31a16be7 | 28 | py | Python | pathfinding/__init__.py | LHGames-2017/Enter_team_name_here | 6d476813ce016d840365c92dc995688174a1d8bd | [
"MIT"
] | 3 | 2018-02-04T20:01:34.000Z | 2019-04-27T17:53:20.000Z | pathfinding/__init__.py | LHGames-2017/Enter_team_name_here | 6d476813ce016d840365c92dc995688174a1d8bd | [
"MIT"
] | null | null | null | pathfinding/__init__.py | LHGames-2017/Enter_team_name_here | 6d476813ce016d840365c92dc995688174a1d8bd | [
"MIT"
] | 2 | 2018-02-04T20:03:39.000Z | 2018-06-24T21:43:38.000Z | __all__ = ['core', 'finder'] | 28 | 28 | 0.607143 |
fd188bca839edcf416fb947c5e5a0a2126e4f920 | 13,726 | py | Python | dplace_app/models.py | Lumilam/D-Place | 3544163e22308919b5c27328dfaea8a1c0f3b291 | [
"MIT"
] | null | null | null | dplace_app/models.py | Lumilam/D-Place | 3544163e22308919b5c27328dfaea8a1c0f3b291 | [
"MIT"
] | null | null | null | dplace_app/models.py | Lumilam/D-Place | 3544163e22308919b5c27328dfaea8a1c0f3b291 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.db import models
# Public API of this models module.
__all__ = [
    'ISOCode', 'Society', 'EnvironmentalCategory', 'EnvironmentalVariable',
    'EnvironmentalValue', 'CulturalVariable', 'CulturalValue', 'CulturalCategory',
    'CulturalCodeDescription', 'Source', 'Language', 'LanguageFamily', 'LanguageTree',
    'LanguageTreeLabels', 'LanguageTreeLabelsSequence', 'GeographicRegion']
# (db value, human label) choices for EnvironmentalVariable.units.
UNIT_CHOICES = (
    ('mm', 'mm'),
    ('℃', '℃'),
    ('mo', 'mo'),
    ('', ''),
    ('gC m-2 day-1', 'gC m-2 day-1')
)
# Depth levels of a language classification hierarchy.
# NOTE(review): CLASS_LEVELS is not referenced by any model visible in this
# file — confirm external use before removing.
CLASS_LEVELS = (
    (1, 'Family'),
    (2, 'Subfamily'),
    (3, 'Subsubfamily')
)
# Choices for LanguageFamily.scheme (which classification the family uses).
CLASSIFICATION_SCHEMES = (
    ('E', 'Ethnologue17',),
    ('R', 'Ethnologue17-Revised',),
    ('G', 'Glottolog',),
)
class ISOCode(models.Model):
    """A three-letter ISO code identifying a language."""
    iso_code = models.CharField(
        'ISO Code', db_index=True, max_length=3, unique=True, null=False)
    def __unicode__(self):
        return self.iso_code
    class Meta(object):
        verbose_name = "ISO Code"
class Society(models.Model):
    """An ethnographic society: its location, language, and coded data."""
    ext_id = models.CharField('External ID', db_index=True, unique=True, max_length=20)
    xd_id = models.CharField('Cross ID', db_index=True, default=None, null=True, max_length=10)
    name = models.CharField('Name', db_index=True, max_length=200)
    latitude = models.FloatField('Latitude', null=True)
    longitude = models.FloatField('Longitude', null=True)
    focal_year = models.CharField('Focal Year', null=True, blank=True, max_length=100)
    alternate_names = models.TextField(default="")
    original_name = models.CharField('ORIG_name', max_length=200, default=None, null=True)
    original_latitude = models.FloatField('ORIG_latitude', null=True)
    original_longitude = models.FloatField('ORIG_longitude', null=True)
    region = models.ForeignKey('GeographicRegion', null=True)
    source = models.ForeignKey('Source', null=True)
    language = models.ForeignKey('Language', null=True, related_name="societies")
    hraf_link = models.CharField('HRAF', null=True, default=None, max_length=200)
    chirila_link = models.CharField('CHIRILA', default=None, null=True, max_length=200)
    @property
    def location(self):
        """Mapping holding the society's [longitude, latitude] pair."""
        return {'coordinates': [self.longitude, self.latitude]}
    @property
    def original_location(self):
        """Mapping holding the originally recorded [longitude, latitude] pair."""
        return {'coordinates': [self.original_longitude, self.original_latitude]}
    def get_environmental_data(self):
        """Environmental values for this society, grouped by category name."""
        grouped = defaultdict(list)
        values = (self.environmentalvalue_set
                  .select_related('variable')
                  .order_by('variable__name')
                  .all())
        for value in values:
            variable = value.variable
            grouped[str(variable.category)].append({
                'name': variable.name,
                'value': format(value.value, '.4f'),
                'units': variable.units,
                'comment': value.comment
            })
        return grouped
    def get_cultural_trait_data(self):
        """Coded cultural values for this society, grouped by index category."""
        grouped = defaultdict(list)
        values = (self.culturalvalue_set
                  .select_related('code')
                  .select_related('variable')
                  .order_by('variable__label')
                  .all())
        for value in values:
            variable = value.variable
            # A value is listed once under each index category of its variable.
            for category in variable.index_categories.all():
                grouped[str(category)].append({
                    'id': value.id,
                    'label': variable.label,
                    'name': variable.name,
                    'code': value.coded_value,
                    'description': value.get_description(),
                    'year': value.focal_year,
                    'comment': value.comment,
                    'sources': value.references.all(),
                })
        return grouped
    def get_data_references(self):
        """Distinct sources cited by this society's values, sorted by author."""
        collected = []
        for value in self.culturalvalue_set.all():
            for reference in value.references.all():
                if reference not in collected:
                    collected.append(reference)
        collected.sort(key=lambda reference: reference.author)
        return collected
    def __unicode__(self):
        identity = (self.ext_id, self.name)
        return "%s - %s" % identity
    def get_absolute_url(self):
        return reverse("view_society", args=[self.ext_id])
    class Meta(object):
        verbose_name_plural = "Societies"
class EnvironmentalCategory(models.Model):
    """A grouping for environmental variables (EnvironmentalVariable.category)."""
    name = models.CharField(max_length=30, db_index=True, unique=True)
    def __unicode__(self):
        return self.name
    class Meta(object):
        verbose_name = "Category"
        verbose_name_plural = "Categories"
        ordering = ("name",)
class EnvironmentalVariable(models.Model):
    """An environmental variable, optionally carrying measurement units."""
    var_id = models.CharField(max_length=50, unique=True)
    name = models.CharField(max_length=50, unique=True)
    category = models.ForeignKey('EnvironmentalCategory', null=True)
    units = models.CharField(max_length=100, choices=UNIT_CHOICES)
    codebook_info = models.CharField(max_length=500, default='None')
    def __unicode__(self):
        # Append the unit suffix only when units are present.
        return "%s (%s)" % (self.name, self.units) if self.units else self.name
    class Meta(object):
        ordering = ["name"]
class EnvironmentalValue(models.Model):
    """A single measured value of an environmental variable for one society."""
    variable = models.ForeignKey('EnvironmentalVariable', related_name="values")
    value = models.FloatField(db_index=True)
    society = models.ForeignKey('Society')
    source = models.ForeignKey('Source', null=True)
    comment = models.TextField(default="")
    def __unicode__(self):
        return "%f" % self.value
    class Meta(object):
        ordering = ["variable"]
        # At most one value per (variable, society) pair.
        unique_together = ('variable', 'society')
        index_together = ['variable', 'value']
class CulturalVariable(models.Model):
    """
    A coded cultural variable.

    Variables in the Ethnographic Atlas have a number and a description,
    e.g. number 6, "Mode of Marriage (Primary)"; the number is converted
    to a label such as EA006.
    """
    label = models.CharField(max_length=25, db_index=True)
    name = models.CharField(max_length=200, db_index=True, default='Unknown')
    source = models.ForeignKey('Source', null=True)
    index_categories = models.ManyToManyField(
        'CulturalCategory', related_name='index_variables')
    niche_categories = models.ManyToManyField(
        'CulturalCategory', related_name='niche_variables')
    codebook_info = models.TextField(default='None')
    data_type = models.CharField(max_length=200, null=True)
    units = models.CharField(max_length=100, default='')
    def coded_societies(self):
        """Societies that have a coded value for this variable."""
        coded_values = self.values.all()
        return Society.objects.filter(culturalvalue__in=coded_values)
    def __unicode__(self):
        identity = (self.label, self.name)
        return "%s - %s" % identity
    class Meta(object):
        verbose_name = "Variable"
        ordering = ["label"]
class CulturalCategory(models.Model):
    """A category used to group cultural variables (as index or niche)."""
    name = models.CharField(max_length=30, db_index=True, unique=True)
    def __unicode__(self):
        return self.name
    class Meta(object):
        verbose_name = "Category"
        verbose_name_plural = "Categories"
        ordering = ["name"]
class CulturalCodeDescription(models.Model):
    """
    A textual description of one discrete coded value of a variable,
    e.g. code 3 meaning "26 - 35% Dependence".

    Not every value maps to a description (some coded values represent
    e.g. a 4-digit year), so this model is not used by every value.
    """
    variable = models.ForeignKey('CulturalVariable', db_index=True, related_name="codes")
    code = models.CharField(
        max_length=20, db_index=True, null=False, default='.')
    code_number = models.IntegerField(null=True, db_index=True)
    description = models.CharField(max_length=500, default='Unknown')
    short_description = models.CharField(max_length=500, default="")
    n = models.IntegerField(null=True, default=0)
    def save(self, *args, **kwargs):
        # Keep the integer mirror of ``code`` in sync before persisting.
        self.read_code_number()
        super(CulturalCodeDescription, self).save(*args, **kwargs)
    def read_code_number(self):
        """Mirror ``code`` into ``code_number`` when it parses as an integer."""
        try:
            parsed = int(self.code)
        except ValueError:
            # Non-numeric codes simply leave code_number untouched.
            return
        self.code_number = parsed
    def coded_societies(self):
        """Societies whose coded value equals this code."""
        value_filter = dict(culturalvalue__coded_value=self.code)
        return Society.objects.filter(**value_filter)
    def __unicode__(self):
        identity = (self.code, self.description)
        return "%s - %s" % identity
    class Meta(object):
        verbose_name = "Code"
        ordering = ("variable", "code_number", "code")
class CulturalValue(models.Model):
    """
    A society's coded value for a cultural variable.

    The value is typically a discrete code that maps to a
    CulturalCodeDescription, but some values (e.g. 4-digit years) have
    no associated description.
    """
    variable = models.ForeignKey('CulturalVariable', related_name="values")
    society = models.ForeignKey('Society')
    coded_value = models.CharField(max_length=100, db_index=True, null=False, default='.')
    coded_value_float = models.FloatField(null=True)
    code = models.ForeignKey('CulturalCodeDescription', db_index=True, null=True)
    source = models.ForeignKey('Source', null=True)
    comment = models.TextField(default="")
    references = models.ManyToManyField('Source', related_name="references")
    subcase = models.TextField(default="")
    focal_year = models.CharField(max_length=10, default="")
    def get_description(self):
        """Text of the associated code description, or '' when uncoded."""
        if self.code is None:
            return ''
        return self.code.description
    def __unicode__(self):
        return "{0}".format(self.coded_value)
    class Meta(object):
        verbose_name = "Value"
        ordering = ("variable", "coded_value")
        index_together = [
            ['variable', 'society', 'focal_year'],
            ['variable', 'coded_value', 'focal_year', 'subcase'],
            ['variable', 'code', 'focal_year'],
            ['society', 'coded_value', 'focal_year', 'subcase'],
            ['society', 'code', 'focal_year'],
        ]
        unique_together = ('variable', 'society', 'coded_value', 'comment', 'subcase', 'focal_year')
class Source(models.Model):
    """
    Stores references for VariableCodedValues, also for dataset sources.
    """
    # Not really sure if we should separate dataset sources from references (I
    # think we should), but since all the code has already been written with
    # this model, I won't change it yet.
    # text, because might be '1996', '1999-2001', or 'ND'
    year = models.CharField(max_length=30, db_index=True)
    author = models.CharField(max_length=50, db_index=True)
    # Longer free-text reference for the source.
    reference = models.CharField(max_length=500)
    name = models.CharField(max_length=100, db_index=True, default="")
    def __unicode__(self):
        return "%s (%s)" % (self.author, self.year)
    class Meta(object):
        # A source is identified by its author/year pair.
        unique_together = ('year', 'author')
class LanguageFamily(models.Model):
    """A language family under one classification scheme, with a cached count."""
    scheme = models.CharField(max_length=1, choices=CLASSIFICATION_SCHEMES, default='G')
    name = models.CharField(max_length=50, db_index=True)
    language_count = models.IntegerField(default=0, null=False)
    def update_counts(self):
        """Recompute and persist the cached count.

        NOTE(review): despite the field name, this counts the family's
        *societies* that have at least one cultural value.
        """
        self.language_count = sum(
            1
            for society in Society.objects.all().filter(language__family=self)
            if society.culturalvalue_set.count() > 0
        )
        self.save()
class Language(models.Model):
    """A language keyed by Glottolog code, with optional ISO code and family."""
    name = models.CharField(max_length=200, db_index=True)
    glotto_code = models.CharField(max_length=8, null=False, unique=True)
    # needs to be null=True because some glottolog languages do not have isocodes
    iso_code = models.ForeignKey('ISOCode', null=True)
    family = models.ForeignKey('LanguageFamily', null=True)
    def __unicode__(self):
        details = (self.name, self.iso_code, self.glotto_code)
        return "Language: %s, ISO Code %s, Glotto Code %s" % details
    def get_absolute_url(self):
        return reverse("view_language", args=[self.glotto_code])
    class Meta(object):
        verbose_name = "Language"
        unique_together = ('iso_code', 'glotto_code')
class GeographicRegion(models.Model):
    """A geographic region (with a TDWG code) used to locate societies."""
    level_2_re = models.FloatField()
    count = models.FloatField()
    region_nam = models.CharField(max_length=254)
    continent = models.CharField(max_length=254)
    tdwg_code = models.IntegerField()
    def __unicode__(self):
        return "Region: %s, Continent %s" % (self.region_nam, self.continent)
class LanguageTree(models.Model):
    """A language tree, stored as an uploaded file and/or a Newick string."""
    name = models.CharField(max_length=50, db_index=True)
    file = models.FileField(upload_to='language_trees', null=True)
    newick_string = models.TextField(default='')
    source = models.ForeignKey('Source', null=True)
    taxa = models.ManyToManyField('LanguageTreeLabels')
class LanguageTreeLabels(models.Model):
    """A taxon label in a language tree, linked to a language and societies."""
    languageTree = models.ForeignKey('LanguageTree')
    label = models.CharField(max_length=255, db_index=True)
    language = models.ForeignKey('Language', null=True)
    societies = models.ManyToManyField('Society', through="LanguageTreeLabelsSequence")
    class Meta:
        # Ordered by the through-table's fixed_order, descending.
        ordering = ("-languagetreelabelssequence__fixed_order",)
class LanguageTreeLabelsSequence(models.Model):
    """Through model giving each society a fixed position within a tree
    tip label's society list."""
    society = models.ForeignKey('Society')
    labels = models.ForeignKey('LanguageTreeLabels')
    fixed_order = models.PositiveSmallIntegerField(db_index=True)

    class Meta:
        ordering = ("-fixed_order",)
| 35.285347 | 128 | 0.663121 |
3d8c7b6bf579f87521248456b2ccbad17fedff88 | 4,053 | py | Python | TrainingExtensions/torch/test/python/test_qc_quantize_op.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/torch/test/python/test_qc_quantize_op.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/torch/test/python/test_qc_quantize_op.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | 1 | 2021-03-06T18:40:33.000Z | 2021-03-06T18:40:33.000Z | # -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import unittest
import torch
from aimet_common.defs import QuantScheme
from aimet_torch.qc_quantize_op import QcPostTrainingWrapper, QcQuantizeOpMode
class TestQcQuantizeOp(unittest.TestCase):
    """Smoke tests for AIMET's QcPostTrainingWrapper quantization wrapper.

    Both tests only exercise the flow and print the computed encodings;
    they make no assertions.
    """

    def test_update_stats_with_pymo(self):
        """Gather range statistics in ANALYSIS mode and compute an encoding."""
        device = torch.device('cpu')
        conv1 = torch.nn.Conv2d(4, 4, 1)
        # Wrap the conv layer with 8-bit weight/activation quantizers.
        quantize = QcPostTrainingWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest',
                                         quant_scheme=QuantScheme.post_training_tf_enhanced)
        input_var = torch.autograd.Variable(torch.randn(4, 4, 2, 2), requires_grad=False).to(device)
        print(input_var)
        # ANALYSIS mode: the forward pass only collects statistics.
        quantize.set_mode(QcQuantizeOpMode.ANALYSIS)
        output = quantize.forward(input_var)
        quantize.compute_encoding()
        actual_encoding = quantize.output_quantizer.encoding
        print("Encoding returned: min={}, max={}, offset={}. delta={}, bw={}"
              .format(actual_encoding.min, actual_encoding.max,
                      actual_encoding.offset, actual_encoding.delta, actual_encoding.bw))

    def test_quantize_dequantize_with_pymo(self):
        """Compute an encoding, then run a forward pass in ACTIVE
        (quantize-dequantize) mode using that encoding."""
        device = torch.device('cpu')
        conv1 = torch.nn.Conv2d(4, 4, 1)
        quantize = QcPostTrainingWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest',
                                         quant_scheme=QuantScheme.post_training_tf_enhanced)
        input_var = torch.autograd.Variable(torch.randn(4, 4, 2, 2), requires_grad=True).to(device)
        quantize.set_mode(QcQuantizeOpMode.ANALYSIS)
        output = quantize.forward(input_var)
        quantize.compute_encoding()
        # NOTE: assigned but unused below; kept for parity with the sibling test.
        actual_encoding = quantize.output_quantizer.encoding
        print("Encoding returned: min={}, max={}, offset={}. delta={}, bw={}"
              .format(quantize.output_quantizer.encoding.min,
                      quantize.output_quantizer.encoding.max,
                      quantize.output_quantizer.encoding.offset,
                      quantize.output_quantizer.encoding.delta,
                      quantize.output_quantizer.encoding.bw))
        # ACTIVE mode: forward now applies quantize-dequantize with the
        # encoding computed above.
        quantize.set_mode(QcQuantizeOpMode.ACTIVE)
        output = quantize.forward(input_var)
| 45.033333 | 100 | 0.676289 |
083f91d70689736b715dbf48886e4664dad8b8fa | 15,879 | py | Python | lib_src/src_python2.7.9/Python-2.7.9/build/lib.macosx-10.4-x86_64-2.7/_sysconfigdata.py | neoviki/python_dev_environment_macOS | bd22d369e39830b8459a0411e1bbb6b26fa4fd93 | [
"BSD-2-Clause"
] | null | null | null | lib_src/src_python2.7.9/Python-2.7.9/build/lib.macosx-10.4-x86_64-2.7/_sysconfigdata.py | neoviki/python_dev_environment_macOS | bd22d369e39830b8459a0411e1bbb6b26fa4fd93 | [
"BSD-2-Clause"
] | null | null | null | lib_src/src_python2.7.9/Python-2.7.9/build/lib.macosx-10.4-x86_64-2.7/_sysconfigdata.py | neoviki/python_dev_environment_macOS | bd22d369e39830b8459a0411e1bbb6b26fa4fd93 | [
"BSD-2-Clause"
] | null | null | null | # system configuration generated and used by the sysconfig module
build_time_vars = {'AC_APPLE_UNIVERSAL_BUILD': 0,
'AIX_GENUINE_CPLUSPLUS': 0,
'AR': 'ar',
'ARFLAGS': 'rc',
'ASDLGEN': './Parser/asdl_c.py',
'ASDLGEN_FILES': './Parser/asdl.py ./Parser/asdl_c.py',
'AST_ASDL': './Parser/Python.asdl',
'AST_C': 'Python/Python-ast.c',
'AST_C_DIR': 'Python',
'AST_H': 'Include/Python-ast.h',
'AST_H_DIR': 'Include',
'ATHEOS_THREADS': 0,
'BASECFLAGS': '-fno-strict-aliasing',
'BASEMODLIBS': '',
'BEOS_THREADS': 0,
'BINDIR': '/Users/viki/sample_app/python_interpreter/bin',
'BINLIBDEST': '/Users/viki/sample_app/python_interpreter/lib/python2.7',
'BLDLIBRARY': 'libpython2.7.a',
'BLDSHARED': 'gcc -bundle -undefined dynamic_lookup',
'BUILDEXE': '.exe',
'BUILDPYTHON': 'python.exe',
'CC': 'gcc',
'CCSHARED': '',
'CFLAGS': '-fno-strict-aliasing -g -O2 -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
'CFLAGSFORSHARED': '',
'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in',
'CONFIG_ARGS': "'--prefix=/Users/viki/sample_app/python_interpreter/'",
'CONFINCLUDEDIR': '/Users/viki/sample_app/python_interpreter/include',
'CONFINCLUDEPY': '/Users/viki/sample_app/python_interpreter/include/python2.7',
'COREPYTHONPATH': ':plat-darwin:plat-mac:plat-mac/lib-scriptpackages:lib-tk:lib-old',
'CPPFLAGS': '-I. -IInclude -I./Include',
'CXX': 'g++',
'C_THREADS': 0,
'DESTDIRS': '/Users/viki/sample_app/python_interpreter /Users/viki/sample_app/python_interpreter/lib /Users/viki/sample_app/python_interpreter/lib/python2.7 /Users/viki/sample_app/python_interpreter/lib/python2.7/lib-dynload',
'DESTLIB': '/Users/viki/sample_app/python_interpreter/lib/python2.7',
'DESTPATH': '',
'DESTSHARED': '/Users/viki/sample_app/python_interpreter/lib/python2.7/lib-dynload',
'DIRMODE': 755,
'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Demo Ext-dummy',
'DISTDIRS': 'Include Lib Misc Demo Ext-dummy',
'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in',
'DLINCLDIR': '.',
'DLLLIBRARY': '',
'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
'DYNLOADFILE': 'dynload_shlib.o',
'ENABLE_IPV6': 1,
'ENSUREPIP': 'no',
'EXE': '',
'EXEMODE': 755,
'EXTRAMACHDEPPATH': ':plat-mac:plat-mac/lib-scriptpackages',
'EXTRAPLATDIR': 'plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'EXTRATESTOPTS': '',
'EXTRA_CFLAGS': '',
'FILEMODE': 644,
'FLOCK_NEEDS_LIBBSD': 0,
'GETPGRP_HAVE_ARG': 0,
'GETTIMEOFDAY_NO_TZ': 0,
'GLHACK': '-Dclear=__GLclear',
'GNULD': 'no',
'GRAMMAR_C': 'Python/graminit.c',
'GRAMMAR_H': 'Include/graminit.h',
'GRAMMAR_INPUT': './Grammar/Grammar',
'HAVE_ACOSH': 1,
'HAVE_ADDRINFO': 1,
'HAVE_ALARM': 1,
'HAVE_ALLOCA_H': 1,
'HAVE_ALTZONE': 0,
'HAVE_ASINH': 1,
'HAVE_ASM_TYPES_H': 0,
'HAVE_ATANH': 1,
'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0,
'HAVE_BIND_TEXTDOMAIN_CODESET': 0,
'HAVE_BLUETOOTH_BLUETOOTH_H': 0,
'HAVE_BLUETOOTH_H': 0,
'HAVE_BROKEN_NICE': 0,
'HAVE_BROKEN_PIPE_BUF': 0,
'HAVE_BROKEN_POLL': 0,
'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
'HAVE_BROKEN_SEM_GETVALUE': 1,
'HAVE_BROKEN_UNSETENV': 0,
'HAVE_C99_BOOL': 1,
'HAVE_CHFLAGS': 1,
'HAVE_CHOWN': 1,
'HAVE_CHROOT': 1,
'HAVE_CLOCK': 1,
'HAVE_CONFSTR': 1,
'HAVE_CONIO_H': 0,
'HAVE_COPYSIGN': 1,
'HAVE_CTERMID': 1,
'HAVE_CTERMID_R': 1,
'HAVE_CURSES_H': 1,
'HAVE_CURSES_IS_TERM_RESIZED': 1,
'HAVE_CURSES_RESIZETERM': 1,
'HAVE_CURSES_RESIZE_TERM': 1,
'HAVE_DECL_ISFINITE': 1,
'HAVE_DECL_ISINF': 1,
'HAVE_DECL_ISNAN': 1,
'HAVE_DECL_TZNAME': 0,
'HAVE_DEVICE_MACROS': 1,
'HAVE_DEV_PTC': 0,
'HAVE_DEV_PTMX': 1,
'HAVE_DIRECT_H': 0,
'HAVE_DIRENT_H': 1,
'HAVE_DLFCN_H': 1,
'HAVE_DLOPEN': 1,
'HAVE_DUP2': 1,
'HAVE_DYNAMIC_LOADING': 1,
'HAVE_EPOLL': 0,
'HAVE_ERF': 1,
'HAVE_ERFC': 1,
'HAVE_ERRNO_H': 1,
'HAVE_EXECV': 1,
'HAVE_EXPM1': 1,
'HAVE_FCHDIR': 1,
'HAVE_FCHMOD': 1,
'HAVE_FCHOWN': 1,
'HAVE_FCNTL_H': 1,
'HAVE_FDATASYNC': 0,
'HAVE_FINITE': 1,
'HAVE_FLOCK': 1,
'HAVE_FORK': 1,
'HAVE_FORKPTY': 1,
'HAVE_FPATHCONF': 1,
'HAVE_FSEEK64': 0,
'HAVE_FSEEKO': 1,
'HAVE_FSTATVFS': 1,
'HAVE_FSYNC': 1,
'HAVE_FTELL64': 0,
'HAVE_FTELLO': 1,
'HAVE_FTIME': 1,
'HAVE_FTRUNCATE': 1,
'HAVE_GAI_STRERROR': 1,
'HAVE_GAMMA': 1,
'HAVE_GCC_ASM_FOR_X87': 1,
'HAVE_GETADDRINFO': 1,
'HAVE_GETCWD': 1,
'HAVE_GETC_UNLOCKED': 1,
'HAVE_GETGROUPS': 1,
'HAVE_GETHOSTBYNAME': 1,
'HAVE_GETHOSTBYNAME_R': 0,
'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
'HAVE_GETHOSTBYNAME_R_6_ARG': 0,
'HAVE_GETITIMER': 1,
'HAVE_GETLOADAVG': 1,
'HAVE_GETLOGIN': 1,
'HAVE_GETNAMEINFO': 1,
'HAVE_GETPAGESIZE': 1,
'HAVE_GETPEERNAME': 1,
'HAVE_GETPGID': 1,
'HAVE_GETPGRP': 1,
'HAVE_GETPID': 1,
'HAVE_GETPRIORITY': 1,
'HAVE_GETPWENT': 1,
'HAVE_GETRESGID': 0,
'HAVE_GETRESUID': 0,
'HAVE_GETSID': 1,
'HAVE_GETSPENT': 0,
'HAVE_GETSPNAM': 0,
'HAVE_GETTIMEOFDAY': 1,
'HAVE_GETWD': 1,
'HAVE_GRP_H': 1,
'HAVE_HSTRERROR': 1,
'HAVE_HYPOT': 1,
'HAVE_IEEEFP_H': 0,
'HAVE_INET_ATON': 1,
'HAVE_INET_PTON': 1,
'HAVE_INITGROUPS': 1,
'HAVE_INT32_T': 1,
'HAVE_INT64_T': 1,
'HAVE_INTTYPES_H': 1,
'HAVE_IO_H': 0,
'HAVE_KILL': 1,
'HAVE_KILLPG': 1,
'HAVE_KQUEUE': 1,
'HAVE_LANGINFO_H': 1,
'HAVE_LARGEFILE_SUPPORT': 0,
'HAVE_LCHFLAGS': 1,
'HAVE_LCHMOD': 1,
'HAVE_LCHOWN': 1,
'HAVE_LGAMMA': 1,
'HAVE_LIBDL': 1,
'HAVE_LIBDLD': 0,
'HAVE_LIBIEEE': 0,
'HAVE_LIBINTL_H': 0,
'HAVE_LIBREADLINE': 1,
'HAVE_LIBRESOLV': 0,
'HAVE_LIBUTIL_H': 0,
'HAVE_LINK': 1,
'HAVE_LINUX_NETLINK_H': 0,
'HAVE_LINUX_TIPC_H': 0,
'HAVE_LOG1P': 1,
'HAVE_LONG_DOUBLE': 1,
'HAVE_LONG_LONG': 1,
'HAVE_LSTAT': 1,
'HAVE_MAKEDEV': 1,
'HAVE_MEMMOVE': 1,
'HAVE_MEMORY_H': 1,
'HAVE_MKFIFO': 1,
'HAVE_MKNOD': 1,
'HAVE_MKTIME': 1,
'HAVE_MMAP': 1,
'HAVE_MREMAP': 0,
'HAVE_NCURSES_H': 1,
'HAVE_NDIR_H': 0,
'HAVE_NETPACKET_PACKET_H': 0,
'HAVE_NICE': 1,
'HAVE_OPENPTY': 1,
'HAVE_OSX105_SDK': 1,
'HAVE_PATHCONF': 1,
'HAVE_PAUSE': 1,
'HAVE_PLOCK': 0,
'HAVE_POLL': 1,
'HAVE_POLL_H': 1,
'HAVE_PROCESS_H': 0,
'HAVE_PROTOTYPES': 1,
'HAVE_PTH': 0,
'HAVE_PTHREAD_ATFORK': 1,
'HAVE_PTHREAD_DESTRUCTOR': 0,
'HAVE_PTHREAD_H': 1,
'HAVE_PTHREAD_INIT': 0,
'HAVE_PTHREAD_SIGMASK': 1,
'HAVE_PTY_H': 0,
'HAVE_PUTENV': 1,
'HAVE_READLINK': 1,
'HAVE_REALPATH': 1,
'HAVE_RL_CALLBACK': 1,
'HAVE_RL_CATCH_SIGNAL': 0,
'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1,
'HAVE_RL_COMPLETION_MATCHES': 1,
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 0,
'HAVE_RL_PRE_INPUT_HOOK': 1,
'HAVE_ROUND': 1,
'HAVE_SELECT': 1,
'HAVE_SEM_GETVALUE': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 0,
'HAVE_SEM_UNLINK': 1,
'HAVE_SETEGID': 1,
'HAVE_SETEUID': 1,
'HAVE_SETGID': 1,
'HAVE_SETGROUPS': 1,
'HAVE_SETITIMER': 1,
'HAVE_SETLOCALE': 1,
'HAVE_SETPGID': 1,
'HAVE_SETPGRP': 1,
'HAVE_SETREGID': 1,
'HAVE_SETRESGID': 0,
'HAVE_SETRESUID': 0,
'HAVE_SETREUID': 1,
'HAVE_SETSID': 1,
'HAVE_SETUID': 1,
'HAVE_SETVBUF': 1,
'HAVE_SHADOW_H': 0,
'HAVE_SIGACTION': 1,
'HAVE_SIGINTERRUPT': 1,
'HAVE_SIGNAL_H': 1,
'HAVE_SIGRELSE': 1,
'HAVE_SNPRINTF': 1,
'HAVE_SOCKADDR_SA_LEN': 1,
'HAVE_SOCKADDR_STORAGE': 1,
'HAVE_SOCKETPAIR': 1,
'HAVE_SPAWN_H': 1,
'HAVE_SSIZE_T': 1,
'HAVE_STATVFS': 1,
'HAVE_STAT_TV_NSEC': 0,
'HAVE_STAT_TV_NSEC2': 1,
'HAVE_STDARG_PROTOTYPES': 1,
'HAVE_STDINT_H': 1,
'HAVE_STDLIB_H': 1,
'HAVE_STRDUP': 1,
'HAVE_STRFTIME': 1,
'HAVE_STRINGS_H': 1,
'HAVE_STRING_H': 1,
'HAVE_STROPTS_H': 0,
'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0,
'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
'HAVE_STRUCT_STAT_ST_FLAGS': 1,
'HAVE_STRUCT_STAT_ST_GEN': 1,
'HAVE_STRUCT_STAT_ST_RDEV': 1,
'HAVE_STRUCT_TM_TM_ZONE': 1,
'HAVE_ST_BLOCKS': 1,
'HAVE_SYMLINK': 1,
'HAVE_SYSCONF': 1,
'HAVE_SYSEXITS_H': 1,
'HAVE_SYS_AUDIOIO_H': 0,
'HAVE_SYS_BSDTTY_H': 0,
'HAVE_SYS_DIR_H': 0,
'HAVE_SYS_EPOLL_H': 0,
'HAVE_SYS_EVENT_H': 1,
'HAVE_SYS_FILE_H': 1,
'HAVE_SYS_LOADAVG_H': 0,
'HAVE_SYS_LOCK_H': 1,
'HAVE_SYS_MKDEV_H': 0,
'HAVE_SYS_MODEM_H': 0,
'HAVE_SYS_NDIR_H': 0,
'HAVE_SYS_PARAM_H': 1,
'HAVE_SYS_POLL_H': 1,
'HAVE_SYS_RESOURCE_H': 1,
'HAVE_SYS_SELECT_H': 1,
'HAVE_SYS_SOCKET_H': 1,
'HAVE_SYS_STATVFS_H': 1,
'HAVE_SYS_STAT_H': 1,
'HAVE_SYS_TERMIO_H': 0,
'HAVE_SYS_TIMES_H': 1,
'HAVE_SYS_TIME_H': 1,
'HAVE_SYS_TYPES_H': 1,
'HAVE_SYS_UN_H': 1,
'HAVE_SYS_UTSNAME_H': 1,
'HAVE_SYS_WAIT_H': 1,
'HAVE_TCGETPGRP': 1,
'HAVE_TCSETPGRP': 1,
'HAVE_TEMPNAM': 1,
'HAVE_TERMIOS_H': 1,
'HAVE_TERM_H': 1,
'HAVE_TGAMMA': 1,
'HAVE_THREAD_H': 0,
'HAVE_TIMEGM': 1,
'HAVE_TIMES': 1,
'HAVE_TMPFILE': 1,
'HAVE_TMPNAM': 1,
'HAVE_TMPNAM_R': 0,
'HAVE_TM_ZONE': 1,
'HAVE_TRUNCATE': 1,
'HAVE_TZNAME': 0,
'HAVE_UCS4_TCL': 0,
'HAVE_UINT32_T': 1,
'HAVE_UINT64_T': 1,
'HAVE_UINTPTR_T': 1,
'HAVE_UNAME': 1,
'HAVE_UNISTD_H': 1,
'HAVE_UNSETENV': 1,
'HAVE_USABLE_WCHAR_T': 0,
'HAVE_UTIL_H': 1,
'HAVE_UTIMES': 1,
'HAVE_UTIME_H': 1,
'HAVE_WAIT3': 1,
'HAVE_WAIT4': 1,
'HAVE_WAITPID': 1,
'HAVE_WCHAR_H': 1,
'HAVE_WCSCOLL': 1,
'HAVE_WORKING_TZSET': 1,
'HAVE_ZLIB_COPY': 1,
'HAVE__GETPTY': 0,
'HGBRANCH': '',
'HGTAG': '',
'HGVERSION': '',
'HOST_GNU_TYPE': 'x86_64-apple-darwin19.6.0',
'HURD_C_THREADS': 0,
'INCLDIRSTOMAKE': '/Users/viki/sample_app/python_interpreter/include /Users/viki/sample_app/python_interpreter/include /Users/viki/sample_app/python_interpreter/include/python2.7 /Users/viki/sample_app/python_interpreter/include/python2.7',
'INCLUDEDIR': '/Users/viki/sample_app/python_interpreter/include',
'INCLUDEPY': '/Users/viki/sample_app/python_interpreter/include/python2.7',
'INSTALL': '/usr/bin/install -c',
'INSTALL_DATA': '/usr/bin/install -c -m 644',
'INSTALL_PROGRAM': '/usr/bin/install -c',
'INSTALL_SCRIPT': '/usr/bin/install -c',
'INSTALL_SHARED': '/usr/bin/install -c -m 555',
'INSTSONAME': 'libpython2.7.a',
'LDCXXSHARED': 'g++ -bundle -undefined dynamic_lookup',
'LDFLAGS': '',
'LDLAST': '',
'LDLIBRARY': 'libpython2.7.a',
'LDLIBRARYDIR': '',
'LDSHARED': 'gcc -bundle -undefined dynamic_lookup',
'LIBC': '',
'LIBDEST': '/Users/viki/sample_app/python_interpreter/lib/python2.7',
'LIBDIR': '/Users/viki/sample_app/python_interpreter/lib',
'LIBFFI_INCLUDEDIR': '',
'LIBM': '',
'LIBOBJDIR': 'Python/',
'LIBOBJS': '',
'LIBP': '/Users/viki/sample_app/python_interpreter/lib/python2.7',
'LIBPC': '/Users/viki/sample_app/python_interpreter/lib/pkgconfig',
'LIBPL': '/Users/viki/sample_app/python_interpreter/lib/python2.7/config',
'LIBRARY': 'libpython2.7.a',
'LIBRARY_OBJS': '\\',
'LIBS': '-ldl -framework CoreFoundation',
'LIBSUBDIRS': 'lib-tk lib-tk/test lib-tk/test/test_tkinter \\',
'LINKCC': 'gcc',
'LINKFORSHARED': '-u _PyMac_Error',
'LN': 'ln',
'LOCALMODLIBS': '',
'MACHDEP': 'darwin',
'MACHDEPPATH': ':plat-darwin',
'MACHDEPS': 'plat-darwin plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'MACHDEP_OBJS': 'Python/mactoolboxglue.o',
'MACHDESTLIB': '/Users/viki/sample_app/python_interpreter/lib/python2.7',
'MACH_C_THREADS': 0,
'MACOSX_DEPLOYMENT_TARGET': '10.4',
'MAINCC': 'gcc',
'MAJOR_IN_MKDEV': 0,
'MAJOR_IN_SYSMACROS': 0,
'MAKESETUP': './Modules/makesetup',
'MANDIR': '/Users/viki/sample_app/python_interpreter/share/man',
'MEMTESTOPTS': '-l -x test_subprocess test_io test_lib2to3 \\ -x test_dl test___all__ test_fork1 \\',
'MKDIR_P': './install-sh -c -d',
'MODLIBS': '',
'MODOBJS': 'Modules/threadmodule.o Modules/signalmodule.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/zipimport.o Modules/symtablemodule.o Modules/xxsubtype.o',
'MODULE_OBJS': '\\',
'MULTIARCH': '',
'MVWDELCH_IS_EXPRESSION': 1,
'OBJECT_OBJS': '\\',
'OLDPATH': ':lib-old',
'OPT': '-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
'OTHER_LIBTOOL_OPT': '',
'PACKAGE_BUGREPORT': 0,
'PACKAGE_NAME': 0,
'PACKAGE_STRING': 0,
'PACKAGE_TARNAME': 0,
'PACKAGE_URL': 0,
'PACKAGE_VERSION': 0,
'PARSER_HEADERS': '\\',
'PARSER_OBJS': '\\ Parser/myreadline.o Parser/tokenizer.o',
'PGEN': 'Parser/pgen',
'PGENOBJS': '\\ \\',
'PGENSRCS': '\\ \\',
'PGOBJS': '\\',
'PGSRCS': '\\',
'PLATDIR': 'plat-darwin',
'PLATMACDIRS': 'plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'PLATMACPATH': ':plat-mac:plat-mac/lib-scriptpackages',
'POBJS': '\\',
'POSIX_SEMAPHORES_NOT_ENABLED': 0,
'PROFILE_TASK': './Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck',
'PSRCS': '\\',
'PTHREAD_SYSTEM_SCHED_SUPPORTED': 0,
'PURIFY': '',
'PYLONG_BITS_IN_DIGIT': 0,
'PYTHON': 'python',
'PYTHONFRAMEWORK': '',
'PYTHONFRAMEWORKDIR': 'no-framework',
'PYTHONFRAMEWORKINSTALLDIR': '',
'PYTHONFRAMEWORKPREFIX': '',
'PYTHONPATH': ':plat-darwin:plat-mac:plat-mac/lib-scriptpackages:lib-tk:lib-old',
'PYTHON_FOR_BUILD': './python.exe -E',
'PYTHON_HEADERS': '\\',
'PYTHON_OBJS': '\\',
'PY_CFLAGS': '-fno-strict-aliasing -g -O2 -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -I. -IInclude -I./Include -DPy_BUILD_CORE',
'PY_FORMAT_LONG_LONG': '"ll"',
'PY_FORMAT_SIZE_T': '"z"',
'PY_UNICODE_TYPE': 'unsigned short',
'Py_DEBUG': 0,
'Py_ENABLE_SHARED': 0,
'Py_UNICODE_SIZE': 2,
'Py_USING_UNICODE': 1,
'QUICKTESTOPTS': '-l -x test_subprocess test_io test_lib2to3 \\',
'RANLIB': 'ranlib',
'RESSRCDIR': 'Mac/Resources/framework',
'RETSIGTYPE': 'void',
'RUNSHARED': '',
'SCRIPTDIR': '/Users/viki/sample_app/python_interpreter/lib',
'SETPGRP_HAVE_ARG': 0,
'SGI_ABI': '',
'SHELL': '/bin/sh',
'SHLIBS': '-ldl -framework CoreFoundation',
'SHLIB_EXT': '".so"',
'SIGNAL_OBJS': '',
'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 1,
'SITEPATH': '',
'SIZEOF_DOUBLE': 8,
'SIZEOF_FLOAT': 4,
'SIZEOF_FPOS_T': 8,
'SIZEOF_INT': 4,
'SIZEOF_LONG': 8,
'SIZEOF_LONG_DOUBLE': 16,
'SIZEOF_LONG_LONG': 8,
'SIZEOF_OFF_T': 8,
'SIZEOF_PID_T': 4,
'SIZEOF_PTHREAD_T': 8,
'SIZEOF_SHORT': 2,
'SIZEOF_SIZE_T': 8,
'SIZEOF_TIME_T': 8,
'SIZEOF_UINTPTR_T': 8,
'SIZEOF_VOID_P': 8,
'SIZEOF_WCHAR_T': 4,
'SIZEOF__BOOL': 1,
'SO': '.so',
'SRCDIRS': 'Parser Grammar Objects Python Modules Mac',
'SRC_GDB_HOOKS': './Tools/gdb/libpython.py',
'STDC_HEADERS': 1,
'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
'STRINGLIB_HEADERS': '\\',
'SUBDIRS': '',
'SUBDIRSTOO': 'Include Lib Misc Demo',
'SVNVERSION': 'svnversion .',
'SYSLIBS': '',
'SYS_SELECT_WITH_SYS_TIME': 1,
'TANH_PRESERVES_ZERO_SIGN': 1,
'TCLTK_INCLUDES': '',
'TCLTK_LIBS': '',
'TESTOPTS': '-l',
'TESTPATH': '',
'TESTPROG': './Lib/test/regrtest.py',
'TESTPYTHON': './python.exe -Wd -3 -E -tt',
'TESTPYTHONOPTS': '',
'THREADOBJ': 'Python/thread.o',
'TIME_WITH_SYS_TIME': 1,
'TKPATH': ':lib-tk',
'TM_IN_SYS_TIME': 0,
'UNICODE_OBJS': 'Objects/unicodeobject.o Objects/unicodectype.o',
'UNIVERSALSDK': '',
'USE_TOOLBOX_OBJECT_GLUE': 1,
'VA_LIST_IS_ARRAY': 1,
'VERSION': '2.7',
'WANT_SIGFPE_HANDLER': 0,
'WANT_WCTYPE_FUNCTIONS': 0,
'WINDOW_HAS_FLAGS': 0,
'WITH_DOC_STRINGS': 1,
'WITH_DYLD': 1,
'WITH_LIBINTL': 0,
'WITH_NEXT_FRAMEWORK': 0,
'WITH_PYMALLOC': 1,
'WITH_THREAD': 1,
'WITH_TSC': 0,
'WITH_VALGRIND': 0,
'X87_DOUBLE_ROUNDING': 0,
'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
'abs_builddir': '/Users/viki/Documents/Documents_30GB_sciebo/isy_project_shadow_hand_ui/python_dev_environment_macOS/lib_src/src_python2.7.9/Python-2.7.9',
'abs_srcdir': '/Users/viki/Documents/Documents_30GB_sciebo/isy_project_shadow_hand_ui/python_dev_environment_macOS/lib_src/src_python2.7.9/Python-2.7.9',
'build': 'x86_64-apple-darwin19.6.0',
'datarootdir': '/Users/viki/sample_app/python_interpreter/share',
'exec_prefix': '/Users/viki/sample_app/python_interpreter',
'host': 'x86_64-apple-darwin19.6.0',
'prefix': '/Users/viki/sample_app/python_interpreter',
'srcdir': '.'}
| 29.847744 | 257 | 0.69614 |
3df67ee700b201c315695cf983885d62fc7c22cb | 260 | py | Python | core/urls.py | knoriy/django-ajax-record | 369268849063347569ab42a2e79539e7b6a84874 | [
"MIT"
] | 4 | 2021-12-12T01:02:08.000Z | 2022-03-18T18:06:11.000Z | core/urls.py | knoriy/django-ajax-record | 369268849063347569ab42a2e79539e7b6a84874 | [
"MIT"
] | 1 | 2022-01-18T17:55:12.000Z | 2022-01-19T10:16:35.000Z | core/urls.py | knoriy/django-ajax-record | 369268849063347569ab42a2e79539e7b6a84874 | [
"MIT"
] | 2 | 2022-01-19T09:40:34.000Z | 2022-01-20T14:03:20.000Z | from django.urls import path
from . import views
# URL namespace used when reversing, e.g. {% url "core:index" %}.
app_name = "core"

urlpatterns = [
    path("", views.index, name="index"),
    path("record/", views.record, name="record"),
    # Detail view keyed by the record's UUID.
    path("record/detail/<uuid:id>/", views.record_detail, name="record_detail"),
]
| 21.666667 | 80 | 0.665385 |
c015614b48f293cfba50719abff91f572c67c325 | 676 | py | Python | restrito/admin.py | ricmedeiroos/AC9-atualizado | e7ff317830c6429629498a0a5cc63a9d62320c0f | [
"Apache-2.0"
] | null | null | null | restrito/admin.py | ricmedeiroos/AC9-atualizado | e7ff317830c6429629498a0a5cc63a9d62320c0f | [
"Apache-2.0"
] | 6 | 2020-06-05T20:57:34.000Z | 2022-03-11T23:47:43.000Z | restrito/admin.py | ricmedeiroos/AC9-atualizado | e7ff317830c6429629498a0a5cc63a9d62320c0f | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from restrito.models import SolicitacaoMatricula
class SolicitacaoMatriculaAdmin(admin.ModelAdmin):
    """Admin configuration for enrollment requests (SolicitacaoMatricula)."""
    list_display = ('disciplina_ofertada', 'aluno', 'status', 'data')
    list_filter = ('status', 'disciplina_ofertada__disciplina')
    fields = ('aluno', 'disciplina_ofertada', 'coordenador', 'status')
    # Only `status` is editable; the remaining fields are informational.
    readonly_fields = ('aluno', 'disciplina_ofertada', 'coordenador')

    def save_model(self, request, obj, form, change):
        # Stamp the request with the coordinator who saved it.
        # NOTE(review): assumes request.user has a related `coordenador`;
        # saving as a non-coordinator staff user would raise — confirm.
        obj.coordenador = request.user.coordenador
        super(SolicitacaoMatriculaAdmin, self).save_model(request, obj, form, change)


admin.site.register(SolicitacaoMatricula, SolicitacaoMatriculaAdmin)
| 42.25 | 85 | 0.75 |
6902cd2c4e455cc320454af618677cd85ab849e9 | 1,160 | py | Python | src/accounts/models.py | earth-emoji/citizens4 | eb5f5b0191f7c8690037c9adac07eb4affb40de4 | [
"MIT"
] | null | null | null | src/accounts/models.py | earth-emoji/citizens4 | eb5f5b0191f7c8690037c9adac07eb4affb40de4 | [
"MIT"
] | 10 | 2020-02-12T00:46:48.000Z | 2022-03-11T23:51:27.000Z | src/accounts/models.py | earth-emoji/citizens4 | eb5f5b0191f7c8690037c9adac07eb4affb40de4 | [
"MIT"
] | null | null | null | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
class PoliticalParty(models.Model):
    """A political party that user profiles may belong to."""
    name = models.CharField(max_length=120)
    # Year the party was established (optional).
    established = models.PositiveIntegerField(null=True, blank=True)
    info = models.TextField(null=True, blank=True)
    url = models.URLField(null=True, blank=True)

    class Meta:
        verbose_name_plural = _('Political Parties')

    def __str__(self):
        return self.name
class UserProfileManager(models.Manager):
    """Default manager for UserProfile."""
    # NOTE(review): `use_for_related_fields` was deprecated in Django 1.10
    # and removed in 2.0 — confirm the project's target Django version.
    use_for_related_fields = True
# Create your models here.
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to the auth user."""
    # NOTE(review): uuid.uuid1 embeds host MAC address and timestamp;
    # uuid4 is the usual choice for opaque slugs — confirm before changing
    # (only affects defaults for new rows).
    slug = models.SlugField(unique=True, default=uuid.uuid1, blank=True)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name="profile", on_delete=models.CASCADE)
    # contacts = models.ManyToManyField('self', related_name='contacts', blank=True)
    party = models.ForeignKey(PoliticalParty, on_delete=models.CASCADE, related_name='members', null=True, blank=True)

    objects = UserProfileManager()

    def __str__(self):
        return self.user.username
| 36.25 | 118 | 0.747414 |
eb4e810db2a3472190923db611d8d8d928768518 | 4,100 | py | Python | FaultPrediction/rbm_train.py | lixymiracle/faultprediction | 1dd650604b7be6b3bb88f235ed0d4c10111a0dd9 | [
"MIT"
] | 2 | 2018-08-18T04:33:45.000Z | 2018-08-18T04:33:54.000Z | FaultPrediction/rbm_train.py | lixymiracle/faultprediction | 1dd650604b7be6b3bb88f235ed0d4c10111a0dd9 | [
"MIT"
] | null | null | null | FaultPrediction/rbm_train.py | lixymiracle/faultprediction | 1dd650604b7be6b3bb88f235ed0d4c10111a0dd9 | [
"MIT"
] | null | null | null | import tensorflow as tf
import os
import rbm
import input_data
import tfrecord_manager
# Raw training data: CSV read by input_data.get_data() (parameters + fault codes).
data_path = '/home/lixiangyu/Desktop/train_data.csv'
# Output directory for TensorBoard summaries and model checkpoints.
logs_train_dir = '/home/lixiangyu/Documents/logs_rbm/'
# TFRecord file built from the CSV on first run (see train()).
tfrecord_path = 'train.tfrecord'
BATCH_SIZE = 128   # examples per training batch
CAPACITY = 2000    # input-queue capacity for the TFRecord reader
NUM_THREADS = 32   # reader threads feeding the input queue
def build_model(X, w1, b1, wo, bo):
    """Two-layer sigmoid network: hidden layer (w1, b1), output layer (wo, bo)."""
    hidden = tf.nn.sigmoid(tf.matmul(X, w1) + b1)
    return tf.nn.sigmoid(tf.matmul(hidden, wo) + bo)
def init_weight(shape):
    """Weight variable drawn from N(0, 0.01) with the given shape."""
    initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial)
def init_bias(dim):
    """Bias variable of `dim` zeros."""
    zeros = tf.zeros([dim])
    return tf.Variable(zeros)
def train():
    """Pretrain an RBM feature layer on the fault data, then train a
    softmax classifier on top of it (TF1 graph/session style).

    Side effects: builds `tfrecord_path` from the raw CSV on first run;
    writes TensorBoard graph/summaries and checkpoints under
    `logs_train_dir`.
    """
    # (A commented-out MNIST reference implementation of the same
    # RBM-plus-classifier pipeline was removed from this spot.)
    # Build the TFRecord file from the raw CSV once.
    if not os.path.exists(tfrecord_path):
        para, faultcode = input_data.get_data(data_path)
        tfrecord_manager.create_tfrecord(para, faultcode, tfrecord_path)
    # Queue-based batched reader (TF1 input pipeline).
    para_batch, faultcode_batch = tfrecord_manager.read_tfrecord(tfrecord_path, BATCH_SIZE, CAPACITY, NUM_THREADS)
    p_data = para_batch
    fc_data = faultcode_batch
    # RBM with 100 visible units and 500 hidden units; one contrastive
    # divergence step yields the pretrained weights/biases.
    rbm_layer = rbm.RBM("faultprediction", 100, 500)
    rbm_w, rbm_vb, rbm_hb = rbm_layer.cd1(p_data)  # rbm_vb is unused below
    # Output layer: 500 hidden units -> 887 fault-code classes.
    wo = init_weight([500, 887])
    bo = init_bias(887)
    logits = build_model(p_data, rbm_w, rbm_hb, wo, bo)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=fc_data)
    train_loss = tf.reduce_mean(cross_entropy)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(train_loss)
    # Top-1 accuracy over the batch.
    train_acc = tf.nn.in_top_k(logits, fc_data, 1)
    train_acc = tf.reduce_mean(tf.cast(train_acc, tf.float16))
    # NOTE(review): no tf.summary.* ops are created above, so merge_all()
    # returns None and sess.run(summary_op) below should fail — confirm.
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    sess.run(init)
    # Start the input-queue threads that feed para_batch/faultcode_batch.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(10000):
            if coord.should_stop():
                break
            _, tr_loss, tr_acc = sess.run([train_op, train_loss, train_acc])
            # Report (and try to log summaries) every 50 steps.
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tr_loss, tr_acc * 100))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            # Checkpoint every 2000 steps and at the final step.
            if step % 2000 == 0 or (step + 1) == 10000:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print("done")
    finally:
        # Always stop the reader threads, even on error.
        coord.request_stop()
    coord.join(threads)
    sess.close()
# Entry point: run the full RBM pretraining + classifier training.
if __name__ == '__main__':
    train()
| 33.333333 | 114 | 0.64561 |
bec169c1fc8fa997a601ba1968092fb55dac001b | 14,251 | py | Python | benchmark/alpa/benchmark_3d_one_case_gpt_bert.py | TarzanZhao/alpa | c477805bfb182788ed07e8b3a8e2924333433507 | [
"Apache-2.0"
] | null | null | null | benchmark/alpa/benchmark_3d_one_case_gpt_bert.py | TarzanZhao/alpa | c477805bfb182788ed07e8b3a8e2924333433507 | [
"Apache-2.0"
] | null | null | null | benchmark/alpa/benchmark_3d_one_case_gpt_bert.py | TarzanZhao/alpa | c477805bfb182788ed07e8b3a8e2924333433507 | [
"Apache-2.0"
] | null | null | null | """Benchmark one case of inter-op + intra-op parallelism."""
import jax
import jax.numpy as jnp
import numpy as np
import optax
import ray
import time
import alpa
from alpa import (parallelize, global_config, get_global_cluster,
set_global_virtual_physical_mesh, PipeshardParallel,
ManualPipeshardParallel, AutoShardingOption,
manual_layer_construction, automatic_layer_construction,
automatic_remat)
from alpa.model.bert_model import BertConfig, FlaxBertForMaskedLMModule
from alpa.model.model_util import TrainState
from alpa.model.gpt_model import FlaxGPTForLMModule
from alpa.pipeline_parallel.stage_construction import get_last_dp_result
from alpa.timer import timers
from alpa.util import print_used_time, to_str_round, GB
from benchmark.util import compute_gpt_parameter_count, compute_gpt_tflops
def report_pipeline_breakdown(executable, timer_names, niter):
    """Print per-timer execution costs and their share of the overall time.

    For each timer in `timer_names`, prints per-iteration cost and its
    percentage of the overall cost; finally prints the remaining
    unattributed ("Others") cost per iteration.

    Args:
        executable: object exposing get_execution_time_costs(warmup, timer_name).
        timer_names: timers to break the overall cost into.
        niter: number of iterations (used when a timer reports no costs).
    """
    overall_costs = executable.get_execution_time_costs(warmup=0,
                                                        timer_name="overall")
    print(">>> overall: {}...".format(overall_costs))

    remaining = list(overall_costs)
    remaining_pct = [100.0] * niter
    for timer_name in timer_names:
        costs = executable.get_execution_time_costs(warmup=0,
                                                    timer_name=timer_name)
        if not costs:
            costs = [0.0] * niter
        percentage = [c / total * 100
                      for c, total in zip(costs, overall_costs)]
        remaining = [r - c for r, c in zip(remaining, costs)]
        remaining_pct = [r - p for r, p in zip(remaining_pct, percentage)]
        print_string = ",".join(str(c) + f" ({p:.1f}) "
                                for c, p in zip(costs, percentage))
        print(">>> {}: {}".format(timer_name, print_string))

    # Unattributed overhead = overall minus everything accounted for above.
    print_string = ",".join(" " + str(r) + f" ({p:.1f})"
                            for r, p in zip(remaining, remaining_pct))
    print(">>> {}: {}".format("Others: ", print_string))
def create_train_state(rngkey, model, batch, dtype):
    """Create a TrainState with AdamW, using dummy-initialized parameters.

    Weight decay is masked out for 1-D parameters (biases, layer-norm
    scales).  Mixed precision is enabled when `dtype` is float16.
    """
    params = model.init_dummy(rngkey, batch["input_ids"],
                              batch["attention_mask"], batch["token_type_ids"],
                              batch["position_ids"])

    def decay_mask_fn(pytree):
        # Apply weight decay only to parameters with more than one dimension.
        return jax.tree_map(lambda p: p.ndim > 1, pytree)

    use_mixed_precision = (dtype == jnp.float16)
    optimizer = optax.chain(
        # TODO(lmzheng): re-enable optax.clip_by_global_norm(1.0) once
        # reduce-scatter supports it.
        optax.adamw(learning_rate=1e-2, mask=decay_mask_fn))
    return TrainState.create(apply_fn=model.apply,
                             params=params,
                             tx=optimizer,
                             mixed_precision=use_mixed_precision,
                             dynamic_scale=None)
def create_train_state_aval(rngkey, model, batch, dtype):
    """Create an abstract (shape-only) TrainState with AdamW.

    Same configuration as create_train_state(), but parameters are
    abstract values produced by jax.eval_shape, so no real initialization
    or memory allocation happens.
    """
    params = jax.eval_shape(model.init, rngkey, batch["input_ids"],
                            batch["attention_mask"], batch["token_type_ids"],
                            batch["position_ids"])

    def decay_mask_fn(pytree):
        # Apply weight decay only to parameters with more than one dimension.
        return jax.tree_map(lambda p: p.ndim > 1, pytree)

    use_mixed_precision = (dtype == jnp.float16)
    optimizer = optax.chain(
        # TODO(lmzheng): re-enable optax.clip_by_global_norm(1.0) once
        # reduce-scatter supports it.
        optax.adamw(learning_rate=1e-2, mask=decay_mask_fn))
    return TrainState.create_aval(apply_fn=model.apply,
                                  params=params,
                                  tx=optimizer,
                                  mixed_precision=use_mixed_precision,
                                  dynamic_scale=None)
def get_train_step(parallel_method, auto_layer, num_manual_pipeline_stages,
                   num_auto_layers, auto_remat_mode, num_auto_remat_layers):
    """Build the parallelized training-step function.

    Args:
        parallel_method: alpa parallelization method passed to @parallelize.
        auto_layer: if False, rely on manual layer markers in the model;
            if True, use automatic layer construction.
        num_manual_pipeline_stages: unused here; kept for call-site
            compatibility.
        num_auto_layers: layer count for automatic layer construction.
        auto_remat_mode: None, "fine_grained", or "coarse_grained".
        num_auto_remat_layers: layer count for fine-grained remat.

    Returns:
        A parallelized train_step(state, batch, rng_key) -> new_state.
    """

    @parallelize(method=parallel_method)
    def train_step(state, batch, rng_key):

        def loss_func(params):
            rngs = {"dropout": rng_key}
            logits = state.apply_fn(params,
                                    batch["input_ids"],
                                    batch["attention_mask"],
                                    batch["token_type_ids"],
                                    batch["position_ids"],
                                    deterministic=True,
                                    rngs=rngs)[0]
            # Mask out padding positions (label id 0) from the loss.
            label_mask = jnp.where(batch["labels"] > 0, 1.0, 0.0)
            labels = jax.nn.one_hot(batch["labels"], logits.shape[-1])
            loss = -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1),
                            axis=-1)
            loss = (label_mask * loss).sum() / label_mask.sum()
            return loss

        if not auto_layer:
            loss_func = manual_layer_construction(loss_func)
        else:
            if auto_remat_mode == "fine_grained":
                loss_func = automatic_remat(loss_func,
                                            layer_num=num_auto_remat_layers)
                loss_func = automatic_layer_construction(
                    loss_func, layer_num=num_auto_layers)
            else:
                # BUG FIX: the original used `auto_remat_mode is "coarse_grained"`,
                # which tests object identity with a string literal (CPython
                # emits a SyntaxWarning; equality is not guaranteed even for
                # equal strings).  Use `==` for value comparison.
                use_remat = (auto_remat_mode == "coarse_grained")
                loss_func = automatic_layer_construction(
                    loss_func, remat_layer=use_remat, layer_num=num_auto_layers)

        grads = alpa.grad(loss_func)(state.params)
        new_state = state.apply_gradients(grads=grads)
        # TODO(lmzheng): add dynamic scaling for mixed-precision training
        return new_state

    return train_step
def benchmark_gpt_bert_internal(model_type,
                                benchmark_case,
                                niter,
                                num_hosts,
                                num_devices_per_host,
                                aval_train_state=True):
    """Benchmark one GPT/BERT training configuration on the current cluster.

    Args:
        model_type: "bert" or "gpt".
        benchmark_case: tuple of (batch_size, seq_len, hidden_size,
            num_layers, num_heads, vocab_size, num_micro_batches,
            parallel_mode, parallel_args). `parallel_mode` is one of
            "search", "load_solution", or "manual"; the shape of
            `parallel_args` differs per mode (see the branches below).
        niter: number of timed training iterations.
        num_hosts: number of hosts taken from the global cluster.
        num_devices_per_host: devices used on each selected host.
        aval_train_state: if True, build the train state abstractly
            (shapes only) instead of materializing real weights.

    Returns:
        (parameter_count, max_mem_allocated, latencies, tflops,
        tflops_ckpt, compilation_times) concatenated with
        `get_last_dp_result()`.
    """
    print_used_time(None)

    # Model configs
    (batch_size, seq_len, hidden_size, num_layers, num_heads, vocab_size,
     num_micro_batches, parallel_mode, parallel_args) = benchmark_case
    dtype = jnp.float16
    tie_word_embeddings = False

    # Connect to the cluster
    virtual_mesh = get_global_cluster().get_virtual_physical_mesh(
        host_ids=list(range(num_hosts)),
        num_devices_per_host=num_devices_per_host)
    set_global_virtual_physical_mesh(virtual_mesh)

    # Parallel configs.  Each branch sets the same group of flags
    # (auto_layer, remat mode, manual-marker flags) plus `method`.
    if parallel_mode == "search":
        # Automatic stage search; `auto_stage_option` is forwarded verbatim.
        prefer_reduce_scatter, use_remat, num_auto_layers, auto_stage_option = parallel_args
        auto_layer = True
        auto_remat_mode = "coarse_grained" if use_remat else None
        num_auto_remat_layers = None
        add_manual_layer_marker = add_manual_remat = num_manual_pipeline_stages = False
        method = PipeshardParallel(
            stage_mode="auto",
            num_micro_batches=num_micro_batches,
            default_auto_sharding_option=AutoShardingOption(
                prefer_reduce_scatter=prefer_reduce_scatter),
            **auto_stage_option)
    elif parallel_mode == "load_solution":
        # Replay a previously found stage assignment.
        prefer_reduce_scatter, use_remat, num_auto_layers, manual_stage_option = parallel_args
        auto_layer = True
        auto_remat_mode = "fine_grained" if use_remat else None
        num_auto_remat_layers = num_layers
        add_manual_layer_marker = add_manual_remat = num_manual_pipeline_stages = False
        method = ManualPipeshardParallel(
            *manual_stage_option,
            num_micro_batches=num_micro_batches,
            default_auto_sharding_option=AutoShardingOption(
                prefer_reduce_scatter=prefer_reduce_scatter))
    elif parallel_mode == "manual":
        # Fully manual (dp, op, pp) mesh specification.
        (prefer_reduce_scatter, use_remat, (dp, op, pp),
         force_batch_dim_mapping) = parallel_args
        as_option = AutoShardingOption(
            prefer_reduce_scatter=prefer_reduce_scatter)
        if force_batch_dim_mapping:
            as_option.force_batch_dim_to_mesh_dim = 0
        auto_layer = False
        num_auto_layers = auto_remat_mode = num_auto_remat_layers = None
        add_manual_layer_marker = True
        add_manual_remat = use_remat
        logical_mesh_shape = (dp, op)
        num_manual_pipeline_stages = pp
        # Map the dp*op logical mesh onto physical hosts/devices: fit on one
        # host if possible, otherwise span whole hosts evenly.
        num_mesh_devices = np.prod(logical_mesh_shape)
        num_devices_per_host = virtual_mesh.num_devices_per_host
        if num_mesh_devices <= num_devices_per_host:
            physical_mesh_shape = (1, num_mesh_devices)
        else:
            assert num_mesh_devices % num_devices_per_host == 0
            physical_mesh_shape = (num_mesh_devices // num_devices_per_host,
                                   num_devices_per_host)
        method = ManualPipeshardParallel(
            num_micro_batches=num_micro_batches,
            forward_stage_layer_ids=[[i] for i in range(pp)],
            submesh_physical_shapes=[physical_mesh_shape] * pp,
            submesh_logical_shapes=[logical_mesh_shape] * pp,
            submesh_autosharding_option_dicts=[{}] * pp,
            default_auto_sharding_option=as_option)
    else:
        # NOTE(review): this validates `parallel_mode`, but the message says
        # "model" — consider rewording to "Invalid parallel mode".
        raise ValueError(f"Invalid model: {parallel_mode}")

    # Prepare input batch (dummy all-ones tensors; only shapes/dtypes matter
    # for benchmarking).
    batch = {
        "input_ids": jnp.ones((batch_size, seq_len), dtype=jnp.int32),
        "attention_mask": jnp.ones((batch_size, seq_len), dtype=jnp.int32),
        "token_type_ids": jnp.ones((batch_size, seq_len), dtype=jnp.int32),
        "position_ids": jnp.ones((batch_size, seq_len), dtype=jnp.int32),
        "labels": jnp.ones((batch_size, seq_len), dtype=jnp.int32),
    }
    print_used_time("Prepare input")

    # Init train state
    if model_type == "bert":
        model = FlaxBertForMaskedLMModule(BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_attention_heads=num_heads,
            intermediate_size=hidden_size * 4,
            num_hidden_layers=num_layers,
            type_vocab_size=0,
            tie_word_embeddings=tie_word_embeddings,
            gradient_checkpointing=add_manual_remat,
            add_manual_pipeline_markers=add_manual_layer_marker,
            pipeline_mp_size=num_manual_pipeline_stages,
        ),
                                          dtype=dtype)
    elif model_type == "gpt":
        # NOTE(review): the GPT module is configured through BertConfig here —
        # presumably intentional in this codebase; confirm.
        model = FlaxGPTForLMModule(BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_attention_heads=num_heads,
            intermediate_size=hidden_size * 4,
            num_hidden_layers=num_layers,
            type_vocab_size=0,
            tie_word_embeddings=tie_word_embeddings,
            gradient_checkpointing=add_manual_remat,
            add_manual_pipeline_markers=add_manual_layer_marker,
            pipeline_mp_size=num_manual_pipeline_stages,
        ),
                                   dtype=dtype)
    else:
        raise ValueError(f"Invalid model {model_type}")

    rngkey = jax.random.PRNGKey(0)
    if aval_train_state:
        state = create_train_state_aval(rngkey, model, batch, dtype)
    else:
        state = create_train_state(rngkey, model, batch, dtype)
    print_used_time("Create train state")

    # Compile executable
    train_step = get_train_step(method, auto_layer, num_manual_pipeline_stages,
                                num_auto_layers, auto_remat_mode,
                                num_auto_remat_layers)
    executable = train_step.get_executable(state, batch, rngkey)
    print_used_time("Compile (driver)")

    if parallel_mode == "search":
        # Only the stage-search path records these compilation timers.
        compilation_times = {
            k: timers(k).elapsed() for k in [
                "stage-construction", "stage-construction-dp",
                "stage-construction-compilation", "stage-construction-profiling"
            ]
        }
        print(
            f"compilation time breakdown: {to_str_round(compilation_times, 2)}")
    else:
        compilation_times = None

    # Dump hlo ir for debugging
    stage_hlo_texts = executable.get_hlo_text()
    for i in range(len(stage_hlo_texts)):
        with open(f"tmp/stage_{i}.hlo", "w") as fout:
            fout.write(stage_hlo_texts[i])
    with open(f"tmp/resharding_tasks.txt", "w") as fout:
        fout.write(executable.print_resharding_tasks())

    executable.sync()
    print_used_time("Compile (worker)")

    # Benchmark latency without driver overhead
    for i in range(niter):
        print(f"Iteration {i} ...")
        state = train_step(state, batch, rngkey)
        executable.sync()

    latencies = executable.get_execution_time_costs(warmup=1)
    max_mem_allocated = executable.mesh_group.get_max_memory_allocated()

    # Benchmark latency with driver overhead
    # (deliberately disabled; flip `if False` to re-enable this measurement)
    if False:
        global_config.use_dummy_value_for_benchmarking = False
        global_config.pipeline_sync_for_timer = False
        number = niter
        executable.sync()
        tic = time.time()
        for i in range(number):
            state = train_step(state, batch, rngkey)
        executable.sync()
        e2e_latency = (time.time() - tic) / number
        print(f"latency with dirver overhead: {e2e_latency:.3f}")
    print_used_time("Benchmark")

    # Compute statistics: TFLOPS without and with activation checkpointing
    # accounting (the trailing `True` flag).
    tflops = compute_gpt_tflops(batch_size, seq_len, num_layers, hidden_size,
                                vocab_size, virtual_mesh.num_devices,
                                np.mean(latencies))
    tflops_ckpt = compute_gpt_tflops(batch_size, seq_len, num_layers,
                                     hidden_size,
                                     vocab_size, virtual_mesh.num_devices,
                                     np.mean(latencies), True)
    parameter_count = compute_gpt_parameter_count(num_layers, hidden_size,
                                                  vocab_size)
    #report_pipeline_breakdown(executable, ["resharding_send", "resharding_recv", "compute"], niter)
    return (parameter_count, max_mem_allocated, latencies, tflops, tflops_ckpt,
            compilation_times) + get_last_dp_result()
| 42.540299 | 100 | 0.620097 |
b12bb7fddf499c7bb4300fe4b93b84210f66bf20 | 821 | py | Python | djangojobboard/core/api/serializers.py | jnnnn1999/Online-Thai-Video-Content-Search-System | a3291ec340d97f6dea3b23ad311692dedb72db60 | [
"MIT"
] | null | null | null | djangojobboard/core/api/serializers.py | jnnnn1999/Online-Thai-Video-Content-Search-System | a3291ec340d97f6dea3b23ad311692dedb72db60 | [
"MIT"
] | null | null | null | djangojobboard/core/api/serializers.py | jnnnn1999/Online-Thai-Video-Content-Search-System | a3291ec340d97f6dea3b23ad311692dedb72db60 | [
"MIT"
] | null | null | null | from rest_framework.serializers import ModelSerializer
from djangojobboard.core.models import File, Transcribe
from rest_framework import serializers
class FileSerializer(ModelSerializer):
    """Serializer for `File` objects exposed through the API.

    `date_created` and `user` are set server-side, so they are read-only
    for clients.
    """
    # Removed dead commented-out `is_owner` code: it was never enabled and
    # referenced `self.context.user`, which is incorrect (`context` is a
    # dict; the request user lives at `self.context["request"].user`).

    class Meta:
        model = File
        fields = (
            "id",
            "title",
            "user",
            "youtube_website",
            "date_created",
            "youtube_id_db",
        )
        read_only_fields = ("date_created", "user")
class TranscribeSerializer(serializers.ModelSerializer):
    """Serializer for `Transcribe` rows (word-level timing data for a file)."""

    class Meta:
        model = Transcribe
        # Field order is part of the serialized output; keep it stable.
        fields = ("word", "start_time", "end_time", "file_id", "id")
cdcdf7451657e9534f6eb1f046c865f51546a361 | 25,936 | py | Python | tensorflow_datasets/core/dataset_info.py | daniel-trejobanos/ts_datasets_hypoxia | 032ead3f0656fc30a423837c0ae397534fc819aa | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/dataset_info.py | daniel-trejobanos/ts_datasets_hypoxia | 032ead3f0656fc30a423837c0ae397534fc819aa | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/dataset_info.py | daniel-trejobanos/ts_datasets_hypoxia | 032ead3f0656fc30a423837c0ae397534fc819aa | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DatasetInfo records the information we know about a dataset.
This includes things that we know about the dataset statically, i.e.:
- schema
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- feature statistics (in each split)
- etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import json
import os
import posixpath
import tempfile
from absl import logging
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import api_utils
from tensorflow_datasets.core import dataset_utils
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.features import top_level_feature
from tensorflow_datasets.core.proto import dataset_info_pb2
from tensorflow_datasets.core.proto import json_format
from tensorflow_datasets.core.utils import gcs_utils
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# Name of the file to output the DatasetInfo protobuf object.
DATASET_INFO_FILENAME = "dataset_info.json"
LICENSE_FILENAME = "LICENSE"
INFO_STR = """tfds.core.DatasetInfo(
name='{name}',
version={version},
description='{description}',
homepage='{homepage}',
features={features},
total_num_examples={total_num_examples},
splits={splits},
supervised_keys={supervised_keys},
citation={citation},
redistribution_info={redistribution_info},
)
"""
# TODO(tfds): Do we require to warn the user about the peak memory used while
# constructing the dataset?
class DatasetInfo(object):
  """Information about a dataset.

  `DatasetInfo` documents datasets, including its name, version, and features.
  See the constructor arguments and properties for a full list.

  Note: Not all fields are known on construction and may be updated later
  by `compute_dynamic_properties`. For example: the min and max values of a
  feature is typically updated during data generation (i.e. on calling
  builder.download_and_prepare()`).
  """

  @api_utils.disallow_positional_args
  def __init__(self,
               builder,
               description=None,
               features=None,
               supervised_keys=None,
               homepage=None,
               urls=None,
               citation=None,
               metadata=None,
               redistribution_info=None):
    """Constructs DatasetInfo.

    Args:
      builder: `DatasetBuilder`, dataset builder for this info.
      description: `str`, description of this dataset.
      features: `tfds.features.FeaturesDict`, Information on the feature dict
        of the `tf.data.Dataset()` object from the `builder.as_dataset()`
        method.
      supervised_keys: `tuple` of `(input_key, target_key)`, Specifies the
        input feature and the label for supervised learning, if applicable for
        the dataset. The keys correspond to the feature names to select in
        `info.features`. When calling `tfds.core.DatasetBuilder.as_dataset()`
        with `as_supervised=True`, the `tf.data.Dataset` object will yield
        the (input, target) defined here.
      homepage: `str`, optional, the homepage for this dataset.
      urls: DEPRECATED, use `homepage` instead.
      citation: `str`, optional, the citation to use for this dataset.
      metadata: `tfds.core.Metadata`, additonal object which will be
        stored/restored with the dataset. This allows for storing additional
        information with the dataset.
      redistribution_info: `dict`, optional, information needed for
        redistribution, as specified in `dataset_info_pb2.RedistributionInfo`.
        The content of the `license` subfield will automatically be written to a
        LICENSE file stored with the dataset.
    """
    self._builder = builder

    self._info_proto = dataset_info_pb2.DatasetInfo(
        name=builder.name,
        description=description,
        version=str(builder._version),  # pylint: disable=protected-access
        citation=citation,
        redistribution_info=dataset_info_pb2.RedistributionInfo(
            **redistribution_info) if redistribution_info else None)

    if urls:  # TODO(epot):Delete field once every user have been migrated
      raise ValueError("`urls=` field is deprecated. Please use "
                       "`homepage='{}'` instead.".format(urls[0]))
    if homepage:
      self._info_proto.location.urls[:] = [homepage]

    if features:
      if not isinstance(features, top_level_feature.TopLevelFeature):
        raise ValueError(
            "DatasetInfo.features only supports FeaturesDict or Sequence at "
            "the top-level. Got {}".format(features))
      features._set_top_level()  # pylint: disable=protected-access
    self._features = features
    self._splits = splits_lib.SplitDict(self._builder.name)
    if supervised_keys is not None:
      assert isinstance(supervised_keys, tuple)
      assert len(supervised_keys) == 2
      self._info_proto.supervised_keys.input = supervised_keys[0]
      self._info_proto.supervised_keys.output = supervised_keys[1]

    if metadata and not isinstance(metadata, Metadata):
      raise ValueError(
          "Metadata should be a `tfds.core.Metadata` instance. Received "
          "{}".format(metadata))
    self._metadata = metadata

    # Is this object initialized with both the static and the dynamic data?
    self._fully_initialized = False

  @property
  def as_proto(self):
    return self._info_proto

  @property
  def name(self):
    return self.as_proto.name

  @property
  def full_name(self):
    """Full canonical name: (<dataset_name>/<config_name>/<version>)."""
    names = [self._builder.name]
    if self._builder.builder_config:
      names.append(self._builder.builder_config.name)
    names.append(str(self.version))
    return posixpath.join(*names)

  @property
  def description(self):
    return self.as_proto.description

  @property
  def version(self):
    return self._builder.version

  @property
  def homepage(self):
    urls = self.as_proto.location.urls
    tfds_homepage = "https://www.tensorflow.org/datasets/catalog/{}".format(
        self.name)
    # NOTE(review): `urls and urls[0] or tfds_homepage` falls back to the
    # catalog page when the URL list is empty OR its first entry is falsy
    # (e.g. an empty string).
    return urls and urls[0] or tfds_homepage

  @property
  def citation(self):
    return self.as_proto.citation

  @property
  def data_dir(self):
    return self._builder.data_dir

  @property
  def dataset_size(self):
    """Generated dataset files size, in bytes."""
    # For old datasets, maybe empty.
    return sum(split.num_bytes for split in self.splits.values())

  @property
  def download_size(self):
    """Downloaded files size, in bytes."""
    # Fallback to deprecated `size_in_bytes` if `download_size` is empty.
    return self.as_proto.download_size or self.as_proto.size_in_bytes

  @download_size.setter
  def download_size(self, size):
    self.as_proto.download_size = size

  @property
  def features(self):
    return self._features

  @property
  def metadata(self):
    return self._metadata

  @property
  def supervised_keys(self):
    # Returns None when the dataset declared no supervised (input, target)
    # pair.
    if not self.as_proto.HasField("supervised_keys"):
      return None
    supervised_keys = self.as_proto.supervised_keys
    return (supervised_keys.input, supervised_keys.output)

  @property
  def redistribution_info(self):
    return self.as_proto.redistribution_info

  @property
  def splits(self):
    # Return a copy so callers cannot mutate the internal SplitDict.
    return self._splits.copy()

  def update_splits_if_different(self, split_dict):
    """Overwrite the splits if they are different from the current ones.

    * If splits aren't already defined or different (ex: different number of
      shards), then the new split dict is used. This will trigger stats
      computation during download_and_prepare.
    * If splits are already defined in DatasetInfo and similar (same names and
      shards): keep the restored split which contains the statistics (restored
      from GCS or file)

    Args:
      split_dict: `tfds.core.SplitDict`, the new split
    """
    assert isinstance(split_dict, splits_lib.SplitDict)

    # If splits are already defined and identical, then we do not update
    if self._splits and splits_lib.check_splits_equals(
        self._splits, split_dict):
      return

    self._set_splits(split_dict)

  def _set_splits(self, split_dict):
    """Split setter (private method)."""
    # Update the dictionary representation.
    # Use from/to proto for a clean copy
    self._splits = split_dict.copy()

    # Update the proto
    del self.as_proto.splits[:]  # Clear previous
    for split_info in split_dict.to_proto():
      self.as_proto.splits.add().CopyFrom(split_info)

  @property
  def initialized(self):
    """Whether DatasetInfo has been fully initialized."""
    return self._fully_initialized

  def _dataset_info_path(self, dataset_info_dir):
    return os.path.join(dataset_info_dir, DATASET_INFO_FILENAME)

  def _license_path(self, dataset_info_dir):
    return os.path.join(dataset_info_dir, LICENSE_FILENAME)

  def compute_dynamic_properties(self):
    self._compute_dynamic_properties(self._builder)
    self._fully_initialized = True

  def _compute_dynamic_properties(self, builder):
    """Update from the DatasetBuilder."""
    # Fill other things by going over the dataset.
    splits = self.splits
    for split_info in utils.tqdm(
        splits.values(), desc="Computing statistics...", unit=" split"):
      try:
        split_name = split_info.name
        # Fill DatasetFeatureStatistics.
        dataset_feature_statistics, schema = get_dataset_feature_statistics(
            builder, split_name)

        # Add the statistics to this split.
        split_info.statistics.CopyFrom(dataset_feature_statistics)

        # Set the schema at the top-level since this is independent of the
        # split.
        self.as_proto.schema.CopyFrom(schema)

      except tf.errors.InvalidArgumentError:
        # This means there is no such split, even though it was specified in the
        # info, the least we can do is to log this.
        logging.error(("%s's info() property specifies split %s, but it "
                       "doesn't seem to have been generated. Please ensure "
                       "that the data was downloaded for this split and re-run "
                       "download_and_prepare."), self.name, split_name)
        raise

    # Set splits to trigger proto update in setter
    self._set_splits(splits)

  @property
  def as_json(self):
    return json_format.MessageToJson(self.as_proto, sort_keys=True)

  def write_to_directory(self, dataset_info_dir):
    """Write `DatasetInfo` as JSON to `dataset_info_dir`."""
    # Save the metadata from the features (vocabulary, labels,...)
    if self.features:
      self.features.save_metadata(dataset_info_dir)

    # Save any additional metadata
    if self.metadata is not None:
      self.metadata.save_metadata(dataset_info_dir)

    if self.redistribution_info.license:
      with tf.io.gfile.GFile(self._license_path(dataset_info_dir), "w") as f:
        f.write(self.redistribution_info.license)

    with tf.io.gfile.GFile(self._dataset_info_path(dataset_info_dir), "w") as f:
      f.write(self.as_json)

  def read_from_directory(self, dataset_info_dir):
    """Update DatasetInfo from the JSON file in `dataset_info_dir`.

    This function updates all the dynamically generated fields (num_examples,
    hash, time of creation,...) of the DatasetInfo.

    This will overwrite all previous metadata.

    Args:
      dataset_info_dir: `str` The directory containing the metadata file. This
        should be the root directory of a specific dataset version.
    """
    if not dataset_info_dir:
      raise ValueError(
          "Calling read_from_directory with undefined dataset_info_dir.")

    json_filename = self._dataset_info_path(dataset_info_dir)

    # Load the metadata from disk
    parsed_proto = read_from_json(json_filename)

    # Update splits
    split_dict = splits_lib.SplitDict.from_proto(self.name, parsed_proto.splits)
    self._set_splits(split_dict)

    # Restore the feature metadata (vocabulary, labels names,...)
    if self.features:
      self.features.load_metadata(dataset_info_dir)

    if self.metadata is not None:
      self.metadata.load_metadata(dataset_info_dir)

    # Update fields which are not defined in the code. This means that
    # the code will overwrite fields which are present in
    # dataset_info.json.
    for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
      field_value = getattr(self._info_proto, field_name)
      field_value_restored = getattr(parsed_proto, field_name)

      # HasField raises ValueError for repeated/non-optional fields, in which
      # case truthiness of the value is used instead.
      try:
        is_defined = self._info_proto.HasField(field_name)
      except ValueError:
        is_defined = bool(field_value)

      try:
        is_defined_in_restored = parsed_proto.HasField(field_name)
      except ValueError:
        is_defined_in_restored = bool(field_value_restored)

      # If field is defined in code, we ignore the value
      if is_defined:
        if field_value != field_value_restored:
          logging.info(
              "Field info.%s from disk and from code do not match. Keeping "
              "the one from code.", field_name)
        continue

      # If the field is also not defined in JSON file, we do nothing
      if not is_defined_in_restored:
        continue

      # Otherwise, we restore the dataset_info.json value
      if field.type == field.TYPE_MESSAGE:
        field_value.MergeFrom(field_value_restored)
      else:
        setattr(self._info_proto, field_name, field_value_restored)

    if self._builder._version != self.version:  # pylint: disable=protected-access
      raise AssertionError(
          "The constructed DatasetInfo instance and the restored proto version "
          "do not match. Builder version: {}. Proto version: {}".format(
              self._builder._version, self.version))  # pylint: disable=protected-access

    # Mark as fully initialized.
    self._fully_initialized = True

  def initialize_from_bucket(self):
    """Initialize DatasetInfo from GCS bucket info files."""
    # In order to support Colab, we use the HTTP GCS API to access the metadata
    # files. They are copied locally and then loaded.
    tmp_dir = tempfile.mkdtemp("tfds")
    # NOTE(review): the GCS lookup below is disabled in this fork
    # (`data_files = False`), so this method always returns immediately and
    # the download loop is dead code — confirm this is intentional.
    # data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
    data_files = False
    if not data_files:
      return
    logging.info("Loading info from GCS for %s", self.full_name)
    for fname in data_files:
      out_fname = os.path.join(tmp_dir, os.path.basename(fname))
      gcs_utils.download_gcs_file(fname, out_fname)
    self.read_from_directory(tmp_dir)

  def __repr__(self):
    splits_pprint = _indent("\n".join(["{"] + [
        "    '{}': {},".format(k, split.num_examples)
        for k, split in sorted(self.splits.items())
    ] + ["}"]))
    features_pprint = _indent(repr(self.features))
    citation_pprint = _indent('"""{}"""'.format(self.citation.strip()))
    return INFO_STR.format(
        name=self.name,
        version=self.version,
        description=self.description,
        total_num_examples=self.splits.total_num_examples,
        features=features_pprint,
        splits=splits_pprint,
        citation=citation_pprint,
        homepage=self.homepage,
        supervised_keys=self.supervised_keys,
        # Proto add a \n that we strip.
        redistribution_info=str(self.redistribution_info).strip())
def _indent(content):
  """Indent every line of `content` by four spaces, except the first one."""
  head, *tail = content.split("\n")
  return "\n".join([head] + ["    " + line for line in tail])
#
#
# This part is quite a bit messy and can be easily simplified with TFDV
# libraries, we can cut down the complexity by implementing cases on a need to
# do basis.
#
# My understanding of possible TF's types and shapes and kinds
# (ex: SparseTensor) is limited, please shout hard and guide on implementation.
#
#

# Maps a tf dtype to the tf_metadata schema feature type used in statistics.
# Anything not listed here falls back to schema_pb2.BYTES (see the `.get`
# call sites below).
_FEATURE_TYPE_MAP = {
    tf.float16: schema_pb2.FLOAT,
    tf.float32: schema_pb2.FLOAT,
    tf.float64: schema_pb2.FLOAT,
    tf.int8: schema_pb2.INT,
    tf.int16: schema_pb2.INT,
    tf.int32: schema_pb2.INT,
    tf.int64: schema_pb2.INT,
    tf.uint8: schema_pb2.INT,
    tf.uint16: schema_pb2.INT,
    tf.uint32: schema_pb2.INT,
    tf.uint64: schema_pb2.INT,
}

# Maps a schema feature type to the corresponding FeatureNameStatistics type.
_SCHEMA_TYPE_MAP = {
    schema_pb2.INT: statistics_pb2.FeatureNameStatistics.INT,
    schema_pb2.FLOAT: statistics_pb2.FeatureNameStatistics.FLOAT,
    schema_pb2.BYTES: statistics_pb2.FeatureNameStatistics.BYTES,
    schema_pb2.STRUCT: statistics_pb2.FeatureNameStatistics.STRUCT,
}

# TODO(afrozm): What follows below can *VERY EASILY* be done by TFDV - rewrite
# this section once they are python 3 ready.
def get_dataset_feature_statistics(builder, split):
  """Calculate statistics for the specified split.

  Iterates over every example of the split once, counting examples and
  tracking per-feature presence and numeric min/max, then builds the
  schema and statistics protos from what was observed.

  Args:
    builder: `DatasetBuilder`, used to materialize the split via
      `as_dataset`.
    split: split name to compute statistics for.

  Returns:
    A `(DatasetFeatureStatistics, Schema)` proto pair.
  """
  statistics = statistics_pb2.DatasetFeatureStatistics()

  # Make this to the best of our abilities.
  schema = schema_pb2.Schema()

  dataset = builder.as_dataset(split=split)

  # Just computing the number of examples for now.
  statistics.num_examples = 0

  # Feature dictionaries: presence count and numeric min/max per feature.
  feature_to_num_examples = collections.defaultdict(int)
  feature_to_min = {}
  feature_to_max = {}

  # Pass 1: stream over the whole split once, accumulating counters.
  np_dataset = dataset_utils.as_numpy(dataset)
  for example in utils.tqdm(np_dataset, unit=" examples", leave=False):
    statistics.num_examples += 1

    assert isinstance(example, dict)

    feature_names = sorted(example.keys())
    for feature_name in feature_names:

      # Update the number of examples this feature appears in.
      feature_to_num_examples[feature_name] += 1

      feature_np = example[feature_name]

      # For compatibility in graph and eager mode, we can get PODs here and
      # everything may not be neatly wrapped up in numpy's ndarray.
      feature_dtype = type(feature_np)

      if isinstance(feature_np, np.ndarray):
        # If we have an empty array, then don't proceed further with computing
        # statistics on it.
        if feature_np.size == 0:
          continue

        feature_dtype = feature_np.dtype.type

      feature_min, feature_max = None, None
      is_numeric = (np.issubdtype(feature_dtype, np.number) or
                    feature_dtype == np.bool_)
      if is_numeric:
        feature_min = np.min(feature_np)
        feature_max = np.max(feature_np)

      # TODO(afrozm): What if shapes don't match? Populate ValueCount? Add
      # logic for that.

      # Set or update the min, max.
      if is_numeric:
        if ((feature_name not in feature_to_min) or
            (feature_to_min[feature_name] > feature_min)):
          feature_to_min[feature_name] = feature_min

        if ((feature_name not in feature_to_max) or
            (feature_to_max[feature_name] < feature_max)):
          feature_to_max[feature_name] = feature_max

  # Start here, we've processed all examples.
  # Pass 2: translate the accumulated counters into schema/statistics protos.

  output_shapes_dict = tf.compat.v1.data.get_output_shapes(dataset)
  output_types_dict = tf.compat.v1.data.get_output_types(dataset)

  for feature_name in sorted(feature_to_num_examples.keys()):
    # Try to fill in the schema.
    feature = schema.feature.add()
    feature.name = feature_name

    # TODO(afrozm): Make this work with nested structures, currently the Schema
    # proto has no support for it.
    maybe_feature_shape = output_shapes_dict[feature_name]
    if not isinstance(maybe_feature_shape, tf.TensorShape):
      logging.error(
          "Statistics generation doesn't work for nested structures yet")
      continue

    for dim in maybe_feature_shape.as_list():
      # We denote `None`s as -1 in the shape proto.
      feature.shape.dim.add().size = dim if dim else -1
    feature_type = output_types_dict[feature_name]
    feature.type = _FEATURE_TYPE_MAP.get(feature_type, schema_pb2.BYTES)

    common_statistics = statistics_pb2.CommonStatistics()
    common_statistics.num_non_missing = feature_to_num_examples[feature_name]
    common_statistics.num_missing = (
        statistics.num_examples - common_statistics.num_non_missing)

    feature_name_statistics = statistics.features.add()
    feature_name_statistics.name = feature_name

    # TODO(afrozm): This can be skipped, since type information was added to
    # the Schema.
    feature_name_statistics.type = _SCHEMA_TYPE_MAP.get(
        feature.type, statistics_pb2.FeatureNameStatistics.BYTES)

    if feature.type == schema_pb2.INT or feature.type == schema_pb2.FLOAT:
      numeric_statistics = statistics_pb2.NumericStatistics()
      # Uses `.get` as Sequence(int) containing only empty array won't contains
      # any value.
      numeric_statistics.min = feature_to_min.get(feature_name, 0)
      numeric_statistics.max = feature_to_max.get(feature_name, 0)
      numeric_statistics.common_stats.CopyFrom(common_statistics)
      feature_name_statistics.num_stats.CopyFrom(numeric_statistics)
    else:
      # Let's shove it into BytesStatistics for now.
      bytes_statistics = statistics_pb2.BytesStatistics()
      bytes_statistics.common_stats.CopyFrom(common_statistics)
      feature_name_statistics.bytes_stats.CopyFrom(bytes_statistics)

  return statistics, schema
def read_from_json(json_filename):
  """Parse the JSON file at `json_filename` into a `DatasetInfo` proto."""
  with tf.io.gfile.GFile(json_filename) as json_file:
    serialized = json_file.read()
  # Round-trips the JSON produced by `DatasetInfo.as_json`.
  return json_format.Parse(serialized, dataset_info_pb2.DatasetInfo())
@six.add_metaclass(abc.ABCMeta)
class Metadata(dict):
  """Abstract container for dataset-level metadata in `DatasetInfo`.

  `builder.info.metadata` lets a dataset expose additional general
  information that is not specific to any single feature or example.

  Subclasses must implement `save_metadata` and `load_metadata`. See
  `tfds.core.MetadataDict` for a simple dict-backed implementation that
  persists its content as a JSON file.
  """

  @abc.abstractmethod
  def save_metadata(self, data_dir):
    """Persist the metadata under `data_dir`."""
    raise NotImplementedError()

  @abc.abstractmethod
  def load_metadata(self, data_dir):
    """Restore metadata previously saved under `data_dir`."""
    raise NotImplementedError()
class MetadataDict(Metadata, dict):
  """A `tfds.core.Metadata` implementation backed by a plain `dict`.

  The content is serialized to and from a JSON file.
  """

  def _build_filepath(self, data_dir):
    # All metadata is stored in a single JSON file in the dataset directory.
    return os.path.join(data_dir, "metadata.json")

  def save_metadata(self, data_dir):
    """Serialize the dict content to `metadata.json` under `data_dir`."""
    with tf.io.gfile.GFile(self._build_filepath(data_dir), "w") as json_file:
      json.dump(self, json_file)

  def load_metadata(self, data_dir):
    """Replace the current content with what was saved under `data_dir`."""
    self.clear()
    with tf.io.gfile.GFile(self._build_filepath(data_dir), "r") as json_file:
      self.update(json.load(json_file))
class BeamMetadataDict(MetadataDict):
  """A `tfds.core.Metadata` object supporting Beam-generated datasets."""

  def __init__(self, *args, **kwargs):
    super(BeamMetadataDict, self).__init__(*args, **kwargs)
    # Scratch directory where each PValue's single element is written as a
    # one-shard JSON file by the Beam pipeline.
    self._tempdir = tempfile.mkdtemp("tfds_beam_metadata")

  def _temp_filepath(self, key):
    return os.path.join(self._tempdir, "%s.json" % key)

  def __setitem__(self, key, item):
    """Creates write sink for beam PValues or sets value of key in `dict`.

    If the item is a PValue, it is expected to contain exactly one element,
    which will be written out as a temporary JSON file once the beam pipeline
    runs. These outputs will be loaded and stored in a single JSON when
    `save_metadata` is called after the pipeline completes.

    Args:
      key: hashable type, the key for the item.
      item: `beam.pvalue.PValue` or other, the metadata value.
    """
    beam = lazy_imports_lib.lazy_imports.apache_beam
    if isinstance(item, beam.pvalue.PValue):
      if key in self:
        raise ValueError("Already added PValue with key: %s" % key)
      logging.info("Lazily adding metadata item with Beam: %s", key)

      def _to_json(item_list):
        # Enforce the one-element contract before serializing.
        if len(item_list) != 1:
          raise ValueError(
              "Each metadata PValue must contain a single element. Got %d." %
              len(item_list))
        item = item_list[0]
        return json.dumps(item)

      # Deferred: this pipeline fragment only runs when the Beam job executes.
      _ = (item
           | "metadata_%s_tolist" % key >> beam.combiners.ToList()
           | "metadata_%s_tojson" % key >> beam.Map(_to_json)
           | "metadata_%s_write" % key >> beam.io.WriteToText(
               self._temp_filepath(key),
               num_shards=1,
               shard_name_template=""))
    # The PValue itself is stored in the dict until `save_metadata` replaces
    # it with the materialized JSON value.
    super(BeamMetadataDict, self).__setitem__(key, item)

  def save_metadata(self, data_dir):
    """Save the metadata inside the beam job."""
    beam = lazy_imports_lib.lazy_imports.apache_beam
    # Swap every PValue entry for the value the pipeline wrote to disk, then
    # clean up the scratch directory and save as a regular MetadataDict.
    for key, item in self.items():
      if isinstance(item, beam.pvalue.PValue):
        with tf.io.gfile.GFile(self._temp_filepath(key), "r") as f:
          self[key] = json.load(f)
    tf.io.gfile.rmtree(self._tempdir)
    super(BeamMetadataDict, self).save_metadata(data_dir)
| 35.724518 | 88 | 0.707087 |
b59275a152d65cc54bf20eb2d835a9cf3a0e35f6 | 6,446 | py | Python | Python Data Analysis/Course Materials/Python/Matplotlib.py | TanishT/General-Python-Projects | 274880f76fea166703c15aa537ec2c0c85114c6e | [
"MIT"
] | 1 | 2021-03-11T03:28:25.000Z | 2021-03-11T03:28:25.000Z | Python Data Analysis/Course Materials/Python/Matplotlib.py | TanishT/General-Python-Projects | 274880f76fea166703c15aa537ec2c0c85114c6e | [
"MIT"
] | null | null | null | Python Data Analysis/Course Materials/Python/Matplotlib.py | TanishT/General-Python-Projects | 274880f76fea166703c15aa537ec2c0c85114c6e | [
"MIT"
] | null | null | null | #control b to show build output and graph
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 10, 1000) #generates an array of 1000 numbers between 0 and 10
#formatting graph to make it more professional
plt.rcParams.update({'font.size' : 15}) #changinig font-size to 15px
plt.plot(x, np.sin(x), color = "black", linestyle = "dotted", label = "sin(x)" ) #random nums act as x-cor, sin(x) value at those x-cor = y-cor
plt.title("Graph of Sin(x)")
plt.axis([0, 10, -1.05, 1.05]) #first 2 are x-axs, second 2 are y-axis
plt.legend(loc = "lower left")
plt.xlabel('x')
plt.ylabel('sin(x)')
plt.show()
y = np.sin(x[:, np.newaxis] + np.pi * np.arange(0, 2, 0.5))
lines = plt.plot(x, y)
# lines is a list of plt.Line2D instances
print(plt.legend(lines[:2], ['first', 'second']))
#creating graph that shows error levels and certainity
xCoord = np.linspace(0, 10, 50)
unsurePercent = 0.2
yCoord = np.sin(xCoord) + unsurePercent * np.random.randn(50) #generates noisy sin curve b/c of unsure% times random num
plt.errorbar(xCoord, yCoord, yerr = unsurePercent, fmt = ".k", ecolor= 'green', elinewidth = 3, capsize = 5);
plt.show()
"""
#error bar generates lines that extend from yCoord * 0.8 - Ycoord * 1.2 to show full range, plt.errorbar(xCoord, yCoord, yerr = unsurePercent, fmt = ".k", ecolor= 'green', elinewidth = 3, capsize = 5);
#error bar generates lines that extend from yCoord * 0.8 - Ycoord * 1.2 to show full range
"""
fig = plt.figure(figsize = (12, 12))
x = np.linspace(0, 10, 1000)
y = np.sin(x)
for i in range (6):
fig.add_subplot(3, 2, i+1) #fig.add_subplot creates set of graphs next to each other, first digit is the number of rows, the second the number of columns, and the third the index of the subplot.
plt.style.use(plt.style.available[i]) #prints graph in style in rotation
plt.plot(x, y)
plt.text(s = plt.style.available[i], x = 2, y = 1, color = 'red')
#multiple graphs
# Change default font size
plt.rcParams.update({'font.size': 15})
fig = plt.figure()
ax = plt.axes()
plt.show()
# Solid line, color specified by its name
plt.plot(x, np.sin(x - 0), color='blue', linestyle='solid', label='blue')
plt.show()
# Short name for color, dashed line
plt.plot(x, np.sin(x - 1), color='g', linestyle='dashed', label='vert')
plt.show()
# Grayscale between 0 and 1, dashes and dots
plt.plot(x, np.sin(x - 2), color='0.75', linestyle='dashdot', label='gris')
plt.show()
# RGB color, dotted line
plt.plot(x, np.sin(x - 3), color='#FF0000', linestyle='dotted', label='rouge')
plt.show()
# Axis limits. Try also 'tight' and 'equal' to see their effect
plt.axis([-1, 11, -1.5, 1.5]);
# Labels
plt.title("Example of a graph")
# The legend is generated from the argument 'label' of 'plot'
# 'loc' specified the placement of the legend.
plt.legend(loc='lower left');
# Axis titles
ax = ax.set(xlabel='x', ylabel='sin(x)')
#bar graph
x = np.random.randn(1000)
plt.style.use('classic')
fig=plt.figure(figsize=(5,3))
ax = plt.axes(facecolor='#E6E6E6')
# Display ticks underneath the axis
ax.set_axisbelow(True)
# White frame
plt.grid(color='w', linestyle='solid')
# Hide the frame
for spine in ax.spines.values():
spine.set_visible(False)
# Hide the markers at the top and the right
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
# We can personalise the markers, and rotate them
marqueurs = [-3, -2, -1, 0, 1, 2, 3]
xtick_labels = ['A', 'B', 'C', 'D', 'E', 'F']
plt.xticks(marqueurs, xtick_labels, rotation=30)
# Change the color of markers
ax.tick_params(colors='gray', direction='out')
for tick in ax.get_xticklabels():
tick.set_color('gray')
for tick in ax.get_yticklabels():
tick.set_color('gray')
# Change the color of the edges
ax.hist(x, edgecolor='#E6E6E6', color='#EE6666');
plt.show()
"""
#error bar generates lines that extend from yCoord * 0.8 - Ycoord * 1.2 to show full range, plt.errorbar(xCoord, yCoord, yerr = unsurePercent, fmt = ".k", ecolor= 'green', elinewidth = 3, capsize = 5);
#error bar generates lines that extend from yCoord * 0.8 - Ycoord * 1.2 to show full range
"""
fig = plt.figure(figsize = (12, 12))
x = np.linspace(0, 10, 1000)
y = np.sin(x)
for i in range (6):
fig.add_subplot(3, 2, i+1) #fig.add_subplot creates set of graphs next to each other, first digit is the number of rows, the second the number of columns, and the third the index of the subplot.
plt.style.use(plt.style.available[i]) #prints graph in style in rotation
plt.plot(x, y)
plt.text(s = plt.style.available[i], x = 2, y = 1, color = 'red')
#multiple graphs
# Change default font size
plt.rcParams.update({'font.size': 15})
fig = plt.figure()
ax = plt.axes()
plt.show()
# Solid line, color specified by its name
plt.plot(x, np.sin(x - 0), color='blue', linestyle='solid', label='blue')
plt.show()
# Short name for color, dashed line
plt.plot(x, np.sin(x - 1), color='g', linestyle='dashed', label='vert')
plt.show()
# Grayscale between 0 and 1, dashes and dots
plt.plot(x, np.sin(x - 2), color='0.75', linestyle='dashdot', label='gris')
plt.show()
# RGB color, dotted line
plt.plot(x, np.sin(x - 3), color='#FF0000', linestyle='dotted', label='rouge')
plt.show()
# Axis limits. Try also 'tight' and 'equal' to see their effect
plt.axis([-1, 11, -1.5, 1.5]);
# Labels
plt.title("Example of a graph")
# The legend is generated from the argument 'label' of 'plot'
# 'loc' specified the placement of the legend.
plt.legend(loc='lower left');
# Axis titles
ax = ax.set(xlabel='x', ylabel='sin(x)')
#bar graph
x = np.random.randn(1000)
plt.style.use('classic')
fig=plt.figure(figsize=(5,3))
ax = plt.axes(facecolor='#E6E6E6')
# Display ticks underneath the axis
ax.set_axisbelow(True)
# White frame
plt.grid(color='w', linestyle='solid')
# Hide the frame
for spine in ax.spines.values():
spine.set_visible(False)
# Hide the markers at the top and the right
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
# We can personalise the markers, and rotate them
marqueurs = [-3, -2, -1, 0, 1, 2, 3]
xtick_labels = ['A', 'B', 'C', 'D', 'E', 'F']
plt.xticks(marqueurs, xtick_labels, rotation=30)
# Change the color of markers
ax.tick_params(colors='gray', direction='out')
for tick in ax.get_xticklabels():
tick.set_color('gray')
for tick in ax.get_yticklabels():
tick.set_color('gray')
# Change the color of the edges
ax.hist(x, edgecolor='#E6E6E6', color='#EE6666');
plt.show() | 30.990385 | 202 | 0.677009 |
42208ae7020f9b6d58705370163347abcc5be99f | 9,131 | py | Python | ocpmodels/models/gemnet/utils.py | Open-Catalyst-Project/baselines | 89948582edfb8debb736406d54db9813a5f2c88d | [
"MIT"
] | 10 | 2020-02-17T22:40:25.000Z | 2020-10-10T13:33:05.000Z | ocpmodels/models/gemnet/utils.py | abhshkdz/cgcnn | 5a664501d2eff24d362b6f1ea504300705c481ac | [
"MIT",
"BSD-3-Clause"
] | 50 | 2020-04-08T19:43:24.000Z | 2020-10-13T21:34:33.000Z | ocpmodels/models/gemnet/utils.py | abhshkdz/cgcnn | 5a664501d2eff24d362b6f1ea504300705c481ac | [
"MIT",
"BSD-3-Clause"
] | 7 | 2020-02-19T03:43:44.000Z | 2020-10-04T09:06:07.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
import torch
from torch_scatter import segment_csr
def read_json(path):
    """Load and return the JSON content stored at `path` (must end in .json)."""
    if not path.endswith(".json"):
        raise UserWarning(f"Path {path} is not a json-path.")
    with open(path, "r") as json_file:
        return json.load(json_file)


def update_json(path, data):
    """Merge `data` into the JSON file at `path` and write the result back."""
    if not path.endswith(".json"):
        raise UserWarning(f"Path {path} is not a json-path.")
    content = read_json(path)
    content.update(data)
    write_json(path, content)


def write_json(path, data):
    """Serialize `data` as indented UTF-8 JSON at `path` (must end in .json)."""
    if not path.endswith(".json"):
        raise UserWarning(f"Path {path} is not a json-path.")
    with open(path, "w", encoding="utf-8") as json_file:
        json.dump(data, json_file, ensure_ascii=False, indent=4)


def read_value_json(path, key):
    """Return `content[key]` from the JSON file at `path`, or None when absent."""
    content = read_json(path)
    return content[key] if key in content.keys() else None
def ragged_range(sizes):
    """Multiple concatenated ranges.

    Examples
    --------
        sizes = [1 4 2 3]
        Return: [0 0 1 2 3 0 1 0 1 2]
    """
    assert sizes.dim() == 1
    if sizes.sum() == 0:
        return sizes.new_empty(0)

    # Empty blocks contribute nothing to the output; drop them up front.
    keep = sizes > 0
    if not torch.all(keep):
        sizes = torch.masked_select(sizes, keep)

    # Start from steps of +1 everywhere, then overwrite each block boundary
    # with a jump of (1 - block_size) so the cumulative sum restarts at 0
    # for every block.
    steps = torch.ones(sizes.sum(), dtype=torch.long, device=sizes.device)
    steps[0] = 0
    boundaries = sizes[:-1].cumsum(0)
    steps[boundaries] = (1 - sizes)[:-1]
    return steps.cumsum(0)
def repeat_blocks(
    sizes,
    repeats,
    continuous_indexing=True,
    start_idx=0,
    block_inc=0,
    repeat_inc=0,
):
    """Repeat blocks of indices.

    Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements

    continuous_indexing: Whether to keep increasing the index after each block
    start_idx: Starting index
    block_inc: Number to increment by after each block,
        either global or per block. Shape: len(sizes) - 1
    repeat_inc: Number to increment by after each repetition,
        either global or per block

    Examples
    --------
        sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
        Return: [0 0 0  0 1 2 0 1 2  0 1 0 1 0 1]
        sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
        Return: [0 0 0  1 2 3 1 2 3  4 5 4 5 4 5]
        sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
        repeat_inc = 4
        Return: [0 4 8  1 2 3 5 6 7  4 5 8 9 12 13]
        sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
        start_idx = 5
        Return: [5 5 5  6 7 8 6 7 8  9 10 9 10 9 10]
        sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
        block_inc = 1
        Return: [0 0 0  2 3 4 2 3 4  6 7 6 7 6 7]
        sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
        Return: [0 1 2 0 1 2  3 4 3 4 3 4]
        sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
        Return: [0 1 0 1  5 6 5 6]
    """
    assert sizes.dim() == 1
    assert all(sizes >= 0)

    # Remove 0 sizes
    sizes_nonzero = sizes > 0
    if not torch.all(sizes_nonzero):
        assert block_inc == 0  # Implementing this is not worth the effort
        sizes = torch.masked_select(sizes, sizes_nonzero)
        if isinstance(repeats, torch.Tensor):
            repeats = torch.masked_select(repeats, sizes_nonzero)
        if isinstance(repeat_inc, torch.Tensor):
            repeat_inc = torch.masked_select(repeat_inc, sizes_nonzero)

    if isinstance(repeats, torch.Tensor):
        assert all(repeats >= 0)
        # A leading repeats=0 would break the boundary bookkeeping below, so a
        # dummy one-element block is prepended and stripped again at the end.
        insert_dummy = repeats[0] == 0
        if insert_dummy:
            one = sizes.new_ones(1)
            zero = sizes.new_zeros(1)
            sizes = torch.cat((one, sizes))
            repeats = torch.cat((one, repeats))
            if isinstance(block_inc, torch.Tensor):
                block_inc = torch.cat((zero, block_inc))
            if isinstance(repeat_inc, torch.Tensor):
                repeat_inc = torch.cat((zero, repeat_inc))
    else:
        assert repeats >= 0
        insert_dummy = False

    # Get repeats for each group using group lengths/sizes
    r1 = torch.repeat_interleave(
        torch.arange(len(sizes), device=sizes.device), repeats
    )

    # Get total size of output array, as needed to initialize output indexing array
    N = (sizes * repeats).sum()

    # Initialize indexing array with ones as we need to setup incremental indexing
    # within each group when cumulatively summed at the final stage.
    # Two steps here:
    # 1. Within each group, we have multiple sequences, so setup the offsetting
    # at each sequence lengths by the seq. lengths preceding those.
    id_ar = torch.ones(N, dtype=torch.long, device=sizes.device)
    id_ar[0] = 0
    insert_index = sizes[r1[:-1]].cumsum(0)
    insert_val = (1 - sizes)[r1[:-1]]

    if isinstance(repeats, torch.Tensor) and torch.any(repeats == 0):
        diffs = r1[1:] - r1[:-1]
        indptr = torch.cat((sizes.new_zeros(1), diffs.cumsum(0)))
        if continuous_indexing:
            # If a group was skipped (repeats=0) we need to add its size
            insert_val += segment_csr(sizes[: r1[-1]], indptr, reduce="sum")

        # Add block increments
        if isinstance(block_inc, torch.Tensor):
            insert_val += segment_csr(
                block_inc[: r1[-1]], indptr, reduce="sum"
            )
        else:
            insert_val += block_inc * (indptr[1:] - indptr[:-1])
            if insert_dummy:
                insert_val[0] -= block_inc
    else:
        idx = r1[1:] != r1[:-1]
        if continuous_indexing:
            # 2. For each group, make sure the indexing starts from the next group's
            # first element. So, simply assign 1s there.
            insert_val[idx] = 1

        # Add block increments
        insert_val[idx] += block_inc

    # Add repeat_inc within each group
    if isinstance(repeat_inc, torch.Tensor):
        insert_val += repeat_inc[r1[:-1]]
        if isinstance(repeats, torch.Tensor):
            repeat_inc_inner = repeat_inc[repeats > 0][:-1]
        else:
            repeat_inc_inner = repeat_inc[:-1]
    else:
        insert_val += repeat_inc
        repeat_inc_inner = repeat_inc

    # Subtract the increments between groups
    if isinstance(repeats, torch.Tensor):
        repeats_inner = repeats[repeats > 0][:-1]
    else:
        repeats_inner = repeats
    insert_val[r1[1:] != r1[:-1]] -= repeat_inc_inner * repeats_inner

    # Assign index-offsetting values
    id_ar[insert_index] = insert_val

    if insert_dummy:
        # Strip the dummy block prepended above.
        id_ar = id_ar[1:]
        if continuous_indexing:
            id_ar[0] -= 1

    # Set start index now, in case of insertion due to leading repeats=0
    id_ar[0] += start_idx

    # Finally index into input array for the group repeated o/p
    res = id_ar.cumsum(0)
    return res
def calculate_interatomic_vectors(R, id_s, id_t, offsets_st):
    """
    Calculate the vectors connecting the given atom pairs,
    considering offsets from periodic boundary conditions (PBC).

    Parameters
    ----------
        R: Tensor, shape = (nAtoms, 3)
            Atom positions.
        id_s: Tensor, shape = (nEdges,)
            Indices of the source atom of the edges.
        id_t: Tensor, shape = (nEdges,)
            Indices of the target atom of the edges.
        offsets_st: Tensor, shape = (nEdges,) or None
            PBC offsets of the edges, added to the raw difference.

    Returns
    -------
        (D_st, V_st): tuple
            D_st: Tensor, shape = (nEdges,)
                Distance from atom t to s.
            V_st: Tensor, shape = (nEdges,)
                Unit direction from atom t to s.
    """
    positions_t = R[id_t]
    positions_s = R[id_s]
    # Raw edge vector s -> t, shifted by the PBC offset when one is given.
    if offsets_st is None:
        V_st = positions_t - positions_s
    else:
        V_st = positions_t - positions_s + offsets_st
    D_st = torch.sqrt(torch.sum(V_st**2, dim=1))
    # Normalize to unit length.
    V_st = V_st / D_st[..., None]
    return D_st, V_st


def inner_product_normalized(x, y):
    """
    Calculate the inner product between the given normalized vectors,
    clamped to the valid cosine range [-1, 1].
    """
    return (x * y).sum(dim=-1).clamp(min=-1, max=1)
def mask_neighbors(neighbors, edge_mask):
    """Recompute the per-segment number of neighbors after masking edges.

    The cumulative sum of `neighbors` delimits contiguous blocks of edges in
    `edge_mask`; the result counts the kept (True) edges inside each block.

    :param neighbors: LongTensor of edge counts per segment.
    :param edge_mask: BoolTensor over all edges, True = edge kept.
    :return: Tensor like `neighbors` with the count of kept edges per segment.
    """
    # Per-segment sums via prefix sums and plain indexing; this is equivalent
    # to torch_scatter.segment_csr(edge_mask.long(), indptr) but avoids the
    # external dependency.
    indptr = torch.cat([neighbors.new_zeros(1), torch.cumsum(neighbors, dim=0)])
    kept_cumsum = torch.cat(
        [neighbors.new_zeros(1), torch.cumsum(edge_mask.long(), dim=0)]
    )
    return kept_cumsum[indptr[1:]] - kept_cumsum[indptr[:-1]]
| 32.610714 | 128 | 0.609791 |
5e2489175473d90be1c7cbb73a798961fb7386d2 | 698 | py | Python | sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200801/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200801/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200801/__init__.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .get_machine_learning_compute import *
from .get_private_endpoint_connection import *
from .get_workspace import *
from .get_workspace_connection import *
from .list_machine_learning_compute_keys import *
from .list_machine_learning_compute_nodes import *
from .list_workspace_keys import *
from .machine_learning_compute import *
from .private_endpoint_connection import *
from .workspace import *
from .workspace_connection import *
from ._inputs import *
from . import outputs
| 36.736842 | 80 | 0.797994 |
f05ffb371a96a66f40c9b8cd4875ad673a7dac71 | 7,577 | py | Python | pysaurus/core/functions.py | notoraptor/pysaurus | 3bf5fe8c15e0e0e580e5edaea05b4a1298641367 | [
"MIT"
] | null | null | null | pysaurus/core/functions.py | notoraptor/pysaurus | 3bf5fe8c15e0e0e580e5edaea05b4a1298641367 | [
"MIT"
] | 4 | 2021-08-13T14:03:02.000Z | 2022-03-05T16:02:45.000Z | pysaurus/core/functions.py | notoraptor/pysaurus | 3bf5fe8c15e0e0e580e5edaea05b4a1298641367 | [
"MIT"
] | null | null | null | import bisect
import concurrent.futures
import os
import re
import threading
from pysaurus.core.modules import HTMLStripper
# Regular expressions and limits shared by the text/JSON helpers below.
REGEX_NO_WORD = re.compile(r"(\W|_)+")  # runs of non-word characters (incl. '_')
REGEX_CONSECUTIVE_UPPER_CASES = re.compile("[A-Z]{2,}")  # e.g. "ABC"
REGEX_LOWER_THEN_UPPER_CASES = re.compile("([a-z0-9])([A-Z])")  # camelCase boundary
REGEX_WORD_THEN_NUMBER = re.compile(r"([^0-9 ])([0-9])")  # letter->digit boundary
REGEX_NUMBER_THEN_WORD = re.compile(r"([0-9])([^0-9 ])")  # digit->letter boundary
REGEX_NUMBER = re.compile(r"([0-9]+)")  # capture group: split() keeps the digits
REGEX_ATTRIBUTE = re.compile(r"^[a-zA-Z][a-zA-Z0-9_]*$")  # identifier-like name
JSON_INTEGER_MIN = -(2 ** 31)  # signed 32-bit bounds; larger ints get stringified
JSON_INTEGER_MAX = 2 ** 31 - 1
DISCARDED_CHARACTERS = r"@#\\/?$:!"  # rejected by has_discarded_characters
def is_valid_attribute_name(key):
    """Return a truthy match object iff `key` looks like a valid attribute name."""
    return REGEX_ATTRIBUTE.match(key)


def has_discarded_characters(txt: str):
    """Tell whether `txt` contains any character from DISCARDED_CHARACTERS."""
    for character in DISCARDED_CHARACTERS:
        if character in txt:
            return True
    return False


def separate_text_and_numbers(text: str):
    """Split `text` into alternating text/number chunks, numbers as ints."""
    chunks = REGEX_NUMBER.split(text)
    # Odd positions are the captured digit runs of REGEX_NUMBER.
    return tuple(
        int(chunk) if position % 2 else chunk
        for position, chunk in enumerate(chunks)
    )


def split_words_and_numbers(text):
    """Insert a space at every letter/digit boundary of `text`."""
    spaced = REGEX_WORD_THEN_NUMBER.sub(r"\1 \2", text)
    return REGEX_NUMBER_THEN_WORD.sub(r"\1 \2", spaced)
def camel_case_to_snake_case(name, split_upper_cases=True):
    """Convert a string (expected to be in camel case) to snake case.

    :param name: string to convert.
    :param split_upper_cases: if True, also split runs of uppercase letters
        (e.g. 'ABC' => 'a_b_c')
    :return: snake case version of given name.
    :rtype: str
    """
    if name == "":
        return name
    if split_upper_cases:
        # "_".join over a string joins its individual characters.
        name = REGEX_CONSECUTIVE_UPPER_CASES.sub(
            lambda match: "_".join(match.group(0)), name
        )
    return REGEX_LOWER_THEN_UPPER_CASES.sub(r"\1_\2", name).lower()


def string_to_pieces(the_string, as_set=False):
    """Lower-case `the_string` and split it into word/number pieces."""
    collection = set if as_set else list
    normalized = split_words_and_numbers(
        camel_case_to_snake_case(the_string, split_upper_cases=False)
    )
    return collection(
        word.lower() for word in REGEX_NO_WORD.sub(" ", normalized).split()
    )
def dispatch_tasks(tasks, job_count, extra_args=None):
    # type: (list, int, list) -> list
    """Split `tasks` into at most `job_count` jobs of balanced sizes.

    Each job is a list ``[task_slice, "IonN", *extra_args]`` where ``I`` is the
    1-based job index and ``N`` the number of jobs actually created (``N`` may
    be smaller than `job_count` when there are fewer tasks than jobs).

    :param tasks: a list of tasks to split.
    :param job_count: maximum number of jobs.
    :param extra_args: (optional) list of extra elements appended to every job.
    :return: a list of jobs.
    """
    # NOTE: the original docstring described a `next_job_id` parameter that
    # does not exist; the job label "IonN" is what identifies each job.
    if extra_args is None:
        extra_args = []
    task_count = len(tasks)
    if job_count > task_count:
        # One task per job; fewer jobs than requested.
        job_lengths = [1] * task_count
    else:
        # Distribute the remainder over the first jobs.
        job_lengths = [task_count // job_count] * job_count
        for i in range(task_count % job_count):
            job_lengths[i] += 1
    assert sum(job_lengths) == task_count
    jobs = []
    cursor = 0
    job_count = len(job_lengths)
    for job_id, job_len in enumerate(job_lengths, start=1):
        jobs.append(
            [tasks[cursor : (cursor + job_len)], f"{job_id}on{job_count}"]
            + extra_args
        )
        cursor += job_len
    return jobs
def package_dir():
    """Absolute path of the package directory (parent of this module's folder)."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, ".."))


def flat_to_coord(index, width):
    """Convert a flat index to (x, y) in a grid of the given width."""
    return index % width, index // width


def coord_to_flat(x, y, width):
    """Convert (x, y) to a flat index in a grid of the given width."""
    return x + y * width


def get_file_extension(string):
    # type: (str) -> str
    """Lower-cased extension of `string`, or "" when it contains no dot."""
    dot_position = string.rfind(".")
    return string[dot_position + 1 :].lower() if dot_position >= 0 else ""
def _pgcd(a, b):
    # type: (int, int) -> int
    # Iterative Euclid's algorithm (inputs already non-negative, a >= b).
    while b:
        a, b = b, a % b
    return a


def pgcd(a, b):
    # type: (int, int) -> int
    """ "Plus grand commun diviseur" (Greatest Common Divider)"""
    a, b = abs(a), abs(b)
    if a < b:
        a, b = b, a
    return _pgcd(a, b)
def parallelize(function, jobs, cpu_count):
    """Map `function` over `jobs` using a pool of `cpu_count` worker processes."""
    with concurrent.futures.ProcessPoolExecutor(max_workers=cpu_count) as pool:
        return list(pool.map(function, jobs))
def launch_thread(function, *args, **kwargs):
    """Start `function(*args, **kwargs)` in a thread named after the function."""
    worker = threading.Thread(
        target=function, name=function.__name__, args=args, kwargs=kwargs
    )
    worker.start()
    return worker
def html_to_title(title):
    # type: (str) -> str
    """
    Remove HTML tags, simple and double starting/ending quotes from given string.

    :param title: text to clear
    :return: cleared text
    """
    if not title:
        return title
    title = HTMLStripper.strip(title)
    while True:
        stripped_something = False
        for quote in ('"', "'"):
            if title.startswith(quote) and title.endswith(quote):
                title = title.strip(quote)
                stripped_something = True
        if not stripped_something:
            break
    return title
def identity(value):
    """Return `value` unchanged."""
    return value


def function_none(*args, **kwargs):
    """Accept any arguments and always return None."""
    return None
def is_dictionary(dict_to_check):
    """Check if given variable is a dictionary-like object.

    :param dict_to_check: Dictionary to check.
    :return: Indicates if the object is a dictionary.
    :rtype: bool
    """
    if isinstance(dict_to_check, dict):
        return True
    # Duck-typed check: mapping protocol plus the three dict view methods.
    required_attributes = (
        "__len__",
        "__contains__",
        "__bool__",
        "__iter__",
        "__getitem__",
        "keys",
        "values",
        "items",
    )
    return all(hasattr(dict_to_check, name) for name in required_attributes)
# Aliases over bisect: leftmost / rightmost insertion index in a sorted sequence.
get_start_index = bisect.bisect_left
get_end_index = bisect.bisect_right
def class_get_public_attributes(cls: type, exclude=(), wrapper=sorted):
    """Collect the non-callable, lowercase-initial attribute names of `cls`.

    Names listed in `exclude` or in the class's optional ``__protected__``
    iterable are removed. `wrapper` post-processes the resulting set; when
    `wrapper` is `set` the set is returned as-is.
    """
    names = set()
    for name in dir(cls):
        if "a" <= name[0] <= "z" and not callable(getattr(cls, name)):
            names.add(name)
    names.difference_update(exclude)
    names.difference_update(getattr(cls, "__protected__", ()))
    return names if wrapper is set else wrapper(names)
def compute_nb_pages(count, page_size):
    """Number of pages of `page_size` items needed to hold `count` items."""
    full_pages, remainder = divmod(count, page_size)
    return full_pages + (1 if remainder else 0)
def to_json_value(value):
    """Recursively convert `value` into JSON-serializable data.

    Containers are converted element-wise; strings, floats, booleans, None and
    32-bit-range ints pass through; everything else is stringified.
    """
    if isinstance(value, (tuple, list, set)):
        return [to_json_value(item) for item in value]
    if isinstance(value, dict):
        return {to_json_value(k): to_json_value(v) for k, v in value.items()}
    # bool must be accepted before the int range check (bool is an int subclass).
    if isinstance(value, (str, float, bool, type(None))):
        return value
    if isinstance(value, int) and JSON_INTEGER_MIN <= value <= JSON_INTEGER_MAX:
        return value
    return str(value)
def deep_equals(value, other):
    """Compare elements, descending recursively into lists, tuples and dicts."""
    kind = type(value)
    assert kind is type(other), (kind, type(other))
    if kind in (list, tuple):
        if len(value) != len(other):
            return False
        return all(deep_equals(a, b) for a, b in zip(value, other))
    if kind is set:
        return len(value) == len(other) and all(x in other for x in value)
    if kind is dict:
        if len(value) != len(other):
            return False
        return all(k in other and deep_equals(v, other[k]) for k, v in value.items())
    return value == other
def get_default(value, default):
    """Return `default` when `value` is None, otherwise `value` itself."""
    return value if value is not None else default


def compute_nb_couples(n: int):
    """Number of unordered pairs among `n` elements, i.e. C(n, 2)."""
    return n * (n - 1) // 2 if n > 1 else 0
| 28.809886 | 88 | 0.637192 |
5b6644dd74fc569c21b482ac266d0cd808cc6930 | 816 | py | Python | gallery/decay.py | jeroenvantilburg/ppg | 6c3d769bb4dc6a83dbba7ac656daf8c631806494 | [
"Unlicense"
] | null | null | null | gallery/decay.py | jeroenvantilburg/ppg | 6c3d769bb4dc6a83dbba7ac656daf8c631806494 | [
"Unlicense"
] | null | null | null | gallery/decay.py | jeroenvantilburg/ppg | 6c3d769bb4dc6a83dbba7ac656daf8c631806494 | [
"Unlicense"
] | null | null | null | # Open this file with jeroenvantilburg.nl/ppg or any python environment
import matplotlib.pyplot as plt # do not modify; required by ppg
import numpy as np
# Create a numpy array of 500 entries with exponential decay
x = np.linspace(0, 5, 500)
y = np.array(2000 * np.exp(-x))
# Create the plot
fig, ax = plt.subplots()
ax.plot(x, y)
# Set the axis
ax.set_xlabel("t (s)", loc='right', fontsize=16)
ax.set_ylabel("N", loc='top', fontsize=16)
ax.set_xlim([0, 5.5])
ax.set_ylim([0, 2100])
# Show the major and minor grid lines
ax.minorticks_on()
ax.grid(which='major', color="grey", alpha=1, lw=0.5)
ax.grid(which='minor', color="grey", alpha=0.5, lw=0.4)
# Remove upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.show() # last line; do not modify; required by ppg
| 28.137931 | 71 | 0.699755 |
ec5dfd227529d62759b7bc312ae24668557dc021 | 9,780 | py | Python | classificationTool/interactiveWindowContours.py | oskasf/MAPHIS-1 | f82f2d8562e8a74af6c8ea13f78e26966fe8f31f | [
"MIT"
] | null | null | null | classificationTool/interactiveWindowContours.py | oskasf/MAPHIS-1 | f82f2d8562e8a74af6c8ea13f78e26966fe8f31f | [
"MIT"
] | null | null | null | classificationTool/interactiveWindowContours.py | oskasf/MAPHIS-1 | f82f2d8562e8a74af6c8ea13f78e26966fe8f31f | [
"MIT"
] | null | null | null | from argparse import Action
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import csv
import glob
from PIL import Image
from matplotlib.figure import Figure
import shutil
import numpy as np
from pathlib import Path, PurePath
from shutil import copyfile
from ast import literal_eval as make_tuple
import cv2
tolerance = 10
class Application(ttk.Frame):
    """Tkinter window for manually reviewing contours extracted from city maps.

    Each stored contour (.npz) is drawn in red over its source map and the
    operator marks it as interesting or not; progress is persisted as CSV
    files in the classified folder.
    """

    def __init__(self, classifType: str, master: tk.Tk, rawFolderName: str,
                 cityName: str, classifiedFolderName: str,
                 mapsFileExtension='tif', contoursFileExtension='npz'):
        ttk.Frame.__init__(self, master)
        self.master = master
        self.classifType = classifType
        self.cityName = cityName
        self.rawFolderPath = Path(PurePath(rawFolderName).joinpath(cityName))
        self.classifiedFolderPath = Path(PurePath(classifiedFolderName).joinpath(cityName))
        self.saveProgressFile = self.classifiedFolderPath / f'progress{classifType.capitalize()}.csv'
        self.featureNamesListPath = self.classifiedFolderPath / f'featureList{classifType.capitalize()}.csv'
        self.classifiedFolderPath.mkdir(parents=True, exist_ok=True)
        self.mapsFileExtension = mapsFileExtension
        self.storedMaps = glob.glob(str(self.rawFolderPath) + '/*.' + mapsFileExtension)
        self.storedContours = glob.glob(str(self.rawFolderPath) + '/*.' + contoursFileExtension)
        self.nPatches = len(self.storedContours)
        # FIX: was `self.heightThumbnail, self.widthThumbnail = 300`, which
        # raises TypeError (an int cannot be unpacked into two names).
        self.heightThumbnail = self.widthThumbnail = 300
        self.currentContourName = self.storedContours[0]
        self.currentMapName = self.currentContourName.split('.')[0] + '.' + mapsFileExtension
        classifiedTileName = self.currentContourName.split('\\')[-1].split('.')[0].split('_')[0]
        if self.saveProgressFile.is_file():
            # Resume from the last saved contour index.
            with open(self.saveProgressFile, 'r') as f:
                reader = csv.reader(f)
                myList = list(reader)
            self.indexContour = int(myList[0][0])
        else:
            self.indexContour = 0
        # FIX: iTile/jTile were read below (index label) without ever being
        # initialised, raising AttributeError on startup.
        self.iTile, self.jTile = 0, 0
        self.figThumbnail = Figure(figsize=(5, 4), dpi=100)
        self.figThumbnail.add_subplot(111).matshow(self.drawContourOnMap(self.currentContourName))
        self.canvaThumbnail = FigureCanvasTkAgg(self.figThumbnail, master)
        self.canvaThumbnail.draw()
        self.canvaThumbnail.get_tk_widget().grid(row=0, column=1)
        # Overlay on which contours are drawn in red; 11400x7590 is the full
        # map size (assumed constant for all maps -- TODO confirm).
        self.canvaDrawned = np.zeros((11400, 7590, 3), dtype=np.uint8)
        self.background = self.fileOpenFunction(self.currentMapName)
        # FIX: was `cv2.CvtColor(...)` (nonexistent name) and the result was
        # discarded; the grayscale map must actually be converted to RGB so
        # it can be summed with the RGB overlay in drawContourOnMap.
        self.background = cv2.cvtColor(self.background, cv2.COLOR_GRAY2RGB)

        # --- Button panel, placed at grid cell (0, 0) of the master window ---
        rowIndexInfo = 0
        rowTileInfo = 1
        rowButtonPredefined0 = 2
        self.buttonFrame = tk.Frame(master)
        self.buttonFrame.grid(row=0, column=0)
        # Current-index display and "jump to index" controls.
        self.currentIndexDisplay = tk.Label(
            self.buttonFrame, height=1,
            width=len(f'({self.iTile},{self.jTile}) / ({14},{18})'),
            text=f'({self.iTile},{self.jTile}) / ({14},{18})')
        self.currentIndexDisplay.grid(row=rowIndexInfo, column=0)
        self.indexJumpTextBox = ttk.Entry(self.buttonFrame, text="Go to index")
        self.indexJumpTextBox.grid(row=rowIndexInfo, column=1)
        self.indexJumpButton = ttk.Button(
            self.buttonFrame, text="Jump to index",
            command=lambda: [self.updateCanvas(indexString=self.indexJumpTextBox.get()),
                             self.clearTextInput(self.indexJumpTextBox),
                             self.updateIndex()])
        self.indexJumpButton.grid(row=rowIndexInfo, column=2)
        # Persist progress.
        self.saveButton = ttk.Button(self.buttonFrame, text="Save progress",
                                     command=lambda: [self.saveProgress()])
        self.saveButton.grid(row=rowTileInfo, column=0)
        # Classification buttons.
        # NOTE(review): both commands call self.save(), which is not defined in
        # this class -- presumably provided by a subclass; confirm before use.
        self.interesting = tk.Button(self.buttonFrame, text="Interesting",
                                     command=lambda: [self.save(), self.updateIndex()])
        self.interesting.grid(row=rowButtonPredefined0, column=0)
        self.notInteresting = tk.Button(self.buttonFrame, text="Not interesting",
                                        command=lambda: [self.save(), self.updateIndex()])
        self.notInteresting.grid(row=rowButtonPredefined0, column=1)

    def clearTextInput(self, textBoxAttribute):
        """Empty the given tkinter Entry widget."""
        textBoxAttribute.delete(0, len(textBoxAttribute.get()))

    def fileOpenFunction(self, filePath: str):
        """Open a map image (ndarray) or a contour archive (contour, features)."""
        extension = Path(filePath).suffix
        if extension == '.png' or extension == '.tif':
            return np.asarray(Image.open(filePath))
        elif extension == '.npz':
            # FIX: was `extension == 'npz'` (no leading dot); Path.suffix
            # includes the dot, so .npz files always fell through to the
            # NotImplementedError branch.
            return np.load(filePath)['contour'], np.load(filePath)['features']
        else:
            raise NotImplementedError(f'{extension} opening function is not implemented.')

    def drawContourOnMap(self, filePath):
        """Draw the stored contour in red and return the thumbnail crop around it."""
        contour, featureDict = self.fileOpenFunction(filePath)
        # NOTE(review): featureDict is np.load(...)['features']; the string
        # indexing below assumes it behaves like a mapping -- TODO confirm.
        extentX = (featureDict['xTile'] - featureDict['W'] / 2,
                   featureDict['xTile'] + featureDict['W'] / 2)
        extentY = (featureDict['yTile'] - featureDict['H'] / 2,
                   featureDict['yTile'] + featureDict['H'] / 2)
        # Clamp the crop margins so it never reaches outside the map.
        xBefore, xAfter = min(self.widthThumbnail, extentX[0]), min(self.widthThumbnail, 11400 - extentX[1])
        yBefore, yAfter = min(self.heightThumbnail, extentY[0]), min(self.heightThumbnail, 7590 - extentY[1])
        cv2.drawContours(self.canvaDrawned, [contour], -1, (0, 0, 255))
        return (self.background + self.canvaDrawned)[
            extentX[0] - xBefore:extentX[0] + xAfter,
            extentY[0] - yBefore:extentY[0] + yAfter]

    def updateCanvas(self, color=None, indexString=None):
        """Advance/repaint the tile views, or jump to the "i,j" index given.

        NOTE(review): relies on self.nRows, self.nCols, self.scaleFactor,
        self.currentTileName, self.colorisedTile, self.figColorisedTile and
        self.canvaTile, none of which are assigned in this class -- they are
        presumably set by a subclass or by changeTile(); confirm before use.
        """
        if color:
            # Paint the current tile cell with the classification color,
            # then move to the next cell (row-major order).
            self.colorisedTile[
                self.iTile * self.scaleFactor:self.iTile * self.scaleFactor + self.scaleFactor,
                self.jTile * self.scaleFactor:self.jTile * self.scaleFactor + self.scaleFactor,
                :] = color
            self.jTile += 1
            if self.jTile == self.nCols:
                if self.iTile == self.nRows:
                    print('Reached the end of the tile')
                else:
                    self.jTile = 0
                    self.iTile += 1
            self.figThumbnail.clear()
            self.figThumbnail.add_subplot(111).matshow(
                self.fileOpenFunction(self.currentTileName)[
                    self.iTile * self.heightThumbnail:self.iTile * self.heightThumbnail + self.heightThumbnail,
                    self.jTile * self.widthThumbnail:self.jTile * self.widthThumbnail + self.widthThumbnail])
            self.canvaThumbnail.draw()
            self.figColorisedTile.clear()
            self.figColorisedTile.add_subplot(111).imshow(self.colorisedTile)
            self.canvaTile.draw()
        else:
            # Jump straight to the "i,j" coordinates given in indexString.
            self.iTile = int(indexString.split(',')[0])
            self.jTile = int(indexString.split(',')[-1])
            self.figThumbnail.clear()
            self.figThumbnail.add_subplot(111).matshow(
                self.fileOpenFunction(self.currentTileName)[
                    self.iTile * self.heightThumbnail:self.iTile * self.heightThumbnail + self.heightThumbnail,
                    self.jTile * self.widthThumbnail:self.jTile * self.widthThumbnail + self.widthThumbnail])
            self.canvaThumbnail.draw()

    def changeTile(self):
        """Switch to the tile selected in the dropdown, restoring saved progress.

        NOTE(review): relies on self.tileNameDisplayed, self.nRows, self.nCols,
        self.scaleFactor, self.figColorisedTile and self.canvaTile, which are
        not assigned in this class -- confirm where they come from.
        """
        self.currentTileName = self.tileNameDisplayed.get()
        displayName = self.currentTileName.split('\\')[-1]
        if Path(PurePath(self.classifiedFolderPath).joinpath(displayName.split('.')[0] + '.npz')).is_file():
            # Resume a previously started tile.
            loadPath = self.classifiedFolderPath / displayName.split('.')[0]
            self.colorisedTile = np.load(loadPath.with_suffix('.npz'))['arr_0']
            with open(loadPath.with_suffix('.csv'), 'r') as f:
                reader = csv.reader(f)
                myList = list(reader)
            self.iTile, self.jTile = int(myList[0][0]), int(myList[1][0])
        else:
            # Fresh tile: blank overlay, start at (0, 0).
            self.colorisedTile = np.zeros(
                (self.nRows * self.scaleFactor, self.nCols * self.scaleFactor, 3),
                dtype=np.uint8)
            self.iTile, self.jTile = 0, 0
        # (The original re-read currentTileName/displayName here a second
        # time with identical values; the duplicate statements were removed.)
        print(f'Updating tile : {displayName}')
        self.figThumbnail.clear()
        self.figThumbnail.add_subplot(111).imshow(
            self.fileOpenFunction(self.currentTileName)[
                self.iTile:self.iTile + self.heightThumbnail,
                self.jTile:self.jTile + self.widthThumbnail])
        self.canvaThumbnail.draw()
        self.figColorisedTile.clear()
        self.figColorisedTile.add_subplot(111).imshow(self.colorisedTile)
        self.canvaTile.draw()

    def updateIndex(self):
        """Refresh the "(i,j) / (rows,cols)" label."""
        self.currentIndexDisplay['text'] = f'({self.iTile},{self.jTile}) / ({14},{18})'

    def saveProgress(self):
        """Persist the colorised overlay (.npz) and current tile indices (.csv)."""
        displayName = self.currentTileName.split('\\')[-1]
        savePath = self.classifiedFolderPath / displayName.split('.')[0]
        np.savez(savePath, self.colorisedTile)
        with open(savePath.with_suffix('.csv'), 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([self.iTile])
            writer.writerow([self.jTile])
5dcf695a68f8f714aeb1c7ec23966e27c1044c88 | 2,446 | py | Python | trax/models/atari_cnn.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | null | null | null | trax/models/atari_cnn.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | null | null | null | trax/models/atari_cnn.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple net for playing Atari games using PPO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from trax import layers as tl
def FrameStack(n_frames):
"""Stacks a fixed number of frames along the dimension 1."""
# Input shape: (B, T, ..., C).
# Output shape: (B, T, ..., C * n_frames).
assert n_frames >= 1
if n_frames == 1:
return ()
return tl.Serial(
# Make n_frames copies of the input sequence.
[tl.Dup()] * (n_frames - 1),
# Shift copies to the right by [0, .., n_frames - 1] frames.
tl.Parallel(*map(_shift_right, range(n_frames))),
# Concatenate along the channel dimension.
tl.Concatenate(n_items=n_frames, axis=-1)
)
def AtariCnn(n_frames=4, hidden_sizes=(32, 32), output_size=128, mode='train'):
"""An Atari CNN."""
del mode
# TODO(jonni): Include link to paper?
# Input shape: (B, T, H, W, C)
# Output shape: (B, T, output_size)
return tl.Serial(
tl.ToFloat(),
tl.Div(divisor=255.0),
# Set up n_frames successive game frames, concatenated on the last axis.
FrameStack(n_frames=n_frames), # (B, T, H, W, 4C)
tl.Conv(hidden_sizes[0], (5, 5), (2, 2), 'SAME'),
tl.Relu(),
tl.Conv(hidden_sizes[1], (5, 5), (2, 2), 'SAME'),
tl.Relu(),
tl.Flatten(n_axes_to_keep=2), # B, T and rest.
tl.Dense(output_size),
tl.Relu(),
)
def FrameStackMLP(n_frames=4, hidden_sizes=(64,), output_size=64,
mode='train'):
"""MLP operating on a fixed number of last frames."""
del mode
return tl.Serial(
FrameStack(n_frames=n_frames),
[[tl.Dense(d_hidden), tl.Relu()] for d_hidden in hidden_sizes],
tl.Dense(output_size),
)
def _shift_right(n): # pylint: disable=invalid-name
return [tl.ShiftRight()] * n
| 30.575 | 79 | 0.660262 |
861ad088f4d33b74a2ceeddabd5adf8f1f996e08 | 342 | py | Python | Python/02-Django/01-Try-Django/Challenge-Solutions/01-Getting-Started/03/projecturls.py | pabhd3/Code-School | 5115c1bdc1e99ea667bdc942667342123ae3929a | [
"MIT"
] | null | null | null | Python/02-Django/01-Try-Django/Challenge-Solutions/01-Getting-Started/03/projecturls.py | pabhd3/Code-School | 5115c1bdc1e99ea667bdc942667342123ae3929a | [
"MIT"
] | null | null | null | Python/02-Django/01-Try-Django/Challenge-Solutions/01-Getting-Started/03/projecturls.py | pabhd3/Code-School | 5115c1bdc1e99ea667bdc942667342123ae3929a | [
"MIT"
] | null | null | null | # Python
# Django
# Try Django
# Getting Started (Level 1)
# Challenge 03 - Refactor the existing URL Dispatchers
from django.conf.urls import include, url
from django.contrib import admin
from main import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('main.urls')), # empty path, and included app urls
] | 24.428571 | 72 | 0.716374 |
f19bffe1d8db01545aa2bac87ec675c56149bef9 | 195 | py | Python | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | 1 | 2021-06-30T18:08:42.000Z | 2021-06-30T18:08:42.000Z | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | null | null | null | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
] | null | null | null | import sys
import os
import subprocess
import pyautogui
import time
subprocess.run("C:/Windows/system32/cmd.exe")
time.sleep(3)
pyautogui.typewrite("python")
| 8.478261 | 46 | 0.651282 |
50175cf676bea9c7be8964de5c2cbb9a2a630449 | 2,463 | py | Python | tests/test_git.py | ddub/lint-review | ef620b2147d0a3c95d96b99ed7ca67547b13936d | [
"MIT"
] | null | null | null | tests/test_git.py | ddub/lint-review | ef620b2147d0a3c95d96b99ed7ca67547b13936d | [
"MIT"
] | null | null | null | tests/test_git.py | ddub/lint-review | ef620b2147d0a3c95d96b99ed7ca67547b13936d | [
"MIT"
] | null | null | null | import lintreview.git as git
import os
import shutil
from nose.tools import eq_
from nose.tools import raises
from unittest import skipIf
settings = {
'WORKSPACE': './tests'
}
cant_write_to_test = not(os.access(os.path.abspath('./tests'), os.W_OK))
def test_get_repo_path():
user = 'markstory'
repo = 'asset_compress'
num = '4'
res = git.get_repo_path(user, repo, num, settings)
expected = os.sep.join(
(settings['WORKSPACE'], user, repo, num))
expected = os.path.realpath(expected)
eq_(res, expected)
def test_get_repo_path__int():
user = 'markstory'
repo = 'asset_compress'
num = 4
res = git.get_repo_path(user, repo, num, settings)
expected = os.sep.join(
(settings['WORKSPACE'], user, repo, str(num)))
expected = os.path.realpath(expected)
eq_(res, expected)
def test_get_repo_path__absoulte_dir():
user = 'markstory'
repo = 'asset_compress'
num = 4
settings['WORKSPACE'] = os.path.realpath(settings['WORKSPACE'])
res = git.get_repo_path(user, repo, num, settings)
expected = os.sep.join(
(settings['WORKSPACE'], user, repo, str(num)))
expected = os.path.realpath(expected)
eq_(res, expected)
def test_exists__no_path():
assert not git.exists(settings['WORKSPACE'] + '/herp/derp')
def test_exists__no_git():
assert not git.exists(settings['WORKSPACE'])
@raises(IOError)
def test_repo_clone_no_repo():
path = settings['WORKSPACE'] + '/test_clone'
git.clone(
'git://github.com/markstory/it will never work.git',
path)
@skipIf(cant_write_to_test, 'Cannot write to ./tests skipping')
def test_repo_operations():
path = settings['WORKSPACE'] + '/test_clone'
assert not(git.exists(path)), 'Directory should not exist.'
res = git.clone(
'git://github.com/markstory/lint-review.git',
path)
assert res, 'Cloned successfully.'
assert git.exists(path), 'Cloned dir should be there.'
git.destroy(path)
assert not(git.exists(path)), 'Cloned dir should be gone.'
@skipIf(cant_write_to_test, 'Cannot write to ./tests skipping')
def test_clone_or_update():
path = settings['WORKSPACE'] + '/test_clone'
assert not(git.exists(path)), 'Directory should not exist.'
git.clone_or_update(
'git://github.com/markstory/lint-review.git',
path,
'e4f880c77e6b2c81c81cad5d45dd4e1c39b919a0')
assert git.exists(path)
git.destroy(path)
| 27.988636 | 72 | 0.672757 |
99b47372cfe423bbe42d3e18eea264f1b1ba2f71 | 791 | py | Python | tests/utils.py | zer0-star/verification-helper | 1b91ce3395b22bba2b41149d68c3350b2bd77285 | [
"MIT"
] | null | null | null | tests/utils.py | zer0-star/verification-helper | 1b91ce3395b22bba2b41149d68c3350b2bd77285 | [
"MIT"
] | null | null | null | tests/utils.py | zer0-star/verification-helper | 1b91ce3395b22bba2b41149d68c3350b2bd77285 | [
"MIT"
] | null | null | null | import contextlib
import os
import pathlib
import tempfile
from typing import *
@contextlib.contextmanager
def load_files(files: Dict[str, bytes]) -> Iterator[pathlib.Path]:
with tempfile.TemporaryDirectory() as tempdir_:
tempdir = pathlib.Path(tempdir_).resolve()
for relpath, data in files.items():
assert '/' not in relpath and '\\' not in relpath # we should use pathlib
path = tempdir / relpath
path.parent.mkdir(parents=True, exist_ok=True)
with open(str(path), "wb") as fh:
fh.write(data)
yield tempdir
@contextlib.contextmanager
def chdir(path: pathlib.Path) -> Iterator[None]:
cwd = os.getcwd()
try:
os.chdir(str(path))
yield
finally:
os.chdir(cwd)
| 27.275862 | 86 | 0.630847 |
fd8203c7e84870123e37a7eb93899829806b7d9b | 8,649 | py | Python | eve/endpoints.py | caspervg/eve | 4aaee2c0fa9ca77f3d44047f21ae72f2ab447f83 | [
"BSD-3-Clause"
] | null | null | null | eve/endpoints.py | caspervg/eve | 4aaee2c0fa9ca77f3d44047f21ae72f2ab447f83 | [
"BSD-3-Clause"
] | null | null | null | eve/endpoints.py | caspervg/eve | 4aaee2c0fa9ca77f3d44047f21ae72f2ab447f83 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
eve.endpoints
~~~~~~~~~~~~~
This module implements the API endpoints. Each endpoint (resource, item,
home) invokes the appropriate method handler, returning its response
to the client, properly rendered.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import re
from bson import tz_util
from flask import abort, request, current_app as app, Response
from eve.auth import requires_auth, resource_auth
from eve.methods import get, getitem, post, patch, delete, deleteitem, put
from eve.methods.common import ratelimit
from eve.render import send_response
from eve.utils import config, weak_date, date_to_rfc1123
import eve
def collections_endpoint(**lookup):
"""Resource endpoint handler
:param url: the url that led here
.. versionchanged:: 0.3
Pass lookup query down to delete_resource, so it can properly process
sub-resources.
.. versionchanged:: 0.2
Relying on request.endpoint to retrieve the resource being consumed.
.. versionchanged:: 0.1.1
Relying on request.path for determining the current endpoint url.
.. versionchanged:: 0.0.7
Using 'utils.request_method' helper function now.
.. versionchanged:: 0.0.6
Support for HEAD requests
.. versionchanged:: 0.0.2
Support for DELETE resource method.
"""
resource = _resource()
response = None
method = request.method
if method in ("GET", "HEAD"):
response = get(resource, lookup)
elif method == "POST":
response = post(resource)
elif method == "DELETE":
response = delete(resource, lookup)
elif method == "OPTIONS":
send_response(resource, response)
else:
abort(405)
return send_response(resource, response)
def item_endpoint(**lookup):
"""Item endpoint handler
:param url: the url that led here
:param lookup: sub resource query
.. versionchanged:: 0.2
Support for sub-resources.
Relying on request.endpoint to retrieve the resource being consumed.
.. versionchanged:: 0.1.1
Relying on request.path for determining the current endpoint url.
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.7
Using 'utils.request_method' helper function now.
.. versionchanged:: 0.0.6
Support for HEAD requests
"""
resource = _resource()
response = None
method = request.method
if method in ("GET", "HEAD"):
response = getitem(resource, **lookup)
elif method == "PATCH":
response = patch(resource, **lookup)
elif method == "PUT":
response = put(resource, **lookup)
elif method == "DELETE":
response = deleteitem(resource, **lookup)
elif method == "OPTIONS":
send_response(resource, response)
else:
abort(405)
return send_response(resource, response)
@ratelimit()
@requires_auth("home")
def home_endpoint():
"""Home/API entry point. Will provide links to each available resource
.. versionchanged:: 0.5
Resource URLs are relative to API root.
Don't list internal resources.
.. versionchanged:: 0.4
Prevent versioning collections from being added in links.
.. versionchanged:: 0.2
Use new 'resource_title' setting for link titles.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
"""
response = {}
if config.INFO:
info = {}
info["server"] = "Eve"
info["version"] = eve.__version__
if config.API_VERSION:
info["api_version"] = config.API_VERSION
response[config.INFO] = info
if config.HATEOAS:
links = []
for resource in config.DOMAIN.keys():
internal = config.DOMAIN[resource]["internal_resource"]
if not resource.endswith(config.VERSIONS):
if not bool(internal):
links.append(
{
"href": "%s" % config.URLS[resource],
"title": "%s" % config.DOMAIN[resource]["resource_title"],
}
)
if config.SCHEMA_ENDPOINT is not None:
links.append(
{
"href": "%s" % config.SCHEMA_ENDPOINT,
"title": "%s" % config.SCHEMA_ENDPOINT,
}
)
response[config.LINKS] = {"child": links}
return send_response(None, (response,))
else:
return send_response(None, (response,))
def error_endpoint(error):
"""Response returned when an error is raised by the API (e.g. my means of
an abort(4xx).
"""
headers = []
try:
headers.append(error.response.headers)
except AttributeError:
pass
try:
if error.www_authenticate is not None:
headers.append(error.www_authenticate)
except AttributeError:
pass
response = {
config.STATUS: config.STATUS_ERR,
config.ERROR: {"code": error.code, "message": error.description},
}
return send_response(None, (response, None, None, error.code, headers))
def _resource():
return request.endpoint.split("|")[0]
@requires_auth("media")
def media_endpoint(_id):
"""This endpoint is active when RETURN_MEDIA_AS_URL is True. It retrieves
a media file and streams it to the client.
.. versionadded:: 0.6
"""
if request.method == "OPTIONS":
return send_response(None, (None))
file_ = app.media.get(_id)
if file_ is None:
return abort(404)
headers = {
"Last-Modified": date_to_rfc1123(file_.upload_date),
"Content-Length": file_.length,
"Accept-Ranges": "bytes",
}
range_header = request.headers.get("Range")
if range_header and False:
status = 206
size = file_.length
try:
m = re.search(r"(\d+)-(\d*)", range_header)
begin, end = m.groups()
begin = int(begin)
end = int(end)
except:
begin, end = 0, None
length = size - begin
if end is not None:
length = end - begin + 1
file_.seek(begin)
data = file_.read(length)
headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
begin, begin + length - 1, size
)
else:
if_modified_since = weak_date(request.headers.get("If-Modified-Since"))
if if_modified_since:
if not if_modified_since.tzinfo:
if_modified_since = if_modified_since.replace(tzinfo=tz_util.utc)
if if_modified_since > file_.upload_date:
return Response(status=304)
data = file_
status = 200
response = Response(
data,
status=status,
headers=headers,
mimetype=file_.content_type,
direct_passthrough=True,
)
return send_response(None, (response,))
@requires_auth("resource")
def schema_item_endpoint(resource):
"""This endpoint is active when SCHEMA_ENDPOINT != None. It returns the
requested resource's schema definition in JSON format.
"""
resource_config = app.config["DOMAIN"].get(resource)
if not resource_config or resource_config.get("internal_resource") is True:
return abort(404)
return send_response(None, (resource_config["schema"],))
@requires_auth("home")
def schema_collection_endpoint():
"""This endpoint is active when SCHEMA_ENDPOINT != None. It returns the
schema definition for all public or request authenticated resources in
JSON format.
"""
schemas = {}
for resource_name, resource_config in app.config["DOMAIN"].items():
# skip versioned shadow collections
if resource_name.endswith(config.VERSIONS):
continue
# skip internal resources
internal = resource_config.get("internal_resource", False)
if internal:
continue
# skip resources for which request does not have read authorization
auth = resource_auth(resource_name)
if auth and request.method not in resource_config["public_methods"]:
roles = list(resource_config["allowed_roles"])
roles += resource_config["allowed_read_roles"]
if not auth.authorized(roles, resource_name, request.method):
continue
# otherwise include this resource in domain wide schema response
schemas[resource_name] = resource_config["schema"]
return send_response(None, (schemas,))
| 29.518771 | 86 | 0.619725 |
74d5aa0d303e7bb713fa63255c91d0a79ff9c4cc | 17,910 | py | Python | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | 1 | 2022-01-17T04:58:10.000Z | 2022-01-17T04:58:10.000Z | from typing import final
import numpy as np
import torch
import os
import time
from datetime import datetime
import random
import numbers
from torch.utils.data import Dataset
from .ERA5Reader.readNetCDF import LatTokmPerLon
from .EraExtractors import DefaultEraExtractor
def labelnameToDataname(filename):
return os.path.splitext(filename)[0]+".nc"
def datanameToLabelname(filename, mapTypes, removePrefix):
return {key: os.path.join(str(x[0]), os.path.splitext(filename)[0][removePrefix:]+".txt") for key, x in mapTypes.items()}
# Dataset Class
class WeatherFrontDataset(Dataset):
"""Front dataset."""
def __init__(self, data_dir, label_dir = None, mapTypes = {"NA": ("", (35,70), (-40,35), (0.25,0.25), (1,1), None) }, levelRange = None, transform=None, outSize = None, printFileName = False, labelThickness = 2, label_extractor = None, asCoords = False, era_extractor = DefaultEraExtractor, has_subfolds = (False, False), removePrefix = 0, halfResEval = False):
"""
Args:
data_dir (string): Directory with all the images.
label_dir (string): Directory with all the labls (fronts)
validLats (int,int): Lowest and Highest Latitude (-90 to 90) from wich the data shall be sampled
validLons (int,int): Lowest and Highest Longitude (0 to 360-resolution[1]) from wich the data shall be sampled
resolution (float, float): Step Resolution in Latitudinal and Longitudinal direction
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.data_dir = data_dir
self.label_dir = label_dir
# Cropsize (used before reading from ERA!)
self.cropsize = outSize
# Augmentationtuple (data-augmentation, label-augmentation)
self.transform = transform
# Function that extracts label data from a given range
self.label_extractor = label_extractor
self.asCoords = asCoords
# Function that extracts era data from a given range
self.era_extractor = era_extractor
# Dictionary describing folder, latitudes, longitudes and resolution (signed) for different labels
self.mapTypes = mapTypes
# Should labels be randomly drawn if multiple are available for the same data
self.randomizeMapTypes = True
# Levelrange of era to extract
self.levelrange = levelRange
# Latrange of era to extract for each mapType (uised for crop)
self.latrange = {key: np.arange(int((90-x[1][0])*(1/np.abs(x[3][0]))),int((90-x[1][1])*(1/np.abs(x[3][0]))), 1) for key,x in self.mapTypes.items()}
# lonrange of era to extract for each mapType (used for crop)
self.lonrange = {key: np.arange(int(x[2][0]*(1/x[3][1])), int(x[2][1]*(1/x[3][1])), 1) for key,x in self.mapTypes.items()}
# Print file information
self.printFileName = printFileName
# Extract in a km grid instead of lat lon
self.extractRegularGrid = False
# is the evlauation to be on halfRes
self.halfRes = halfResEval
# Are labels provided? Else do not return labels
self.has_label = (not label_dir is None and not label_extractor is None)
if label_extractor is None:
print("No label extractor given, proceed without extracting labels")
if label_dir is None:
print("No label directory given, Labels need to be generated by the extractor")
# Check if an era_extractor exists
if era_extractor is None:
print("No Era-Extractor given, abort execution!")
exit(1)
self.removePrefix = removePrefix
self.hasSubfolds = has_subfolds
# ERA Data is organized in subfolders (2017->03->20170201_00.nc)
if(self.hasSubfolds[0]):
self.fileList = []
for fold in os.listdir(self.data_dir):
for filen in os.listdir(os.path.join(self.data_dir, fold)):
# if the dataset extracts labels, check if the corresponding labels exist
if(self.has_label):
potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
labelExists = False
for key, val in potLabel.items():
foldna, filena = val.split("/")
if filena in os.listdir(os.path.join(self.label_dir,foldna)):
labelExists=True
if(labelExists):
self.fileList.append(os.path.join(fold,filen))
# if no labels are to be extracted simply append the data
else:
self.fileList.append(os.path.join(fold,filen))
# ERA Data is organized without subfolders (2017 -> 20170101_00.nc)
else:
self.fileList = []
for filen in os.listdir(self.data_dir):
if(self.has_label):
potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
labelExists = False
for key, val in potLabel.items():
foldna, filena = val.split("/")
if filena in os.listdir(os.path.join(self.label_dir, foldna)):
labelExists = True
if(labelExists):
self.fileList.append(filen)
else:
self.fileList.append(filen)
# Sort file list
self.fileList = sorted(self.fileList)
def __len__(self):
# Length of all available Data (regardless of the existence of label!)
return len(self.fileList)
# Allow for slices or idx
def __getitem__(self, idx):
if not isinstance(idx, numbers.Number):
print("Currently not working")
exit(1)
return self.getBatch(idx)
filepath = self.fileList[idx]
filename = ""
if(self.hasSubfolds[0]):
filename = filepath.split("/")[-1]
else:
filename = filepath
if(filename == ""):
print("fileNotFound")
print(idx)
img_name = os.path.join(self.data_dir, filepath)
#Initialize projection type and seeds for possible transformations
projection_type = 0
extract_seed = datetime.now()
transform_seed = datetime.now()
mapType = list(self.mapTypes.keys())[0]
fronts = None
if(self.has_label):
# all corresponding front names (Take the first them if multiple are available)
if(self.hasSubfolds[1]):
front_name = datanameToLabelname(filepath, self.mapTypes, self.removePrefix)
else:
if(self.hasSubfolds[0]):
front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
else:
front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
mapType, front_name = self.getProjectionTypeAndFilePath(front_name)
# To distinguish the output name
#filename = os.path.splitext(filename)[0]+mapType+os.path.splitext(filename)[1]
# Read Label Data
#print("label:", filename)
#print(front_name, mapType, filename)
try:
if(self.extractRegularGrid):
fronts = self.getRegularGridLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
else:
fronts = self.getLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
except:
print("filename is", front_name)
if(self.printFileName):
print(idx)
print(img_name)
print(front_name)
print()
if(self.has_label and fronts is None):
print("Did not extract a Front even though it should")
print(idx, filename)
# Read Image Data
#print("image:", filename
image = None
try:
if(self.extractRegularGrid):
image = self.getRegularGridImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
else:
image = self.getImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
except Exception as e:
print(e)
print("filename is", filename)
raise Exception(e,"\nfailed to extract image data {}".format(filename))
if(image is None):
print("failed to extract image data")
print(filename, img_name, front_name)
print(idx)
raise Exception("failed to extract image data {}".format(filename))
mask = None
if(len(self.mapTypes[mapType]) == 5 and (not self.mapTypes[mapType][4] is None)):
mask = self.getMask(self.mapTypes[mapType][-1], self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed)
# Perform transformation on the data (affine transformation + randm crop) => Crop enables equally sized images
if self.transform:
finalImage = self.transformImage(image, transform_seed)
if(mask is None):
finalMask = None
else:
finalMask = torch.from_numpy(self.transformImage(mask.reshape((1,*mask.shape)), transform_seed).reshape(*mask.shape)).detach()
if(self.has_label):
finalFronts = self.transformLabel(fronts, transform_seed)
if(self.asCoords):
return [torch.from_numpy(finalImage), finalFronts, filename, finalMask]
else:
return [torch.from_numpy(finalImage), torch.from_numpy(finalFronts), filename, finalMask]
else:
return [torch.from_numpy(finalImage), None, filename, finalMask]
else:
if(mask is None):
pass
else:
mask = torch.from_numpy(mask)
if(self.has_label):
if(self.asCoords):
return [torch.from_numpy(image), fronts, filename, mask]
else:
return [torch.from_numpy(image), torch.from_numpy(fronts), filename, mask]
else:
return [torch.from_numpy(image), None, filename, mask]
def getCropRange(self, latrange, lonrange, res, seed):
if(self.cropsize is None):
return latrange, lonrange
else:
# perform crop before reading data, to reduce memory usage
common_seed= seed
h,w = int(np.abs((latrange[1]-latrange[0]+res[0]-0.001)/res[0])), int(np.abs((lonrange[1]-lonrange[0])/res[1]))
th,tw = self.cropsize
random.seed(common_seed)
i = random.randint(0, h-th)*res[0]
j = random.randint(0, w-tw)*res[1]
th *= res[0]
tw *= res[1]
return (latrange[0]+i, latrange[0]+i+th), (lonrange[0]+j, lonrange[0]+j+tw)
def getImage(self, filename, latrange, lonrange, res, seed, tseed = 0):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
return self.era_extractor(filename, tgt_latrange, tgt_lonrange, self.levelrange, tseed)
def getLabel(self, filename, latrange, lonrange, res, types, seed):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
if(self.halfRes):
return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), (res[0]*2, res[1]*2), types)
else:
return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), res, types)
def getMask(self, mask, latrange, lonrange, res, seed):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
return mask[int((90-tgt_latrange[0])/np.abs(res[0])):int((90-tgt_latrange[1])/np.abs(res[0])), int((180+tgt_lonrange[0])/res[1]):int((180+tgt_lonrange[1])/res[1])]
def transformImage(self, image, seed):
if(self.transform[0] is None):
return image
finalImage = np.zeros_like(image)
for channel in range(image.shape[0]):
#for level in range(image.shape[1]):
random.seed(seed)
finalImage[channel, :,:] = self.transform[0](image[channel,:,:])
return finalImage
def transformLabel(self, label, seed):
if(self.transform[1] is None):
return label
if(self.asCoords):
finalLabel = label
for group in range(len(label)):
random.seed(seed)
finalLabel[group] = self.transform[1](finalLabel[group])
else:
finalLabel = np.zeros((label.shape))
for channel in range(label.shape[2]):
random.seed(seed)
finalLabel[:,:,channel] = self.transform[1](label[:,:,channel])
return finalLabel
def getProjectionTypeAndFilePath(self, front_name):
projection_type = ""
keys, names = [], []
for key, fname in front_name.items():
currFold = os.path.join(self.label_dir, key)
# get filename without path
filename = fname.split("/")[-1]
#print(filename, currFold, fname)
#print(os.listdir(currFold))
if(filename in os.listdir(currFold)):
keys.append(key), names.append(os.path.join(self.label_dir, fname))
idx = 0
if(len(keys)>0):
if(self.randomizeMapTypes):
idx = random.randint(0,len(keys)-1)
return keys[idx], names[idx]
# No Label found
print(front_name)
print(os.listdir(self.label_dir))
print("Invalid label data pair, no label found!")
return projection_type, front_name
def __repr__(self):
myString = "WeatherFrontDataset\n"
myString += str(self.__dict__)
return myString
def getInfo(self):
myString = "WeatherFrontDataset\n"
myString += "data_dir :: "+ "str :: " +str(self.data_dir)+" :: end\n"
myString += "label_dir :: "+ "str :: " +str(self.label_dir)+" :: end\n"
myString += "map_types :: "+ "dict(str: tuple(str, tuple(float,float), tuple(float,float), tuple(float,float))) :: " +str(self.mapTypes)+" :: end\n"
myString += "levelrange :: "+ "list(int) :: " +str(list(self.levelrange))+" :: end\n"
myString += "transforms :: "+ "obj :: " +str(self.transform)+" :: end\n"
myString += "outsize :: "+ "tuple(int,int) :: " +str(self.cropsize)+" :: end\n"
myString += "translat :: "+ "tuple(int,int) :: " +str(self.label_extractor.imageCreator.maxOff)+" :: end\n"
myString += "printFileName :: "+ "bool :: " +str(self.printFileName)+" :: end\n"
myString += "labelThickness :: "+ "int :: " +str(self.label_extractor.imageCreator.thickness)+" :: end\n"
myString += "labelGrouping :: "+ "str :: " +str(self.label_extractor.imageCreator.labelGrouping)+" :: end\n"
myString += "Variables :: "+ "list(str) :: " +str(self.era_extractor.variables)+" :: end\n"
myString += "NormType :: "+ "int :: " +str(self.era_extractor.reader.normalize_type)+" :: end\n"
return myString
class WeatherFrontBatch:
def __init__ (self, data, label_as_float = True, transpose_rate = 0.5, swap_indices = None):
transposed_data = (list(zip(*data)))
self.data = torch.stack(transposed_data[0],0).float()
if(transposed_data[1][0] is None):
self.labels = None
else:
if(label_as_float):
self.labels = torch.stack(transposed_data[1],0).float()
else:
self.labels = torch.stack(transposed_data[1],0).long()
self.filenames = transposed_data[2]
def pin_memory(self):
self.data = self.data.pin_memory()
return [self.data, self.labels, self.filenames]
class WeatherFrontsAsCoordinatesBatch:
def __init__ (self, data, label_as_float = True, transpose_rate = 0.5, swap_indices = None):
transposed_data = (list(zip(*data)))
self.data = torch.stack(transposed_data[0],0).float()
if(transposed_data[1][0] is None):
self.labels = None
else:
self.labels = transposed_data[1]
if(transposed_data[3][0] is None):
self.masks = None
else:
self.masks = torch.stack(transposed_data[3],0).float()
self.filenames = transposed_data[2]
def pin_memory(self):
self.data = self.data.pin_memory()
return [self.data, self.labels, self.filenames, self.masks]
class collate_wrapper:
    """Picklable collate_fn factory for DataLoader workers.

    Stores the batching options at construction time; calling the instance
    dispatches to WeatherFrontsAsCoordinatesBatch when ``asCoordinates`` is
    set, otherwise to WeatherFrontBatch, forwarding the stored options.
    """

    def __init__(self, binary=True, asCoordinates=False, transpose_rate=0.5, swap_indices=None):
        self.label_as_float = binary
        self.transpose_rate = transpose_rate
        self.swap_indices = swap_indices
        self.asCoords = asCoordinates

    def __call__(self, batch):
        batch_cls = WeatherFrontsAsCoordinatesBatch if self.asCoords else WeatherFrontBatch
        return batch_cls(batch,
                         label_as_float=self.label_as_float,
                         transpose_rate=self.transpose_rate,
                         swap_indices=self.swap_indices)
| 46.884817 | 365 | 0.599107 |
c726e294d33efd9bf21c5bd02dde7fc254c62a5e | 6,459 | py | Python | src/example/read_danseiji.py | jkunimune15/Rubber-Earth | 959b4b2d18c4f00a9f18fe913a8fe0ab5804f73f | [
"Unlicense"
] | 1 | 2020-11-02T07:33:22.000Z | 2020-11-02T07:33:22.000Z | src/example/read_danseiji.py | jkunimune15/Rubber-Earth | 959b4b2d18c4f00a9f18fe913a8fe0ab5804f73f | [
"Unlicense"
] | null | null | null | src/example/read_danseiji.py | jkunimune15/Rubber-Earth | 959b4b2d18c4f00a9f18fe913a8fe0ab5804f73f | [
"Unlicense"
] | null | null | null | # example_impl.py
# An example implementation of the Danseiji map projections.
# Uses data from http://www.naturalearthdata.com/
# This is free and unencumbered software released into the public domain.
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org>
import csv
import math
import matplotlib.pyplot as plt
import shapefile
# --- configuration: where to read the mesh CSV, the Natural Earth shapefiles,
# --- and where to write the rendered SVG ---
CSV_DIR = '../../output/'
CSV_NAME = 'danseijiIV.csv'
SHP_DIR = '../../data/'
SHP_NAME = ['ne_110m_graticules_30', 'ne_110m_land', 'tissots_indicatrix_30']
SHP_THICKNESS = [.5, .5, .3]  # line width per shapefile, same order as SHP_NAME
OUT_DIR = '../../output/'
OUT_NAME = 'danseijiIV.svg'
SHOW_MESH = False  # set True to draw the element edges under the map
LENGTH_THRESHOLD = .2  # projected segments longer than this are treated as wrap-around jumps
nodes = [] # a list of nodes: (x, y)
elements = [] # a list of elements: (nNE, nNW, nSW, nSE) where n is the index of a node in the nodes list
cells = [] # a table of cells: (eE, eN, eW, eS) where e is the index of an element in the elements list
boundary = [] # a list of node indices
table = [] # a table of interpolation points: (ɸ, θ)
# parse the mesh export: one header row, then node positions, cell/element
# topology, the boundary ring, and finally the inverse-projection table
with open(CSV_DIR+CSV_NAME, 'r') as f:
	data = csv.reader(f)
	# header: node count, cell grid dims, boundary length, table dims, map size
	N, m_1, m_2, l, o, p, w, h = [float(x) for x in next(data)]
	for i in range(int(N)): # load node locations
		x, y = [float(x) for x in next(data)]
		nodes.append((x, y))
	for i in range(int(m_1)): # load cell vertexen
		cells.append([])
		for j in range(int(m_2)):
			# each cell row starts with its kind, then its vertex node indices
			kind, *vertexen = [int(x) for x in next(data)]
			if kind == 0: # polar cells
				assert len(vertexen) == 4
				elements.append([vertexen[0], vertexen[1], vertexen[2], vertexen[3]])
				element_idx = len(elements) - 1
				# one element serves all four quadrant slots of the cell
				cells[i].append([element_idx, element_idx, element_idx, element_idx])
			elif kind == 1: # NW-SE cells
				assert len(vertexen) == 6
				# split into two triangles; None marks the vertex to extrapolate later
				elements.append([vertexen[1], vertexen[2], vertexen[3], None])
				elements.append([vertexen[0], None, vertexen[4], vertexen[5]])
				northwest_idx = len(elements) - 2
				southeast_idx = len(elements) - 1
				cells[i].append([southeast_idx, northwest_idx, northwest_idx, southeast_idx])
			elif kind == -1: # NE-SW cells
				assert len(vertexen) == 6
				elements.append([vertexen[0], vertexen[1], None, vertexen[5]])
				elements.append([ None, vertexen[2], vertexen[3], vertexen[4]])
				northeast_idx = len(elements) - 2
				southwest_idx = len(elements) - 1
				cells[i].append([northeast_idx, northeast_idx, southwest_idx, southwest_idx])
	for i in range(int(l)): # load boundary
		node, = [int(x) for x in next(data)]
		boundary.append(node)
	for i in range(int(o)): # load table
		table.append([])
		for j in range(int(p)):
			φ, θ = [float(x) for x in next(data)]
			table[i].append((φ, θ))
	try: # assert that the entire file has been read
		next(data)
	except StopIteration:
		print("CSV loaded successfully.")
	else:
		raise ValueError("CSV file did not terminate at the expected time.")
plt.figure() # now start plotting
if SHOW_MESH:
	for element in elements:
		xs = [nodes[node_idx][0] for node_idx in element if node_idx is not None]
		ys = [nodes[node_idx][1] for node_idx in element if node_idx is not None]
		plt.fill(xs, ys, edgecolor='k', linewidth=.5, fill=False) # plot the edges of the elements if desired
xs = [nodes[node_idx][0] for node_idx in boundary]
ys = [nodes[node_idx][1] for node_idx in boundary]
plt.fill(xs, ys, edgecolor='k', linewidth=1, fill=False) # plot the boundary of the map
for element in elements: # extrapolate virtual nodes
	for i in range(4):
		if element[i] is None:
			# complete the parallelogram from the element's three real corners
			node_1 = nodes[element[(i+1)%4]]
			node_2 = nodes[element[(i+2)%4]]
			node_3 = nodes[element[(i+3)%4]]
			nodes.append((node_1[0] - node_2[0] + node_3[0], node_1[1] - node_2[1] + node_3[1]))
			element[i] = len(nodes) - 1
for shapefilename, thickness in zip(SHP_NAME, SHP_THICKNESS):
	sf = shapefile.Reader(SHP_DIR+shapefilename) # map actual coordinates onto the mesh
	for shape in sf.shapes():
		for k, part in enumerate(shape.parts):
			# shape.parts holds the start index of each ring within shape.points
			start = shape.parts[k]
			stop = shape.parts[k+1] if k+1 < len(shape.parts) else len(shape.points)
			xs, ys = [], []
			# close the ring by appending its first point again
			for θ, φ in shape.points[start:stop] + [shape.points[start]]:
				θ = min(θ, 179.999) # (these lines are kind of janky, but I don't like having to deal with if statements later)
				φ = max(φ, -89.999)
				i = (90-φ)*m_1/180 # use the coordinates to select the correct cell
				j = (θ+180)*m_2/360
				cell = cells[int(i)][int(j)]
				if i%1 <= j%1: # and the correct element
					if 1 - i%1 <= j%1:
						element = elements[cell[0]] # (E)
					else:
						element = elements[cell[1]] # (N)
				else:
					if i%1 <= 1 - j%1:
						element = elements[cell[2]] # (W)
					else:
						element = elements[cell[3]] # (S)
				# bilinear interpolation within the element's four corner nodes
				x_NE, y_NE = nodes[element[0]]
				x_NW, y_NW = nodes[element[1]]
				x_SW, y_SW = nodes[element[2]]
				x_SE, y_SE = nodes[element[3]]
				x = i%1*(j%1*x_SE + (1-j%1)*x_SW) + (1-i%1)*(j%1*x_NE + (1-j%1)*x_NW)
				y = i%1*(j%1*y_SE + (1-j%1)*y_SW) + (1-i%1)*(j%1*y_NE + (1-j%1)*y_NW)
				if len(xs) > 0 and math.hypot(x - xs[-1], y - ys[-1]) < LENGTH_THRESHOLD: # if this line is short
					xs.append(x)
					ys.append(y) # add it on
				else: # if it is very long,
					plt.plot(xs, ys, color='k', linewidth=thickness) # plot what we have and reset
					xs = [x]
					ys = [y]
			if len(xs) > 0:
				plt.plot(xs, ys, color='k', linewidth=thickness)
plt.axis('equal')
plt.axis('off')
plt.savefig(OUT_DIR+OUT_NAME, bbox_inches='tight')
c3c13e96dae4ed9ba38bdd3f17bc22eea848bd99 | 582 | py | Python | media_nommer/utils/compat.py | duointeractive/media-nommer | ba8095748582934ab44f6972210381b074bd9d14 | [
"BSD-3-Clause"
] | 2 | 2015-11-05T18:52:05.000Z | 2021-02-16T20:26:23.000Z | media_nommer/utils/compat.py | duointeractive/media-nommer | ba8095748582934ab44f6972210381b074bd9d14 | [
"BSD-3-Clause"
] | null | null | null | media_nommer/utils/compat.py | duointeractive/media-nommer | ba8095748582934ab44f6972210381b074bd9d14 | [
"BSD-3-Clause"
] | 3 | 2015-06-04T11:27:39.000Z | 2017-12-06T22:10:07.000Z | """
Python compatibility stuff and backports.
"""
def total_seconds(td):
    """
    Backport of ``datetime.timedelta.total_seconds()``, which only exists
    in the standard library for Python 2.7 and up.

    Source: http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds

    :param datetime.timedelta td: A timedelta instance.
    :rtype: float
    :returns: The seconds elapsed during the timedelta.
    """
    whole_seconds = td.days * 24 * 3600 + td.seconds
    # float-divide so sub-second precision survives
    return (whole_seconds * 10 ** 6 + td.microseconds) / 10 ** 6.0
ebec174ada031146006d4b73a87ce244bea018fd | 14,586 | py | Python | tests/unit/errors/reporters/test_trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | [
"MIT"
] | null | null | null | tests/unit/errors/reporters/test_trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | [
"MIT"
] | 6 | 2018-10-18T17:03:05.000Z | 2021-12-08T15:03:56.000Z | tests/unit/errors/reporters/test_trello.py | jguiditta/atkinson | 17479ac1a5b2975f8ec056409ff7b0436c143701 | [
"MIT"
] | 4 | 2018-07-11T16:11:57.000Z | 2019-02-27T11:04:07.000Z | #! /usr/bin/env python
""" Tests for the TrelloCard reporter """
from unittest.mock import call, create_autospec, patch
import pytest
from trollo import Boards, Cards, Checklists, TrelloApi
from atkinson.errors.reporters.trello import TrelloCard, get_columns
@pytest.fixture()
def get_api():
    """ Trello Card api mock fixture.

    Patches get_trello_api and yields the patched factory; its return_value
    is a TrelloApi autospec whose cards/checklists/boards attributes are
    replaced with autospecs of the corresponding trollo classes.
    """
    patcher = patch('atkinson.errors.reporters.trello.get_trello_api')
    func = patcher.start()
    api_mock = create_autospec(TrelloApi)
    api_mock.cards = create_autospec(Cards)
    api_mock.checklists = create_autospec(Checklists)
    api_mock.boards = create_autospec(Boards)
    func.return_value = api_mock
    yield func
    # undo the patch after the test finishes
    patcher.stop()
@pytest.fixture()
def get_columns_mock():
    """ Trello board column mock.

    Patches get_columns so code under test receives a fixed
    (new_column_id, close_column_id) pair without touching the API.
    """
    patcher = patch('atkinson.errors.reporters.trello.get_columns')
    column = patcher.start()
    column.return_value = ('123', '987')
    yield column
    patcher.stop()
@pytest.fixture()
def good_config():
    """ A good configuration: contains all three keys get_columns requires """
    return {'board_id': 'abc123', 'new_column': 'a', 'close_column': 'b'}
def get_instance(api):
    """ Generate a TrelloCard wired to *api* with the default test ids """
    card_id, new_column_id, close_column_id = 'card_id', '123', '987'
    return TrelloCard(card_id, api, new_column_id, close_column_id)
def test_columns_good_config(get_api):
    """
    Given we have a proper config and a trello api handle
    When we call get_columns
    Then we get a tuple of trello column ids back
    """
    api = get_api()  # call the patched factory to obtain the autospec'd api
    api.boards.get_list.return_value = [{'name': 'a', 'id': '123'},
                                        {'name': 'b', 'id': '456'}]
    conf = {'board_id': 'abc123', 'new_column': 'a', 'close_column': 'b'}
    expected = ('123', '456')
    actual = get_columns(api.boards, conf)
    assert actual == expected
    assert api.boards.get_list.called
# every parametrized config is missing at least one of the required keys
@pytest.mark.parametrize('conf', [{},
                                  {'board_id': 'abc123'},
                                  {'board_id': 'abc123', 'new_column': 'a'},
                                  {'board_id': 'abc123', 'close_column': 'b'},
                                  {'new_column': 'a', 'close_column': 'b'}])
def test_columns_bad_config(conf, get_api):
    """
    Given we have a bad config and a trello api handle
    When we call get_columns
    Then a KeyError exception is raised and the api is not called
    """
    with pytest.raises(KeyError):
        api = get_api()
        get_columns(api, conf)
def test_new(get_api, get_columns_mock, good_config):
    """
    Given we have a TrelloCard instance
    When we call new
    Then we get a TrelloCard object back
    """
    api_mock = get_api()
    # the created card's id becomes the report_id of the returned object
    api_mock.cards.new.return_value = {'id': '12345'}
    card = TrelloCard.new('Test', 'Running a Test', good_config)
    assert api_mock.cards.new.called
    assert get_columns_mock.called
    assert isinstance(card, TrelloCard)
    assert card.report_id == '12345'
def test_get(get_api, get_columns_mock, good_config):
    """
    Given we have a report_id and a configuration
    When we call TrelloCard.get
    Then we get a TrelloCard object back
    """
    api_mock = get_api()
    # the fetched card carries one checklist, which get() is expected to load
    api_mock.cards.get.return_value = {'idChecklists': ['12345'], 'id': '1234'}
    api_mock.checklists.get.return_value = {'checkItems': [{'name': 'a',
                                                            'id': '789'}],
                                            'name': 'TestList',
                                            'id': 'checklist_id'}
    card = TrelloCard.get('1234', good_config)
    assert api_mock.cards.get.called
    assert api_mock.checklists.get.called
    assert get_columns_mock.called
    assert isinstance(card, TrelloCard)
    assert card.report_id == '1234'
def test_update_description(get_api, get_columns_mock):
    """
    Given we have a TrelloCard instance
    When we call update
    And the description kwarg is available
    Then the card_api update_desc method gets called

    (Renamed from test_update_decription — typo; pytest discovery by the
    ``test_`` prefix is unaffected and nothing references the old name.)
    """
    api_mock = get_api()
    api_mock.cards.get.return_value = {'idChecklists': ['12345']}
    api_mock.cards.update_desc.return_value = True
    api_mock.checklists.get.return_value = {'checkItems': [{'name': 'a',
                                                            'id': '1234'}],
                                            'name': 'TestList',
                                            'id': 'checklist_id'}
    card = get_instance(api_mock)
    card.update(description='New description')
    assert api_mock.cards.update_desc.called
    # the description is passed positionally after the card id
    args, kwargs = api_mock.cards.update_desc.call_args
    assert args == ('card_id', 'New description')
    assert kwargs == {}
def test_update_checklist_complete_url_match(get_api):
    """
    Given we have a TrelloCard instance
    When we call update
    And the checklist kwarg is available with only one checklist item left
    And the url matches
    Then the missing checklist item is marked complete.
    """
    # NOTE(review): no call here — binds the patched factory itself; its
    # attribute accesses auto-create child mocks, which get_instance then
    # uses directly. Works, but inconsistent with tests calling get_api().
    api_mock = get_api
    api_mock.cards.get.return_value = {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'}
    api_mock.cards.check_checkItem.return_value = True
    api_mock.cards.move_checkItem.return_value = True
    checklist_ret = {'checkItems': [{'name': '[a](https://failing/a1)',
                                     'id': '1234',
                                     'state': 'incomplete'},
                                    {'name': '[b](https://failing/b1)',
                                     'id': '5678',
                                     'state': 'incomplete'}],
                     'name': 'TestList', 'id': 'checklist_id'}
    api_mock.checklists.get.return_value = checklist_ret
    card = get_instance(api_mock)
    card.update(checklist={'TestList': [{'name': 'a',
                                         'link': 'https://failing/a1'}]})
    assert api_mock.cards.get.called
    assert api_mock.checklists.get.called
    assert api_mock.cards.check_checkItem.called
    # Verify that only one checklist item was updated
    args, kwargs = api_mock.cards.check_checkItem.call_args
    assert args == ('card_id', '5678')
    assert kwargs == {}
    # Verify that 'a' was moved to the bottom of the list
    assert api_mock.cards.move_checkItem.called
    args2, kwargs2 = api_mock.cards.move_checkItem.call_args
    assert args2 == ('card_id', '5678', 'bottom')
    assert kwargs2 == {}
def test_update_checklist_url_no_match(get_api):
    """
    Given we have a TrelloCard instance
    When we call update
    And the checklist kwarg is available with only one checklist item left
    And the url does not match
    Then the item is renamed in place to carry the new url.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    api_mock.cards.get.return_value = {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'}
    api_mock.cards.rename_checkItem.return_value = True
    checklist_ret = {'checkItems': [{'name': '[a](https://failing/a1)',
                                     'id': '1234',
                                     'state': 'incomplete'}],
                     'name': 'TestList', 'id': 'checklist_id'}
    api_mock.checklists.get.return_value = checklist_ret
    card = get_instance(api_mock)
    card.update(checklist={'TestList': [{'name': 'a',
                                         'link': 'https://failing/a2'}]})
    assert api_mock.cards.get.called
    assert api_mock.checklists.get.called
    # no new item is created and nothing is (un)checked — just a rename
    assert not api_mock.checklists.new_checkItem.called
    assert not api_mock.cards.uncheck_checkItem.called
    assert api_mock.cards.rename_checkItem.called
    args, kwargs = api_mock.cards.rename_checkItem.call_args
    assert args == ('card_id', '1234', '[a](https://failing/a2)')
    assert kwargs == {}
def test_update_checklist_no_url(get_api):
    """
    Given we have a TrelloCard instance
    When we call update
    And the checklist item's url is missing
    And the checklist kwarg is available with only one checklist item left
    Then the item is renamed to include the new url.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    api_mock.cards.get.return_value = {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'}
    api_mock.cards.rename_checkItem.return_value = True
    checklist_ret = {'checkItems': [{'name': '[a]()',
                                     'id': '1234',
                                     'state': 'incomplete'}],
                     'name': 'TestList', 'id': 'checklist_id'}
    api_mock.checklists.get.return_value = checklist_ret
    card = get_instance(api_mock)
    card.update(checklist={'TestList': [{'name': 'a',
                                         'link': 'https://failing/a2'}]})
    assert api_mock.cards.get.called
    assert api_mock.checklists.get.called
    assert not api_mock.checklists.new_checkItem.called
    assert not api_mock.cards.uncheck_checkItem.called
    assert api_mock.cards.rename_checkItem.called
    args, kwargs = api_mock.cards.rename_checkItem.call_args
    assert args == ('card_id', '1234', '[a](https://failing/a2)')
    assert kwargs == {}
def test_update_checklist_incomplete(get_api):
    """
    Given we have a TrelloCard instance
    When we call update
    And the checklist kwarg is available with two items listed
    Then the re-appearing checklist item is marked incomplete.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    api_mock.cards.get.return_value = {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'}
    api_mock.cards.uncheck_checkItem.return_value = True
    api_mock.cards.check_checkItem.return_value = True
    api_mock.cards.move_checkItem.return_value = True
    # 'a' was previously completed but shows up again in the update payload
    checklist_ret = {'checkItems': [{'name': 'a',
                                     'id': '1234',
                                     'state': 'complete'},
                                    {'name': 'b',
                                     'id': '5678',
                                     'state': 'incomplete'}],
                     'name': 'TestList', 'id': 'checklist_id'}
    api_mock.checklists.get.return_value = checklist_ret
    card = get_instance(api_mock)
    card.update(checklist={'TestList': [{'name': 'a', 'link': 'url/a'},
                                        {'name': 'b', 'link': 'url/b'}]})
    assert api_mock.checklists.get.called
    assert api_mock.cards.uncheck_checkItem.called
    # Check to see if 'a' was unchecked
    args, kwargs = api_mock.cards.uncheck_checkItem.call_args
    assert args == ('card_id', '1234')
    assert kwargs == {}
    # Check to makes sure nothing else was marked complete.
    assert not api_mock.cards.check_checkItem.called
    # Check to make sure 'a' was moved to the top of the list
    assert api_mock.cards.move_checkItem.called
    args2, kwargs2 = api_mock.cards.move_checkItem.call_args
    assert args2 == ('card_id', '1234', 'top')
    assert kwargs2 == {}
def test_update_new_checklist(get_api):
    """
    Given we have a TrelloCard instance
    When we call update
    And we don't have an existing checklist
    Then a checklist and all of the items are added.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    # side_effect sequences model the card before and after checklist creation
    api_mock.cards.get.side_effect = [{'idChecklists': [], 'id': 'card_id'},
                                      {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'},
                                      {'idChecklists': ['checklist_id'],
                                       'id': 'card_id'}]
    api_mock.cards.new_checklist.return_value = {'id': 'checklist_id'}
    checklist_ret = [{'name': 'TestList', 'id': 'checklist_id',
                      'checkItems': []},
                     {'checkItems': [{'name': '[a](url/a)',
                                      'id': '1234',
                                      'state': 'incomplete'},
                                     {'name': '[b](url/b)',
                                      'id': '5678',
                                      'state': 'incomplete'}],
                      'name': 'TestList', 'id': 'checklist_id'}]
    api_mock.checklists.get.side_effect = checklist_ret
    api_mock.checklists.new_checkItem.return_value = True
    card = get_instance(api_mock)
    card.update(checklist={'TestList': [{'name': 'a', 'link': 'url/a'},
                                        {'name': 'b', 'link': 'url/b'}]})
    api_mock.cards.new_checklist.assert_called_once()
    assert api_mock.checklists.get.call_count == 2
    assert api_mock.checklists.new_checkItem.call_count == 2
    # both items are created on the freshly made checklist, in order
    call_list = api_mock.checklists.new_checkItem.call_args_list
    assert [call('checklist_id', '[a](url/a)'),
            call('checklist_id', '[b](url/b)')] == call_list
def test_close_empty(get_api):
    """
    Given we have a TrelloCard instance
    When we call close
    And the card does not have a checklist
    Then none of the checklist api calls are made.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    api_mock.cards.get.return_value = {'idList': '123', 'id': 'card_id'}
    api_mock.cards.update_idList.return_value = True
    api_mock.cards.update_pos.return_value = True
    api_mock.checklists.get.return_value = {}
    card = get_instance(api_mock)
    card.close()
    # no checklist items to tick off, but the card is still moved
    assert not api_mock.cards.check_checkItem.called
    assert api_mock.cards.update_idList.called
    assert api_mock.cards.update_pos.called
def test_close_complete_items(get_api):
    """
    Given we have a TrelloCard instance
    When we call close
    And the card has a checklist
    Then the check_checkItem api is called for each item.
    """
    # NOTE(review): binds the patched factory (no call); see sibling tests.
    api_mock = get_api
    api_mock.cards.get.return_value = {'idList': '123', 'id': 'card_id',
                                       'idChecklists': ['checklist_id']}
    api_mock.cards.check_checkItem.return_value = True
    checklist_ret = {'checkItems': [{'name': 'a',
                                     'id': '1234',
                                     'state': 'incomplete'},
                                    {'name': 'b',
                                     'id': '5678',
                                     'state': 'incomplete'}],
                     'name': 'TestList', 'id': 'checklist_id'}
    api_mock.checklists.get.return_value = checklist_ret
    api_mock.cards.update_pos.return_value = True
    card = get_instance(api_mock)
    card.close()
    assert api_mock.cards.check_checkItem.called
    # every remaining item gets checked off, in list order
    call_list = api_mock.cards.check_checkItem.call_args_list
    assert call_list == [call('card_id', '1234'), call('card_id', '5678')]
    assert api_mock.cards.update_idList.called
    assert api_mock.cards.update_pos.called
| 38.896 | 79 | 0.587344 |
55b200293a5e191d11743c86ed2f030b9d630c28 | 661 | py | Python | updateTwitterStatus.py | acesti84/pi4tweeterbot | a72f0008ff334ed81f3534956b83ad14625ab587 | [
"MIT"
] | null | null | null | updateTwitterStatus.py | acesti84/pi4tweeterbot | a72f0008ff334ed81f3534956b83ad14625ab587 | [
"MIT"
] | null | null | null | updateTwitterStatus.py | acesti84/pi4tweeterbot | a72f0008ff334ed81f3534956b83ad14625ab587 | [
"MIT"
] | 1 | 2020-08-12T21:49:37.000Z | 2020-08-12T21:49:37.000Z | #!/usr/bin/env python
import sys
import time
import socket
from myconf import *
from twython import Twython
def get_ip():
    """Return this host's primary outbound IPv4 address as a string.

    Connecting a UDP socket does not send any packet; it only makes the OS
    choose the source address it would route from, which getsockname()
    then reports. Falls back to loopback when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
# wait five minutes so networking is up when this runs at boot (Python 2 script)
time.sleep(300)
host_name = socket.gethostname()
# NOTE(review): host_ip is computed but never used below — candidate for removal
host_ip = socket.gethostbyname(host_name)
# tweet "hostname - LAN ip" so the Pi can be found on the network
tweetStr = host_name + ' - ' + get_ip()
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
api.update_status(status=tweetStr)
print "Tweeted: " + tweetStr
| 21.322581 | 70 | 0.673222 |
8f1e72ef113f852bef277c8eb5238b26679440d1 | 4,161 | py | Python | mail.py | alexyvassili/autopublisher | f3e2c78e1594a67739d675474283a7dc0060dacd | [
"MIT"
] | null | null | null | mail.py | alexyvassili/autopublisher | f3e2c78e1594a67739d675474283a7dc0060dacd | [
"MIT"
] | 8 | 2019-10-31T20:44:11.000Z | 2022-03-11T23:50:26.000Z | mail.py | alexyvassili/autopublisher | f3e2c78e1594a67739d675474283a7dc0060dacd | [
"MIT"
] | null | null | null | import os
import logging
import imaplib
import email
from email.header import decode_header
import mimetypes
import shutil
from secrets import MAIL_SERVER, MAIL_LOGIN, MAIL_PASSWORD
def get_connection():
    """Log in to the IMAP server from secrets and return a connection
    with INBOX selected.

    Raises ConnectionError when the login response status is not 'OK'.
    """
    imap = imaplib.IMAP4_SSL(MAIL_SERVER)
    status, response = imap.login(MAIL_LOGIN, MAIL_PASSWORD)
    if status != 'OK':
        raise ConnectionError(f"Error logged in email box. Status: {status}")
    imap.select('INBOX')
    return imap
def get_new_mails_from(connection, from_email):
    """Return the ids (bytes) of unseen messages sent from *from_email*."""
    _status, data = connection.search(None, f'(FROM {from_email} UNSEEN)')
    # the first response item is one space-separated byte string of message ids
    return [uid for uid in data[0].split(b' ') if uid]
def decode_mail_field(message, field):
    """Return header *field* of *message* decoded to text, or "" when absent."""
    try:
        raw, charset = decode_header(message[field])[0]
    except TypeError:
        # message[field] is None for a missing header; decode_header rejects it
        return ""
    return raw.decode(charset) if charset else raw
def get_attachments_list(message):
    """Collect the (decoded) filenames of every attachment part in *message*."""
    names = []
    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue  # container part, not an actual attachment
        filename = part.get_filename()
        if not filename:
            continue
        raw, charset = decode_header(filename)[0]
        names.append(raw.decode(charset) if charset else filename)
    return names
def get_mail_metadata(message):
    """Extract Date, From, Subject, decoded Body text and attachment
    filenames from an email.message.Message into a plain dict."""
    mail_metadata = {}
    mail_metadata['Date'] = message['Date']
    mail_metadata['From'] = decode_mail_field(message, 'From')
    mail_metadata['Subject'] = decode_mail_field(message, 'Subject')
    try:
        # get_payload can return list or bytes str, so
        # try with str raise AttributeError: 'str' object has no attribute 'get_payload'
        mail_body = message.get_payload()[0].get_payload(decode=True)
    except AttributeError:
        mail_body = message.get_payload(decode=True)
    # assumes the body bytes are UTF-8 (decode() default) — TODO confirm
    mail_metadata['Body'] = mail_body.decode()
    mail_metadata['Attachments'] = get_attachments_list(message)
    return mail_metadata
def get_message(connection, mail_id):
    """Fetch one message by id and parse it into an email.message.Message."""
    status, payload = connection.fetch(mail_id, '(RFC822)')
    assert status == "OK"
    # payload[0] is a (header, raw-bytes) pair; index 1 holds the RFC822 body
    return email.message_from_bytes(payload[0][1])
def save_email(message, mail_folder):
    """Save every part of *message* as a file inside *mail_folder*.

    The folder is wiped and recreated first. Parts without a filename get
    a generated 'part-NNN.ext' name based on their MIME type.
    """
    if os.path.exists(mail_folder):
        shutil.rmtree(mail_folder)
    os.makedirs(mail_folder)
    # The code below was copied from example
    counter = 1
    for part in message.walk():
        # multipart/* are just containers
        if part.get_content_maintype() == 'multipart':
            continue
        # Applications should really sanitize the given filename so that an
        # email message can't be used to overwrite important files
        filename = part.get_filename()
        if not filename:
            ext = mimetypes.guess_extension(part.get_content_type())
            if not ext:
                # Use a generic bag-of-bits extension
                ext = '.bin'
            filename = 'part-%03d%s' % (counter, ext)
        else:
            bts, encoding = decode_header(filename)[0]
            if encoding:
                filename = bts.decode(encoding)
        counter += 1
        with open(os.path.join(mail_folder, filename), 'wb') as fp:
            # Broken by Koshelev's email of March 3, 2020:
            # its attachments contain an unidentified .txt (readable)
            # and an unidentified .eml (!) (the result of mimetypes.guess_extension, see above)
            # on that .eml, get_payload returns None and everything breaks.
            # We don't really need it, but the phenomenon itself is curious.
            # FIX: wrap in an exception handler and create an empty file
            try:
                fp.write(part.get_payload(decode=True))
            except TypeError:
                logging.warning(f"Сохранение (unknown) не удалось: получен не строковый объект.")
                fp.write(b"\n")
def mark_as_unread(connection, mail_id: bytes):
    """Clear the \\Seen flag on *mail_id* so the message shows as unread again."""
    # raw string avoids the invalid '\S' escape DeprecationWarning;
    # the byte value of the flag string is unchanged
    connection.store(mail_id, '-FLAGS', r'(\Seen)')
def close_connection(connection):
    """Log out from the IMAP server; raise ConnectionError on failure.

    A successful imaplib logout responds with status 'BYE'.
    """
    status, _response = connection.logout()
    if status == 'BYE':
        return
    raise ConnectionError(f"Error logged out email box. Status: {status}")
a8f2aa58d341777d5a09173affe67c76b6ee802a | 1,064 | py | Python | scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 1,761 | 2015-07-10T23:14:17.000Z | 2022-03-30T07:49:49.000Z | scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 487 | 2015-07-07T23:21:20.000Z | 2022-03-30T17:13:22.000Z | scripts/examples/OpenMV/04-Image-Filters/perspective_correction.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 882 | 2015-08-01T08:34:19.000Z | 2022-03-30T07:36:23.000Z | # Perspective Correction
#
# This example shows off how to use the rotation_corr() to fix perspective
# issues related to how your OpenMV Cam is mounted.
import sensor, image, time
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
# The image will be warped such that the following points become the new:
#
# (0, 0)
# (w-1, 0)
# (w-1, h-1)
# (0, h-1)
#
# Try setting the points below to the corners of a quadrilateral
# (in clock-wise order) in the field-of-view. You can get points
# on the image by clicking and dragging on the frame buffer and
# recording the values shown in the histogram widget.
w = sensor.width()
h = sensor.height()
TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME!
(w-1, 0), # (x, y) CHANGE ME!
(w-1, h-1), # (x, y) CHANGE ME!
(0, h-1)] # (x, y) CHANGE ME!
while(True):
clock.tick()
img = sensor.snapshot().rotation_corr(corners = TARGET_POINTS)
print(clock.fps())
| 26.6 | 74 | 0.642857 |
313f95665dc08bab9bea8685d4f07c809a15bcf4 | 7,432 | py | Python | app/tools.py | nowakowsky/Pokerstars-Api | 2a70b32dd4c82cc7c428b581c1fb4e39dfaf5b5d | [
"MIT"
] | 4 | 2021-07-11T13:08:37.000Z | 2022-03-17T14:25:44.000Z | app/tools.py | nowakowsky/Pokerstars-Api | 2a70b32dd4c82cc7c428b581c1fb4e39dfaf5b5d | [
"MIT"
] | 1 | 2021-03-04T08:27:21.000Z | 2021-03-25T06:48:18.000Z | app/tools.py | nowakowsky/Pokerstars-Api | 2a70b32dd4c82cc7c428b581c1fb4e39dfaf5b5d | [
"MIT"
] | 4 | 2021-03-06T16:47:28.000Z | 2022-01-29T15:50:02.000Z | import cv2
import numpy as np
from datetime import datetime
from PIL import Image
import pytesseract
import os
import win32gui
import win32ui
from ctypes import windll
import settings
import models
def readPlayerCards(filename: str) -> list:
    """Read the player's two hole cards from a table screenshot.

    :param filename: path to the screenshot image file.
    :returns: list of models.Card objects, empty when no hand is dealt yet.
    """
    cards = []
    # hard-coded pixel boxes [x1, x2, y1, y2] of the two card slots;
    # the second card sits 37 px to the right of the first
    cards_diff = 37
    card1 = [285, 296, 298, 312]
    card2 = [285+cards_diff, 296+cards_diff, 298, 312]
    image = cv2.imread(filename)
    card1 = image[card1[2]:card1[3], card1[0]:card1[1]]
    card2 = image[card2[2]:card2[3], card2[0]:card2[1]]
    # the second slot still showing table felt means no cards were dealt
    if not emptyCard(card2):
        cards.append(cardInfo(card1))
        cards.append(cardInfo(card2))
    return cards
def readTableCards(filename: str) -> list:
    """Read the community cards (flop/turn/river) from a table screenshot.

    :param filename: path to the screenshot image file.
    :returns: list of models.Card objects for every card currently on the board.
    """
    cards = []
    # y position for cards on table don't change
    cards_y1, cards_y2 = 174, 190
    # x changes by 39 px right
    cards_diff = 39
    # hardcoded card 1 x position
    table_cards = [226, 237]
    # hardcoded floop, turn and river positions
    for i in range(1,5):
        table_cards.append(table_cards[0] + cards_diff * i)
        table_cards.append(table_cards[1] + cards_diff * i)
    image = cv2.imread(filename)
    # table_cards holds (x1, x2) pairs flattened, hence the step of 2
    for i in range(0,10,2):
        #that can be improved, card "10" has other coords
        if i == 6 or i == 4:
            card = image[cards_y1:cards_y2, table_cards[i]-2:table_cards[i+1]]
        elif i == 8:
            card = image[cards_y1:cards_y2, table_cards[i]-2:table_cards[i+1]-3]
        else:
            card = image[cards_y1:cards_y2, table_cards[i]:table_cards[i+1]]
        # if card not on table yet
        if not emptyCard(card):
            cards.append(cardInfo(card))
    return cards
def emptyCard(img) -> bool:
    """Return True when the crop shows table felt (orange) instead of a card.

    :param img: BGR image crop of a card slot.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    orange_lower = np.array([10, 100, 20], np.uint8)
    orange_upper = np.array([25, 255, 255], np.uint8)
    orange_mask = cv2.inRange(hsv, orange_lower, orange_upper)
    # more than 100 orange pixels -> no card covers the felt; the original
    # fell off the end and returned an implicit (falsy) None here
    return len(np.argwhere(orange_mask)) > 100
# point pytesseract at the Tesseract binary configured in settings
pytesseract.pytesseract.tesseract_cmd = settings.tesseract_path
def __searchHwnds(name: str) -> list:
    """
    Searches for top-level windows whose title contains *name* (partial,
    case-sensitive match) and returns the list of their hwnd handles.
    """
    hwnds = []
    # EnumWindows invokes this callback once per top-level window
    def foreach_window(hwnd, lParam):
        if name in win32gui.GetWindowText(hwnd):
            hwnds.append(hwnd)
    win32gui.EnumWindows(foreach_window, None)
    return hwnds
def grabScreen(windows: list) -> list:
    """
    Takes screenshots and returns list of models.Screenshot objects
    Credits to hazzey from stackoverflow
    I've just edited his function to search for windows by partial name, screenshot all of them and grab table name
    https://stackoverflow.com/questions/19695214/python-screenshot-of-inactive-window-printwindow-win32gui
    """
    screenshots = []
    for hwnd in windows:
        left, top, right, bot = win32gui.GetWindowRect(hwnd)
        w = right - left
        h = bot - top
        # build an off-screen device context / bitmap the size of the window
        hwndDC = win32gui.GetWindowDC(hwnd)
        mfcDC = win32ui.CreateDCFromHandle(hwndDC)
        saveDC = mfcDC.CreateCompatibleDC()
        saveBitMap = win32ui.CreateBitmap()
        saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
        saveDC.SelectObject(saveBitMap)
        # PrintWindow renders the window into our DC even when it is occluded
        result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
        bmpinfo = saveBitMap.GetInfo()
        bmpstr = saveBitMap.GetBitmapBits(True)
        im = Image.frombuffer(
            'RGB',
            (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
            bmpstr, 'raw', 'BGRX', 0, 1)
        # release all GDI resources before moving on
        win32gui.DeleteObject(saveBitMap.GetHandle())
        saveDC.DeleteDC()
        mfcDC.DeleteDC()
        win32gui.ReleaseDC(hwnd, hwndDC)
        if result == 1:
            # PrintWindow succeeded: save under a timestamped name and keep
            # the table name, which precedes the first '-' in the title
            filename = datetime.now().strftime("%H%M%S_%f") + '.png'
            im.save(filename)
            tablename = win32gui.GetWindowText(hwnd).split("-")[0]
            screenshot = models.Screenshot(tablename, filename)
            screenshots.append(screenshot)
    return screenshots
def moveAndResizeWindows() -> list:
    """
    Moving windows is not really important. Only windows size matters
    this function prepares windows, it can be easly changed if anything will require visibility
    returns output of __searchHwnds (all found hwnds)
    """
    name = "Limit" # partial window name
    # target geometry: x, y, width, height expected by the pixel coordinates
    # hard-coded elsewhere in this module
    game_window = [0,0,640,540]
    game_windows = __searchHwnds(name)
    for hwnd in game_windows:
        win32gui.MoveWindow(hwnd, *game_window, True)
    return game_windows
def cardInfo(image) -> models.Card:
    """
    Build a models.Card from a BGR card crop.

    The rank is OCR'd from a grayscale copy of the crop, the suit is
    classified from the original colors.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return models.Card(__getCardValue(grayscale), __getCardColor(image))
def __getCardValue(image) -> str:
    """
    Do not use, it is being called by cardInfo()
    OCR a grayscale card image and return its value as a string:
    -> '2' ... '10'
    -> 'J', 'Q', 'K', 'A'
    -> 'X' - recognition failed
    Tries --psm 7 (single line) first, then falls back to --psm 9
    (single word) when the first pass yields nothing usable.
    """
    for config in (settings.config_single_line, settings.config_single_word):
        text = pytesseract.image_to_string(image, config=config)
        text = text.replace("\f", "").replace("\n", "").replace("\r", "").replace(" ", "")
        if '10' in text:
            return '10'
        # Exact single-character match only. The original substring test
        # ("card in '23456789JQKA'") wrongly accepted '' (empty string is
        # a substring of everything, then card[0] raised IndexError) and
        # multi-character runs such as '23'.
        if len(text) == 1 and text in '23456789JQKA':
            return text
    return "X"
def __getCardColor(image) -> models.Colors:
    """
    Do not use, it is being called by cardInfo()
    Takes an image in BGR format and returns the detected suit:
    -> Tiles (blue)
    -> Hearts (red)
    -> Clovers (green)
    -> Pikes (black)
    -> Error (nothing matched)
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # detectors are tried in the same order as the original chain
    checks = (
        (__checkBlue, models.Colors.Tiles),
        (__checkRed, models.Colors.Hearts),
        (__checkGreen, models.Colors.Clovers),
        (__checkBlack, models.Colors.Pikes),
    )
    for detector, suit in checks:
        if detector(hsv):
            return suit
    return models.Colors.Error
def __checkRed(img) -> bool:
    """Return True when img (HSV) has more than 30 pixels in the red band."""
    red_lower = np.array([136, 87, 111], np.uint8)
    red_upper = np.array([180, 255, 255], np.uint8)
    # per-pixel band test on all three HSV channels (same as cv2.inRange)
    in_band = np.all((img >= red_lower) & (img <= red_upper), axis=-1)
    # explicit bool: the original fell through and returned None on a miss
    return bool(np.count_nonzero(in_band) > 30)
def __checkGreen(img) -> bool:
    """Return True when img (HSV) has more than 30 pixels in the green band."""
    green_lower = np.array([25, 52, 72], np.uint8)
    green_upper = np.array([102, 255, 255], np.uint8)
    # per-pixel band test on all three HSV channels (same as cv2.inRange)
    in_band = np.all((img >= green_lower) & (img <= green_upper), axis=-1)
    # explicit bool: the original fell through and returned None on a miss
    return bool(np.count_nonzero(in_band) > 30)
def __checkBlue(img) -> bool:
    """Return True when img (HSV) has more than 30 pixels in the blue band."""
    blue_lower = np.array([94, 80, 2], np.uint8)
    blue_upper = np.array([120, 255, 255], np.uint8)
    # per-pixel band test on all three HSV channels (same as cv2.inRange)
    in_band = np.all((img >= blue_lower) & (img <= blue_upper), axis=-1)
    # explicit bool: the original fell through and returned None on a miss
    return bool(np.count_nonzero(in_band) > 30)
# more tests for the black mask are required
def __checkBlack(img) -> bool:
    """Return True when img (HSV) has more than 20 pixels in the black band."""
    black_lower = np.array([0, 0, 0], np.uint8)
    black_upper = np.array([50, 50, 50], np.uint8)
    # per-pixel band test on all three HSV channels (same as cv2.inRange)
    in_band = np.all((img >= black_lower) & (img <= black_upper), axis=-1)
    # explicit bool: the original fell through and returned None on a miss
    return bool(np.count_nonzero(in_band) > 20)
| 30.334694 | 160 | 0.627557 |
bd851092ecc952622b3c9ae2ff1da668acab3115 | 11,894 | py | Python | sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py | acidburn0zzz/beam | 92540d0ecd98125e4f6fe13917dca46a77af52f0 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py | acidburn0zzz/beam | 92540d0ecd98125e4f6fe13917dca46a77af52f0 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/dataflow/internal/apiclient_test.py | acidburn0zzz/beam | 92540d0ecd98125e4f6fe13917dca46a77af52f0 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the apiclient module."""
import unittest
import mock
import pkg_resources
from apache_beam.metrics.cells import DistributionData
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.runners.dataflow.internal import dependency
from apache_beam.runners.dataflow.internal.clients import dataflow
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
apiclient = None
# pylint: enable=wrong-import-order, wrong-import-position
FAKE_PIPELINE_URL = "gs://invalid-bucket/anywhere"
@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')
class UtilTest(unittest.TestCase):
  """Tests for apiclient helpers: Environment/Job construction, job
  naming, counter/metric translation, worker IP configuration, labels
  and runner-harness image overrides."""
  @unittest.skip("Enable once BEAM-1080 is fixed.")
  def test_create_application_client(self):
    pipeline_options = PipelineOptions()
    apiclient.DataflowApplicationClient(pipeline_options)
  def test_pipeline_url(self):
    pipeline_options = PipelineOptions(
        ['--subnetwork', '/regions/MY/subnetworks/SUBNETWORK',
         '--temp_location', 'gs://any-location/temp'])
    env = apiclient.Environment([],
                                pipeline_options,
                                '2.0.0', # any environment version
                                FAKE_PIPELINE_URL)
    recovered_options = None
    # for/else: fail only when the loop finished without finding 'options'
    for additionalProperty in env.proto.sdkPipelineOptions.additionalProperties:
      if additionalProperty.key == 'options':
        recovered_options = additionalProperty.value
        break
    else:
      self.fail('No pipeline options found in %s'
                % env.proto.sdkPipelineOptions)
    pipeline_url = None
    for property in recovered_options.object_value.properties:
      if property.key == 'pipelineUrl':
        pipeline_url = property.value
        break
    else:
      self.fail('No pipeline_url found in %s' % recovered_options)
    self.assertEqual(pipeline_url.string_value, FAKE_PIPELINE_URL)
  def test_set_network(self):
    pipeline_options = PipelineOptions(
        ['--network', 'anetworkname',
         '--temp_location', 'gs://any-location/temp'])
    env = apiclient.Environment([], #packages
                                pipeline_options,
                                '2.0.0', #any environment version
                                FAKE_PIPELINE_URL)
    self.assertEqual(env.proto.workerPools[0].network,
                     'anetworkname')
  def test_set_subnetwork(self):
    pipeline_options = PipelineOptions(
        ['--subnetwork', '/regions/MY/subnetworks/SUBNETWORK',
         '--temp_location', 'gs://any-location/temp'])
    env = apiclient.Environment([], #packages
                                pipeline_options,
                                '2.0.0', #any environment version
                                FAKE_PIPELINE_URL)
    self.assertEqual(env.proto.workerPools[0].subnetwork,
                     '/regions/MY/subnetworks/SUBNETWORK')
  def test_invalid_default_job_name(self):
    # Regexp for job names in dataflow.
    regexp = '^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$'
    job_name = apiclient.Job._build_default_job_name('invalid.-_user_n*/ame')
    self.assertRegexpMatches(job_name, regexp)
    job_name = apiclient.Job._build_default_job_name(
        'invalid-extremely-long.username_that_shouldbeshortened_or_is_invalid')
    self.assertRegexpMatches(job_name, regexp)
  def test_default_job_name(self):
    job_name = apiclient.Job.default_job_name(None)
    regexp = 'beamapp-.*-[0-9]{10}-[0-9]{6}'
    self.assertRegexpMatches(job_name, regexp)
  def test_split_int(self):
    number = 12345
    split_number = apiclient.to_split_int(number)
    self.assertEqual((split_number.lowBits, split_number.highBits),
                     (number, 0))
    # values shifted above 32 bits must land in highBits
    shift_number = number << 32
    split_number = apiclient.to_split_int(shift_number)
    self.assertEqual((split_number.lowBits, split_number.highBits),
                     (0, number))
  def test_translate_distribution(self):
    metric_update = dataflow.CounterUpdate()
    distribution_update = DistributionData(16, 2, 1, 15)
    apiclient.translate_distribution(distribution_update, metric_update)
    self.assertEqual(metric_update.distribution.min.lowBits,
                     distribution_update.min)
    self.assertEqual(metric_update.distribution.max.lowBits,
                     distribution_update.max)
    self.assertEqual(metric_update.distribution.sum.lowBits,
                     distribution_update.sum)
    self.assertEqual(metric_update.distribution.count.lowBits,
                     distribution_update.count)
  def test_translate_means(self):
    metric_update = dataflow.CounterUpdate()
    accumulator = mock.Mock()
    accumulator.sum = 16
    accumulator.count = 2
    apiclient.MetricUpdateTranslators.translate_scalar_mean_int(accumulator,
                                                                metric_update)
    self.assertEqual(metric_update.integerMean.sum.lowBits, accumulator.sum)
    self.assertEqual(metric_update.integerMean.count.lowBits, accumulator.count)
    accumulator.sum = 16.0
    accumulator.count = 2
    apiclient.MetricUpdateTranslators.translate_scalar_mean_float(accumulator,
                                                                  metric_update)
    self.assertEqual(metric_update.floatingPointMean.sum, accumulator.sum)
    self.assertEqual(
        metric_update.floatingPointMean.count.lowBits, accumulator.count)
  def test_default_ip_configuration(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp'])
    env = apiclient.Environment([],
                                pipeline_options,
                                '2.0.0',
                                FAKE_PIPELINE_URL)
    self.assertEqual(env.proto.workerPools[0].ipConfiguration, None)
  def test_public_ip_configuration(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp',
         '--use_public_ips'])
    env = apiclient.Environment([],
                                pipeline_options,
                                '2.0.0',
                                FAKE_PIPELINE_URL)
    self.assertEqual(
        env.proto.workerPools[0].ipConfiguration,
        dataflow.WorkerPool.IpConfigurationValueValuesEnum.WORKER_IP_PUBLIC)
  def test_private_ip_configuration(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp',
         '--no_use_public_ips'])
    env = apiclient.Environment([],
                                pipeline_options,
                                '2.0.0',
                                FAKE_PIPELINE_URL)
    self.assertEqual(
        env.proto.workerPools[0].ipConfiguration,
        dataflow.WorkerPool.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE)
  def test_harness_override_present_in_dataflow_distributions(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp', '--streaming'])
    override = ''.join(
        ['runner_harness_container_image=',
         dependency.DATAFLOW_CONTAINER_IMAGE_REPOSITORY,
         '/harness:2.2.0'])
    distribution = pkg_resources.Distribution(version='2.2.0')
    # pretend a released SDK distribution is installed
    with mock.patch(
        'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
        '.get_distribution',
        mock.MagicMock(return_value=distribution)):
      env = apiclient.Environment([], #packages
                                  pipeline_options,
                                  '2.0.0',
                                  FAKE_PIPELINE_URL) #any environment version
      self.assertIn(override, env.proto.experiments)
  @mock.patch('apache_beam.runners.dataflow.internal.dependency.'
              'beam_version.__version__', '2.2.0')
  def test_harness_override_present_in_beam_releases(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp', '--streaming'])
    override = ''.join(
        ['runner_harness_container_image=',
         dependency.DATAFLOW_CONTAINER_IMAGE_REPOSITORY,
         '/harness:2.2.0'])
    # no installed distribution, but the beam version string is a release
    with mock.patch(
        'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
        '.get_distribution',
        mock.Mock(side_effect=pkg_resources.DistributionNotFound())):
      env = apiclient.Environment([], #packages
                                  pipeline_options,
                                  '2.0.0',
                                  FAKE_PIPELINE_URL) #any environment version
      self.assertIn(override, env.proto.experiments)
  @mock.patch('apache_beam.runners.dataflow.internal.dependency.'
              'beam_version.__version__', '2.2.0-dev')
  def test_harness_override_absent_in_unreleased_sdk(self):
    pipeline_options = PipelineOptions(
        ['--temp_location', 'gs://any-location/temp', '--streaming'])
    with mock.patch(
        'apache_beam.runners.dataflow.internal.dependency.pkg_resources'
        '.get_distribution',
        mock.Mock(side_effect=pkg_resources.DistributionNotFound())):
      env = apiclient.Environment([], #packages
                                  pipeline_options,
                                  '2.0.0',
                                  FAKE_PIPELINE_URL) #any environment version
      if env.proto.experiments:
        for experiment in env.proto.experiments:
          self.assertNotIn('runner_harness_container_image=', experiment)
  def test_labels(self):
    pipeline_options = PipelineOptions(
        ['--project', 'test_project', '--job_name', 'test_job_name',
         '--temp_location', 'gs://test-location/temp'])
    job = apiclient.Job(pipeline_options, FAKE_PIPELINE_URL)
    self.assertIsNone(job.proto.labels)
    pipeline_options = PipelineOptions(
        ['--project', 'test_project', '--job_name', 'test_job_name',
         '--temp_location', 'gs://test-location/temp',
         '--label', 'key1=value1',
         '--label', 'key2',
         '--label', 'key3=value3',
         '--labels', 'key4=value4',
         '--labels', 'key5'])
    job = apiclient.Job(pipeline_options, FAKE_PIPELINE_URL)
    self.assertEqual(5, len(job.proto.labels.additionalProperties))
    self.assertEqual('key1', job.proto.labels.additionalProperties[0].key)
    self.assertEqual('value1', job.proto.labels.additionalProperties[0].value)
    self.assertEqual('key2', job.proto.labels.additionalProperties[1].key)
    self.assertEqual('', job.proto.labels.additionalProperties[1].value)
    self.assertEqual('key3', job.proto.labels.additionalProperties[2].key)
    self.assertEqual('value3', job.proto.labels.additionalProperties[2].value)
    self.assertEqual('key4', job.proto.labels.additionalProperties[3].key)
    self.assertEqual('value4', job.proto.labels.additionalProperties[3].value)
    self.assertEqual('key5', job.proto.labels.additionalProperties[4].key)
    self.assertEqual('', job.proto.labels.additionalProperties[4].value)
self.assertEqual('', job.proto.labels.additionalProperties[4].value)
# Run the suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| 43.567766 | 80 | 0.660417 |
74c85aae3447862ada6d1e5bfc1268895aa8a0e3 | 3,182 | py | Python | core/dbt/config/selectors.py | jweibel22/dbt | 9d00c000720d17c42a4fa08a26b75bd500cc857f | [
"Apache-2.0"
] | null | null | null | core/dbt/config/selectors.py | jweibel22/dbt | 9d00c000720d17c42a4fa08a26b75bd500cc857f | [
"Apache-2.0"
] | null | null | null | core/dbt/config/selectors.py | jweibel22/dbt | 9d00c000720d17c42a4fa08a26b75bd500cc857f | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Dict, Any
from hologram import ValidationError
from .renderer import SelectorRenderer
from dbt.clients.system import (
load_file_contents,
path_exists,
resolve_path_from_base,
)
from dbt.clients.yaml_helper import load_yaml_text
from dbt.contracts.selection import SelectorFile
from dbt.exceptions import DbtSelectorsError, RuntimeException
from dbt.graph import parse_from_selectors_definition, SelectionSpec
# User-facing error text for an invalid selectors.yml; {error} is filled
# with the schema validator's message (see selector_config_from_data).
MALFORMED_SELECTOR_ERROR = """\
The selectors.yml file in this project is malformed. Please double check
the contents of this file and fix any errors before retrying.
You can find more information on the syntax for this file here:
https://docs.getdbt.com/docs/package-management
Validator Error:
{error}
"""
class SelectorConfig(Dict[str, SelectionSpec]):
    """Mapping of selector name -> parsed SelectionSpec.

    Built from a project's selectors.yml; every schema or parse failure
    is normalized into DbtSelectorsError with
    result_type='invalid_selector'.
    """
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SelectorConfig':
        """Validate already-rendered selector data and parse the specs."""
        try:
            selector_file = SelectorFile.from_dict(data)
            selectors = parse_from_selectors_definition(selector_file)
        except (ValidationError, RuntimeException) as exc:
            raise DbtSelectorsError(
                f'Could not read selector file data: {exc}',
                result_type='invalid_selector',
            ) from exc
        return cls(selectors)
    @classmethod
    def render_from_dict(
        cls,
        data: Dict[str, Any],
        renderer: SelectorRenderer,
    ) -> 'SelectorConfig':
        """Render raw data (jinja etc.) with *renderer*, then parse it."""
        try:
            rendered = renderer.render_data(data)
        except (ValidationError, RuntimeException) as exc:
            raise DbtSelectorsError(
                f'Could not render selector data: {exc}',
                result_type='invalid_selector',
            ) from exc
        return cls.from_dict(rendered)
    @classmethod
    def from_path(
        cls, path: Path, renderer: SelectorRenderer,
    ) -> 'SelectorConfig':
        """Load, render and parse a selectors.yml file from *path*.

        Any DbtSelectorsError raised downstream is annotated with the
        file path before being re-raised.
        """
        try:
            data = load_yaml_text(load_file_contents(str(path)))
        except (ValidationError, RuntimeException) as exc:
            raise DbtSelectorsError(
                f'Could not read selector file: {exc}',
                result_type='invalid_selector',
                path=path,
            ) from exc
        try:
            return cls.render_from_dict(data, renderer)
        except DbtSelectorsError as exc:
            exc.path = path
            raise
def selector_data_from_root(project_root: str) -> Dict[str, Any]:
    """Load and parse selectors.yml from the project root.

    Returns the parsed YAML data, or None when the file does not exist.
    """
    filepath = resolve_path_from_base('selectors.yml', project_root)
    if not path_exists(filepath):
        return None
    return load_yaml_text(load_file_contents(filepath))
def selector_config_from_data(
    selectors_data: Dict[str, Any]
) -> SelectorConfig:
    """Build a SelectorConfig from raw selectors.yml data.

    Empty/missing data is treated as an empty selector list; a schema
    violation is re-raised as DbtSelectorsError with a friendly message.
    """
    data = selectors_data if selectors_data else {'selectors': []}
    try:
        return SelectorConfig.from_dict(data)
    except ValidationError as e:
        raise DbtSelectorsError(
            MALFORMED_SELECTOR_ERROR.format(error=str(e.message)),
            result_type='invalid_selector',
        ) from e
| 30.304762 | 78 | 0.668133 |
f1d577ab4af9b7c64154411f6b7641f51243d14e | 7,634 | py | Python | Accounts/views.py | Larry-Manuel/SMA-TEAM | 14c50bfce4327bb70812341b094c0001b90843f6 | [
"MIT"
] | null | null | null | Accounts/views.py | Larry-Manuel/SMA-TEAM | 14c50bfce4327bb70812341b094c0001b90843f6 | [
"MIT"
] | null | null | null | Accounts/views.py | Larry-Manuel/SMA-TEAM | 14c50bfce4327bb70812341b094c0001b90843f6 | [
"MIT"
] | null | null | null | from django.db.models.query_utils import Q
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib import messages #import messages
from .models import User, PasswordReset
from Details.models import Follower
from django.contrib.auth.forms import AuthenticationForm, UsernameField
from .models import UserSocial
from .forms import UserProfileForm
from django.core.mail import send_mail, BadHeaderError
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes, force_text
from Details.models import Post, PostComment
from django.shortcuts import get_object_or_404
# Create your views here.
def index(request):
    """Render the landing page."""
    #return HttpResponse("Welcome, this is the SMA app")
    return render (request=request, template_name="home.html")
def register_request(request):
    """Handle user sign-up.

    GET renders the empty registration form; POST validates the
    submitted fields, creates the user and redirects to the home page.
    Fix: the original flashed "Unsuccessful registration" on every plain
    GET request because the error call sat outside the POST branch.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        confirmpwd = request.POST.get('conf_password')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        email = request.POST.get('email')
        if password != confirmpwd:
            messages.error(request, "Unsuccessful registration. Invalid information.")
        else:
            try:
                User.objects.get(email=email)
                messages.error(request, "Email already exists")
                return redirect("register")
            except User.DoesNotExist:
                user = User.objects.create_user(
                    email=email, username=username, password=password,
                    first_name=first_name, last_name=last_name)
                user.save()
                messages.success(request, "Registration successful")
                return redirect("index")
    return render(request=request, template_name="register.html")
def login_request(request):
    """Authenticate a user and start a session.

    Already-authenticated users are bounced to the home page. A bad
    credential pair sends the user back to the login form with an
    error flash.
    """
    if request.user.is_authenticated:
        return redirect("index")
    if request.method != "POST":
        return render(request, "login.html")
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is None:
        messages.error(request, "Invalid username or password.")
        return redirect("login")
    login(request, user)
    messages.info(request, f"You are now logged in as {username}.")
    return redirect("index")
def edit_user(request):
    """Update the logged-in user's profile via UserProfileForm.

    GET shows the bound form; a valid POST saves it and redirects to
    the profile page; an invalid POST re-renders the form with errors.

    NOTE(review): the original passed the UserSocial *class* (not an
    instance) as ``instance=``, which cannot save a specific profile
    row, and it contained unreachable code after the final return that
    rendered login.html with a login error. Confirm the correct profile
    lookup against the UserSocial model.
    """
    # TODO(review): replace with the actual profile row for request.user,
    # e.g. UserSocial.objects.get(user=request.user) — verify field name.
    profile = UserSocial
    if request.method == 'POST':
        form = UserProfileForm(request.POST, instance=profile)
        if form.is_valid():
            form.save()
            messages.success(request, 'Your Profile has been updated!')
            return redirect('profile')
    else:
        form = UserProfileForm(instance=profile)
    # invalid POST falls through here so the user sees the form errors
    context = {'form': form}
    return render(request=request, template_name="profile.html", context=context)
def password_reset_request(request):
    """Start the password-reset flow.

    POST looks up the account by e-mail, records a one-time token and
    e-mails a reset link. To avoid leaking which addresses exist, the
    user is sent to the "done" page whether or not the address matched.
    Fix: the original called make_token(None) and crashed with an
    AttributeError whenever the address was unknown.
    """
    if request.method == "POST":
        email = request.POST['email']
        user = User.objects.filter(email=email).first()
        if user is not None:
            # Record token in PasswordReset model (create() already saves)
            token = default_token_generator.make_token(user)
            PasswordReset.objects.create(user_id=user, token=token)
            # Create and send e-mail
            subject = "Password Reset Requested"
            email_template_name = "password_reset_email.txt"
            c = {
                "email": user.email,
                "domain": "127.0.0.1:8000",
                "site_name": "SMA Team 2",
                "uid": urlsafe_base64_encode(force_bytes(user.pk)),
                "user": user,
                "token": token,
                "protocol": "http",
            }
            body = render_to_string(email_template_name, c)
            try:
                send_mail(subject, body, "team2@sma.com", [user.email],
                          fail_silently=False)
            except BadHeaderError:
                return HttpResponse("Invalid header found.")
        return redirect("password_reset_done")
    return render(request=request, template_name="password_reset.html")
def password_reset_done_request(request):
    """Confirmation page shown after a reset e-mail has been requested."""
    return render(request=request, template_name="password_reset_done.html")
def password_reset_confirm_request(request, uidb64, token):
    """Second half of the password-reset flow: set the new password.

    The uid comes base64-encoded from the e-mailed link; the token must
    match an unused PasswordReset record for that user.

    Fixes: the original crashed with AttributeError when the uid or
    token did not match any record, and it only rejected used tokens on
    GET, so a direct POST could reuse a token.
    """
    user_id = force_text(urlsafe_base64_decode(uidb64))
    user = User.objects.filter(id=user_id).first()
    reset = PasswordReset.objects.filter(user_id=user, token=token).first()
    if user is None or reset is None:
        return HttpResponse("Invalid password reset link.")
    # checked on every request, not just GET
    if reset.token_used:
        return HttpResponse("Token has already been used.")
    if request.method == "POST":
        password = request.POST['password']
        confirmpwd = request.POST['conf_password']
        if password == confirmpwd:
            user.set_password(password)
            user.save()
            reset.token_used = True
            reset.save()
            return render(request=request, template_name="password_reset_complete.html")
        messages.error(request, "Passwords do not match.")
    return render(request=request, template_name="password_reset_confirm.html")
def password_reset_complete_request(request):
    """Final page confirming the password was changed."""
    return render(request=request, template_name="password_reset_complete.html")
def post_comment_request(request, pk):
    """Show the comment form for post *pk* and save submitted comments.

    Unknown posts flash an error and bounce to the home page.
    """
    try:
        post = Post.objects.get(id=pk)
    except Post.DoesNotExist:
        # Fix: messages.error() requires the request as its first
        # argument; the original raised TypeError on this path.
        messages.error(request, "Post does not exist")
        return redirect("index")
    if request.method == "POST":
        comment = request.POST['comment']
        user = User.objects.get(email=request.user)
        post_comment = PostComment(commenter_id=user, post_id=post, content=comment)
        post_comment.save()
        messages.success(request, "Comment successfully posted")
        return redirect("index")
    return render(request, "comment.html", {'post': post})
def delete_comment(request):
    """Delete a single comment (POST only).

    Fixes: the original read request.POST before checking the method,
    passed the *string* 'Comment' to get_object_or_404 (it needs a model
    class — PostComment is the comment model in this app), used a bare
    except, and called redirect(request, template, context), which is
    not a valid redirect() signature and raised at runtime.
    """
    if request.method == 'POST':
        comment_id = request.POST['comment_id']
        comment = get_object_or_404(PostComment, id=comment_id)
        try:
            comment.delete()
            messages.success(request, 'You have successfully deleted the comment')
        except Exception:
            messages.warning(request, 'The comment could not be deleted.')
    # TODO(review): confirm the right post-delete destination; "index"
    # replaces the original broken redirect(request, "comment.html", ...)
    return redirect("index")
def follow_user(request, user_name):
    """Toggle whether the session user follows ``user_name``.

    If the session user already follows the target they are removed from
    the follower set, otherwise they are added; either way the browser
    is redirected back to the session user's profile page. Users cannot
    follow themselves.

    NOTE(review): ``is_followed`` is assigned but never used.
    NOTE(review): Follower is looked up once via ``username=get_user.id``
    and later via ``username=get_user`` — confirm which is correct for
    the Follower model; User.DoesNotExist / Follower.DoesNotExist are
    not handled and will produce a 500 for unknown names.
    """
    other_user = User.objects.get(username=user_name)
    #get id
    session_user = request.session['user']
    get_user = User.objects.get(username=session_user)
    check_follower = Follower.objects.get(username=get_user.id)
    is_followed = False
    if other_user.username != session_user:
        if check_follower.follower_id.filter(username=other_user).exists():
            # already following -> unfollow
            add_usr = Follower.objects.get(username=get_user)
            add_usr.follower_id.remove(other_user)
            is_followed = False
            return redirect(f'/profile/{session_user}')
        else:
            # not yet following -> follow
            add_usr = Follower.objects.get(username=get_user)
            add_usr.follower_id.add(other_user)
            is_followed = True
            return redirect(f'/profile/{session_user}')
    else:
        # self-follow attempt: just go back to the profile
        return redirect(f'/profile/{session_user}')
def search_user(request):
    """Search accounts by (partial) username and flash each match.

    Fix: the original used print() for the empty-query case, which is
    invisible to the user; it also rendered the same template from three
    separate branches.
    """
    if request.method == "POST":
        query = request.POST['query']
        if not query:
            messages.error(request, "Please type username")
        else:
            users = User.objects.filter(username__contains=query)
            if users:
                for user in users:
                    messages.info(request, user.username)
            else:
                messages.error(request, "Username not found")
    return render(request=request, template_name="search_user.html")
| 30.293651 | 130 | 0.735394 |
70480e16f13efa415ed1e008181df227806d1cb9 | 4,530 | py | Python | lib/utils/focal_loss.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 9 | 2021-02-12T10:29:06.000Z | 2022-03-18T13:35:25.000Z | lib/utils/focal_loss.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 5 | 2021-02-26T04:44:52.000Z | 2022-03-31T07:16:53.000Z | lib/utils/focal_loss.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 1 | 2022-03-30T08:06:08.000Z | 2022-03-30T08:06:08.000Z | # -*- coding: utf-8 -*-
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.one_hot
from utils.one_hot import one_hot
# https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/losses/focal.html#focal_loss
# based on:
# https://github.com/zhezh/focalloss/blob/master/focalloss.py
class FocalLoss(nn.Module):
    r"""Criterion that computes Focal loss.

    According to [1], the Focal loss is computed as follows:

    .. math::

        \text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t)

    where:

       - :math:`p_t` is the model's estimated probability for each class.

    Arguments:
        alpha (float): Weighting factor :math:`\alpha \in [0, 1]`.
        gamma (float): Focusing parameter :math:`\gamma >= 0`.
        reduction (Optional[str]): Specifies the reduction to apply to the
         output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
         ‘mean’: the sum of the output will be divided by the number of elements
         in the output, ‘sum’: the output will be summed. Default: ‘none’.

    Shape:
        - Input: :math:`(N, C, H, W)` where C = number of classes.
        - Target: :math:`(N, H, W)` (dtype long) where each value is
          :math:`0 ≤ targets[i] ≤ C−1`.

    References:
        [1] https://arxiv.org/abs/1708.02002
    """

    def __init__(self, alpha: float, gamma: Optional[float] = 2.0,
                 reduction: Optional[str] = 'none') -> None:
        super(FocalLoss, self).__init__()
        self.alpha: float = alpha
        self.gamma: torch.Tensor = torch.tensor(gamma)
        self.reduction: Optional[str] = reduction
        # added to the softmax output so log() never sees an exact zero
        self.eps: float = 1e-6

    def forward(  # type: ignore
            self,
            input: torch.Tensor,
            target: torch.Tensor,
            weight_ce=None) -> torch.Tensor:
        """Compute the focal loss; weight_ce optionally rescales terms."""
        if not torch.is_tensor(input):
            raise TypeError("Input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
                             .format(input.shape))
        if not input.shape[-2:] == target.shape[-2:]:
            # fix: report both shapes (the original formatted a single
            # placeholder with two arguments, both of them input.shape)
            raise ValueError(
                "input and target shapes must be the same. Got: {} and {}"
                .format(input.shape, target.shape))
        if not input.device == target.device:
            raise ValueError(
                "input and target must be in the same device. Got: {} and {}"
                .format(input.device, target.device))
        # compute softmax over the classes axis
        input_soft = F.softmax(input, dim=1) + self.eps
        # one-hot encode target: (N, H, W) -> (N, C, H, W). Everything is
        # created on input's device, so this works on CPU as well as CUDA
        # (the original unconditionally called .cuda() and crashed on CPU).
        target_one_hot = F.one_hot(
            target, num_classes=input.shape[1]
        ).permute(0, 3, 1, 2).to(input.dtype)
        # modulating factor (1 - p)^gamma
        weight = torch.pow(1.0 - input_soft,
                           self.gamma.to(input.dtype).to(input.device))
        focal = -self.alpha * weight * torch.log(input_soft)
        if weight_ce is not None:
            focal = focal * weight_ce
        # pick the focal term of the true class at every pixel
        loss_tmp = torch.sum(target_one_hot * focal, dim=1)
        if self.reduction == 'none':
            loss = loss_tmp
        elif self.reduction == 'mean':
            loss = torch.mean(loss_tmp)
        elif self.reduction == 'sum':
            loss = torch.sum(loss_tmp)
        else:
            raise NotImplementedError("Invalid reduction mode: {}"
                                      .format(self.reduction))
        return loss
######################
# functional interface
######################
def focal_loss(
        input: torch.Tensor,
        target: torch.Tensor,
        alpha: float,
        gamma: Optional[float] = 2.0,
        reduction: Optional[str] = 'none') -> torch.Tensor:
    r"""Functional interface for Focal loss.

    See :class:`FocalLoss` for the underlying computation and the
    meaning of the arguments.
    """
    criterion = FocalLoss(alpha, gamma, reduction)
    return criterion(input, target)
| 36.24 | 93 | 0.571302 |
9e7d5886c226e0b89d2a1630ede1fb901f428ad6 | 1,378 | py | Python | mt/base/application/__init__.py | jkrocil/microblog_tests | 4e2b768270230ec7bf7ce20452bb1d0fdce2c69c | [
"Apache-2.0"
] | null | null | null | mt/base/application/__init__.py | jkrocil/microblog_tests | 4e2b768270230ec7bf7ce20452bb1d0fdce2c69c | [
"Apache-2.0"
] | null | null | null | mt/base/application/__init__.py | jkrocil/microblog_tests | 4e2b768270230ec7bf7ce20452bb1d0fdce2c69c | [
"Apache-2.0"
] | null | null | null | import importscan
from pkg_resources import iter_entry_points
from mt.base.application.implementations.web_ui import ViaWebUI
from mt.base.application.implementations import MtImplementationContext
from mt.base.application.entities import EntityCollections
class Application(object):
    """Root entry point tying together UI implementations and entities."""

    def __init__(self, hostname=None, path="", scheme="https", username=None, password=None):
        # self-reference so code holding any owned object can reach the app
        self.application = self
        self.scheme = scheme
        self.hostname = hostname
        self.path = path
        self.username = username
        self.password = password
        self.web_ui = ViaWebUI(owner=self)
        self.context = MtImplementationContext.from_instances([self.web_ui])
        self.collections = EntityCollections.for_application(self)

    @property
    def address(self):
        """Base URL assembled from scheme, hostname and path."""
        return f"{self.scheme}://{self.hostname}/{self.path}"

    @property
    def destinations(self):
        """Returns a dict of all valid destinations for a particular object"""
        result = {}
        for impl in self.application.context.implementations.values():
            if impl.navigator:
                result[impl.name] = impl.navigator.list_destinations(self)
        return result
def load_application_collections():
    """Resolve every entry point registered under mt.application_collections."""
    collections = {}
    for ep in iter_entry_points("mt.application_collections"):
        collections[ep.name] = ep.resolve()
    return collections
# Recursively import every submodule of mt.base so that all
# implementations register themselves on import.
from mt import base # noqa
importscan.scan(base)
| 31.318182 | 93 | 0.695936 |
e53166bc9ab3170bc7d61cfafce606f6c423b7f9 | 6,019 | py | Python | pyswahili/Swahili/repl.py | Ismaili1995/pyswahili | 6f4b4c10900d9fd268d78e52e80bc8ce65916fe6 | [
"MIT"
] | 1 | 2021-03-03T22:48:20.000Z | 2021-03-03T22:48:20.000Z | pyswahili/Swahili/repl.py | Ismaili1995/pyswahili | 6f4b4c10900d9fd268d78e52e80bc8ce65916fe6 | [
"MIT"
] | null | null | null | pyswahili/Swahili/repl.py | Ismaili1995/pyswahili | 6f4b4c10900d9fd268d78e52e80bc8ce65916fe6 | [
"MIT"
] | 1 | 2021-01-29T17:25:55.000Z | 2021-01-29T17:25:55.000Z | import json
import sys
import code
import ast
import platform
import datetime
import traceback
from Swahili.swahili_node import PySwahili
class PySwahili_Repl:
    """Interactive read-eval-print loop for Swahili-keyword Python.

    Each input line is translated to English Python via PySwahili and
    then validated and executed. NOTE(review): user input ultimately
    reaches eval()/exec(), so this REPL must only run locally-typed
    code, never untrusted input.
    """
    def __init__(self):
        self.translator = PySwahili()
        # Swahili keywords that open an indented block (if/def/while/...)
        self.block_keywords = list(self.translator.sw_to_en["block_keywords"].values())
        self.console = code.InteractiveConsole()
        self.intepreter = code.InteractiveInterpreter()
        self.newline = "\n"
    @property
    def logo(self):
        """ASCII-art banner."""
        return r"""
 _____ _ _ _ _
| __ \ | | (_) (_)
| |__) | _ _____ ____ _| |__ _| |_
| ___/ | | / __\ \ /\ / / _` | '_ \| | | |
| | | |_| \__ \\ V V / (_| | | | | | | |
|_| \__, |___/ \_/\_/ \__,_|_| |_|_|_|_|
 __/ |
 |___/
        """
    @staticmethod
    def remove_identation(line_of_code, return_number=False):
        """Strip leading tabs; with return_number=True return the tab count instead."""
        count = 0
        while True:
            if line_of_code.startswith("\t"):
                line_of_code = line_of_code[1:]
                count += 1
                continue
            break
        if return_number:
            return count
        if not return_number:
            return line_of_code
    def is_blocky(self, line_of_code):
        """True when the (translated) line opens an indented block."""
        return any(
            [line_of_code.startswith(keyword) for keyword in self.block_keywords]
        ) and line_of_code.endswith(":")
    def is_else(self, line_of_code):
        """True when the line is an 'else:' clause (ignoring spaces)."""
        try:
            trimmed_line_of_code = line_of_code.replace(" ", "")
            if trimmed_line_of_code == "else:":
                return True
            return
        except:
            return
    def is_return(self, line_of_code):
        """True when the stripped line starts a 'return' statement."""
        try:
            stripped_line_of_code = line_of_code.strip()
            if stripped_line_of_code.startswith("return "):
                return True
            return
        except:
            return
    def is_compilable(self, line_of_code):
        """True when the de-indented line compiles in 'exec' mode.

        'else:'/'return' lines are accepted too, since on their own they
        are only valid inside an enclosing block.
        """
        try:
            line_of_code = self.remove_identation(line_of_code)
            code.compile_command(line_of_code, "<string>", "exec")
            return True
        except Exception as bug:
            # print(bug)
            if self.is_else(line_of_code) or self.is_return(line_of_code):
                return True
            return False
    def is_parsable(self, line_of_code):
        """True when the de-indented line parses as Python (ast.parse)."""
        try:
            line_of_code = self.remove_identation(line_of_code)
            if ast.parse(line_of_code):
                return True
        except Exception as bug:
            # print(bug)
            return False
    def is_valid(self, line_of_code):
        """True when the line is either compilable or parsable.

        NOTE(review): the print() calls below are debug output and leak
        into the REPL session.
        """
        try:
            if self.is_compilable(line_of_code):
                print("code is compilable")
                return True
            if self.is_parsable(line_of_code):
                print("code is parsable")
                return True
            print("code is not valid")
            return False
        except Exception as bug:
            traceback.print_exc()
            return False
    def is_eval(self, line_of_code, variables):
        """Try to evaluate the line as an expression.

        Re-creates the caller's variables (except the restricted names)
        in this scope, then eval()s the line. Returns the value, True
        for print() lines, or False when evaluation fails.
        """
        restricted = ["command", "self", "specifics"]
        try:
            for var, value in variables.items():
                if all(var != r_var for r_var in restricted):
                    assign_expression = """{}={}""".format(var, value)
                    eval(compile(assign_expression, "<string>", "exec"))
            output = eval(compile(line_of_code, "<string>", "eval"))
            if not line_of_code.startswith("print"):
                return output
            return True
        except Exception as bug:
            # print(bug)
            return False
    def read_user_input(self):
        """Prompt for one statement (plus its indented block body, if any).

        Returns the translated English source, or False when the first
        line did not validate.
        """
        try:
            user_input = ""
            command = input("-> ")
            english_command = self.translator.convert_to_english(command)
            if self.is_valid(english_command):
                user_input = english_command
                if self.is_blocky(english_command):
                    # keep reading continuation lines; an invalid line
                    # ends the block
                    while True:
                        command = input("...")
                        if command:
                            space_count = self.remove_identation(
                                command, return_number=True
                            )
                            english_command = self.translator.convert_to_english(
                                command
                            )
                            english_command = (space_count * "\t") + english_command
                            if self.is_valid(english_command):
                                user_input = user_input + "\n" + english_command
                                continue
                            break
                return user_input
            return False
        except Exception as bug:
            print(bug)
    @property
    def load_system_specification(self):
        """One-line banner with the OS name and today's date."""
        specification = platform.uname()
        now = datetime.datetime.now().strftime("%A %d, %B %Y")
        specification = "Pyswahili 1.0 on {} | {}".format(specification.system, now)
        return specification
    def repl(self):
        """Main loop: read, validate and execute until Ctrl-C."""
        while True:
            try:
                command = self.read_user_input()
                if command:
                    if not self.newline in command:
                        # single-line input: try expression evaluation first
                        evaluated = self.is_eval(command, locals())
                        if evaluated:
                            if evaluated != True:
                                print(evaluated)
                            continue
                    print(command)
                    exec(command, globals())
                    continue
            except KeyboardInterrupt:
                sys.exit()
            except Exception as bug:
                print(bug)
                continue
if __name__ == "__main__":
    # Launch the Swahili-keyword interactive interpreter.
    PySwahili_Repl().repl()
| 33.071429 | 87 | 0.484466 |
fa4b275e3370a7600e7b1d1bb7cfea27a845467b | 1,057 | py | Python | utilities/for_paraview/lib152/Tensors.py | doliinychenko/iSS | 9391b8830e385c0f5f1600a1cfd1ad355ea582c5 | [
"MIT"
] | 4 | 2018-11-29T14:34:55.000Z | 2020-11-25T14:44:32.000Z | utilities/for_paraview/lib152/Tensors.py | doliinychenko/iSS | 9391b8830e385c0f5f1600a1cfd1ad355ea582c5 | [
"MIT"
] | 1 | 2020-04-05T01:17:31.000Z | 2020-04-05T01:17:31.000Z | utilities/for_paraview/lib152/Tensors.py | doliinychenko/iSS | 9391b8830e385c0f5f1600a1cfd1ad355ea582c5 | [
"MIT"
] | 6 | 2018-04-06T17:08:35.000Z | 2020-10-19T19:10:38.000Z | #!/usr/bin/env python
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.1 $
$Date: 2001-05-20 12:51:29 $
Pearu Peterson
"""
import DataSetAttr
import string
class Tensors(DataSetAttr.DataSetAttr):
    """Holds VTK Tensors.

    Stores a sequence of 3x3 tensors and serialises it to the VTK
    ``TENSORS`` data-set attribute format. (Legacy Python 2 code:
    ``string.join`` is used deliberately.)
    """

    def __init__(self, tensors, name=None):
        # Normalise the attribute name, then coerce the input into a list of
        # 3x3 tuples, padding missing entries with the class default value.
        self.name = self._get_name(name)
        self.tensors = self.get_3_3_tuple_list(tensors, (self.default_value,) * 3)

    def to_string(self, format='ascii'):
        """Render the tensors as a VTK ``TENSORS`` section in the given format."""
        datatype = self.get_datatype(self.tensors)
        lines = ['TENSORS %s %s' % (self.name, datatype)]
        lines.append(self.seq_to_string(self.tensors, format, datatype))
        return string.join(lines, '\n')

    def get_size(self):
        """Number of stored tensors."""
        return len(self.tensors)
if __name__ == "__main__":
    # Smoke test (Python 2 print statements): exercise list and scalar input.
    print Tensors([[[3,3]],[4,3.],[[240]],3,2,3]).to_string('ascii')
    print Tensors(3).to_string('ascii')
| 30.2 | 79 | 0.667928 |
fb253dc9ec8238ed137cc7762a63ea80d1eb9ffd | 8,566 | py | Python | src/toil/test/src/jobFileStoreTest.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | src/toil/test/src/jobFileStoreTest.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | src/toil/test/src/jobFileStoreTest.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import range
import codecs
import random
import os
import errno
# Python 3 compatibility imports
from six.moves import xrange
from toil.common import Toil
from toil.job import Job
from toil.fileStore import FileID
from toil.test import ToilTest, slow
# Length of the prefix used as the dictionary key for each random test string.
PREFIX_LENGTH=200
# TODO: This test is ancient and while similar tests exist in `fileStoreTest.py`, none of them look
# at the contents of read files and thus we will let this test remain as-is.
class JobFileStoreTest(ToilTest):
    """Exercises the methods defined in :class:`toil.fileStore.FileStore`."""

    def testCachingFileStore(self):
        """Run the simple file-store workflow with caching enabled (the default)."""
        workflow_options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        with Toil(workflow_options) as toil_workflow:
            toil_workflow.start(Job.wrapJobFn(simpleFileStoreJob))

    def testNonCachingFileStore(self):
        """Run the simple file-store workflow with caching disabled."""
        workflow_options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        workflow_options.disableCaching = True
        with Toil(workflow_options) as toil_workflow:
            toil_workflow.start(Job.wrapJobFn(simpleFileStoreJob))

    def _testJobFileStore(self, retryCount=0, badWorker=0.0, stringNo=1, stringLength=1000000,
                          testNo=2):
        """
        Create a chain of jobs, each reading and writing files through the
        toil.fileStore.FileStore interface, and verify the contents read back
        are always the contents written.
        """
        for _ in range(testNo):
            # Build a dict mapping a PREFIX_LENGTH-character prefix to each
            # full random string.
            def random_entry():
                alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                text = "".join(random.choice(alphabet) for _ in range(stringLength))
                return text[:PREFIX_LENGTH], text

            test_strings = dict(random_entry() for _ in range(stringNo))
            workflow_options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
            workflow_options.logLevel = "INFO"
            workflow_options.retryCount = retryCount
            workflow_options.badWorker = badWorker
            workflow_options.badWorkerFailInterval = 1.0
            # Ten chained jobs, each re-reading and re-writing the files.
            Job.Runner.startToil(Job.wrapJobFn(fileTestJob, [], test_strings, 10),
                                 workflow_options)

    def testJobFileStore(self):
        """Tests the case where about half the files end up cached."""
        self._testJobFileStore(retryCount=0, badWorker=0.0, stringNo=5, stringLength=1000000)

    @slow
    def testJobFileStoreWithBadWorker(self):
        """Same as testJobFileStore, but with the worker randomly failing."""
        self._testJobFileStore(retryCount=100, badWorker=0.5, stringNo=5, stringLength=1000000)
def fileTestJob(job, inputFileStoreIDs, testStrings, chainLength):
    """
    Test job exercises toil.fileStore.FileStore functions.

    :param inputFileStoreIDs: file-store IDs written by the previous job in the chain.
    :param testStrings: dict mapping each string's PREFIX_LENGTH-char prefix to the full string.
    :param chainLength: number of follow-on jobs still to schedule.

    NOTE(review): the branch taken at each step depends on the exact order of
    the random.random() calls below, so statement order here is significant.
    """
    outputFileStoreIds = []  # Strings passed to the next job in the chain
    # Load the input jobStoreFileIDs and check that they map to the
    # same set of random input strings, exercising the different functions in
    # the fileStore interface
    for fileStoreID in inputFileStoreIDs:
        if random.random() > 0.5:
            # Read the file for the fileStoreID, randomly picking a way to
            # invoke readGlobalFile (explicit/implicit path, cached or not)
            if random.random() > 0.5:
                tempFile = job.fileStore.readGlobalFile(fileStoreID,
                                                        job.fileStore.getLocalTempFileName() if
                                                        random.random() > 0.5 else None,
                                                        cache=random.random() > 0.5)
                with open(tempFile, 'r') as fH:
                    string = fH.readline()
            else:
                # Check the local file is as we expect
                with job.fileStore.readGlobalFileStream(fileStoreID) as fH:
                    # File streams are binary in Python 3 and can't do readline.
                    # But a StreamReader for UTF-8 is exactly the adapter we need.
                    fH = codecs.getreader('utf-8')(fH)
                    string = fH.readline()
            # Check the string we get back is what we expect
            assert testStrings[string[:PREFIX_LENGTH]] == string
            # This allows the file to be passed to the next job
            outputFileStoreIds.append(fileStoreID)
        else:
            # This tests deletion
            job.fileStore.deleteGlobalFile(fileStoreID)
    # Fill out the output strings until we have the same number as the input
    # strings, exercising different ways of writing files to the file store
    while len(outputFileStoreIds) < len(testStrings):
        # Pick a string and write it into a file
        testString = random.choice(list(testStrings.values()))
        if random.random() > 0.5:
            # Make a local copy of the file
            tempFile = job.fileStore.getLocalTempFile() if random.random() > 0.5 \
                else os.path.join(job.fileStore.getLocalTempDir(), "temp.txt")
            with open(tempFile, 'w') as fH:
                fH.write(testString)
            # Write a local copy of the file using the local file
            fileStoreID = job.fileStore.writeGlobalFile(tempFile)
            # Make sure it returned a valid and correct FileID with the right size
            assert isinstance(fileStoreID, FileID)
            assert fileStoreID.size == len(testString.encode('utf-8'))
            outputFileStoreIds.append(fileStoreID)
        else:
            # Use the writeGlobalFileStream method to write the file
            with job.fileStore.writeGlobalFileStream() as (fH, fileStoreID):
                fH.write(testString.encode('utf-8'))
                outputFileStoreIds.append(fileStoreID)
            # Make sure it returned a valid and correct FileID with the right size
            assert isinstance(fileStoreID, FileID)
            assert fileStoreID.size == len(testString.encode('utf-8'))
    if chainLength > 0:
        # Make a child that will read these files and check it gets the same results
        job.addChildJobFn(fileTestJob, outputFileStoreIds, testStrings, chainLength-1)
# Sentinel payloads used by simpleFileStoreJob/fileStoreChild to verify round-trips.
fileStoreString = "Testing writeGlobalFile"
streamingFileStoreString = "Testing writeGlobalFileStream"
def simpleFileStoreJob(job):
    """Write one file via writeGlobalFile and one via writeGlobalFileStream,
    then chain a child job that reads both back and verifies their contents."""
    # First file: written to local disk, then imported into the file store.
    scratch_path = os.path.join(job.fileStore.getLocalTempDir(), "parentTemp.txt")
    with open(scratch_path, 'w') as handle:
        handle.write(fileStoreString)
    written_id = job.fileStore.writeGlobalFile(scratch_path)
    # Second file: streamed straight into the file store.
    streamed_id = None
    with job.fileStore.writeGlobalFileStream() as (stream, file_id):
        stream.write(streamingFileStoreString.encode('utf-8'))
        streamed_id = file_id
    job.addChildJobFn(fileStoreChild, written_id, streamed_id)
def fileStoreChild(job, testID1, testID2):
    """Read back both files written by simpleFileStoreJob, verify their
    contents, then exercise local and global deletion semantics."""
    # Stream-read the first file and compare against the expected payload.
    with job.fileStore.readGlobalFileStream(testID1) as stream:
        assert stream.read().decode('utf-8') == fileStoreString
    # Copy the second file to local disk and compare its contents.
    local_copy = os.path.join(job.fileStore.getLocalTempDir(), "childTemp.txt")
    job.fileStore.readGlobalFile(testID2, local_copy)
    with open(local_copy, 'r') as handle:
        assert handle.read() == streamingFileStoreString
    job.fileStore.deleteLocalFile(testID2)
    # testID1 was never materialised locally here, so deleting its (absent)
    # local copy must raise ENOENT; succeeding silently is a failure.
    try:
        job.fileStore.deleteLocalFile(testID1)
    except OSError as e:
        if e.errno != errno.ENOENT:  # anything but "file not found" is a real error
            raise
    else:
        raise RuntimeError("Deleting a non-existant file did not throw an exception")
    for file_id in (testID1, testID2):
        job.fileStore.deleteGlobalFile(file_id)
| 42.83 | 100 | 0.643824 |
de2f7f3010a59bc8d37b22d0a1b093e1e0a66a93 | 2,636 | py | Python | test/unit/construction/test_gateconstruction.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | test/unit/construction/test_gateconstruction.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | test/unit/construction/test_gateconstruction.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | import numpy as np
import pygsti.tools.gatetools as gc
from ..util import BaseCase
class GateConstructionTester(BaseCase):
    """Checks gate-matrix construction helpers in pygsti.tools.gatetools."""

    def test_single_qubit_gate_matrix(self):
        # Compare single_qubit_gate output against a precomputed reference
        # matrix for one fixed set of arguments.
        actual = gc.single_qubit_gate(24.0, 83.140134, 0.0000)
        expected = np.array(
            [[1.00000000e+00, 2.77555756e-16, -2.28983499e-16, 0.00000000e+00],
             [-3.53885261e-16, -8.09667193e-01, 5.22395269e-01, -2.67473774e-01],
             [-3.92523115e-17, 5.22395269e-01, 8.49200550e-01, 7.72114534e-02],
             [1.66533454e-16, 2.67473774e-01, -7.72114534e-02, -9.60466643e-01]])
        self.assertArraysAlmostEqual(expected, actual)

    def test_two_qubit_gate_mx(self):
        # With all-default (zero) coefficients, two_qubit_gate must produce
        # the 16x16 identity matrix.
        gate = gc.two_qubit_gate()
        self.assertArraysAlmostEqual(gate, np.identity(16))

    def test_two_qubit_gate(self):
        # For independent operations, check directly against tensored
        # single-qubit gates.
        for op1, op2 in ['ii', 'xi', 'yi', 'zi', 'ix', 'iy', 'iz']:
            factors = []
            for op in (op1, op2):
                coeffs = {'hx': 0, 'hy': 0, 'hz': 0}
                if op != 'i':
                    coeffs[f'h{op}'] = 1
                factors.append(gc.single_qubit_gate(**coeffs))
            composed_gate = np.kron(factors[0], factors[1])
            full_gate = gc.two_qubit_gate(**{op1 + op2: 1})
            self.assertArraysAlmostEqual(composed_gate, full_gate)
| 45.448276 | 97 | 0.435508 |
0b661c02a56a6e26b1d88266a7d638bd5f0c939f | 1,022 | py | Python | src/.ipynb_checkpoints/lconv-checkpoint.py | nimadehmamy/L-conv-code | 5a8abfbff3f6564771234df3e177d1d4aafe371d | [
"MIT"
] | 9 | 2021-12-03T16:02:52.000Z | 2022-03-01T14:39:16.000Z | src/lconv.py | LaLaLailalai/L-conv-code | 5a8abfbff3f6564771234df3e177d1d4aafe371d | [
"MIT"
] | null | null | null | src/lconv.py | LaLaLailalai/L-conv-code | 5a8abfbff3f6564771234df3e177d1d4aafe371d | [
"MIT"
] | 2 | 2021-12-14T17:27:01.000Z | 2022-03-08T13:45:31.000Z | import torch
import torch.nn as nn
class Lconv(nn.Module):
    """L-conv layer with a full (unconstrained) set of learnable L matrices.

    Learnable parameters:
      * ``L``  -- shape ``(num_L, d, d)``: one ``d x d`` matrix per component.
      * ``Wi`` -- shape ``(num_L + 1, cout, cin)``: ``Wi[0]`` is the residual
        weight W^0 and ``Wi[1:]`` are the per-component channel mixers
        (W^0\\epsilon^i in the original notation).

    ``rank`` is accepted for interface compatibility but is not used when L
    is stored in full form.
    """

    def __init__(self, d, num_L=1, cin=1, cout=1, rank=8):
        super().__init__()
        self.L = nn.Parameter(torch.Tensor(num_L, d, d))
        # W^0 = Wi[0], the remaining num_L slices pair with the L matrices.
        self.Wi = nn.Parameter(torch.Tensor(num_L + 1, cout, cin))
        # Kaiming-normal initialisation for both parameter tensors.
        nn.init.kaiming_normal_(self.L)
        nn.init.kaiming_normal_(self.Wi)

    def forward(self, x):
        """Apply the layer to ``x`` of shape (batch, cin, flat_d).

        Returns a tensor of shape (batch, cout, flat_d).
        """
        # Residual path: channel mixing only, x W^0.
        skip = torch.einsum('bcd,oc->bod', x, self.Wi[0])
        # Main path: sum_k (L_k x) W_k, contracted in one einsum.
        lie_term = torch.einsum('kdf,bcf,koc->bod', self.L, x, self.Wi[1:])
        return lie_term + skip
class Reshape(nn.Module):
    """Module that reshapes its input to ``(-1, *shape)``.

    The leading dimension is inferred (``-1``), so any batch size works as
    long as the total number of elements is a multiple of ``prod(shape)``.

    Fixes over the previous version: ``nn.Module.__init__`` is now called
    *before* any attribute assignment (required by nn.Module's attribute
    machinery), the shape is normalised to a tuple so lists are accepted
    safely, and a missing shape fails with a clear error instead of an
    opaque ``TypeError`` from ``view()``.
    """

    def __init__(self, shape=None):
        super().__init__()
        # Normalise to an immutable tuple; keep None to preserve the old
        # default signature (an error is raised on use, not construction).
        self.shape = None if shape is None else tuple(shape)

    def forward(self, x):
        if self.shape is None:
            raise ValueError("Reshape was constructed without a target shape")
        return x.view(-1, *self.shape)
a84a70f1e4603fecfd0fd978abe3c0402b9354da | 150,829 | py | Python | python/ccxt/okex.py | An-Net/ccxt | 2f1fd55a711cdf8f74d0273d36944749df849137 | [
"MIT"
] | null | null | null | python/ccxt/okex.py | An-Net/ccxt | 2f1fd55a711cdf8f74d0273d36944749df849137 | [
"MIT"
] | null | null | null | python/ccxt/okex.py | An-Net/ccxt | 2f1fd55a711cdf8f74d0273d36944749df849137 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class okex(Exchange):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'futures': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okex.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
'referral': 'https://www.okex.com/join/1888677',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'wallet',
'sub-account',
'asset-valuation',
'wallet/{currency}',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
'currencies',
'withdrawal/fee',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'trade_fee',
'fills',
'algo',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'accounts/{instrument_id}/leverage',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
# public
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
'accounts/{instrument_id}/leverage',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
],
},
'option': {
'get': [
'accounts',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 429 Client Error: Too Many Requests for url
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30015, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in self website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "self function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "self account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting self order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time Stringerval of orders should set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in self setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for self position will fall short of the required margin in self setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support self action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of self asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for self asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during self period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for self pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for self pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for self token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "self parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of self leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, self token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': ExchangeError, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for oder cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap', 'option'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
def fetch_time(self, params={}):
response = self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
    def parse_market(self, market):
        """Parse a raw exchange market structure into a unified market dict.

        One parser handles all four market types(spot, futures, swap and
        option); the type is inferred from the fields present in the raw
        payload - see the sample payloads in the comments below.
        """
        #
        # spot markets
        #
        #     {
        #         base_currency: "EOS",
        #         instrument_id: "EOS-OKB",
        #         min_size: "0.01",
        #         quote_currency: "OKB",
        #         size_increment: "0.000001",
        #         tick_size: "0.0001"
        #     }
        #
        # futures markets
        #
        #     {
        #         instrument_id: "XRP-USD-200320",
        #         underlying_index: "XRP",
        #         quote_currency: "USD",
        #         tick_size: "0.0001",
        #         contract_val: "10",
        #         listing: "2020-03-06",
        #         delivery: "2020-03-20",
        #         trade_increment: "1",
        #         alias: "self_week",
        #         underlying: "XRP-USD",
        #         base_currency: "XRP",
        #         settlement_currency: "XRP",
        #         is_inverse: "true",
        #         contract_val_currency: "USD",
        #     }
        #
        # swap markets
        #
        #     {
        #         instrument_id: "BSV-USD-SWAP",
        #         underlying_index: "BSV",
        #         quote_currency: "USD",
        #         coin: "BSV",
        #         contract_val: "10",
        #         listing: "2018-12-21T07:53:47.000Z",
        #         delivery: "2020-03-14T08:00:00.000Z",
        #         size_increment: "1",
        #         tick_size: "0.01",
        #         base_currency: "BSV",
        #         underlying: "BSV-USD",
        #         settlement_currency: "BSV",
        #         is_inverse: "true",
        #         contract_val_currency: "USD"
        #     }
        #
        # options markets
        #
        #     {
        #         instrument_id: 'BTC-USD-200327-4000-C',
        #         underlying: 'BTC-USD',
        #         settlement_currency: 'BTC',
        #         contract_val: '0.1000',
        #         option_type: 'C',
        #         strike: '4000',
        #         tick_size: '0.0005',
        #         lot_size: '1.0000',
        #         listing: '2019-12-25T08:30:36.302Z',
        #         delivery: '2020-03-27T08:00:00.000Z',
        #         state: '2',
        #         trading_start_time: '2019-12-25T08:30:36.302Z',
        #         timestamp: '2020-03-13T08:05:09.456Z',
        #     }
        #
        id = self.safe_string(market, 'instrument_id')
        # start from spot defaults, refine below based on the fields present
        marketType = 'spot'
        spot = True
        future = False
        swap = False
        option = False
        baseId = self.safe_string(market, 'base_currency')
        quoteId = self.safe_string(market, 'quote_currency')
        contractVal = self.safe_float(market, 'contract_val')
        if contractVal is not None:
            # only contract markets(futures/swap/option) carry contract_val
            if 'option_type' in market:
                marketType = 'option'
                spot = False
                option = True
                # option payloads have no base/quote_currency fields -
                # derive them from the underlying, e.g. 'BTC-USD'
                underlying = self.safe_string(market, 'underlying')
                parts = underlying.split('-')
                baseId = self.safe_string(parts, 0)
                quoteId = self.safe_string(parts, 1)
            else:
                marketType = 'swap'
                spot = False
                swap = True
                # the 'alias' field(e.g. 'self_week') is only present on
                # dated futures contracts, not on perpetual swaps
                futuresAlias = self.safe_string(market, 'alias')
                if futuresAlias is not None:
                    swap = False
                    future = True
                    marketType = 'futures'
                    baseId = self.safe_string(market, 'underlying_index')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        # contracts keep the raw instrument id as the symbol, spot uses BASE/QUOTE
        symbol = (base + '/' + quote) if spot else id
        # futures use 'trade_increment', options use 'lot_size' for amount steps
        lotSize = self.safe_float_2(market, 'lot_size', 'trade_increment')
        precision = {
            'amount': self.safe_float(market, 'size_increment', lotSize),
            'price': self.safe_float(market, 'tick_size'),
        }
        minAmount = self.safe_float_2(market, 'min_size', 'base_min_size')
        active = True
        # merge in the fee schedule for this market type(falls back to 'trading')
        fees = self.safe_value_2(self.fees, marketType, 'trading', {})
        return self.extend(fees, {
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'info': market,
            'type': marketType,
            'spot': spot,
            'futures': future,
            'swap': swap,
            'option': option,
            'active': active,
            'precision': precision,
            'limits': {
                'amount': {
                    'min': minAmount,
                    'max': None,
                },
                'price': {
                    'min': precision['price'],
                    'max': None,
                },
                'cost': {
                    'min': precision['price'],
                    'max': None,
                },
            },
        })
def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
# alias: "self_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = canDeposit and canWithdraw
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = getattr(self, method)(self.extend(request, params))
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': self.safe_float(ticker, 'best_bid_size'),
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': self.safe_float(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
def fetch_tickers_by_type(self, type, symbols=None, params={}):
self.load_markets()
method = type + 'GetInstrumentsTicker'
response = getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
    def parse_trade(self, trade, market=None):
        """Parse a raw public or private trade into the unified trade structure.

        Handles spot/margin trades as well as futures/swap trades, which differ
        in the field names used for the amount('size' vs 'qty'/'order_qty') and
        in which timestamp field is present('timestamp' vs 'created_at').
        """
        #
        # fetchTrades(public)
        #
        #     spot trades
        #
        #         {
        #             time: "2018-12-17T23:31:08.268Z",
        #             timestamp: "2018-12-17T23:31:08.268Z",
        #             trade_id: "409687906",
        #             price: "0.02677805",
        #             size: "0.923467",
        #             side: "sell"
        #         }
        #
        #     futures trades, swap trades
        #
        #         {
        #             trade_id: "1989230840021013",
        #             side: "buy",
        #             price: "92.42",
        #             qty: "184",  # missing in swap markets
        #             size: "5",  # missing in futures markets
        #             timestamp: "2018-12-17T23:26:04.613Z"
        #         }
        #
        # fetchOrderTrades(private)
        #
        #     spot trades, margin trades
        #
        #         {
        #             "created_at":"2019-03-15T02:52:56.000Z",
        #             "exec_type":"T",  # whether the order is taker or maker
        #             "fee":"0.00000082",
        #             "instrument_id":"BTC-USDT",
        #             "ledger_id":"3963052721",
        #             "liquidity":"T",  # whether the order is taker or maker
        #             "order_id":"2482659399697408",
        #             "price":"3888.6",
        #             "product_id":"BTC-USDT",
        #             "side":"buy",
        #             "size":"0.00055306",
        #             "timestamp":"2019-03-15T02:52:56.000Z"
        #         },
        #
        #     futures trades, swap trades
        #
        #         {
        #             "trade_id":"197429674631450625",
        #             "instrument_id":"EOS-USD-SWAP",
        #             "order_id":"6a-7-54d663a28-0",
        #             "price":"3.633",
        #             "order_qty":"1.0000",
        #             "fee":"-0.000551",
        #             "created_at":"2019-03-21T04:41:58.0Z",  # missing in swap trades
        #             "timestamp":"2019-03-25T05:56:31.287Z",  # missing in futures trades
        #             "exec_type":"M",  # whether the order is taker or maker
        #             "side":"short",  # "buy" in futures trades
        #         }
        #
        # resolve the symbol: a known market id wins, then a synthesized
        # BASE/QUOTE from the id, then the market passed in by the caller
        symbol = None
        marketId = self.safe_string(trade, 'instrument_id')
        base = None
        quote = None
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
            base = market['base']
            quote = market['quote']
        elif marketId is not None:
            # unknown id - derive the symbol from the id itself when it
            # looks like a simple BASE-QUOTE pair, otherwise use it verbatim
            parts = marketId.split('-')
            numParts = len(parts)
            if numParts == 2:
                baseId, quoteId = parts
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
            else:
                symbol = marketId
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
            base = market['base']
            quote = market['quote']
        timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
        price = self.safe_float(trade, 'price')
        # spot uses 'size', futures use 'qty', swap private trades use 'order_qty'
        amount = self.safe_float_2(trade, 'size', 'qty')
        amount = self.safe_float(trade, 'order_qty', amount)
        takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
        if takerOrMaker == 'M':
            takerOrMaker = 'maker'
        elif takerOrMaker == 'T':
            takerOrMaker = 'taker'
        side = self.safe_string(trade, 'side')
        cost = None
        if amount is not None:
            if price is not None:
                cost = amount * price
        feeCost = self.safe_float(trade, 'fee')
        fee = None
        if feeCost is not None:
            # buys pay the fee in base currency, sells in quote currency
            feeCurrency = base if (side == 'buy') else quote
            fee = {
                # fee is either a positive number(invitation rebate)
                # or a negative number(transaction fee deduction)
                # therefore we need to invert the fee
                # more about it https://github.com/ccxt/ccxt/issues/5909
                'cost': -feeCost,
                'currency': feeCurrency,
            }
        orderId = self.safe_string(trade, 'order_id')
        return {
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
            'order': orderId,
            'type': None,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_float(ohlcv, 1), # Open
self.safe_float(ohlcv, 2), # High
self.safe_float(ohlcv, 3), # Low
self.safe_float(ohlcv, 4), # Close
# self.safe_float(ohlcv, 5), # Quote Volume
# self.safe_float(ohlcv, 6), # Base Volume
self.safe_float(ohlcv, volumeIndex), # Volume, okex will return base volume in the 7th element for future markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_float(ohlcv, 'open'), # Open
self.safe_float(ohlcv, 'high'), # High
self.safe_float(ohlcv, 'low'), # Low
self.safe_float(ohlcv, 'close'), # Close
self.safe_float(ohlcv, 'volume'), # Base Volume
]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a symbol.

        Uses either the 'Candles' endpoint(default) or 'HistoryCandles',
        selectable via params['type'] or options['fetchOHLCV']['type'].
        The start/end window is derived from `since` and `limit` where given.
        """
        self.load_markets()
        market = self.market(symbol)
        duration = self.parse_timeframe(timeframe)  # timeframe length in seconds
        request = {
            'instrument_id': market['id'],
            'granularity': self.timeframes[timeframe],
        }
        options = self.safe_value(self.options, 'fetchOHLCV', {})
        defaultType = self.safe_string(options, 'type', 'Candles')  # Candles or HistoryCandles
        type = self.safe_string(params, 'type', defaultType)
        params = self.omit(params, 'type')
        method = market['type'] + 'GetInstrumentsInstrumentId' + type
        if type == 'Candles':
            # forward paging: start at `since`, end `limit` candles later
            if since is not None:
                if limit is not None:
                    request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
                request['start'] = self.iso8601(since)
            else:
                if limit is not None:
                    now = self.milliseconds()
                    request['start'] = self.iso8601(now - limit * duration * 1000)
                    request['end'] = self.iso8601(now)
        elif type == 'HistoryCandles':
            if market['option']:
                raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')
            # NOTE(review): start/end look swapped relative to the Candles branch -
            # this mirrors the HistoryCandles endpoint's reversed paging direction;
            # confirm against the OKEX v3 API documentation before changing
            if since is not None:
                if limit is None:
                    limit = 300  # default
                request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
                request['end'] = self.iso8601(since)
            else:
                if limit is not None:
                    now = self.milliseconds()
                    request['end'] = self.iso8601(now - limit * duration * 1000)
                    request['start'] = self.iso8601(now)
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot markets
        #
        #     [
        #         {
        #             close: "0.02683401",
        #             high: "0.02683401",
        #             low: "0.02683401",
        #             open: "0.02683401",
        #             time: "2018-12-17T23:47:00.000Z",
        #             volume: "0"
        #         },
        #         {
        #             close: "0.02684545",
        #             high: "0.02685084",
        #             low: "0.02683312",
        #             open: "0.02683894",
        #             time: "2018-12-17T20:28:00.000Z",
        #             volume: "101.457222"
        #         }
        #     ]
        #
        # futures
        #
        #     [
        #         [
        #             1545090660000,
        #             0.3171,
        #             0.3174,
        #             0.3171,
        #             0.3173,
        #             1648,
        #             51930.38579450868
        #         ],
        #         [
        #             1545072720000,
        #             0.3159,
        #             0.3161,
        #             0.3144,
        #             0.3149,
        #             22886,
        #             725179.26172331
        #         ]
        #     ]
        #
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'hold')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
    def parse_margin_balance(self, response):
        """Parse a margin balance response into per-symbol unified balances.

        Each response entry describes one instrument and embeds per-currency
        sub-balances under keys of the shape 'currency:BTC'.
        """
        #
        #     [
        #         {
        #             "currency:BTC": {
        #                 "available":"0",
        #                 "balance":"0",
        #                 "borrowed":"0",
        #                 "can_withdraw":"0",
        #                 "frozen":"0",
        #                 "hold":"0",
        #                 "holds":"0",
        #                 "lending_fee":"0"
        #             },
        #             "currency:USDT": {
        #                 "available":"100",
        #                 "balance":"100",
        #                 "borrowed":"0",
        #                 "can_withdraw":"100",
        #                 "frozen":"0",
        #                 "hold":"0",
        #                 "holds":"0",
        #                 "lending_fee":"0"
        #             },
        #             "instrument_id":"BTC-USDT",
        #             "liquidation_price":"0",
        #             "product_id":"BTC-USDT",
        #             "risk_rate":""
        #         },
        #     ]
        #
        result = {'info': response}
        for i in range(0, len(response)):
            balance = response[i]
            marketId = self.safe_string(balance, 'instrument_id')
            market = self.safe_value(self.markets_by_id, marketId)
            symbol = None
            if market is None:
                # unknown market - synthesize the symbol from the instrument id
                baseId, quoteId = marketId.split('-')
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
            else:
                symbol = market['symbol']
            # strip the metadata fields so only the 'currency:XXX' keys remain
            omittedBalance = self.omit(balance, [
                'instrument_id',
                'liquidation_price',
                'product_id',
                'risk_rate',
                'margin_ratio',
                'maint_margin_ratio',
                'tiers',
            ])
            keys = list(omittedBalance.keys())
            accounts = {}
            for k in range(0, len(keys)):
                key = keys[k]
                marketBalance = balance[key]
                if key.find(':') >= 0:
                    # key format: 'currency:BTC' - the part after ':' is the currency id
                    parts = key.split(':')
                    currencyId = parts[1]
                    code = self.safe_currency_code(currencyId)
                    account = self.account()
                    account['total'] = self.safe_float(marketBalance, 'balance')
                    account['used'] = self.safe_float(marketBalance, 'hold')
                    account['free'] = self.safe_float(marketBalance, 'available')
                    accounts[code] = account
                else:
                    # an unrecognized key means the API response format changed
                    raise NotSupported(self.id + ' margin balance response format has changed!')
            result[symbol] = self.parse_balance(accounts)
        return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_float(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for i in range(0, len(contracts)):
contract = contracts[i]
fixedBalance = self.safe_float(contract, 'fixed_balance')
realizedPnl = self.safe_float(contract, 'realized_pnl')
marginFrozen = self.safe_float(contract, 'margin_frozen')
marginForUnfilled = self.safe_float(contract, 'margin_for_unfilled')
margin = self.sum(fixedBalance, realizedPnl) - marginFrozen - marginForUnfilled
free = self.sum(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_float(balance, 'realized_pnl')
unrealizedPnl = self.safe_float(balance, 'unrealized_pnl')
marginFrozen = self.safe_float(balance, 'margin_frozen')
marginForUnfilled = self.safe_float(balance, 'margin_for_unfilled')
account['free'] = self.sum(totalAvailBalance, realizedPnl, unrealizedPnl) - marginFrozen - marginForUnfilled
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create an order.

        For futures/swap markets `type` carries the numeric open/close code
        (1:open long 2:open short 3:close long 4:close short) and market
        orders are expressed via match_price/order_type.  For spot/margin,
        `type` is 'limit' or 'market'; market buys require either a price
        (to compute the notional) or an explicit 'notional' parameter.
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'instrument_id': market['id'],
            # 'client_oid': 'abcdef1234567890',  # [a-z0-9]{1,32}
            # 'order_type': '0',  # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediatel Or Cancel, 4 = Market for futures only
        }
        clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
        if clientOrderId is not None:
            request['client_oid'] = clientOrderId
            params = self.omit(params, ['client_oid', 'clientOrderId'])
        method = None
        if market['futures'] or market['swap']:
            # futures sizes are plain contract counts; swap sizes respect precision
            size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
            request = self.extend(request, {
                'type': type,  # 1:open long 2:open short 3:close long 4:close short for futures
                'size': size,
                # 'match_price': '0',  # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
            })
            orderType = self.safe_string(params, 'order_type')
            # order_type == '4' means a market order
            isMarketOrder = (type == 'market') or (orderType == '4')
            if isMarketOrder:
                request['match_price'] = '1'  # take the best counterparty price
            else:
                request['price'] = self.price_to_precision(symbol, price)
            if market['futures']:
                request['leverage'] = '10'  # or '20'
            method = market['type'] + 'PostOrder'
        else:
            # spot and margin share the same endpoint, selected by margin_trading
            marginTrading = self.safe_string(params, 'margin_trading', '1')  # 1 = spot, 2 = margin
            request = self.extend(request, {
                'side': side,
                'type': type,  # limit/market
                'margin_trading': marginTrading,  # 1 = spot, 2 = margin
            })
            if type == 'limit':
                request['price'] = self.price_to_precision(symbol, price)
                request['size'] = self.amount_to_precision(symbol, amount)
            elif type == 'market':
                # for market buy it requires the amount of quote currency to spend
                if side == 'buy':
                    notional = self.safe_float(params, 'notional')
                    createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
                    if createMarketBuyOrderRequiresPrice:
                        if price is not None:
                            if notional is None:
                                notional = amount * price
                        elif notional is None:
                            raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
                    else:
                        notional = amount if (notional is None) else notional
                    precision = market['precision']['price']
                    request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)
                else:
                    request['size'] = self.amount_to_precision(symbol, amount)
            method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
        response = getattr(self, method)(self.extend(request, params))
        #
        #     {
        #         "client_oid":"oktspot79",
        #         "error_code":"",
        #         "error_message":"",
        #         "order_id":"2510789768709120",
        #         "result":true
        #     }
        #
        order = self.parse_order(response, market)
        # the creation response does not echo type/side, so restore them here
        return self.extend(order, {
            'type': type,
            'side': side,
        })
    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by exchange id or by client_oid/clientOrderId.

        The endpoint name is assembled from the market type, the id flavour
        (order id vs client oid) and the spot-vs-derivatives URL shape.
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = None
        if market['futures'] or market['swap']:
            type = market['type']
        else:
            # spot/margin: the caller may override the type via params/options
            defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
            type = self.safe_string(params, 'type', defaultType)
        if type is None:
            raise ArgumentsRequired(self.id + " cancelOrder requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
        method = type + 'PostCancelOrder'
        request = {
            'instrument_id': market['id'],
        }
        # derivatives endpoints embed the instrument id in the URL path,
        # spot/margin endpoints use a pluralized path instead
        if market['futures'] or market['swap']:
            method += 'InstrumentId'
        else:
            method += 's'
        clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
        if clientOrderId is not None:
            method += 'ClientOid'
            request['client_oid'] = clientOrderId
        else:
            method += 'OrderId'
            request['order_id'] = id
        query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
        response = getattr(self, method)(self.extend(request, query))
        # spot/margin responses nest the result list under the market id,
        # futures/swap responses carry a top-level 'result' flag
        result = response if ('result' in response) else self.safe_value(response, market['id'], {})
        #
        # spot, margin
        #
        #     {
        #         "btc-usdt": [
        #             {
        #                 "result":true,
        #                 "client_oid":"a123",
        #                 "order_id": "2510832677225473"
        #             }
        #         ]
        #     }
        #
        # futures, swap
        #
        #     {
        #         "result": True,
        #         "client_oid": "oktfuture10",  # missing if requested by order_id
        #         "order_id": "2517535534836736",
        #         "instrument_id": "EOS-USD-190628"
        #     }
        #
        return self.parse_order(result, market)
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
# "funds":"", # self is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap, spo and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_float(order, 'size')
filled = self.safe_float_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_float_2(order, 'filled_notional', 'funds')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_float(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if len(clientOrderId) < 1:
clientOrderId = None # fix empty clientOrderId string
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
    def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
        """Fetch orders for a symbol filtered by exchange state code.

        State codes: -2 failed, -1 cancelled, 0 open, 1 partially filled,
        2 fully filled, 3 submitting, 4 cancelling, 6 incomplete(open +
        partially filled), 7 complete(cancelled + fully filled).
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrdersByState requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = None
        if market['futures'] or market['swap']:
            type = market['type']
        else:
            # spot/margin: the caller may override the type via params/options
            defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
            type = self.safe_string(params, 'type', defaultType)
        if type is None:
            raise ArgumentsRequired(self.id + " fetchOrder requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
        request = {
            'instrument_id': market['id'],
            # '-2': failed,
            # '-1': cancelled,
            # '0': open ,
            # '1': partially filled,
            # '2': fully filled,
            # '3': submitting,
            # '4': cancelling,
            # '6': incomplete(open+partially filled),
            # '7': complete(cancelled+fully filled),
            'state': state,
        }
        method = type + 'GetOrders'
        if market['futures'] or market['swap']:
            # derivatives endpoints embed the instrument id in the URL path
            method += 'InstrumentId'
        query = self.omit(params, 'type')
        response = getattr(self, method)(self.extend(request, query))
        #
        # spot, margin
        #
        #     [
        #         # in fact, self documented API response does not correspond
        #         # to their actual API response for spot markets
        #         # OKEX v3 API returns a plain array of orders(see below)
        #         [
        #             {
        #                 "client_oid":"oktspot76",
        #                 "created_at":"2019-03-18T07:26:49.000Z",
        #                 "filled_notional":"3.9734",
        #                 "filled_size":"0.001",
        #                 "funds":"",
        #                 "instrument_id":"BTC-USDT",
        #                 "notional":"",
        #                 "order_id":"2500723297813504",
        #                 "order_type":"0",
        #                 "price":"4013",
        #                 "product_id":"BTC-USDT",
        #                 "side":"buy",
        #                 "size":"0.001",
        #                 "status":"filled",
        #                 "state": "2",
        #                 "timestamp":"2019-03-18T07:26:49.000Z",
        #                 "type":"limit"
        #             },
        #         ],
        #         {
        #             "before":"2500723297813504",
        #             "after":"2500650881647616"
        #         }
        #     ]
        #
        # futures, swap
        #
        #     {
        #         "result":true,  # missing in swap orders
        #         "order_info": [
        #             {
        #                 "instrument_id":"EOS-USD-190628",
        #                 "size":"10",
        #                 "timestamp":"2019-03-20T10:04:55.000Z",
        #                 "filled_qty":"10",
        #                 "fee":"-0.00841043",
        #                 "order_id":"2512669605501952",
        #                 "price":"3.668",
        #                 "price_avg":"3.567",
        #                 "status":"2",
        #                 "state": "2",
        #                 "type":"4",
        #                 "contract_val":"10",
        #                 "leverage":"10",  # missing in swap orders
        #                 "client_oid":"",
        #                 "pnl":"1.09510794",  # missing in swap orders
        #                 "order_type":"0"
        #             },
        #         ]
        #     }
        #
        orders = None
        if market['swap'] or market['futures']:
            orders = self.safe_value(response, 'order_info', [])
        else:
            orders = response
            responseLength = len(response)
            if responseLength < 1:
                return []
            # in fact, self documented API response does not correspond
            # to their actual API response for spot markets
            # OKEX v3 API returns a plain array of orders
            if responseLength > 1:
                # if the last element carries pagination cursors, the orders
                # are actually nested in the first element
                before = self.safe_value(response[1], 'before')
                if before is not None:
                    orders = response[0]
        return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('6', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addresses = self.parse_deposit_addresses(response)
address = self.safe_value(addresses, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Create a withdrawal request.

        A 'fee' string parameter is mandatory in params(the network
        transaction fee; '0' for fee-free internal OKCoin/OKEx transfers).
        The funds password is taken from params['password'],
        params['trade_pwd'] or self.password, in that order.
        """
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        if tag:
            # okex expects the tag appended to the address with a ':' separator
            address = address + ':' + tag
        fee = self.safe_string(params, 'fee')
        if fee is None:
            raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
        request = {
            'currency': currency['id'],
            'to_address': address,
            'destination': '4',  # 2 = OKCoin International, 3 = OKEx 4 = others
            'amount': self.number_to_string(amount),
            'fee': fee,  # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
        }
        if 'password' in params:
            request['trade_pwd'] = params['password']
        elif 'trade_pwd' in params:
            request['trade_pwd'] = params['trade_pwd']
        elif self.password:
            request['trade_pwd'] = self.password
        query = self.omit(params, ['fee', 'password', 'trade_pwd'])
        if not ('trade_pwd' in request):
            raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
        response = self.accountPostWithdrawal(self.extend(request, query))
        #
        #     {
        #         "amount":"0.1",
        #         "withdrawal_id":"67485",
        #         "currency":"btc",
        #         "result":true
        #     }
        #
        return {
            'info': response,
            'id': self.safe_string(response, 'withdrawal_id'),
        }
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Parse a raw deposit/withdrawal entry into a unified ccxt transaction.

        Withdrawal entries carry a 'withdrawal_id' and a currency-suffixed
        'fee' string(e.g. "0.01000000eth"); deposit entries carry a
        'deposit_id'(and, for a while, a 'payment_id') and are treated as
        fee-free.
        """
        #
        # withdraw
        #
        #     {"amount":"0.1", "withdrawal_id":"67485", "currency":"btc", "result":true}
        #
        # fetchWithdrawals
        #
        #     {"amount":"4.72100000", "withdrawal_id":"1729116", "fee":"0.01000000eth",
        #      "txid":"0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
        #      "currency":"ETH", "from":"7147338839",
        #      "to":"0x26a3CB49578F07000575405a57888681249c35Fd",
        #      "timestamp":"2018-08-17T07:03:42.000Z", "status":"2"}
        #
        # fetchDeposits
        #
        #     {"amount":"4.19511659",
        #      "txid":"14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
        #      "currency":"XMR", "from":"", "to":"48PjH3ksv1...",
        #      "deposit_id": 11571659, <-- we can use self
        #      "timestamp":"2019-10-01T14:54:19.000Z", "status":"2"}
        #
        type = None
        id = None
        address = None
        withdrawalId = self.safe_string(transaction, 'withdrawal_id')
        addressFrom = self.safe_string(transaction, 'from')
        addressTo = self.safe_string(transaction, 'to')
        if withdrawalId is not None:
            type = 'withdrawal'
            id = withdrawalId
            address = addressTo
        else:
            # the payment_id will appear on new deposits but appears to be removed from the response after 2 months
            id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
            type = 'deposit'
            address = addressTo
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId)
        amount = self.safe_float(transaction, 'amount')
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        txid = self.safe_string(transaction, 'txid')
        timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
        feeCost = None
        if type == 'deposit':
            feeCost = 0
        else:
            if currencyId is not None:
                # the fee comes as "<number><lowercase currency id>", e.g. "0.01000000eth",
                # so strip the currency suffix before converting to float
                feeWithCurrencyId = self.safe_string(transaction, 'fee')
                if feeWithCurrencyId is not None:
                    # https://github.com/ccxt/ccxt/pull/5748
                    lowercaseCurrencyId = currencyId.lower()
                    feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
                    feeCost = float(feeWithoutCurrencyId)
        # todo parse tags
        return {
            'info': transaction,
            'id': id,
            'currency': code,
            'amount': amount,
            'addressFrom': addressFrom,
            'addressTo': addressTo,
            'address': address,
            'tagFrom': None,
            'tagTo': None,
            'tag': None,
            'status': status,
            'type': type,
            'updated': None,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': {
                'currency': code,
                'cost': feeCost,
            },
        }
    def parse_my_trade(self, pair, market=None):
        """Reassemble ONE unified trade from a pair of ledger-style fills.

        okex reports each fill twice — once per currency leg, with opposite
        sides. The leg with a non-zero fee is the user's leg; whether the
        received currency equals the quote currency decides buy vs sell:

            # market/limit sell
            {"currency":"USDT", "fee":"-0.04647925",  # fee in received quote currency
             "price":"129.13", "size":"30.98616393"},  # price / cost
            {"currency":"ETH", "fee":"0", "price":"129.13",
             "size":"0.23996099"}  # amount

            # market/limit buy
            {"currency":"ETH", "fee":"-0.00036049",  # fee in received base currency
             "price":"129.16", "size":"0.240322"},  # price / amount
            {"currency":"USDT", "fee":"0", "price":"129.16",
             "size":"31.03998952"}  # cost
        """
        # check that trading symbols match in both entries
        first = pair[0]
        second = pair[1]
        firstMarketId = self.safe_string(first, 'instrument_id')
        secondMarketId = self.safe_string(second, 'instrument_id')
        if firstMarketId != secondMarketId:
            raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
        marketId = firstMarketId
        # determine the base and quote
        quoteId = None
        symbol = None
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            quoteId = market['quoteId']
            symbol = market['symbol']
        else:
            # unknown market: fall back to splitting the "BASE-QUOTE" id
            parts = marketId.split('-')
            quoteId = self.safe_string(parts, 1)
            symbol = marketId
        id = self.safe_string(first, 'trade_id')
        price = self.safe_float(first, 'price')
        # determine buy/sell side and amounts
        # get the side from either the first trade or the second trade
        feeCost = self.safe_float(first, 'fee')
        # the user's leg is the one carrying a non-zero fee
        index = 0 if (feeCost != 0) else 1
        userTrade = self.safe_value(pair, index)
        otherTrade = self.safe_value(pair, 1 - index)
        receivedCurrencyId = self.safe_string(userTrade, 'currency')
        side = None
        amount = None
        cost = None
        if receivedCurrencyId == quoteId:
            side = 'sell'
            amount = self.safe_float(otherTrade, 'size')
            cost = self.safe_float(userTrade, 'size')
        else:
            side = 'buy'
            amount = self.safe_float(userTrade, 'size')
            cost = self.safe_float(otherTrade, 'size')
        feeCost = feeCost if (feeCost != 0) else self.safe_float(second, 'fee')
        trade = self.safe_value(pair, index)
        timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
        takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
        if takerOrMaker == 'M':
            takerOrMaker = 'maker'
        elif takerOrMaker == 'T':
            takerOrMaker = 'taker'
        fee = None
        if feeCost is not None:
            feeCurrencyId = self.safe_string(userTrade, 'currency')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                # fee is either a positive number(invitation rebate)
                # or a negative number(transaction fee deduction)
                # therefore we need to invert the fee
                # more about it https://github.com/ccxt/ccxt/issues/5909
                'cost': -feeCost,
                'currency': feeCurrencyCode,
            }
        orderId = self.safe_string(trade, 'order_id')
        return {
            'info': pair,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'id': id,
            'order': orderId,
            'type': None,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
symbol = None
if market is not None:
symbol = market['symbol']
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
# self aspect renders the 'fills' endpoint unusable for fetchOrderTrades
# until either OKEX fixes the API or we workaround self on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
# 'before': '1', # P=pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if (type == 'spot') or (type == 'futures'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
# request['type'] = 'number' # All types will be returned if self filed is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """Parse one raw ledger item into a unified ccxt ledger entry.

        Raw shapes by endpoint(abbreviated):

          account: {"amount":0.00051843, "balance":0.00100941, "currency":"BTC",
                    "fee":0, "ledger_id":8987285,
                    "timestamp":"2018-10-12T11:01:14.000Z",
                    "typename":"Get from activity"}

          spot:    {"timestamp":"2019-03-18T07:08:25.000Z", "ledger_id":"3995334780",
                    "created_at":"2019-03-18T07:08:25.000Z", "currency":"BTC",
                    "amount":"0.0009985", "balance":"0.0029955", "type":"trade",
                    "details":{"instrument_id":"BTC-USDT",
                               "order_id":"2500650881647616",
                               "product_id":"BTC-USDT"}}

          margin:  same as spot(with "type":"transfer" etc.)

          futures: {"ledger_id":"2508090544914461", "timestamp":"...",
                    "amount":"-0.00529521", "balance":"0", "currency":"EOS",
                    "type":"fee", "details":{"order_id":"...", "instrument_id":"EOS-USD-190628"}}

          swap:    {"amount":"0.004742", "fee":"-0.000551", "type":"match",
                    "instrument_id":"EOS-USD-SWAP", "ledger_id":"197429674941902848",
                    "timestamp":"2019-03-25T05:56:31.286Z"}
        """
        id = self.safe_string(item, 'ledger_id')
        account = None
        details = self.safe_value(item, 'details', {})
        referenceId = self.safe_string(details, 'order_id')
        referenceAccount = None
        type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
        code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
        amount = self.safe_float(item, 'amount')
        timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
        fee = {
            'cost': self.safe_float(item, 'fee'),
            'currency': code,
        }
        before = None
        after = self.safe_float(item, 'balance')
        status = 'ok'
        # only swap entries carry an instrument_id at the top level
        marketId = self.safe_string(item, 'instrument_id')
        symbol = None
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
        return {
            'info': item,
            'id': id,
            'account': account,
            'referenceId': referenceId,
            'referenceAccount': referenceAccount,
            'type': type,
            'currency': code,
            'symbol': symbol,
            'amount': amount,
            'before': before,  # balance before
            'after': after,  # balance after
            'status': status,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': fee,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, headers and body for an API request.

        Public endpoints only get their query urlencoded. Private endpoints
        are signed per the OKEx v3 scheme: OK-ACCESS-SIGN is the base64
        HMAC-SHA256, keyed with the API secret, of the prehash string
        timestamp + method + requestPath(+ '?query' for GET requests, or the
        JSON body otherwise).
        """
        isArray = isinstance(params, list)
        request = '/api/' + api + '/' + self.version + '/'
        request += path if isArray else self.implode_params(path, params)
        query = params if isArray else self.omit(params, self.extract_params(path))
        url = self.implode_params(self.urls['api']['rest'], {'hostname': self.hostname}) + request
        type = self.get_path_authentication_type(path)
        if type == 'public':
            if query:
                url += '?' + self.urlencode(query)
        elif type == 'private':
            self.check_required_credentials()
            timestamp = self.iso8601(self.milliseconds())
            headers = {
                'OK-ACCESS-KEY': self.apiKey,
                'OK-ACCESS-PASSPHRASE': self.password,
                'OK-ACCESS-TIMESTAMP': timestamp,
                # 'OK-FROM': '',
                # 'OK-TO': '',
                # 'OK-LIMIT': '',
            }
            # prehash string: timestamp + method + requestPath [+ query or body]
            auth = timestamp + method + request
            if method == 'GET':
                if query:
                    urlencodedQuery = '?' + self.urlencode(query)
                    url += urlencodedQuery
                    auth += urlencodedQuery
            else:
                if isArray or query:
                    body = self.json(query)
                    auth += body
                headers['Content-Type'] = 'application/json'
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
            headers['OK-ACCESS-SIGN'] = signature
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
# a special case to handle the optionGetUnderlying interefering with
# other endpoints containing self keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Inspect an HTTP response and raise a unified ccxt exception on error.

        Error payloads look like:
            {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
        Both the textual message and the numeric 'code'/'error_code' are
        matched against self.exceptions; anything unmatched raises a generic
        ExchangeError.
        """
        if not response:
            return  # fallback to default error handler
        feedback = self.id + ' ' + body
        if code == 503:
            # e.g. {"message":"name resolution failed"}
            raise ExchangeNotAvailable(feedback)
        message = self.safe_string(response, 'message')
        errorCode = self.safe_string_2(response, 'code', 'error_code')
        nonEmptyMessage = ((message is not None) and (message != ''))
        nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
        if nonEmptyMessage:
            self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
        if nonZeroErrorCode:
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
        if nonZeroErrorCode or nonEmptyMessage:
            raise ExchangeError(feedback)  # unknown message
| 47.430503 | 521 | 0.47705 |
434261bf779d7b5a458eef6887df1cb0f493a8cf | 2,564 | py | Python | angr/analyses/reaching_definitions/def_use.py | KevinPoole/angr | b085b3aaed2d817d647e7937e80dad8eaab1e74b | [
"BSD-2-Clause"
] | null | null | null | angr/analyses/reaching_definitions/def_use.py | KevinPoole/angr | b085b3aaed2d817d647e7937e80dad8eaab1e74b | [
"BSD-2-Clause"
] | null | null | null | angr/analyses/reaching_definitions/def_use.py | KevinPoole/angr | b085b3aaed2d817d647e7937e80dad8eaab1e74b | [
"BSD-2-Clause"
] | null | null | null | import networkx
from .reaching_definitions import LiveDefinitions, ReachingDefinitionsAnalysis
from .definition import Definition
from .atoms import Atom
class GuardUse(Atom):
    """Pseudo-atom representing the evaluation of a branch guard.

    It is not a real storage location; it only exists so that guard
    evaluations appear as nodes in the def-use graph(see DefUseState.mark_guard).
    """
    def __init__(self, target):
        self.target = target  # address of the branch target guarded by this condition
    def __repr__(self):
        return '<Guard %#x>' % self.target
class DefUseState(LiveDefinitions):
    """LiveDefinitions state that additionally records def->use edges.

    Whenever a new definition is created, edges are added in the owning
    DefUseAnalysis' def_use_graph from every definition used at the current
    code location to the freshly created definition.
    """
    def _cycle(self, code_loc):
        # reset the per-location use set whenever we move to a new code location
        if code_loc != self.analysis.current_codeloc:
            self.analysis.current_codeloc = code_loc
            self.analysis.codeloc_uses = set()
    def add_use(self, atom, code_loc):
        """Record that the definitions of `atom` are used at `code_loc`."""
        self._cycle(code_loc)
        self.analysis.codeloc_uses.update(self.get_definitions(atom))
        return super().add_use(atom, code_loc)
    def kill_and_add_definition(self, atom, code_loc, data, dummy=False):
        """Create a new definition and link every definition used at this
        code location to it in the def-use graph."""
        self._cycle(code_loc)
        definition = super().kill_and_add_definition(atom, code_loc, data, dummy=dummy)
        if definition is not None:
            self.analysis.def_use_graph.add_node(definition)
            for used in self.analysis.codeloc_uses:
                # moderately confusing misnomers. this is an edge from a def to a use, since the "uses" are actually the
                # definitions that we're using and the "definition" is the new definition
                # i.e. the def that the old def is used to construct
                # so this is really a graph where nodes are defs and edges are uses
                self.analysis.def_use_graph.add_edge(used, definition)
        return definition
    def mark_guard(self, code_loc, data, target):
        """Insert a synthetic GuardUse node so branch conditions show up in the graph."""
        self._cycle(code_loc)
        atom = GuardUse(target)
        # not a real definition -- it is never killed, only linked from its uses
        kinda_definition = Definition(atom, code_loc, data)
        self.analysis.def_use_graph.add_node(kinda_definition)
        for used in self.analysis.codeloc_uses:
            self.analysis.def_use_graph.add_edge(used, kinda_definition)
class DefUseAnalysis(ReachingDefinitionsAnalysis):  # pylint: disable=abstract-method
    """Reaching-definitions analysis that also builds a def-use graph.

    Nodes of `def_use_graph` are Definition objects; an edge (a, b) means
    definition a was used while constructing definition b.
    """
    def __init__(self, *args, **kwargs):
        # per-codeloc bookkeeping consumed by DefUseState during the fixpoint
        self.def_use_graph = networkx.DiGraph()
        self.current_codeloc = None
        self.codeloc_uses = set()
        super().__init__(*args, **kwargs)
    def _initial_abstract_state(self, node):
        """Return the initial DefUseState(or the user-supplied init state)."""
        if self._init_state is not None:
            return self._init_state
        else:
            func_addr = self._function.addr if self._function else None
            return DefUseState(self.project.arch, track_tmps=self._track_tmps, analysis=self,
                               init_func=self._init_func, cc=self._cc, func_addr=func_addr)
| 41.354839 | 120 | 0.673167 |
860766f5b7e396034ed70275ab698c4665993ebb | 584 | py | Python | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 1,430 | 2015-01-01T19:22:19.000Z | 2022-03-29T11:34:35.000Z | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 360 | 2015-01-02T05:27:34.000Z | 2022-03-18T14:08:27.000Z | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 222 | 2015-01-07T20:07:02.000Z | 2022-03-22T16:12:47.000Z | # -*- coding: utf-8 -*-
import pytest
def test_translator():
def translator(string):
translations = {'String value is too long.': 'Tamanho de texto muito grande.'}
return translations.get(string, string)
from schematics.translator import register_translator
register_translator(translator)
from schematics.types import StringType
from schematics.exceptions import ValidationError
with pytest.raises(ValidationError) as exc:
StringType(max_length=1).validate_length('Abc')
assert exc.value == ['Tamanho de texto muito grande.']
| 30.736842 | 86 | 0.72089 |
8ee4060fd5f783a9d115432c7c5036ce2bff5306 | 4,733 | py | Python | CLASSES/RIGID_LOAD_SHARING.py | cfernandesFEUP/GEARpie | 06915490b260970520a36164dc997edc7175054f | [
"MIT"
] | 4 | 2021-12-21T13:50:58.000Z | 2022-01-08T09:55:37.000Z | CLASSES/RIGID_LOAD_SHARING.py | cfernandesFEUP/GEARpie | 06915490b260970520a36164dc997edc7175054f | [
"MIT"
] | null | null | null | CLASSES/RIGID_LOAD_SHARING.py | cfernandesFEUP/GEARpie | 06915490b260970520a36164dc997edc7175054f | [
"MIT"
] | 1 | 2022-02-27T06:49:35.000Z | 2022-02-27T06:49:35.000Z | '''MIT License
Copyright (c) 2022 Carlos M.C.G. Fernandes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. '''
class LINES:
    """Calculation of lines of contact length assuming a rigid load sharing model"""
    def __init__(self, size, GEO):
        """Discretize the plane of action and compute contact line lengths.

        :param size: number of points along the path of contact
        :param GEO: gear geometry object; assumed to provide b, betab,
                    epslon_alpha, epslon_beta, pbt, AE, T1A, T2A, rb1, rb2
                    (confirm against the GEO class definition)
        """
        import numpy as np
        # contact size
        self.size = size
        # condition A
        self.COND_A = GEO.b*np.tan(GEO.betab)
        # condition B
        self.COND_B = GEO.epslon_alpha*GEO.pbt
        # condition C
        self.COND_C = GEO.epslon_beta*GEO.pbt
        # condition D
        self.COND_D = (GEO.epslon_alpha + GEO.epslon_beta)*GEO.pbt
        # x discretization
        self.xx = np.linspace(0., self.COND_D, self.size)
        # face width discretization
        self.DISC_SIZE_AE = self.COND_B/len(self.xx)
        self.DISC_b = int(GEO.b/self.DISC_SIZE_AE)
        self.bpos = np.linspace(0, GEO.b, self.DISC_b)
        # number of tooth for the analysis
        self.N = int(np.ceil(GEO.epslon_alpha + GEO.epslon_beta))
        self.kt = np.arange(-self.N, self.N + 1)
        # create arrays (tooth index, path-of-contact position, face-width position)
        self.XC = np.zeros([len(self.kt), len(self.xx), len(self.bpos)])
        self.Lh = np.zeros([len(self.kt), len(self.xx), len(self.bpos)])
        self.L = np.zeros([len(self.xx), len(self.bpos)])
        # offset discretization according to the kth tooth
        for i in range(len(self.kt)):
            self.XC[i] = (np.tile(self.xx, (len(self.bpos), 1)).T +
                          self.kt[i]*GEO.pbt +
                          np.tile(self.bpos*np.tan(GEO.betab),
                                  (len(self.xx), 1)))
        # conditions (A to D) to calculate contacting lines
        # the two branches swap the roles of conditions A/B depending on
        # whether the overlap ratio epslon_beta is below or above 1
        if GEO.epslon_beta < 1:
            self.indA = np.where((self.XC >= 0)*(self.XC < self.COND_A))
            self.indB = np.where((self.XC >= self.COND_A)
                                 * (self.XC < self.COND_B))
            self.indC = np.where((self.XC >= self.COND_B)
                                 * (self.XC < self.COND_D))
            self.Lh[self.indA] = self.XC[self.indA]/np.sin(GEO.betab)
            self.Lh[self.indB] = GEO.b/np.cos(GEO.betab)
            self.Lh[self.indC] = GEO.b/np.cos(GEO.betab)\
                - (self.XC[self.indC] - self.COND_B)/np.sin(GEO.betab)
        else:
            self.indA = np.where((self.XC >= 0)*(self.XC < self.COND_B))
            self.indB = np.where((self.XC >= self.COND_B)
                                 * (self.XC < self.COND_C))
            self.indC = np.where((self.XC >= self.COND_C)
                                 * (self.XC < self.COND_D))
            self.Lh[self.indA] = self.XC[self.indA]/np.sin(GEO.betab)
            self.Lh[self.indB] = self.COND_B/np.sin(GEO.betab)
            self.Lh[self.indC] = self.COND_B/np.sin(GEO.betab)\
                - (self.XC[self.indC] - self.COND_C)/np.sin(GEO.betab)
        # cut the arrays after out of contact
        self.L = np.sum(self.Lh, axis=0)
        self.C1 = self.xx < self.COND_B
        # final x coordinate along path of contact
        self.xf = self.xx[self.C1]/self.COND_B
        self.XF = self.XC[:,self.C1,:]
        # sum of contacting lines
        self.lsum = self.L[self.C1, :]
        # number of lines simultaneously in contact
        self.lxi = self.lsum[:, 0]/GEO.b
        # x coordinate converted into radius
        self.rr1 = ((self.xx*GEO.AE + GEO.T1A)**2 + GEO.rb1**2)**(1/2)
        self.rr2 = ((GEO.T2A - self.xx*GEO.AE)**2 + GEO.rb2**2)**(1/2)
        # dimensional path of contact
        self.xd = self.xf*GEO.AE
        # curvature radius (1 - pinion, 2 - wheel)
        self.R1 = GEO.T1A + self.xd
        self.R2 = GEO.T2A - self.xd
        self.Req = 1/((1/self.R1) + (1/self.R2))/np.cos(GEO.betab)
| 46.401961 | 84 | 0.595183 |
f664e3fa65485cdd202aedee1573afa7c80c2d76 | 1,727 | py | Python | francoralite/apps/francoralite_api/serializers/advanced_search.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
] | 2 | 2021-07-26T08:29:26.000Z | 2021-07-26T08:29:27.000Z | francoralite/apps/francoralite_api/serializers/advanced_search.py | lluc/telemeta-integration | c2fb116471235674eae597abac84a7113e0f7c82 | [
"BSD-3-Clause"
] | 167 | 2018-10-20T14:34:46.000Z | 2021-06-01T10:40:55.000Z | francoralite/apps/francoralite_api/serializers/advanced_search.py | Francoralite/francoralite | f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c | [
"BSD-3-Clause"
] | 1 | 2021-06-06T12:16:49.000Z | 2021-06-06T12:16:49.000Z | # -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <artefacts.lle@gmail.com>
from rest_framework import serializers
from ..models.authority import Authority as AuthorityModel
from .authority import AuthoritySerializer
from ..models.location import Location as LocationModel
from .location_gis import LocationGisSerializer
from ..models.fond import Fond as FondModel
from .fond import FondSerializer
from ..models.mission import Mission as MissionModel
from .mission import MissionSerializer
from ..models.collection import Collection as CollectionModel
from .collection import CollectionSerializer
from ..models.item import Item as ItemModel
from .item import ItemSerializer
class AdvancedSearchSerializer(serializers.Serializer):
def to_representation(self, instance):
if isinstance(instance, AuthorityModel):
serializer = AuthoritySerializer(instance)
elif isinstance(instance, LocationModel):
serializer = LocationGisSerializer(instance)
elif isinstance(instance, FondModel):
serializer = FondSerializer(instance)
elif isinstance(instance, MissionModel):
serializer = MissionSerializer(instance)
elif isinstance(instance, CollectionModel):
serializer = CollectionSerializer(instance)
elif isinstance(instance, ItemModel):
serializer = ItemSerializer(instance)
else:
raise Exception("Not an known instance!")
data = serializer.data
data['entity'] = type(instance).__name__
return data
| 40.162791 | 74 | 0.738274 |
90f7e608be3374f2d3baf3990b5c1de1b225eb14 | 823 | py | Python | backend/app/app/api/schemas/permission.py | bigSAS/fast-api-backend-starter | 21d92632e9c9668de461dd7f40156ae098765242 | [
"MIT"
] | 1 | 2021-06-23T14:38:24.000Z | 2021-06-23T14:38:24.000Z | backend/app/app/api/schemas/permission.py | bigSAS/fast-api-backend-starter | 21d92632e9c9668de461dd7f40156ae098765242 | [
"MIT"
] | null | null | null | backend/app/app/api/schemas/permission.py | bigSAS/fast-api-backend-starter | 21d92632e9c9668de461dd7f40156ae098765242 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
from typing import Optional, List
from app.api.schemas.pagination import PaginatedModel
from enum import Enum
class Permissions(str, Enum):
    """Permission names recognised by the API.

    Subclasses ``str`` so members compare equal to, and serialize as,
    plain strings.
    """

    IS_ADMIN = "IS_ADMIN"
    IS_GROUP_MEMBER = "IS_GROUP_MEMBER"
    HAS_ACTION_ACCESS = "HAS_ACTION_ACCESS"
class PermissionBase(BaseModel):
    """Fields shared by all permission schemas."""

    name: Permissions
    data: Optional[dict] = None  # extra permission-specific parameters, if any
class PermissionCreate(PermissionBase):
    """Payload for granting a permission to a user."""

    user_id: int
    # todo: validate permission name
    # todo: validate permission data
    # ^ 1 custom validation ? pydantic docs -> l8r
class Permission(PermissionBase):
    """Permission as read from the database."""

    id: int
    user_id: int

    class Config: orm_mode = True  # allow construction from ORM objects
class PermissionsPaginated(PaginatedModel):
    """
    Permission list with pagination.
    """
    # (Docstring previously said "Note list" -- a copy/paste from another
    # schema; this model paginates Permission items.)
    items: List[Permission]

    class Meta: orm_model_class = Permission  # used by the pagination helpers
| 21.102564 | 53 | 0.72418 |
fbc59bec4b7846b7f2fd790e00f6c5c52cc3fbe9 | 5,339 | py | Python | app/apps/user/tests/test_user_api.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | [
"MIT"
] | 1 | 2021-12-10T08:43:54.000Z | 2021-12-10T08:43:54.000Z | app/apps/user/tests/test_user_api.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | [
"MIT"
] | null | null | null | app/apps/user/tests/test_user_api.py | MMartynamajchrzak/recipe-restAPI | 54ca39f7e7ff19f8cba902895ce69504a2fcaf99 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# URL names resolved once at import time.
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')


def create_user(**parameters):
    """Create and return a user via the active user model's manager."""
    return get_user_model().objects.create_user(**parameters)
class PublicUserApiTests(TestCase):
    """Tests of the public (unauthenticated) user API endpoints."""

    def setUp(self):
        # Fresh API client per test; no credentials attached.
        self.client = APIClient()

    def test_create_valid_user_success(self):
        # Test creating user with valid payload is successful
        payload = {
            'email': 'test@email.com',
            'password': 'test123',
            'name': 'test_name'
        }
        response = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        user = get_user_model().objects.get(**response.data)
        self.assertTrue(user.check_password(payload['password']))
        # Making sure password is encrypted (never echoed back in the response)
        self.assertNotIn('password', response.data)

    def test_user_exists(self):
        # Test creating a user that already exists fails
        payload = {
            'email': 'test@email.com',
            'password': 'test123',
            'name': 'test_name'
        }
        create_user(**payload)
        response = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        # Test password must be more than 5 characters
        payload = {
            'email': 'test@email.com',
            'password': 'test',
            'name': 'test_name'
        }
        response = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # Check that the user wasn't created
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        # test that the token is created for the user
        # NOTE(review): 'test' is shorter than the serializer's minimum seen in
        # test_password_too_short, but create_user() goes through the model
        # manager which presumably skips serializer validation -- confirm.
        payload = {
            'email': 'test@email.com',
            'password': 'test',
            'name': 'test_name'
        }
        create_user(**payload)
        response = self.client.post(TOKEN_URL, payload)
        self.assertIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        # Test that token is not created if invalid credentials are given
        create_user(email='test@email.com', password='test_pass')
        payload = {
            'email': 'test@email.com',
            'password': 'wrong_password'
        }
        response = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        # Test that token is not created when user doesn't exist
        payload = {
            'email': 'test@email.com',
            'password': 'test',
            'name': 'test_name'
        }
        # not creating a user
        response = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_token_missing_field(self):
        # Token endpoint rejects a request with an empty password field.
        response = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        # Auth is required to access the "me" endpoint
        response = self.client.get(ME_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserAPITests(TestCase):
    """Tests of user API endpoints that require authentication."""

    def setUp(self):
        self.user = create_user(
            email='test@user',
            password='test_pass',
            name='Name'
        )
        self.client = APIClient()
        # force_authenticate skips the token exchange for these tests.
        self.client.force_authenticate(user=self.user)

    def test_retrieve_profile_success(self):
        # Test retrieving profile for logged in user
        response = self.client.get(ME_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {
            'name': self.user.name,
            'email': self.user.email
        })

    def test_post_user_not_allowed(self):
        # Test that POST is not allowed on the "me" endpoint
        response = self.client.post(ME_URL, {})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        # Test updating the user profile for an authenticated user
        # (original comment said "unauthenticated", but setUp authenticates
        # the client -- this exercises the authenticated PATCH path)
        payload = {
            'name': 'new Name',
            'password': 'new_pass'
        }
        response = self.client.patch(ME_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, payload['name'])
        self.assertTrue(self.user.check_password(payload['password']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
82bda2141c79cc5f8d2bba9f401c18ffe86124d0 | 2,143 | py | Python | lnbits/db.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | null | null | null | lnbits/db.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | null | null | null | lnbits/db.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | null | null | null | import os
import trio
from contextlib import asynccontextmanager
from sqlalchemy import create_engine # type: ignore
from sqlalchemy_aio import TRIO_STRATEGY # type: ignore
from sqlalchemy_aio.base import AsyncConnection
from .settings import LNBITS_DATA_FOLDER
class Connection:
    """Thin async wrapper around an SQLAlchemy ``AsyncConnection`` exposing
    DB-API style ``execute``/``fetchall``/``fetchone`` helpers."""

    def __init__(self, conn: AsyncConnection):
        # The underlying sqlalchemy_aio connection; every query goes through it.
        self.conn = conn

    async def execute(self, query: str, values: tuple = ()):
        """Run ``query`` with ``values`` and return the result proxy."""
        return await self.conn.execute(query, values)

    async def fetchall(self, query: str, values: tuple = ()) -> list:
        """Run ``query`` and return every resulting row as a list."""
        cursor = await self.execute(query, values)
        return await cursor.fetchall()

    async def fetchone(self, query: str, values: tuple = ()):
        """Run ``query`` and return only the first row (or ``None``)."""
        cursor = await self.execute(query, values)
        row = await cursor.fetchone()
        await cursor.close()
        return row
class Database:
    """SQLite-backed async database helper.

    Access is serialized through a FIFO lock so that only one
    connection/transaction is active at a time.
    """

    def __init__(self, db_name: str):
        self.db_name = db_name
        db_path = os.path.join(LNBITS_DATA_FOLDER, f"{db_name}.sqlite3")
        self.engine = create_engine(f"sqlite:///{db_path}", strategy=TRIO_STRATEGY)
        self.lock = trio.StrictFIFOLock()

    @asynccontextmanager
    async def connect(self):
        """Yield a :class:`Connection` inside a transaction, holding the lock."""
        # trio locks are async context managers; this replaces the manual
        # acquire()/try/finally/release() dance with the idiomatic form.
        async with self.lock:
            async with self.engine.connect() as conn:
                async with conn.begin():
                    yield Connection(conn)

    async def fetchall(self, query: str, values: tuple = ()) -> list:
        """Run ``query`` in its own transaction and return all rows."""
        async with self.connect() as conn:
            # Delegate to Connection.fetchall instead of duplicating its body.
            return await conn.fetchall(query, values)

    async def fetchone(self, query: str, values: tuple = ()):
        """Run ``query`` in its own transaction and return the first row."""
        async with self.connect() as conn:
            return await conn.fetchone(query, values)

    async def execute(self, query: str, values: tuple = ()):
        """Run ``query`` in its own transaction and return the result proxy."""
        async with self.connect() as conn:
            return await conn.execute(query, values)

    @asynccontextmanager
    async def reuse_conn(self, conn: Connection):
        """Context manager that re-yields an already-open connection, so the
        same transaction can be threaded through helper code."""
        yield conn
df96e7f72beea8673e7fad692d82c778e230ed9b | 641 | py | Python | AcademicDealerBackend/manage.py | Acciente717/AcademicDealerBackend | 8024725f88997fa430fa92e1caa28161ffbb06f6 | [
"MIT"
] | 5 | 2019-03-10T06:57:15.000Z | 2019-03-17T03:04:40.000Z | AcademicDealerBackend/manage.py | Acciente717/AcademicDealerBackend | 8024725f88997fa430fa92e1caa28161ffbb06f6 | [
"MIT"
] | 11 | 2019-05-14T15:13:48.000Z | 2019-05-31T15:31:33.000Z | AcademicDealerBackend/manage.py | Acciente717/AcademicDealerBackend | 8024725f88997fa430fa92e1caa28161ffbb06f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AcademicDealerBackend.settings')
    try:
        # Imported lazily so the friendlier error below can fire when Django
        # is missing from the environment.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 29.136364 | 85 | 0.689548 |
cc047ac4851bcbefa374477beeb4ab0918b1919a | 20 | py | Python | pbixrefresher/__init__.py | sizer99/pbixrefresher-python | 87e6cd93b55d6101f4c0ba8ada99c9a74f33b095 | [
"MIT"
] | 100 | 2017-12-15T00:22:36.000Z | 2022-03-27T09:02:07.000Z | pbixrefresher/__init__.py | ImkeF/pbixrefresher-python | ce583dc3ead440752634bbeb25103eeb16150dc9 | [
"MIT"
] | 34 | 2017-12-19T22:43:24.000Z | 2022-03-11T08:38:59.000Z | pbixrefresher/__init__.py | ImkeF/pbixrefresher-python | ce583dc3ead440752634bbeb25103eeb16150dc9 | [
"MIT"
] | 49 | 2018-03-27T06:07:24.000Z | 2022-01-20T17:29:43.000Z | import pbixrefresher | 20 | 20 | 0.95 |
271c2356b83d4d60564fc77a7a940154496366ec | 2,512 | py | Python | django-polls/polls/views.py | LetsZiggy/Practice-Poll-Application | b352fcf2fd43fb67101d98e8b6ef8bf9ce458946 | [
"MIT"
] | null | null | null | django-polls/polls/views.py | LetsZiggy/Practice-Poll-Application | b352fcf2fd43fb67101d98e8b6ef8bf9ce458946 | [
"MIT"
] | null | null | null | django-polls/polls/views.py | LetsZiggy/Practice-Poll-Application | b352fcf2fd43fb67101d98e8b6ef8bf9ce458946 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
# def index(request):
# # template = loader.get_template("polls/index.html")
# latest_question_list = Question.objects.order_by("-pub_date")[:5]
# # output = ", ".join([q.question_text for q in latest_question_list])
# context = {
# "latest_question_list": latest_question_list,
# }
# # return HttpResponse(template.render(context, request))
# return render(request, "polls/index.html", context)
# def detail(request, question_id):
# # try:
# # question = Question.objects.get(pk=question_id)
# # except:
# # raise Http404("Question does not exist")
# question = get_object_or_404(Question, pk=question_id)
# # return HttpResponse(f"You're looking at question {question_id}")
# return render(request, "polls/detail.html", {"question": question})
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# # return HttpResponse(f"You're are looking at the results of question {question_id}")
# return render(request, "polls/results.html", {"question": question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST["choice"])
except (KeyError, Choice.DoesNotExist):
return render(
request, "polls/detail.html", {
"question": question,
"error_message": "You didn't select a choice.",
}
)
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse("polls:results", args=(question.id, )))
# return HttpResponse(f"You're voting on question {question_id}")
class IndexView(generic.ListView):
template_name = "polls/index.html"
context_object_name = "latest_question_list"
def get_queryset(self):
# return Question.objects.order_by("-pub_date")[:5]
return Question.objects.filter(pub_date__lte=timezone.now()).order_by("-pub_date")[:5]
class DetailView(generic.DetailView):
model = Question
template_name = "polls/detail.html"
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Question results page.

    Filters out questions with a future pub_date, for consistency with
    DetailView: without this, an unpublished question's results were
    reachable by guessing its URL while its detail page 404'd.
    """
    model = Question
    template_name = "polls/results.html"

    def get_queryset(self):
        # Mirror DetailView: exclude questions that aren't published yet.
        return Question.objects.filter(pub_date__lte=timezone.now())
ac9bbccbd718645c2b4543ddd2ade0f40e2b07ad | 5,055 | py | Python | cros/commands/cros_lint.py | bpsinc-native/src_third_party_chromite | b07cf18203c98a14c59819387754428e887ca164 | [
"BSD-3-Clause"
] | null | null | null | cros/commands/cros_lint.py | bpsinc-native/src_third_party_chromite | b07cf18203c98a14c59819387754428e887ca164 | [
"BSD-3-Clause"
] | null | null | null | cros/commands/cros_lint.py | bpsinc-native/src_third_party_chromite | b07cf18203c98a14c59819387754428e887ca164 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run lint checks on the specified files."""
import os
import sys
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite import cros
# File extensions routed to pylint.
PYTHON_EXTENSIONS = frozenset(['.py'])

# Note these are defined to keep in line with cpplint.py. Technically, we could
# include additional ones, but cpplint.py would just filter them out.
CPP_EXTENSIONS = frozenset(['.cc', '.cpp', '.h'])
def _GetProjectPath(path):
  """Find the absolute path of the git checkout that contains |path|.

  Falls back to |path|'s directory when it isn't inside a repo checkout.
  """
  if git.FindRepoCheckoutRoot(path):
    manifest = git.ManifestCheckout.Cached(path)
    return manifest.FindCheckoutFromPath(path).GetPath(absolute=True)
  else:
    # Maybe they're running on a file outside of a checkout.
    # e.g. cros lint ~/foo.py /tmp/test.py
    return os.path.dirname(path)
def _GetPylintGroups(paths):
  """Return a dictionary mapping pylintrc files to lists of paths.

  Each .py path is grouped under the nearest pylintrc found between it and
  its project root, falling back to chromite's default pylintrc.
  """
  groups = {}
  for path in paths:
    if path.endswith('.py'):
      path = os.path.realpath(path)
      project_path = _GetProjectPath(path)
      parent = os.path.dirname(path)
      # Walk up from the file towards the project root looking for a
      # pylintrc.  Previously |pylintrc| could be referenced while unbound
      # if |project_path| was truthy but |parent| never started with it.
      pylintrc = None
      while project_path and parent.startswith(project_path):
        candidate = os.path.join(parent, 'pylintrc')
        if os.path.isfile(candidate):
          pylintrc = candidate
          break
        parent = os.path.dirname(parent)
      if pylintrc is None:
        # No project-specific pylintrc found; use chromite's default.
        pylintrc = os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc')
      groups.setdefault(pylintrc, []).append(path)
  return groups
def _GetPythonPath(paths):
  """Return the list of Python library paths to use when linting |paths|."""
  return sys.path + [
      # Add the Portage installation inside the chroot to the Python path.
      # This ensures that scripts that need to import portage can do so.
      os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib', 'portage',
                   'pym'),
      # Scripts outside of chromite expect the scripts in src/scripts/lib to
      # be importable.
      os.path.join(constants.CROSUTILS_DIR, 'lib'),
      # Allow platform projects to be imported by name (e.g. crostestutils).
      os.path.join(constants.SOURCE_ROOT, 'src', 'platform'),
      # Ideally we'd modify meta_path in pylint to handle our virtual chromite
      # module, but that's not possible currently.  We'll have to deal with
      # that at some point if we want `cros lint` to work when the dir is not
      # named 'chromite'.
      constants.SOURCE_ROOT,
      # Also allow scripts to import from their current directory.
  ] + list(set(os.path.dirname(x) for x in paths))
def _CpplintFiles(files, debug):
  """Run cpplint over |files|; returns True if any lint errors were found.

  (The previous docstring claimed True meant success, but the value is
  ``returncode != 0`` -- callers treat it as an error flag.)
  """
  cmd = ['cpplint.py'] + files
  res = cros_build_lib.RunCommand(cmd,
                                  error_code_ok=True,
                                  print_cmd=debug)
  return res.returncode != 0
def _PylintFiles(files, debug):
  """Run pylint over |files|; returns True if any lint errors were found.

  (The previous docstring claimed True meant success, but |errors| is set
  whenever a pylint invocation exits non-zero.)
  """
  errors = False
  for pylintrc, paths in sorted(_GetPylintGroups(files).items()):
    paths = sorted(list(set([os.path.realpath(x) for x in paths])))
    cmd = ['pylint', '--rcfile=%s' % pylintrc] + paths
    extra_env = {'PYTHONPATH': ':'.join(_GetPythonPath(paths))}
    res = cros_build_lib.RunCommand(cmd, extra_env=extra_env,
                                    error_code_ok=True,
                                    print_cmd=debug)
    if res.returncode != 0:
      errors = True

  return errors
def _BreakoutFilesByLinter(files):
  """Map each linter function to the list of files it should lint."""
  map_to_return = {}

  for f in files:
    extension = os.path.splitext(f)[1]
    if extension in PYTHON_EXTENSIONS:
      linter = _PylintFiles
    elif extension in CPP_EXTENSIONS:
      linter = _CpplintFiles
    else:
      # Unrecognized extension; nothing lints it.
      continue
    map_to_return.setdefault(linter, []).append(f)

  return map_to_return
@cros.CommandDecorator('lint')
class LintCommand(cros.CrosCommand):
  """Run lint checks on the specified files."""

  EPILOG = """
Right now, only supports cpplint and pylint. We may also in the future
run other checks (e.g. pyflakes, etc.)
"""

  @classmethod
  def AddParser(cls, parser):
    super(LintCommand, cls).AddParser(parser)
    parser.add_argument('files', help='Files to lint', nargs='*')

  def Run(self):
    files = self.options.files
    if not files:
      # Running with no arguments is allowed to make the repo upload hook
      # simple, but print a warning so that if someone runs this manually
      # they are aware that nothing was linted.
      cros_build_lib.Warning('No files provided to lint. Doing nothing.')

    errors = False
    linter_map = _BreakoutFilesByLinter(files)
    for linter, lint_files in linter_map.iteritems():
      # Accumulate failures.  Previously `errors = linter(...)` was
      # re-assigned each iteration, so a later, clean linter could mask an
      # earlier linter's failure and the command would exit 0 incorrectly.
      # (The loop variable also shadowed |files|.)
      errors |= linter(lint_files, self.options.debug)

    if errors:
      sys.exit(1)
8b320feb707780faced14daf3e3399d279d4fcea | 7,773 | py | Python | src/livestreamer/plugin/plugin.py | sn4kebite/livestreamer | ab2b48eca3830ceb826408ccfb8056e4bc9c380a | [
"BSD-2-Clause",
"MIT"
] | null | null | null | src/livestreamer/plugin/plugin.py | sn4kebite/livestreamer | ab2b48eca3830ceb826408ccfb8056e4bc9c380a | [
"BSD-2-Clause",
"MIT"
] | null | null | null | src/livestreamer/plugin/plugin.py | sn4kebite/livestreamer | ab2b48eca3830ceb826408ccfb8056e4bc9c380a | [
"BSD-2-Clause",
"MIT"
] | null | null | null | import operator
import re
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# Hard-coded weights for named (non-numeric) quality labels, keyed by the
# group the weight belongs to.
QUALITY_WEIGTHS_EXTRA = {
    "other": {
        "live": 1080,
    },
    "tv": {
        "hd": 1080,
        "sd": 576,
    },
    "quality": {
        "ehq": 720,
        "hq": 576,
        "sq": 360,
    },
}

# Comparison operators accepted in stream sorting filter expressions.
FILTER_OPERATORS = {
    "<": operator.lt,
    "<=": operator.le,
    ">": operator.gt,
    ">=": operator.ge,
}


def stream_weight(stream):
    """Rank a stream quality name.

    Returns a ``(weight, group)`` tuple where a larger weight means higher
    quality and ``group`` names the scale the weight lives on ("pixels",
    "bitrate", "tv", ... or "none" when the name is unrecognised).
    """
    # Named qualities ("live", "hd", "hq", ...) use hard-coded weights.
    for group, weights in QUALITY_WEIGTHS_EXTRA.items():
        if stream in weights:
            return weights[stream], group

    match = re.match(r"^(\d+)([k]|[p])?([\+])?$", stream)
    if not match:
        return 0, "none"

    value, unit, plus = match.groups()
    if unit == "k":
        # FIXME: This is a crude attempt at making a bitrate's
        # weight end up similar to the weight of a resolution.
        # Someone who knows math, please fix.
        return int(value) / 2.8, "bitrate"
    elif unit == "p":
        weight = int(value)
        if plus == "+":
            weight += 1
        return weight, "pixels"

    # Bare number with no unit suffix: not rankable.
    return 0, "none"
def iterate_streams(streams):
    """Yield ``(name, stream)`` pairs, flattening any list-valued entries."""
    for name, stream in streams.items():
        substreams = stream if isinstance(stream, list) else [stream]
        for substream in substreams:
            yield (name, substream)
def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except ValueError:
prio = 99
return prio
def stream_sorting_filter(expr, stream_weight):
    """Build a predicate from a filter expression like ``>=480p``.

    The returned function keeps a quality name when it does NOT match the
    expression (it is used to exclude streams from best/worst ranking).
    """
    match = re.match(r"(?P<op><=|>=|<|>)?(?P<value>[\w\+]+)", expr)
    if match is None:
        raise PluginError("Invalid filter expression: {0}".format(expr))

    op_name, value = match.group("op", "value")
    # No explicit operator means an equality test.
    op = FILTER_OPERATORS.get(op_name, operator.eq)
    filter_weight, filter_group = stream_weight(value)

    def func(quality):
        weight, group = stream_weight(quality)
        if group != filter_group:
            # Different scale (e.g. pixels vs bitrate): never excluded.
            return True
        return not op(weight, filter_weight)

    return func
class Plugin(object):
    """A plugin can retrieve stream information from the URL specified.

    :param url: URL that the plugin will operate on
    """

    # Class-level state populated by bind() when the session loads the plugin.
    cache = None
    logger = None
    module = "unknown"
    options = Options()
    session = None

    @classmethod
    def bind(cls, session, module):
        """Attach this plugin class to a session and give it a module name."""
        cls.cache = Cache(filename="plugin-cache.json",
                          key_prefix=module)
        cls.logger = session.logger.new_module("plugin." + module)
        cls.module = module
        cls.session = session

    def __init__(self, url):
        self.url = url

    @classmethod
    def can_handle_url(cls, url):
        """Return whether this plugin handles *url*; subclasses must override."""
        raise NotImplementedError

    @classmethod
    def set_option(cls, key, value):
        cls.options.set(key, value)

    @classmethod
    def get_option(cls, key):
        return cls.options.get(key)

    @classmethod
    def stream_weight(cls, stream):
        # Default quality ranking; delegates to the module-level helper so
        # plugins can override with their own weighting.
        return stream_weight(stream)

    @classmethod
    def default_stream_types(cls, streams):
        """Return the default stream-type priority list, extended with any
        extra types found in *streams* (in order of appearance)."""
        stream_types = ["rtmp", "hls", "hds", "http"]

        for name, stream in iterate_streams(streams):
            stream_type = type(stream).shortname()

            if stream_type not in stream_types:
                stream_types.append(stream_type)

        return stream_types

    def get_streams(self, stream_types=None, sorting_excludes=None):
        """Attempts to extract available streams.

        Returns a :class:`dict` containing the streams, where the key is
        the name of the stream, most commonly the quality and the value
        is a :class:`Stream` object.

        The result can contain the synonyms **best** and **worst** which
        points to the streams which are likely to be of highest and
        lowest quality respectively.

        If multiple streams with the same name are found, the order of
        streams specified in *stream_types* will determine which stream
        gets to keep the name while the rest will be renamed to
        "<name>_<stream type>".

        The synonyms can be fine tuned with the *sorting_excludes*
        parameter. This can be either of these types:

            - A list of filter expressions in the format
              *[operator]<value>*. For example the filter ">480p" will
              exclude streams ranked higher than "480p" from the list
              used in the synonyms ranking. Valid operators are >, >=, <
              and <=. If no operator is specified then equality will be
              tested.

            - A function that is passed to filter() with a list of
              stream names as input.


        :param stream_types: A list of stream types to return.
        :param sorting_excludes: Specify which streams to exclude from
                                 the best/worst synonyms.

        .. versionchanged:: 1.4.2
           Added *priority* parameter.

        .. versionchanged:: 1.5.0
           Renamed *priority* to *stream_types* and changed behaviour
           slightly.

        .. versionchanged:: 1.5.0
           Added *sorting_excludes* parameter.

        .. versionchanged:: 1.6.0
           *sorting_excludes* can now be a list of filter expressions
           or a function that is passed to filter().
        """
        try:
            ostreams = self._get_streams()
        except NoStreamsError:
            return {}

        if not ostreams:
            return {}

        streams = {}

        if stream_types is None:
            stream_types = self.default_stream_types(ostreams)

        # Add streams depending on stream type and priorities.
        # Sorting by type priority means that when two streams share a name,
        # the higher-priority type is seen first and keeps the plain name.
        sorted_streams = sorted(iterate_streams(ostreams),
                                key=partial(stream_type_priority,
                                            stream_types))

        for name, stream in sorted_streams:
            stream_type = type(stream).shortname()

            if stream_type not in stream_types:
                continue

            if name in streams:
                # Name collision: disambiguate with the stream type suffix.
                name = "{0}_{1}".format(name, stream_type)

            # Validate stream name and discard the stream if it's bad.
            match = re.match("([A-z0-9_+]+)", name)

            if match:
                name = match.group(1)
            else:
                self.logger.debug("The stream '{0}' has been ignored "
                                  "since it is badly named.", name)
                continue

            # Force lowercase name and replace space with underscore.
            streams[name.lower()] = stream

        # Create the best/worst synonyms.
        # A single unrankable stream (weight 0) still counts via the
        # `len(streams) == 1 and 1` fallback.
        stream_weight_only = lambda s: (self.stream_weight(s)[0] or
                                        (len(streams) == 1 and 1))
        stream_names = filter(stream_weight_only, streams.keys())
        sorted_streams = sorted(stream_names, key=stream_weight_only)

        if isinstance(sorting_excludes, list):
            for expr in sorting_excludes:
                filter_func = stream_sorting_filter(expr, self.stream_weight)
                sorted_streams = list(filter(filter_func, sorted_streams))
        elif callable(sorting_excludes):
            sorted_streams = list(filter(sorting_excludes, sorted_streams))

        if len(sorted_streams) > 0:
            best = sorted_streams[-1]
            worst = sorted_streams[0]
            streams["best"] = streams[best]
            streams["worst"] = streams[worst]

        return streams

    def _get_streams(self):
        """Plugin-specific stream extraction; subclasses must override."""
        raise NotImplementedError


__all__ = ["Plugin"]
| 28.368613 | 77 | 0.583044 |
5ea42361f474373f63ad52baa118a6d36ac06605 | 17,735 | py | Python | libs/librosa/onset.py | Tartar-san/montage.ai | c699dfaf300fdca69f3dbc5d63fae9f00a26ca40 | [
"Apache-2.0"
] | 3 | 2018-07-30T01:39:25.000Z | 2021-07-08T16:50:17.000Z | libs/librosa/onset.py | Tartar-san/montage.ai | c699dfaf300fdca69f3dbc5d63fae9f00a26ca40 | [
"Apache-2.0"
] | null | null | null | libs/librosa/onset.py | Tartar-san/montage.ai | c699dfaf300fdca69f3dbc5d63fae9f00a26ca40 | [
"Apache-2.0"
] | 1 | 2021-07-08T17:01:08.000Z | 2021-07-08T17:01:08.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Onset detection
===============
.. autosummary::
:toctree: generated/
onset_detect
onset_backtrack
onset_strength
onset_strength_multi
"""
import numpy as np
import scipy
from . import cache
from . import core
from . import util
from .feature.spectral import melspectrogram
from .util.exceptions import ParameterError
__all__ = ['onset_detect',
'onset_strength',
'onset_strength_multi',
'onset_backtrack']
def onset_detect(y=None, sr=22050, onset_envelope=None, hop_length=512,
                 backtrack=False, energy=None,
                 units='frames', **kwargs):
    """Basic onset detector. Locate note onset events by picking peaks in an
    onset strength envelope.

    The `peak_pick` parameters were chosen by large-scale hyper-parameter
    optimization over the dataset provided by [1]_.

    .. [1] https://github.com/CPJKU/onset_db

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series

    sr : number > 0 [scalar]
        sampling rate of `y`

    onset_envelope : np.ndarray [shape=(m,)]
        (optional) pre-computed onset strength envelope

    hop_length : int > 0 [scalar]
        hop length (in samples)

    units : {'frames', 'samples', 'time'}
        The units to encode detected onset events in.
        By default, 'frames' are used.

    backtrack : bool
        If `True`, detected onset events are backtracked to the nearest
        preceding minimum of `energy`.

        This is primarily useful when using onsets as slice points for segmentation.

    energy : np.ndarray [shape=(m,)] (optional)
        An energy function to use for backtracking detected onset events.
        If none is provided, then `onset_envelope` is used.

    kwargs : additional keyword arguments
        Additional parameters for peak picking.

        See `librosa.util.peak_pick` for details.

    Returns
    -------
    onsets : np.ndarray [shape=(n_onsets,)]
        estimated positions of detected onsets, in whichever units
        are specified. By default, frame indices.

        .. note::
            If no onset strength could be detected, onset_detect returns
            an empty list.

    Raises
    ------
    ParameterError
        if neither `y` nor `onsets` are provided

        or if `units` is not one of 'frames', 'samples', or 'time'

    See Also
    --------
    onset_strength : compute onset strength per-frame
    onset_backtrack : backtracking onset events
    librosa.util.peak_pick : pick peaks from a time series

    Examples
    --------
    Get onset times from a signal

    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      offset=30, duration=2.0)
    >>> onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
    >>> librosa.frames_to_time(onset_frames, sr=sr)
    array([ 0.07 ,  0.395,  0.511,  0.627,  0.766,  0.975,
            1.207,  1.324,  1.44 ,  1.788,  1.881])

    Or use a pre-computed onset envelope

    >>> o_env = librosa.onset.onset_strength(y, sr=sr)
    >>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)
    >>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)

    >>> import matplotlib.pyplot as plt
    >>> D = librosa.stft(y)
    >>> plt.figure()
    >>> ax1 = plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
    ...                          x_axis='time', y_axis='log')
    >>> plt.title('Power spectrogram')
    >>> plt.subplot(2, 1, 2, sharex=ax1)
    >>> plt.plot(times, o_env, label='Onset strength')
    >>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
    ...            linestyle='--', label='Onsets')
    >>> plt.axis('tight')
    >>> plt.legend(frameon=True, framealpha=0.75)
    """

    # First, get the frame->beat strength profile if we don't already have one
    if onset_envelope is None:
        if y is None:
            raise ParameterError('y or onset_envelope must be provided')

        onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)

    # Shift onset envelope up to be non-negative
    # (a common normalization step to make the threshold more consistent).
    # Use out-of-place arithmetic: the previous `-=`/`/=` mutated a
    # caller-provided onset_envelope array in place.
    onset_envelope = onset_envelope - onset_envelope.min()

    # Do we have any onsets to grab?
    if not onset_envelope.any():
        # `dtype=int` rather than the `np.int` alias, which was deprecated in
        # NumPy 1.20 and removed in 1.24.
        return np.array([], dtype=int)

    # Normalize onset strength function to [0, 1] range
    onset_envelope = onset_envelope / onset_envelope.max()

    # These parameter settings found by large-scale search
    kwargs.setdefault('pre_max', 0.03*sr//hop_length)       # 30ms
    kwargs.setdefault('post_max', 0.00*sr//hop_length + 1)  # 0ms
    kwargs.setdefault('pre_avg', 0.10*sr//hop_length)       # 100ms
    kwargs.setdefault('post_avg', 0.10*sr//hop_length + 1)  # 100ms
    kwargs.setdefault('wait', 0.03*sr//hop_length)          # 30ms
    kwargs.setdefault('delta', 0.07)

    # Peak pick the onset envelope
    onsets = util.peak_pick(onset_envelope, **kwargs)

    # Optionally backtrack the events
    if backtrack:
        if energy is None:
            energy = onset_envelope

        onsets = onset_backtrack(onsets, energy)

    if units == 'frames':
        pass
    elif units == 'samples':
        onsets = core.frames_to_samples(onsets, hop_length=hop_length)
    elif units == 'time':
        onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
    else:
        raise ParameterError('Invalid unit type: {}'.format(units))

    return onsets
def onset_strength(y=None, sr=22050, S=None, lag=1, max_size=1,
                   detrend=False, center=True,
                   feature=None, aggregate=None,
                   centering=None,
                   **kwargs):
    """Compute a spectral flux onset strength envelope.
    Onset strength at time `t` is determined by:
    `mean_f max(0, S[f, t] - ref_S[f, t - lag])`
    where `ref_S` is `S` after local max filtering along the frequency
    axis [1]_.
    By default, if a time series `y` is provided, S will be the
    log-power Mel spectrogram.
    .. [1] Böck, Sebastian, and Gerhard Widmer.
           "Maximum filter vibrato suppression for onset detection."
           16th International Conference on Digital Audio Effects,
           Maynooth, Ireland. 2013.
    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time-series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, m)]
        pre-computed (log-power) spectrogram
    lag : int > 0
        time lag for computing differences
    max_size : int > 0
        size (in frequency bins) of the local max filter.
        set to `1` to disable filtering.
    detrend : bool [scalar]
        Filter the onset strength to remove the DC component
    center : bool [scalar]
        Shift the onset function by `n_fft / (2 * hop_length)` frames
    feature : function
        Function for computing time-series chunk_features, eg, scaled spectrograms.
        By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
    aggregate : function
        Aggregation function to use when combining onsets
        at different frequency bins.
        Default: `np.mean`
    centering : deprecated
        .. warning:: This parameter is deprecated and has no effect;
            use `center` instead.  A `DeprecationWarning` is raised
            when a non-None value is passed.
    kwargs : additional keyword arguments
        Additional parameters to `feature()`, if `S` is not provided.
    Returns
    -------
    onset_envelope : np.ndarray [shape=(m,)]
        vector containing the onset strength envelope
    Raises
    ------
    ParameterError
        if neither `(y, sr)` nor `S` are provided
        or if `lag` or `max_size` are not positive integers
    See Also
    --------
    onset_detect
    onset_strength_multi
    Examples
    --------
    First, load some audio and plot the spectrogram
    >>> import matplotlib.pyplot as plt
    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      duration=10.0)
    >>> D = librosa.stft(y)
    >>> times = librosa.frames_to_time(np.arange(D.shape[1]))
    >>> plt.figure()
    >>> ax1 = plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
    ...                          y_axis='log', x_axis='time')
    >>> plt.title('Power spectrogram')
    Construct a standard onset function
    >>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    >>> plt.subplot(2, 1, 2, sharex=ax1)
    >>> plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
    ...          label='Mean (mel)')
    Median aggregation, and custom mel options
    >>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
    ...                                          aggregate=np.median,
    ...                                          fmax=8000, n_mels=256)
    >>> plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
    ...          label='Median (custom mel)')
    Constant-Q spectrogram instead of Mel
    >>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
    ...                                          feature=librosa.cqt)
    >>> plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
    ...          label='Mean (CQT)')
    >>> plt.legend(frameon=True, framealpha=0.75)
    >>> plt.ylabel('Normalized strength')
    >>> plt.yticks([])
    >>> plt.axis('tight')
    >>> plt.tight_layout()
    """
    # FIX: `centering` was accepted but silently ignored, which could make
    # callers believe they had changed the alignment behavior.  Warn so they
    # migrate to `center`; the computed result is unchanged.
    if centering is not None:
        import warnings
        warnings.warn('The `centering` parameter of onset_strength() is '
                      'deprecated and has no effect; use `center` instead.',
                      DeprecationWarning)
    # Delegate to the multi-channel implementation with a single channel
    # spanning all frequency bands...
    odf_all = onset_strength_multi(y=y,
                                   sr=sr,
                                   S=S,
                                   lag=lag,
                                   max_size=max_size,
                                   detrend=detrend,
                                   center=center,
                                   feature=feature,
                                   aggregate=aggregate,
                                   channels=None,
                                   **kwargs)
    # ...which returns shape (1, m); flatten to a single envelope.
    return odf_all[0]
def onset_backtrack(events, energy):
    '''Backtrack detected onset events to the nearest preceding local
    minimum of an energy function.
    Rolling each onset back from its detected peak to the preceding energy
    minimum is most useful when onsets serve as slice points for
    segmentation, as described by [1]_.
    .. [1] Jehan, Tristan.
           "Creating music by listening"
           Doctoral dissertation
           Massachusetts Institute of Technology, 2005.
    Parameters
    ----------
    events : np.ndarray, dtype=int
        List of onset event frame indices, as computed by `onset_detect`
    energy : np.ndarray, shape=(m,)
        An energy function
    Returns
    -------
    events_backtracked : np.ndarray, shape=events.shape
        The input events matched to nearest preceding minima of `energy`.
    '''
    # An interior sample is a local minimum when it is non-increasing
    # coming from the left and strictly below its right neighbor.
    left = energy[:-2]
    mid = energy[1:-1]
    right = energy[2:]
    is_minimum = np.logical_and(mid <= left, mid < right)
    # The +1 compensates for the one-sample offset introduced by slicing;
    # fix_frames guarantees frame 0 is present so every event has some
    # preceding minimum to match against.
    minima = util.fix_frames(np.flatnonzero(is_minimum) + 1, x_min=0)
    # Snap each event to the nearest minimum at or before it.
    return minima[util.match_events(events, minima, right=False)]
@cache(level=30)
def onset_strength_multi(y=None, sr=22050, S=None, lag=1, max_size=1,
                         detrend=False, center=True, feature=None,
                         aggregate=None, channels=None, **kwargs):
    """Compute a spectral flux onset strength envelope across multiple channels.
    Onset strength for channel `i` at time `t` is determined by:
    `mean_{f in channels[i]} max(0, S[f, t] - ref_S[f, t - lag])`
    where `ref_S` is `S` after local max filtering along the frequency
    axis (an identity transform when `max_size == 1`).
    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time-series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, m)]
        pre-computed (log-power) spectrogram
    lag : int > 0
        time lag for computing differences
    max_size : int > 0
        size (in frequency bins) of the local max filter.
        set to `1` to disable filtering.
    detrend : bool [scalar]
        Filter the onset strength to remove the DC component
    center : bool [scalar]
        Shift the onset function by `n_fft / (2 * hop_length)` frames
    feature : function
        Function for computing time-series chunk_features, eg, scaled spectrograms.
        By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
    aggregate : function
        Aggregation function to use when combining onsets
        at different frequency bins.
        Default: `np.mean`
    channels : list or None
        Array of channel boundaries or slice objects.
        If `None`, then a single channel is generated to span all bands.
    kwargs : additional keyword arguments
        Additional parameters to `feature()`, if `S` is not provided.
    Returns
    -------
    onset_envelope : np.ndarray [shape=(n_channels, m)]
        array containing the onset strength envelope for each specified channel
    Raises
    ------
    ParameterError
        if neither `(y, sr)` nor `S` are provided,
        or if `lag` or `max_size` are not positive integers
    See Also
    --------
    onset_strength
    Notes
    -----
    This function caches at level 30.
    Examples
    --------
    First, load some audio and plot the spectrogram
    >>> import matplotlib.pyplot as plt
    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      duration=10.0)
    >>> D = librosa.stft(y)
    >>> plt.figure()
    >>> plt.subplot(2, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
    ...                          y_axis='log')
    >>> plt.title('Power spectrogram')
    Construct a standard onset function over four sub-bands
    >>> onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
    ...                                                     channels=[0, 32, 64, 96, 128])
    >>> plt.subplot(2, 1, 2)
    >>> librosa.display.specshow(onset_subbands, x_axis='time')
    >>> plt.ylabel('Sub-bands')
    >>> plt.title('Sub-band onset strength')
    """
    # Default feature: log-power mel spectrogram capped at 11.025 kHz.
    if feature is None:
        feature = melspectrogram
        kwargs.setdefault('fmax', 11025.0)
    if aggregate is None:
        aggregate = np.mean
    # NOTE: isinstance(x, int) rejects numpy integer types here --
    # presumably intentional, but worth confirming for callers passing
    # np.int64 lags.
    if lag < 1 or not isinstance(lag, int):
        raise ParameterError('lag must be a positive integer')
    if max_size < 1 or not isinstance(max_size, int):
        raise ParameterError('max_size must be a positive integer')
    # First, compute mel spectrogram
    if S is None:
        S = np.abs(feature(y=y, sr=sr, **kwargs))
        # Convert to dBs
        S = core.power_to_db(S)
    # Retrieve the n_fft and hop_length,
    # or default values for onsets if not provided
    n_fft = kwargs.get('n_fft', 2048)
    hop_length = kwargs.get('hop_length', 512)
    # Ensure that S is at least 2-d
    S = np.atleast_2d(S)
    # Compute the reference spectrogram.
    # Efficiency hack: skip filtering step and pass by reference
    # if max_size will produce a no-op.
    if max_size == 1:
        ref_spec = S
    else:
        ref_spec = scipy.ndimage.maximum_filter1d(S, max_size, axis=0)
    # Compute difference to the reference, spaced by lag
    onset_env = S[:, lag:] - ref_spec[:, :-lag]
    # Discard negatives (decreasing amplitude)
    onset_env = np.maximum(0.0, onset_env)
    # Aggregate within channels; with channels=None a single full-band
    # channel is used and `sync` pads to preserve the frame count.
    pad = True
    if channels is None:
        channels = [slice(None)]
    else:
        pad = False
    onset_env = util.sync(onset_env, channels,
                          aggregate=aggregate,
                          pad=pad,
                          axis=0)
    # compensate for lag by left-padding the envelope
    pad_width = lag
    if center:
        # Counter-act framing effects. Shift the onsets by n_fft / hop_length
        pad_width += n_fft // (2 * hop_length)
    onset_env = np.pad(onset_env, ([0, 0], [int(pad_width), 0]),
                       mode='constant')
    # remove the DC component
    if detrend:
        onset_env = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99],
                                         onset_env, axis=-1)
    # Trim to match the input duration (only needed when centering shifted
    # the envelope past the original frame count)
    if center:
        onset_env = onset_env[:, :S.shape[1]]
    return onset_env
| 31.333922 | 90 | 0.595038 |
fde7325d37cfae934d1b3cb7ea9bfceb0b660a2e | 4,776 | py | Python | src/azure-cli/azure/cli/command_modules/acs/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 3,287 | 2016-07-26T17:34:33.000Z | 2022-03-31T09:52:13.000Z | src/azure-cli/azure/cli/command_modules/acs/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 19,206 | 2016-07-26T07:04:42.000Z | 2022-03-31T23:57:09.000Z | src/azure-cli/azure/cli/command_modules/acs/_client_factory.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 2,575 | 2016-07-26T06:44:40.000Z | 2022-03-31T22:56:06.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.parameters import get_resources_in_subscription
from azure.cli.core.profiles import ResourceType
from azure.mgmt.msi import ManagedServiceIdentityClient
from knack.util import CLIError
def cf_compute_service(cli_ctx, *_):
    """Client factory: management client for Compute resources."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE)
def cf_container_services(cli_ctx, *_):
    """Client factory: the container_services operation group."""
    return get_container_service_client(cli_ctx).container_services
def cf_managed_clusters(cli_ctx, *_):
    """Client factory: the managed_clusters operation group."""
    return get_container_service_client(cli_ctx).managed_clusters
def cf_agent_pools(cli_ctx, *_):
    """Client factory: the agent_pools operation group."""
    return get_container_service_client(cli_ctx).agent_pools
def cf_openshift_managed_clusters(cli_ctx, *_):
    """Client factory: the open_shift_managed_clusters operation group
    (served by the preview-pinned OSA client)."""
    return get_osa_container_service_client(cli_ctx).open_shift_managed_clusters
def cf_resource_groups(cli_ctx, subscription_id=None):
    """Client factory: resource_groups operations, optionally scoped to a
    specific subscription."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                   subscription_id=subscription_id).resource_groups
def cf_resources(cli_ctx, subscription_id=None):
    """Client factory: generic resources operations, optionally scoped to a
    specific subscription."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                   subscription_id=subscription_id).resources
def cf_container_registry_service(cli_ctx, subscription_id=None):
    """Client factory: Container Registry management client, optionally
    scoped to a specific subscription."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_CONTAINERREGISTRY,
                                   subscription_id=subscription_id)
def get_auth_management_client(cli_ctx, scope=None, **_):
    """Client factory for the Authorization management client.
    :param scope: optional ARM scope (e.g. ``/subscriptions/<id>`` or
        ``/subscriptions/<id>/resourceGroups/<rg>/...``).  When provided, the
        subscription id is parsed out of it and used for the client;
        otherwise the CLI's current subscription applies.
    :raises CLIError: if ``scope`` is provided but contains no subscription id.
    """
    import re
    subscription_id = None
    if scope:
        # FIX: accept both a bare subscription-level scope
        # ("/subscriptions/<id>") and any longer scope beneath it.  The
        # previous pattern '/subscriptions/(?P<subscription>[^/]*)/' required
        # a trailing slash and therefore rejected subscription-level scopes;
        # it also matched an empty subscription id.
        matched = re.match(r'/subscriptions/(?P<subscription>[^/]+)(/|$)', scope)
        if matched:
            subscription_id = matched.groupdict()['subscription']
        else:
            raise CLIError("{} does not contain subscription Id.".format(scope))
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION, subscription_id=subscription_id)
def get_container_service_client(cli_ctx, **_):
    """Client factory: full ContainerService management client (default API version)."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_CONTAINERSERVICE)
def get_osa_container_service_client(cli_ctx, **_):
    """Client factory: ContainerService client pinned to the OSA preview API version."""
    from azure.mgmt.containerservice import ContainerServiceClient
    return get_mgmt_service_client(cli_ctx, ContainerServiceClient, api_version='2019-09-30-preview')
def get_graph_rbac_management_client(cli_ctx, **_):
    """Build a GraphRbacManagementClient authenticated for the current
    profile against the cloud's AD Graph endpoint."""
    from azure.cli.core.commands.client_factory import configure_common_settings
    from azure.cli.core._profile import Profile
    from azure.graphrbac import GraphRbacManagementClient
    graph_resource = cli_ctx.cloud.endpoints.active_directory_graph_resource_id
    profile = Profile(cli_ctx=cli_ctx)
    credentials, _, tenant_id = profile.get_login_credentials(
        resource=graph_resource)
    rbac_client = GraphRbacManagementClient(
        credentials, tenant_id,
        base_url=graph_resource)
    configure_common_settings(cli_ctx, rbac_client)
    return rbac_client
def get_resource_by_name(cli_ctx, resource_name, resource_type):
    """Returns the ARM resource in the current subscription with resource_name.
    :param str resource_name: The name of resource
    :param str resource_type: The type of resource
    """
    # Case-insensitive name match over all resources of the requested type.
    candidates = [res for res in get_resources_in_subscription(cli_ctx, resource_type)
                  if res.name.lower() == resource_name.lower()]
    if len(candidates) == 1:
        return candidates[0]
    if len(candidates) > 1:
        raise CLIError(
            "More than one resources with type '{}' are found with name '{}'.".format(
                resource_type, resource_name))
    # Nothing matched: try to name the subscription in the error message,
    # falling back to a generic message when it cannot be resolved.
    from azure.cli.core._profile import Profile
    profile = Profile(cli_ctx=cli_ctx)
    message = "The resource with name '{}' and type '{}' could not be found".format(
        resource_name, resource_type)
    try:
        subscription = profile.get_subscription(
            cli_ctx.data['subscription_id'])
        raise CLIError(
            "{} in subscription '{} ({})'.".format(message, subscription['name'], subscription['id']))
    except (KeyError, TypeError):
        raise CLIError(
            "{} in the current subscription.".format(message))
def get_msi_client(cli_ctx, subscription_id=None):
    """Client factory: Managed Service Identity client, optionally scoped to
    a specific subscription."""
    return get_mgmt_service_client(cli_ctx, ManagedServiceIdentityClient,
                                   subscription_id=subscription_id)
| 39.8 | 109 | 0.701424 |
82f8ecd9940de71724d19dde5fe9755177923b59 | 4,353 | py | Python | tests/unit/modules/test_win_snmp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | tests/unit/modules/test_win_snmp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | tests/unit/modules/test_win_snmp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | # -*- coding: utf-8 -*-
"""
:synopsis: Unit Tests for Windows SNMP Module 'module.win_snmp'
:platform: Windows
:maturity: develop
.. versionadded:: 2017.7.0
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.win_snmp as win_snmp
from salt.exceptions import CommandExecutionError
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
# Shared fixture: expected community-name -> permission mapping (the tests
# below feed registry vdata 16 and expect it to be reported as "Read Create").
COMMUNITY_NAMES = {"TestCommunity": "Read Create"}
class WinSnmpTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.modules.win_snmp
    """
    def setup_loader_modules(self):
        # win_snmp needs no pre-populated loader dunders here; the individual
        # tests patch the required __utils__ entries themselves.
        return {win_snmp: {}}
    def test_get_agent_service_types(self):
        """
        Test - Get the sysServices types that can be configured.
        """
        self.assertIsInstance(win_snmp.get_agent_service_types(), list)
    def test_get_permission_types(self):
        """
        Test - Get the permission types that can be configured for communities.
        """
        self.assertIsInstance(win_snmp.get_permission_types(), list)
    def test_get_auth_traps_enabled(self):
        """
        Test - Determine whether the host is configured to send authentication traps.
        """
        # Registry read yields vdata 1, which should report traps as enabled.
        mock_value = MagicMock(return_value={"vdata": 1})
        with patch.dict(win_snmp.__utils__, {"reg.read_value": mock_value}):
            self.assertTrue(win_snmp.get_auth_traps_enabled())
    def test_set_auth_traps_enabled(self):
        """
        Test - Manage the sending of authentication traps.
        """
        mock_value = MagicMock(return_value=True)
        kwargs = {"status": True}
        # The getter is patched too -- presumably the setter verifies its
        # work by re-reading the value (confirm against win_snmp source).
        with patch.dict(win_snmp.__utils__, {"reg.set_value": mock_value}), patch(
            "salt.modules.win_snmp.get_auth_traps_enabled", MagicMock(return_value=True)
        ):
            self.assertTrue(win_snmp.set_auth_traps_enabled(**kwargs))
    def test_get_community_names(self):
        """
        Test - Get the current accepted SNMP community names and their permissions.
        """
        # Non-GPO path: key_exists is False, and vdata 16 must be translated
        # into the "Read Create" permission string.
        mock_ret = MagicMock(return_value=[{"vdata": 16, "vname": "TestCommunity"}])
        mock_false = MagicMock(return_value=False)
        with patch.dict(
            win_snmp.__utils__,
            {"reg.list_values": mock_ret, "reg.key_exists": mock_false},
        ):
            self.assertEqual(win_snmp.get_community_names(), COMMUNITY_NAMES)
    def test_get_community_names_gpo(self):
        """
        Test - Get the current accepted SNMP community names and their permissions
        when the communities are managed through Group Policy.
        """
        # GPO path: key_exists is True, entries carry the community name in
        # vdata, and permissions are reported as "Managed by GPO".
        mock_ret = MagicMock(return_value=[{"vdata": "TestCommunity", "vname": 1}])
        mock_false = MagicMock(return_value=True)
        with patch.dict(
            win_snmp.__utils__,
            {"reg.list_values": mock_ret, "reg.key_exists": mock_false},
        ):
            self.assertEqual(
                win_snmp.get_community_names(), {"TestCommunity": "Managed by GPO"}
            )
    def test_set_community_names(self):
        """
        Test - Manage the SNMP accepted community names and their permissions.
        """
        mock_true = MagicMock(return_value=True)
        kwargs = {"communities": COMMUNITY_NAMES}
        mock_false = MagicMock(return_value=False)
        with patch.dict(
            win_snmp.__utils__,
            {"reg.set_value": mock_true, "reg.key_exists": mock_false},
        ), patch(
            "salt.modules.win_snmp.get_community_names",
            MagicMock(return_value=COMMUNITY_NAMES),
        ):
            self.assertTrue(win_snmp.set_community_names(**kwargs))
    def test_set_community_names_gpo(self):
        """
        Test - Setting community names must fail when they are managed by GPO
        (key_exists is True).
        """
        mock_true = MagicMock(return_value=True)
        kwargs = {"communities": COMMUNITY_NAMES}
        with patch.dict(
            win_snmp.__utils__,
            {"reg.set_value": mock_true, "reg.key_exists": mock_true},
        ), patch(
            "salt.modules.win_snmp.get_community_names",
            MagicMock(return_value=COMMUNITY_NAMES),
        ):
            self.assertRaises(
                CommandExecutionError, win_snmp.set_community_names, **kwargs
            )
| 35.975207 | 88 | 0.645302 |
4a578805ce0384ca4fcd19d712eb842d0afbd2ec | 5,237 | py | Python | moksha.wsgi/moksha/wsgi/widgets/api/live/live.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 11 | 2015-01-17T04:27:26.000Z | 2018-06-07T21:56:25.000Z | moksha.wsgi/moksha/wsgi/widgets/api/live/live.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 41 | 2015-05-08T18:54:46.000Z | 2019-10-25T05:08:31.000Z | moksha.wsgi/moksha/wsgi/widgets/api/live/live.py | hroncok/moksha | 08b3f668a9a3ca45fe49174eaace7b89bb995a92 | [
"Apache-2.0"
] | 16 | 2015-06-26T21:52:42.000Z | 2021-05-10T17:27:32.000Z | # This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <lmacken@redhat.com>
# Ralph Bean <ralph.bean@gmail.com>
import moksha
import moksha.wsgi.lib.utils
from uuid import uuid4
from moksha.common.exc import MokshaException
from moksha.wsgi.widgets.api.stomp import (
StompWidget, stomp_subscribe, stomp_unsubscribe)
from moksha.wsgi.widgets.api.amqp import (
AMQPSocket, amqp_subscribe, amqp_unsubscribe)
from moksha.wsgi.widgets.api.websocket import (
WebSocketWidget, websocket_subscribe, websocket_unsubscribe)
import tw2.core as twc
import tw2.core.params as pm
import tw2.core.widgets
class LiveWidgetMeta(tw2.core.widgets.WidgetMeta):
    # Empty specialization of tw2's widget metaclass; adds no behavior yet,
    # presumably kept as a dedicated hook point for LiveWidget subclasses.
    pass
class LiveWidget(twc.Widget):
    """ A live streaming widget based on toscawidgets2
    This widget handles automatically subscribing your widget to any given
    topics, and registers all of the stomp callbacks.
    The basics of the LiveWidget::
        class MyLiveWidget(LiveWidget):
            topic = 'mytopic'
            onmessage = 'console.log(json)'
            template = 'mako:myproject.templates.mylivewidget'
    """
    # Python 2-style metaclass hook (this module also uses `basestring`
    # below); ignored under Python 3.
    __metaclass__ = LiveWidgetMeta
    # Declarative tw2 parameters, settable on subclasses or per-display.
    backend = pm.Param('moksha livesocket backend to use',
                       default=twc.Required)
    topic = pm.Param('Topic to which this widget is subscribed')
    onmessage = pm.Param('js to execute on message received', default=None)
    def prepare(self):
        """ Get this widget ready for display
        Register this widget's message topic callbacks
        """
        # Give the widget a unique id if the subclass did not set one.
        if not hasattr(self, 'id'):
            self.id = str(uuid4())
        self.compound_id = self.id
        if not self.onmessage:
            raise MokshaException('%s must be provided an onmessage callback' %
                                  self.__class__.__name__)
        super(LiveWidget, self).prepare()
        if not self.topic:
            raise MokshaException('You must specify a `topic` to subscribe to')
        # Normalize `topic` to a list (old-style and/or conditional).
        topics = isinstance(self.topic, list) and self.topic or [self.topic]
        backend_lookup = {
            'stomp': StompWidget.callbacks,
            'amqp': AMQPSocket.callbacks,
            'websocket': WebSocketWidget.callbacks,
        }
        callbacks = backend_lookup[self.backend]
        for callback in callbacks:
            if callback == 'onmessageframe':
                # Per-topic message handler; '${id}' in the onmessage snippet
                # is substituted with this widget's id before registration.
                for topic in topics:
                    cb = getattr(self, 'onmessage').replace('${id}', self.id)
                    moksha.wsgi.lib.utils.livewidgets[callback][topic].append(cb)
            elif callback in ['onconnectedframe', 'onopen']:
                # On (re)connect, register the subscription payload for all
                # of this widget's topics.
                moksha.wsgi.lib.utils.livewidgets[callback].append(
                    self.subscribe_topics(topics))
            elif getattr(self, callback, None):
                # Any other callback defined on the widget is registered
                # verbatim.
                moksha.wsgi.lib.utils.livewidgets[callback].append(
                    getattr(self, callback))
    @classmethod
    def get_topics(cls):
        """Collect all topics declared on the class via `topic`/`topics`;
        whitespace-separated strings are split into individual topics."""
        topics = []
        for key in ('topic', 'topics'):
            if hasattr(cls, key):
                topic = getattr(cls, key)
                if topic:
                    if isinstance(topic, basestring):
                        map(topics.append, topic.split())
                    else:
                        topics += topic
        return topics
    def subscribe_topics(self, topics):
        """Return the backend-specific subscription payload for `topics`."""
        backend_lookup = {
            'stomp': stomp_subscribe,
            'amqp': amqp_subscribe,
            'websocket': websocket_subscribe,
        }
        try:
            return backend_lookup[self.backend](topics)
        except KeyError:
            raise MokshaException("Unknown `moksha.livesocket.backend` %r. "
                                  "Valid backends are currently %s" % (
                                      self.backend,
                                      ", ".join(backend_lookup.keys())
                                  ))
    def unsubscribe_topics(self, topics):
        """Return the backend-specific unsubscription payload for `topics`."""
        backend_lookup = {
            'stomp': stomp_unsubscribe,
            'amqp': amqp_unsubscribe,
            'websocket': websocket_unsubscribe,
        }
        try:
            return backend_lookup[self.backend](topics)
        except KeyError:
            raise MokshaException("Unknown `moksha.livesocket.backend` %r. "
                                  "Valid backends are currently %s" % (
                                      self.backend,
                                      ", ".join(backend_lookup.keys())
                                  ))
# Moksha Topic subscription handling methods
# Module-level aliases to the LiveWidget methods.  Under Python 2 these are
# unbound methods, so callers must still supply a LiveWidget instance as the
# first argument -- TODO confirm intended usage at the call sites.
subscribe_topics = LiveWidget.subscribe_topics
unsubscribe_topics = LiveWidget.unsubscribe_topics
| 35.62585 | 81 | 0.601489 |
8f313fba5f09d49fcbe4aea902a9fcdec8c1e244 | 6,337 | py | Python | datasetsnx/readers/lvis.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasetsnx/readers/lvis.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasetsnx/readers/lvis.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | import os
import numpy as np
from PIL import Image
from addict import Dict
from copy import deepcopy
from .reader import Reader
from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
__all__ = ['LVISAPIReader']
class LVISAPIReader(Reader):
    """Reader for LVIS v1 annotations (COCO-style json) with a flat image dir.
    Parses the annotation file once at construction time and serves one
    record per image: the decoded image plus an ``(N, 6)`` float32 box array
    of ``[x1, y1, x2, y2, category, 1]`` rows (categories are 0-indexed).
    """
    def __init__(self, set_path, img_root, **kwargs):
        """
        :param set_path: path to the LVIS json annotation file
        :param img_root: directory containing the image files
        """
        super(LVISAPIReader, self).__init__(**kwargs)
        self.set = set_path
        self.img_root = img_root
        assert os.path.exists(self.set)
        assert os.path.exists(self.img_root)
        # LVIS v1 defines exactly 1203 categories with contiguous 1-based ids.
        assert len(LVIS_V1_CATEGORIES) == 1203
        cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
        assert min(cat_ids) == 1 and max(cat_ids) == len(
            cat_ids
        ), "Category ids are not in [1, #categories], as expected"
        # Ensure the category list is sorted by id, then use each category's
        # first synonym as its display name.
        lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
        self.thing_classes = [k["synonyms"][0] for k in lvis_categories]
        self.meta = {"thing_classes": self.thing_classes}
        self.data_lines = self.load_lvis_json()
    def get_dataset_info(self):
        """Return (index range, metadata with class names)."""
        return range(len(self.data_lines)), Dict({'classes': self.thing_classes})
    def get_data_info(self, index):
        """Return lightweight per-image info (no image decoding)."""
        data_line = self.data_lines[index]
        return dict(h=data_line['ori_size'][0], w=data_line['ori_size'][1], bbox=data_line['bbox'])
    def load_lvis_json(self):
        """Parse the LVIS json file into a list of per-image record dicts.
        Each record carries: ``path``, ``ori_size`` (h, w as float32),
        ``not_exhaustive_category_ids`` / ``neg_category_ids`` (0-indexed),
        ``id`` and ``bbox`` (float32 ``(N, 5)`` of ``[x1, y1, x2, y2, cat]``).
        This function does not read the image files.
        """
        from lvis import LVIS
        lvis_api = LVIS(self.set)
        # sort indices for reproducible results
        img_ids = sorted(lvis_api.imgs.keys())
        # imgs: list of per-image dicts (coco_url, height, width, id, ...)
        imgs = lvis_api.load_imgs(img_ids)
        # anns: list (per image) of lists of annotation dicts
        # (bbox in xywh, 1-indexed category_id, unique id, ...)
        anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
        # Sanity check that each annotation has a unique id.
        # FIX: the failure message previously formatted an undefined name
        # `json_file`, turning an assertion failure into a NameError.
        ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
        assert len(set(ann_ids)) == len(ann_ids), \
            "Annotation ids in '{}' are not unique".format(self.set)
        imgs_anns = list(zip(imgs, anns))
        def get_file_name(img_root, img_dict):
            # The coco_url ends with "<split_folder>/<file_name>", but the
            # local layout is flat, so only the basename is used.
            split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
            return os.path.join(img_root, file_name)
        dataset_dicts = []
        for (img_dict, anno_dict_list) in imgs_anns:
            record = dict()
            record["path"] = get_file_name(self.img_root, img_dict)
            record['ori_size'] = np.array([img_dict["height"], img_dict["width"]]).astype(np.float32)
            # Convert 1-indexed LVIS category id lists to 0-indexed.
            record["not_exhaustive_category_ids"] = [
                i - 1 for i in img_dict.get("not_exhaustive_category_ids", [])]
            record["neg_category_ids"] = [
                i - 1 for i in img_dict.get("neg_category_ids", [])]
            image_id = record["id"] = img_dict["id"]
            boxes = []
            for anno in anno_dict_list:
                # Each annotation must belong to the image being processed;
                # a mismatch indicates buggy parsing or a corrupt file.
                assert anno["image_id"] == image_id
                x1, y1, w, h = anno["bbox"]
                # xywh -> xyxy; category converted from 1- to 0-indexed.
                boxes.append([x1, y1, x1 + w, y1 + h, anno["category_id"] - 1])
            if len(boxes) == 0:
                record["bbox"] = np.zeros((0, 5)).astype(np.float32)
            else:
                # FIX: cast to float32 for consistency with the empty case
                # and with `ori_size` (previously the non-empty path yielded
                # float64, so the output dtype varied between images).
                record["bbox"] = np.array(boxes).astype(np.float32)
            dataset_dicts.append(record)
        return dataset_dicts
    def __call__(self, index):
        """Return a deep-copied record for `index` with the image decoded and
        a constant column of ones appended to `bbox` -> shape (N, 6)."""
        data_line = deepcopy(self.data_lines[index])
        data_line['image'] = self.read_image(data_line['path'])
        data_line['bbox'] = np.concatenate([data_line['bbox'], np.full((len(data_line['bbox']), 1), 1).astype(np.float32)], axis=1)
        return data_line
    def __repr__(self):
        return 'LVISAPIReader(set_path={}, img_root={}, {})'.format(self.set, self.img_root, super(LVISAPIReader, self).__repr__())
| 42.246667 | 131 | 0.590816 |
258b99397899a6974487839945e4e6c4193d2255 | 1,640 | py | Python | setup.py | christophevg/py-service | 6c06caac42537580af4e5cdc880e03c5714f65d3 | [
"MIT"
] | 1 | 2018-12-23T16:56:23.000Z | 2018-12-23T16:56:23.000Z | setup.py | christophevg/py-service | 6c06caac42537580af4e5cdc880e03c5714f65d3 | [
"MIT"
] | 1 | 2021-06-01T22:03:16.000Z | 2021-06-01T22:03:16.000Z | setup.py | christophevg/py-servicefactory | 6c06caac42537580af4e5cdc880e03c5714f65d3 | [
"MIT"
] | null | null | null | import os
import io
import re

import setuptools
# Package metadata, kept in module-level constants so the setup() call below
# stays declarative.
NAME = "servicefactory"
AUTHOR = "Christophe VG"
AUTHOR_EMAIL = "contact@christophe.vg"
DESCRIPTION = 'A highly opinionated and very convention-driven framework for creating Python "services"'
LICENSE = "MIT"
# FIX: removed the stray leading apostrophe that was inside the string
# (previously: "'services framework rest api ipc").
KEYWORDS = "services framework rest api ipc"
URL = "https://github.com/christophevg/py-" + NAME
README = ".github/README.md"
CLASSIFIERS = [
  "Environment :: Console",
  "Development Status :: 5 - Production/Stable",
  "Intended Audience :: Developers",
  "Intended Audience :: System Administrators",
  "Topic :: Software Development",
  "License :: OSI Approved :: MIT License",
  "Programming Language :: Python",
  "Programming Language :: Python :: 2.7",
  "Programming Language :: Python :: 3.7",
]
INSTALL_REQUIRES = [ 'werkzeug', 'requests' ]
ENTRY_POINTS = {}
SCRIPTS = []
# Directory containing this setup.py; used to resolve relative file reads.
HERE = os.path.dirname(__file__)
def read(file):
    """Return the text content of *file*, resolved relative to this setup.py.
    Decodes explicitly as UTF-8 so the README/version reads the same
    regardless of the platform's default locale encoding (io.open keeps the
    Python 2.7 compatibility this package advertises in its classifiers).
    """
    with io.open(os.path.join(HERE, file), "r", encoding="utf-8") as fh:
        return fh.read()
# Extract __version__ from the package's __init__.py so the version number is
# maintained in a single place (dashes in NAME map to underscores in the
# package directory name).
VERSION = re.search(
  r'__version__ = [\'"]([^\'"]*)[\'"]',
  read(NAME.replace("-", "_") + "/__init__.py")
).group(1)
# Long description comes straight from the README file declared above.
LONG_DESCRIPTION = read(README)
if __name__ == "__main__":
  # All metadata is assembled in the constants above; packages are
  # auto-discovered.
  setuptools.setup(
    name=NAME,
    version=VERSION,
    packages=setuptools.find_packages(),
    author=AUTHOR,
    # FIX: AUTHOR_EMAIL was defined above but never passed to setup(),
    # leaving the package metadata without a maintainer contact.
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    license=LICENSE,
    keywords=KEYWORDS,
    url=URL,
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIRES,
    entry_points=ENTRY_POINTS,
    scripts=SCRIPTS
  )
| 28.275862 | 109 | 0.656707 |
82ebd5541f9c4419b077debd76c53499027b426f | 5,993 | py | Python | test/functional/zmq_test.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | 1 | 2022-03-28T02:13:10.000Z | 2022-03-28T02:13:10.000Z | test/functional/zmq_test.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | null | null | null | test/functional/zmq_test.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | 2 | 2022-03-26T23:59:01.000Z | 2022-03-31T13:27:08.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.script import hash256
class ZMQTest (BitcoinTestFramework):
    """Functional test for the ZMQ notification interface.

    Subscribes to all four publisher topics (hashblock, hashtx, rawblock,
    rawtx) of node 0 and checks that generated blocks and transactions
    are announced with consistent hashes and per-topic monotonically
    increasing sequence numbers.
    """
    def set_test_params(self):
        # Node 0 publishes the ZMQ notifications; node 1 acts as a peer.
        self.num_nodes = 2
    def setup_nodes(self):
        """Skip unless pyzmq and a ZMQ-enabled build are available, then
        connect a SUB socket and start node 0 with the publisher options."""
        # Try to import python3-zmq. Skip this test if the import fails.
        try:
            import zmq
        except ImportError:
            raise SkipTest("python3-zmq module not available.")
        # Check that plcultima has been built with ZMQ enabled
        config = configparser.ConfigParser()
        if not self.options.configfile:
            self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
        config.read_file(open(self.options.configfile))
        if not config["components"].getboolean("ENABLE_ZMQ"):
            raise SkipTest("plcultimad has not been built with zmq enabled.")
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # Fail a recv after 60s instead of blocking forever if a
        # notification never arrives.
        self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawblock")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawtx")
        ip_address = "tcp://127.0.0.1:28332"
        self.zmqSubSocket.connect(ip_address)
        # Only node 0 gets the -zmqpub* options; node 1 runs with defaults.
        self.extra_args = [['-zmqpubhashblock=%s' % ip_address, '-zmqpubhashtx=%s' % ip_address,
                            '-zmqpubrawblock=%s' % ip_address, '-zmqpubrawtx=%s' % ip_address], []]
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()
    def run_test(self):
        # Ensure the context is torn down even if the checks fail, so the
        # test framework does not hang on lingering sockets.
        try:
            self._zmq_test()
        finally:
            # Destroy the zmq context
            self.log.debug("Destroying zmq context")
            self.zmqContext.destroy(linger=None)
    def _zmq_test(self):
        """Core checks.  Each ZMQ multipart message is
        [topic, body, sequence]; the sequence frame is a little-endian
        uint32 counted independently per topic."""
        genhashes = self.nodes[0].generate(1)
        self.sync_all()
        self.log.info("Wait for tx")
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashtx")
        txhash = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
        # rawtx
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"rawtx")
        body = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) # must be sequence 0 on rawtx
        # Check that the rawtx hashes to the hashtx
        assert_equal(reverse(hash256(body)), txhash)
        self.log.info("Wait for block")
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashblock")
        body = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
        blkhash = bytes_to_hex_str(body)
        assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
        # rawblock
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"rawblock")
        body = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) #must be sequence 0 on rawblock
        # Check the hash of the rawblock's header matches generate
        # (the block header is the first 80 bytes of the raw block).
        assert_equal(genhashes[0], bytes_to_hex_str(reverse(hash256(body[:80]))))
        self.log.info("Generate 10 blocks (and 10 coinbase txes)")
        n = 10
        genhashes = self.nodes[1].generate(n)
        self.sync_all()
        zmqHashes = []
        zmqRawHashed = []
        blockcount = 0
        # Each block produces one message on every subscribed topic
        # (hashblock, hashtx, rawblock, rawtx), hence n * 4 receives.
        for x in range(n * 4):
            msg = self.zmqSubSocket.recv_multipart()
            topic = msg[0]
            body = msg[1]
            if topic == b"hashblock":
                zmqHashes.append(bytes_to_hex_str(body))
                msgSequence = struct.unpack('<I', msg[-1])[-1]
                assert_equal(msgSequence, blockcount + 1)
                blockcount += 1
            if topic == b"rawblock":
                zmqRawHashed.append(bytes_to_hex_str(reverse(hash256(body[:80]))))
                msgSequence = struct.unpack('<I', msg[-1])[-1]
                assert_equal(msgSequence, blockcount)
        for x in range(n):
            assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
            assert_equal(genhashes[x], zmqRawHashed[x])
        self.log.info("Wait for tx from second node")
        # test tx from a second node
        hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        verify_tx_sent(self.nodes[1], hashRPC)
        self.sync_all()
        # now we should receive a zmq msg because the tx was broadcast
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashtx")
        body = msg[1]
        hashZMQ = bytes_to_hex_str(body)
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, blockcount + 1)
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"rawtx")
        body = msg[1]
        hashedZMQ = bytes_to_hex_str(reverse(hash256(body)))
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, blockcount+1)
        assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
        assert_equal(hashRPC, hashedZMQ)
# Allow running this functional test directly as a script.
if __name__ == '__main__':
    ZMQTest().main()
| 39.169935 | 123 | 0.62306 |
89240cfeb8cf852265f761e03bc2d329e670e612 | 2,568 | py | Python | leet/array/nextPermutation.py | peterlamar/python-cp-cheatsheet | f9f854064a3c657c04fab27d0a496401bfa97da1 | [
"Apache-2.0"
] | 140 | 2020-10-21T13:23:52.000Z | 2022-03-31T15:09:45.000Z | leet/array/nextPermutation.py | stacykutyepov/python-cp-cheatsheet | a00a57e1b36433648d1cace331e15ff276cef189 | [
"Apache-2.0"
] | 1 | 2021-07-22T14:01:25.000Z | 2021-07-22T14:01:25.000Z | leet/array/nextPermutation.py | stacykutyepov/python-cp-cheatsheet | a00a57e1b36433648d1cace331e15ff276cef189 | [
"Apache-2.0"
] | 33 | 2020-10-21T14:17:02.000Z | 2022-03-25T11:25:03.000Z | # find decreasing substring/max permutation
# iterate pivot with next highest number
# reverse substring to right of pivot
# time: 34 min first pass
# errors: lots of bugs,
# did not handle 1,5,1
class Solution:
    """Dead code: immediately shadowed by the second ``class Solution``
    definition below, so this class is never the one that gets used.
    Kept only for its original author's note ("2nd round, much faster:
    more algorithm insight, using built-in reverse and while loops").
    """
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange *nums* in-place into the next lexicographic permutation.

        If *nums* is already the highest permutation, it wraps around to
        the lowest (ascending) order. Runs in O(n) time, O(1) extra space.

        This replaces the two previous shadowing method definitions with a
        single correct one.  Both drafts mishandled duplicate values
        (e.g. [1, 5, 1] must become [5, 1, 1]): the successor swapped with
        the pivot has to be *strictly* greater than it.
        """
        # Scan from the right for the first index i whose left neighbour is
        # strictly smaller; nums[i:] is then a non-increasing suffix.
        i = len(nums) - 1
        while i > 0 and nums[i - 1] >= nums[i]:
            i -= 1
        if i == 0:
            # Whole array is non-increasing: wrap around to the smallest.
            nums.reverse()
            return
        pivot = i - 1
        # Rightmost element strictly greater than the pivot.  Using >=
        # here would pick an equal element and break on duplicates.
        j = len(nums) - 1
        while nums[j] <= nums[pivot]:
            j -= 1
        nums[pivot], nums[j] = nums[j], nums[pivot]
        # The suffix is still non-increasing; reversing it yields the
        # smallest possible tail.
        left, right = i, len(nums) - 1
        while left < right:
            nums[left], nums[right] = nums[right], nums[left]
            left += 1
            right -= 1
| 25.68 | 67 | 0.439252 |
e46e475a1c4a8d816977ad9aca926714e92de70c | 7,901 | py | Python | sample_config.py | abubarst12/BotHub | a2f842c32a39ae8f975dc050046c05c5fb0e7e53 | [
"Apache-2.0"
] | null | null | null | sample_config.py | abubarst12/BotHub | a2f842c32a39ae8f975dc050046c05c5fb0e7e53 | [
"Apache-2.0"
] | null | null | null | sample_config.py | abubarst12/BotHub | a2f842c32a39ae8f975dc050046c05c5fb0e7e53 | [
"Apache-2.0"
] | null | null | null | # PLEASE STOP!
# DO NOT EDIT THIS FILE
# Create a new config.py file in same dir and import, then extend this class.
import os
from telethon.tl.types import ChatBannedRights
class Config(object):
    """Environment-driven configuration for the userbot.

    Every attribute is resolved once, at import time, from an environment
    variable (with a default).  Subclass and override -- see ``Production``
    and ``Development`` below -- rather than editing this class.
    """
    LOGGER = True
    # Get this value from my.telegram.org! Please do not steal
    APP_ID = int(os.environ.get("APP_ID", 6))
    API_HASH = os.environ.get("API_HASH", "eb06d4abfb49dc3eeb1aeb98ae0f581e")
    # String session for running on Heroku.
    # Some people upload their session files on GitHub or other third party
    # hosting websites; using a string session might prevent un-authorized
    # use of the confidential session files.
    HU_STRING_SESSION = os.environ.get("HU_STRING_SESSION", None)
    # Get your own APPID from https://api.openweathermap.org/data/2.5/weather
    OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
    # Your City
    WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
    # Get your own ACCESS_KEY from http://api.screenshotlayer.com/api/capture
    SCREEN_SHOT_LAYER_ACCESS_KEY = os.environ.get("SCREEN_SHOT_LAYER_ACCESS_KEY", None)
    # Send .get_id in any group to fill this value.
    PRIVATE_GROUP_BOT_API_ID = int(os.environ.get("PRIVATE_GROUP_BOT_API_ID", -100123456789))
    # Send .get_id in any channel to fill this value. ReQuired for @Manuel15 inspiration to work!
    PRIVATE_CHANNEL_BOT_API_ID = int(os.environ.get("PRIVATE_CHANNEL_BOT_API_ID", -100123456789))
    # This is required for the plugins involving the file system.
    TMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY", "./DOWNLOADS/")
    # This is required for the speech to text module. Get your USERNAME from
    # https://console.bluemix.net/docs/services/speech-to-text/getting-started.html
    IBM_WATSON_CRED_URL = os.environ.get("IBM_WATSON_CRED_URL", None)
    IBM_WATSON_CRED_PASSWORD = os.environ.get("IBM_WATSON_CRED_PASSWORD", None)
    # This is required for the hash to torrent file functionality to work.
    HASH_TO_TORRENT_API = os.environ.get("HASH_TO_TORRENT_API", "https://example.com/torrent/{}")
    # This is required for the @telegraph functionality.
    TELEGRAPH_SHORT_NAME = os.environ.get("TELEGRAPH_SHORT_NAME", "UniBorg")
    # Get a Free API Key from OCR.Space
    OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
    # Send .get_id in any group with all your administration bots (added)
    G_BAN_LOGGER_GROUP = int(os.environ.get("G_BAN_LOGGER_GROUP", -100123456789))
    # TG API limit. An album can have atmost 10 media!
    TG_GLOBAL_ALBUM_LIMIT = int(os.environ.get("TG_GLOBAL_ALBUM_LIMIT", 9))
    # Telegram BOT Token from @BotFather
    TG_BOT_TOKEN_BF_HER = os.environ.get("TG_BOT_TOKEN_BF_HER", None)
    TG_BOT_USER_NAME_BF_HER = os.environ.get("TG_BOT_USER_NAME_BF_HER", None)
    #
    # DO NOT EDIT BELOW THIS LINE IF YOU DO NOT KNOW WHAT YOU ARE DOING
    #
    # TG API limit. A message can have maximum 4096 characters!
    MAX_MESSAGE_SIZE_LIMIT = 4095
    # set blacklist_chats where you do not want userbot's features
    UB_BLACK_LIST_CHAT = set(int(x) for x in os.environ.get("UB_BLACK_LIST_CHAT", "").split())
    # maximum number of messages for antiflood
    MAX_ANTI_FLOOD_MESSAGES = 10
    # warn mode for anti flood (mute the offender)
    ANTI_FLOOD_WARN_MODE = ChatBannedRights(
        until_date=None,
        view_messages=None,
        send_messages=True
    )
    # chat ids or usernames, it is recommended to use chat ids,
    # providing usernames means an additional overhead for the user
    CHATS_TO_MONITOR_FOR_ANTI_FLOOD = []
    # specify LOAD and NO_LOAD
    LOAD = []
    # The following plugins won't work on Heroku.
    # ⚡Remove This To Make Them Work But Would Make Bot Unstable AF...⚡
    NO_LOAD = [
        "pmpermit",
        "left",
        "autores",
        "hand",
        "notification",
    ]
    # Get your own API key from https://www.remove.bg/ or
    # feel free to use http://telegram.dog/Remove_BGBot
    REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
    SLAP_USERNAME = os.environ.get("SLAP_USERNAME", None)
    GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
    GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
    # Set to "True" if you want to block users that are spamming your PMs.
    # NOTE(review): parsed explicitly because bool() of any non-empty
    # string (even "False") is True.
    NO_P_M_SPAM = os.environ.get("NO_P_M_SPAM", "False").lower() in ("true", "1", "yes")
    # define "spam" in PMs
    MAX_FLOOD_IN_P_M_s = int(os.environ.get("MAX_FLOOD_IN_P_M_s", 3))
    # set to "True" if you want to log PMs to your PM_LOGGR_BOT_API_ID
    NC_LOG_P_M_S = os.environ.get("NC_LOG_P_M_S", "False").lower() in ("true", "1", "yes")
    # send .get_id in any channel to forward all your NEW PMs to this group
    PM_LOGGR_BOT_API_ID = int(os.environ.get("PM_LOGGR_BOT_API_ID", "-100"))
    # For Databases: can be None, in which case plugins requiring a
    # database would not work.
    DB_URI = os.environ.get("DATABASE_URL", None)
    MONGO_URI = os.environ.get("MONGO_URI", None)
    # number of rows of buttons to be displayed in .helpme command
    NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD = int(os.environ.get("NO_OF_BUTTONS_DISPLAYED_IN_H_ME_CMD", 5))
    # specify command handler that should be used for the plugins
    # this should be a valid "regex" pattern
    COMMAND_HAND_LER = os.environ.get("COMMAND_HAND_LER", r"\.")
    # specify list of users allowed to use bot
    # WARNING: be careful who you grant access to your bot.
    # malicious users could do ".exec rm -rf /*"
    SUDO_USERS = set(int(x) for x in os.environ.get("SUDO_USERS", "").split())
    # VeryStream only supports video formats
    VERY_STREAM_LOGIN = os.environ.get("VERY_STREAM_LOGIN", None)
    VERY_STREAM_KEY = os.environ.get("VERY_STREAM_KEY", None)
    TEMP_DIR = os.environ.get("TEMP_DIR", None)
    CHANNEL_ID = os.environ.get("CHANNEL_ID", None)
    # Google Drive
    G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
    G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
    GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
    AUTH_TOKEN_DATA = os.environ.get("AUTH_TOKEN_DATA", None)
    os.makedirs(TMP_DOWNLOAD_DIRECTORY, exist_ok=True)
    # Persist the Google Drive auth token (when provided) for plugins that
    # read it back from disk.  Guarding on None fixes an import-time crash:
    # file.write(None) raises TypeError when AUTH_TOKEN_DATA is unset.
    # os.path.join also fixes the path when TMP_DOWNLOAD_DIRECTORY has no
    # trailing slash (plain "+" produced "DOWNLOADSauth_token.txt").
    if AUTH_TOKEN_DATA is not None:
        with open(os.path.join(TMP_DOWNLOAD_DIRECTORY, "auth_token.txt"), "w") as t_file:
            t_file.write(AUTH_TOKEN_DATA)
    #
    TELE_GRAM_2FA_CODE = os.environ.get("TELE_GRAM_2FA_CODE", None)
    #
    GROUP_REG_SED_EX_BOT_S = os.environ.get("GROUP_REG_SED_EX_BOT_S", r"(regex|moku|BananaButler_|rgx|l4mR)bot")
    # rapidleech plugins
    OPEN_LOAD_LOGIN = os.environ.get("OPEN_LOAD_LOGIN", "0")
    OPEN_LOAD_KEY = os.environ.get("OPEN_LOAD_KEY", "0")
    # Google Chrome Selenium Stuff
    # taken from https://github.com/jaskaranSM/UniBorg/blob/9072e3580cc6c98d46f30e41edbe73ffc9d850d3/sample_config.py#L104-L106
    GOOGLE_CHROME_DRIVER = os.environ.get("GOOGLE_CHROME_DRIVER", None)
    GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
    #
    LYDIA_API = os.environ.get("LYDIA_API", None)
    #
    YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
    #
    # NOTE(review): previously `bool(os.environ.get("HEROKU_MEMEZ", "False"))`,
    # which was True even for the default, because bool("False") is True.
    HEROKU_MEMEZ = os.environ.get("HEROKU_MEMEZ", "False").lower() in ("true", "1", "yes")
    HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
    HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
    #
    CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
    CHROME_BIN = os.environ.get("CHROME_BIN", None)
    # define "heroku_link" in config_vars
    HEROKU_LINK = os.environ.get("HEROKU_LINK", None)
    # define "repo_link" in config_vars
    REPO_LINK = os.environ.get("REPO_LINK", None)
    # define "packs_content" in config_vars
    PACKS_CONTENT = os.environ.get("PACKS_CONTENT", None)
    #
    BOT_HUB = os.environ.get("BOT_HUB", None)
class Production(Config):
    # Quieter logging for production deployments.
    LOGGER = False
class Development(Config):
    # Verbose logging while developing.
    LOGGER = True
| 49.38125 | 154 | 0.711049 |
c3adcdd73c42c0542bea9157463b7c2bb7d8628c | 3,228 | py | Python | src/richard/videos/management/commands/videoreqs.py | codersquid/richard | 2df89abcfdc855db8c20bf541532b680492fe133 | [
"Apache-2.0"
] | null | null | null | src/richard/videos/management/commands/videoreqs.py | codersquid/richard | 2df89abcfdc855db8c20bf541532b680492fe133 | [
"Apache-2.0"
] | null | null | null | src/richard/videos/management/commands/videoreqs.py | codersquid/richard | 2df89abcfdc855db8c20bf541532b680492fe133 | [
"Apache-2.0"
] | null | null | null | # richard -- video index system
# Copyright (C) 2012, 2013 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from django.db.models import fields
from django.core.management.base import BaseCommand
from richard.videos.models import Video
class Command(BaseCommand):
help = 'Generates a JSON file with requirements for video'
def handle(self, *args, **options):
# Generate the basic stuff
reqs = []
for field in Video._meta.fields:
# Skip some things that shouldn't be in an API push
if field.name in ['id', 'updated', 'added']:
continue
data = {
'name': field.name,
'type': field.get_internal_type(),
'has_default': field.default is not fields.NOT_PROVIDED,
'null': field.null,
'empty_strings': field.empty_strings_allowed,
'md': 'markdown' in field.help_text.lower(),
'choices': [mem[0] for mem in field.choices]
}
if field.name == 'category':
data.update({
'type': 'TextField',
'empty_strings': False,
'null': False,
'has_default': False,
'md': False,
'choices': []
})
elif field.name == 'language':
data.update({
'type': 'TextField',
'empty_strings': False,
'null': False,
'has_default': False,
'md': False,
'choices': []
})
reqs.append(data)
# Add tags and speakers which are M2M, but we do them funkily
# in the API.
reqs.append({
'name': 'tags',
'type': 'TextArrayField',
'empty_strings': False,
'null': True,
'has_default': False,
'md': False,
'choices': []
})
reqs.append({
'name': 'speakers',
'type': 'TextArrayField',
'empty_strings': False,
'has_default': False,
'null': True,
'md': False,
'choices': []
})
f = open('video_reqs.json', 'w')
f.write(json.dumps(reqs, indent=2))
f.close()
self.stdout.write('Done!\n')
| 34.709677 | 77 | 0.508984 |
4b9406c1a6f67756c9105e036fae99113d70b48f | 6,216 | py | Python | gluoncv/data/mhp.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | [
"Apache-2.0"
] | 3 | 2019-08-06T00:12:12.000Z | 2019-09-05T15:54:22.000Z | gluoncv/data/mhp.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | [
"Apache-2.0"
] | null | null | null | gluoncv/data/mhp.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | [
"Apache-2.0"
] | 1 | 2020-08-17T09:30:16.000Z | 2020-08-17T09:30:16.000Z | """Multi-Human-Parsing V1 Dataset."""
import os
import numpy as np
from PIL import Image
from PIL import ImageFile
import mxnet as mx
from .segbase import SegmentationDataset
ImageFile.LOAD_TRUNCATED_IMAGES = True
class MHPV1Segmentation(SegmentationDataset):
"""Multi-Human-Parsing V1 Dataset.
Parameters
----------
root : string
Path to MHPV1 folder. Default is '$(HOME)/.mxnet/datasets/mhp/LV-MHP-v1'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from mxnet.gluon.data.vision import transforms
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
>>> ])
>>> # Create Dataset
>>> trainset = gluoncv.data.MHPV1Segmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = gluon.data.DataLoader(
>>> trainset, 4, shuffle=True, last_batch='rollover',
>>> num_workers=4)
"""
# pylint: disable=abstract-method
NUM_CLASS = 18
CLASSES = ("hat", "hair", "sunglasses", "upper clothes", "skirt",
"pants", "dress", "belt", "left shoe", "right shoe", "face", "left leg",
"right leg", "left arm", "right arm", "bag", "scarf", "torso skin")
def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/mhp/LV-MHP-v1'),
split='train', mode=None, transform=None, base_size=768, **kwargs):
super(MHPV1Segmentation, self).__init__(root, split, mode, transform, base_size, **kwargs)
assert os.path.exists(root), "Please setup the dataset using" + "scripts/datasets/mhp_v1.py"
self.images, self.masks = _get_mhp_pairs(root, split)
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise(RuntimeError("Found 0 images in subfolders of: \
" + root + "\n"))
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
# nan check
img_np = np.array(img, dtype=np.uint8)
assert not np.isnan(np.sum(img_np))
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = _get_mask(self.masks[index])
# Here, we resize input image resolution to the multiples of 8
# for avoiding resolution misalignment during downsampling and upsampling
w, h = img.size
if h < w:
oh = self.base_size
ow = int(1.0 * w * oh / h + 0.5)
if ow % 8:
ow = int(round(ow / 8) * 8)
else:
ow = self.base_size
oh = int(1.0 * h * ow / w + 0.5)
if oh % 8:
oh = int(round(oh / 8) * 8)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask
def _mask_transform(self, mask):
return mx.nd.array(np.array(mask), mx.cpu(0)).astype('int32') # - 1
def __len__(self):
return len(self.images)
@property
def classes(self):
"""Category names."""
return type(self).CLASSES
@property
def pred_offset(self):
return 0
def _get_mhp_pairs(folder, split='train'):
img_paths = []
mask_paths = []
img_folder = os.path.join(folder, 'images')
mask_folder = os.path.join(folder, 'annotations')
if split == 'test':
img_list = os.path.join(folder, 'test_list.txt')
else:
img_list = os.path.join(folder, 'train_list.txt')
with open(img_list) as txt:
for filename in txt:
# record mask paths
mask_short_path = []
basename, _ = os.path.splitext(filename)
for maskname in os.listdir(mask_folder):
if maskname.startswith(basename):
maskpath = os.path.join(mask_folder, maskname)
if os.path.isfile(maskpath):
mask_short_path.append(maskpath)
else:
print('cannot find the mask:', maskpath)
# mask_short_path is not empty
if mask_short_path:
mask_paths.append(mask_short_path)
# record img paths
imgpath = os.path.join(img_folder, filename.rstrip('\n'))
if os.path.isfile(imgpath):
img_paths.append(imgpath)
else:
print('cannot find the image:', imgpath)
if split == 'train':
img_paths = img_paths[:3000]
mask_paths = mask_paths[:3000]
elif split == 'val':
img_paths = img_paths[3001:4000]
mask_paths = mask_paths[3001:4000]
return img_paths, mask_paths
def _get_mask(mask_paths):
mask_np = None
mask_idx = None
for _, mask_path in enumerate(mask_paths):
mask_sub = Image.open(mask_path)
mask_sub_np = np.array(mask_sub, dtype=np.uint8)
if mask_idx is None:
mask_idx = np.zeros(mask_sub_np.shape, dtype=np.uint8)
mask_sub_np = np.ma.masked_array(mask_sub_np, mask=mask_idx)
mask_idx += np.minimum(mask_sub_np, 1)
if mask_np is None:
mask_np = mask_sub_np
else:
mask_np += mask_sub_np
# nan check
assert not np.isnan(np.sum(mask_np))
# categories check
assert (np.max(mask_np) <= 18 and np.min(mask_np) >= 0)
mask = Image.fromarray(mask_np)
return mask
| 33.240642 | 100 | 0.582207 |
9edc990ee0be684f117c467dddab7f74376d9408 | 16,824 | py | Python | setup.py | QuasarApp/LIEF | db25689e1cba96ad6624cefe2cec7d10b20dc31f | [
"Apache-2.0"
] | null | null | null | setup.py | QuasarApp/LIEF | db25689e1cba96ad6624cefe2cec7d10b20dc31f | [
"Apache-2.0"
] | null | null | null | setup.py | QuasarApp/LIEF | db25689e1cba96ad6624cefe2cec7d10b20dc31f | [
"Apache-2.0"
] | null | null | null | import os
import sys
import platform
import subprocess
import setuptools
import pathlib
import sysconfig
import copy
import distutils
from pkg_resources import Distribution, get_distribution
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext, copy_file
from distutils import log
from distutils.version import LooseVersion
MIN_SETUPTOOLS_VERSION = "31.0.0"
assert (LooseVersion(setuptools.__version__) >= LooseVersion(MIN_SETUPTOOLS_VERSION)), "LIEF requires a setuptools version '{}' or higher (pip install setuptools --upgrade)".format(MIN_SETUPTOOLS_VERSION)
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
PACKAGE_NAME = "lief"
get_config_var_backup = sysconfig.get_config_var
get_platform_backup = sysconfig.get_platform
get_config_vars_backup = sysconfig.get_config_vars
distutils_get_config_vars_backup = distutils.sysconfig.get_config_vars
class LiefDistribution(setuptools.Distribution):
global_options = setuptools.Distribution.global_options + [
('lief-test', None, 'Build and make tests'),
('ninja', None, 'Use Ninja as build system'),
('sdk', None, 'Build SDK package'),
('doc', None, 'Build LIEF documentation'),
('lief-no-json', None, 'Disable JSON module'),
('lief-no-logging', None, 'Disable logging module'),
('lief-no-elf', None, 'Disable ELF module'),
('lief-no-pe', None, 'Disable PE module'),
('lief-no-macho', None, 'Disable Mach-O module'),
('lief-no-android', None, 'Disable Android formats'),
('lief-no-art', None, 'Disable ART module'),
('lief-no-vdex', None, 'Disable VDEX module'),
('lief-no-oat', None, 'Disable OAT module'),
('lief-no-dex', None, 'Disable DEX module'),
]
def __init__(self, attrs=None):
self.lief_test = False
self.ninja = False
self.sdk = False
self.lief_no_json = False
self.lief_no_logging = False
self.lief_no_elf = False
self.lief_no_pe = False
self.lief_no_macho = False
self.lief_no_art = False
self.lief_no_oat = False
self.lief_no_dex = False
self.lief_no_vdex = False
self.lief_no_android = False
self.doc = False
super().__init__(attrs)
class Module(Extension):
def __init__(self, name, sourcedir='', *args, **kwargs):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(os.path.join(CURRENT_DIR))
class BuildLibrary(build_ext):
def run(self):
try:
subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
for ext in self.extensions:
self.build_extension(ext)
self.copy_extensions_to_source()
@staticmethod
def has_ninja():
try:
subprocess.check_call(['ninja', '--version'])
return True
except Exception:
return False
@staticmethod
def sdk_suffix():
if platform.system() == "Windows":
return "zip"
return "tar.gz"
def build_extension(self, ext):
if self.distribution.lief_test:
log.info("LIEF tests enabled!")
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
jobs = self.parallel if self.parallel else 1
cmake_args = []
source_dir = ext.sourcedir
build_temp = self.build_temp
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_library_output_directory = os.path.abspath(os.path.dirname(build_temp))
cfg = 'RelWithDebInfo' if self.debug else 'Release'
is64 = sys.maxsize > 2**32
# Ninja ?
build_with_ninja = False
if self.has_ninja() and self.distribution.ninja:
build_with_ninja = True
if build_with_ninja:
cmake_args += ["-G", "Ninja"]
cmake_args += [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(cmake_library_output_directory),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DLIEF_PYTHON_API=on',
]
# LIEF options
# ============
if self.distribution.lief_test:
cmake_args += ["-DLIEF_TESTS=on"]
if self.distribution.lief_no_json:
log.info("LIEF JSON module disabled")
cmake_args += ["-DLIEF_ENABLE_JSON=off"]
if self.distribution.lief_no_logging:
log.info("LIEF logging module disabled")
cmake_args += ["-DLIEF_LOGGING=off"]
if self.distribution.doc:
log.info("LIEF documentation enabled")
cmake_args += ["-DLIEF_DOC=on"]
if self.debug:
log.info("LIEF enables DEBUG messages")
cmake_args += ["-DLIEF_LOGGING_DEBUG=on"]
else:
cmake_args += ["-DLIEF_LOGGING_DEBUG=off"]
# Main formats
# ============
if self.distribution.lief_no_elf:
log.info("LIEF ELF module disabled")
cmake_args += ["-DLIEF_ELF=off"]
if self.distribution.lief_no_pe:
log.info("LIEF PE module disabled")
cmake_args += ["-DLIEF_PE=off"]
if self.distribution.lief_no_macho:
log.info("LIEF MACH-O module disabled")
cmake_args += ["-DLIEF_MACHO=off"]
# Android formats
# ===============
if self.distribution.lief_no_oat or self.distribution.lief_no_android:
log.info("LIEF OAT module disabled")
cmake_args += ["-DLIEF_OAT=off"]
if self.distribution.lief_no_dex or self.distribution.lief_no_android:
log.info("LIEF DEX module disabled")
cmake_args += ["-DLIEF_DEX=off"]
if self.distribution.lief_no_vdex or self.distribution.lief_no_android:
log.info("LIEF VDEX module disabled")
cmake_args += ["-DLIEF_VDEX=off"]
if self.distribution.lief_no_art or self.distribution.lief_no_android:
log.info("LIEF ART module disabled")
cmake_args += ["-DLIEF_ART=off"]
build_args = ['--config', cfg]
env = os.environ
if os.getenv("CXXFLAGS", None) is not None:
cmake_args += [
'-DCMAKE_CXX_FLAGS={}'.format(os.getenv("CXXFLAGS")),
]
if os.getenv("CFLAGS", None) is not None:
cmake_args += [
'-DCMAKE_C_FLAGS={}'.format(os.getenv("CFLAGS")),
]
if platform.system() == "Windows":
from setuptools import msvc
cmake_args += [
'-DCMAKE_BUILD_TYPE={}'.format(cfg),
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), cmake_library_output_directory),
'-DLIEF_USE_CRT_RELEASE=MT',
]
if build_with_ninja:
arch = 'x64' if is64 else 'x86'
ninja_env = msvc.msvc14_get_vc_env(arch)
env.update(ninja_env)
else:
cmake_args += ['-A', 'x64'] if is64 else ['-A', 'win32']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE={}'.format(cfg)]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
log.info("Platform: %s", platform.system())
log.info("Wheel library: %s", self.get_ext_fullname(ext.name))
# 1. Configure
configure_cmd = ['cmake', ext.sourcedir] + cmake_args
log.info(" ".join(configure_cmd))
subprocess.check_call(configure_cmd, cwd=self.build_temp, env=env)
# 2. Build
targets = {
'python_bindings': 'pyLIEF',
}
if self.distribution.sdk:
targets['sdk'] = "package"
if self.distribution.doc:
targets['doc'] = "lief-doc"
if platform.system() == "Windows":
build_cmd = ['cmake', '--build', '.', '--target', "lief_samples"] + build_args
#log.info(" ".join(build_cmd))
if self.distribution.lief_test:
subprocess.check_call(['cmake', '--build', '.', '--target', "lief_samples"] + build_args, cwd=self.build_temp, env=env)
subprocess.check_call(configure_cmd, cwd=self.build_temp, env=env)
if build_with_ninja:
subprocess.check_call(['cmake', '--build', '.', '--target', "all"] + build_args, cwd=self.build_temp, env=env)
else:
subprocess.check_call(['cmake', '--build', '.', '--target', "ALL_BUILD"] + build_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.', '--target', "check-lief"] + build_args, cwd=self.build_temp, env=env)
else:
subprocess.check_call(['cmake', '--build', '.', '--target', targets['python_bindings']] + build_args, cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['cmake', '--build', '.', '--target', targets['sdk']] + build_args, cwd=self.build_temp, env=env)
else:
if build_with_ninja:
if self.distribution.lief_test:
subprocess.check_call(['ninja', "lief_samples"], cwd=self.build_temp)
subprocess.check_call(configure_cmd, cwd=self.build_temp)
subprocess.check_call(['ninja'], cwd=self.build_temp)
subprocess.check_call(['ninja', "check-lief"], cwd=self.build_temp)
else:
subprocess.check_call(['ninja', targets['python_bindings']], cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['ninja', targets['sdk']], cwd=self.build_temp, env=env)
if 'doc' in targets:
try:
subprocess.check_call(['ninja', targets['doc']], cwd=self.build_temp, env=env)
except Exception as e:
log.error("Documentation failed: %s" % e)
else:
log.info("Using {} jobs".format(jobs))
if self.distribution.lief_test:
subprocess.check_call(['make', '-j', str(jobs), "lief_samples"], cwd=self.build_temp)
subprocess.check_call(configure_cmd, cwd=self.build_temp)
subprocess.check_call(['make', '-j', str(jobs), "all"], cwd=self.build_temp)
subprocess.check_call(['make', '-j', str(jobs), "check-lief"], cwd=self.build_temp)
else:
subprocess.check_call(['make', '-j', str(jobs), targets['python_bindings']], cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['make', '-j', str(jobs), targets['sdk']], cwd=self.build_temp, env=env)
if 'doc' in targets:
try:
subprocess.check_call(['make', '-j', str(jobs), targets['doc']], cwd=self.build_temp, env=env)
except Exception as e:
log.error("Documentation failed: %s" % e)
pylief_dst = os.path.join(self.build_lib, self.get_ext_filename(self.get_ext_fullname(ext.name)))
libsuffix = pylief_dst.split(".")[-1]
pylief_path = os.path.join(cmake_library_output_directory, "{}.{}".format(PACKAGE_NAME, libsuffix))
if platform.system() == "Windows":
pylief_base = pathlib.Path(cmake_library_output_directory) / "Release" / "api" / "python"
pylief_path = pylief_base / "Release" / "{}.{}".format(PACKAGE_NAME, libsuffix)
if not pylief_path.is_file():
pylief_path = pylief_base / "{}.{}".format(PACKAGE_NAME, libsuffix)
pylief_path = pylief_path.as_posix()
if not os.path.exists(self.build_lib):
os.makedirs(self.build_lib)
log.info("Copying {} into {}".format(pylief_path, pylief_dst))
copy_file(
pylief_path, pylief_dst, verbose=self.verbose,
dry_run=self.dry_run)
# SDK
# ===
if self.distribution.sdk:
sdk_path = list(pathlib.Path(self.build_temp).rglob("LIEF-*.{}".format(self.sdk_suffix())))
if len(sdk_path) == 0:
log.error("Unable to find SDK archive")
sys.exit(1)
sdk_path = str(sdk_path.pop())
sdk_output = str(pathlib.Path(CURRENT_DIR) / "build")
copy_file(
sdk_path, sdk_output, verbose=self.verbose,
dry_run=self.dry_run)
def get_platform():
    """Wrap the saved ``sysconfig.get_platform`` and rewrite the ``x86_64``
    architecture tag with the override taken from the ``LIEF_PY_XARCH``
    environment variable (used when cross-tagging wheels)."""
    platform_tag = get_platform_backup()
    arch_override = os.environ.get("LIEF_PY_XARCH", None)
    if arch_override is None or not isinstance(platform_tag, str):
        return platform_tag
    patched = platform_tag.replace("x86_64", arch_override)
    log.info(" Replace %s -> %s", platform_tag, patched)
    return patched
def get_config_vars(*args):
    """Wrap the saved ``sysconfig.get_config_vars``: when ``LIEF_PY_XARCH``
    is set, return a deep copy whose extension-suffix related entries have
    ``x86_64`` replaced by the override architecture."""
    config = get_config_vars_backup(*args)
    arch_override = os.environ.get("LIEF_PY_XARCH", None)
    if arch_override is None:
        return config
    patched = copy.deepcopy(config)
    # Only the variables that end up in the wheel/extension file names.
    interesting = {"SO", "SOABI", "EXT_SUFFIX", "BUILD_GNU_TYPE"}
    for key, value in config.items():
        if key in interesting and isinstance(value, str) and "x86_64" in value:
            replacement = value.replace("x86_64", arch_override)
            log.info(" Replace %s: %s -> %s", key, value, replacement)
            patched[key] = replacement
    return patched
def distutils_get_config_vars(*args):
    """Wrap the saved ``distutils.sysconfig.get_config_vars``.

    Unlike the sysconfig variant, this may receive a *list* of requested
    values; in that case every string containing ``x86_64`` is patched.
    For the dict form only the suffix-related keys are touched (and logged).
    """
    config = distutils_get_config_vars_backup(*args)
    arch_override = os.environ.get("LIEF_PY_XARCH", None)
    if arch_override is None:
        return config
    if isinstance(config, list):
        return [
            item.replace("x86_64", arch_override)
            if isinstance(item, str) and "x86_64" in item
            else item
            for item in config
        ]
    patched = copy.deepcopy(config)
    for key, value in config.items():
        if not (isinstance(value, str) and "x86_64" in value):
            continue
        if key not in {"SO", "SOABI", "EXT_SUFFIX", "BUILD_GNU_TYPE"}:
            continue
        replacement = value.replace("x86_64", arch_override)
        log.info(" Replace %s: %s -> %s", key, value, replacement)
        patched[key] = replacement
    return patched
# Monkey-patch the platform/config queries so the wheel can be tagged for a
# different architecture via the LIEF_PY_XARCH environment variable; the
# original callables were saved earlier as *_backup.
sysconfig.get_platform = get_platform
sysconfig.get_config_vars = get_config_vars
distutils.sysconfig.get_config_vars = distutils_get_config_vars
# From setuptools-git-version
command = 'git describe --tags --long --dirty'
is_tagged_cmd = 'git tag --list --points-at=HEAD'
fmt_dev = '{tag}.dev0'
fmt_tagged = '{tag}'
def format_version(version: str, fmt: str = fmt_dev, is_dev: bool = False):
    """Turn ``git describe --tags --long --dirty`` output into a version string.

    ``version`` has the shape ``<tag>-<count>-g<sha>[-dirty]``.  If HEAD sits
    exactly on a clean tag (count 0, not dirty) the tag itself is returned;
    otherwise ``fmt`` is applied with ``tag``/``gitsha`` fields.  With
    ``is_dev=True`` the tag (which must then be ``MAJOR.MINOR.PATCH``) is
    bumped to the next minor release before formatting.

    Compared to the original, the ``-dirty`` suffix is stripped explicitly and
    the remainder is split with ``rsplit`` so tags that themselves contain
    dashes are handled on the non-dev path; malformed input raises ValueError
    instead of relying on ``assert`` (which vanishes under ``-O``).
    """
    dirty = version.endswith('-dirty')
    if dirty:
        version = version[:-len('-dirty')]
    # rsplit keeps a dash-containing tag intact; raises ValueError when the
    # string does not have at least <tag>-<count>-<sha>.
    tag, count, sha = version.rsplit('-', 2)
    if is_dev:
        # Dev builds target the *next* minor release: 0.9.x -> 0.10.0.
        major, minor, _patch = map(int, tag.split("."))  # "0.9.0" -> (0, 9, 0)
        tag = "{}.{}.{}".format(major, minor + 1, 0)
    if count == '0' and not dirty:
        return tag
    # 'g' is git's literal prefix, never part of the hex sha, so lstrip is safe.
    return fmt.format(tag=tag, gitsha=sha.lstrip('g'))
def get_git_version(is_tagged: bool) -> str:
    """Run ``git describe`` (the module-level ``command``) and format its
    output: tagged checkouts keep the tag, anything else becomes a dev
    version for the next minor release."""
    raw = subprocess.check_output(command.split()).decode('utf-8').strip()
    if is_tagged:
        return format_version(version=raw, fmt=fmt_tagged)
    return format_version(version=raw, fmt=fmt_dev, is_dev=True)
def check_if_tagged() -> bool:
    """Return True when HEAD carries at least one tag, i.e. when
    ``git tag --list --points-at=HEAD`` prints anything."""
    tags = subprocess.check_output(is_tagged_cmd.split()).decode('utf-8').strip()
    return bool(tags)
def get_pkg_info_version(pkg_info_file):
    """Version recorded in the installed distribution metadata.

    ``pkg_info_file`` is accepted for caller compatibility but is not read:
    pkg_resources' ``get_distribution`` locates the metadata itself.
    """
    return get_distribution(PACKAGE_NAME).version
def get_version() -> str:
    """Best-effort version lookup.

    Order of preference: git metadata (when a ``.git`` directory exists),
    then the egg-info PKG-INFO, then a hard-coded fallback.  Every git step
    is wrapped so a broken/absent git never aborts the build.
    """
    fallback = "0.11.2"
    pkg_info = os.path.join(CURRENT_DIR, "{}.egg-info".format(PACKAGE_NAME), "PKG-INFO")
    if os.path.isdir(os.path.join(CURRENT_DIR, ".git")):
        try:
            tagged = check_if_tagged()
        except Exception:
            tagged = False
        try:
            return get_git_version(tagged)
        except Exception:
            pass  # fall through to the metadata / hard-coded fallbacks
    if os.path.isfile(pkg_info):
        return get_pkg_info_version(pkg_info)
    return fallback
# Resolve the package version once; echoed so build/CI logs show what was built.
version = get_version()
print(version)
# Route builds through the custom CMake-driven build_ext implementation.
cmdclass = {
    'build_ext': BuildLibrary,
}
setup(
    distclass=LiefDistribution,
    ext_modules=[Module(PACKAGE_NAME)],
    cmdclass=cmdclass,
    version=version
)
| 36.494577 | 204 | 0.584582 |
8fda5d4f03ffd139901adbd5a576cb1416e8022c | 330 | py | Python | other/dingding/dingtalk/api/rest/OapiRhinoMosSpacePoiUpsertRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiRhinoMosSpacePoiUpsertRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiRhinoMosSpacePoiUpsertRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2020.03.07
'''
from dingtalk.api.base import RestApi
class OapiRhinoMosSpacePoiUpsertRequest(RestApi):
    """Auto-generated request wrapper for the DingTalk OpenAPI endpoint
    ``dingtalk.oapi.rhino.mos.space.poi.upsert``."""

    def __init__(self, url=None):
        RestApi.__init__(self, url)
        # Request payload; populated by the caller before dispatch.
        self.request = None

    def getHttpMethod(self):
        """HTTP verb this endpoint is invoked with."""
        return 'POST'

    def getapiname(self):
        """Dotted API name the DingTalk gateway routes on."""
        return 'dingtalk.oapi.rhino.mos.space.poi.upsert'
| 22 | 51 | 0.757576 |
6de60261329210d9d51d0a2e28b13d0976bf29b4 | 182 | py | Python | algorithms/pickingNumbers.py | marismarcosta/hackerrank | 3580b4fe0094e2a13f9a7efeeb0e072810be9ebf | [
"MIT"
] | null | null | null | algorithms/pickingNumbers.py | marismarcosta/hackerrank | 3580b4fe0094e2a13f9a7efeeb0e072810be9ebf | [
"MIT"
] | 3 | 2020-09-27T22:57:05.000Z | 2020-09-29T23:07:44.000Z | algorithms/pickingNumbers.py | marismarcosta/hackerrank-challenges | 3580b4fe0094e2a13f9a7efeeb0e072810be9ebf | [
"MIT"
def pickingNumbers(a):
    """HackerRank "Picking Numbers": size of the largest subset of ``a``
    whose maximum and minimum differ by at most 1.

    The original rescanned the list with ``a.count`` inside the loop
    (O(n^2)); counting each value once with a Counter makes this O(n)
    while returning identical results.  Empty input yields 0.
    """
    from collections import Counter  # local import keeps the snippet self-contained
    counts = Counter(a)
    # The best subset always consists of some value v together with v + 1
    # (a missing key in a Counter counts as zero).
    return max((counts[v] + counts[v + 1] for v in counts), default=0)
db016e64f00cac2beaa9f09b604ef256e228537b | 1,065 | py | Python | book2/genhurst.py | welly87/epchanbooks | 6b3aa7f4b2656489149e557519997d14e962d75f | [
"Apache-2.0"
] | 4 | 2021-04-18T04:30:20.000Z | 2022-02-03T01:49:27.000Z | book2/genhurst.py | welly87/epchanbooks | 6b3aa7f4b2656489149e557519997d14e962d75f | [
"Apache-2.0"
] | null | null | null | book2/genhurst.py | welly87/epchanbooks | 6b3aa7f4b2656489149e557519997d14e962d75f | [
"Apache-2.0"
] | 1 | 2022-01-28T06:51:19.000Z | 2022-01-28T06:51:19.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 21 17:08:58 2018
@author: Ernest
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm
def genhurst(z):
    """Estimate the Hurst exponent H of a log-price series ``z``.

    Regresses the log variance of tau-lagged differences on log(tau) via
    OLS; under diffusive scaling Var(tau) ~ tau^(2H), so H is half the
    fitted slope.  Returns ``(H, pVal)`` where ``pVal`` is the p-value of
    the slope coefficient.

    NOTE(review): assumes ``z`` is a 1-D series long enough that
    ``len(z) // 10 >= 2``, otherwise too few lags survive the finiteness
    filter to regress — confirm with callers.
    """
    # =============================================================================
    # calculation of Hurst exponent given log price series z
    # =============================================================================
    z = pd.DataFrame(z)
    taus = np.arange(np.round(len(z) / 10)).astype(
        int)  # We cannot use tau that is of same magnitude of time series length
    logVar = np.empty(len(taus))  # log variance of tau-lagged differences
    for tau in taus:
        # tau == 0 gives zero differences -> var 0 -> log is -inf; such
        # entries are removed by the isfinite mask below.
        logVar[tau] = np.log(z.diff(tau).var(ddof=0))
    X = np.log(taus)
    Y = logVar[:len(taus)]
    # Keep only lags whose log-variance is finite (same mask for X and Y).
    X = X[np.isfinite(logVar)]
    Y = Y[np.isfinite(logVar)]
    # pd.DataFrame(np.asmatrix([X, Y]).T).to_csv('XY.csv')
    X = sm.add_constant(X)
    # plt.scatter(X[:,1], Y) # for debug only
    model = sm.OLS(Y, X)
    results = model.fit()
    # Slope of log Var vs log tau equals 2H.
    H = results.params[1] / 2
    pVal = results.pvalues[1]
    return H, pVal
| 28.026316 | 83 | 0.510798 |
6e0c92074c1e85ed2de813795c1f826b02f68847 | 18,651 | py | Python | myquanwei.com/jibingreader.py | wangzhenjjcn/YSW_Spider | 86afe201350519dad84c05a223f03293bdee001b | [
"Apache-2.0"
] | 1 | 2018-05-16T23:12:50.000Z | 2018-05-16T23:12:50.000Z | myquanwei.com/jibingreader.py | wangzhenjjcn/YSW_Spider | 86afe201350519dad84c05a223f03293bdee001b | [
"Apache-2.0"
] | null | null | null | myquanwei.com/jibingreader.py | wangzhenjjcn/YSW_Spider | 86afe201350519dad84c05a223f03293bdee001b | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
import urllib2,sys,time,datetime,os,requests,urllib,re
# Append-mode logs: visited category links and the scraped detail rows (CSV).
read_file=open("readedDisease.txt","a")
readall_file=open("readedDiseasedetial.txt","a")
webdata={}  # detail-page URL -> recorded line / "category1->category2" label
firstURL="http://weixin.myquanwei.com/newwx/commonDisease"
# Seed webdata with links recorded by earlier runs so they are scraped too.
readed_file = open("readedDisease.txt","r")
for lines in readed_file:
    # First space-separated token of each recorded line is the URL.
    data = lines.strip("\n").split(" ")[0]
    webdata[data]=lines
hdr = {
'Host':"weixin.myquanwei.com",
'user-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Connection': 'keep-alive',
'cookie':"huaweielbsession=f67b5926a5b4f89e69c69caa1abc9931; __jsluid=67afc29719df7db3211ded2f8039a7ce; UM_distinctid=16308260a0b657-0f261135daea6-39614101-1fa400-16308260a0d4ab; ASP.NET_SessionId=pcmoudz4fg2yvi0vmcdbppgu; subcatalogflag_ssl=1; hotkeywords=%E6%8B%9C%E9%98%BF%E5%8F%B8%E5%8C%B9%E7%81%B5%23%231%23%23med%23%23%24%23%23medicine-584398.html%23%23%24%40%40%E8%A1%A5%E8%A1%80%23%231%23%23wwwsearch%23%23%24%23%23search.html%23%23keyword%3D%25e8%25a1%25a5%25e8%25a1%2580%40%40999%23%230%23%23other_https%3A%2F%2Fwww.yaofangwang.com%2Fsearch%2F13791.html%40%40%E7%89%87%E4%BB%94%E7%99%80%23%231%23%23other_https%3A%2F%2Fwww.yaofangwang.com%2Fsearch%2F39735.html%40%40%E5%A6%88%E5%AF%8C%E9%9A%86%23%230%23%23wwwsearch%23%23%24%23%23search.html%23%23keyword%3D%25e5%25a6%2588%25e5%25af%258c%25e9%259a%2586%40%40%E9%98%BF%E8%83%B6%23%231%23%23other_https%3A%2F%2Fwww.yaofangwang.com%2Fsearch%2F11442.html%40%40%E9%87%91%E6%88%88%23%230%23%23other_https%3A%2F%2Fwww.yaofangwang.com%2Fsearch%2F30642.html%40%40%E6%B1%A4%E8%87%A3%E5%80%8D%E5%81%A5%23%230%23%23other_https%3A%2F%2Fwww.yaofangwang.com%2Fsearch%2F50493.html; Hm_lvt_e5f454eb1aa8e839f8845470af4667eb=1524850101; cartcount=0; historysearch=; CNZZDATA1261831897=1577417923-1524847327-null%7C1524852728; Hm_lpvt_e5f454eb1aa8e839f8845470af4667eb=1524852782; topnavflag_ssl=1"
}
hr = re.compile(r'<[^>]+>',re.S)
# Fetch the disease catalogue page and harvest every detail-page link by
# slicing the raw HTML (no parser; relies on the page's exact markup).
try:
    cateLogpage=urllib2.urlopen(firstURL)
except Exception,e:
    print 'openERR:'
    print str(e)
    pass
else:
    if cateLogpage:
        catalogPageTxt=cateLogpage.read()
        # Category markup lives inside the first <div class="twoPill"> block.
        data=catalogPageTxt.split("<div class=\"twoPill\"")[1].split("</div>")[0]
        links=data.split("<dl>")
        for j in range(1,len(links)):
            print "j"
            print j
            # <dt> holds the top-level category name (UTF-8 bytes).
            name=links[j].split("</dt>")[0].split("<dt>")[1]
            print "name:"
            print name.decode('utf-8').encode(sys.getfilesystemencoding())
            # Each disease entry is a <p onclick="toDiseaseIndex('<id>',...)">.
            data2=links[j].split("<dd")[1].split("</dd>")[0].split("<p onclick=\"toDiseaseIndex(\'")
            for k in range(1,len(data2)):
                if(">" in data2[k]):
                    print "k"
                    print k
                    name2=data2[k].split(">")[1].split("</p")[0]
                    print name2.decode('utf-8').encode(sys.getfilesystemencoding())
                    # The onclick id doubles as the detail-page path segment.
                    link2="http://weixin.myquanwei.com/newwx/diseaseDetail/"+data2[k].split("\',")[0]
                    print link2
                    webdata[link2]=name+"->"+name2
                    print (name+"->"+name2).decode('utf-8').encode(sys.getfilesystemencoding())
                    # Persist the link so later runs re-seed webdata from disk.
                    read_file.write(link2+" "+name+"->"+name2+"\n")
                    read_file.flush()
                    pass
print ""
print "detial start:"
print ""
# CSV header row for the detail dump, re-encoded to the local filesystem
# encoding so it opens correctly on the machine running the scrape.
readall_file.write("类别1,类别2,地址,名称,简介,病因,病因详情,疾病特点,疾病特点详情,易混淆疾病,易混淆疾病详情,治疗原则,治疗原则详情,合理生活习惯,合理生活习惯详情,组方原则\n".decode('utf-8').encode(sys.getfilesystemencoding()))
readall_file.flush()
print len(webdata.keys())
for weblink in webdata.keys():
print weblink
try:
#testurl="http://weixin.myquanwei.com/newwx/diseaseDetail/0101"
detialPage=urllib2.urlopen(weblink)
#detialPage=urllib2.urlopen(testurl)
except Exception,e:
print 'openERR:'
print str(e)
pass
else:
if detialPage:
linedata=webdata[weblink]
#http://weixin.myquanwei.com/wx/fourPill/101908 中成药->中成药->>祛风剂--[华康]风湿关节炎片
dizhi=weblink
#print linedata.decode('utf-8').encode(sys.getfilesystemencoding())
fenlei1=linedata.strip("\n").split("->")[0]
fenlei2=linedata.strip("\n").split("->")[1]
detialPageTxt=detialPage.read()
title=re.sub(r'\s', '', hr.sub('',(detialPageTxt.split("<h3>")[1].split("</h3>")[0]))).replace(",",",") .strip("\n")
brief=re.sub(r'\s', '', hr.sub('',(detialPageTxt.split("</h3>")[1].split("</p>")[0]))).replace(",",",") .strip("\n")
#print title.decode('utf-8').encode(sys.getfilesystemencoding())
#print brief.decode('utf-8').encode(sys.getfilesystemencoding())
bingyin=""
bingyinDetial=""
jibingtedian=""
jibingtedianDetial=""
yihunxiaojibing=""
yihunxiaojibingDetial=""
zhiliaoyuanze=""
zhiliaoyuanzeDetial=""
helishenghuoxiguan=""
helishenghuoxiguanDetial=""
zufangyuanze=""
if("病因</h3>"in detialPageTxt):
#bingyin=detialPageTxt.split("病因</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
bingyinid=detialPageTxt.split("病因</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
try:
bingyinurl="http://weixin.myquanwei.com/newwx/diseaseSkip/"+weblink.split("diseaseDetail/")[1]+"/"+bingyinid
#print bingyinurl
bingyinDetialPage=urllib2.urlopen(bingyinurl)
except Exception,e:
print 'openERR:'
print str(e)
else:
if bingyinDetialPage:
bingyinDetialPageTxt=bingyinDetialPage.read()
if("</p>" in bingyinDetialPageTxt):
bingyin=re.sub(r'\s', '', hr.sub('',bingyinDetialPageTxt.split("<p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print bingyin.decode('utf-8').encode(sys.getfilesystemencoding())
bingyinDetial=re.sub(r'\s', '', hr.sub('',bingyinDetialPageTxt.split("</p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print bingyinDetial.decode('utf-8').encode(sys.getfilesystemencoding())
else:
bingyin=re.sub(r'\s', '', hr.sub('',bingyinDetialPageTxt))
bingyinDetial=re.sub(r'\s', '', hr.sub('',bingyinDetialPageTxt))
if("疾病特点</h3>"in detialPageTxt):
jibingtedianid=detialPageTxt.split("疾病特点</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
try:
jibingtedianurl="http://weixin.myquanwei.com/newwx/diseaseSkip/"+weblink.split("diseaseDetail/")[1]+"/"+jibingtedianid
#print jibingtedianurl
jibingtedianDetialPage=urllib2.urlopen(jibingtedianurl)
except Exception,e:
print 'openERR:'
print str(e)
else:
if jibingtedianDetialPage:
jibingtedianDetialPageTxt=jibingtedianDetialPage.read()
if("</p>" in jibingtedianDetialPageTxt):
jibingtedian=re.sub(r'\s', '', hr.sub('',jibingtedianDetialPageTxt.split("<p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print jibingtedian.decode('utf-8').encode(sys.getfilesystemencoding())
jibingtedianDetial=re.sub(r'\s', '', hr.sub('',jibingtedianDetialPageTxt.split("</p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print jibingtedianDetial.decode('utf-8').encode(sys.getfilesystemencoding())
else:
jibingtedian=re.sub(r'\s', '', hr.sub('',jibingtedianDetialPageTxt))
jibingtedianDetial=re.sub(r'\s', '', hr.sub('',jibingtedianDetialPageTxt))
if("易混淆疾病</h3>"in detialPageTxt):
yihunxiaojibingid=detialPageTxt.split("易混淆疾病</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
try:
yihunxiaojibingurl="http://weixin.myquanwei.com/newwx/diseaseSkip/"+weblink.split("diseaseDetail/")[1]+"/"+yihunxiaojibingid
#print yihunxiaojibingurl
yihunxiaojibingDetialPage=urllib2.urlopen(yihunxiaojibingurl)
except Exception,e:
print 'openERR:'
print str(e)
else:
if yihunxiaojibingDetialPage:
yihunxiaojibingDetialPageTxt=yihunxiaojibingDetialPage.read()
if("</p>" in yihunxiaojibingDetialPageTxt):
yihunxiaojibing=re.sub(r'\s', '', hr.sub('',yihunxiaojibingDetialPageTxt.split("<p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print yihunxiaojibing.decode('utf-8').encode(sys.getfilesystemencoding())
yihunxiaojibingDetial=re.sub(r'\s', '', hr.sub('',yihunxiaojibingDetialPageTxt.split("</div>")[0].replace(",",",").replace("</h4>",">>").replace("</p>","。").replace("</a>","、").replace("<span>","·"))).replace(",",",") .strip("\n")
#print yihunxiaojibingDetial.decode('utf-8').encode(sys.getfilesystemencoding())
else:
yihunxiaojibing=re.sub(r'\s', '', hr.sub('',yihunxiaojibingDetialPageTxt))
yihunxiaojibingDetial=re.sub(r'\s', '', hr.sub('',yihunxiaojibingDetialPageTxt))
if("治疗原则</h3>"in detialPageTxt):
zhiliaoyuanzeid=detialPageTxt.split("治疗原则</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
try:
zhiliaoyuanzeurl="http://weixin.myquanwei.com/newwx/diseaseSkip/"+weblink.split("diseaseDetail/")[1]+"/"+zhiliaoyuanzeid
#print zhiliaoyuanzeurl
zhiliaoyuanzeDetialPage=urllib2.urlopen(zhiliaoyuanzeurl)
except Exception,e:
print 'openERR:'
print str(e)
else:
if zhiliaoyuanzeDetialPage:
zhiliaoyuanzeDetialPageTxt=zhiliaoyuanzeDetialPage.read()
if("</p>" in zhiliaoyuanzeDetialPageTxt):
zhiliaoyuanze=re.sub(r'\s', '', hr.sub('',zhiliaoyuanzeDetialPageTxt.split("<p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print zhiliaoyuanze.decode('utf-8').encode(sys.getfilesystemencoding())
zhiliaoyuanzeDetial=re.sub(r'\s', '', hr.sub('',zhiliaoyuanzeDetialPageTxt.split("</p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print zhiliaoyuanzeDetial.decode('utf-8').encode(sys.getfilesystemencoding())
else:
zhiliaoyuanze=re.sub(r'\s', '', hr.sub('',zhiliaoyuanzeDetialPageTxt))
zhiliaoyuanzeDetial=re.sub(r'\s', '', hr.sub('',zhiliaoyuanzeDetialPageTxt))
if("合理生活习惯</h3>"in detialPageTxt):
helishenghuoxiguanid=detialPageTxt.split("合理生活习惯</h3>")[1].split("</div>")[0].split("id=\"")[1].split("\">")[0]
try:
helishenghuoxiguanurl="http://weixin.myquanwei.com/newwx/diseaseSkip/"+weblink.split("diseaseDetail/")[1]+"/"+helishenghuoxiguanid
print helishenghuoxiguanurl
helishenghuoxiguanDetialPage=urllib2.urlopen(helishenghuoxiguanurl)
except Exception,e:
print 'openERR:'
print str(e)
else:
if helishenghuoxiguanDetialPage:
helishenghuoxiguanDetialPageTxt=helishenghuoxiguanDetialPage.read()
if("</p>" in helishenghuoxiguanDetialPageTxt):
helishenghuoxiguan=re.sub(r'\s', '', hr.sub('',helishenghuoxiguanDetialPageTxt.split("<p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print helishenghuoxiguan.decode('utf-8').encode(sys.getfilesystemencoding())
helishenghuoxiguanDetial=re.sub(r'\s', '', hr.sub('',helishenghuoxiguanDetialPageTxt.split("</p>")[1].split("</p>")[0])).replace(",",",") .strip("\n")
#print helishenghuoxiguanDetial.decode('utf-8').encode(sys.getfilesystemencoding())
else:
helishenghuoxiguan=re.sub(r'\s', '', hr.sub('',helishenghuoxiguanDetialPageTxt))
helishenghuoxiguanDetial=re.sub(r'\s', '', hr.sub('',helishenghuoxiguanDetialPageTxt))
zufang=detialPageTxt.split("<div class=\"zl\">")
for i in range(1,len(zufang)):
zufangi=zufang[i].split("</div>")[0].replace(",",",").replace("</h4>",":").replace("</p>","。").replace("</a>","、").replace("<h4>","·")
zufangyuanze+=re.sub(r'\s', '', hr.sub('',zufangi))
#print zufangyuanze.decode('utf-8').encode(sys.getfilesystemencoding())
#print (fenlei1+","+fenlei2+","+dizhi+","+title+","+brief+","+bingyin+","+bingyinDetial+","+jibingtedian+","+jibingtedianDetial+","+yihunxiaojibing+","+yihunxiaojibingDetial+","+helishenghuoxiguan+","+helishenghuoxiguanDetial+zufangyuanze+"\n").decode('utf-8').encode(sys.getfilesystemencoding())
readall_file.write((fenlei1.replace(",",",") .strip("\n")+","+fenlei2.replace(",",",") .strip("\n")+","+dizhi.replace(",",",") .strip("\n")+","+title.replace(",",",") .strip("\n")+","+brief.replace(",",",") .strip("\n")+","+bingyin.replace(",",",") .strip("\n")+","+bingyinDetial.replace(",",",") .strip("\n")+","+jibingtedian.replace(",",",") .strip("\n")+","+jibingtedianDetial.replace(",",",") .strip("\n")+","+yihunxiaojibing.replace(",",",") .strip("\n")+","+yihunxiaojibingDetial.replace(",",",") .strip("\n")+","+helishenghuoxiguan.replace(",",",") .strip("\n")+","+helishenghuoxiguanDetial+zufangyuanze.replace(",",",") .strip("\n")+"\n").strip("?").decode('utf-8').encode(sys.getfilesystemencoding()))
readall_file.flush()
pass
readall_file.close() | 78.696203 | 1,344 | 0.446089 |
36858893fd390b02e898d096327c364b60617962 | 14,322 | py | Python | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_va_sql_scenario.py | tyler-lloyd/azure-cli | 5e999e49594ad51557c05b9c55e00c3c16932575 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_va_sql_scenario.py | tyler-lloyd/azure-cli | 5e999e49594ad51557c05b9c55e00c3c16932575 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/security/tests/latest/test_va_sql_scenario.py | tyler-lloyd/azure-cli | 5e999e49594ad51557c05b9c55e00c3c16932575 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import json
import tempfile
from azure.cli.testsdk import (ScenarioTest, LiveScenarioTest, ResourceGroupPreparer, LogAnalyticsWorkspacePreparer)
from azure.cli.testsdk.base import execute
from azure.cli.core.mock import DummyCli
from azure.cli.testsdk.exceptions import CliTestError
from .preparers import (SqlVirtualMachinePreparer)
class VulnerabilityAssessmentForSqlTests(LiveScenarioTest):
    """Live end-to-end scenario for ``az security va sql``: provisions the OMS
    agent + VA pack on a prepared SQL VM, waits for a scan, then exercises the
    scans/results/baseline commands and cross-checks their outputs."""

    @ResourceGroupPreparer()
    @SqlVirtualMachinePreparer()
    @LogAnalyticsWorkspacePreparer()
    def test_va_sql_scenario(self, resource_group, resource_group_location, sqlvm, laworkspace):
        """Full scans -> results -> baseline set/show/list/delete/update flow."""
        # Use prepared sql virtual machine and log analytics workspace to setup OMS agent with VA management pack
        _enable_intelligence_pack(resource_group, laworkspace)
        la_workspace_id = _get_log_analytics_workspace_id(resource_group, laworkspace)
        la_workspace_key = _get_log_analytics_workspace_key(resource_group, laworkspace)
        _set_vm_registry(resource_group, sqlvm)
        _install_oms_agent_on_vm(self, sqlvm, resource_group, la_workspace_id, la_workspace_key)
        _restart_monitoring_service(resource_group, sqlvm) # Force start scan
        time.sleep(60 * 3) # Graceful sleep
        # API parameters
        resource_id = _get_resource_id(resource_group, sqlvm)
        workspace_id = la_workspace_id
        server_name = "MSSQLSERVER"
        database_name = "master"
        # Test logic
        # Verify scan results arrived
        scan_summaries = self.cmd('az security va sql scans list --vm-resource-id {} --workspace-id {} --server-name {} --database-name {}'
                                  .format(resource_id, workspace_id, server_name, database_name)).get_output_in_json()
        # NOTE(review): scan_id is immediately overwritten on the next line; the
        # chained assignment here is redundant.
        selected_scan_summary = scan_id = scan_summaries["value"][0]
        scan_id = selected_scan_summary["name"]
        scan_summary = self.cmd('az security va sql scans show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --scan-id {}'
                                .format(resource_id, workspace_id, server_name, database_name, scan_id)).get_output_in_json()
        self.assertEqual(selected_scan_summary, scan_summary)
        scan_results = self.cmd('az security va sql results list --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --scan-id {}'
                                .format(resource_id, workspace_id, server_name, database_name, scan_id)).get_output_in_json()
        scan_results_count = len(scan_results["value"])
        # Only multi-column rules are useful for the baseline-input helpers below.
        selected_scan_results = _get_scans_with_multiple_columns_in_results(scan_results)
        selected_scan_result = selected_scan_results[0]
        rule_id = selected_scan_result["name"]
        scan_result = self.cmd('az security va sql results show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --scan-id {} --rule-id {}'
                               .format(resource_id, workspace_id, server_name, database_name, scan_id, rule_id)).get_output_in_json()
        self.assertEqual(selected_scan_result, scan_result)
        # Verify no baseline exists
        _assert_error('az security va sql baseline list --vm-resource-id {} --workspace-id {} --server-name {} --database-name {}'
                      .format(resource_id, workspace_id, server_name, database_name), error_reason='no baseline set')
        _assert_error('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                      .format(resource_id, workspace_id, server_name, database_name, rule_id), error_reason='no baseline set')
        # Set baseline with latest results
        baseline_set_result = self.cmd('az security va sql baseline set --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --latest'
                                       .format(resource_id, workspace_id, server_name, database_name)).get_output_in_json()
        baseline_list_result = self.cmd('az security va sql baseline list --vm-resource-id {} --workspace-id {} --server-name {} --database-name {}'
                                        .format(resource_id, workspace_id, server_name, database_name)).get_output_in_json()
        self.assertEqual(scan_results_count, len(baseline_list_result["value"]))
        self.assertEqual(baseline_set_result["value"], baseline_list_result["value"])
        selected_baseline = [baseline for baseline in baseline_list_result["value"] if baseline["name"] == rule_id][0]
        baseline_show_result = self.cmd('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                                        .format(resource_id, workspace_id, server_name, database_name, rule_id)).get_output_in_json()
        self.assertEqual(selected_baseline, baseline_show_result)
        # Delete an arbitrary baseline and verify it is deleted
        self.cmd('az security va sql baseline delete --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                 .format(resource_id, workspace_id, server_name, database_name, rule_id))
        _assert_error('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                      .format(resource_id, workspace_id, server_name, database_name, rule_id), error_reason='no baseline set (baseline deleted)')
        # Update baseline for single rule with latest results
        baseline_update_result = self.cmd('az security va sql baseline update --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {} --latest'
                                          .format(resource_id, workspace_id, server_name, database_name, rule_id)).get_output_in_json()
        baseline_show_result = self.cmd('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                                        .format(resource_id, workspace_id, server_name, database_name, rule_id)).get_output_in_json()
        self.assertEqual(baseline_update_result["properties"], baseline_show_result["properties"])
        # Update baseline for single rule with custom results
        baseline_input = _get_single_baseline_input(selected_scan_result)
        baseline_update_result = self.cmd('az security va sql baseline update --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {} {}'
                                          .format(resource_id, workspace_id, server_name, database_name, rule_id, baseline_input)).get_output_in_json()
        baseline_show_result = self.cmd('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                                        .format(resource_id, workspace_id, server_name, database_name, rule_id)).get_output_in_json()
        self.assertEqual(baseline_update_result["properties"], baseline_show_result["properties"])
        # Update baseline for multiple rule with custom results
        selected_scan_result_2 = selected_scan_results[1]
        rule_id_2 = selected_scan_result_2["name"]
        baseline_input = _get_multiple_baseline_input(rule_id, selected_scan_result, rule_id_2, selected_scan_result_2)
        baseline_set_result = self.cmd('az security va sql baseline set --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} {}'
                                       .format(resource_id, workspace_id, server_name, database_name, baseline_input)).get_output_in_json()
        baseline_list_result = self.cmd('az security va sql baseline list --vm-resource-id {} --workspace-id {} --server-name {} --database-name {}'
                                        .format(resource_id, workspace_id, server_name, database_name)).get_output_in_json()
        baseline_show_result = self.cmd('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                                        .format(resource_id, workspace_id, server_name, database_name, rule_id)).get_output_in_json()
        baseline_show_result_2 = self.cmd('az security va sql baseline show --vm-resource-id {} --workspace-id {} --server-name {} --database-name {} --rule-id {}'
                                          .format(resource_id, workspace_id, server_name, database_name, rule_id_2)).get_output_in_json()
        baseline_rule_1 = [baseline for baseline in baseline_list_result["value"] if baseline["name"] == rule_id][0]
        baseline_rule_2 = [baseline for baseline in baseline_list_result["value"] if baseline["name"] == rule_id_2][0]
        self.assertEqual(baseline_rule_1, baseline_show_result)
        self.assertEqual(baseline_rule_2, baseline_show_result_2)
def _enable_intelligence_pack(resource_group, workspace_name):
    """Enable the SQLVulnerabilityAssessment solution pack on the Log
    Analytics workspace via the Azure CLI."""
    cmd = 'az monitor log-analytics workspace pack enable -n {} -g {} --workspace-name {}'.format(
        'SQLVulnerabilityAssessment', resource_group, workspace_name)
    execute(DummyCli(), cmd)
def _set_vm_registry(resource_group, sqlvm):
    """Run a PowerShell script on the VM that creates the VA log folders and
    sets the AzureOperationalInsights registry flags the scanner reads
    (log path, hash-check bypass, test-machine marker).

    NOTE: the backslash-continued string literal below keeps the leading
    spaces of each continuation line inside the command string; the CLI
    tolerates the extra whitespace.
    """
    template = 'az vm run-command invoke --command-id {} --name {} -g {} --scripts \'New-Item -ItemType Directory -Force -Path C:\\Users\\admin123\\Desktop\\Va_Logs\' \
    \'New-Item -ItemType Directory -Force -Path C:\\Users\\admin123\\Desktop\\Setup_Logs\' \
    \'New-Item -Path HKLM:\\Software\\Microsoft\\AzureOperationalInsights\' \
    \'Set-ItemProperty -Path HKLM:\\Software\\Microsoft\\AzureOperationalInsights -Name SqlVulnerabilityAssessment_LogDirectoryPath -Value C:\\Users\\admin123\\Desktop\\Va_Logs\' \
    \'Set-ItemProperty -Path HKLM:\\Software\\Microsoft\\AzureOperationalInsights -Name SqlVulnerabilityAssessment_BypassHashCheck -Value true\' \
    \'Set-ItemProperty -Path HKLM:\\Software\\Microsoft\\AzureOperationalInsights -Name SqlVulnerabilityAssessment_TestMachine -Value true\''
    execute(DummyCli(), template.format('RunPowerShellScript', sqlvm, resource_group))
def _install_oms_agent_on_vm(self, vm_name, resource_group, workspace_id, workspace_key):
    """Install/refresh the MicrosoftMonitoringAgent VM extension wired to the
    given workspace.

    ``self`` is the ScenarioTest instance (this is a module-level helper, so
    the test passes itself in explicitly to use ``self.kwargs``/``self.cmd``).
    """
    # The workspace id goes into the public settings, the key into the
    # protected settings, each as a small JSON temp file.
    public_config_file = _get_config_file('workspaceId', workspace_id)
    protected_config_file = _get_config_file('workspaceKey', workspace_key)
    self.kwargs.update({
        'vm': vm_name,
        'oms_publisher': 'Microsoft.EnterpriseCloud.Monitoring',
        'oms_extension': 'MicrosoftMonitoringAgent',
        'oms_version': '1.0',
        'public_config': public_config_file,
        'protected_config': protected_config_file
    })
    self.cmd('vm extension set --vm-name {vm} --resource-group {rg} \
             -n {oms_extension} --publisher {oms_publisher} --version {oms_version} \
             --settings "{public_config}" --protected-settings "{protected_config}" --force-update')
def _restart_monitoring_service(resource_group, sqlvm):
    """Restart the HealthService on the VM (after a short sleep) to force the
    VA scan to start instead of waiting for its schedule."""
    template = 'az vm run-command invoke --command-id {} --name {} -g {} --scripts \'Start-Sleep -Seconds 60\' \
    \'Restart-Service HealthService\''
    execute(DummyCli(), template.format('RunPowerShellScript', sqlvm, resource_group))
def _get_log_analytics_workspace_id(resource_group, workspace_name):
    """Return the workspace's ``customerId`` (the OMS workspace GUID)."""
    cmd = 'az monitor log-analytics workspace show --resource-group {} --workspace-name {}'.format(
        resource_group, workspace_name)
    return execute(DummyCli(), cmd).get_output_in_json()["customerId"]
def _get_log_analytics_workspace_key(resource_group, workspace_name):
    """Return the workspace's ``primarySharedKey`` used to enroll the agent."""
    cmd = 'az monitor log-analytics workspace get-shared-keys --resource-group {} --workspace-name {}'.format(
        resource_group, workspace_name)
    return execute(DummyCli(), cmd).get_output_in_json()["primarySharedKey"]
def _get_config_file(key, value):
    """Write ``{key: value}`` as JSON to a fresh temp file and return its path.

    Fix: ``tempfile.mkstemp`` returns an already-open OS file descriptor; the
    original discarded it (``_``) and opened the path a second time, leaking
    one fd per call.  Wrapping the descriptor with ``os.fdopen`` reuses the
    handle and closes it deterministically.  The caller owns the file's
    lifetime (it is handed to ``vm extension set`` as a settings file).
    """
    fd, config_file = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as outfile:
        json.dump({key: value}, outfile)
    return config_file
def _get_resource_id(resource_group, sqlvm):
    """Build the ARM resource id of the SQL VM from its resource group's id."""
    group_details = execute(DummyCli(), 'az group show -n {}'.format(resource_group)).get_output_in_json()
    group_id = group_details["id"]
    return f'{group_id}/providers/Microsoft.Compute/VirtualMachines/{sqlvm}'
def _get_scans_with_multiple_columns_in_results(scan_results):
    """Scans whose first query-result row has more than one column."""
    scans = scan_results["value"]
    return [s for s in scans if _has_multiple_columns(s["properties"]["queryResults"])]
def _has_multiple_columns(query_results):
    """True when there is at least one result row and it has > 1 column."""
    if not query_results:
        return False
    return len(query_results[0]) > 1
def _get_single_baseline_input(scan_result):
    """CLI ``--baseline`` arguments: two identical rows, one 'word' token per
    column of the rule's first query-result row."""
    columns = len(scan_result["properties"]["queryResults"][0])
    row = ' '.join(['word'] * columns)
    return '--baseline {} --baseline {}'.format(row, row)
def _get_multiple_baseline_input(rule_id, scan_result_1, scan_result_2=None, *extra):
    """CLI ``--baseline`` arguments covering two rules, one 'word' per column.

    NOTE(review): unlike _get_single_baseline_input, each row keeps its
    trailing space ('word ' * n is not stripped); the CLI tolerates the
    extra whitespace, and it is preserved here for behavioral parity.
    """
    # Signature kept positional-compatible with the original
    # (rule_id, scan_result_1, rule_id_2, scan_result_2).
    rule_id_2, scan_result_2 = scan_result_2, extra[0]
    row_1 = 'word ' * len(scan_result_1["properties"]["queryResults"][0])
    row_2 = 'word ' * len(scan_result_2["properties"]["queryResults"][0])
    return '--baseline rule={} {} --baseline rule={} {}'.format(rule_id, row_1, rule_id_2, row_2)
def _assert_error(cmd, error_reason):
    """Run *cmd* and succeed only if it raises; otherwise fail with *error_reason*.

    Raises:
        CliTestError: when the command unexpectedly completes without error.
    """
    try:
        execute(DummyCli(), cmd)
    except Exception:
        # Any CLI failure counts as "error raised" — the expected outcome.
        # BUGFIX: the original used a bare `except:`, which also swallowed
        # BaseException (KeyboardInterrupt, SystemExit).
        return
    # Reaching here means the command succeeded, which the caller did not expect.
    raise CliTestError('No error raised when expected. ' + error_reason)
| 66 | 192 | 0.687893 |
5a5bb9e02a5d3b79b2eaccaee8f998ce71dcfb3c | 1,585 | py | Python | Codes/asdf2014/6_zigzag_conversion/zigzag_conversion.py | Doslin/algorithm | df515afbe950cac46da7485e6bdac66f035b662f | [
"Apache-2.0"
] | null | null | null | Codes/asdf2014/6_zigzag_conversion/zigzag_conversion.py | Doslin/algorithm | df515afbe950cac46da7485e6bdac66f035b662f | [
"Apache-2.0"
] | null | null | null | Codes/asdf2014/6_zigzag_conversion/zigzag_conversion.py | Doslin/algorithm | df515afbe950cac46da7485e6bdac66f035b662f | [
"Apache-2.0"
] | null | null | null | # https://leetcode.com/problems/zigzag-conversion/
# P I N
# A L S I G
# Y A H R
# P I
# 0123 45 6789 1011 12131415
def zigzag_conversion(s, num_rows):
    """Write *s* in a zigzag across *num_rows* rows, then read it row by row."""
    if len(s) == 0 or num_rows <= 0:
        return ''
    if len(s) < 3 or num_rows == 1:
        return s
    # Accumulate characters per row while bouncing between top and bottom.
    rows = [''] * num_rows
    row, step = 0, 1
    for ch in s:
        rows[row] += ch
        if row == 0:
            step = 1
        elif row == num_rows - 1:
            step = -1
        row += step
    return ''.join(rows)
# Smoke tests: edge cases (empty input, single row/char) plus zigzag examples.
assert zigzag_conversion("", 0) == ""
assert zigzag_conversion("", 1) == ""
assert zigzag_conversion("A", 1) == "A"
assert zigzag_conversion("AB", 1) == "AB"
assert zigzag_conversion("AB", 2) == "AB"
assert zigzag_conversion("ABC", 1) == "ABC"
assert zigzag_conversion("ABC", 2) == "ACB"
assert zigzag_conversion("ABCD", 2) == "ACBD"
assert zigzag_conversion("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
assert zigzag_conversion("PAYPALISHIRING", 4) == "PINALSIGYAHRPI"
assert zigzag_conversion("YHAONOUU.MCWZ", 4) == "YUZHOUWAN.COM"
cac03fadd4618216d7ae7e2a2f233d3fd10d442d | 1,545 | py | Python | env/lib/python3.8/site-packages/plotly/validators/heatmap/colorbar/_tickfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/validators/heatmap/colorbar/_tickfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/validators/heatmap/colorbar/_tickfont.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated plotly validator for the `heatmap.colorbar.tickfont`
    # compound property; validation is delegated to the Tickfont data class.
    def __init__(
        self, plotly_name="tickfont", parent_name="heatmap.colorbar", **kwargs
    ):
        super(TickfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the graph-object class this compound property maps to.
            data_class_str=kwargs.pop("data_class_str", "Tickfont"),
            # Help text for the property's sub-attributes (shown in docstrings).
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            size

""",
            ),
            **kwargs
        )
0c230c931a1c9492033b0284c54af9608ac47748 | 3,628 | py | Python | app/waterQual/30yr/WRTDS/WRTDS-D.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/waterQual/30yr/WRTDS/WRTDS-D.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | null | null | null | app/waterQual/30yr/WRTDS/WRTDS-D.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z | import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
from hydroDL.data import usgs, gageII, gridMET, ntn, transform
import torch
import os
import json
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Experiment setup: constants, site selection and output folders for daily WRTDS.
# NOTE(review): startDate/endDate appear unused in this script — confirm.
# pd.datetime is deprecated in modern pandas (datetime.datetime is preferred).
startDate = pd.datetime(1979, 1, 1)
endDate = pd.datetime(2020, 1, 1)
sn = 1  # additive offset so log(x + sn) stays finite for zero values
codeLst = usgs.newC
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
    dictSite = json.load(f)
siteNoLst = dictSite['comb']
t0 = time.time()
dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-D',)
dirOut = os.path.join(dirRoot, 'B10')
# Create the output folders on first run.
for folder in [dirRoot, dirOut]:
    if not os.path.exists(folder):
        os.mkdir(folder)
# WRTDS window [Y Q S] copy from EGRET
fitAll = True
for k, siteNo in enumerate(siteNoLst):
    print(siteNo)
    # Skip sites that already have saved predictions.
    saveName = os.path.join(dirOut, siteNo)
    if os.path.exists(saveName):
        continue
    t0 = time.time()
    # Assemble the regression inputs: log-discharge plus seasonal harmonics.
    varQ = '00060'
    varLst = codeLst+[varQ]
    df = waterQuality.readSiteTS(siteNo, varLst=varLst, freq='D')
    dfYP = pd.DataFrame(index=df.index, columns=codeLst)
    dfX = pd.DataFrame({'date': df.index}).set_index('date')
    dfX = dfX.join(np.log(df[varQ]+sn)).rename(
        columns={varQ: 'logQ'})
    yr = dfX.index.year.values
    t = yr+dfX.index.dayofyear.values/365
    dfX['sinT'] = np.sin(2*np.pi*t)
    dfX['cosT'] = np.cos(2*np.pi*t)
    dfX['yr'] = yr
    dfX['t'] = t
    xVarLst = ['yr', 'logQ', 'sinT', 'cosT']
    # Only fit the codes for which this site is in the selection.
    fitCodeLst = list()
    for code in codeLst:
        if siteNo in dictSite[code]:
            fitCodeLst.append(code)
    for code in fitCodeLst:
        ind1 = np.where(yr < 2010)[0]   # training period (< 2010)
        ind2 = np.where(yr >= 2010)[0]  # testing period (>= 2010)
        dfXY = dfX.join(np.log(df[code]+sn))
        df1 = dfXY.iloc[ind1].dropna()
        if fitAll:
            df2 = dfXY[xVarLst+['t']].dropna()
        else:
            df2 = dfXY.iloc[ind2].dropna()  # only fit for observations now
        n = len(df1)
        if n == 0:
            break
        # Tri-cube weights within the [year, logQ, season] windows.
        h = np.array([7, 2, 0.5])  # window [Y Q S] from EGRET
        tLst = df2.index.tolist()
        for t in tLst:
            dY = np.abs((df2.loc[t]['t']-df1['t']).values)
            dQ = np.abs((df2.loc[t]['logQ']-df1['logQ']).values)
            dS = np.min(
                np.stack([abs(np.ceil(dY)-dY), abs(dY-np.floor(dY))]), axis=0)
            d = np.stack([dY, dQ, dS])
            if n > 100:
                # Widen the windows (x1.1) until >100 samples fall inside all.
                hh = np.repeat(h[:, None], n, axis=1)
                bW = False
                # FIX: was `while ~bW:` — `~` is bitwise on the initial Python
                # bool (truthy only by accident) and acts as logical NOT only
                # once bW becomes a numpy bool; `not` is correct for both.
                while not bW:
                    bW = np.min(np.sum((hh-d) > 0, axis=1)) > 100
                    hh = hh*1.1 if not bW else hh
            else:
                htemp = np.max(d, axis=1)*1.1
                hh = np.repeat(htemp[:, None], n, axis=1)
            w = (1-(d/hh)**3)**3
            w[w < 0] = 0
            wAll = w[0]*w[1]*w[2]
            ind = np.where(wAll > 0)[0]
            ww = wAll[ind]
            # Locally weighted least squares fit at time t.
            Y = df1.iloc[ind][code].values
            X = df1.iloc[ind][xVarLst].values
            model = sm.WLS(Y, X, weights=ww).fit()
            xp = df2.loc[t][xVarLst].values
            yp = model.predict(xp)[0]
            # FIX: single .loc indexer — chained `dfYP.loc[t][code] = ...`
            # may assign to a temporary copy and silently drop the value.
            dfYP.loc[t, code] = np.exp(yp)-sn
        t1 = time.time()
        print(k, siteNo, code, t1-t0)
    saveName = os.path.join(dirOut, siteNo)
    dfYP.to_csv(saveName)
a3a32c62fe98d3c4e0b3bd5be30a78233026c11d | 10,739 | py | Python | pineboolib/application/tests/test_types.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | [
"MIT"
] | 2 | 2017-12-10T23:06:16.000Z | 2017-12-10T23:06:23.000Z | pineboolib/application/tests/test_types.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | [
"MIT"
] | 36 | 2017-11-05T21:13:47.000Z | 2020-08-26T15:56:15.000Z | pineboolib/application/tests/test_types.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | [
"MIT"
] | 8 | 2017-11-05T15:56:31.000Z | 2019-04-25T16:32:28.000Z | """
Tests for application.types module.
"""
import unittest
import os
from pineboolib.loader.main import init_cli
from pineboolib.core import settings
from pineboolib.application import types
# Initialise the pineboo CLI runtime once at import time; the tested helpers
# rely on it (e.g. settings/temp-dir lookups).
init_cli()  # FIXME: This should be avoided
class TestBoolean(unittest.TestCase):
    """Test booleans."""

    def test_true(self) -> None:
        """Values that must coerce to True."""
        for value in (1, "True", "Yes", 0.8, True):
            self.assertEqual(types.boolean(value), True)

    def test_false(self) -> None:
        """Values that must coerce to False."""
        for value in (0, "False", "No", False):
            self.assertEqual(types.boolean(value), False)
class TestQString(unittest.TestCase):
    """Test QString."""

    def test_basic(self) -> None:
        """mid() must behave like Python slicing."""
        value = types.QString("hello world")
        self.assertEqual(value, "hello world")
        self.assertEqual(value.mid(5), value[5:])
        self.assertEqual(value.mid(5, 2), value[5:7])
class TestFunction(unittest.TestCase):
    """Test function. Parses QSA into Python."""

    def test_basic(self) -> None:
        """A generated function must evaluate its QSA body."""
        increment = types.function("x", "return x + 1")
        self.assertEqual(increment(1), 2)
class TestObject(unittest.TestCase):
    """Test object."""

    def test_basic1(self) -> None:
        """Attribute and item access must be interchangeable."""
        obj = types.object_()
        obj.prop1 = 1
        obj.prop2 = 2
        self.assertEqual(obj.prop1, obj["prop1"])

    def test_basic2(self) -> None:
        """Construction from a dict must expose keys as attributes."""
        obj = types.object_({"prop1": 1})
        self.assertEqual(obj.prop1, obj["prop1"])
class TestArray(unittest.TestCase):
    """Test Array class."""

    def test_basic1(self) -> None:
        """Basic testing."""
        array_ = types.Array()
        array_.value = 1
        # Attribute and item access must be interchangeable.
        self.assertEqual(array_.value, array_["value"])

    def test_basic2(self) -> None:
        """Basic testing."""
        test_arr = [0, 1, 2, 3, 4]
        array_ = types.Array(test_arr)
        array_b = types.Array(test_arr)
        self.assertEqual(array_[3], 3)
        # Internal storage (_dict) must preserve insertion order.
        self.assertEqual(list(array_._dict.values()), test_arr)
        self.assertEqual(len(array_), len(test_arr))
        self.assertEqual(array_, test_arr)
        self.assertEqual(array_[3], array_b[3])
        self.assertNotEqual(array_[3], array_b[0])
        test_arr = [3, 4, 2, 1, 0]
        array_ = types.Array(test_arr)
        self.assertEqual(list(array_._dict.values()), test_arr)
        array_.append(10)
        self.assertEqual(array_[5], 10)

    def test_basic3(self) -> None:
        """Basic Testing."""
        # Dict construction: keys become attributes, values stay index-accessible.
        test_arr = {"key_0": "item_0", "key_1": "item_1", "key_2": "item_2"}
        array_ = types.Array(test_arr)
        self.assertEqual(array_["key_0"], "item_0")
        self.assertEqual(array_.key_1, array_["key_1"])
        self.assertEqual(array_.length(), 3)
        self.assertEqual(array_[2], "item_2")
        self.assertEqual(list(array_._dict.values()), ["item_0", "item_1", "item_2"])

    def test_repr(self) -> None:
        """Test repr method."""
        test_arr = [3, 4, 5, 6, 7]
        array_ = types.Array(test_arr)
        self.assertEqual(repr(array_), "<Array %r>" % test_arr)

    def test_iter(self) -> None:
        """Test iterating arrays."""
        test_arr = [3, 4, 5, 6, 7]
        array_ = types.Array(test_arr)
        array_2 = [x for x in array_]
        self.assertEqual(test_arr, array_2)
        test_arr = [8, 7, 6, 4, 2]
        array_ = types.Array(test_arr)
        array_2 = [x for x in array_]
        self.assertEqual(test_arr, array_2)

    def test_splice(self) -> None:
        """Test splice."""
        # NOTE(review): per these expected values, splice(start, n) KEEPS the
        # n elements after start (QSA semantics), it does not delete them the
        # way JavaScript's splice does — confirm against types.Array.splice.
        test_arr = [3, 4, 5, 6, 7]
        array_ = types.Array(test_arr)
        array_.splice(1, 2)  # Delete
        self.assertEqual(str(array_), str(types.Array([4, 5])))
        array_2 = types.Array(test_arr)
        array_2.splice(2, 0, 9, 10)  # Insertion
        self.assertEqual(str(array_2), str(types.Array([3, 4, 5, 9, 10, 6, 7])))
        array_3 = types.Array(test_arr)
        array_3.splice(2, 1, 9, 10)  # Replace
        self.assertEqual(str(array_3), str(types.Array([3, 4, 9, 10, 6, 7])))
class TestDate(unittest.TestCase):
    """Test Date class."""

    # FIXME: Complete unit tests
    def test_basic1(self) -> None:
        """Date parts must round-trip from an ISO string."""
        when = types.Date("2001-02-25")
        self.assertEqual(when.getDay(), 25)
        self.assertEqual(when.getMonth(), 2)
        self.assertEqual(when.getYear(), 2001)
class TestString(unittest.TestCase):
    """TestString class."""

    # FIXME: Complete unit tests
    def test_fromCharCode(self) -> None:
        """fromCharCode must map code points to characters (or '' for no args)."""
        self.assertEqual(types.String.fromCharCode(13, 10), "\r\n")
        self.assertEqual(types.String.fromCharCode(), "")
class TestFile(unittest.TestCase):
    """Test File class."""

    def test_write_read_values_1(self) -> None:
        """Check that you read the same as you write."""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file.txt",
        )
        # Non-ASCII content ("Código") exercises the encoding round-trip.
        contenido = 'QT_TRANSLATE_NOOP("MetaData","Código")'
        contenido_3 = 'QT_TRANSLATE_NOOP("MetaData","Código")'
        types.File(temporal).write(contenido)
        contenido_2 = types.File(temporal).read()
        self.assertEqual(contenido, contenido_2)
        os.remove(temporal)
        types.File(temporal).write(contenido_3)
        contenido_4 = types.File(temporal).read()
        self.assertEqual(contenido_3, contenido_4)
        os.remove(temporal)

    def test_write_read_values_2(self) -> None:
        """Check that you read the same as you write."""
        # Same round-trip, but through the static helper API.
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file_static.txt",
        )
        contenido = 'QT_TRANSLATE_NOOP("MetaData","Código")'
        types.FileStatic.write(temporal, contenido)
        contenido_2 = types.FileStatic.read(temporal)
        self.assertEqual(contenido, contenido_2)
        os.remove(temporal)

    def test_write_read_bytes_1(self) -> None:
        """Check that you read the same as you write."""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file_bytes.txt",
        )
        contenido = "Texto escrito en bytes\n".encode("utf-8")
        types.File(temporal).write(contenido)
        # read(True) returns text; compare after re-encoding to utf-8.
        contenido_2 = types.File(temporal).read(True)
        self.assertEqual(contenido, contenido_2.encode("utf-8"))
        os.remove(temporal)

    def test_write_read_byte_1(self) -> None:
        """Check that you read the same as you write."""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file_bytes.txt",
        )
        contenido = "Texto\n".encode("utf-8")
        types.File(temporal).write(contenido)
        contenido_2 = types.File(temporal).read(True)
        self.assertEqual(contenido, contenido_2.encode("utf-8"))
        os.remove(temporal)

    def test_write_read_line_1(self) -> None:
        """Check that you read the same as you write."""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file_lines.txt",
        )
        contenido = "Esta es la linea"
        types.File(temporal).writeLine("%s 1" % contenido)
        # Second argument caps the written line at 4 characters.
        types.File(temporal).writeLine("%s 2" % contenido, 4)
        file_read = types.File(temporal)
        linea_1 = file_read.readLine()
        self.assertEqual("%s 1\n" % contenido, linea_1)
        linea_2 = file_read.readLine()
        self.assertEqual("%s" % contenido[0:4], linea_2)
        os.remove(temporal)

    def test_full_name_and_readable(self) -> None:
        """Check fullName"""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_types_file_full_name.txt",
        )
        contenido = 'QT_TRANSLATE_NOOP("MetaData","Código")'
        file_ = types.File(temporal)
        file_.write(contenido)
        self.assertEqual(file_.fullName, temporal)
        self.assertTrue(file_.readable())

    def test_last_modified(self) -> None:
        """Test lastModified."""
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_last_modified.txt",
        )
        contenido = 'QT_TRANSLATE_NOOP("MetaData","Código")'
        file_ = types.File(temporal)
        file_.write(contenido)
        file_.close()
        self.assertNotEqual(file_.lastModified(), "")

    def test_properties(self) -> None:
        """Check the path-derived properties of File.

        NOTE(review): this test reads the file left behind by
        test_last_modified (same path, never removed) and asserts size == 38,
        so it depends on test execution order — confirm before reordering.
        """
        temporal = "%s%s" % (
            settings.CONFIG.value("ebcomportamiento/temp_dir"),
            u"/test_last_modified.txt",
        )
        file_ = types.File(temporal)
        self.assertEqual(file_.path, settings.CONFIG.value("ebcomportamiento/temp_dir"))
        self.assertEqual(file_.fullName, temporal)
        self.assertEqual(file_.extension, ".txt")
        self.assertEqual(file_.baseName, "test_last_modified")
        self.assertTrue(file_.exists)
        self.assertEqual(file_.size, 38)
class TestDir(unittest.TestCase):
    """TestDir class."""

    def test_current(self) -> None:
        """Check Dir."""
        # Both the instance property and the static accessor must agree
        # with the process's current directory.
        self.assertEqual(os.curdir, types.Dir().current)
        self.assertEqual(os.curdir, types.DirStatic.current)

    def test_mkdir_rmdir(self) -> None:
        """Test mkdir and rmdir."""
        tmp_dir = settings.CONFIG.value("ebcomportamiento/temp_dir")
        my_dir = types.Dir(tmp_dir)
        my_dir.mkdir("test")
        self.assertTrue(os.path.exists("%s/test" % tmp_dir))
        my_dir.rmdirs("test")
        self.assertFalse(os.path.exists("%s/test" % tmp_dir))

    def test_change_dir(self) -> None:
        """Test change dir."""
        tmp_dir = settings.CONFIG.value("ebcomportamiento/temp_dir")
        my_dir = types.Dir(tmp_dir)
        original_dir = my_dir.current
        # NOTE(review): the sub-directory round-trip below is disabled and the
        # cdUp() result is never asserted, so this test only verifies that
        # cd()/cdUp() do not raise — consider restoring the assertions.
        # my_dir.mkdir("test_change_dir")
        # my_dir.cd("%s/test_change_dir" % tmp_dir)
        my_dir.cd(original_dir)
        self.assertEqual(my_dir.current, original_dir)
        my_dir.cdUp()
        # self.assertEqual(os.path.realpath(my_dir.current), tmp_dir)
        # my_dir.rmdirs("test_change_dir")
        my_dir.cd(original_dir)
| 33.984177 | 88 | 0.608902 |
05943c78530d0489a167738975a077277c284fd1 | 24,250 | py | Python | aes_lib.py | GeneralZero/ChaCha-and-Salsa | 51a96b8fb5470b3b23df707ca3fa0a79883995ff | [
"MIT"
] | 5 | 2021-02-17T03:15:48.000Z | 2022-01-25T15:20:37.000Z | aes_lib.py | GeneralZero/ChaCha-and-Salsa | 51a96b8fb5470b3b23df707ca3fa0a79883995ff | [
"MIT"
] | null | null | null | aes_lib.py | GeneralZero/ChaCha-and-Salsa | 51a96b8fb5470b3b23df707ca3fa0a79883995ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import binascii, struct
from cryptopals_lib import *
class AES():
    #Constants
    #Generated by
    '''
    round_constants = "\x00"
    for x in range(10):
        if round_constants[x] >= "\x80":
            round_constants[x+1] = round_constants[x] *2
        else:
            round_constants[x+1] = fixed_xor((round_constants[x] *2), '\x1b')
    '''
    # Key-schedule round constants (Rcon); index i holds Rcon[i].
    round_constants = bytearray([0x00,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36,0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91])

    #Initial state of the Sbox (256 bytes) From definition
    sbox = bytearray([0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67,
                      0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59,
                      0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7,
                      0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1,
                      0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05,
                      0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83,
                      0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29,
                      0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
                      0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa,
                      0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
                      0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc,
                      0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
                      0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19,
                      0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee,
                      0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49,
                      0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
                      0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4,
                      0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
                      0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70,
                      0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
                      0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e,
                      0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1,
                      0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0,
                      0x54, 0xbb, 0x16])

    # Inverse S-box: inverted_sbox[sbox[x]] == x for every byte x.
    inverted_sbox = bytearray([0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3,
                               0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f,
                               0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54,
                               0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b,
                               0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24,
                               0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8,
                               0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d,
                               0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
                               0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab,
                               0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3,
                               0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1,
                               0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
                               0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6,
                               0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9,
                               0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d,
                               0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
                               0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0,
                               0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07,
                               0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60,
                               0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f,
                               0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5,
                               0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b,
                               0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55,
                               0x21, 0x0c, 0x7d])
def __init__(self, key, cipher="ECB"):
#self.key = key
self.cipher_name = cipher
#Get Keysize for AES 128/192/256
self.keysize = len(key)
#print("Keysize: {}".format(self.keysize))
#Block Size is static for AES
self.block_size = 16
self._setup_keysize_info()
#Static blocksize of 4x4
#Doesn't depend on the key
self.state = [bytes(4) for i in range(4)]
self.rounds_keys = self._key_expantion(key)
#self._printkey_expantion(self.rounds_keys)
def _setup_keysize_info(self):
if self.keysize == 16:
self.rounds = 10
self.words = 4
self.key_expantion_size = self.keysize * 11
elif self.keysize == 24:
self.rounds = 12
self.words = 6
self.key_expantion_size = self.keysize * 11
elif self.keysize == 32:
self.rounds = 14
self.words = 8
self.key_expantion_size = self.keysize * (self.rounds +1)
else:
raise ValueError("Invalid key size %d".format(self.keysize))
def _printkey_expantion(self, keys):
row_size = len(keys) // 4
for x in range(4):
print(' '.join('{:02X}'.format(a) for a in keys[(x*row_size):((x+1)*row_size)-1]))
    def _key_expantion(self, key, row_idx=4):
        """Expand *key* into the full Rijndael key schedule.

        The schedule is stored row-major as ``row_idx`` (4) rows of
        ``key_expantion_size // row_idx`` bytes, i.e. schedule words are
        written DOWN the columns (transposed): byte ``r`` of word ``c`` lives
        at ``round_keys[r * row_size + c]``.
        """
        row_size = self.key_expantion_size // row_idx
        tmp_array = bytearray([0x00 for i in range(row_idx)])
        round_keys = bytearray(self.key_expantion_size)

        # Setup first key: copy the raw cipher key (transposed) into the
        # first `self.words` columns of the schedule.
        for idx in range(row_idx):
            round_keys[(idx*row_size):(idx*row_size)+ self.words] = [key[(row_idx*x) + idx] for x in range(self.words)]

        # Setup rest of the keys, one 4-byte word (column) at a time.
        for column_num in range(self.words, row_size):
            # Reset round_xor per column; it is non-zero only on Rcon columns.
            round_xor = bytearray([0x00 for i in range(row_idx)])

            # Load tmp_array with the previous word (column_num - 1).
            for idx in range(row_idx):
                tmp_array[idx] = round_keys[(row_size*idx )+ column_num -1]

            # First word of each round key: RotWord + SubWord + Rcon.
            if column_num % self.words == 0:
                # Rotate Word: shift the column one byte, top to bottom.
                tmp_array = tmp_array[1:] + tmp_array[:1]
                # Sub Bytes: substitute each byte through the S-box.
                tmp_array = [self.sbox[tmp_array[idx]] for idx in range(row_idx)]
                # Round constant is XORed into the first byte only.
                round_xor[0] = self.round_constants[(column_num//self.words)]
            # Specific case for a 256-bit key: an extra SubWord mid-schedule.
            elif self.keysize == 32 and column_num % self.words == 4:
                # Sub Bytes: substitute each byte through the S-box.
                tmp_array = [self.sbox[tmp_array[idx]] for idx in range(row_idx)]

            # Apply the round constant (no-op for non-Rcon columns).
            for idx in range(row_idx):
                tmp_array[idx] ^= round_xor[idx]

            # New word = transformed previous word XOR the word located
            # `self.words` columns earlier.
            for idx in range(row_idx):
                round_keys[(row_size*idx )+ column_num] = (tmp_array[idx] ^ round_keys[(row_size*idx )+ column_num -self.words])

        return round_keys
def _sub_bytes(self, input_bytes):
output_bytes = bytearray()
#Use the input_bytes as an index to lookup what to substitute from the sbox index.
for lookup in input_bytes:
output_bytes.append(self.sbox[lookup])
return output_bytes
def _shift_rows(self, input_byte_array, idx=4):
#No swap for row 1
input_byte_array[0:idx] = input_byte_array[0:idx]
#Rotate once for row 2
input_byte_array[idx:(idx*2)] = input_byte_array[idx+1:(idx*2)] + input_byte_array[idx:idx+1]
#Rotate twice for row 3
input_byte_array[(idx*2):(idx*3)] = input_byte_array[(idx*2)+2:(idx*3)] + input_byte_array[(idx*2):(idx*2)+2]
#Rotate thrice for row 4
#print(input_byte_array[(idx*3)+3:(idx*4)] + input_byte_array[(idx*3):(idx*4)-1])
input_byte_array[(idx*3):(idx*4)] = input_byte_array[(idx*3)+3:(idx*4)] + input_byte_array[(idx*3):(idx*4)-1]
return input_byte_array
def _mix_columns(self, input_num):
matrix_mult_row = [2,3,1,1]
mix_output = bytearray(16)
#First
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[0] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[1] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[2] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[3] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[4] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[5] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[6] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[7] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[8] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[9] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[10] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[11] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[12] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[13] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[14] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[15] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
return mix_output
    def _gmult(self, input_num, mult_parm):
        """Multiply *input_num* by *mult_parm* in GF(2^8) mod x^8+x^4+x^3+x+1.

        Only the multipliers used by (inverse) MixColumns are supported:
        1, 2 and 3 directly; 9, 11, 13 and 14 by decomposing into repeated
        doublings (xtime) plus XOR additions.
        """
        # for the reverse parts
        if mult_parm == 0x09:
            #Change from 9 to 2x2x2+1
            return self._gmult(self._gmult(self._gmult(input_num, 2), 2), 2) ^ input_num
        elif mult_parm == 0x0b:
            #Change from 11 to (2x2+1)x2+1
            return (self._gmult((self._gmult(self._gmult(input_num, 2), 2) ^ input_num), 2) ^ input_num)
        elif mult_parm == 0x0d:
            #Change from 13 to (2+1)x2x2+1
            return (self._gmult(self._gmult((self._gmult(input_num, 2) ^ input_num), 2), 2) ^ input_num)
        elif mult_parm == 0x0e:
            # Change from 14 to ((2+1)x2+1)x2 (the original comment said 15).
            return self._gmult((self._gmult((self._gmult(input_num, 2) ^ input_num), 2) ^ input_num), 2)

        output_num = 0
        if mult_parm & 1 == 1:
            # Low bit set (mult_parm is 1 or 3): add the input itself.
            output_num = input_num
        if mult_parm & 2 == 2:
            # Bit 1 set (mult_parm is 2 or 3): add xtime(input).
            if input_num & 0x80:
                # High bit set: reduce by the AES polynomial 0x1b after doubling.
                output_num ^= (input_num << 1) ^ 0x1b
            else:
                # High bit clear: a plain doubling needs no reduction.
                output_num ^= (input_num << 1)
        return output_num & 0xFF
def _add_round_key(self, input_bytes, round_index, row_idx=4):
row_size = self.key_expantion_size // row_idx
#Make Round Key
round_key = self.rounds_keys[(0*row_size)+(row_idx*round_index):(0*row_size)+(row_idx*round_index)+row_idx]
round_key += self.rounds_keys[(1*row_size)+(row_idx*round_index):(1*row_size)+(row_idx*round_index)+row_idx]
round_key += self.rounds_keys[(2*row_size)+(row_idx*round_index):(2*row_size)+(row_idx*round_index)+row_idx]
round_key += self.rounds_keys[(3*row_size)+(row_idx*round_index):(3*row_size)+(row_idx*round_index)+row_idx]
#print("k_sch[{}]: {}".format(round_index, round_key))
#Xor the input bytes with the previous round key
for idx in range(len(input_bytes)):
input_bytes[idx] ^= round_key[idx]
return input_bytes
def _reverse_shift_rows(self, input_byte_array, idx=4):
#No swap for row 1
input_byte_array[0:idx] = input_byte_array[0:idx]
#Rotate back once for row 2
input_byte_array[idx:(idx*2)] = input_byte_array[(idx*2)-1:(idx*2)] + input_byte_array[idx:(idx*2)-1]
#Rotate back twice for row 3
input_byte_array[(idx*2):(idx*3)] = input_byte_array[(idx*2)+2:(idx*3)] + input_byte_array[(idx*2):(idx*2)+2]
#Rotate back thrice for row 4
input_byte_array[(idx*3):(idx*4)] = input_byte_array[(idx*3)+1:(idx*4)] + input_byte_array[(idx*3):(idx*3)+1]
return input_byte_array
def _reverse_sub_bytes(self, input_bytes):
output_bytes = bytearray()
#Use the input_bytes as an index to lookup what to substitute from the inverted_sbox index.
for lookup in input_bytes:
output_bytes.append(self.inverted_sbox[lookup])
return output_bytes
def _reverse_mix_columns(self, input_num):
matrix_mult_row = [14,11,13,9]
mix_output = bytearray(16)
#First
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[0] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[1] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[2] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[3] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
#print(matrix_mult_row)
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
#print(matrix_mult_row)
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[4] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[5] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[6] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[7] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[8] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[9] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[10] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[11] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
#Rotate the matrix_mult_row
matrix_mult_row = matrix_mult_row[-1:] + matrix_mult_row[:-1]
for x in range(4):
#print("Index: {}".format((x*4)+(i%4)))
mix_output[12] ^= self._gmult(input_num[(x*4)], matrix_mult_row[x])
mix_output[13] ^= self._gmult(input_num[(x*4)+1], matrix_mult_row[x])
mix_output[14] ^= self._gmult(input_num[(x*4)+2], matrix_mult_row[x])
mix_output[15] ^= self._gmult(input_num[(x*4)+3], matrix_mult_row[x])
#print(mix_output)
return mix_output
def aes_block_encryption(self, plaintext):
    """Encrypt a single 16-byte block with AES.

    The input bytes are re-mapped into the column-major state layout the
    transform helpers expect, whitened with round key 0, run through
    ``rounds - 1`` full rounds (SubBytes, ShiftRows, MixColumns,
    AddRoundKey) plus a final round without MixColumns, then mapped back
    to plain byte order.

    :param plaintext: 16 indexable bytes (list/bytearray/bytes)
    :returns: bytearray of 16 ciphertext bytes
    :raises ValueError: if the block is not exactly 16 bytes
    """
    if len(plaintext) != 16:
        raise ValueError('wrong block length')
    # Load the bytes column-major into the AES state layout.
    plaintext = bytearray([plaintext[0], plaintext[4], plaintext[8], plaintext[12],
                           plaintext[1], plaintext[5], plaintext[9], plaintext[13],
                           plaintext[2], plaintext[6], plaintext[10], plaintext[14],
                           plaintext[3], plaintext[7], plaintext[11], plaintext[15]])
    # Initial whitening with round key 0.
    temp = self._add_round_key(plaintext, 0)
    # Rounds 1 .. rounds-1: full round transform.
    for x in range(1, self.rounds):
        temp = self._sub_bytes(temp)
        temp = self._shift_rows(temp)
        temp = self._mix_columns(temp)
        temp = self._add_round_key(temp, x)
    # Final round omits MixColumns.
    temp = self._sub_bytes(temp)
    temp = self._shift_rows(temp)
    temp = self._add_round_key(temp, self.rounds)
    # Map the state back from column-major to plain byte order.
    temp = bytearray([temp[0], temp[4], temp[8], temp[12],
                      temp[1], temp[5], temp[9], temp[13],
                      temp[2], temp[6], temp[10], temp[14],
                      temp[3], temp[7], temp[11], temp[15]])
    return temp
def aes_block_decryption(self, ciphertext):
    """Decrypt a single 16-byte block with AES.

    Inverse of ``aes_block_encryption``: bytes are re-mapped into the
    column-major state, the inverse round transforms are applied with
    the round keys consumed in reverse order, and the state is mapped
    back to plain byte order.

    :param ciphertext: 16 indexable bytes (list/bytearray/bytes)
    :returns: bytearray of 16 plaintext bytes
    :raises ValueError: if the block is not exactly 16 bytes
    """
    if len(ciphertext) != 16:
        raise ValueError('wrong block length')
    # Load the bytes column-major into the AES state layout.
    ciphertext = bytearray([ciphertext[0], ciphertext[4], ciphertext[8], ciphertext[12],
                            ciphertext[1], ciphertext[5], ciphertext[9], ciphertext[13],
                            ciphertext[2], ciphertext[6], ciphertext[10], ciphertext[14],
                            ciphertext[3], ciphertext[7], ciphertext[11], ciphertext[15]])
    # Undo the last encryption round's key addition first.
    temp = self._add_round_key(ciphertext, self.rounds)
    # Rounds rounds-1 .. 1: inverse transforms, reversed key schedule.
    for x in range(self.rounds-1, 0, -1):
        temp = self._reverse_shift_rows(temp)
        temp = self._reverse_sub_bytes(temp)
        temp = self._add_round_key(temp, x)
        temp = self._reverse_mix_columns(temp)
    # Final step (mirror of the encryption's initial round): no
    # inverse MixColumns here.
    temp = self._reverse_shift_rows(temp)
    temp = self._reverse_sub_bytes(temp)
    temp = self._add_round_key(temp, 0)
    # Map the state back from column-major to plain byte order.
    temp = bytearray([temp[0], temp[4], temp[8], temp[12],
                      temp[1], temp[5], temp[9], temp[13],
                      temp[2], temp[6], temp[10], temp[14],
                      temp[3], temp[7], temp[11], temp[15]])
    return temp
# ---- Cipher modes built on the single-block primitives ----
def ctr_decryption(self, nonce, plaintext, little_endian=True):
    """AES-CTR decrypt.

    CTR mode is symmetric: decrypting is simply re-generating the same
    keystream and XOR-ing, so this delegates to ``ctr_encryption``.
    """
    return self.ctr_encryption(nonce, plaintext, little_endian)
def ctr_encryption(self, nonce, plaintext, little_endian=True):
    """AES-CTR encrypt ``plaintext`` under the given ``nonce``.

    A full-block nonce is XOR-ed with the counter; a shorter nonce is
    padded with the counter bytes up to the block size.

    NOTE(review): the ``little_endian`` argument is accepted but never
    used -- the counter is always rendered big-endian (the hard-coded
    ``False`` below).  Confirm before relying on that parameter.
    """
    nonce_is_full_block = len(nonce) == self.block_size
    out = []
    for counter, block in enumerate(to_blocks(plaintext, self.block_size)):
        if nonce_is_full_block:
            counter_block = fixedlen_xor(
                nonce, int_to_bytes_length(counter, self.block_size, False))
        else:
            counter_block = nonce + int_to_bytes_length(
                counter, self.block_size - len(nonce), False)
        keystream = self.aes_block_encryption(counter_block)
        out.append(shortest_xor(keystream, block))
    return combind_blocks(out)
def ofb_encryption(self, init_iv, plaintext):
    """AES-OFB encrypt ``plaintext`` using ``init_iv`` as the seed.

    The IV is emitted as the first ciphertext block.  This matches
    ``ofb_decryption`` in this class, which reads the IV from
    ``cipher_blocks[0]``; previously the IV was omitted from the
    output, so decrypting this method's result mis-aligned every block
    and round-trips failed.

    :param init_iv: one block of initialisation vector
    :param plaintext: data to encrypt (multiple of the block size)
    :returns: IV followed by the ciphertext blocks, merged into one buffer
    """
    # First output block carries the IV so ofb_decryption() can consume it.
    cipher_blocks = [init_iv]
    keystream = init_iv
    for plain_block in to_blocks(plaintext, self.block_size):
        # OFB keystream: repeatedly encrypt the previous keystream block.
        keystream = self.aes_block_encryption(keystream)
        cipher_blocks.append(fixedlen_xor(keystream, plain_block))
    # Merge cipher blocks
    return combind_blocks(cipher_blocks)
def ofb_decryption(self, ciphertext):
    """AES-OFB decrypt; the first ciphertext block is taken as the IV."""
    blocks = to_blocks(ciphertext, self.block_size)
    keystream = blocks[0]  # IV seeds the keystream
    plain_blocks = []
    for cipher_block in blocks[1:]:
        keystream = self.aes_block_encryption(keystream)
        plain_blocks.append(fixedlen_xor(keystream, cipher_block))
    return combind_blocks(plain_blocks)
def cfb_encryption(self, init_iv, plaintext):
    """AES-CFB encrypt ``plaintext`` using ``init_iv``.

    The IV is emitted as the first ciphertext block.  This matches
    ``cfb_decryption`` in this class, which reads the IV from
    ``cipher_blocks[0]``; previously the method returned
    ``cipher_blocks[1:]`` and dropped the IV, so its own decryptor
    could not round-trip the output.

    :param init_iv: one block of initialisation vector
    :param plaintext: data to encrypt (multiple of the block size)
    :returns: IV followed by the ciphertext blocks, merged into one buffer
    """
    # First block is the IV; each later block feeds the next keystream.
    cipher_blocks = [init_iv]
    for plain_block in to_blocks(plaintext, self.block_size):
        aes_block = self.aes_block_encryption(cipher_blocks[-1])
        cipher_blocks.append(fixedlen_xor(aes_block, plain_block))
    # Merge cipher blocks -- including the IV, as cfb_decryption expects.
    return combind_blocks(cipher_blocks)
def cfb_decryption(self, ciphertext):
    """AES-CFB decrypt; the first ciphertext block is taken as the IV."""
    blocks = to_blocks(ciphertext, self.block_size)
    plain_blocks = []
    # Each plaintext block is E(previous ciphertext block) XOR current block.
    for prev_block, cur_block in zip(blocks, blocks[1:]):
        keystream = self.aes_block_encryption(prev_block)
        plain_blocks.append(fixedlen_xor(keystream, cur_block))
    return combind_blocks(plain_blocks)
def cbc_encryption(self, init_iv, plaintext):
    """AES-CBC encrypt; the IV is emitted as the first output block."""
    out = [init_iv]
    prev = init_iv
    for block in to_blocks(plaintext, self.block_size):
        # Chain: XOR with the previous ciphertext block, then encrypt.
        prev = self.aes_block_encryption(fixedlen_xor(block, prev))
        out.append(prev)
    return combind_blocks(out)
def cbc_decryption(self, ciphertext):
    """AES-CBC decrypt; the first ciphertext block is taken as the IV."""
    blocks = to_blocks(ciphertext, self.block_size)
    plain_blocks = []
    # Each plaintext block is D(current block) XOR previous block,
    # starting with the IV as the "previous" block.
    for prev_block, cur_block in zip(blocks, blocks[1:]):
        decrypted = self.aes_block_decryption(cur_block)
        plain_blocks.append(fixedlen_xor(decrypted, prev_block))
    return combind_blocks(plain_blocks)
def ecb_encryption(self, plaintext):
    """AES-ECB encrypt: every block is encrypted independently (no IV)."""
    return combind_blocks([self.aes_block_encryption(block)
                           for block in to_blocks(plaintext, self.block_size)])
def ecb_decryption(self, ciphertext):
    """AES-ECB decrypt: every block is decrypted independently."""
    return combind_blocks([self.aes_block_decryption(block)
                           for block in to_blocks(ciphertext, self.block_size)])
if __name__ == '__main__':
    # Smoke test: encrypt and decrypt the FIPS-197 style example block
    # with an AES-256 key and print both directions as hex.
    block = [0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
             0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff]
    # Key material for the three AES key sizes (only key_256 is used below).
    key_128 = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
               0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f]
    key_192 = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
               0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
               0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17]
    key_192_alt = [0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
                   0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
                   0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b]
    key_256 = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
               0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
               0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
               0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f]
    key_256_alt = [0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
                   0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
                   0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
                   0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4]

    cipher = AES(key_256)
    print(block)
    encrypted = cipher.aes_block_encryption(block)
    print(binascii.hexlify(encrypted))
    decrypted = cipher.aes_block_decryption(encrypted)
    print(binascii.hexlify(decrypted))
| 38.188976 | 207 | 0.670186 |
bd4acaa995444a0f6624a5d2559bca32d1dd4baf | 2,232 | py | Python | scraper.py | ivoras/covid19hr | 9b9ee3118079e73b80ccefec801fe292f7ede47a | [
"BSD-2-Clause"
] | 2 | 2020-04-10T18:56:56.000Z | 2020-05-01T12:08:13.000Z | scraper.py | ivoras/covid19hr | 9b9ee3118079e73b80ccefec801fe292f7ede47a | [
"BSD-2-Clause"
] | null | null | null | scraper.py | ivoras/covid19hr | 9b9ee3118079e73b80ccefec801fe292f7ede47a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
import sys, os, os.path
import requests
import re
import datetime
ZUPANIJE = [
'bjelovarsko-bilogorska',
'brodsko-posavska',
'dubrovacko-neretvanska',
'grad-zagreb',
'istarska',
'karlovacka',
'koprivnicko-krizevacka',
'krapinsko-zagorska-zupanija',
'licko-senjska',
'medjimurska',
'osjecko-baranjska',
'pozesko-slavonska',
'primorsko-goranska',
'sibensko-kninska',
'sisacko-moslavacka',
'splitsko-dalmatinska',
'varazdinska',
'viroviticko-podravska',
'vukovarsko-srijemska',
'zadarska',
'zagrebacka',
]
RE_ZARAZENI = re.compile(r"""<text class='zarazeni' data-url='https://www.koronavirus.hr/([a-z-]+)/\d+' x='\d+' y='\d+' stroke='transparent' text-anchor='middle' dy='\d+.\d+em' style='font-size: \d+px;'>(\d+)</text>""")
RE_IZLIJECENI = re.compile(r"""<text class='izlijeceni' data-url='https://www.koronavirus.hr/([a-z-]+)/\d+' x='\d+' y='\d+' stroke='transparent' text-anchor='middle' dy='\d+.\d+em' style='font-size: \d+px;'>(\d+)</text>""")
r = requests.get('https://koronavirus.hr/')
if r.status_code != 200:
print("Error getting web page:", r.status_code)
sys.exit(0)
zarazeni = { x: 0 for x in ZUPANIJE }
for data in RE_ZARAZENI.findall(r.text):
zarazeni[data[0]] = int(data[1])
izlijeceni = { x: 0 for x in ZUPANIJE }
for data in RE_IZLIJECENI.findall(r.text):
izlijeceni[data[0]] = int(data[1])
today = datetime.date.today()
zarazeni_exists = os.path.exists("zarazeni.csv")
with open("zarazeni.csv", "at") as f:
if not zarazeni_exists:
f.write('"datum", ')
f.write(", ".join([ '"%s"' % x for x in ZUPANIJE]))
f.write("\n")
f.write('"%d-%02d-%02d"' % (today.year, today.month, today.day))
for zup in ZUPANIJE:
f.write(", %s" % zarazeni[zup])
f.write("\n")
izlijeceni_exists = os.path.exists("izlijeceni.csv")
with open("izlijeceni.csv", "at") as f:
if not izlijeceni_exists:
f.write('"datum", ')
f.write(", ".join([ '"%s"' % x for x in ZUPANIJE]))
f.write("\n")
f.write('"%d-%02d-%02d"' % (today.year, today.month, today.day))
for zup in ZUPANIJE:
f.write(", %s" % izlijeceni[zup])
f.write("\n")
| 30.575342 | 223 | 0.610663 |
cf5a298bab3f1db20a3306a3071f221170fe1dbb | 1,787 | py | Python | 05installpackages.py | Syralist/alltheconfs | 6c5e868b925ad4b88975a884124b75a4ad3d4b81 | [
"Unlicense"
] | null | null | null | 05installpackages.py | Syralist/alltheconfs | 6c5e868b925ad4b88975a884124b75a4ad3d4b81 | [
"Unlicense"
] | null | null | null | 05installpackages.py | Syralist/alltheconfs | 6c5e868b925ad4b88975a884124b75a4ad3d4b81 | [
"Unlicense"
] | null | null | null | import subprocess
import os
# install packages for coding
print("pakete installieren")
install_req = "apt-get install -y aptitude vim-nox build-essential python3-pip cmake code gimp inkscape kicad".split()
sudo = subprocess.Popen(["sudo", "-S"] + install_req, stdout=subprocess.PIPE)
print(sudo.stdout.read())
# install VS Code extensions
print("VS Code Extensions installieren")
install_req = "code --install-extension".split()
extensions = ["alexcvzz.vscode-sqlite",
"DotJoshJohnson.xml",
"efbenson.scad",
"fallenwood.vimL",
"James-Yu.latex-workshop",
"MS-CEINTL.vscode-language-pack-de",
"ms-python.python",
"ms-vscode.cpptools",
"ms-azuretools.vscode-docker",
"PKief.material-icon-theme",
"vsciot-vscode.vscode-arduino",
"vscodevim.vim",
"redhat.vscode-yaml",
"dpkshrma.insert-iso-timestamp",
"rusnasonov.vscode-hugo",
"bungcip.better-toml"]
for ext in extensions:
sudo = subprocess.Popen(install_req + [ext], stdout=subprocess.PIPE)
print(sudo.stdout.read())
# install vundle
print("vundle installieren")
install_req = f"git clone https://github.com/VundleVim/Vundle.vim.git {os.path.expanduser('~/.vim/bundle/Vundle.vim')}".split()
cmd = subprocess.Popen(install_req, stdout=subprocess.PIPE)
print(cmd.stdout.read())
print("vim aufrufen und vundle plugins installieren")
install_req = f"vim +PluginInstall +qall".split()
cmd = subprocess.Popen(install_req, stdout=subprocess.PIPE)
print(cmd.stdout.read())
print("youcompleteme compilieren")
os.chdir(os.path.expanduser("~/.vim/bundle/YouCompleteMe"))
install_req = f"python3 ./install.py --clang-completer".split()
cmd = subprocess.Popen(install_req, stdout=subprocess.PIPE)
print(cmd.stdout.read())
| 38.021277 | 127 | 0.70901 |
853386cb28836d47a52dd6f2009dc65ee2d5e89d | 16,069 | py | Python | salt/modules/rbac_solaris.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | salt/modules/rbac_solaris.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | salt/modules/rbac_solaris.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
'''
Module for Solaris' Role-Based Access Control
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'rbac'
def __virtual__():
    '''
    Provides rbac if we are running on a solaris like system
    '''
    # Guard-clause form: bail out unless both conditions hold.
    if __grains__['kernel'] != 'SunOS' or not salt.utils.which('profiles'):
        return (
            False,
            '{0} module can only be loaded on a solaris like system'.format(
                __virtualname__
            )
        )
    return __virtualname__
def profile_list(default_only=False):
    '''
    List all available profiles

    default_only : boolean
        return only default profile

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_list
    '''
    ## profiles granted to everybody: 'All' plus PROFS_GRANTED from policy.conf
    defaults = ['All']
    with salt.utils.fopen('/etc/security/policy.conf', 'r') as policy_conf:
        for line in policy_conf:
            parts = line.split('=')
            if parts[0].strip() == 'PROFS_GRANTED':
                defaults.extend(parts[1].strip().split(','))

    ## prof_attr format: profname:res1:res2:desc:attr
    profiles = {}
    with salt.utils.fopen('/etc/security/prof_attr', 'r') as prof_attr:
        for line in prof_attr:
            fields = line.split(':')
            # skip comments and malformed lines
            if len(fields) == 5:
                profiles[fields[0]] = fields[3]

    ## optionally keep only the default profiles
    if default_only:
        profiles = {name: desc for name, desc in profiles.items()
                    if name in defaults}

    return profiles
def profile_get(user, default_hidden=True):
    '''
    List profiles for user

    user : string
        username
    default_hidden : boolean
        hide default profiles

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_get leo
        salt '*' rbac.profile_get leo default_hidden=False
    '''
    user_profiles = []

    ## user_attr format: user:qualifier:res1:res2:attr
    with salt.utils.fopen('/etc/user_attr', 'r') as user_attr:
        for line in user_attr:
            fields = line.strip().split(':')
            # skip comments, malformed lines and other users
            if len(fields) != 5 or fields[0] != user:
                continue
            # attr is a ';'-separated list of key=value pairs; the
            # multi-valued keys are stored as lists
            attrs = {}
            for pair in fields[4].strip().split(';'):
                key, value = pair.strip().split('=')
                if key in ('auths', 'profiles', 'roles'):
                    attrs[key] = value.strip().split(',')
                else:
                    attrs[key] = value
            user_profiles.extend(attrs.get('profiles', []))

    ## optionally hide the profiles everybody has by default
    if default_hidden:
        for default in profile_list(default_only=True):
            if default in user_profiles:
                user_profiles.remove(default)

    return list(set(user_profiles))
def _profile_update(user, profile, remove):
    """Shared implementation behind profile_add/profile_rm.

    Validates the requested profiles against profile_list(), rewrites the
    user's profile list with ``usermod -P`` and returns a per-profile
    status dict ('Unknown' / 'Added' / 'Failed' / 'Remove').  The two
    public wrappers previously duplicated this logic almost line for line.
    """
    ret = {}

    ## keep only profiles that actually exist on the system
    profiles = profile.split(',')
    known_profiles = profile_list().keys()
    valid_profiles = [p for p in profiles if p in known_profiles]
    log.debug(
        'rbac.%s - profiles=%s, known_profiles=%s, valid_profiles=%s',
        'profile_rm' if remove else 'profile_add',
        profiles, known_profiles, valid_profiles,
    )

    ## rewrite the user's profile list
    if len(valid_profiles) > 0:
        if remove:
            new_profiles = [p for p in profile_get(user) if p not in valid_profiles]
        else:
            new_profiles = set(profile_get(user) + valid_profiles)
        res = __salt__['cmd.run_all']('usermod -P "{profiles}" {login}'.format(
            login=user,
            profiles=','.join(new_profiles),
        ))
        if res['retcode'] > 0:
            ret['Error'] = {
                'retcode': res['retcode'],
                'message': res['stderr'] if 'stderr' in res else res['stdout']
            }
            return ret

    ## report the per-profile outcome against the now-active profiles
    active_profiles = profile_get(user, False)
    for p in profiles:
        if p not in valid_profiles:
            ret[p] = 'Unknown'
        elif p in active_profiles:
            ret[p] = 'Failed' if remove else 'Added'
        else:
            ret[p] = 'Remove' if remove else 'Failed'

    return ret


def profile_add(user, profile):
    '''
    Add profile to user

    user : string
        username
    profile : string
        profile name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_add martine 'Primary Administrator'
        salt '*' rbac.profile_add martine 'User Management,User Security'
    '''
    return _profile_update(user, profile, remove=False)


def profile_rm(user, profile):
    '''
    Remove profile from user

    user : string
        username
    profile : string
        profile name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_rm jorge 'Primary Administrator'
        salt '*' rbac.profile_rm jorge 'User Management,User Security'
    '''
    return _profile_update(user, profile, remove=True)
def role_list():
    '''
    List all available roles

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.role_list
    '''
    roles = {}

    ## user_attr format: user:qualifier:res1:res2:attr
    with salt.utils.fopen('/etc/user_attr', 'r') as user_attr:
        for line in user_attr:
            fields = line.split(':')
            # skip comments and malformed lines
            if len(fields) != 5:
                continue
            # decode the attr field; multi-valued keys become lists
            attrs = {}
            for pair in fields[4].split(';'):
                key, value = pair.split('=')
                if key in ('auths', 'profiles', 'roles'):
                    attrs[key] = value.split(',')
                else:
                    attrs[key] = value
            # only entries flagged type=role are roles (as opposed to users)
            if attrs.get('type') == 'role':
                del attrs['type']
                roles[fields[0]] = attrs

    return roles
def role_get(user):
    '''
    List roles for user

    user : string
        username

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.role_get leo
    '''
    user_roles = []

    ## user_attr format: user:qualifier:res1:res2:attr
    with salt.utils.fopen('/etc/user_attr', 'r') as user_attr:
        for role in user_attr:
            # (the original called .strip() twice here; once is enough)
            role = role.strip().split(':')
            # skip comments and non complaint lines
            if len(role) != 5:
                continue
            # skip other users
            if role[0] != user:
                continue
            # parse attr -- a ';'-separated list of key=value pairs;
            # multi-valued keys are split into lists
            attrs = {}
            for attr in role[4].strip().split(';'):
                attr_key, attr_val = attr.strip().split('=')
                if attr_key in ['auths', 'profiles', 'roles']:
                    attrs[attr_key] = attr_val.strip().split(',')
                else:
                    attrs[attr_key] = attr_val
            if 'roles' in attrs:
                user_roles.extend(attrs['roles'])

    # de-duplicate before returning
    return list(set(user_roles))
def _role_update(user, role, remove):
    """Shared implementation behind role_add/role_rm.

    Validates the requested roles against role_list(), rewrites the
    user's role list with ``usermod -R`` and returns a per-role status
    dict ('Unknown' / 'Added' / 'Failed' / 'Remove').  The two public
    wrappers previously duplicated this logic almost line for line.
    """
    ret = {}

    ## keep only roles that actually exist on the system
    roles = role.split(',')
    known_roles = role_list().keys()
    valid_roles = [r for r in roles if r in known_roles]
    log.debug(
        'rbac.%s - roles=%s, known_roles=%s, valid_roles=%s',
        'role_rm' if remove else 'role_add',
        roles, known_roles, valid_roles,
    )

    ## rewrite the user's role list
    if len(valid_roles) > 0:
        if remove:
            new_roles = [r for r in role_get(user) if r not in valid_roles]
        else:
            new_roles = set(role_get(user) + valid_roles)
        res = __salt__['cmd.run_all']('usermod -R "{roles}" {login}'.format(
            login=user,
            roles=','.join(new_roles),
        ))
        if res['retcode'] > 0:
            ret['Error'] = {
                'retcode': res['retcode'],
                'message': res['stderr'] if 'stderr' in res else res['stdout']
            }
            return ret

    ## report the per-role outcome against the now-active roles
    active_roles = role_get(user)
    for r in roles:
        if r not in valid_roles:
            ret[r] = 'Unknown'
        elif r in active_roles:
            ret[r] = 'Failed' if remove else 'Added'
        else:
            ret[r] = 'Remove' if remove else 'Failed'

    return ret


def role_add(user, role):
    '''
    Add role to user

    user : string
        username
    role : string
        role name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.role_add martine netcfg
        salt '*' rbac.role_add martine netcfg,zfssnap
    '''
    return _role_update(user, role, remove=False)


def role_rm(user, role):
    '''
    Remove role from user

    user : string
        username
    role : string
        role name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.role_rm jorge netcfg
        salt '*' rbac.role_rm jorge netcfg,zfssnap
    '''
    return _role_update(user, role, remove=True)
def auth_list():
    '''
    List all available authorization

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.auth_list
    '''
    auths = {}

    ## auth_attr format: name:res1:res2:short_desc:long_desc:attr
    with salt.utils.fopen('/etc/security/auth_attr', 'r') as auth_attr:
        for line in auth_attr:
            fields = line.split(':')
            # skip comments and malformed lines
            if len(fields) != 6:
                continue
            # a trailing '.' marks a prefix/heading entry; display as a glob
            name = fields[0]
            if name[-1:] == '.':
                name = '{0}*'.format(name)
            auths[name] = fields[3]

    return auths
def auth_get(user, computed=True):
    '''
    List authorization for user

    user : string
        username
    computed : boolean
        merge results from `auths` command into data from user_attr

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.auth_get leo
    '''
    user_auths = []

    ## user_attr format: user:qualifier:res1:res2:attr
    with salt.utils.fopen('/etc/user_attr', 'r') as user_attr:
        for line in user_attr:
            fields = line.strip().split(':')
            # skip comments, malformed lines and other users
            if len(fields) != 5 or fields[0] != user:
                continue
            # decode the attr field; multi-valued keys become lists
            attrs = {}
            for pair in fields[4].strip().split(';'):
                key, value = pair.strip().split('=')
                if key in ('auths', 'profiles', 'roles'):
                    attrs[key] = value.strip().split(',')
                else:
                    attrs[key] = value
            user_auths.extend(attrs.get('auths', []))

    ## optionally merge what the auths(1) command reports (includes
    ## authorizations inherited via profiles)
    if computed:
        res = __salt__['cmd.run_all']('auths {0}'.format(user))
        if res['retcode'] == 0:
            for line in res['stdout'].splitlines():
                user_auths.extend(line.strip().split(','))

    return list(set(user_auths))
def _auth_update(user, auth, remove):
    """Shared implementation behind auth_add/auth_rm.

    Validates the requested authorizations against auth_list(), rewrites
    the user's authorization list with ``usermod -A`` and returns a
    per-authorization status dict ('Unknown' / 'Added' / 'Failed' /
    'Remove').  The two public wrappers previously duplicated this logic
    almost line for line.
    """
    ret = {}

    ## keep only authorizations that actually exist on the system
    auths = auth.split(',')
    known_auths = auth_list().keys()
    valid_auths = [a for a in auths if a in known_auths]
    log.debug(
        'rbac.%s - auths=%s, known_auths=%s, valid_auths=%s',
        'auth_rm' if remove else 'auth_add',
        auths, known_auths, valid_auths,
    )

    ## rewrite the user's authorization list; only directly-assigned
    ## auths (computed=False) are written back
    if len(valid_auths) > 0:
        if remove:
            new_auths = [a for a in auth_get(user, False) if a not in valid_auths]
        else:
            new_auths = set(auth_get(user, False) + valid_auths)
        res = __salt__['cmd.run_all']('usermod -A "{auths}" {login}'.format(
            login=user,
            auths=','.join(new_auths),
        ))
        if res['retcode'] > 0:
            ret['Error'] = {
                'retcode': res['retcode'],
                'message': res['stderr'] if 'stderr' in res else res['stdout']
            }
            return ret

    ## report the per-authorization outcome
    active_auths = auth_get(user, False)
    for a in auths:
        if a not in valid_auths:
            ret[a] = 'Unknown'
        elif a in active_auths:
            ret[a] = 'Failed' if remove else 'Added'
        else:
            ret[a] = 'Remove' if remove else 'Failed'

    return ret


def auth_add(user, auth):
    '''
    Add authorization to user

    user : string
        username
    auth : string
        authorization name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.auth_add martine solaris.zone.manage
        salt '*' rbac.auth_add martine solaris.zone.manage,solaris.mail.mailq
    '''
    return _auth_update(user, auth, remove=False)


def auth_rm(user, auth):
    '''
    Remove authorization from user

    user : string
        username
    auth : string
        authorization name

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.auth_rm jorge solaris.zone.manage
        salt '*' rbac.auth_rm jorge solaris.zone.manage,solaris.mail.mailq
    '''
    return _auth_update(user, auth, remove=True)
| 25.58758 | 89 | 0.529342 |
f27dc49dc079cb7329627a093a033d204f7383ca | 581 | py | Python | videoProcessingAPI/api/migrations/0010_auto_20200315_1113.py | rohitgeddam/fosseeVideoProcessing | ca20fc03939d49b474eca24c4dca8beb2a57318b | [
"MIT"
] | null | null | null | videoProcessingAPI/api/migrations/0010_auto_20200315_1113.py | rohitgeddam/fosseeVideoProcessing | ca20fc03939d49b474eca24c4dca8beb2a57318b | [
"MIT"
] | null | null | null | videoProcessingAPI/api/migrations/0010_auto_20200315_1113.py | rohitgeddam/fosseeVideoProcessing | ca20fc03939d49b474eca24c4dca8beb2a57318b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-15 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (makemigrations, Django 3.0.4).

    Adds local-filesystem path fields for the audio and video chunk
    files on the ``Chunk`` model.  Auto-generated: avoid hand-editing
    the operations.
    """

    # Must be applied after 0009_chunk_me in the 'api' app.
    dependencies = [
        ('api', '0009_chunk_me'),
    ]

    operations = [
        migrations.AddField(
            model_name='chunk',
            name='audioChunkLocalPath',
            # placeholder default so existing rows satisfy NOT NULL
            field=models.CharField(default='1.mp3', max_length=100),
        ),
        migrations.AddField(
            model_name='chunk',
            name='videoChunkLocalPath',
            field=models.CharField(default='1.mp4', max_length=100),
        ),
    ]
60c31f06359e2d238f15f81c81902734893de1b9 | 5,811 | py | Python | quanttrader/data/live_data_feed.py | qalpha/quanttrader | e5c407f76c9d0beeccaa8735052a7e7717f0bed6 | [
"Apache-2.0"
] | 135 | 2020-09-07T01:07:18.000Z | 2022-03-31T23:04:40.000Z | quanttrader/data/live_data_feed.py | qalpha/quanttrader | e5c407f76c9d0beeccaa8735052a7e7717f0bed6 | [
"Apache-2.0"
] | 4 | 2021-04-09T22:13:48.000Z | 2021-12-23T02:10:37.000Z | quanttrader/data/live_data_feed.py | qalpha/quanttrader | e5c407f76c9d0beeccaa8735052a7e7717f0bed6 | [
"Apache-2.0"
] | 51 | 2020-09-08T00:18:45.000Z | 2022-03-28T19:42:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
from .data_feed_base import DataFeedBase
from ..data.bar_event import BarEvent
import logging
_logger = logging.getLogger(__name__)
class LiveDataFeed(DataFeedBase):
    """
    Live DataFeed class.

    Downloads daily bar data per subscribed ticker, merges everything
    into a single chronological stream and emits one BarEvent per row
    onto the event queue.
    """
    def __init__(
        self, events_queue,
        init_tickers=None,
        start_date=None, end_date=None,
        calc_adj_returns=False
    ):
        """
        Takes the events queue and a possible list of initial ticker
        symbols, then creates an (optional) list of ticker
        subscriptions and associated prices.
        """
        self.events_queue = events_queue
        self.continue_backtest = True
        self.tickers = {}       # ticker -> latest close/adj_close/timestamp
        self.tickers_data = {}  # ticker -> full bar DataFrame
        self.start_date = start_date
        self.end_date = end_date
        if init_tickers is not None:
            for ticker in init_tickers:
                self.subscribe_ticker(ticker)
        self.bar_stream = self._merge_sort_ticker_data()
        self.calc_adj_returns = calc_adj_returns
        if self.calc_adj_returns:
            self.adj_close_returns = []

    def _open_ticker_price_online(self, ticker):
        """
        Download daily prices for *ticker* from Quandl and cache the
        DataFrame.  Defaults to the trailing 365 days when no explicit
        date range was supplied.
        """
        # Lazy import: ``quandl`` was referenced here without ever being
        # imported anywhere in the module, so this method always raised
        # NameError before this fix.
        import quandl

        ed = self.end_date if self.end_date is not None else datetime.today()
        sd = self.start_date if self.start_date is not None else ed - timedelta(days=365)
        data = quandl.get('wiki/' + ticker, start_date=sd, end_date=ed,
                          authtoken='your_token')
        self.tickers_data[ticker] = data
        self.tickers_data[ticker]["Ticker"] = ticker

    def _merge_sort_ticker_data(self):
        """
        Concatenates all of the separate equities DataFrames
        into a single DataFrame that is time ordered, allowing tick
        data events to be added to the queue in a chronological fashion.

        Note that this is an idealised situation, utilised solely for
        backtesting. In live trading ticks may arrive "out of order".
        """
        # With no subscriptions stream nothing, instead of letting
        # pd.concat() raise on an empty sequence in __init__.
        if not self.tickers_data:
            return iter([])

        df = pd.concat(self.tickers_data.values()).sort_index()
        start = None
        end = None
        if self.start_date is not None:
            start = df.index.searchsorted(self.start_date)
        if self.end_date is not None:
            end = df.index.searchsorted(self.end_date)
        # searchsorted() yields integer positions, so slice positionally.
        # (``.ix`` was deprecated and then removed from pandas; ``.iloc``
        # is the positional equivalent.)
        if start is None and end is None:
            return df.iterrows()
        elif start is not None and end is None:
            return df.iloc[start:].iterrows()
        elif start is None and end is not None:
            return df.iloc[:end].iterrows()
        else:
            return df.iloc[start:end].iterrows()

    def subscribe_ticker(self, ticker):
        """
        Subscribes the price handler to a new ticker symbol.
        """
        if ticker in self.tickers:
            _logger.error(f"Could not subscribe ticker {ticker} as is already subscribed.")
            return
        try:
            self._open_ticker_price_online(ticker)
            dft = self.tickers_data[ticker]
            row0 = dft.iloc[0]
            # Seed the latest-price cache with the first available bar.
            self.tickers[ticker] = {
                "close": row0["Close"],
                "adj_close": row0["Adj. Close"],
                "timestamp": dft.index[0]
            }
        # NOTE(review): download failures from quandl are unlikely to be
        # OSError subclasses -- confirm which exceptions should be caught.
        except OSError:
            _logger.error(f'Could not subscribe ticker {ticker} as no data CSV found for pricing.')

    def _create_event(self, index, period, ticker, row):
        """
        Obtain all elements of the bar from a row of dataframe
        and return a BarEvent
        """
        bev = BarEvent(
            ticker, index, period,
            row["Open"], row["High"], row["Low"], row["Close"],
            int(row["Volume"]), row["Adj. Close"]
        )
        return bev

    def _store_event(self, event):
        """
        Store price event for closing price and adjusted closing price
        """
        ticker = event.ticker
        # If the calc_adj_returns flag is True, then calculate
        # and store the full list of adjusted closing price
        # percentage returns in a list
        # TODO: Make this faster
        if self.calc_adj_returns:
            prev_adj_close = self.tickers[ticker]["adj_close"]
            cur_adj_close = event.adj_close_price
            self.tickers[ticker]["adj_close_ret"] = cur_adj_close / prev_adj_close - 1.0
            self.adj_close_returns.append(self.tickers[ticker]["adj_close_ret"])
        self.tickers[ticker]["close"] = event.close_price
        self.tickers[ticker]["adj_close"] = event.adj_close_price
        self.tickers[ticker]["timestamp"] = event.time

    def stream_next(self):
        """
        Place the next BarEvent onto the event queue.
        """
        try:
            index, row = next(self.bar_stream)
        except StopIteration:
            # Stream exhausted: signal the backtest loop to stop.
            self.continue_backtest = False
            return
        ticker = row["Ticker"]
        period = 86400  # Seconds in a day
        # Create, cache and enqueue the bar event.
        bev = self._create_event(index, period, ticker, row)
        self._store_event(bev)
        self.events_queue.put(bev)
| 34.384615 | 103 | 0.588367 |
c78a9f4ce1315a5e7c0627f19580e974b32841e3 | 73,685 | py | Python | baseline_tokenization/javalang/parser.py | greenmonn/code2seq | 415edc8fd6ca0aeb191e5a995fa7dd5127ffc199 | [
"MIT"
] | 448 | 2019-01-25T22:02:42.000Z | 2022-03-30T07:53:17.000Z | baseline_tokenization/javalang/parser.py | barghouthi/code2seq | 31c39e76bc8ad941fc3a085bda7664dcab13b521 | [
"MIT"
] | 111 | 2019-02-15T10:29:14.000Z | 2022-03-17T13:48:36.000Z | baseline_tokenization/javalang/parser.py | barghouthi/code2seq | 31c39e76bc8ad941fc3a085bda7664dcab13b521 | [
"MIT"
] | 167 | 2019-02-07T20:42:23.000Z | 2022-03-22T01:42:45.000Z | import six
from . import util
from . import tree
from .tokenizer import (
EndOfInput, Keyword, Modifier, BasicType, Identifier,
Annotation, Literal, Operator, JavaToken,
)
ENABLE_DEBUG_SUPPORT = False

def parse_debug(method):
    """Decorator that optionally wraps a Parser method with trace output.

    ENABLE_DEBUG_SUPPORT is consulted once, at decoration time: when it is
    False (the default) the method is returned untouched so the parser pays
    no runtime cost. When it is True, the wrapper maintains
    ``self.recursion_depth`` and, if ``self.debug`` is set, prints an
    indented entry/exit line (with the current token and any error message)
    for every call.

    Note: the former ``global ENABLE_DEBUG_SUPPORT`` statement was removed;
    it is unnecessary because the flag is only read, never assigned.
    """
    if ENABLE_DEBUG_SUPPORT:
        def _method(self):
            if not hasattr(self, 'recursion_depth'):
                self.recursion_depth = 0
            if self.debug:
                depth = "%02d" % (self.recursion_depth,)
                token = six.text_type(self.tokens.look())
                start_value = self.tokens.look().value
                name = method.__name__
                sep = ("-" * self.recursion_depth)
                e_message = ""
                print("%s %s> %s(%s)" % (depth, sep, name, token))
                self.recursion_depth += 1
                try:
                    r = method(self)
                except JavaSyntaxError as e:
                    e_message = e.description
                    raise
                except Exception as e:
                    e_message = six.text_type(e)
                    raise
                finally:
                    # Always report the exit (success or failure) and unwind.
                    token = six.text_type(self.tokens.last())
                    print("%s <%s %s(%s, %s) %s" %
                          (depth, sep, name, start_value, token, e_message))
                    self.recursion_depth -= 1
            else:
                self.recursion_depth += 1
                try:
                    r = method(self)
                finally:
                    self.recursion_depth -= 1
            return r
        return _method
    else:
        return method
# ------------------------------------------------------------------------------
# ---- Parsing exception ----
class JavaParserBaseException(Exception):
    """Root of the parser exception hierarchy."""
    def __init__(self, message=''):
        super(JavaParserBaseException, self).__init__(message)
class JavaSyntaxError(JavaParserBaseException):
    """Raised when the token stream does not match the Java grammar.

    Attributes:
        description: Human-readable explanation of the problem.
        at: The token at (or near) which the error was detected, or None.
    """
    def __init__(self, description, at=None):
        super(JavaSyntaxError, self).__init__()
        self.description = description
        self.at = at
class JavaParserError(JavaParserBaseException):
    """Raised for internal parser misuse (not a defect in the input)."""
    pass
# ------------------------------------------------------------------------------
# ---- Parser class ----
class Parser(object):
    """Recursive-descent parser turning a Java token stream into an AST.

    Each ``parse_*`` method corresponds to a production of the Java grammar
    and consumes tokens through a look-ahead iterator that supports
    arbitrary peeking and backtracking markers.
    """
    # Binary operators grouped from lowest to highest precedence; used by
    # build_binary_operation to fold a flat operand/operator list into a tree.
    operator_precedence = [ set(('||',)),
                            set(('&&',)),
                            set(('|',)),
                            set(('^',)),
                            set(('&',)),
                            set(('==', '!=')),
                            set(('<', '>', '>=', '<=', 'instanceof')),
                            set(('<<', '>>', '>>>')),
                            set(('+', '-')),
                            set(('*', '/', '%')) ]
    def __init__(self, tokens):
        # Wrap the tokens so look()/push_marker() work; EndOfInput pads the tail.
        self.tokens = util.LookAheadListIterator(tokens)
        self.tokens.set_default(EndOfInput(None))
        self.debug = False
    # ------------------------------------------------------------------------------
    # ---- Debug control ----
    def set_debug(self, debug=True):
        """Enable or disable per-call trace output (see parse_debug)."""
        self.debug = debug
    # ------------------------------------------------------------------------------
    # ---- Parsing entry point ----
    def parse(self):
        """Parse the whole token stream and return a CompilationUnit."""
        return self.parse_compilation_unit()
    # ------------------------------------------------------------------------------
    # ---- Helper methods ----
    def illegal(self, description, at=None):
        """Raise JavaSyntaxError at the given token (default: next token)."""
        if not at:
            at = self.tokens.look()
        raise JavaSyntaxError(description, at)
    def accept(self, *accepts):
        """Consume one token per pattern in *accepts*, failing on mismatch.

        Each pattern is either a literal token value (string) or a token
        class. Returns the value of the last token consumed.
        """
        last = None
        if len(accepts) == 0:
            raise JavaParserError("Missing acceptable values")
        for accept in accepts:
            token = next(self.tokens)
            if isinstance(accept, six.string_types) and (
                    not token.value == accept):
                self.illegal("Expected '%s'" % (accept,))
            elif isinstance(accept, type) and not isinstance(token, accept):
                self.illegal("Expected %s" % (accept.__name__,))
            last = token
        return last.value
    def would_accept(self, *accepts):
        """Peek only: report whether the next tokens match all patterns."""
        if len(accepts) == 0:
            raise JavaParserError("Missing acceptable values")
        for i, accept in enumerate(accepts):
            token = self.tokens.look(i)
            if isinstance(accept, six.string_types) and (
                    not token.value == accept):
                return False
            elif isinstance(accept, type) and not isinstance(token, accept):
                return False
        return True
    def try_accept(self, *accepts):
        """Consume the next tokens only if they all match; return success."""
        if len(accepts) == 0:
            raise JavaParserError("Missing acceptable values")
        for i, accept in enumerate(accepts):
            token = self.tokens.look(i)
            if isinstance(accept, six.string_types) and (
                    not token.value == accept):
                return False
            elif isinstance(accept, type) and not isinstance(token, accept):
                return False
        # All patterns matched: actually consume the tokens now.
        for i in range(0, len(accepts)):
            next(self.tokens)
        return True
    def build_binary_operation(self, parts, start_level=0):
        """Fold a flat [operand, operator, operand, ...] list into an AST.

        Splits recursively on the lowest-precedence operator present
        (per ``operator_precedence``), then chains the resulting operands
        left-associatively into nested BinaryOperation nodes.
        """
        if len(parts) == 1:
            return parts[0]
        operands = list()
        operators = list()
        i = 0
        for level in range(start_level, len(self.operator_precedence)):
            for j in range(1, len(parts) - 1, 2):
                if parts[j] in self.operator_precedence[level]:
                    operand = self.build_binary_operation(parts[i:j], level + 1)
                    operator = parts[j]
                    i = j + 1
                    operands.append(operand)
                    operators.append(operator)
            if operands:
                # Found operators at this precedence level; stop searching.
                break
        operand = self.build_binary_operation(parts[i:], level + 1)
        operands.append(operand)
        operation = operands[0]
        # Left-associative chaining of the collected operands.
        for operator, operandr in zip(operators, operands[1:]):
            operation = tree.BinaryOperation(operandl=operation)
            operation.operator = operator
            operation.operandr = operandr
        return operation
    def is_annotation(self, i=0):
        """ Returns true if the position is the start of an annotation application
        (as opposed to an annotation declaration)
        """
        return (isinstance(self.tokens.look(i), Annotation)
                and not self.tokens.look(i + 1).value == 'interface')
    def is_annotation_declaration(self, i=0):
        """ Returns true if the position is the start of an annotation declaration
        (as opposed to an annotation application)
        """
        return (isinstance(self.tokens.look(i), Annotation)
                and self.tokens.look(i + 1).value == 'interface')
    # ------------------------------------------------------------------------------
    # ---- Parsing methods ----
    # ------------------------------------------------------------------------------
    # -- Identifiers --
    @parse_debug
    def parse_identifier(self):
        """Consume and return a single identifier token's value."""
        return self.accept(Identifier)
    @parse_debug
    def parse_qualified_identifier(self):
        """Parse a dotted name (e.g. ``java.util.List``) into one string."""
        qualified_identifier = list()
        while True:
            identifier = self.parse_identifier()
            qualified_identifier.append(identifier)
            if not self.try_accept('.'):
                break
        return '.'.join(qualified_identifier)
    @parse_debug
    def parse_qualified_identifier_list(self):
        """Parse a comma-separated list of dotted names."""
        qualified_identifiers = list()
        while True:
            qualified_identifier = self.parse_qualified_identifier()
            qualified_identifiers.append(qualified_identifier)
            if not self.try_accept(','):
                break
        return qualified_identifiers
    # ------------------------------------------------------------------------------
    # -- Top level units --
    @parse_debug
    def parse_compilation_unit(self):
        """Parse an entire source file: package, imports, type declarations."""
        package = None
        package_annotations = None
        javadoc = None
        import_declarations = list()
        type_declarations = list()
        # Marker allows backtracking if the leading annotations turn out to
        # belong to a type declaration rather than a package statement.
        self.tokens.push_marker()
        next_token = self.tokens.look()
        if next_token:
            javadoc = next_token.javadoc
        if self.is_annotation():
            package_annotations = self.parse_annotations()
        if self.try_accept('package'):
            self.tokens.pop_marker(False)
            package_name = self.parse_qualified_identifier()
            package = tree.PackageDeclaration(annotations=package_annotations,
                                              name=package_name,
                                              documentation=javadoc)
            self.accept(';')
        else:
            # Not a package statement: rewind and reparse the annotations
            # as part of the first type declaration.
            self.tokens.pop_marker(True)
            package_annotations = None
        while self.would_accept('import'):
            import_declaration = self.parse_import_declaration()
            import_declarations.append(import_declaration)
        while not isinstance(self.tokens.look(), EndOfInput):
            try:
                type_declaration = self.parse_type_declaration()
            except StopIteration:
                self.illegal("Unexpected end of input")
            if type_declaration:
                type_declarations.append(type_declaration)
        return tree.CompilationUnit(package=package,
                                    imports=import_declarations,
                                    types=type_declarations)
    @parse_debug
    def parse_import_declaration(self):
        """Parse an ``import`` statement, including static and ``.*`` forms."""
        qualified_identifier = list()
        static = False
        import_all = False
        self.accept('import')
        if self.try_accept('static'):
            static = True
        while True:
            identifier = self.parse_identifier()
            qualified_identifier.append(identifier)
            if self.try_accept('.'):
                if self.try_accept('*'):
                    # Wildcard import terminates the dotted path.
                    self.accept(';')
                    import_all = True
                    break
            else:
                self.accept(';')
                break
        return tree.Import(path='.'.join(qualified_identifier),
                           static=static,
                           wildcard=import_all)
    @parse_debug
    def parse_type_declaration(self):
        """Parse one top-level type; a stray ';' yields None."""
        if self.try_accept(';'):
            return None
        else:
            return self.parse_class_or_interface_declaration()
    @parse_debug
    def parse_class_or_interface_declaration(self):
        """Dispatch on the keyword to the matching type-declaration parser."""
        modifiers, annotations, javadoc = self.parse_modifiers()
        type_declaration = None
        token = self.tokens.look()
        if token.value == 'class':
            type_declaration = self.parse_normal_class_declaration()
        elif token.value == 'enum':
            type_declaration = self.parse_enum_declaration()
        elif token.value == 'interface':
            type_declaration = self.parse_normal_interface_declaration()
        elif self.is_annotation_declaration():
            type_declaration = self.parse_annotation_type_declaration()
        else:
            self.illegal("Expected type declaration")
        type_declaration.modifiers = modifiers
        type_declaration.annotations = annotations
        type_declaration.documentation = javadoc
        return type_declaration
    @parse_debug
    def parse_normal_class_declaration(self):
        """Parse ``class Name<T> extends X implements Y, Z { ... }``."""
        name = None
        type_params = None
        extends = None
        implements = None
        body = None
        self.accept('class')
        name = self.parse_identifier()
        if self.would_accept('<'):
            type_params = self.parse_type_parameters()
        if self.try_accept('extends'):
            extends = self.parse_type()
        if self.try_accept('implements'):
            implements = self.parse_type_list()
        body = self.parse_class_body()
        return tree.ClassDeclaration(name=name,
                                     type_parameters=type_params,
                                     extends=extends,
                                     implements=implements,
                                     body=body)
    @parse_debug
    def parse_enum_declaration(self):
        """Parse ``enum Name implements X { ... }``."""
        name = None
        implements = None
        body = None
        self.accept('enum')
        name = self.parse_identifier()
        if self.try_accept('implements'):
            implements = self.parse_type_list()
        body = self.parse_enum_body()
        return tree.EnumDeclaration(name=name,
                                    implements=implements,
                                    body=body)
    @parse_debug
    def parse_normal_interface_declaration(self):
        """Parse ``interface Name<T> extends X, Y { ... }``."""
        name = None
        type_parameters = None
        extends = None
        body = None
        self.accept('interface')
        name = self.parse_identifier()
        if self.would_accept('<'):
            type_parameters = self.parse_type_parameters()
        if self.try_accept('extends'):
            extends = self.parse_type_list()
        body = self.parse_interface_body()
        return tree.InterfaceDeclaration(name=name,
                                         type_parameters=type_parameters,
                                         extends=extends,
                                         body=body)
    @parse_debug
    def parse_annotation_type_declaration(self):
        """Parse ``@interface Name { ... }``."""
        name = None
        body = None
        self.accept('@', 'interface')
        name = self.parse_identifier()
        body = self.parse_annotation_type_body()
        return tree.AnnotationDeclaration(name=name,
                                          body=body)
    # ------------------------------------------------------------------------------
    # -- Types --
    @parse_debug
    def parse_type(self):
        """Parse a basic or reference type, including array dimensions."""
        java_type = None
        if isinstance(self.tokens.look(), BasicType):
            java_type = self.parse_basic_type()
        elif isinstance(self.tokens.look(), Identifier):
            java_type = self.parse_reference_type()
        else:
            self.illegal("Expected type")
        java_type.dimensions = self.parse_array_dimension()
        return java_type
    @parse_debug
    def parse_basic_type(self):
        """Parse a primitive type (int, boolean, ...)."""
        return tree.BasicType(name=self.accept(BasicType))
    @parse_debug
    def parse_reference_type(self):
        """Parse a possibly-generic dotted type such as ``Map.Entry<K, V>``."""
        reference_type = tree.ReferenceType()
        tail = reference_type
        while True:
            tail.name = self.parse_identifier()
            if self.would_accept('<'):
                tail.arguments = self.parse_type_arguments()
            if self.try_accept('.'):
                # Nested type segment: chain a new ReferenceType onto the tail.
                tail.sub_type = tree.ReferenceType()
                tail = tail.sub_type
            else:
                break
        return reference_type
    @parse_debug
    def parse_type_arguments(self):
        """Parse ``<A, B, ...>`` into a list of TypeArgument nodes."""
        type_arguments = list()
        self.accept('<')
        while True:
            type_argument = self.parse_type_argument()
            type_arguments.append(type_argument)
            if self.try_accept('>'):
                break
            self.accept(',')
        return type_arguments
    @parse_debug
    def parse_type_argument(self):
        """Parse one type argument, including ``?``/``? extends``/``? super``."""
        pattern_type = None
        base_type = None
        if self.try_accept('?'):
            if self.tokens.look().value in ('extends', 'super'):
                pattern_type = self.tokens.next().value
            else:
                # Unbounded wildcard.
                return tree.TypeArgument(pattern_type='?')
        if self.would_accept(BasicType):
            # Primitives appear here only as array types (e.g. ``int[]``).
            base_type = self.parse_basic_type()
            self.accept('[', ']')
            base_type.dimensions = [None]
        else:
            base_type = self.parse_reference_type()
            base_type.dimensions = []
        base_type.dimensions += self.parse_array_dimension()
        return tree.TypeArgument(type=base_type,
                                 pattern_type=pattern_type)
    @parse_debug
    def parse_nonwildcard_type_arguments(self):
        """Parse ``<A, B>`` where wildcards are not permitted."""
        self.accept('<')
        type_arguments = self.parse_type_list()
        self.accept('>')
        return [tree.TypeArgument(type=t) for t in type_arguments]
    @parse_debug
    def parse_type_list(self):
        """Parse a comma-separated list of (possibly array) types."""
        types = list()
        while True:
            if self.would_accept(BasicType):
                # Primitives in a type list must be array types.
                base_type = self.parse_basic_type()
                self.accept('[', ']')
                base_type.dimensions = [None]
            else:
                base_type = self.parse_reference_type()
                base_type.dimensions = []
            base_type.dimensions += self.parse_array_dimension()
            types.append(base_type)
            if not self.try_accept(','):
                break
        return types
    @parse_debug
    def parse_type_arguments_or_diamond(self):
        """Parse type arguments, returning [] for the diamond ``<>``."""
        if self.try_accept('<', '>'):
            return list()
        else:
            return self.parse_type_arguments()
    @parse_debug
    def parse_nonwildcard_type_arguments_or_diamond(self):
        """Parse non-wildcard type arguments, returning [] for ``<>``."""
        if self.try_accept('<', '>'):
            return list()
        else:
            return self.parse_nonwildcard_type_arguments()
    @parse_debug
    def parse_type_parameters(self):
        """Parse a declaration's ``<T extends A & B, U>`` parameter list."""
        type_parameters = list()
        self.accept('<')
        while True:
            type_parameter = self.parse_type_parameter()
            type_parameters.append(type_parameter)
            if self.try_accept('>'):
                break
            else:
                self.accept(',')
        return type_parameters
    @parse_debug
    def parse_type_parameter(self):
        """Parse one type parameter with optional ``extends`` bounds."""
        identifier = self.parse_identifier()
        extends = None
        if self.try_accept('extends'):
            extends = list()
            while True:
                reference_type = self.parse_reference_type()
                extends.append(reference_type)
                if not self.try_accept('&'):
                    break
        return tree.TypeParameter(name=identifier,
                                  extends=extends)
    @parse_debug
    def parse_array_dimension(self):
        """Count trailing ``[]`` pairs; return a list of that many Nones."""
        array_dimension = 0
        while self.try_accept('[', ']'):
            array_dimension += 1
        return [None] * array_dimension
    # ------------------------------------------------------------------------------
    # -- Annotations and modifiers --
    @parse_debug
    def parse_modifiers(self):
        """Collect leading modifiers/annotations; also grab leading javadoc."""
        annotations = list()
        modifiers = set()
        javadoc = None
        next_token = self.tokens.look()
        if next_token:
            javadoc = next_token.javadoc
        while True:
            if self.would_accept(Modifier):
                modifiers.add(self.accept(Modifier))
            elif self.is_annotation():
                annotation = self.parse_annotation()
                annotations.append(annotation)
            else:
                break
        return (modifiers, annotations, javadoc)
    @parse_debug
    def parse_annotations(self):
        """Parse one or more consecutive annotation applications."""
        annotations = list()
        while True:
            annotation = self.parse_annotation()
            annotations.append(annotation)
            if not self.is_annotation():
                break
        return annotations
    @parse_debug
    def parse_annotation(self):
        """Parse ``@Name`` or ``@Name(...)``."""
        qualified_identifier = None
        annotation_element = None
        self.accept('@')
        qualified_identifier = self.parse_qualified_identifier()
        if self.try_accept('('):
            if not self.would_accept(')'):
                annotation_element = self.parse_annotation_element()
            self.accept(')')
        return tree.Annotation(name=qualified_identifier,
                               element=annotation_element)
    @parse_debug
    def parse_annotation_element(self):
        """Parse an annotation's argument: key=value pairs or a single value."""
        if self.would_accept(Identifier, '='):
            return self.parse_element_value_pairs()
        else:
            return self.parse_element_value()
    @parse_debug
    def parse_element_value_pairs(self):
        """Parse a comma-separated list of ``name = value`` pairs."""
        pairs = list()
        while True:
            pair = self.parse_element_value_pair()
            pairs.append(pair)
            if not self.try_accept(','):
                break
        return pairs
    @parse_debug
    def parse_element_value_pair(self):
        """Parse a single ``name = value`` annotation argument."""
        identifier = self.parse_identifier()
        self.accept('=')
        value = self.parse_element_value()
        return tree.ElementValuePair(name=identifier,
                                     value=value)
    @parse_debug
    def parse_element_value(self):
        """Parse an annotation value: annotation, array, or expression."""
        if self.is_annotation():
            return self.parse_annotation()
        elif self.would_accept('{'):
            return self.parse_element_value_array_initializer()
        else:
            return self.parse_expressionl()
    @parse_debug
    def parse_element_value_array_initializer(self):
        """Parse ``{ v1, v2, ... }`` as an annotation array value."""
        self.accept('{')
        if self.try_accept('}'):
            # NOTE(review): the empty case returns a plain list rather than
            # an ElementArrayValue -- callers apparently tolerate this.
            return list()
        element_values = self.parse_element_values()
        self.try_accept(',')
        self.accept('}')
        return tree.ElementArrayValue(values=element_values)
    @parse_debug
    def parse_element_values(self):
        """Parse the comma-separated values inside an annotation array."""
        element_values = list()
        while True:
            element_value = self.parse_element_value()
            element_values.append(element_value)
            if self.would_accept('}') or self.would_accept(',', '}'):
                break
            self.accept(',')
        return element_values
    # ------------------------------------------------------------------------------
    # -- Class body --
    @parse_debug
    def parse_class_body(self):
        """Parse ``{ member* }`` of a class; skip empty declarations."""
        declarations = list()
        self.accept('{')
        while not self.would_accept('}'):
            declaration = self.parse_class_body_declaration()
            if declaration:
                declarations.append(declaration)
        self.accept('}')
        return declarations
    @parse_debug
    def parse_class_body_declaration(self):
        """Parse one class member, initializer block, or stray ';' (None)."""
        token = self.tokens.look()
        if self.try_accept(';'):
            return None
        elif self.would_accept('static', '{'):
            # Static initializer block.
            self.accept('static')
            return self.parse_block()
        elif self.would_accept('{'):
            # Instance initializer block.
            return self.parse_block()
        else:
            return self.parse_member_declaration()
    @parse_debug
    def parse_member_declaration(self):
        """Parse a field, method, constructor, or nested type member."""
        modifiers, annotations, javadoc = self.parse_modifiers()
        member = None
        token = self.tokens.look()
        if self.try_accept('void'):
            method_name = self.parse_identifier()
            member = self.parse_void_method_declarator_rest()
            member.name = method_name
        elif token.value == '<':
            member = self.parse_generic_method_or_constructor_declaration()
        elif token.value == 'class':
            member = self.parse_normal_class_declaration()
        elif token.value == 'enum':
            member = self.parse_enum_declaration()
        elif token.value == 'interface':
            member = self.parse_normal_interface_declaration()
        elif self.is_annotation_declaration():
            member = self.parse_annotation_type_declaration()
        elif self.would_accept(Identifier, '('):
            # Identifier followed by '(' at member level means a constructor.
            constructor_name = self.parse_identifier()
            member = self.parse_constructor_declarator_rest()
            member.name = constructor_name
        else:
            member = self.parse_method_or_field_declaraction()
        member._position = token.position
        member.modifiers = modifiers
        member.annotations = annotations
        member.documentation = javadoc
        return member
    @parse_debug
    def parse_method_or_field_declaraction(self):
        """Parse ``Type name`` then decide between method and field.

        (Method name keeps the historical misspelling: it is public API.)
        """
        member_type = self.parse_type()
        member_name = self.parse_identifier()
        member = self.parse_method_or_field_rest()
        if isinstance(member, tree.MethodDeclaration):
            # Array dims may be written after the parameter list; merge them.
            member_type.dimensions += member.return_type.dimensions
            member.name = member_name
            member.return_type = member_type
        else:
            member.type = member_type
            member.declarators[0].name = member_name
        return member
    @parse_debug
    def parse_method_or_field_rest(self):
        """After ``Type name``: '(' starts a method, otherwise a field."""
        if self.would_accept('('):
            return self.parse_method_declarator_rest()
        else:
            rest = self.parse_field_declarators_rest()
            self.accept(';')
            return rest
    @parse_debug
    def parse_field_declarators_rest(self):
        """Parse the remainder of a field: dims/initializer and extra names."""
        array_dimension, initializer = self.parse_variable_declarator_rest()
        declarators = [tree.VariableDeclarator(dimensions=array_dimension,
                                               initializer=initializer)]
        while self.try_accept(','):
            declarator = self.parse_variable_declarator()
            declarators.append(declarator)
        return tree.FieldDeclaration(declarators=declarators)
    @parse_debug
    def parse_method_declarator_rest(self):
        """Parse a method's params, throws and body (or ';' if abstract)."""
        formal_parameters = self.parse_formal_parameters()
        additional_dimensions = self.parse_array_dimension()
        throws = None
        body = None
        if self.try_accept('throws'):
            throws = self.parse_qualified_identifier_list()
        if self.would_accept('{'):
            body = self.parse_block()
        else:
            self.accept(';')
        return tree.MethodDeclaration(parameters=formal_parameters,
                                      throws=throws,
                                      body=body,
                                      return_type=tree.Type(dimensions=additional_dimensions))
    @parse_debug
    def parse_void_method_declarator_rest(self):
        """Parse the remainder of a ``void`` method after its name."""
        formal_parameters = self.parse_formal_parameters()
        throws = None
        body = None
        if self.try_accept('throws'):
            throws = self.parse_qualified_identifier_list()
        if self.would_accept('{'):
            body = self.parse_block()
        else:
            self.accept(';')
        return tree.MethodDeclaration(parameters=formal_parameters,
                                      throws=throws,
                                      body=body)
    @parse_debug
    def parse_constructor_declarator_rest(self):
        """Parse a constructor's params, throws and mandatory body."""
        formal_parameters = self.parse_formal_parameters()
        throws = None
        body = None
        if self.try_accept('throws'):
            throws = self.parse_qualified_identifier_list()
        body = self.parse_block()
        return tree.ConstructorDeclaration(parameters=formal_parameters,
                                           throws=throws,
                                           body=body)
    @parse_debug
    def parse_generic_method_or_constructor_declaration(self):
        """Parse a member starting with ``<T, ...>`` type parameters."""
        type_parameters = self.parse_type_parameters()
        method = None
        if self.would_accept(Identifier, '('):
            constructor_name = self.parse_identifier()
            method = self.parse_constructor_declarator_rest()
            method.name = constructor_name
        elif self.try_accept('void'):
            method_name = self.parse_identifier()
            method = self.parse_void_method_declarator_rest()
            method.name = method_name
        else:
            method_return_type = self.parse_type()
            method_name = self.parse_identifier()
            method = self.parse_method_declarator_rest()
            method_return_type.dimensions += method.return_type.dimensions
            method.return_type = method_return_type
            method.name = method_name
        method.type_parameters = type_parameters
        return method
    # ------------------------------------------------------------------------------
    # -- Interface body --
    @parse_debug
    def parse_interface_body(self):
        """Parse ``{ member* }`` of an interface; skip empty declarations."""
        declarations = list()
        self.accept('{')
        while not self.would_accept('}'):
            declaration = self.parse_interface_body_declaration()
            if declaration:
                declarations.append(declaration)
        self.accept('}')
        return declarations
    @parse_debug
    def parse_interface_body_declaration(self):
        """Parse one interface member; a stray ';' yields None."""
        if self.try_accept(';'):
            return None
        modifiers, annotations, javadoc = self.parse_modifiers()
        declaration = self.parse_interface_member_declaration()
        declaration.modifiers = modifiers
        declaration.annotations = annotations
        declaration.documentation = javadoc
        return declaration
    @parse_debug
    def parse_interface_member_declaration(self):
        """Dispatch an interface member to the matching parser."""
        declaration = None
        if self.would_accept('class'):
            declaration = self.parse_normal_class_declaration()
        elif self.would_accept('interface'):
            declaration = self.parse_normal_interface_declaration()
        elif self.would_accept('enum'):
            declaration = self.parse_enum_declaration()
        elif self.is_annotation_declaration():
            declaration = self.parse_annotation_type_declaration()
        elif self.would_accept('<'):
            declaration = self.parse_interface_generic_method_declarator()
        elif self.try_accept('void'):
            method_name = self.parse_identifier()
            declaration = self.parse_void_interface_method_declarator_rest()
            declaration.name = method_name
        else:
            declaration = self.parse_interface_method_or_field_declaration()
        return declaration
    @parse_debug
    def parse_interface_method_or_field_declaration(self):
        """Parse ``Type name`` then decide between method and constant."""
        java_type = self.parse_type()
        name = self.parse_identifier()
        member = self.parse_interface_method_or_field_rest()
        if isinstance(member, tree.MethodDeclaration):
            # Array dims may follow the parameter list; merge into the type.
            java_type.dimensions += member.return_type.dimensions
            member.name = name
            member.return_type = java_type
        else:
            member.declarators[0].name = name
            member.type = java_type
        return member
    @parse_debug
    def parse_interface_method_or_field_rest(self):
        """After ``Type name``: '(' starts a method, otherwise a constant."""
        rest = None
        if self.would_accept('('):
            rest = self.parse_interface_method_declarator_rest()
        else:
            rest = self.parse_constant_declarators_rest()
            self.accept(';')
        return rest
    @parse_debug
    def parse_constant_declarators_rest(self):
        """Parse the remainder of an interface constant declaration list."""
        array_dimension, initializer = self.parse_constant_declarator_rest()
        declarators = [tree.VariableDeclarator(dimensions=array_dimension,
                                               initializer=initializer)]
        while self.try_accept(','):
            declarator = self.parse_constant_declarator()
            declarators.append(declarator)
        return tree.ConstantDeclaration(declarators=declarators)
    @parse_debug
    def parse_constant_declarator_rest(self):
        """Parse dims and the mandatory '=' initializer of a constant."""
        array_dimension = self.parse_array_dimension()
        self.accept('=')
        initializer = self.parse_variable_initializer()
        return (array_dimension, initializer)
    @parse_debug
    def parse_constant_declarator(self):
        """Parse one additional constant: ``name [dims] = value``."""
        name = self.parse_identifier()
        additional_dimension, initializer = self.parse_constant_declarator_rest()
        return tree.VariableDeclarator(name=name,
                                       dimensions=additional_dimension,
                                       initializer=initializer)
    @parse_debug
    def parse_interface_method_declarator_rest(self):
        """Parse an interface method's params, throws and optional body."""
        parameters = self.parse_formal_parameters()
        array_dimension = self.parse_array_dimension()
        throws = None
        body = None
        if self.try_accept('throws'):
            throws = self.parse_qualified_identifier_list()
        if self.would_accept('{'):
            body = self.parse_block()
        else:
            self.accept(';')
        return tree.MethodDeclaration(parameters=parameters,
                                      throws=throws,
                                      body=body,
                                      return_type=tree.Type(dimensions=array_dimension))
    @parse_debug
    def parse_void_interface_method_declarator_rest(self):
        """Parse the remainder of a ``void`` interface method."""
        parameters = self.parse_formal_parameters()
        throws = None
        body = None
        if self.try_accept('throws'):
            throws = self.parse_qualified_identifier_list()
        if self.would_accept('{'):
            body = self.parse_block()
        else:
            self.accept(';')
        return tree.MethodDeclaration(parameters=parameters,
                                      throws=throws,
                                      body=body)
    @parse_debug
    def parse_interface_generic_method_declarator(self):
        """Parse an interface method declared with ``<T, ...>`` parameters."""
        type_parameters = self.parse_type_parameters()
        return_type = None
        method_name = None
        if not self.try_accept('void'):
            return_type = self.parse_type()
        method_name = self.parse_identifier()
        method = self.parse_interface_method_declarator_rest()
        method.name = method_name
        method.return_type = return_type
        method.type_parameters = type_parameters
        return method
    # ------------------------------------------------------------------------------
    # -- Parameters and variables --
    @parse_debug
    def parse_formal_parameters(self):
        """Parse ``( [mods Type [...] name [dims]],* )`` of a declarator."""
        formal_parameters = list()
        self.accept('(')
        if self.try_accept(')'):
            return formal_parameters
        while True:
            modifiers, annotations = self.parse_variable_modifiers()
            parameter_type = self.parse_type()
            varargs = False
            if self.try_accept('...'):
                varargs = True
            parameter_name = self.parse_identifier()
            parameter_type.dimensions += self.parse_array_dimension()
            parameter = tree.FormalParameter(modifiers=modifiers,
                                             annotations=annotations,
                                             type=parameter_type,
                                             name=parameter_name,
                                             varargs=varargs)
            formal_parameters.append(parameter)
            if varargs:
                # varargs parameter must be the last
                break
            if not self.try_accept(','):
                break
        self.accept(')')
        return formal_parameters
    @parse_debug
    def parse_variable_modifiers(self):
        """Collect 'final' and annotations allowed on locals/parameters."""
        modifiers = set()
        annotations = list()
        while True:
            if self.try_accept('final'):
                modifiers.add('final')
            elif self.is_annotation():
                annotation = self.parse_annotation()
                annotations.append(annotation)
            else:
                break
        return modifiers, annotations
    @parse_debug
    def parse_variable_declators(self):
        """Parse comma-separated declarators.

        NOTE(review): misspelled name, and it calls parse_variable_declator()
        which is not defined in this chunk -- this looks like dead/legacy
        code; confirm before use. See parse_variable_declarators below.
        """
        declarators = list()
        while True:
            declarator = self.parse_variable_declator()
            declarators.append(declarator)
            if not self.try_accept(','):
                break
        return declarators
    @parse_debug
    def parse_variable_declarators(self):
        """Parse comma-separated variable declarators."""
        declarators = list()
        while True:
            declarator = self.parse_variable_declarator()
            declarators.append(declarator)
            if not self.try_accept(','):
                break
        return declarators
    @parse_debug
    def parse_variable_declarator(self):
        """Parse one ``name [dims] [= initializer]`` declarator."""
        identifier = self.parse_identifier()
        array_dimension, initializer = self.parse_variable_declarator_rest()
        return tree.VariableDeclarator(name=identifier,
                                       dimensions=array_dimension,
                                       initializer=initializer)
    @parse_debug
    def parse_variable_declarator_rest(self):
        """Parse optional dims and an optional '=' initializer."""
        array_dimension = self.parse_array_dimension()
        initializer = None
        if self.try_accept('='):
            initializer = self.parse_variable_initializer()
        return (array_dimension, initializer)
    @parse_debug
    def parse_variable_initializer(self):
        """Parse an initializer: array literal or plain expression."""
        if self.would_accept('{'):
            return self.parse_array_initializer()
        else:
            return self.parse_expression()
    @parse_debug
    def parse_array_initializer(self):
        """Parse ``{ v1, v2, ... }`` (with optional trailing comma)."""
        array_initializer = tree.ArrayInitializer(initializers=list())
        self.accept('{')
        if self.try_accept(','):
            # "{,}" degenerate form: empty initializer.
            self.accept('}')
            return array_initializer
        if self.try_accept('}'):
            return array_initializer
        while True:
            initializer = self.parse_variable_initializer()
            array_initializer.initializers.append(initializer)
            if not self.would_accept('}'):
                self.accept(',')
            if self.try_accept('}'):
                return array_initializer
    # ------------------------------------------------------------------------------
    # -- Blocks and statements --
    @parse_debug
    def parse_block(self):
        """Parse ``{ statement* }`` and return the list of statements."""
        statements = list()
        self.accept('{')
        while not self.would_accept('}'):
            statement = self.parse_block_statement()
            statements.append(statement)
        self.accept('}')
        return statements
    @parse_debug
    def parse_block_statement(self):
        """Parse one statement inside a block.

        Disambiguates between local type declarations, local variable
        declarations and ordinary statements, using lookahead and, as a
        last resort, speculative parsing with backtracking.
        """
        if self.would_accept(Identifier, ':'):
            # Labeled statement
            return self.parse_statement()
        if self.would_accept('synchronized'):
            return self.parse_statement()
        token = None
        found_annotations = False
        i = 0
        # Look past annotations and modifiers. If we find a modifier that is not
        # 'final' then the statement must be a class or interface declaration
        while True:
            token = self.tokens.look(i)
            if isinstance(token, Modifier):
                if not token.value == 'final':
                    return self.parse_class_or_interface_declaration()
            elif self.is_annotation(i):
                found_annotations = True
                i += 2
                while self.tokens.look(i).value == '.':
                    i += 2
                if self.tokens.look(i).value == '(':
                    # Skip a parenthesized annotation argument list.
                    parens = 1
                    i += 1
                    while parens > 0:
                        token = self.tokens.look(i)
                        if token.value == '(':
                            parens += 1
                        elif token.value == ')':
                            parens -= 1
                        i += 1
                    continue
            else:
                break
            i += 1
        if token.value in ('class', 'enum', 'interface', '@'):
            return self.parse_class_or_interface_declaration()
        if found_annotations or isinstance(token, BasicType):
            return self.parse_local_variable_declaration_statement()
        # At this point, if the block statement is a variable definition the next
        # token MUST be an identifier, so if it isn't we can conclude the block
        # statement is a normal statement
        if not isinstance(token, Identifier):
            return self.parse_statement()
        # We can't easily determine the statement type. Try parsing as a variable
        # declaration first and fall back to a statement
        try:
            with self.tokens:
                return self.parse_local_variable_declaration_statement()
        except JavaSyntaxError:
            return self.parse_statement()
    @parse_debug
    def parse_local_variable_declaration_statement(self):
        """Parse ``[mods] Type declarator,* ;`` as a local declaration."""
        modifiers, annotations = self.parse_variable_modifiers()
        java_type = self.parse_type()
        declarators = self.parse_variable_declarators()
        self.accept(';')
        var = tree.LocalVariableDeclaration(modifiers=modifiers,
                                            annotations=annotations,
                                            type=java_type,
                                            declarators=declarators)
        return var
@parse_debug
def parse_statement(self):
token = self.tokens.look()
if self.would_accept('{'):
block = self.parse_block()
return tree.BlockStatement(statements=block)
elif self.try_accept(';'):
return tree.Statement()
elif self.would_accept(Identifier, ':'):
identifer = self.parse_identifier()
self.accept(':')
statement = self.parse_statement()
statement.label = identifer
return statement
elif self.try_accept('if'):
condition = self.parse_par_expression()
then = self.parse_statement()
else_statement = None
if self.try_accept('else'):
else_statement = self.parse_statement()
return tree.IfStatement(condition=condition,
then_statement=then,
else_statement=else_statement)
elif self.try_accept('assert'):
condition = self.parse_expression()
value = None
if self.try_accept(':'):
value = self.parse_expression()
self.accept(';')
return tree.AssertStatement(condition=condition,
value=value)
elif self.try_accept('switch'):
switch_expression = self.parse_par_expression()
self.accept('{')
switch_block = self.parse_switch_block_statement_groups()
self.accept('}')
return tree.SwitchStatement(expression=switch_expression,
cases=switch_block)
elif self.try_accept('while'):
condition = self.parse_par_expression()
action = self.parse_statement()
return tree.WhileStatement(condition=condition,
body=action)
elif self.try_accept('do'):
action = self.parse_statement()
self.accept('while')
condition = self.parse_par_expression()
self.accept(';')
return tree.DoStatement(condition=condition,
body=action)
elif self.try_accept('for'):
self.accept('(')
for_control = self.parse_for_control()
self.accept(')')
for_statement = self.parse_statement()
return tree.ForStatement(control=for_control,
body=for_statement)
elif self.try_accept('break'):
label = None
if self.would_accept(Identifier):
label = self.parse_identifier()
self.accept(';')
return tree.BreakStatement(goto=label)
elif self.try_accept('continue'):
label = None
if self.would_accept(Identifier):
label = self.parse_identifier()
self.accept(';')
return tree.ContinueStatement(goto=label)
elif self.try_accept('return'):
value = None
if not self.would_accept(';'):
value = self.parse_expression()
self.accept(';')
return tree.ReturnStatement(expression=value)
elif self.try_accept('throw'):
value = self.parse_expression()
self.accept(';')
return tree.ThrowStatement(expression=value)
elif self.try_accept('synchronized'):
lock = self.parse_par_expression()
block = self.parse_block()
return tree.SynchronizedStatement(lock=lock,
block=block)
elif self.try_accept('try'):
resource_specification = None
block = None
catches = None
finally_block = None
if self.would_accept('{'):
block = self.parse_block()
if self.would_accept('catch'):
catches = self.parse_catches()
if self.try_accept('finally'):
finally_block = self.parse_block()
if catches == None and finally_block == None:
self.illegal("Expected catch/finally block")
else:
resource_specification = self.parse_resource_specification()
block = self.parse_block()
if self.would_accept('catch'):
catches = self.parse_catches()
if self.try_accept('finally'):
finally_block = self.parse_block()
return tree.TryStatement(resources=resource_specification,
block=block,
catches=catches,
finally_block=finally_block)
else:
expression = self.parse_expression()
self.accept(';')
return tree.StatementExpression(expression=expression)
# ------------------------------------------------------------------------------
# -- Try / catch --
    @parse_debug
    def parse_catches(self):
        """Parse one or more consecutive ``catch`` clauses; returns a list."""
        catches = list()
        while True:
            catch = self.parse_catch_clause()
            catches.append(catch)
            if not self.would_accept('catch'):
                break
        return catches
    @parse_debug
    def parse_catch_clause(self):
        """Parse a single catch clause, including Java 7 multi-catch
        (exception types joined by ``|``) and the catch parameter name."""
        self.accept('catch', '(')
        modifiers, annotations = self.parse_variable_modifiers()
        catch_parameter = tree.CatchClauseParameter(types=list())
        while True:
            catch_type = self.parse_qualified_identifier()
            catch_parameter.types.append(catch_type)
            if not self.try_accept('|'):
                break
        catch_parameter.name = self.parse_identifier()
        self.accept(')')
        block = self.parse_block()
        return tree.CatchClause(parameter=catch_parameter,
                                block=block)
    @parse_debug
    def parse_resource_specification(self):
        """Parse the parenthesized resource list of a try-with-resources
        statement; resources are separated by ';' with an optional trailer."""
        resources = list()
        self.accept('(')
        while True:
            resource = self.parse_resource()
            resources.append(resource)
            if not self.would_accept(')'):
                self.accept(';')
            if self.try_accept(')'):
                break
        return resources
    @parse_debug
    def parse_resource(self):
        """Parse one try-with-resources resource: modifiers, type, name and
        its mandatory '=' initializer expression."""
        modifiers, annotations = self.parse_variable_modifiers()
        reference_type = self.parse_reference_type()
        reference_type.dimensions = self.parse_array_dimension()
        name = self.parse_identifier()
        # Array brackets may also appear after the name (C-style declaration)
        reference_type.dimensions += self.parse_array_dimension()
        self.accept('=')
        value = self.parse_expression()
        return tree.TryResource(modifiers=modifiers,
                                annotations=annotations,
                                type=reference_type,
                                name=name,
                                value=value)
# ------------------------------------------------------------------------------
# -- Switch and for statements ---
    @parse_debug
    def parse_switch_block_statement_groups(self):
        """Parse all case/default groups inside a switch body."""
        statement_groups = list()
        while self.tokens.look().value in ('case', 'default'):
            statement_group = self.parse_switch_block_statement_group()
            statement_groups.append(statement_group)
        return statement_groups
    @parse_debug
    def parse_switch_block_statement_group(self):
        """Parse one run of ``case``/``default`` labels followed by the
        statements belonging to them (up to the next label or '}')."""
        labels = list()
        statements = list()
        while True:
            case_type = self.tokens.next().value
            case_value = None
            if case_type == 'case':
                # A bare identifier label (e.g. an enum constant) vs. a full expression
                if self.would_accept(Identifier, ':'):
                    case_value = self.parse_identifier()
                else:
                    case_value = self.parse_expression()
                labels.append(case_value)
            elif not case_type == 'default':
                self.illegal("Expected switch case")
            self.accept(':')
            if self.tokens.look().value not in ('case', 'default'):
                break
        while self.tokens.look().value not in ('case', 'default', '}'):
            statement = self.parse_block_statement()
            statements.append(statement)
        return tree.SwitchStatementCase(case=labels,
                                        statements=statements)
    @parse_debug
    def parse_for_control(self):
        """Parse a for-statement header. First tries the variable-declaration
        form (covers both classic and enhanced for); on failure, backtracks
        (via the ``with self.tokens`` snapshot) to the three-part form."""
        # Try for_var_control and fall back to normal three part for control
        try:
            with self.tokens:
                return self.parse_for_var_control()
        except JavaSyntaxError:
            pass
        init = None
        if not self.would_accept(';'):
            init = self.parse_for_init_or_update()
        self.accept(';')
        condition = None
        if not self.would_accept(';'):
            condition = self.parse_expression()
        self.accept(';')
        update = None
        if not self.would_accept(')'):
            update = self.parse_for_init_or_update()
        return tree.ForControl(init=init,
                               condition=condition,
                               update=update)
    @parse_debug
    def parse_for_var_control(self):
        """Parse a for header that starts with a variable declaration.
        Returns EnhancedForControl for ``for (T x : iterable)`` or a classic
        ForControl when the rest is declarators/condition/update."""
        modifiers, annotations = self.parse_variable_modifiers()
        var_type = self.parse_type()
        var_name = self.parse_identifier()
        var_type.dimensions += self.parse_array_dimension()
        var = tree.VariableDeclaration(modifiers=modifiers,
                                       annotations=annotations,
                                       type=var_type)
        rest = self.parse_for_var_control_rest()
        if isinstance(rest, tree.Expression):
            # ':' form -- rest is the iterable expression of an enhanced for
            var.declarators = [tree.VariableDeclarator(name=var_name)]
            return tree.EnhancedForControl(var=var,
                                           iterable=rest)
        else:
            declarators, condition, update = rest
            declarators[0].name = var_name
            var.declarators = declarators
            return tree.ForControl(init=var,
                                   condition=condition,
                                   update=update)
    @parse_debug
    def parse_for_var_control_rest(self):
        """Parse what follows the first variable of a for header: either
        ':' iterable (returns an Expression) or the remaining declarators,
        condition and update of a classic for (returns a 3-tuple)."""
        if self.try_accept(':'):
            expression = self.parse_expression()
            return expression
        declarators = None
        if not self.would_accept(';'):
            declarators = self.parse_for_variable_declarator_rest()
        else:
            declarators = [tree.VariableDeclarator()]
        self.accept(';')
        condition = None
        if not self.would_accept(';'):
            condition = self.parse_expression()
        self.accept(';')
        update = None
        if not self.would_accept(')'):
            update = self.parse_for_init_or_update()
        return (declarators, condition, update)
    @parse_debug
    def parse_for_variable_declarator_rest(self):
        """Parse the optional initializer of the first for-loop variable and
        any further comma-separated declarators."""
        initializer = None
        if self.try_accept('='):
            initializer = self.parse_variable_initializer()
        declarators = [tree.VariableDeclarator(initializer=initializer)]
        while self.try_accept(','):
            declarator = self.parse_variable_declarator()
            declarators.append(declarator)
        return declarators
    @parse_debug
    def parse_for_init_or_update(self):
        """Parse a comma-separated expression list (for-init or for-update)."""
        expressions = list()
        while True:
            expression = self.parse_expression()
            expressions.append(expression)
            if not self.try_accept(','):
                break
        return expressions
# ------------------------------------------------------------------------------
# -- Expressions --
    @parse_debug
    def parse_expression(self):
        """Parse a full expression. A trailing assignment operator folds into
        an Assignment node; recursing for the RHS makes '=' right-associative."""
        expressionl = self.parse_expressionl()
        assignment_type = None
        assignment_expression = None
        if self.tokens.look().value in Operator.ASSIGNMENT:
            assignment_type = self.tokens.next().value
            assignment_expression = self.parse_expression()
            return tree.Assignment(expressionl=expressionl,
                                   type=assignment_type,
                                   value=assignment_expression)
        else:
            return expressionl
    @parse_debug
    def parse_expressionl(self):
        """Parse the ternary/lambda/method-reference precedence level:
        ``cond ? a : b``, ``param -> body`` and ``expr::method``."""
        expression_2 = self.parse_expression_2()
        true_expression = None
        false_expression = None
        if self.try_accept('?'):
            true_expression = self.parse_expression()
            self.accept(':')
            false_expression = self.parse_expressionl()
            return tree.TernaryExpression(condition=expression_2,
                                          if_true=true_expression,
                                          if_false=false_expression)
        if self.would_accept('->'):
            # Single-parameter lambda without parentheses
            body = self.parse_lambda_method_body()
            return tree.LambdaExpression(parameters=[expression_2],
                                         body=body)
        if self.try_accept('::'):
            method_reference, type_arguments = self.parse_method_reference()
            return tree.MethodReference(
                expression=expression_2,
                method=method_reference,
                type_arguments=type_arguments)
        return expression_2
    @parse_debug
    def parse_expression_2(self):
        """Parse the binary-operator / ``instanceof`` level; collects a flat
        operator chain and rebuilds precedence via build_binary_operation."""
        expression_3 = self.parse_expression_3()
        token = self.tokens.look()
        if token.value in Operator.INFIX or token.value == 'instanceof':
            parts = self.parse_expression_2_rest()
            parts.insert(0, expression_3)
            return self.build_binary_operation(parts)
        return expression_3
    @parse_debug
    def parse_expression_2_rest(self):
        """Collect alternating operators and operands of a binary chain into
        a flat list: [op1, expr1, op2, expr2, ...]."""
        parts = list()
        token = self.tokens.look()
        while token.value in Operator.INFIX or token.value == 'instanceof':
            if self.try_accept('instanceof'):
                comparison_type = self.parse_type()
                parts.extend(('instanceof', comparison_type))
            else:
                operator = self.parse_infix_operator()
                expression = self.parse_expression_3()
                parts.extend((operator, expression))
            token = self.tokens.look()
        return parts
# ------------------------------------------------------------------------------
# -- Expression operators --
    @parse_debug
    def parse_expression_3(self):
        """Parse unary level: prefix operators, then (with backtracking) a
        parenthesized lambda or a cast, otherwise a primary expression with
        its trailing selectors ('.x', '[i]') and postfix operators."""
        prefix_operators = list()
        while self.tokens.look().value in Operator.PREFIX:
            prefix_operators.append(self.tokens.next().value)
        if self.would_accept('('):
            # '(' is ambiguous: lambda parameter list, cast, or parenthesized
            # expression. Try each with a token-stream snapshot for rollback.
            try:
                with self.tokens:
                    lambda_exp = self.parse_lambda_expression()
                    if lambda_exp:
                        return lambda_exp
            except JavaSyntaxError:
                pass
            try:
                with self.tokens:
                    self.accept('(')
                    cast_target = self.parse_type()
                    self.accept(')')
                    expression = self.parse_expression_3()
                    return tree.Cast(type=cast_target,
                                     expression=expression)
            except JavaSyntaxError:
                pass
        primary = self.parse_primary()
        primary.prefix_operators = prefix_operators
        primary.selectors = list()
        primary.postfix_operators = list()
        token = self.tokens.look()
        while token.value in '[.':
            selector = self.parse_selector()
            primary.selectors.append(selector)
            token = self.tokens.look()
        while token.value in Operator.POSTFIX:
            primary.postfix_operators.append(self.tokens.next().value)
            token = self.tokens.look()
        return primary
    @parse_debug
    def parse_method_reference(self):
        """Parse the right side of '::' -- optional type arguments and either
        ``new`` (constructor reference) or an expression naming the method."""
        type_arguments = list()
        if self.would_accept('<'):
            type_arguments = self.parse_nonwildcard_type_arguments()
        if self.would_accept('new'):
            method_reference = tree.MemberReference(member=self.accept('new'))
        else:
            method_reference = self.parse_expression()
        return method_reference, type_arguments
@parse_debug
def parse_lambda_expression(self):
lambda_expr = None
parameters = None
if self.would_accept('(', Identifier, ','):
self.accept('(')
parameters = []
while not self.would_accept(')'):
parameters.append(tree.InferredFormalParameter(
name=self.parse_identifier()))
self.try_accept(',')
self.accept(')')
else:
parameters = self.parse_formal_parameters()
body = self.parse_lambda_method_body()
return tree.LambdaExpression(parameters=parameters,
body=body)
    @parse_debug
    def parse_lambda_method_body(self):
        """Parse '->' followed by either a block body or a bare expression."""
        if self.accept('->'):
            if self.would_accept('{'):
                return self.parse_block()
            else:
                return self.parse_expression()
@parse_debug
def parse_infix_operator(self):
operator = self.accept(Operator)
if not operator in Operator.INFIX:
self.illegal("Expected infix operator")
if operator == '>' and self.try_accept('>'):
operator = '>>'
if self.try_accept('>'):
operator = '>>>'
return operator
# ------------------------------------------------------------------------------
# -- Primary expressions --
    @parse_debug
    def parse_primary(self):
        """Parse a primary expression: literal, parenthesized expression,
        this/super forms, object/array creators, explicit generic invocation,
        qualified identifiers with their suffix, or a class literal."""
        token = self.tokens.look()
        if isinstance(token, Literal):
            return self.parse_literal()
        elif token.value == '(':
            return self.parse_par_expression()
        elif self.try_accept('this'):
            arguments = None
            if self.would_accept('('):
                arguments = self.parse_arguments()
                return tree.ExplicitConstructorInvocation(arguments=arguments)
            return tree.This()
        elif self.would_accept('super', '::'):
            # Method reference on super: leave '::' for the caller to consume
            self.accept('super')
            return token
        elif self.try_accept('super'):
            super_suffix = self.parse_super_suffix()
            return super_suffix
        elif self.try_accept('new'):
            return self.parse_creator()
        elif token.value == '<':
            type_arguments = self.parse_nonwildcard_type_arguments()
            if self.try_accept('this'):
                arguments = self.parse_arguments()
                return tree.ExplicitConstructorInvocation(type_arguments=type_arguments,
                                                          arguments=arguments)
            else:
                invocation = self.parse_explicit_generic_invocation_suffix()
                invocation.type_arguments = type_arguments
                return invocation
        elif isinstance(token, Identifier):
            qualified_identifier = [self.parse_identifier()]
            while self.would_accept('.', Identifier):
                self.accept('.')
                identifier = self.parse_identifier()
                qualified_identifier.append(identifier)
            identifier_suffix = self.parse_identifier_suffix()
            if isinstance(identifier_suffix, (tree.MemberReference, tree.MethodInvocation)):
                # Take the last identifer as the member and leave the rest for the qualifier
                identifier_suffix.member = qualified_identifier.pop()
            elif isinstance(identifier_suffix, tree.ClassReference):
                identifier_suffix.type = tree.ReferenceType(name=qualified_identifier.pop())
            identifier_suffix.qualifier = '.'.join(qualified_identifier)
            return identifier_suffix
        elif isinstance(token, BasicType):
            base_type = self.parse_basic_type()
            base_type.dimensions = self.parse_array_dimension()
            self.accept('.', 'class')
            return tree.ClassReference(type=base_type)
        elif self.try_accept('void'):
            self.accept('.', 'class')
            return tree.VoidClassReference()
        self.illegal("Expected expression")
    @parse_debug
    def parse_literal(self):
        """Consume a literal token and wrap it in a Literal AST node."""
        literal = self.accept(Literal)
        return tree.Literal(value=literal)
    @parse_debug
    def parse_par_expression(self):
        """Parse a parenthesized expression: '(' expression ')'."""
        self.accept('(')
        expression = self.parse_expression()
        self.accept(')')
        return expression
    @parse_debug
    def parse_arguments(self):
        """Parse a (possibly empty) parenthesized argument list."""
        expressions = list()
        self.accept('(')
        if self.try_accept(')'):
            return expressions
        while True:
            expression = self.parse_expression()
            expressions.append(expression)
            if not self.try_accept(','):
                break
        self.accept(')')
        return expressions
    @parse_debug
    def parse_super_suffix(self):
        """Parse what follows 'super': either '.' member (field or method,
        optionally with type arguments) or a constructor-call argument list.
        Returns the matching Super* AST node."""
        identifier = None
        type_arguments = None
        arguments = None
        if self.try_accept('.'):
            if self.would_accept('<'):
                type_arguments = self.parse_nonwildcard_type_arguments()
            identifier = self.parse_identifier()
            if self.would_accept('('):
                arguments = self.parse_arguments()
        else:
            arguments = self.parse_arguments()
        if identifier and arguments is not None:
            return tree.SuperMethodInvocation(member=identifier,
                                              arguments=arguments,
                                              type_arguments=type_arguments)
        elif arguments is not None:
            return tree.SuperConstructorInvocation(arguments=arguments)
        else:
            return tree.SuperMemberReference(member=identifier)
    @parse_debug
    def parse_explicit_generic_invocation_suffix(self):
        """Parse the call that follows explicit type arguments: either a
        'super' suffix or identifier(arguments)."""
        identifier = None
        arguments = None
        if self.try_accept('super'):
            return self.parse_super_suffix()
        else:
            identifier = self.parse_identifier()
            arguments = self.parse_arguments()
            return tree.MethodInvocation(member=identifier,
                                         arguments=arguments)
# ------------------------------------------------------------------------------
# -- Creators --
    @parse_debug
    def parse_creator(self):
        """Parse what follows 'new': a basic-type array creator, a reference
        -type array creator, or a class creator (optionally with explicit
        constructor type arguments)."""
        constructor_type_arguments = None
        if self.would_accept(BasicType):
            created_name = self.parse_basic_type()
            rest = self.parse_array_creator_rest()
            rest.type = created_name
            return rest
        if self.would_accept('<'):
            constructor_type_arguments = self.parse_nonwildcard_type_arguments()
        created_name = self.parse_created_name()
        if self.would_accept('['):
            if constructor_type_arguments:
                self.illegal("Array creator not allowed with generic constructor type arguments")
            rest = self.parse_array_creator_rest()
            rest.type = created_name
            return rest
        else:
            arguments, body = self.parse_class_creator_rest()
            return tree.ClassCreator(constructor_type_arguments=constructor_type_arguments,
                                     type=created_name,
                                     arguments=arguments,
                                     body=body)
    @parse_debug
    def parse_created_name(self):
        """Parse the (possibly dotted, possibly generic) type name after
        'new', building a linked chain of ReferenceType via sub_type."""
        created_name = tree.ReferenceType()
        tail = created_name
        while True:
            tail.name = self.parse_identifier()
            if self.would_accept('<'):
                tail.arguments = self.parse_type_arguments_or_diamond()
            if self.try_accept('.'):
                tail.sub_type = tree.ReferenceType()
                tail = tail.sub_type
            else:
                break
        return created_name
    @parse_debug
    def parse_class_creator_rest(self):
        """Parse constructor arguments and an optional anonymous class body;
        returns (arguments, class_body)."""
        arguments = self.parse_arguments()
        class_body = None
        if self.would_accept('{'):
            class_body = self.parse_class_body()
        return (arguments, class_body)
    @parse_debug
    def parse_array_creator_rest(self):
        """Parse the brackets of an array creator: either empty dimensions
        followed by an initializer, or sized dimensions ('[expr]...') with
        optional trailing empty dimensions."""
        if self.would_accept('[', ']'):
            array_dimension = self.parse_array_dimension()
            array_initializer = self.parse_array_initializer()
            return tree.ArrayCreator(dimensions=array_dimension,
                                     initializer=array_initializer)
        else:
            array_dimensions = list()
            while self.would_accept('[') and not self.would_accept('[', ']'):
                self.accept('[')
                expression = self.parse_expression()
                array_dimensions.append(expression)
                self.accept(']')
            array_dimensions += self.parse_array_dimension()
            return tree.ArrayCreator(dimensions=array_dimensions)
    @parse_debug
    def parse_identifier_suffix(self):
        """Parse the suffix of a qualified identifier: class literal, call,
        '.this', '.new' inner creator, '.super(...)', explicit generic
        invocation, or a plain member reference (the default)."""
        if self.try_accept('[', ']'):
            # 'Type[].class' -- the leading None marks the first empty bracket
            array_dimension = [None] + self.parse_array_dimension()
            self.accept('.', 'class')
            return tree.ClassReference(type=tree.Type(dimensions=array_dimension))
        elif self.would_accept('('):
            arguments = self.parse_arguments()
            return tree.MethodInvocation(arguments=arguments)
        elif self.try_accept('.', 'class'):
            return tree.ClassReference()
        elif self.try_accept('.', 'this'):
            return tree.This()
        elif self.would_accept('.', '<'):
            next(self.tokens)
            return self.parse_explicit_generic_invocation()
        elif self.try_accept('.', 'new'):
            type_arguments = None
            if self.would_accept('<'):
                type_arguments = self.parse_nonwildcard_type_arguments()
            inner_creator = self.parse_inner_creator()
            inner_creator.constructor_type_arguments = type_arguments
            return inner_creator
        elif self.would_accept('.', 'super', '('):
            self.accept('.', 'super')
            arguments = self.parse_arguments()
            return tree.SuperConstructorInvocation(arguments=arguments)
        else:
            return tree.MemberReference()
    @parse_debug
    def parse_explicit_generic_invocation(self):
        """Parse explicit type arguments followed by an invocation suffix."""
        type_arguments = self.parse_nonwildcard_type_arguments()
        invocation = self.parse_explicit_generic_invocation_suffix()
        invocation.type_arguments = type_arguments
        return invocation
    @parse_debug
    def parse_inner_creator(self):
        """Parse the inner-class creator after 'outer.new': identifier,
        optional type arguments, constructor arguments and optional body."""
        identifier = self.parse_identifier()
        type_arguments = None
        if self.would_accept('<'):
            type_arguments = self.parse_nonwildcard_type_arguments_or_diamond()
        java_type = tree.ReferenceType(name=identifier,
                                       arguments=type_arguments)
        arguments, class_body = self.parse_class_creator_rest()
        return tree.InnerClassCreator(type=java_type,
                                      arguments=arguments,
                                      body=class_body)
    @parse_debug
    def parse_selector(self):
        """Parse one trailing selector of a primary: '[index]' or a '.'
        followed by a member access/call, generic invocation, this, super
        suffix, or inner-class creator."""
        if self.try_accept('['):
            expression = self.parse_expression()
            self.accept(']')
            return tree.ArraySelector(index=expression)
        elif self.try_accept('.'):
            token = self.tokens.look()
            if isinstance(token, Identifier):
                identifier = self.tokens.next().value
                arguments = None
                if self.would_accept('('):
                    arguments = self.parse_arguments()
                    return tree.MethodInvocation(member=identifier,
                                                 arguments=arguments)
                else:
                    return tree.MemberReference(member=identifier)
            elif self.would_accept('super', '::'):
                # Method reference: leave '::' for the caller to consume
                self.accept('super')
                return token
            elif self.would_accept('<'):
                return self.parse_explicit_generic_invocation()
            elif self.try_accept('this'):
                return tree.This()
            elif self.try_accept('super'):
                return self.parse_super_suffix()
            elif self.try_accept('new'):
                type_arguments = None
                if self.would_accept('<'):
                    type_arguments = self.parse_nonwildcard_type_arguments()
                inner_creator = self.parse_inner_creator()
                inner_creator.constructor_type_arguments = type_arguments
                return inner_creator
        self.illegal("Expected selector")
# ------------------------------------------------------------------------------
# -- Enum and annotation body --
    @parse_debug
    def parse_enum_body(self):
        """Parse an enum body: comma-separated constants, then (after an
        optional ';') ordinary class-body declarations."""
        constants = list()
        body_declarations = list()
        self.accept('{')
        # A single leading ',' means an empty constant list
        if not self.try_accept(','):
            while not (self.would_accept(';') or self.would_accept('}')):
                constant = self.parse_enum_constant()
                constants.append(constant)
                if not self.try_accept(','):
                    break
        if self.try_accept(';'):
            while not self.would_accept('}'):
                declaration = self.parse_class_body_declaration()
                if declaration:
                    body_declarations.append(declaration)
        self.accept('}')
        return tree.EnumBody(constants=constants,
                             declarations=body_declarations)
    @parse_debug
    def parse_enum_constant(self):
        """Parse one enum constant: annotations, name, optional constructor
        arguments and optional class body; attaches any preceding javadoc."""
        annotations = list()
        javadoc = None
        constant_name = None
        arguments = None
        body = None
        next_token = self.tokens.look()
        if next_token:
            javadoc = next_token.javadoc
        if self.would_accept(Annotation):
            annotations = self.parse_annotations()
        constant_name = self.parse_identifier()
        if self.would_accept('('):
            arguments = self.parse_arguments()
        if self.would_accept('{'):
            body = self.parse_class_body()
        return tree.EnumConstantDeclaration(annotations=annotations,
                                            name=constant_name,
                                            arguments=arguments,
                                            body=body,
                                            documentation=javadoc)
    @parse_debug
    def parse_annotation_type_body(self):
        """Parse the braced body of an @interface declaration."""
        declarations = None
        self.accept('{')
        declarations = self.parse_annotation_type_element_declarations()
        self.accept('}')
        return declarations
    @parse_debug
    def parse_annotation_type_element_declarations(self):
        """Parse all member declarations of an annotation type body."""
        declarations = list()
        while not self.would_accept('}'):
            declaration = self.parse_annotation_type_element_declaration()
            declarations.append(declaration)
        return declarations
    @parse_debug
    def parse_annotation_type_element_declaration(self):
        """Parse one annotation-type member: a nested type declaration, or an
        annotation method / constant (type name (...) [default] ;)."""
        modifiers, annotations, javadoc = self.parse_modifiers()
        declaration = None
        if self.would_accept('class'):
            declaration = self.parse_normal_class_declaration()
        elif self.would_accept('interface'):
            declaration = self.parse_normal_interface_declaration()
        elif self.would_accept('enum'):
            declaration = self.parse_enum_declaration()
        elif self.is_annotation_declaration():
            declaration = self.parse_annotation_type_declaration()
        else:
            attribute_type = self.parse_type()
            attribute_name = self.parse_identifier()
            declaration = self.parse_annotation_method_or_constant_rest()
            self.accept(';')
            if isinstance(declaration, tree.AnnotationMethod):
                declaration.name = attribute_name
                declaration.return_type = attribute_type
            else:
                declaration.declarators[0].name = attribute_name
                declaration.type = attribute_type
        declaration.modifiers = modifiers
        declaration.annotations = annotations
        declaration.documentation = javadoc
        return declaration
    @parse_debug
    def parse_annotation_method_or_constant_rest(self):
        """Disambiguate an annotation member: '()' means a method (with an
        optional 'default' value); otherwise it is a constant declarator."""
        if self.try_accept('('):
            self.accept(')')
            array_dimension = self.parse_array_dimension()
            default = None
            if self.try_accept('default'):
                default = self.parse_element_value()
            return tree.AnnotationMethod(dimensions=array_dimension,
                                         default=default)
        else:
            return self.parse_constant_declarators_rest()
def parse(tokens, debug=False):
    """Convenience entry point: build a Parser over *tokens*, set debug mode,
    and return the parsed result (the root AST node)."""
    parser = Parser(tokens)
    parser.set_debug(debug)
    return parser.parse()
| 31.288747 | 97 | 0.554889 |
8a3918f495c69eb57404d07e789a159ff0873681 | 30,007 | py | Python | preql/core/sql.py | erezsh/Preql | e0066882ff78c6a77e7ac6948757b39da97c1cb3 | [
"MIT"
] | 522 | 2018-12-11T19:06:08.000Z | 2022-03-27T10:04:16.000Z | preql/core/sql.py | erezsh/Preql | e0066882ff78c6a77e7ac6948757b39da97c1cb3 | [
"MIT"
] | 23 | 2020-11-01T12:05:41.000Z | 2022-03-23T22:14:40.000Z | preql/core/sql.py | erezsh/Preql | e0066882ff78c6a77e7ac6948757b39da97c1cb3 | [
"MIT"
] | 12 | 2020-11-01T12:11:42.000Z | 2022-02-24T03:42:44.000Z | from typing import List, Optional, Dict
from preql.utils import dataclass, X, listgen, safezip
from . import pql_types
from .pql_types import T, Type, dp_type, Id
from .types_impl import join_names, flatten_type
from .state import get_db
from .exceptions import Signal
duck = 'duck'
sqlite = 'sqlite'
postgres = 'postgres'
bigquery = 'bigquery'
mysql = 'mysql'
class QueryBuilder:
def __init__(self, is_root=True, start_count=0):
self.target = get_db().target
self.is_root = is_root
self.counter = start_count
self.table_name = []
def unique_name(self):
self.counter += 1
return 't%d' % self.counter
def replace(self, is_root):
if is_root == self.is_root:
return self # Optimize
return QueryBuilder(is_root, self.counter)
def push_table(self, t):
self.table_name.append(t)
def pop_table(self, t):
t2 = self.table_name.pop()
assert t2 == t
class Sql:
pass
@dataclass
class SqlTree(Sql):
_is_select = False
_needs_select = False
def _compile(self, qb):
raise NotImplementedError()
def compile_wrap(self, qb): # Move to Expr? Doesn't apply to statements
return self.compile(qb).wrap(qb)
def compile(self, qb):
sql_code = self._compile(qb.replace(is_root=False))
assert isinstance(sql_code, list), self
assert all(isinstance(c, (str, Parameter)) for c in sql_code), self
return CompiledSQL(self.type, sql_code, self, self._is_select, self._needs_select)
def finalize_with_subqueries(self, qb, subqueries):
if subqueries:
subqs = [q.compile_wrap(qb).finalize(qb) for (name, q) in subqueries.items()]
sql_code = ['WITH RECURSIVE '] if qb.target in (postgres, mysql) else ['WITH ']
sql_code += join_comma([q, '\n '] for q in subqs)
else:
sql_code = []
sql_code += self.compile_wrap(qb).finalize(qb)
return ''.join(sql_code)
@dataclass
class CompiledSQL(Sql):
type: Type
code: list
source_tree: Optional[Sql]
_is_select: bool # Needed for embedding in SqlTree
_needs_select: bool
def finalize(self, qb):
wrapped = self.wrap(qb)
assert qb.is_root
if wrapped.type <= T.primitive and not wrapped.code[0].lower().startswith('select '):
code = ['SELECT '] + wrapped.code
else:
code = wrapped.code
return ''.join(code)
def wrap(self, qb):
code = self.code
if qb.is_root:
if self._needs_select:
code = ['SELECT * FROM '] + code
return self.replace(code=code, _needs_select=False, _is_select=True)
else:
if self._is_select and not self._needs_select:
# Bad recursion (?)
code = ['('] + code + [')']
if get_db().requires_subquery_name:
code += [' ' + qb.unique_name()]
return self.replace(code=code, _is_select=False)
return self
def compile_wrap(self, qb):
return self.wrap(qb)
def compile(self, qb):
return self
def finalize_with_subqueries(self, qb, subqueries):
# Why not inherit from Sql?
return SqlTree.finalize_with_subqueries(self, qb, subqueries)
def optimize(self):
if not self.code:
return self
# unify strings for faster parameter resolution
new_code = []
last = self.code[0]
for c in self.code[1:]:
if isinstance(c, str) and isinstance(last, str):
last += c
else:
new_code.append(last)
last = c
new_code.append(last)
return self.replace(code=new_code)
@dataclass
class RawSql(SqlTree):
type: Type
text: str
def _compile(self, qb):
return [self.text]
@property
def _is_select(self):
return self.text.lstrip().lower().startswith('select') # XXX Hacky! Is there a cleaner solution?
@dataclass
class Null(SqlTree):
type = T.nulltype
def _compile(self, qb):
return ['null']
@dataclass
class Unknown(SqlTree):
def _compile(self, qb):
raise NotImplementedError("Unknown")
null = Null()
unknown = Unknown()
@dataclass
class Parameter(SqlTree):
type: Type
name: str
def _compile(self, qb):
return [self]
@dataclass
class Scalar(SqlTree):
pass
@dataclass
class Atom(Scalar):
pass
@dataclass
class Primitive(Atom):
type: Type
text: str
def _compile(self, qb):
return [self.text]
@dataclass
class Table(SqlTree):
pass
@dataclass
class EmptyList(Table):
type: Type
_is_select = True
def _compile(self, qb):
return ['SELECT NULL AS ITEM LIMIT 0']
@dataclass
class TableName(Table):
type: Type
name: Id
def _compile(self, qb):
return [quote_id(self.name)]
_needs_select = True
class TableOperation(Table):
_is_select = True
@dataclass
class FieldFunc(SqlTree):
name: str
field: Sql
type = T.int
def _compile(self, qb):
return [f'{self.name}('] + self.field.compile_wrap(qb).code + [')']
@dataclass
class CountTable(Scalar):
table: Sql
type = T.int
def _compile(self, qb):
return [f'(SELECT COUNT(*) FROM '] + self.table.compile_wrap(qb).code + [')']
@dataclass
class JsonLength(Scalar):
expr: Sql
type = T.int
def _compile(self, qb):
code = self.expr.compile_wrap(qb).code
if qb.target == sqlite:
return [f'(length('] + code + [') - length(replace('] + code + [f', "{_ARRAY_SEP}", ""))) / length("{_ARRAY_SEP}") + 1']
elif qb.target == postgres:
return [f'array_length('] + code + [', 1)']
elif qb.target == bigquery:
return [f'array_length('] + code + [')']
else:
return [f'json_length('] + code + [')']
@dataclass
class FuncCall(SqlTree):
type: Type
name: str
fields: List[Sql]
def _compile(self, qb):
s = join_comma(f.compile_wrap(qb).code for f in self.fields)
return [f'{self.name}('] + s + [')']
@dataclass
class Cast(SqlTree):
type: Type
value: Sql
def _compile(self, qb):
if qb.target == mysql and self.type <= T.string:
# XXX distinguish between these cases properly, not as a hack
t = 'char'
else:
t = _compile_type(qb.target, self.type.as_nullable()) # XXX as-nullable here is a hack
return [f'CAST('] + self.value.compile_wrap(qb).code + [f' AS {t})']
@dataclass
class Case(SqlTree):
"SQL conditional"
cond: Sql
then: Sql
else_: Optional[Sql]
type = T.bool
def _compile(self, qb):
cond = self.cond.compile_wrap(qb).code
then = self.then.compile_wrap(qb).code
code = ["CASE WHEN "] + cond +[" THEN "] + then
if self.else_:
code += [ " ELSE " ] + self.else_.compile_wrap(qb).code
return code + [" END "]
_ARRAY_SEP = '||'
@dataclass
class MakeArray(SqlTree):
type: Type
field: Sql
def _compile(self, qb):
field = self.field.compile_wrap(qb).code
if qb.target in (sqlite, duck):
return ['group_concat('] + field + [f', "{_ARRAY_SEP}")']
elif qb.target == postgres:
return ['array_agg('] + field + [')']
elif qb.target == mysql:
return ['json_arrayagg('] + field + [')']
elif qb.target == bigquery:
return ['array_agg('] + field + [')']
assert False, qb.target
@dataclass
class Contains(Scalar):
op: str
exprs: List[Sql]
type = T.bool
def _compile(self, qb):
assert self.op
item, container = self.exprs
c_item = item.compile_wrap(qb).code
c_cont = container.compile_wrap(qb.replace(is_root=True)).code
return c_item + [' ', self.op, ' '] + parens(c_cont)
def parens(x):
return ['('] + x + [')']
@dataclass
class Compare(Scalar):
op: str
exprs: List[Sql]
type = T.bool
def __post_init__(self):
assert self.op in ('=', '<=', '>=', '<', '>', '!=', 'IN'), self.op
def _compile(self, qb):
elems = [e.compile_wrap(qb).code for e in self.exprs]
op = self.op
if any(e.type.maybe_null() for e in self.exprs):
# Null values are possible, so we'll use identity operators
if qb.target in (sqlite, duck):
op = {
'=': 'is',
'!=': 'is not'
}.get(op, op)
elif qb.target is mysql:
if op == '!=':
# Special case,
return parens( ['not '] + join_sep(elems, f' <=> ') )
op = {
'=': '<=>',
}.get(op, op)
else:
op = {
'=': 'is not distinct from',
'!=': 'is distinct from'
}.get(op, op)
return parens( join_sep(elems, f' {op} ') )
@dataclass
class LogicalBinOp(Scalar):
op: str
exprs: List[Sql]
def _compile(self, qb):
x = join_sep([e.compile_wrap(qb).code for e in self.exprs], f' {self.op} ')
return parens(x)
type = T.bool
@dataclass
class LogicalNot(Scalar):
expr: Sql
def _compile(self, qb):
x = ['NOT '] + self.expr.compile_wrap(qb).code
return parens(x)
type = T.bool
@dataclass
class BinOp(Scalar):
type: Type
op: str
exprs: List[Sql]
def _compile(self, qb):
x = join_sep([e.compile_wrap(qb).code for e in self.exprs], f' {self.op} ')
return parens(x)
@dataclass
class TableArith(TableOperation):
op: str
exprs: List[Sql]
def _compile(self, qb):
tables = [t.compile_wrap(qb) for t in self.exprs]
selects = [[f"SELECT * FROM "] + t.code for t in tables]
code = join_sep(selects, f" {self.op} ")
if qb.target == sqlite:
# Limit -1 is due to a strange bug in SQLite (fixed in newer versions), where the limit is reset otherwise.
code += [" LIMIT -1"]
return code
type = property(X.exprs[0].type) # TODO ensure type correctness
@dataclass
class Neg(SqlTree):
expr: Sql
def _compile(self, qb):
s = self.expr.compile_wrap(qb)
return ["-"] + s.code
type = property(X.expr.type)
@dataclass
class Desc(SqlTree):
expr: Sql
def _compile(self, qb):
s = self.expr.compile_wrap(qb)
return s.code + [" DESC"]
type = property(X.expr.type)
_reserved = {'index', 'create', 'unique', 'table', 'select', 'where', 'group', 'by', 'over', 'user'}
@dataclass
class Name(SqlTree):
type: Type
name: str
def __post_init__(self):
assert self.name, self.type
def _compile(self, qb):
name = quote_name(self.name)
if qb.table_name:
name = qb.table_name[-1] + '.' + name
return [name]
@dataclass
class Attr(SqlTree):
type: Type
obj: Sql
name: str
# return base
@dataclass
class ColumnAlias(SqlTree):
value: Sql
alias: str
@classmethod
def make(cls, value, alias):
return cls(value, alias)
def _compile(self, qb):
alias = quote_name(self.alias)
value = self.value.compile_wrap(qb).code
assert alias and value, (alias, value)
if value == [alias]: # TODO disable when unoptimized?
return value # This is just for beauty, it's not necessary for function
return value + [f' AS {alias}']
type = property(X.value.type)
class SqlStatement(SqlTree):
type = T.nulltype
@dataclass
class AddIndex(SqlStatement):
index_name: Id
table_name: Id
column: str
unique: bool
def _compile(self, qb):
return [f"CREATE {'UNIQUE' if self.unique else ''} INDEX IF NOT EXISTS {quote_id(self.index_name)}"
f" ON {quote_id(self.table_name)}({self.column})"]
@dataclass
class Insert(SqlStatement):
table_name: Id
columns: List[str]
query: Sql
def _compile(self, qb):
columns = [quote_name(c) for c in self.columns]
return [f'INSERT INTO {quote_id(self.table_name)}({", ".join(columns)}) '] + self.query.compile(qb).code
def finalize_with_subqueries(self, qb, subqueries):
if qb.target in (mysql, bigquery):
columns = [quote_name(c) for c in self.columns]
sql_code = f'INSERT INTO {quote_id(self.table_name)}({", ".join(columns)}) '
sql_code += self.query.finalize_with_subqueries(qb, subqueries)
return ''.join(sql_code)
return super().finalize_with_subqueries(qb, subqueries)
@dataclass
class InsertConsts(SqlStatement):
table: Id
cols: List[str]
tuples: list #List[List[Sql]]
def _compile(self, qb):
cols = self.cols
tuples = self.tuples
assert self.tuples, self
if not cols:
return ['INSERT INTO', quote_id(self.table), 'DEFAULT VALUES']
values = join_comma(
parens(join_comma([e.compile_wrap(qb).code for e in tpl]))
for tpl in tuples
)
cols = [quote_name(c) for c in cols]
q = ['INSERT INTO', quote_id(self.table),
"(", ', '.join(cols), ")",
"VALUES ",
]
return [' '.join(q)] + values #+ [';']
@dataclass
class InsertConsts2(SqlStatement):
table: Id
cols: List[str]
tuples: list #List[List[Sql]]
def _compile(self, qb):
assert self.tuples, self
values = join_comma(
parens(join_comma([t] for t in tpl))
for tpl in self.tuples
)
q = ['INSERT INTO', quote_id(self.table),
"(", ', '.join(self.cols), ")",
"VALUES ",
]
return [' '.join(q)] + values #+ ';'
@dataclass
class LastRowId(Atom):
    # The id generated by the most recent INSERT, per-dialect spelling.
    type = T.int
    def _compile(self, qb):
        if qb.target in (sqlite, duck):
            return ['last_insert_rowid()'] # Sqlite
        elif qb.target == mysql:
            return ['last_insert_id()']
        else:
            return ['lastval()'] # Postgres
@dataclass
class SelectValue(Atom, TableOperation):
    # XXX Just use a regular select?
    value: Sql
    def _compile(self, qb):
        value = self.value.compile_wrap(qb)
        # NOTE(review): the trailing `+ value` concatenates the CompiledSQL
        # object itself rather than `value.code` — looks like a typo; confirm
        # against callers before changing.
        return [f'SELECT '] + value.code + [' AS '] + value
    type = property(X.value.type)
@dataclass
class Values(Table):
    # A literal VALUES (...) table of compiled expressions.
    type: Type
    values: List[Sql]
    def _compile(self, qb):
        values = [v.compile_wrap(qb) for v in self.values]
        if not values:  # SQL doesn't support empty values
            # Emit a zero-row SELECT with the right number of columns instead.
            nulls = ', '.join(['NULL' for _ in range(len(self.type.elems))])
            return ['SELECT ' + nulls + ' LIMIT 0']
        if qb.target == mysql:
            # MySQL requires each row to be wrapped in ROW(...).
            def row_func(x):
                return ['ROW('] + x + [')']
        else:
            row_func = parens
        return ['VALUES '] + join_comma(row_func(v.code) for v in values)
@dataclass
class Tuple(SqlTree):
    # A comma-joined sequence of expressions (one row's worth of values).
    type: Type
    values: List[Sql]
    def _compile(self, qb):
        values = [v.compile_wrap(qb).code for v in self.values]
        return join_comma(values)
@dataclass
class ValuesTuple(Tuple):
    # Same compilation as Tuple; kept as a distinct node for callers that
    # want to distinguish literal rows (see BigQueryValues).
    type: Type
    values: List[Sql]
    def _compile(self, qb):
        values = [v.compile_wrap(qb) for v in self.values]
        return join_comma(v.code for v in values)
@dataclass
class ValuesTuples(Table):
    # A VALUES table whose rows are Tuple nodes.
    type: Type
    values: List[Tuple]
    def _compile(self, qb):
        if not self.values:  # SQL doesn't support empty values
            return ['SELECT '] + join_comma(['NULL']*len(self.type.elems)) + ['LIMIT 0']
        values = [v.compile_wrap(qb) for v in self.values]
        return ['VALUES '] + join_comma(v.code for v in values)
@dataclass
class AllFields(SqlTree):
    # Compiles to the '*' projection.
    type: Type
    def _compile(self, qb):
        return ['*']
@dataclass
class Update(SqlTree):
    # UPDATE <table> SET k = v, ... [WHERE conds].
    table: TableName
    fields: Dict[Sql, Sql]
    conds: List[Sql]
    type = T.nulltype
    def _compile(self, qb):
        fields_sql = [k.compile_wrap(qb).code + [' = '] + v.compile_wrap(qb).code for k, v in self.fields.items()]
        fields_sql = join_comma(fields_sql)
        sql = ['UPDATE '] + self.table.compile_wrap(qb).code + [' SET '] + fields_sql
        # Without conds this intentionally updates every row.
        if self.conds:
            sql += [' WHERE '] + join_sep([c.compile_wrap(qb).code for c in self.conds], ' AND ')
        return sql
@dataclass
class Delete(SqlTree):
    # DELETE FROM <table> WHERE conds (conds are required — always ANDed).
    table: TableName
    conds: List[Sql]
    type = T.nulltype
    def _compile(self, qb):
        conds = join_sep([c.compile_wrap(qb).code for c in self.conds], ' AND ')
        return ['DELETE FROM '] + self.table.compile_wrap(qb).code + [' WHERE '] + conds
@dataclass
class Select(TableOperation):
    # A full SELECT ... FROM ... [WHERE/GROUP BY/ORDER BY/LIMIT/OFFSET] node.
    type: Type
    table: Sql  # XXX Table won't work with RawSQL
    fields: List[Sql]
    # NOTE(review): mutable defaults here rely on the project's custom
    # @dataclass implementation handling them safely — stdlib dataclasses
    # would reject them. Confirm before porting.
    conds: List[Sql] = []
    group_by: List[Sql] = []
    order: List[Sql] = []
    # MySQL doesn't support arithmetic in offset/limit, and we don't need it anyway
    offset: Optional[int] = None
    limit: Optional[int] = None
    def __post_init__(self):
        assert self.fields, self
    def _is_conds_only(self):
        # True when this Select only filters (SELECT * ... WHERE ...), i.e.
        # it can be merged into an outer Select by concatenating conditions.
        if self.group_by or self.order or self.offset or self.limit:
            return False
        if len(self.fields) == 1 and isinstance(self.fields[0], AllFields):
            return True
        return False
    def _compile(self, qb):
        # XXX very primitive optimization. Be smarter.
        #
        # Simplify
        #
        # Collapse SELECT-over-SELECT when the inner one is a pure filter.
        if isinstance(self.table, Select):
            s1 = self
            s2 = self.table
            if s2._is_conds_only():
                s = s1.replace(conds=list(s1.conds) + list(s2.conds), table=s2.table)
                return s._compile(qb)
            # elif s1._is_limit_only() and not (s2.offset or s2.limit):
            #     s = s2.replace(limit=s1.limit, offset=s1.offset)
            #     return s._compile(qb)
        #
        # Compile
        #
        fields_sql = [f.compile_wrap(qb) for f in self.fields]
        select_sql = join_comma(f.code for f in fields_sql)
        sql = ['SELECT '] + select_sql + [' FROM '] + self.table.compile_wrap(qb).code
        if self.conds:
            sql += [' WHERE '] + join_sep([c.compile_wrap(qb).code for c in self.conds], ' AND ')
        if self.group_by:
            sql += [' GROUP BY '] + join_comma(e.compile_wrap(qb).code for e in self.group_by)
        if self.order:
            sql += [' ORDER BY '] + join_comma(o.compile_wrap(qb).code for o in self.order)
        if self.limit is not None:
            sql += [' LIMIT ', str(self.limit)]
        elif self.offset is not None:
            # Some dialects refuse OFFSET without a LIMIT; emit the dialect's
            # "unlimited" sentinel so the OFFSET below is accepted.
            if qb.target == sqlite:
                sql += [' LIMIT -1'] # Sqlite only (and only old versions of it)
            elif qb.target == mysql:
                # MySQL requires a specific limit, always!
                # See: https://stackoverflow.com/questions/255517/mysql-offset-infinite-rows
                sql += [' LIMIT 18446744073709551615']
            elif qb.target == bigquery:
                # BigQuery requires a specific limit, always!
                sql += [' LIMIT 9223372036854775807']
        if self.offset is not None:
            sql += [' OFFSET ', str(self.offset)]
        return sql
@listgen
def join_sep(code_list, sep):
    # Interleave `sep` between the code fragments (each a list of strings).
    # @listgen collects the generator into a list.
    code_list = list(code_list)
    if code_list:
        yield from code_list[0]
        for c in code_list[1:]:
            assert isinstance(c, list)
            yield sep
            yield from c
def join_comma(code_list):
    # Shorthand: join code fragments with ", ".
    return join_sep(code_list, ", ")
@dataclass
class Subquery(SqlTree):
    # A named CTE fragment: "<name>(fields) AS (<query>)".
    table_name: str
    fields: Optional[List[Name]]
    query: Sql
    type = property(X.query.type)
    def _compile(self, qb):
        query = self.query.compile(qb).code
        if qb.target == bigquery:
            # BigQuery CTEs don't take an explicit column list.
            fields_str = []
        else:
            fields = [f.compile_wrap(qb.replace(is_root=False)).code for f in self.fields]
            fields_str = ["("] + join_comma(fields) + [")"] if fields else []
        return [f"{self.table_name}"] + fields_str + [" AS ("] + query + [")"]
def _enum_is_last(seq):
last = len(seq) - 1
for i, item in enumerate(seq):
yield i == last, item
@dataclass
class Join(TableOperation):
    # SELECT * FROM t1 <JOIN_OP> t2 ON ... — joins all tables in sequence.
    type: Type
    join_op: str
    tables: List[Table]
    conds: List[Sql]
    def _compile(self, qb):
        tables_sql = [t.compile_wrap(qb).code for t in self.tables]
        join_op = ' %s ' % self.join_op.upper()
        code = [f'SELECT * FROM '] + tables_sql[0]
        # Only the last join carries the real conditions; the intermediate
        # joins get a trivially-true ON clause.
        for is_last, t_sql in _enum_is_last(tables_sql[1:]):
            code += [join_op] + t_sql + [' ON ']
            if self.conds and is_last:
                code += join_sep([c.compile_wrap(qb).code for c in self.conds], ' AND ')
            else:
                code += ['1=1'] # Postgres requires ON clause
        return code
@dataclass
class BigQueryValues(SqlTree):
    # BigQuery has no VALUES clause; emulate it with UNNEST of STRUCTs.
    type: Type
    values: List[Sql]
    def _compile(self, qb):
        cols = list(self.type.elems)
        rows = [
            (
                # Literal rows become STRUCT(v1 as c1, v2 as c2, ...).
                ['STRUCT('] + join_comma(v.compile(qb).code + [" as ", name]
                                          for name, v in safezip(cols, row.values)) + [")"]
            ) if isinstance(row, Tuple) else row.compile(qb).code
            for row in self.values
        ]
        return ["SELECT * FROM UNNEST(["] + join_comma(rows) + ["]) as item"]
@dataclass
class TableQueryValues(SqlTree):
    "A subquery which returns a table of given values"
    type: Type
    name: str
    rows: list
    def _compile(self, qb):
        # NOTE(review): this compares target against the string 'bigquery'
        # while other code compares against the `bigquery` object — verify
        # the target's __eq__ makes both forms equivalent.
        if qb.target != 'bigquery':
            values_cls = Values
            fields = [Name(col_type, col_name) for col_name, col_type in self.type.elems.items()]
        else:
            values_cls = BigQueryValues
            fields = None
        subq = Subquery(self.name, fields, values_cls(self.type, self.rows))
        return subq._compile(qb)
@dataclass
class StringSlice(SqlTree):
    # Substring extraction: string[start:stop] with 1-based SQL positions.
    string: Sql
    start: Sql
    stop: Optional[Sql]
    type = T.string
    def _compile(self, qb):
        string = self.string.compile_wrap(qb).code
        start = self.start.compile_wrap(qb).code
        if self.stop:
            stop = self.stop.compile_wrap(qb).code
            # SQL substring functions take a length, not an end position.
            length = parens(stop + ['-'] + start)
        else:
            length = None
        if qb.target in (sqlite, duck, bigquery):
            # substr(str, start[, length])
            f = 'substr'
            params = string + [', '] + start
            if length:
                params += [', '] + length
        elif qb.target in (postgres, mysql):
            # substring(str from start [for length])
            f = 'substring'
            params = string + [' from '] + start
            if length:
                params += [' for '] + length
        else:
            assert False
        return [f'{f}('] + params + [')']
# _repr: render a Python value as a SQL literal, dispatched on the pql type.
@dp_type
def _repr(_t: T.number, x):
    return str(x)
@dp_type
def _repr(_t: T.bool, x):
    # SQL boolean literals; indexing by the bool works since bool is an int.
    return ['false', 'true'][x]
@dp_type
def _repr(_t: T.decimal, x):
    return repr(float(x)) # TODO SQL decimal?
@dp_type
def _repr(_t: T.datetime, x):
    # TODO Better to pass the object instead of a string?
    return repr(str(x))
@dp_type
def _repr(_t: T.timestamp, x):
    # TODO Better to pass the object instead of a string?
    return repr(str(x))
@dp_type
def _repr(_t: T.union[T.string, T.text], x):
    # Escape single quotes per-dialect: BigQuery uses backslash escapes,
    # everyone else doubles the quote.
    target = get_db().target
    quoted_quote = r"\'" if target == bigquery else "''"
    res = "'%s'" % str(x).replace("'", quoted_quote)
    if target == bigquery:
        res = res.replace('\n', '\\n')
    return res
def quote_name(name):
    # Quote a single identifier using the active database's quoting rules.
    assert isinstance(name, str), name
    return get_db().quote_name(name)
def quote_id(id_):
    # Quote a (possibly dotted) qualified identifier, part by part.
    assert isinstance(id_, Id)
    return '.'.join(quote_name(n) for n in id_.parts)
@dp_type
def _compile_type(target, type_: T.t_relation):
    # TODO might have a different type
    #return 'INTEGER' # Foreign-key is integer
    # A relation column takes the SQL type of the referenced item.
    return _compile_type(target, type_.elems['item'])
# Mapping of pql primitive type names to SQL type declarations; subclasses
# override the names that differ per dialect.
class Types_PqlToSql:
    bool = "BOOLEAN"
    time = "TIME"
    date = "DATE"
    datetime = "DATETIME"
    timestamp = "TIMESTAMP"
    int = "INTEGER"
    float = "FLOAT"
    string = "VARCHAR(4000)"
    text = "TEXT"
class P2S_BigQuery(Types_PqlToSql):
    int = "INT64"
    string = "STRING"
    float = "FLOAT64"
    text = "STRING"
class P2S_MySql(Types_PqlToSql):
    int = "SIGNED"
class P2S_Sqlite(Types_PqlToSql):
    datetime = "DATETIME"
    timestamp = "TIMESTAMP"
class P2S_Postgres(Types_PqlToSql):
    datetime = "timestamp with time zone"
# Dialect -> type table. 'mysql_def' uses the standard names because MySQL's
# CAST types (e.g. SIGNED) differ from its column-declaration types.
_pql_to_sql_by_target = {
    bigquery: P2S_BigQuery,
    mysql: P2S_MySql,
    "mysql_def": Types_PqlToSql,  # Standard (Different) types for declaration!
    sqlite: P2S_Sqlite,
    duck: P2S_Sqlite,
    postgres: P2S_Postgres,
}
@dp_type
def _compile_type(target, type_: T.primitive):
    # Look up the dialect's spelling and append NOT NULL when appropriate.
    s = getattr(_pql_to_sql_by_target[target], type_.typename)
    if not type_.maybe_null():
        s += " NOT NULL"
    return s
@dp_type
def _compile_type(target, _type: T.nulltype):
    if target == bigquery:
        return 'INT64'
    return 'INTEGER'    # TODO is there a better value here? Maybe make it read-only somehow
@dp_type
def _compile_type(target, idtype: T.t_id):
    # BigQuery ids are strings; everywhere else they are integers.
    if target == bigquery:
        s = "STRING"
    else:
        s = "INTEGER"   # TODO non-int idtypes
    if not idtype.maybe_null():
        s += " NOT NULL"
    return s
@dp_type
def _compile_type(target, _type: T.json):
    return 'JSON'
# API
def compile_drop_table(table_name) -> Sql:
    # Build a DROP TABLE statement for the given qualified Id.
    return RawSql(T.nulltype, f'DROP TABLE {quote_id(table_name)}')
def compile_type_def(table_name, table) -> Sql:
    """Compile a table type into a CREATE TABLE statement.

    Parameters
    ----------
    table_name : Id
        Qualified name of the table to create.
    table : Type
        A ``T.table`` type describing the columns and their options
        (primary keys, relations, temporary flag).

    Returns
    -------
    Sql
        A ``RawSql`` node holding the complete CREATE TABLE statement.
    """
    assert isinstance(table_name, Id)
    assert table <= T.table
    db = get_db()
    posts = []    # table-level clauses: FOREIGN KEY / PRIMARY KEY
    columns = []
    # Primary-key columns, flattened to their joined (dotted) names.
    # (The previous dead `pks = []` assignment was removed — it was
    # immediately overwritten by this comprehension.)
    pks = {join_names(pk) for pk in table.options['pk']}
    for name, c in flatten_type(table):
        if name in pks and c <= T.t_id:
            type_decl = db.id_type_decl
        else:
            # MySQL declares columns with different type names than it casts
            # with, hence the special 'mysql_def' type table.
            type_decl = _compile_type(db.target if db.target != mysql else 'mysql_def', c)
        columns.append(f'{quote_name(name)} {type_decl}')
        if c <= T.t_relation:
            if db.supports_foreign_key:
                # TODO any column, using projection / get_attr
                if not table.options.get('temporary', False):
                    # In postgres, constraints on temporary tables may reference only temporary tables
                    rel = c.options['rel']
                    if rel['key']: # Requires a unique constraint
                        tbl_name = rel['table'].options['name']
                        s = f"FOREIGN KEY({name}) REFERENCES {quote_id(tbl_name)}({rel['column']})"
                        posts.append(s)
    if pks and db.supports_foreign_key:
        names = ", ".join(pks)
        posts.append(f"PRIMARY KEY ({names})")
    # Consistent among SQL databases
    if db.target == 'bigquery':
        # BigQuery has no TEMPORARY tables in this context.
        command = ("CREATE TABLE" if table.options.get('temporary', False) else "CREATE TABLE IF NOT EXISTS")
    else:
        command = "CREATE TEMPORARY TABLE" if table.options.get('temporary', False) else "CREATE TABLE IF NOT EXISTS"
    return RawSql(T.nulltype, f'{command} {quote_id(table_name)} (' + ', '.join(columns + posts) + ')')
def deletes_by_ids(table, ids):
    # Yield one Delete statement per id, matching on the 'id' column.
    for id_ in ids:
        compare = Compare('=', [Name(T.t_id, 'id'), Primitive(T.t_id, repr(id_))])
        yield Delete(TableName(table.type, table.type.options['name']), [compare])
def updates_by_ids(table, proj, ids):
    # TODO this function is not safe & secure enough
    # Yield one Update statement per id, setting the projected fields.
    sql_proj = {Name(value.type, name): value.code for name, value in proj.items()}
    for id_ in ids:
        compare = Compare('=', [Name(T.t_id, 'id'), Primitive(T.t_id, repr(id_))])
        yield Update(TableName(table.type, table.type.options['name']), sql_proj, [compare])
def create_list(name, elems):
    # Assumes all elems have the same type!
    # Build a named list table (subquery + reference) from literal elements.
    t = T.list[elems[0].type]
    subq = TableQueryValues(t, name, elems)
    table = TableName(t, Id(name))
    return table, subq, t
def create_table(t, name, rows):
    # Build a named table (subquery + reference) of the given type and rows.
    subq = TableQueryValues(t, name, rows)
    table = TableName(t, Id(name))
    return table, subq
def table_slice(table, start, stop):
    # NOTE(review): `if stop` treats stop == 0 like "no stop"; presumably a
    # zero stop never reaches here — confirm against callers.
    limit = stop - start if stop else None
    return Select(table.type, table.code, [AllFields(table.type)], offset=start, limit=limit)
def table_selection(table, conds):
    # SELECT * FROM table WHERE conds.
    return Select(table.type, table.code, [AllFields(table.type)], conds=conds)
def table_order(table, fields):
    # SELECT * FROM table ORDER BY fields.
    return Select(table.type, table.code, [AllFields(table.type)], order=fields)
def arith(res_type, op, args):
    # Compile an arithmetic/string operator, smoothing over dialect quirks.
    # NOTE(review): target is compared with both `is` and `==` in this
    # function; presumably the targets are singletons — confirm.
    target = get_db().target
    arg_codes = list(args)
    if res_type == T.string:
        # '+' on strings means concatenation.
        assert op == '+'
        op = '||'
        if target is mysql:  # doesn't support a || b
            return FuncCall(res_type, 'concat', arg_codes)
    elif op == '/':
        if target != mysql:
            # In MySQL division returns a float. All others return int
            arg_codes[0] = Cast(T.float, arg_codes[0])
    elif op == '/~':
        # Integer (floor) division.
        if target == mysql:
            op = 'DIV'
        elif target == bigquery:
            # safe div?
            return FuncCall(res_type, 'div', arg_codes)
        else:
            op = '/'
    elif op == '%':
        if target is bigquery:
            return FuncCall(res_type, 'mod', arg_codes)
    elif op == '**':
        return FuncCall(T.float, 'power', arg_codes)
    return BinOp(res_type, op, arg_codes)
def make_value(x):
    # Convert a Python value into a SQL Primitive node (or `null` for None).
    if x is None:
        return null
    try:
        t = pql_types.from_python(type(x))
    except KeyError as e:
        raise Signal.make(T.ValueError, x, f"Cannot import value of Python type {type(x)}") from e
    return Primitive(t, _repr(t, x))
def add_one(x):
    # Build the SQL expression `x + 1`, preserving x's type.
    return BinOp(x.type, '+', [x, make_value(1)])
| 26.36819 | 132 | 0.579565 |
dba535ed1d18209e795d57fdf79898d34b66b5ed | 10,270 | py | Python | examples/trials/mnist-annotation/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | examples/trials/mnist-annotation/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | examples/trials/mnist-annotation/mnist.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | """A deep MNIST classifier using convolutional layers."""
import argparse
import logging
import math
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Module-level state: FLAGS kept for TF convention; logger for this trial.
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
    '''
    MnistNetwork is for initializing and building basic network for mnist.
    '''
    def __init__(self,
                 channel_1_num,
                 channel_2_num,
                 conv_size,
                 hidden_size,
                 pool_size,
                 learning_rate,
                 x_dim=784,
                 y_dim=10):
        # The triple-quoted @nni.variable strings below are NNI annotations;
        # they must stay byte-identical for the annotation parser to rewrite
        # the assignments with tuner-chosen values.
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        """@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)"""
        self.conv_size = conv_size
        """@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)"""
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        """@nni.variable(nni.loguniform(0.0001, 0.1), name=self.learning_rate)"""
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim
        # Graph placeholders; built lazily — ops are created in build_network().
        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.train_step = None
        self.accuracy = None
    def build_network(self):
        '''
        Building network for mnist
        '''
        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                # x_dim must be a perfect square (e.g. 784 -> 28x28).
                input_dim = int(math.sqrt(self.x_dim))
            except:
                print(
                    'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug(
                    'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable(
                [self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            """@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
            h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)
        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([self.conv_size, self.conv_size,
                                       self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
        # Second pooling layer.
        with tf.name_scope('pool2'):
            h_pool2 = max_pool(h_conv2, self.pool_size)
        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable(
                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(
                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(
                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x_input, pool_size):
    """max_pool downsamples a feature map by 2X."""
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
def avg_pool(x_input, pool_size):
    """avg_pool downsamples a feature map by average pooling."""
    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    # Small-stddev truncated normal avoids dead ReLUs at initialization.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def download_mnist_retry(data_dir, max_num_retries=20):
    """Try to download mnist dataset and avoid errors"""
    # Retry on AlreadyExistsError, which occurs when several trials download
    # into the same directory concurrently.
    for _ in range(max_num_retries):
        try:
            return input_data.read_data_sets(data_dir, one_hot=True)
        except tf.errors.AlreadyExistsError:
            time.sleep(1)
    raise Exception("Failed to download MNIST.")
def main(params):
    '''
    Main function, build mnist network, run and send result to NNI.
    '''
    # Import data
    mnist = download_mnist_retry(params['data_dir'])
    print('Mnist download data done.')
    logger.debug('Mnist download data done.')
    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
                                 channel_2_num=params['channel_2_num'],
                                 conv_size=params['conv_size'],
                                 hidden_size=params['hidden_size'],
                                 pool_size=params['pool_size'],
                                 learning_rate=params['learning_rate'])
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # The """@nni...""" strings are NNI annotations rewritten by the
        # annotation parser; keep them exactly as-is.
        """@nni.variable(nni.choice(16, 32), name=batch_size)"""
        batch_size = params['batch_size']
        for i in range(params['batch_num']):
            batch = mnist.train.next_batch(batch_size)
            """@nni.variable(nni.choice(0.5, 0.9), name=dropout_rate)"""
            dropout_rate = params['dropout_rate']
            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
                                                    mnist_network.labels: batch[1],
                                                    mnist_network.keep_prob: 1 - dropout_rate}
                                         )
            # Report intermediate accuracy to NNI every 100 steps.
            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(
                    feed_dict={mnist_network.images: mnist.test.images,
                               mnist_network.labels: mnist.test.labels,
                               mnist_network.keep_prob: 1.0})
                """@nni.report_intermediate_result(test_acc)"""
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')
        test_acc = mnist_network.accuracy.eval(
            feed_dict={mnist_network.images: mnist.test.images,
                       mnist_network.labels: mnist.test.labels,
                       mnist_network.keep_prob: 1.0})
        """@nni.report_final_result(test_acc)"""
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')
def get_params():
    ''' Get parameters from command line '''
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help="data directory")
    parser.add_argument("--dropout_rate", type=float, default=0.5,
                        help="dropout rate")
    # All plain integer knobs are registered through one loop.
    for flag, value in (
            ("channel_1_num", 32),
            ("channel_2_num", 64),
            ("conv_size", 5),
            ("pool_size", 2),
            ("hidden_size", 1024),
            ("batch_num", 2000),
            ("batch_size", 32),
    ):
        parser.add_argument("--" + flag, type=int, default=value)
    parser.add_argument("--learning_rate", type=float, default=1e-4)
    # Ignore unrecognized flags (NNI may inject extra arguments).
    parsed, _unknown = parser.parse_known_args()
    return parsed
if __name__ == '__main__':
    # The annotation below is rewritten by NNI to fetch the next tuner trial.
    '''@nni.get_next_parameter()'''
    try:
        main(vars(get_params()))
    except Exception as exception:
        # Log the full traceback before letting the trial fail.
        logger.exception(exception)
        raise
| 41.41129 | 202 | 0.613827 |
e059c6cb185e92bc09fac15aeb9b1c68d1cbac9b | 1,979 | py | Python | about.py | Hochikong/HexoPubAssistant | 1b250fa783d9616292fbe06e553498699fd33d63 | [
"MIT"
] | 3 | 2019-03-15T09:36:34.000Z | 2021-06-19T11:18:37.000Z | about.py | Hochikong/HexoPubAssistant | 1b250fa783d9616292fbe06e553498699fd33d63 | [
"MIT"
] | null | null | null | about.py | Hochikong/HexoPubAssistant | 1b250fa783d9616292fbe06e553498699fd33d63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
# NOTE: pyuic5-generated UI class — regenerate from about.ui instead of
# editing by hand.
class Ui_About_Dialog(object):
    def setupUi(self, About_Dialog):
        About_Dialog.setObjectName("About_Dialog")
        About_Dialog.resize(396, 178)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("ICON/hexo logo.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        About_Dialog.setWindowIcon(icon)
        About_Dialog.setStyleSheet("background-color: rgb(243, 242, 238);\n"
"selection-background-color: rgb(199, 197, 184);\n"
"font: 57 10pt \"GenWanMin TW TTF Medium\";")
        self.buttonBox = QtWidgets.QDialogButtonBox(About_Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(40, 140, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.about_page = QtWidgets.QLabel(About_Dialog)
        self.about_page.setGeometry(QtCore.QRect(20, 20, 361, 111))
        self.about_page.setStyleSheet("font: 57 10pt \"GenWanMin TW TTF Medium\";")
        self.about_page.setText("")
        self.about_page.setObjectName("about_page")
        self.retranslateUi(About_Dialog)
        self.buttonBox.accepted.connect(About_Dialog.accept)
        self.buttonBox.rejected.connect(About_Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(About_Dialog)
    def retranslateUi(self, About_Dialog):
        _translate = QtCore.QCoreApplication.translate
        About_Dialog.setWindowTitle(_translate("About_Dialog", "About"))
if __name__ == "__main__":
    # Standalone preview of the generated dialog.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    About_Dialog = QtWidgets.QDialog()
    ui = Ui_About_Dialog()
    ui.setupUi(About_Dialog)
    About_Dialog.show()
    sys.exit(app.exec_())
| 38.803922 | 96 | 0.706923 |
dbbd3d6b47a037ac2dd7450f751ba4980043ff76 | 1,116 | py | Python | tests/__init__.py | JIC-CSB/dtool-info | f8b5fb4b7d32f25f6ec10648697d29a8e3728b42 | [
"MIT"
] | null | null | null | tests/__init__.py | JIC-CSB/dtool-info | f8b5fb4b7d32f25f6ec10648697d29a8e3728b42 | [
"MIT"
] | null | null | null | tests/__init__.py | JIC-CSB/dtool-info | f8b5fb4b7d32f25f6ec10648697d29a8e3728b42 | [
"MIT"
] | null | null | null | """Test fixtures."""
import os
import shutil
import tempfile
import pytest
import dtoolcore
# Directory containing this test package; used by local_tmp_dir_fixture.
_HERE = os.path.dirname(__file__)
@pytest.fixture
def chdir_fixture(request):
    # Run the test inside a fresh temporary cwd; restore and clean up after.
    d = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(d)
    @request.addfinalizer
    def teardown():
        os.chdir(curdir)
        shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
    # A throwaway temporary directory, removed at teardown.
    d = tempfile.mkdtemp()
    @request.addfinalizer
    def teardown():
        shutil.rmtree(d)
    return d
@pytest.fixture
def local_tmp_dir_fixture(request):
    # Like tmp_dir_fixture, but created next to the tests (same filesystem).
    d = tempfile.mkdtemp(dir=_HERE)
    @request.addfinalizer
    def teardown():
        shutil.rmtree(d)
    return d
@pytest.fixture
def dataset_fixture(request):
    # A minimal persisted dtoolcore DataSet containing two text files.
    d = tempfile.mkdtemp()
    dataset = dtoolcore.DataSet("test", "data")
    dataset.persist_to_path(d)
    for s in ["hello", "world"]:
        fname = s + ".txt"
        fpath = os.path.join(d, "data", fname)
        with open(fpath, "w") as fh:
            fh.write(s)
    dataset.update_manifest()
    @request.addfinalizer
    def teardown():
        shutil.rmtree(d)
    return d
| 17.169231 | 47 | 0.633513 |
b6c8cc5a50db4e4a0316143e81e1d74973aa7fe3 | 2,136 | py | Python | nautobot_netbox_importer/diffsync/models/users.py | chadell/nautobot-plugin-netbox-importer | 9baebac18ffd26eb4e3096eddc5c145a04914a21 | [
"Apache-2.0"
] | null | null | null | nautobot_netbox_importer/diffsync/models/users.py | chadell/nautobot-plugin-netbox-importer | 9baebac18ffd26eb4e3096eddc5c145a04914a21 | [
"Apache-2.0"
] | null | null | null | nautobot_netbox_importer/diffsync/models/users.py | chadell/nautobot-plugin-netbox-importer | 9baebac18ffd26eb4e3096eddc5c145a04914a21 | [
"Apache-2.0"
] | null | null | null | """User-related DiffSync models for nautobot-netbox-importer."""
from datetime import datetime
from typing import List, Optional
import nautobot.users.models as users
from .abstract import ArrayField, NautobotBaseModel
from .references import ContentTypeRef, GroupRef, PermissionRef, UserRef
class ObjectPermission(NautobotBaseModel):
    """A mapping of view, add, change, and/or delete permissions for users and/or groups."""
    # DiffSync bookkeeping: model name and the attributes used for diffing.
    _modelname = "objectpermission"
    _attributes = ("name", "object_types", "groups", "users", "actions", "constraints", "description", "enabled")
    # The Nautobot ORM model this DiffSync model maps onto.
    _nautobot_model = users.ObjectPermission
    name: str
    object_types: List[ContentTypeRef] = []
    groups: List[GroupRef] = []
    users: List[UserRef] = []
    actions: ArrayField
    constraints: Optional[dict]
    description: str
    enabled: bool
class Token(NautobotBaseModel):
    """An API token used for user authentication."""
    # DiffSync bookkeeping: model name and the attributes used for diffing.
    _modelname = "token"
    _attributes = ("key", "user", "expires", "write_enabled", "description")
    _nautobot_model = users.Token
    key: str
    user: UserRef
    expires: Optional[datetime]
    write_enabled: bool
    description: str
class User(NautobotBaseModel):
    """A user account, for authentication and authorization purposes.

    Note that in NetBox this is actually two separate models - Django's built-in User class, and
    a custom UserConfig class - while in Nautobot it is a single custom User class model.
    """
    # DiffSync bookkeeping: model name and the attributes used for diffing.
    _modelname = "user"
    _attributes = (
        "username",
        "first_name",
        "last_name",
        "email",
        "password",
        "is_staff",
        "is_active",
        "is_superuser",
        "date_joined",
        "groups",
        "user_permissions",
        "config_data",
    )
    _nautobot_model = users.User
    username: str
    first_name: str
    last_name: str
    email: str
    password: str
    groups: List[GroupRef] = []
    user_permissions: List[PermissionRef] = []
    is_staff: bool
    is_active: bool
    is_superuser: bool
    date_joined: datetime
    config_data: dict
    # last_login is intentionally excluded from _attributes (not diffed).
    last_login: Optional[datetime]
| 25.73494 | 113 | 0.668539 |
01ccc9d15a6a3f1c6d17093d09c1157886a3e81b | 6,276 | py | Python | pandas/io/excel/_util.py | joybhallaa/pandas | 1779155552631a30d4bb176dec70b8cc477defd7 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2022-02-02T02:05:28.000Z | 2022-02-02T02:09:37.000Z | pandas/io/excel/_util.py | north-star-saj/pandas | fc9fdba6592bdb5d0d1147ce4d65639acd897565 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-02-16T06:43:48.000Z | 2021-03-19T00:07:02.000Z | pandas/io/excel/_util.py | north-star-saj/pandas | fc9fdba6592bdb5d0d1147ce4d65639acd897565 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-28T03:32:40.000Z | 2020-10-28T03:32:40.000Z | from typing import List, MutableMapping
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_integer, is_list_like
# Registry mapping engine name -> ExcelWriter class, populated via
# ``register_writer``.  (Annotation fixed: the values are writer *classes*,
# not strings.)
_writers: MutableMapping[str, type] = {}


def register_writer(klass):
    """
    Add engine to the excel writer registry.io.excel.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
        A callable (normally an ExcelWriter subclass) exposing an
        ``engine`` attribute, which is used as the registry key.

    Raises
    ------
    ValueError
        If ``klass`` is not callable.
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass.engine
    _writers[engine_name] = klass
def get_default_engine(ext, mode="reader"):
    """
    Return the default reader/writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.
    mode : str {'reader', 'writer'}
        Whether to get the default engine for reading or writing.
        Either 'reader' or 'writer'

    Returns
    -------
    str
        The default engine for the extension.
    """
    assert mode in ["reader", "writer"]
    if mode == "writer":
        engines = {
            "xlsx": "openpyxl",
            "xlsm": "openpyxl",
            "xlsb": "pyxlsb",
            "xls": "xlwt",
            "ods": "odf",
        }
        # Prefer xlsxwriter over openpyxl if installed
        if import_optional_dependency("xlsxwriter", errors="warn"):
            engines["xlsx"] = "xlsxwriter"
        return engines[ext]
    engines = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlrd",
        "ods": "odf",
    }
    # if no openpyxl but xlrd installed, fall back to xlrd for xlsx;
    # the xlrd version is validated elsewhere.
    missing_openpyxl = import_optional_dependency("openpyxl", errors="ignore") is None
    if missing_openpyxl and import_optional_dependency("xlrd", errors="ignore") is not None:
        engines["xlsx"] = "xlrd"
    return engines[ext]
def get_writer(engine_name):
    """Return the registered ExcelWriter class for *engine_name*.

    Raises
    ------
    ValueError
        If no writer was registered under that name.
    """
    try:
        writer = _writers[engine_name]
    except KeyError as err:
        raise ValueError(f"No Excel writer '{engine_name}'") from err
    return writer
def _excel2num(x: str) -> int:
"""
Convert Excel column name like 'AB' to 0-based column index.
Parameters
----------
x : str
The Excel column name to convert to a 0-based column index.
Returns
-------
num : int
The column index corresponding to the name.
Raises
------
ValueError
Part of the Excel column name was invalid.
"""
index = 0
for c in x.upper().strip():
cp = ord(c)
if cp < ord("A") or cp > ord("Z"):
raise ValueError(f"Invalid column name: {x}")
index = index * 26 + cp - ord("A") + 1
return index - 1
def _range2cols(areas: str) -> List[int]:
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols: List[int] = []
for rng in areas.split(","):
if ":" in rng:
rngs = rng.split(":")
cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols
def maybe_convert_usecols(usecols):
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns specification to potentially convert.

    Returns
    -------
    object
        ``None`` unchanged, a string expanded into a list of 0-based column
        indices, or the original object otherwise.

    Raises
    ------
    ValueError
        If an integer is passed (no longer supported).
    """
    if usecols is None:
        return usecols
    # Integers were historically interpreted as "the first N columns";
    # that behavior was removed, so reject them explicitly.
    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )
    # Strings like "A:C,F" are Excel-style column specifications.
    return _range2cols(usecols) if isinstance(usecols, str) else usecols
def validate_freeze_panes(freeze_panes):
    """
    Check that ``freeze_panes`` is a (row, column) pair of integers.

    Parameters
    ----------
    freeze_panes : tuple of (int, int) or None
        Cell coordinates at which panes should be frozen, or ``None`` when
        no freezing was requested.

    Returns
    -------
    bool
        ``True`` when ``freeze_panes`` is a valid pair; ``False`` when it is
        ``None`` (so no freezing is applied to the output sheet).

    Raises
    ------
    ValueError
        If ``freeze_panes`` is neither ``None`` nor a pair of integers.
    """
    if freeze_panes is None:
        # Not specified; caller should skip applying freeze panes.
        return False
    is_valid_pair = len(freeze_panes) == 2 and all(
        isinstance(item, int) for item in freeze_panes
    )
    if not is_valid_pair:
        raise ValueError(
            "freeze_panes must be of form (row, column) "
            "where row and column are integers"
        )
    return True
def fill_mi_header(row, control_row):
    """
    Forward fill blank entries in ``row``, but only within the same parent index.

    Used when building headers for a MultiIndex.

    Parameters
    ----------
    row : list
        Items of a single header row; modified in place.
    control_row : list of bool
        ``True`` while a column is still under the same parent index as the
        previous column; flipped to ``False`` in place once a non-blank value
        is seen, which stops blanks propagating across different indexes.

    Returns
    -------
    tuple
        The (mutated) ``row`` and ``control_row``.
    """
    fill_value = row[0]
    for pos in range(1, len(row)):
        current = row[pos]
        if not control_row[pos]:
            # Crossed into a new parent index: restart filling from here.
            fill_value = current
        if current == "" or current is None:
            row[pos] = fill_value
        else:
            control_row[pos] = False
            fill_value = current
    return row, control_row
def pop_header_name(row, index_col):
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int or list of int
        The index column(s) for the data; assumed to be non-null.

    Returns
    -------
    header_name : str or None
        The extracted header name (``None`` when the cell was empty).
    trimmed_row : list
        A copy of ``row`` with the header cell blanked out.
    """
    # With multiple index columns, the header name lives in the last of them.
    pos = max(index_col) if is_list_like(index_col) else index_col
    name = row[pos]
    if name == "":
        name = None
    return name, row[:pos] + [""] + row[pos + 1:]
| 25.003984 | 81 | 0.587635 |
9cea4dd7c94bebbfae8043423de2f6af536b424f | 24,524 | py | Python | psyneulink/library/subsystems/agt/agtcontrolmechanism.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | psyneulink/library/subsystems/agt/agtcontrolmechanism.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | psyneulink/library/subsystems/agt/agtcontrolmechanism.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ************************************** AGTControlMechanism ************************************************
"""
Overview
--------
An AGTControlMechanism is a `ControlMechanism <ControlMechanism>` that uses an ObjectiveMechanism with a `AGTUtilityIntegrator`
Function to regulate its `allocation_policy <ControlMechanism.allocation_policy>`. When used with an `LCControlMechanism`
to regulate the `mode <FHNIntegrator.mode>` parameter of its `FHNIntegrator` Function, it implements a form of the
`Adaptive Gain Theory <http://www.annualreviews.org/doi/abs/10.1146/annurev.neuro.28.061604.135709>`_ of the locus
coeruleus-norepinephrine (LC-NE) system.
.. _AGTControlMechanism_Creation:
Creating an AGTControlMechanism
-------------------------------
An AGTControlMechanism can be created in any of the ways used to `create a ControlMechanism <ControlMechanism_Creation>`.
Like all ControlMechanisms, an AGTControlMechanism receives its `input <AGTControlMechanism_Input>` from an `ObjectiveMechanism`.
However, unlike a standard ControlMechanism, an AGTControlMechanism does not have an **objective_mechanism** argument in its
constructor. When an AGTControlMechanism is created, it automatically creates an ObjectiveMechanism and assigns a
`AGTUtilityIntegrator` Function as its `function <ObjectiveMechanism.function>`.
The OutputStates to be monitored by the AGTControlMechanism's `objective_mechanism <AGTControlMechanism.objective_mechanism>` are
specified using the **monitored_output_states** argument of the AGTControlMechanism's constructor, using any of the ways to
`specify the OutputStates monitored by ObjectiveMechanism <ObjectiveMechanism_Monitored_Output_States>`. The
monitored OutputStates are listed in the LCControlMechanism's `monitored_output_states <AGTControlMechanism.monitored_output_states>`
attribute, as well as that of its `objective_mechanism <AGTControlMechanism.objective_mechanism>`.
The parameter(s) controlled by an AGTControlMechanism are specified in the **control_signals** argument of its constructor,
in the `standard way for a ControlMechanism <ControlMechanism_Control_Signals>`.
.. _AGTControlMechanism_Structure:
Structure
---------
.. _AGTControlMechanism_Input:
Input: ObjectiveMechanism and Monitored OutputStates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An AGTControlMechanism has a single (primary) `InputState <InputState_Primary>` that receives its input via a
`MappingProjection` from the *OUTCOME* `OutputState <ObjectiveMechanism_Output>` of an `ObjectiveMechanism`.
The ObjectiveMechanism is created automatically when the AGTControlMechanism is created, using a `AGTUtilityIntegrator` as its
`function <ObjectiveMechanism.function>`, and is listed in the AGTControlMechanism's `objective_mechanism
<AGTControlMechanism.objective_mechanism>` attribute. The ObjectiveMechanism aggregates the `value <OutputState.value>`\\s
of the OutputStates that it monitors, integrates their aggregated value at two different rates, and combines those to
generate the its output, which is used by the AGTControlMechanism as its input. The OutputStates monitored by the
ObjectiveMechanism, listed in its `monitored_output_states <ObjectiveMechanism.monitored_output_states>`
attribute, are also listed in the AGTControlMechanism's `monitored_output_states <AGTControlMechanism_Base.monitored_output_states>`
attribute. They can be displayed using the AGTControlMechanism's `show <AGTControlMechanism.show>` method.
.. _AGTControlMechanism_Function:
Function
~~~~~~~~
An AGTControlMechanism uses the default function for a `ControlMechanism` (a default `Linear` Function), that simply passes
its input to its output. Thus, it is the output of the AGTControlMechanism's `objective_mechanism
<AGTControlMechanism.objective_mechanism>` that determines its `allocation_policy <ControlMechanism.allocation_policy>`
and the `allocation <ControlSignal.allocation>` of its `ControlSignal(s) <ControlSignal>`.
.. _AGTControlMechanism_Output:
Output
~~~~~~
An AGTControlMechanism has a `ControlSignal` for each parameter specified in its `control_signals
<ControlMechanism.control_signals>` attribute, that sends a `ControlProjection` to the `ParameterState` for the
corresponding parameter. ControlSignals are a type of `OutputState`, and so they are also listed in the
AGTControlMechanism's `output_states <AGTControlMechanism_Base.output_states>` attribute. The parameters modulated by an
AGTControlMechanism's ControlSignals can be displayed using its `show <AGTControlMechanism_Base.show>` method. By default,
all of its ControlSignals are assigned the result of the AGTControlMechanism's `function <AGTControlMechanism.function>`, which is
the `input <AGTControlMechanism_Input>` it receives from its `objective_mechanism <AGTControlMechanism.objective_mechanism>`.
The `allocation <ControlSignal.allocation>` is used by the ControlSignal(s) to determine
their `intensity <ControlSignal.intensity>`, which is then assigned as the `value <ControlProjection.value>` of the
ControlSignal's `ControlProjection`. The `value <ControlProjection.value>` of the ControlProjection is used by the
`ParameterState` to which it projects to modify the value of the parameter it controls (see
`ControlSignal_Modulation` for description of how a ControlSignal modulates the value of a parameter).
.. _AGTControlMechanism_Execution:
Execution
---------
An AGTControlMechanism's `function <AGTControlMechanism_Base.function>` takes as its input the `value <InputState.value>` of
its *ERROR_SIGNAL* `input_state <Mechanism_Base.input_state>`, and uses that to determine its `allocation_policy
<ITC.allocation_policy>` which specifies the value assigned to the `allocation <ControlSignal.allocation>` of each of
its `ControlSignals <ControlSignal>`. An AGTControlMechanism assigns the same value (the `input <AGTControlMechanism_Input>` it
receives from its `objective_mechanism <AGTControlMechanism.objective_mechanism>`) to all of its ControlSignals. Each
ControlSignal uses that value to calculate its `intensity <ControlSignal.intensity>`, which is used by its
`ControlProjection(s) <ControlProjection>` to modulate the value of the ParameterState(s) for the parameter(s) it
controls, which are then used in the subsequent `TRIAL` of execution.
.. note::
A `ParameterState` that receives a `ControlProjection` does not update its value until its owner Mechanism
executes (see `Lazy Evaluation <LINK>` for an explanation of "lazy" updating). This means that even if a
ControlMechanism has executed, a parameter that it controls will not assume its new value until the Mechanism
to which it belongs has executed.
.. _AGTControlMechanism_Class_Reference:
Class Reference
---------------
"""
import typecheck as tc
from psyneulink.components.functions.function import AGTUtilityIntegrator, ModulationParam, _is_modulation_param
from psyneulink.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism
from psyneulink.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.components.shellclasses import Mechanism, System_Base
from psyneulink.components.states.outputstate import OutputState
from psyneulink.globals.keywords import CONTROL, CONTROL_PROJECTIONS, CONTROL_SIGNALS, INIT__EXECUTE__METHOD_ONLY, MECHANISM, OBJECTIVE_MECHANISM
from psyneulink.globals.preferences.componentpreferenceset import is_pref_set
from psyneulink.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.scheduling.time import TimeScale
__all__ = [
'AGTControlMechanism', 'AGTControlMechanismError', 'ControlMechanismRegistry', 'MONITORED_OUTPUT_STATE_NAME_SUFFIX',
'MONITORED_OUTPUT_STATES',
]
MONITORED_OUTPUT_STATES = 'monitored_output_states'
MONITORED_OUTPUT_STATE_NAME_SUFFIX = '_Monitor'
ControlMechanismRegistry = {}
class AGTControlMechanismError(Exception):
    """Exception raised for errors in configuring or using an AGTControlMechanism."""
    def __init__(self, error_value):
        # Forward the message to Exception so that str(exc) and tracebacks
        # actually display it; previously Exception.__init__ was never called,
        # leaving args empty and silently dropping the message from output.
        super().__init__(error_value)
        # Preserved for callers that inspect the original attribute.
        self.error_value = error_value
class AGTControlMechanism(ControlMechanism):
    """
    AGTControlMechanism( \
        system=None, \
        monitored_output_states=None, \
        function=Linear, \
        control_signals=None, \
        params=None, \
        name=None, \
        prefs=None)
    Subclass of `ControlMechanism <AdaptiveMechanism>` that modulates the `multiplicative_param
    <Function_Modulatory_Params>` of the `function <Mechanism_Base.function>` of one or more `Mechanisms <Mechanism>`.
    Arguments
    ---------
    system : System : default None
        specifies the `System` for which the AGTControlMechanism should serve as a `controller <System.controller>`;
        the AGTControlMechanism will inherit any `OutputStates <OutputState>` specified in the **monitor_for_control**
        argument of the `system <EVCControlMechanism.system>`'s constructor, and any `ControlSignals <ControlSignal>`
        specified in its **control_signals** argument.
    monitored_output_states : List[`OutputState`, `Mechanism`, str, value, dict, `MonitoredOutputStatesOption`] or Dict
        specifies the OutputStates to be monitored by the `objective_mechanism <AGTControlMechanism.objective_mechanism>`
        (see `monitored_output_states <ObjectiveMechanism.monitored_output_states>` for details of specification).
    function : TransferFunction : default Linear(slope=1, intercept=0)
        specifies the Function used to convert the AGTControlMechanism's `input <AGTControlMechanism_Input>` into its
        `allocation_policy <AGTControlMechanism.allocation_policy>`, that is used to assign the `allocation
        <ControlSignal.allocation>` of its `ControlSignal(s) <ControlSignal>`.
    control_signals : List[ParameterState, tuple[str, Mechanism] or dict]
        specifies the parameters to be controlled by the AGTControlMechanism; a `ControlSignal` is created for each
        (see `ControlSignal_Specification` for details of specification).
    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterState_Specification>` that can be used to specify the parameters
        for the Mechanism, parameters for its function, and/or a custom function and its parameters. Values
        specified for parameters in the dictionary override any assigned to those parameters in arguments of the
        constructor.
    name : str : default see `name <AGTControlMechanism.name>`
        specifies the name of the AGTControlMechanism.
    prefs : PreferenceSet or specification dict : default Mechanism.classPreferences
        specifies the `PreferenceSet` for the AGTControlMechanism; see `prefs <AGTControlMechanism.prefs>` for details.
    Attributes
    ----------
    system : System_Base
        the `System` for which AGTControlMechanism is the `controller <System.controller>`;
        the AGTControlMechanism inherits any `OutputStates <OutputState>` specified in the **monitor_for_control**
        argument of the `system <EVCControlMechanism.system>`'s constructor, and any `ControlSignals <ControlSignal>`
        specified in its **control_signals** argument.
    objective_mechanism : ObjectiveMechanism
        `ObjectiveMechanism` that monitors and evaluates the values specified in the ControlMechanism's
        **objective_mechanism** argument, the output of which is used as `input <AGTControlMechanism_Input>` to the
        AGTControlMechanism. It is created automatically when AGTControlMechanism is created, and uses as a `AGTUtilityIntegrator` as
        is `function <ObjectiveMechanism.function>`.
    monitored_output_states : List[OutputState]
        each item is an `OutputState` monitored by the `objective_mechanism <AGTControlMechanism.objective_mechanism>`; it is
        the same as the ObjectiveMechanism's `monitored_output_states <ObjectiveMechanism.monitored_output_states>`
        attribute. The `value <OutputState.value>` of the OutputStates listed are used by the ObjectiveMechanism to
        generate the AGTControlMechanism's `input <AGTControlMechanism_Input>`.
    monitored_output_states_weights_and_exponents : List[Tuple(float, float)]
        each tuple in the list contains the weight and exponent associated with a corresponding item of
        `monitored_output_states <AGTControlMechanism.monitored_output_states>`; these are the same as those in
        the `monitored_output_states_weights_and_exponents
        <ObjectiveMechanism.monitored_output_states_weights_and_exponents>` attribute of the `objective_mechanism
        <AGTControlMechanism.objective_mechanism>`, and are used by the ObjectiveMechanism's `function
        <ObjectiveMechanism.function>` to parametrize the contribution made to its output by each of the values that
        it monitors (see `ObjectiveMechanism Function <ObjectiveMechanism_Function>`).
    function : TransferFunction : default Linear(slope=1, intercept=0)
        determines the Function used to convert the AGTControlMechanism's `input <AGTControlMechanism_Input>` into its
        `allocation_policy <AGTControlMechanism.allocation_policy>`, that is used to assign the
        `allocation <ControlSignal.allocation>` for its `ControlSignal(s) <ControlSignal>`.
    allocation_policy : 2d np.array
        contains the value(s) assigned as the `allocation <ControlSignal.allocation>` for the `ControlSignal(s)
        <ControlSignal>` listed in the `control_signals` attribute; if the default `function <AGTControlMechanism.function>`
        is used, it contains a single value that is assigned as the `allocation <ControlSignal.allocation>` for
        all of the AGTControlMechanism's `control_signals <AGTControlMechanism.control_signals>`. The AGTControlMechanism's allocation_policy
        is the same as its `value <Mechanism_Base.value>` attribute).
    control_signals : List[ControlSignal]
        list of the AGTControlMechanism's `ControlSignals <ControlSignals>` , including any inherited from a `system
        <ControlMechanism.system>` for which it is a `controller <System.controller>` (same as
        ControlMechanism's `output_states <Mechanism_Base.output_states>` attribute); each sends a `ControlProjection`
        to the `ParameterState` for the parameter it controls
    control_projections : List[ControlProjection]
        list of `ControlProjections <ControlProjection>`, one for each `ControlSignal` in `control_signals`.
    modulation : ModulationParam
        the default form of modulation used by the ControlMechanism's `ControlSignals <GatingSignal>`,
        unless they are `individually specified <ControlSignal_Specification>`.
    name : str
        the name of the AGTControlMechanism; if it is not specified in the **name** argument of the constructor, a
        default is assigned by MechanismRegistry (see `Naming` for conventions used for default and duplicate names).
    prefs : PreferenceSet or specification dict
        the `PreferenceSet` for the AGTControlMechanism; if it is not specified in the **prefs** argument of the
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see :doc:`PreferenceSet
        <LINK>` for details).
    """
    componentName = "AGTControlMechanism"
    # Execute-method-only initialization (deferred full init), per psyneulink convention.
    initMethod = INIT__EXECUTE__METHOD_ONLY
    classPreferenceLevel = PreferenceLevel.TYPE
    # Any preferences specified below will override those specified in TypeDefaultPreferences
    # Note: only need to specify setting; level will be assigned to TYPE automatically
    # classPreferences = {
    #     kwPreferenceSetName: 'ControlMechanismClassPreferences',
    #     kp<pref>: <setting>...}
    from psyneulink.components.functions.function import Linear
    paramClassDefaults = ControlMechanism.paramClassDefaults.copy()
    paramClassDefaults.update({CONTROL_SIGNALS: None,
                               CONTROL_PROJECTIONS: None
                               })
    @tc.typecheck
    def __init__(self,
                 system:tc.optional(System_Base)=None,
                 monitored_output_states=None,
                 function = Linear(slope=1, intercept=0),
                 # control_signals:tc.optional(list) = None,
                 control_signals= None,
                 modulation:tc.optional(_is_modulation_param)=ModulationParam.MULTIPLICATIVE,
                 params=None,
                 name=None,
                 prefs:is_pref_set=None,
                 context=None):
        # Assign args to params and functionParams dicts (kwConstants must == arg names)
        params = self._assign_args_to_param_dicts(function=function,
                                                  control_signals=control_signals,
                                                  params=params)
        # The ObjectiveMechanism is created here automatically (there is no
        # **objective_mechanism** constructor argument for AGTControlMechanism);
        # it uses AGTUtilityIntegrator as its function.
        super().__init__(system=system,
                         objective_mechanism=ObjectiveMechanism(monitored_output_states=monitored_output_states,
                                                                function=AGTUtilityIntegrator),
                         control_signals=control_signals,
                         modulation=modulation,
                         params=params,
                         name=name,
                         prefs=prefs,
                         context=self)
        # Rename the auto-created ObjectiveMechanism after this Mechanism and tag its role.
        self.objective_mechanism.name = self.name+'_ObjectiveMechanism'
        self.objective_mechanism._role = CONTROL
    def _validate_params(self, request_set, target_set=None, context=None):
        """Validate SYSTEM, MONITOR_FOR_CONTROL and CONTROL_SIGNALS
        Check that all items in MONITOR_FOR_CONTROL are Mechanisms or OutputStates for Mechanisms in self.system
        Check that every item in `modulated_mechanisms <AGTControlMechanism.modulated_mechanisms>` is a Mechanism
        and that its function has a multiplicative_param
        """
        super()._validate_params(request_set=request_set,
                                 target_set=target_set,
                                 context=context)
        # NOTE(review): MonitoredOutputStatesOption and _parse_monitored_output_states
        # are referenced below but do not appear in this module's imports — confirm
        # they are brought into scope elsewhere, otherwise this branch raises NameError.
        if MONITORED_OUTPUT_STATES in target_set and target_set[MONITORED_OUTPUT_STATES] is not None:
            # It is a MonitoredOutputStatesOption specification
            if isinstance(target_set[MONITORED_OUTPUT_STATES], MonitoredOutputStatesOption):
                # Put in a list (standard format for processing by _parse_monitored_output_states_list)
                target_set[MONITORED_OUTPUT_STATES] = [target_set[MONITORED_OUTPUT_STATES]]
            # It is NOT a MonitoredOutputStatesOption specification, so assume it is a list of Mechanisms or States
            else:
                # Validate each item of MONITORED_OUTPUT_STATES
                for item in target_set[MONITORED_OUTPUT_STATES]:
                    if isinstance(item, MonitoredOutputStatesOption):
                        continue
                    if isinstance(item, tuple):
                        # Weight/exponent tuple spec: the monitored item is the first element.
                        item = item[0]
                    if isinstance(item, dict):
                        # Dict spec: the monitored item is under the MECHANISM key.
                        item = item[MECHANISM]
                    if isinstance(item, (OutputState, Mechanism)):
                        item = item.name
                    if not isinstance(item, str):
                        raise AGTControlMechanismError("Specification of {} arg for {} appears to be a list of "
                                                    "Mechanisms and/or OutputStates to be monitored, but one"
                                                    "of the items ({}) is invalid".
                                                    format(OBJECTIVE_MECHANISM, self.name, item))
                    _parse_monitored_output_states(source=self, output_state_list=item, context=context)
    # The properties below expose parameters of the ObjectiveMechanism's
    # AGTUtilityIntegrator function directly on the AGTControlMechanism.
    # NOTE(review): they read/write via self._objective_mechanism, while __init__
    # assigns self.objective_mechanism — confirm the base class defines the
    # underscore-prefixed attribute. Getters read the function's private
    # underscore-prefixed fields; setters assign the public names.
    @property
    def initial_short_term_utility(self):
        return self._objective_mechanism.function_object._initial_short_term_utility
    @initial_short_term_utility.setter
    def initial_short_term_utility(self, value):
        self._objective_mechanism.function_object.initial_short_term_utility = value
    @property
    def initial_long_term_utility(self):
        return self._objective_mechanism.function_object._initial_long_term_utility
    @initial_long_term_utility.setter
    def initial_long_term_utility(self, value):
        self._objective_mechanism.function_object.initial_long_term_utility = value
    @property
    def short_term_gain(self):
        return self._objective_mechanism.function_object._short_term_gain
    @short_term_gain.setter
    def short_term_gain(self, value):
        self._objective_mechanism.function_object.short_term_gain = value
    @property
    def long_term_gain(self):
        return self._objective_mechanism.function_object._long_term_gain
    @long_term_gain.setter
    def long_term_gain(self, value):
        self._objective_mechanism.function_object.long_term_gain = value
    @property
    def short_term_bias(self):
        return self._objective_mechanism.function_object._short_term_bias
    @short_term_bias.setter
    def short_term_bias(self, value):
        self._objective_mechanism.function_object.short_term_bias = value
    @property
    def long_term_bias(self):
        return self._objective_mechanism.function_object._long_term_bias
    @long_term_bias.setter
    def long_term_bias(self, value):
        self._objective_mechanism.function_object.long_term_bias = value
    @property
    def short_term_rate(self):
        return self._objective_mechanism.function_object._short_term_rate
    @short_term_rate.setter
    def short_term_rate(self, value):
        self._objective_mechanism.function_object.short_term_rate = value
    @property
    def long_term_rate(self):
        return self._objective_mechanism.function_object._long_term_rate
    @long_term_rate.setter
    def long_term_rate(self, value):
        self._objective_mechanism.function_object.long_term_rate = value
    @property
    def operation(self):
        return self._objective_mechanism.function_object._operation
    @operation.setter
    def operation(self, value):
        self._objective_mechanism.function_object.operation = value
    def show(self):
        """Display the `OutputStates <OutputState>` monitored by the AGTControlMechanism's `objective_mechanism`
        and the `multiplicative_params <Function_Modulatory_Params>` modulated by the AGTControlMechanism.
        """
        print ("\n---------------------------------------------------------")
        print ("\n{0}".format(self.name))
        print("\n\tMonitoring the following Mechanism OutputStates:")
        if self.objective_mechanism is None:
            print ("\t\tNone")
        else:
            # Walk each afferent projection into the ObjectiveMechanism to
            # report the monitored OutputState and its weight/exponent.
            for state in self.objective_mechanism.input_states:
                for projection in state.path_afferents:
                    monitored_state = projection.sender
                    monitored_state_mech = projection.sender.owner
                    monitored_state_index = self.monitored_output_states.index(monitored_state)
                    weight = self.monitored_output_states_weights_and_exponents[monitored_state_index][0]
                    exponent = self.monitored_output_states_weights_and_exponents[monitored_state_index][1]
                    print ("\t\t{0}: {1} (exp: {2}; wt: {3})".
                           format(monitored_state_mech.name, monitored_state.name, weight, exponent))
        # NOTE(review): the string below has no placeholder, so .format(self.name) is a no-op.
        print ("\n\tModulating the following parameters:".format(self.name))
        # Sort for consistency of output:
        state_names_sorted = sorted(self.output_states.names)
        for state_name in state_names_sorted:
            for projection in self.output_states[state_name].efferents:
                print ("\t\t{0}: {1}".format(projection.receiver.owner.name, projection.receiver.name))
        print ("\n---------------------------------------------------------")
| 54.256637 | 145 | 0.72529 |
9c2007275d2115fbe36d93345f83478c0433d666 | 2,705 | py | Python | backend/app/questionnaire/models.py | idris-rampurawala/form-fueled | 128f15e73deac07feef286e41dc337efdd4242a1 | [
"MIT"
] | null | null | null | backend/app/questionnaire/models.py | idris-rampurawala/form-fueled | 128f15e73deac07feef286e41dc337efdd4242a1 | [
"MIT"
] | null | null | null | backend/app/questionnaire/models.py | idris-rampurawala/form-fueled | 128f15e73deac07feef286e41dc337efdd4242a1 | [
"MIT"
] | null | null | null | import uuid
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import ArrayField
from django.db import models
from .enums import QuestionTypesEnum
class Questionnaire(models.Model):
    """A user-owned questionnaire (resembles a single shareable form)."""
    # The UUID primary key doubles as an unguessable token for the
    # questionnaire's shareable URL.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Owner of the form; deleting the user cascades to their questionnaires.
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)  # form owner
    name = models.CharField(max_length=255)
    # Optional free-text description shown with the form.
    description = models.TextField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        # NOTE(review): assumes the active user model exposes an `email`
        # attribute (true for Django's default User) — confirm for custom models.
        return f'{self.user.email} - {self.name}'
class Question(models.Model):
    """A single question belonging to a Questionnaire."""
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
    question_text = models.TextField()
    # One of QuestionTypesEnum; defaults to a free-text question.
    question_type = models.CharField(
        max_length=20,
        choices=QuestionTypesEnum.choices,
        default=QuestionTypesEnum.TEXT.value)
    # Choice options for non-text questions, stored as JSON objects.
    options = ArrayField(  # for QuestionTypesEnum.TEXT, this will be null
        models.JSONField(default=dict),
        blank=True,
        null=True,
        size=5  # limiting the options to 5
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f'{self.questionnaire.name} - {self.question_text}'
class QuestionnaireRespondent(models.Model):
    """One respondent's submission entry for a questionnaire."""
    questionnaire = models.ForeignKey(Questionnaire, related_name='responses', on_delete=models.CASCADE)
    # Identifies the respondent; no account is required to respond.
    respondent_email = models.EmailField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Each email address may respond to a given questionnaire only once.
        unique_together = ['questionnaire', 'respondent_email']  # unique response per email to a questionnaire
    def __str__(self):
        return f'{self.questionnaire} - {self.respondent_email}'
class QResponse(models.Model):
    """Answer(s) given by one respondent to one question of a questionnaire."""
    question = models.ForeignKey(Question, related_name='answers', on_delete=models.CASCADE)
    # The submission this answer belongs to.
    # NOTE(review): related_name='respondent' reads oddly for a reverse
    # accessor on QuestionnaireRespondent — confirm intended naming.
    questionnaire_respondent = models.ForeignKey(
        QuestionnaireRespondent,
        related_name='respondent',
        on_delete=models.CASCADE)
    # Raw answer values; multiple entries accommodate multi-select questions.
    answers = ArrayField(  # any response will be saved here.
        models.TextField()
    )
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f'{self.question} - {self.questionnaire_respondent}'
| 35.592105 | 111 | 0.720518 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.