hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81c4dabb1b151d6e6f2c39ffb1b3787cc781ba9f | 2,591 | py | Python | mne/commands/mne_browse_raw.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 2 | 2015-09-27T20:33:49.000Z | 2020-04-22T19:10:56.000Z | mne/commands/mne_browse_raw.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | null | null | null | mne/commands/mne_browse_raw.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 1 | 2018-09-15T09:45:38.000Z | 2018-09-15T09:45:38.000Z | #!/usr/bin/env python
"""Browse raw data
You can do for example:
$ mne browse_raw --raw sample_audvis_raw.fif --proj sample_audvis_ecg_proj.fif --eve sample_audvis_raw-eve.fif
"""
# Authors : Eric Larson, PhD
import sys
import mne
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from mne.commands.utils import get_optparser

    # Build the command-line interface (optparse-based, provided by mne).
    parser = get_optparser(__file__)
    parser.add_option("--raw", dest="raw_in",
                      help="Input raw FIF file", metavar="FILE")
    parser.add_option("--proj", dest="proj_in",
                      help="Projector file", metavar="FILE",
                      default='')
    parser.add_option("--eve", dest="eve_in",
                      help="Events file", metavar="FILE",
                      default='')
    parser.add_option("-d", "--duration", dest="duration", type="float",
                      help="Time window for plotting (sec)",
                      default=10.0)
    parser.add_option("-t", "--start", dest="start", type="float",
                      help="Initial start time for plotting",
                      default=0.0)
    parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
                      help="Number of channels to plot at a time",
                      default=20)
    parser.add_option("-o", "--order", dest="order",
                      help="Order for plotting ('type' or 'original')",
                      default='type')
    # NOTE(review): the next two options declare no type/action, so any value
    # supplied on the command line arrives as a non-empty (truthy) string --
    # e.g. "--preload False" still preloads.  action="store_true" would be the
    # conventional fix; confirm before changing the CLI.
    parser.add_option("-p", "--preload", dest="preload",
                      help="Preload raw data (for faster navigaton)",
                      default=False)
    parser.add_option("-s", "--show_options", dest="show_options",
                      help="Show projection options dialog",
                      default=False)

    options, args = parser.parse_args()

    raw_in = options.raw_in
    duration = options.duration
    start = options.start
    n_channels = options.n_channels
    order = options.order
    preload = options.preload
    show_options = options.show_options
    proj_in = options.proj_in
    eve_in = options.eve_in

    # --raw is mandatory; print usage and exit with an error code otherwise.
    if raw_in is None:
        parser.print_help()
        sys.exit(1)

    raw = mne.fiff.Raw(raw_in, preload=preload)
    # Optionally attach SSP projectors and events before plotting.
    if len(proj_in) > 0:
        projs = mne.read_proj(proj_in)
        raw.info['projs'] = projs
    if len(eve_in) > 0:
        events = mne.read_events(eve_in)
    else:
        events = None
    fig = raw.plot(duration=duration, start=start, n_channels=n_channels,
                   order=order, show_options=show_options, events=events)
    # Block until the interactive browser window is closed.
    plt.show(block=True)
| 33.649351 | 110 | 0.575068 |
b77c5293a121a4da2ce5a38914424fb508f2c690 | 1,662 | py | Python | sdk/python/pulumi_azure_nextgen/authorization/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/authorization/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/authorization/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_policy_assignment import *
from .get_policy_set_definition import *
from .get_policy_set_definition_at_management_group import *
from .policy_assignment import *
from .policy_set_definition import *
from .policy_set_definition_at_management_group import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime.

    This lets resources read back from engine state be rehydrated into the
    proper Python classes based on their fully-qualified type token.
    (Auto-generated by the Pulumi SDK Generator -- do not edit by hand.)
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # Version reported for every resource constructed through this module.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map the type token to its resource class; the URN identifies the
            # existing resource to hydrate (no new cloud resource is created).
            if typ == "azure-nextgen:authorization/v20170601preview:PolicyAssignment":
                return PolicyAssignment(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-nextgen:authorization/v20170601preview:PolicySetDefinition":
                return PolicySetDefinition(name, pulumi.ResourceOptions(urn=urn))
            elif typ == "azure-nextgen:authorization/v20170601preview:PolicySetDefinitionAtManagementGroup":
                return PolicySetDefinitionAtManagementGroup(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-nextgen", "authorization/v20170601preview", _module_instance)


_register_module()
| 39.571429 | 112 | 0.72503 |
0d2558f04d2d8ed68125d71c2345026a86d2fbdf | 511 | py | Python | algorithms/912.Sort-an-Array/Python/solution_8.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | algorithms/912.Sort-an-Array/Python/solution_8.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | algorithms/912.Sort-an-Array/Python/solution_8.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/sort-an-array/submissions/
"""
from typing import List
# Counting Sort
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
minNum = min(nums)
maxNum = max(nums)
count = [0] * (maxNum-minNum+1)
for num in nums:
count[num-minNum] += 1
i = 0
for k, c in enumerate(count):
while c > 0:
nums[i] = k+minNum
i += 1
c -= 1
return nums
| 22.217391 | 56 | 0.495108 |
55e45e755637a47be6c6962e1c358e3c1350d530 | 885 | py | Python | estofadora/client/admin.py | delete/estofadora | 2f46ba65fb0e376361ff47c86ea7a62c50b6c91b | [
"MIT"
] | 6 | 2016-04-13T21:30:30.000Z | 2017-09-29T04:47:07.000Z | estofadora/client/admin.py | delete/estofadora | 2f46ba65fb0e376361ff47c86ea7a62c50b6c91b | [
"MIT"
] | 13 | 2016-04-13T23:52:09.000Z | 2020-06-05T18:25:13.000Z | estofadora/client/admin.py | delete/estofadora | 2f46ba65fb0e376361ff47c86ea7a62c50b6c91b | [
"MIT"
] | 1 | 2016-06-24T13:48:26.000Z | 2016-06-24T13:48:26.000Z | # coding: utf-8
from django.utils.datetime_safe import datetime
from django.contrib import admin
from estofadora.client.models import Client
class ClientAdmin(admin.ModelAdmin):
    """Django admin configuration for Client records."""

    list_display = ('name', 'adress', 'email', 'telephone1', 'is_active')
    search_fields = ('name', 'adress', 'email', 'telephone1', 'telephone2')
    date_hierarchy = 'date_join'
    list_filter = ['date_join']
    # Fix: a custom admin action must be declared in ``actions`` to appear in
    # the change-list dropdown; ``mark_as_active`` was defined below but never
    # registered, so it was unreachable from the admin UI.
    actions = ['mark_as_active']

    def subscribed_today(self, obj):
        """Whether the client registered today (renders as a boolean icon)."""
        return obj.date_join.date() == datetime.today().date()

    subscribed_today.short_description = (u'Cadastrado hoje?')
    subscribed_today.boolean = True

    def mark_as_active(self, request, queryset):
        """Bulk action: activate the selected clients and report the count."""
        count = queryset.update(is_active=True)
        msg = u'%d clientes ativos.'
        self.message_user(request, msg % count)

    mark_as_active.short_description = ('Marcar como ativo')


admin.site.register(Client, ClientAdmin)
| 30.517241 | 75 | 0.705085 |
9c2d26affaad30ae18e3a3a20701fb774ede4fa8 | 8,879 | py | Python | async_io/rest/rest_client.py | vnpy/vnpy_lab | 370ec82d65584eac28a00ec34b839ad790bee414 | [
"MIT"
] | 12 | 2019-02-16T20:03:23.000Z | 2022-01-28T02:37:56.000Z | async_io/rest/rest_client.py | vnpy/vnpy_lab | 370ec82d65584eac28a00ec34b839ad790bee414 | [
"MIT"
] | 1 | 2019-03-08T04:57:11.000Z | 2019-03-12T01:21:56.000Z | async_io/rest/rest_client.py | vnpy/vnpy_lab | 370ec82d65584eac28a00ec34b839ad790bee414 | [
"MIT"
] | 9 | 2019-02-24T03:35:05.000Z | 2021-12-21T08:55:59.000Z | import asyncio
import sys
import traceback
from datetime import datetime
from enum import Enum
from typing import Any, Callable, Optional, Union
import aiohttp
import requests
from vnpy.api.asyncio.async_executor import create_async_task, loop, start_asyncio, stop_asyncio, \
wait_for_async_task, wrap_as_sync
class RequestStatus(Enum):
    """Lifecycle states of a :class:`Request`."""

    ready = 0  # Request created
    success = 1  # Request successful (status code 2xx)
    failed = 2  # Request failed (status code not 2xx)
    error = 3  # Exception raised
class Request(object):
    """
    Request object for status check.

    Bundles everything needed to issue one HTTP call (method, path, query
    params, body, headers) together with the callbacks to run on success,
    failure, or exception, and records the response/status afterwards.
    """

    def __init__(
        self,
        method: str,
        path: str,
        params: dict,
        data: Union[dict, str, bytes],
        headers: dict,
        callback: Callable = None,
        on_failed: Callable = None,
        on_error: Callable = None,
        extra: Any = None,
    ):
        """Store request parameters and callbacks; status starts as ``ready``."""
        # HTTP verb and API path (relative to the client's url_base).
        self.method = method
        self.path = path
        self.callback = callback
        self.params = params
        self.data = data
        self.headers = headers

        self.on_failed = on_failed
        self.on_error = on_error
        self.extra = extra

        # Filled in by RestClient once the request has actually been sent.
        self.response: Optional[aiohttp.ClientResponse] = None
        self.status = RequestStatus.ready

    def __str__(self):
        # "terminated" marks a request that never received a response.
        if self.response is None:
            status_code = "terminated"
        else:
            status_code = self.response.status
        # NOTE(review): aiohttp's ``ClientResponse.text`` is a coroutine
        # method, so the "response:" line below prints a bound-method repr
        # rather than the body text -- confirm whether the body is wanted.
        return (
            "request : {} {} {} because {}: \n"
            "headers: {}\n"
            "params: {}\n"
            "data: {}\n"
            "response:"
            "{}\n".format(
                self.method,
                self.path,
                self.status.name,
                status_code,
                self.headers,
                self.params,
                self.data,
                "" if self.response is None else self.response.text,
            )
        )
class RestClient(object):
    """
    HTTP Client designed for all sorts of trading RESTFul API.

    * Reimplement sign function to add signature function.
    * Reimplement on_failed function to handle Non-2xx responses.
    * Use on_failed parameter in add_request function for individual Non-2xx response handling.
    * Reimplement on_error function to handle exception msg.

    Async requests go through :meth:`add_request` (scheduled on the shared
    asyncio loop); :meth:`request` offers a blocking alternative built on
    the ``requests`` library.
    """

    def __init__(self):
        """Create an unconfigured client; call :meth:`init` before use."""
        self.url_base = ''  # type: str
        # Proxy settings in the two formats the two backends expect.
        self.requests_proxies = None
        self.aiohttp_proxy = None

        self._session: Optional[aiohttp.ClientSession] = None
        self._stop_task: Optional[asyncio.Task] = None

    def init(self, url_base: str, proxy_host: str = "", proxy_port: int = 0):
        """
        Init rest client with url_base which is the API root address.
        e.g. 'https://www.bitmex.com/api/v1/'
        """
        self.url_base = url_base
        if proxy_host and proxy_port:
            proxy = f"{proxy_host}:{proxy_port}"
            self.requests_proxies = {"http": proxy, "https": proxy}
            self.aiohttp_proxy = f'http://{proxy}'
        # The session is bound to the shared event loop from async_executor.
        self._session = aiohttp.ClientSession(loop=loop)

    def start(self, _):
        """Start the shared asyncio loop (the argument is ignored)."""
        start_asyncio()

    def stop(self):
        """
        Stop the client.

        Schedules the session close on the loop, then stops the loop; call
        :meth:`join` to wait for the close to finish.
        """
        self._stop_task: asyncio.Task = create_async_task(self._session.close())
        stop_asyncio()

    def join(self):
        """
        Wait until all worker exit.
        """
        if self._stop_task:
            wait_for_async_task(self._stop_task)

    def add_request(
        self,
        method: str,
        path: str,
        callback: Callable,
        params: dict = None,
        data: Union[dict, str, bytes] = None,
        headers: dict = None,
        on_failed: Callable = None,
        on_error: Callable = None,
        extra: Any = None,
    ):
        """
        Add a new request (non-blocking; processed on the asyncio loop).

        :param method: GET, POST, PUT, DELETE, QUERY
        :param path: API path, appended to ``url_base``
        :param callback: callback function if 2xx status, type: (dict, Request)
        :param params: dict for query string
        :param data: Http body. If it is a dict, it will be converted to form-data. Otherwise, it will be converted to bytes.
        :param headers: dict for headers
        :param on_failed: callback function if Non-2xx status, type: (code, Request)
        :param on_error: callback function when catching Python exception, type: (etype, evalue, tb, Request)
        :param extra: Any extra data which can be used when handling callback
        :return: Request
        """
        request = Request(
            method,
            path,
            params,
            data,
            headers,
            callback,
            on_failed,
            on_error,
            extra,
        )
        create_async_task(self._process_request(request))
        return request

    def sign(self, request: Request):
        """
        This function is called before sending any request out.
        Please implement signature method here.
        @:return (request)
        """
        return request

    def on_failed(self, status_code: int, request: Request):
        """
        Default on_failed handler for Non-2xx response.
        """
        sys.stderr.write(str(request))

    def on_error(
        self,
        exception_type: type,
        exception_value: Exception,
        tb,
        request: Optional[Request],
    ):
        """
        Default on_error handler for Python exception.
        """
        sys.stderr.write(
            self.exception_detail(exception_type, exception_value, tb, request)
        )
        sys.excepthook(exception_type, exception_value, tb)

    def exception_detail(
        self,
        exception_type: type,
        exception_value: Exception,
        tb,
        request: Optional[Request],
    ):
        """Format an exception plus the offending request into a report string."""
        text = "[{}]: Unhandled RestClient Error:{}\n".format(
            datetime.now().isoformat(), exception_type
        )
        text += "request:{}\n".format(request)
        text += "Exception trace: \n"
        text += "".join(
            traceback.format_exception(exception_type, exception_value, tb)
        )
        return text

    async def _process_request(
        self, request: Request,
    ):
        """
        Sending request to server and get result.

        Signs the request, performs it, then dispatches to the per-request
        callbacks (falling back to the client-level handlers).  All
        exceptions are caught and routed to on_error.
        """
        try:
            request = self.sign(request)
            url = self.make_full_url(request.path)

            response: aiohttp.ClientResponse = await self._session.request(
                method=request.method,
                url=url,
                headers=request.headers,
                params=request.params,
                data=request.data,
                proxy=self.aiohttp_proxy,
            )
            # Replace the coroutine ``json`` with a synchronous wrapper so
            # callbacks can call ``response.json()`` without awaiting.
            response.json = wrap_as_sync(response.json())
            request.response = response
            status_code = response.status
            if status_code // 100 == 2:  # 2xx codes are all successful
                if status_code == 204:
                    json_body = None  # 204 No Content carries no body
                else:
                    json_body = response.json()

                request.callback(json_body, request)
                request.status = RequestStatus.success
            else:
                request.status = RequestStatus.failed

                if request.on_failed:
                    request.on_failed(status_code, request)
                else:
                    self.on_failed(status_code, request)
        except Exception:
            request.status = RequestStatus.error
            t, v, tb = sys.exc_info()
            if request.on_error:
                request.on_error(t, v, tb, request)
            else:
                self.on_error(t, v, tb, request)

    def make_full_url(self, path: str):
        """
        Make relative api path into full url.
        eg: make_full_url('/get') == 'http://xxxxx/get'
        """
        url = self.url_base + path
        return url

    def request(
        self,
        method: str,
        path: str,
        params: dict = None,
        data: dict = None,
        headers: dict = None,
    ):
        """
        Issue a request synchronously (blocking, via the ``requests`` library).

        :param method: GET, POST, PUT, DELETE, QUERY
        :param path: API path, appended to ``url_base``
        :param params: dict for query string
        :param data: dict for body
        :param headers: dict for headers
        :return: requests.Response
        """
        request = Request(
            method,
            path,
            params,
            data,
            headers
        )
        request = self.sign(request)

        url = self.make_full_url(request.path)

        response = requests.request(
            request.method,
            url,
            headers=request.headers,
            params=request.params,
            data=request.data,
            proxies=self.requests_proxies,
        )
        return response
| 28.921824 | 125 | 0.549499 |
84b414d025ee0e4708ca40fdbca814eb43c5689e | 3,482 | py | Python | atlantic_server/atl/views.py | matteli/atlantic_server | d2c77fa172600ee304ebcf86df8242f466f5fb81 | [
"MIT"
] | 4 | 2019-08-08T12:46:27.000Z | 2019-11-09T19:24:38.000Z | atlantic_server/atl/views.py | matteli/atlantic_server | d2c77fa172600ee304ebcf86df8242f466f5fb81 | [
"MIT"
] | 1 | 2019-10-21T12:10:39.000Z | 2019-10-21T16:35:41.000Z | atlantic_server/atl/views.py | matteli/atlantic_server | d2c77fa172600ee304ebcf86df8242f466f5fb81 | [
"MIT"
] | null | null | null | from django.db.models import Max
from rest_framework.permissions import (
IsAdminUser,
IsAuthenticatedOrReadOnly,
BasePermission,
SAFE_METHODS,
)
from rest_framework import viewsets
from rest_framework.generics import get_object_or_404
from django_filters import rest_framework as filters
from ..com.models import Plane
from .models import Page, Comment, Camera
from .serializers import (
PageSerializer,
ListPageSerializer,
CommentSerializer,
CameraSerializer,
)
from ..com.const import PROGRESS_CHOICES, NATURE_CHOICES
class ReadOnly(BasePermission):
    """Permission that grants access only for safe (read-only) HTTP methods."""

    def has_permission(self, request, view):
        # Allow HEAD / GET / OPTIONS; reject anything that mutates state.
        is_safe = request.method in SAFE_METHODS
        return is_safe
class PageFilter(filters.FilterSet):
    """Filter pages by one or more ``nature`` and/or ``progress`` values."""

    nature = filters.MultipleChoiceFilter(choices=NATURE_CHOICES)
    progress = filters.MultipleChoiceFilter(choices=PROGRESS_CHOICES)

    class Meta:
        model = Page
        fields = ["nature", "progress"]
class PageViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the pages of a plane, nested under its registration.

    Anonymous users get read-only access; authenticated users may write.
    """

    serializer_class = PageSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = PageFilter
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_serializer_class(self):
        # Lightweight serializer for list views; full serializer elsewhere.
        if self.action == "list":
            return ListPageSerializer
        # elif self.action == 'retrieve':
        else:
            return PageSerializer

    def get_queryset(self):
        # Only pages of the plane addressed in the URL.
        return Page.objects.filter(
            plane__registration=self.kwargs["plane_registration"]
        )

    def perform_create(self, serializer):
        """Attach the new page to its plane and record the requesting user."""
        plane = get_object_or_404(Plane, registration=self.kwargs["plane_registration"])
        # The creating user is stored as the editor of the embedded comment data.
        serializer.validated_data["comments"]["editor"] = self.request.user
        serializer.save(plane=plane)
class TourViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only listing of a plane's pages that belong to a tour."""

    serializer_class = PageSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = PageFilter

    def get_queryset(self):
        # Pages of the addressed plane with a positive tour number only.
        return Page.objects.filter(
            plane__registration=self.kwargs["plane_registration"]
        ).filter(tour__gt=0)
class CommentViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the comments of a page (nested under the page pk)."""

    serializer_class = CommentSerializer
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_queryset(self):
        # Only comments belonging to the page addressed in the URL.
        return Comment.objects.filter(page__id=self.kwargs["page_pk"])

    def perform_create(self, serializer):
        """Create the comment and propagate its progress onto the parent page."""
        page = get_object_or_404(Page, id=self.kwargs["page_pk"])
        comment = serializer.save(page=page, editor=self.request.user)
        page_updated = PageSerializer(
            page, data={"progress": comment.progress}, partial=True
        )
        # Fix: removed the stray debug ``print("valid")`` that was left in.
        # An invalid partial update is silently skipped, as before.
        if page_updated.is_valid():
            page_updated.save()
class CameraViewSet(viewsets.ModelViewSet):
    """Cameras of a plane: writable by admin users, read-only for others."""

    serializer_class = CameraSerializer
    permission_classes = (IsAdminUser | ReadOnly,)

    def get_queryset(self):
        # Only cameras with an assigned view number, in display order.
        return (
            Camera.objects.filter(plane__registration=self.kwargs["plane_registration"])
            .filter(view__gt=0)
            .order_by("view")
        )

    def perform_create(self, serializer):
        """Create a camera assigned the next free ``view`` number of the plane."""
        plane = get_object_or_404(Plane, registration=self.kwargs["plane_registration"])
        max_view = Camera.objects.filter(
            plane__registration=self.kwargs["plane_registration"]
        ).aggregate(Max("view"))["view__max"]
        # aggregate() yields None when the plane has no cameras yet.
        if max_view:
            view = int(max_view) + 1
        else:
            view = 1
        serializer.save(plane=plane, view=view)
| 30.814159 | 88 | 0.697013 |
54804d8b4ba22b874b729e8cd255ea3f5763936d | 3,040 | py | Python | bddm/trainer/ema.py | tencent-ailab/bddm | 8c3f807e84f0ebf1a4942a990f369a92cba79c61 | [
"Apache-2.0"
] | 76 | 2022-03-25T08:28:34.000Z | 2022-03-31T07:44:25.000Z | bddm/trainer/ema.py | shaun95/bddm | c78786e6de6b58c7c6ac4f97e22fe08b99a4d88a | [
"Apache-2.0"
] | 1 | 2022-03-29T15:49:16.000Z | 2022-03-29T15:49:16.000Z | bddm/trainer/ema.py | shaun95/bddm | c78786e6de6b58c7c6ac4f97e22fe08b99a4d88a | [
"Apache-2.0"
] | 10 | 2022-03-25T14:26:18.000Z | 2022-03-30T03:11:10.000Z | #!/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# EMA Helper Class
#
# Author: Max W. Y. Lam (maxwylam@tencent.com)
# Copyright (c) 2021Tencent. All Rights Reserved
#
########################################################################
import torch.nn as nn
class EMAHelper(object):
    """Maintains an exponential moving average (EMA) of model parameters.

    The shadow copy is updated as ``shadow = mu * shadow + (1 - mu) * param``
    and can be written back into a module (or a fresh copy of it) for
    evaluation.
    """

    def __init__(self, mu=0.999):
        """
        Parameters:
        mu (float): decaying rate
        """
        self.mu = mu
        self.shadow = {}

    @staticmethod
    def _unwrap(module):
        # nn.DataParallel hides the real model behind ``.module``.
        return module.module if isinstance(module, nn.DataParallel) else module

    def register(self, module):
        """Snapshot every trainable parameter of ``module`` into the shadow store."""
        module = self._unwrap(module)
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            self.shadow[name] = param.data.clone()

    def update(self, module):
        """Blend the module's current parameters into the shadow averages."""
        module = self._unwrap(module)
        decay = self.mu
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            blended = (1. - decay) * param.data + decay * self.shadow[name].data
            self.shadow[name].data = blended

    def ema(self, module):
        """Overwrite the module's trainable parameters with the shadow values."""
        module = self._unwrap(module)
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            param.data.copy_(self.shadow[name].data)

    def ema_copy(self, module):
        """Return a fresh copy of ``module`` whose parameters are the EMA values."""
        if isinstance(module, nn.DataParallel):
            inner = module.module
            clone = type(inner)(inner.config).to(inner.config.device)
            clone.load_state_dict(inner.state_dict())
            clone = nn.DataParallel(clone)
        else:
            clone = type(module)(module.config).to(module.config.device)
            clone.load_state_dict(module.state_dict())
        self.ema(clone)
        return clone

    def state_dict(self):
        """Return the shadow parameter dict (suitable for checkpointing)."""
        return self.shadow

    def load_state_dict(self, state_dict):
        """Replace the shadow store with a previously saved state dict."""
        self.shadow = state_dict
| 29.230769 | 81 | 0.549013 |
3c944ec620bb6c6969b969051697d0f0912516c2 | 2,517 | py | Python | .travis/pipeline_configs/default_classification.py | usc-isi-i2/dsbox-cleaning | 3cb5146dbf89f0ea2f8bf71a843eb1cfa63f7917 | [
"MIT"
] | 7 | 2017-06-28T18:36:46.000Z | 2018-01-27T01:40:29.000Z | .travis/pipeline_configs/default_classification.py | usc-isi-i2/dsbox-cleaning | 3cb5146dbf89f0ea2f8bf71a843eb1cfa63f7917 | [
"MIT"
] | 47 | 2017-06-09T19:25:19.000Z | 2019-04-12T08:50:32.000Z | .travis/pipeline_configs/default_classification.py | usc-isi-i2/dsbox-cleaning | 3cb5146dbf89f0ea2f8bf71a843eb1cfa63f7917 | [
"MIT"
] | 7 | 2017-09-25T20:30:45.000Z | 2018-10-11T18:34:30.000Z | class config:
config = {'sampling_step': {'primitive': 'd3m.primitives.data_preprocessing.DoNothingForDataset.DSBOX', 'hyperparameters': {}}, 'denormalize_step': {'primitive': 'd3m.primitives.data_transformation.denormalize.Common', 'hyperparameters': {}}, 'to_dataframe_step': {'primitive': 'd3m.primitives.data_transformation.dataset_to_dataframe.Common', 'hyperparameters': {}}, 'extract_attribute_step': {'primitive': 'd3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon', 'hyperparameters': {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/PrimaryKey', 'https://metadata.datadrivendiscovery.org/types/Attribute')}}, 'profiler_step': {'primitive': 'd3m.primitives.schema_discovery.Profiler.DSBOX', 'hyperparameters': {}}, 'clean_step': {'primitive': 'd3m.primitives.data_cleaning.CleaningFeaturizer.DSBOX', 'hyperparameters': {}}, 'encode_step': {'primitive': 'd3m.primitives.data_preprocessing.Encoder.DSBOX', 'hyperparameters': {}}, 'corex_step': {'primitive': 'd3m.primitives.feature_construction.corex_text.CorexText', 'hyperparameters': {}}, 'to_numeric_step': {'primitive': 'd3m.primitives.data_transformation.ToNumeric.DSBOX', 'hyperparameters': {}}, 'impute_step': {'primitive': 'd3m.primitives.data_preprocessing.MeanImputation.DSBOX', 'hyperparameters': {}}, 'scaler_step': {'primitive': 'd3m.primitives.data_preprocessing.DoNothing.DSBOX', 'hyperparameters': {}}, 'data': {'primitive': 'd3m.primitives.data_preprocessing.DoNothing.DSBOX', 'hyperparameters': {}}, 'pre_target': {'primitive': 'd3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon', 'hyperparameters': {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',)}}, 'target': {'primitive': 'd3m.primitives.data_transformation.ToNumeric.DSBOX', 'hyperparameters': {'drop_non_numeric_columns': False}}, 'feature_selector_step': {'primitive': 'd3m.primitives.feature_selection.generic_univariate_select.SKlearn', 'hyperparameters': 
{'use_semantic_types': True, 'return_result': 'new', 'add_index_columns': True, 'mode': 'percentile', 'param': 10}}, 'model_step': {'primitive': 'd3m.primitives.classification.random_forest.SKlearn', 'hyperparameters': {'use_semantic_types': True, 'return_result': 'new', 'add_index_columns': True, 'bootstrap': False, 'max_depth': None, 'min_samples_leaf': 4, 'min_samples_split': 5, 'max_features': 'sqrt', 'n_estimators': 100}}}
pipeline_type = "classification"
test_dataset_id = "38_sick" | 629.25 | 2,434 | 0.769567 |
59ac93d7a03868bcd419cdd7953b07cc579178c7 | 3,332 | py | Python | rich/measure.py | DarkCode01/rich | c4287eff031d03addac79fd9035e146c7d868b78 | [
"MIT"
] | 2 | 2021-05-11T19:27:06.000Z | 2021-05-12T06:08:08.000Z | rich/measure.py | DarkCode01/rich | c4287eff031d03addac79fd9035e146c7d868b78 | [
"MIT"
] | 2 | 2020-05-09T12:42:28.000Z | 2020-05-09T14:44:04.000Z | rich/measure.py | DarkCode01/rich | c4287eff031d03addac79fd9035e146c7d868b78 | [
"MIT"
] | 1 | 2020-08-14T13:47:25.000Z | 2020-08-14T13:47:25.000Z | from operator import itemgetter
from typing import Iterable, NamedTuple, TYPE_CHECKING, Union
from . import errors
from .protocol import is_renderable
from .segment import Segment
if TYPE_CHECKING:
from .console import Console, RenderableType
class Measurement(NamedTuple):
    """Stores the minimum and maximum widths (in characters) required to render an object."""

    minimum: int
    maximum: int

    @property
    def span(self) -> int:
        """How many characters lie between the minimum and maximum widths."""
        return self.maximum - self.minimum

    def normalize(self) -> "Measurement":
        """Clamp the minimum to >= 0 and ensure maximum >= minimum."""
        lower = self.minimum if self.minimum > 0 else 0
        upper = self.maximum if self.maximum > lower else lower
        return Measurement(lower, upper)

    def with_maximum(self, width: int) -> "Measurement":
        """Get a RenderableWith where the widths are <= width.

        Args:
            width (int): Maximum desired width.

        Returns:
            RenderableWidth: new RenderableWidth object.
        """
        lower, upper = self
        if lower > width:
            lower = width
        if upper > width:
            upper = width
        return Measurement(lower, upper)

    @classmethod
    def get(
        cls, console: "Console", renderable: "RenderableType", max_width: int
    ) -> "Measurement":
        """Measure how wide a renderable needs to be.

        Args:
            console (~rich.console.Console): Console instance.
            renderable (RenderableType): An object that may be rendered with Rich.
            max_width (int): The maximum width available.

        Raises:
            errors.NotRenderableError: If the object is not renderable.

        Returns:
            Measurement: Range of character widths required to render the object.
        """
        if isinstance(renderable, str):
            renderable = console.render_str(renderable)
        if not is_renderable(renderable):
            raise errors.NotRenderableError(
                f"Unable to get render width for {renderable!r}; "
                "a str, Segment, or object with __console__ method is required"
            )
        measure_method = getattr(renderable, "__measure__", None)
        if measure_method is None:
            # Renderables without an explicit measure can use any width.
            return Measurement(1, max_width)
        fitted = measure_method(console, max_width).with_maximum(max_width)
        return fitted.normalize()
def measure_renderables(
    console: "Console", renderables: Iterable["RenderableType"], max_width: int
) -> "Measurement":
    """Measure the width range needed to fit every given renderable.

    Args:
        console (~rich.console.Console): Console instance.
        renderables (Iterable[RenderableType]): One or more renderable objects.
        max_width (int): The maximum width available.

    Returns:
        Measurement: Range of character widths required to contain all
            given renderables.
    """
    sizes = [Measurement.get(console, item, max_width) for item in renderables]
    # The combined requirement is the widest minimum and the widest maximum.
    widest_minimum = max(size.minimum for size in sizes)
    widest_maximum = max(size.maximum for size in sizes)
    return Measurement(widest_minimum, widest_maximum)
| 33.32 | 111 | 0.641657 |
1809598bc7900c9e8aea2a8501895b78deca3282 | 11,772 | py | Python | dedup/imgproc.py | Kahsolt/pic-dedup | 91bc2b6e979b57719103b5c62b859311bd37fdd0 | [
"WTFPL"
] | null | null | null | dedup/imgproc.py | Kahsolt/pic-dedup | 91bc2b6e979b57719103b5c62b859311bd37fdd0 | [
"WTFPL"
] | null | null | null | dedup/imgproc.py | Kahsolt/pic-dedup | 91bc2b6e979b57719103b5c62b859311bd37fdd0 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
import logging
import pickle
import gzip
from io import BytesIO
import numpy as np
from enum import IntEnum
from PIL import Image, ImageFilter, ImageDraw, ImageFont
from .settings import *
from .models import *
__all__ = ['SimRatioMatrix', 'Feature', 'pkl', 'HWRatio', 'resize_by_hwlimit', 'high_contrast_bw_hexstr']
class SimRatioMatrix:
    """Auto-growing square matrix of pairwise similarity ratios.

    Cell ``[i, j]`` holds a ``(TYPES_DEPTH, MASK_DEPTH)`` float32 sub-array:
    one similarity value per comparison type, with and without the round
    mask.  Indexing past the current bounds transparently grows the matrix.
    """

    TYPES_DEPTH = 3  # 3 types of sim_ratio: edge_avghash, grey_avghash, grey_absdiff
    MASK_DEPTH = 2  # whether use round_mask

    def __init__(self, size):
        self.size = size
        self.sr_mat = np.full((size, size, self.TYPES_DEPTH, self.MASK_DEPTH), 0.0, dtype=np.float32)
        self.modified = False  # indicates pkl('save')

    def __setitem__(self, xy, val):
        try:
            self.sr_mat[xy] = val
        except IndexError:
            # Fix: indices are 0-based, so index i requires a matrix of size
            # i + 1.  The original expand(max(xy)) was off by one, leaving the
            # matrix still too small and re-raising IndexError on the retry.
            self.expand(max(xy) + 1)
            self.sr_mat[xy] = val
        self.modified = True

    def __getitem__(self, xy):
        try:
            return self.sr_mat[xy]
        except IndexError:
            self.expand(max(xy) + 1)  # same off-by-one fix as __setitem__
            self.modified = True
            return self.sr_mat[xy]

    def expand(self, newsize):
        """Grow the matrix to ``newsize`` x ``newsize``, preserving existing data."""
        if newsize <= self.size:
            return
        logging.debug("[%s] expand from %dx%d to %dx%d"
                      % (self.__class__.__name__, self.size, self.size, newsize, newsize))
        sr_mat = np.full((newsize, newsize, self.TYPES_DEPTH, self.MASK_DEPTH), 0.0, dtype=np.float32)
        _sz = self.size
        sr_mat[0:_sz, 0:_sz] = self.sr_mat[0:_sz, 0:_sz]
        self.sr_mat = sr_mat
        self.size = newsize

    @staticmethod
    def from_bytes(bytes):
        """Inverse of :meth:`to_bytes`.

        WARNING: deserializes with pickle -- unpickling can execute arbitrary
        code, so only feed it trusted data.
        """
        buf = BytesIO(bytes)
        with gzip.GzipFile(mode='rb', fileobj=buf) as fh:
            return pickle.loads(fh.read())

    def to_bytes(self) -> bytes:
        """Serialize the whole object as gzip-compressed pickle bytes."""
        buf = BytesIO()
        with gzip.GzipFile(mode='wb', fileobj=buf) as fh:
            fh.write(pickle.dumps(self, protocol=4))
        return buf.getvalue()
class PrincipleHues:
    """Extracts and holds the dominant colours ("principle hues") of an image."""

    def __init__(self, phs):
        if not isinstance(phs, list): raise TypeError
        self.phs = phs  # list of 3-tuples [(R, G, B)], most dominant first
        self.phs_hexstr = [rgb2hexstr(ph) for ph in phs]

    @staticmethod
    def from_image(img, count):
        """Derive up to ``count`` principle hues from a PIL image.

        The image is shrunk, hue-quantized in HSV space, mode-filtered, and
        then the most frequent colours that are sufficiently distinct from
        each other are kept.
        """
        # thumbnailize
        _hw = FEATURE_VECTOR_HW
        img = resize_by_hwlimit(img, _hw)
        # mosaic filter
        # keep 0 as unchanged (H = S = 0 means pure greyness in HSV space)
        # offset by 0.5 for linear interplotion
        img = img.convert('HSV')
        hsv = list(img.split())
        for i in range(len(hsv)):
            _ratio = 256 // REDUCED_HUE_SCALES[i]
            hsv[i] = hsv[i].point(lambda x: x and int((x // _ratio + 0.5) * _ratio) or 0)
        img = Image.merge('HSV', hsv)
        # mode filter to reduce hues
        img = img.filter(ImageFilter.ModeFilter((_hw >> 3) + 1))
        img = img.convert('RGB')
        # decide priciple ones: walk colours from most to least frequent,
        # skipping any too close to an already chosen hue
        phs = [ ]
        for hue in [rgb for _, rgb in sorted(img.getcolors(_hw ** 2), reverse=True)]:
            ignore = False
            for ph in phs:
                if rgb_distance(hue, ph) < HUE_DISTINGUISH_DISTANCE:
                    ignore = True
                    break
            if ignore: continue
            phs.append(hue)
            if len(phs) == count: break
        return PrincipleHues(phs)

    def to_image(self):
        """Render the hues as a vertical strip of labelled colour swatches."""
        _hw = 50
        img = Image.new('RGB', (_hw, _hw * len(self.phs)))
        draw = ImageDraw.Draw(img)
        font = ImageFont.truetype('arial.ttf')
        for i, ph in enumerate(self.phs):
            xy = (0, i * _hw), (_hw, (i + 1) * _hw)
            draw.rectangle(xy, fill=ph)
            xy = (0, i * _hw)
            # Label each swatch with its hex code in a readable contrast colour.
            draw.text(xy, self.phs_hexstr[i], high_contrast_bw_hexstr(ph), font=font)
        return img

    def compability(self, hue):
        """Score in [0, 1] of how well ``hue`` matches the principle hues.

        Closest-hue distance is scaled, then damped by how far down the
        dominance ranking that closest hue sits.
        NOTE(review): assumes ``self.phs`` is non-empty -- an empty list
        raises here (unbound ``alpha`` / division by zero); confirm callers.
        """
        mindist = HUE_MAX_DISTANCE + 1
        _alpha, _portion = 1.0, 0.6 / len(self.phs)
        for ph in self.phs:
            dist = rgb_distance(ph, hue)
            if dist < mindist:
                mindist, alpha = dist, _alpha
            _alpha -= _portion
        return (1 - (mindist / HUE_MAX_DISTANCE)) * alpha
class FeatureVector:
    """Thumbnail-sized greyscale matrix used as an image fingerprint.

    Holds three representations:
        fv        -- raw pixel values as float16 (NONE_PIXEL_PADDING marks
                     cells outside the real image)
        fv_bin    -- ternary sign map: 0 = padding, 1 = <= mean, -1 = > mean
        fv_masked -- lazily computed circular crop of fv
    """

    def __init__(self, featvec):
        if not isinstance(featvec, np.ndarray):
            raise TypeError
        if len(featvec.shape) != 2:
            raise ValueError
        self.fv = featvec.astype(np.float16)
        self.fv_len = np.prod(self.fv.shape)  # flattened length
        valid = self.fv[self.fv != NONE_PIXEL_PADDING]
        # guard: an all-padding matrix made mean() return NaN and crash the
        # uint8 cast in the original; fall back to 0 in that case
        _mean = np.uint8(valid.mean()) if valid.size else np.uint8(0)
        # FIXME: x <= mean is risky for single color image
        f = lambda x: 0 if x == NONE_PIXEL_PADDING else (1 if x <= _mean else -1)
        self.fv_bin = np.array([[f(x) for x in row] for row in featvec], dtype=np.int8)
        self.fv_masked = None  # computed lazily by round_mask()

    @staticmethod
    def from_image(img, hw):
        """Build a FeatureVector from a PIL image scaled to hw x hw
        (square-padded first, so the aspect ratio is preserved)."""
        imgpad = square_padding(img)
        thumbnail = imgpad.resize((hw, hw), Image.ANTIALIAS)
        im = np.array(thumbnail, dtype=np.uint8)
        return FeatureVector(im)

    def to_image(self):
        """Render fv back into a greyscale PIL image (debugging aid).

        NOTE(review): fv is float16; some Pillow versions cannot build an
        image from a float16 array -- confirm against the pinned Pillow.
        """
        return Image.fromarray(self.fv).convert('L')

    def similarity_by_avghash(self, other):
        """Similarity in [0, 1] based on the ternary average-hash maps.

        Per-cell cost is |a - b| for a, b in {-1, 0, 1}: 2 when the signs
        disagree, 1 when exactly one side is padding, 0 otherwise.
        """
        if self is other:
            return 1.0
        dist = abs(self.fv_bin - other.fv_bin).sum()
        return 1 - (dist / 2 / self.fv_len)

    def similarity_by_absdiff(self, other):
        """Similarity in [0, 1] from mean absolute greyscale difference.

        Cells where both sides have real pixels contribute |a - b|; cells
        where exactly one side is padding contribute a flat 127.

        NOTE(review): the original vectorized line diffed fv_bin (values in
        {-1, 0, 1}) while normalizing by 255, making the result ~1.0 for any
        pair; both the 255 divisor and the author's commented-out loop show
        the raw fv was intended, which is restored here.
        """
        if self is other:
            return 1.0
        pad_a = self.fv == NONE_PIXEL_PADDING
        pad_b = other.fv == NONE_PIXEL_PADDING
        both = ~pad_a & ~pad_b
        dist = np.abs(self.fv[both].astype(np.float32)
                      - other.fv[both].astype(np.float32)).sum()
        dist += 127.0 * np.count_nonzero(pad_a ^ pad_b)
        return 1 - (dist / 255 / self.fv_len)

    def round_mask(self):
        """Return a FeatureVector of fv restricted to the inscribed circle;
        everything outside the circle becomes padding. The masked matrix is
        cached on the instance after the first call."""
        mfv = self.fv_masked
        if mfv is None:
            r = self.fv.shape[0] / 2
            # vectorized replacement of the original per-cell double loop;
            # same predicate, same truncating uint8 cast
            rows, cols = np.ogrid[0:self.fv.shape[0], 0:self.fv.shape[1]]
            inside = (r - rows) ** 2 + (r - cols) ** 2 <= r ** 2
            mfv = np.where(inside, self.fv, NONE_PIXEL_PADDING).astype(np.uint8)
            self.fv_masked = mfv
        return FeatureVector(mfv)
class Feature:
    """Bundle of everything extracted from one image: its principal hues
    plus greyscale and edge (contour) feature vectors."""

    def __init__(self):
        self.principle_hues = None  # instance of PrincipleHues
        self.featvec_edge = None    # instance of FeatureVector (contour image)
        self.featvec_grey = None    # instance of FeatureVector (greyscale image)

    @staticmethod
    def featurize(img, hw=FEATURE_VECTOR_HW, phs=PRINCIPLE_HUES):
        """Build a Feature from a PIL image, a numpy array, or a file path."""
        if isinstance(img, np.ndarray):
            img = Image.fromarray(img)
        elif isinstance(img, str):
            img = Image.open(img)
        elif not isinstance(img, Image.Image):
            raise TypeError
        rgb = img.convert('RGB')
        feature = Feature()
        feature.principle_hues = PrincipleHues.from_image(rgb, phs)
        grey = rgb.convert('L')
        feature.featvec_grey = FeatureVector.from_image(grey, hw)
        feature.featvec_grey._parent = feature  # backref of Feature
        edge = grey.filter(ImageFilter.CONTOUR)  # .filter(ImageFilter.EDGE_ENHANCE_MORE)
        feature.featvec_edge = FeatureVector.from_image(edge, hw)
        feature.featvec_edge._parent = feature  # backref of Feature
        return feature

    @staticmethod
    def from_bytes(bytes):
        """Inverse of to_bytes(): unpickle a gzip-compressed Feature."""
        return pickle.loads(gzip.decompress(bytes))

    def to_bytes(self) -> bytes:
        """Serialize with pickle (protocol 4) and gzip-compress."""
        return gzip.compress(pickle.dumps(self, protocol=4))
def pkl(what='load', model=None):
    """Load or save the pickled payload attached to a model.

    For a Folder: 'load' materializes fld.sr_matrix from fld.sr_matrix_pkl
    (creating and persisting a fresh SimRatioMatrix on first access);
    'save' serializes fld.sr_matrix back into fld.sr_matrix_pkl.
    For a Picture: 'load' materializes pic.feature from pic.feature_pkl
    (featurizing the image file on first access).
    """
    if not model:
        return
    if isinstance(model, Folder):
        fld = model
        if what == 'load':
            sr_mat = fld.sr_matrix_pkl
            if not sr_mat:
                # first access: size the matrix at ~1.5x the picture count
                with db_lock:
                    sz = int(fld.pictures.count() * 1.5)
                sr_mat = SimRatioMatrix(sz).to_bytes()
                fld.sr_matrix_pkl = sr_mat
                save(fld)
            fld.sr_matrix = SimRatioMatrix.from_bytes(sr_mat)
        elif what == 'save':
            fld.sr_matrix_pkl = fld.sr_matrix.to_bytes()
            save(fld)
    elif isinstance(model, Picture):
        pic = model
        if what == 'load':
            ft = pic.feature_pkl
            if not ft:
                # BUG FIX: featurize() returns a Feature object, but
                # feature_pkl must hold serialized bytes -- the original
                # stored the object itself, and Feature.from_bytes() below
                # would then fail. Serialize first, mirroring the Folder
                # branch above.
                ft = Feature.featurize(pic.path).to_bytes()
                pic.feature_pkl = ft
                save(pic)
            pic.feature = Feature.from_bytes(ft)
        # NOTE(review): there is deliberately no 'save' branch for Picture in
        # the original -- presumably features are immutable after load;
        # confirm before relying on pkl('save', pic).
class HWRatio(IntEnum):
    """Common aspect ratios encoded as floor(100 * height / width).

    Items are named <shape>_<width>_<height>; the integer value is the
    height/width ratio scaled by 100 and truncated.
    """
    SQUARE_1_1 = 100       # 1:1
    HORIZONTAL_4_3 = 75    # 100 * 3 // 4
    HORIZONTAL_3_2 = 66    # 100 * 2 // 3
    HORIZONTAL_16_9 = 56   # 100 * 9 // 16
    VERTICLE_3_4 = 133     # 100 * 4 // 3
    VERTICLE_2_3 = 150     # 100 * 3 // 2
    VERTICLE_9_16 = 177    # 100 * 16 // 9
def resize_by_hwlimit(img, hwlimit=640, sample=Image.NEAREST):
    """Shrink `img` so its longer side is at most `hwlimit`, keeping the
    aspect ratio; images already within the limit are returned untouched.

    The default NEAREST resample keeps original colors instead of
    synthesizing interpolated ones.
    """
    w, h = img.size
    if max(w, h) <= hwlimit:
        return img
    if w >= h:
        target = (hwlimit, h * hwlimit // w)
    else:
        target = (w * hwlimit // h, hwlimit)
    return img.resize(target, sample)
def square_padding(img):
    """Paste the image onto the smallest square canvas that contains it,
    filling the margins with NONE_PIXEL_PADDING.

    Real pixels that already equal the padding value are bumped down by one
    beforehand, so padding stays unambiguous afterwards. Assumes a
    single-channel (greyscale) image -- the array is treated as 2-D.
    """
    w, h = img.size
    if w == h:
        return img
    im = np.array(img, dtype=np.uint8)
    im[im == NONE_PIXEL_PADDING] = NONE_PIXEL_PADDING - 1
    side = max(w, h)
    canvas = np.full((side, side), NONE_PIXEL_PADDING, dtype=np.uint8)
    if w > h:
        span = (side * h) // w        # == h, since side == w
        top = (side - span) >> 1      # center vertically
        canvas[top:top + span, 0:w] = im[0:h, 0:w]
    else:
        span = (side * w) // h        # == w, since side == h
        left = (side - span) >> 1     # center horizontally
        canvas[0:h, left:left + span] = im[0:h, 0:w]
    return Image.fromarray(canvas).convert('L')
def rgb2grey(rgb) -> int:
    """Convert an (R, G, B) triple to a grey level via the ITU-R 601-2
    luma transform (integer weights summing to 1000)."""
    weights = (299, 587, 114)
    total = sum(float(c) * w for c, w in zip(rgb, weights))
    return int(total // 1000)
def rgb2hexstr(rgb) -> str:
    """Format an (R, G, B) triple as a lowercase '#rrggbb' string."""
    r, g, b = int(rgb[0]), int(rgb[1]), int(rgb[2])
    return f'#{r:02x}{g:02x}{b:02x}'
def rgb_distance(rgb1, rgb2) -> float:
    """"Redmean" approximation of perceptual color distance between two RGB
    triples (weights the channel differences by the mean red level).

    see: https://blog.csdn.net/qq_16564093/article/details/80698479
    """
    R = rgb1[0] - rgb2[0]
    G = rgb1[1] - rgb2[1]
    B = rgb1[2] - rgb2[2]
    rmean = (rgb1[0] + rgb2[0]) / 2
    weighted = ((2 + rmean / 256) * (R ** 2)
                + 4 * (G ** 2)
                + (2 + (255 - rmean) / 256) * (B ** 2))
    return float(np.sqrt(weighted))
def high_contrast_bw_hexstr(rgb):
    """Return white ('#FFFFFF') for dark backgrounds and black ('#000000')
    for light ones, judged by the grey level of `rgb`.

    Rewritten from the fragile `cond and a or b` idiom (which silently
    breaks if the truthy branch is ever falsy) to a conditional expression.
    """
    return '#FFFFFF' if rgb2grey(rgb) <= 192 else '#000000'
def hsv2rgb(hsv) -> (int, int, int):
    """Convert an HSV triple (each channel scaled to 0..255) to an
    (R, G, B) triple of 0..255 integers."""
    h = hsv[0] / 255.0 * 360
    s = hsv[1] / 255.0
    v = hsv[2] / 255.0
    sector = h / 60.0
    base = np.floor(sector)
    f = sector - base                 # fractional position inside the sector
    p = v * (1 - s)
    q = v * (1 - f * s)
    t = v * (1 - (1 - f) * s)
    # one (r, g, b) recipe per 60-degree hue sector
    table = ((v, t, p), (q, v, p), (p, v, t),
             (p, q, v), (t, p, v), (v, p, q))
    r, g, b = table[int(base) % 6]
    return round(r * 255), round(g * 255), round(b * 255)
def rgb2hsv(rgb) -> (int, int, int):
    """Convert an (R, G, B) triple of 0..255 integers to an HSV triple with
    every channel rescaled to 0..255."""
    r = float(rgb[0] / 255.0)
    g = float(rgb[1] / 255.0)
    b = float(rgb[2] / 255.0)
    mx = max(r, g, b)
    mn = min(r, g, b)
    df = mx - mn
    if mx == mn:
        h = 0  # achromatic: hue is undefined, use 0
    elif mx == r:
        h = (60 * ((g - b) / df) + 360) % 360
    elif mx == g:
        h = (60 * ((b - r) / df) + 120) % 360
    else:  # mx == b
        h = (60 * ((r - g) / df) + 240) % 360
    s = 0 if mx == 0 else df / mx
    return round(h / 360 * 255), round(s * 255), round(mx * 255)
| 32.163934 | 129 | 0.581804 |
7993d32d8ff416580b9094c58d11cba7702453d0 | 19,025 | py | Python | test/api/test_zone.py | choonho/inventory | cc89757490d28fecb7ffccdfd6f89d4c0aa40da5 | [
"Apache-2.0"
] | null | null | null | test/api/test_zone.py | choonho/inventory | cc89757490d28fecb7ffccdfd6f89d4c0aa40da5 | [
"Apache-2.0"
] | null | null | null | test/api/test_zone.py | choonho/inventory | cc89757490d28fecb7ffccdfd6f89d4c0aa40da5 | [
"Apache-2.0"
] | null | null | null | import os
import uuid
import random
import unittest
from langcodes import Language
from spaceone.core import config
from spaceone.core import pygrpc
from spaceone.core import utils
from spaceone.core.unittest.runner import RichTestRunner
from google.protobuf.json_format import MessageToDict
def random_string():
return uuid.uuid4().hex
class TestZone(unittest.TestCase):
config = config.load_config(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
identity_v1 = None
inventory_v1 = None
domain = None
domain_owner = None
owner_id = None
owner_pw = None
token = None
@classmethod
def setUpClass(cls):
super(TestZone, cls).setUpClass()
endpoints = cls.config.get('ENDPOINTS', {})
cls.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity', {}).get('v1'), version='v1')
cls.inventory_v1 = pygrpc.client(endpoint=endpoints.get('inventory', {}).get('v1'), version='v1')
cls._create_domain()
cls._create_domain_owner()
cls._issue_owner_token()
@classmethod
def tearDownClass(cls):
super(TestZone, cls).tearDownClass()
cls.identity_v1.DomainOwner.delete({
'domain_id': cls.domain.domain_id,
'owner_id': cls.owner_id
})
if cls.domain:
cls.identity_v1.Domain.delete({'domain_id': cls.domain.domain_id})
@classmethod
def _create_domain(cls):
name = utils.random_string()
param = {
'name': name,
'tags': {utils.random_string(): utils.random_string(), utils.random_string(): utils.random_string()},
'config': {
'aaa': 'bbbb'
}
}
cls.domain = cls.identity_v1.Domain.create(param)
print(f'domain_id: {cls.domain.domain_id}')
print(f'domain_name: {cls.domain.name}')
@classmethod
def _create_domain_owner(cls):
cls.owner_id = utils.random_string()[0:10]
cls.owner_pw = 'qwerty'
param = {
'owner_id': cls.owner_id,
'password': cls.owner_pw,
'name': 'Steven' + utils.random_string()[0:5],
'timezone': 'utc+9',
'email': 'Steven' + utils.random_string()[0:5] + '@mz.co.kr',
'mobile': '+821026671234',
'domain_id': cls.domain.domain_id
}
owner = cls.identity_v1.DomainOwner.create(
param
)
cls.domain_owner = owner
print(f'owner_id: {cls.owner_id}')
print(f'owner_pw: {cls.owner_pw}')
@classmethod
def _issue_owner_token(cls):
token_param = {
'credentials': {
'user_type': 'DOMAIN_OWNER',
'user_id': cls.owner_id,
'password': cls.owner_pw
},
'domain_id': cls.domain.domain_id
}
issue_token = cls.identity_v1.Token.issue(token_param)
cls.token = issue_token.access_token
print(f'token: {cls.token}')
def setUp(self):
self.regions = []
self.region = None
self.zones = []
self.zone = None
self.users = []
self.user = None
def tearDown(self):
for zone in self.zones:
self.inventory_v1.Zone.delete(
{'zone_id': zone.zone_id,
'domain_id': self.domain.domain_id},
metadata=(('token', self.token),)
)
for region in self.regions:
self.inventory_v1.Region.delete(
{'region_id': region.region_id,
'domain_id': self.domain.domain_id},
metadata=(('token', self.token),)
)
for user in self.users:
self.identity_v1.User.delete(
{'user_id': user.user_id,
'domain_id': self.domain.domain_id},
metadata=(('token', self.token),)
)
def _create_user(self, user_id=None):
lang_code = random.choice(['zh-hans', 'jp', 'ko', 'en', 'es'])
language = Language.get(lang_code)
user_id = utils.random_string()[0:10] if user_id is None else user_id
param = {
'user_id': user_id,
'domain_id': self.domain.domain_id,
'password': 'qwerty123',
'name': 'Steven' + utils.random_string()[0:5],
'language': language.__str__(),
'timezone': 'Asia/Seoul',
'tags': {'aa': 'bb'},
'email': 'Steven' + utils.random_string()[0:5] + '@mz.co.kr',
'mobile': '+821026671234',
'group': 'group-id',
}
user = self.identity_v1.User.create(
param,
metadata=(('token', self.token),)
)
self.user = user
self.users.append(user)
self.assertEqual(self.user.name, param['name'])
def _create_region(self, name=None):
""" Create Region
"""
if not name:
name = random_string()
params = {
'name': name,
'domain_id': self.domain.domain_id
}
self.region = self.inventory_v1.Region.create(params,
metadata=(('token', self.token),)
)
self.regions.append(self.region)
def test_create_zone(self, name=None, region_id=None):
""" Create Zone
"""
if region_id is None:
self._create_region()
region_id = self.region.region_id
if not name:
name = random_string()
params = {
'name': name,
'region_id': region_id,
'domain_id': self.domain.domain_id
}
self.zone = self.inventory_v1.Zone.create(params,
metadata=(('token', self.token),)
)
self.zones.append(self.zone)
self.assertEqual(self.zone.name, name)
def test_update_zone_name(self):
self.test_create_zone()
name = random_string()
param = { 'zone_id': self.zone.zone_id,
'name': name,
'domain_id': self.domain.domain_id,
}
self.zone = self.inventory_v1.Zone.update(param,
metadata=(('token', self.token),)
)
self.assertEqual(self.zone.name, name)
def test_update_zone_tags(self):
self.test_create_zone()
tags = {
random_string(): random_string(),
random_string(): random_string()
}
param = { 'zone_id': self.zone.zone_id,
'tags': tags,
'domain_id': self.domain.domain_id,
}
self.zone = self.inventory_v1.Zone.update(param,
metadata=(('token', self.token),)
)
self.assertEqual(MessageToDict(self.zone.tags), tags)
def test_get_zone(self):
name = 'test-zone'
self.test_create_zone(name)
param = {
'zone_id': self.zone.zone_id,
'domain_id': self.domain.domain_id
}
self.zone = self.inventory_v1.Zone.get(param,
metadata=(('token', self.token),)
)
self.assertEqual(self.zone.name, name)
def test_add_member_zone(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
zone_admin = self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
user_info = MessageToDict(zone_admin.user_info)
self.assertEqual(user_info.get('user_id'), self.user.user_id)
def test_add_member_not_exist_user(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': 'test',
'domain_id': self.domain.domain_id
}
with self.assertRaises(Exception):
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
def test_add_member_duplicate_user(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param,metadata=(('token', self.token),))
with self.assertRaises(Exception):
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
def test_add_member_not_exist_zone(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': 'test',
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
with self.assertRaises(Exception):
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
def test_modify_member_zone(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
zone_member = self.inventory_v1.Zone.modify_member(param, metadata=(('token', self.token),))
user_info = MessageToDict(zone_member.user_info)
self.assertEqual(user_info.get('user_id'), self.user.user_id)
def test_modify_member_zone_labels(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
labels = ['developer', 'operator', 'operator']
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id,
'labels': labels
}
zone_member = self.inventory_v1.Zone.modify_member(param, metadata=(('token', self.token),))
print(zone_member.labels)
user_info = MessageToDict(zone_member.user_info)
self.assertEqual(user_info.get('user_id'), self.user.user_id)
def test_modify_member_not_exist_user(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': 'test',
'domain_id': self.domain.domain_id,
}
with self.assertRaises(Exception):
self.inventory_v1.Zone.modify_member(param, metadata=(('token', self.token),))
def test_modify_member_not_exist_zone(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': 'test',
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id,
}
with self.assertRaises(Exception):
self.inventory_v1.Zone.modify_member(param, metadata=(('token', self.token),))
def test_remove_member_region(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.remove_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'domain_id': self.domain.domain_id
}
zone_members = self.inventory_v1.Zone.list_members(param, metadata=(('token', self.token),))
self.assertEqual(0, zone_members.total_count)
def test_remove_member_not_exist_user(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': 'test',
'domain_id': self.domain.domain_id
}
with self.assertRaises(Exception):
self.inventory_v1.Zone.remove_member(param, metadata=(('token', self.token),))
def test_list_members_zone_id(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'domain_id': self.domain.domain_id
}
zone_members = self.inventory_v1.Zone.list_members(param, metadata=(('token', self.token),))
self.assertEqual(1, zone_members.total_count)
def test_list_members_zone_user_id(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
zone_members = self.inventory_v1.Zone.list_members(param, metadata=(('token', self.token),))
self.assertEqual(1, zone_members.total_count)
def test_list_members_zone_query(self):
self.test_create_zone()
self._create_user()
param = {
'zone_id': self.zone.zone_id,
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
}
self.inventory_v1.Zone.add_member(param, metadata=(('token', self.token),))
param = {
'zone_id': self.zone.zone_id,
'domain_id': self.domain.domain_id,
'query': {
'filter': [
{'k': 'user_id',
'v': self.user.user_id,
'o': 'eq'}
]
}
}
zone_members = self.inventory_v1.Zone.list_members(param, metadata=(('token', self.token),))
self.assertEqual(1, zone_members.total_count)
def test_list_region_id(self):
self.test_create_zone()
self.test_create_zone(region_id=self.region.region_id)
param = {
'region_id': self.region.region_id,
'domain_id': self.domain.domain_id
}
zones = self.inventory_v1.Zone.list(param, metadata=(('token', self.token),))
self.assertEqual(2, zones.total_count)
def test_list_zone_id(self):
self.test_create_zone()
self.test_create_zone()
param = {
'zone_id': self.zone.zone_id,
'domain_id': self.domain.domain_id
}
zones = self.inventory_v1.Zone.list(param, metadata=(('token', self.token),))
self.assertEqual(1, zones.total_count)
def test_list_name(self):
self.test_create_zone()
self.test_create_zone()
param = {
'name': self.zone.name,
'domain_id': self.domain.domain_id
}
zones = self.inventory_v1.Zone.list(param, metadata=(('token', self.token),))
self.assertEqual(1, zones.total_count)
def test_list_query(self):
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
param = {
'domain_id': self.domain.domain_id,
'query': {
'filter': [
{
'k': 'zone_id',
'v': list(map(lambda zone: zone.zone_id, self.zones)),
'o': 'in'
}
]
}
}
zones = self.inventory_v1.Zone.list(param, metadata=(('token', self.token),))
self.assertEqual(len(self.zones), zones.total_count)
def test_list_query_2(self):
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
self.test_create_zone()
param = {
'domain_id': self.domain.domain_id,
'query': {
'minimal': True
}
}
zones = self.inventory_v1.Zone.list(param, metadata=(('token', self.token),))
print(zones.results)
self.assertEqual(len(self.zones), zones.total_count)
def test_stat_zones(self):
self.test_list_query()
params = {
'domain_id': self.domain.domain_id,
'query': {
'aggregate': {
'group': {
'keys': [{
'key': 'zone_id',
'name': 'Id'
}],
'fields': [{
'operator': 'count',
'name': 'Count'
}]
}
},
'sort': {
'name': 'Count',
'desc': True
}
}
}
result = self.inventory_v1.Zone.stat(
params, metadata=(('token', self.token),))
print(result)
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
| 30.537721 | 113 | 0.538449 |
77c9c8e314362593a96866983edaaa8fca59361f | 18,126 | py | Python | vispy/visuals/line/line.py | gouarin/vispy | 877433e83b9b77e6f7d1105918364122cb8503a7 | [
"BSD-3-Clause"
] | null | null | null | vispy/visuals/line/line.py | gouarin/vispy | 877433e83b9b77e6f7d1105918364122cb8503a7 | [
"BSD-3-Clause"
] | null | null | null | vispy/visuals/line/line.py | gouarin/vispy | 877433e83b9b77e6f7d1105918364122cb8503a7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Line visual implementing Agg- and GL-based drawing modes.
"""
from __future__ import division
import numpy as np
from ... import gloo, glsl
from ...color import Color, ColorArray, get_colormap
from ...ext.six import string_types
from ..shaders import Function
from ..visual import Visual, CompoundVisual
from ...util.profiler import Profiler
from .dash_atlas import DashAtlas
vec2to4 = Function("""
vec4 vec2to4(vec2 inp) {
return vec4(inp, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 inp) {
return vec4(inp, 1);
}
""")
"""
TODO:
* Agg support is very minimal; needs attention.
* Optimization--avoid creating new buffers, avoid triggering program
recompile.
"""
joins = {'miter': 0, 'round': 1, 'bevel': 2}
caps = {'': 0, 'none': 0, '.': 0,
'round': 1, ')': 1, '(': 1, 'o': 1,
'triangle in': 2, '<': 2,
'triangle out': 3, '>': 3,
'square': 4, '=': 4, 'butt': 4,
'|': 5}
class LineVisual(CompoundVisual):
"""Line visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
method : str
Mode to use for drawing.
* "agg" uses anti-grain geometry to draw nicely antialiased lines
with proper joins and endcaps.
* "gl" uses OpenGL's built-in line rendering. This is much faster,
but produces much lower-quality results and is not guaranteed to
obey the requested line width or join/endcap styles.
antialias : bool
Enables or disables antialiasing.
For method='gl', this specifies whether to use GL's line smoothing,
which may be unavailable or inconsistent on some platforms.
"""
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', method='gl', antialias=False):
self._line_visual = None
self._changed = {'pos': False, 'color': False, 'width': False,
'connect': False}
self._pos = None
self._color = None
self._width = None
self._connect = None
self._bounds = None
self._antialias = None
self._method = 'none'
CompoundVisual.__init__(self, [])
# don't call subclass set_data; these often have different
# signatures.
LineVisual.set_data(self, pos=pos, color=color, width=width,
connect=connect)
self.antialias = antialias
self.method = method
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = bool(aa)
self.update()
@property
def method(self):
"""The current drawing method"""
return self._method
@method.setter
def method(self, method):
if method not in ('agg', 'gl'):
raise ValueError('method argument must be "agg" or "gl".')
if method == self._method:
return
self._method = method
if self._line_visual is not None:
self.remove_subvisual(self._line_visual)
if method == 'gl':
self._line_visual = _GLLineVisual(self)
elif method == 'agg':
self._line_visual = _AggLineVisual(self)
self.add_subvisual(self._line_visual)
for k in self._changed:
self._changed[k] = True
def set_data(self, pos=None, color=None, width=None, connect=None):
""" Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
"""
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
@property
def color(self):
return self._color
@property
def width(self):
return self._width
@property
def connect(self):
return self._connect
@property
def pos(self):
return self._pos
def _interpret_connect(self):
if isinstance(self._connect, np.ndarray):
# Convert a boolean connection array to a vertex index array
if self._connect.ndim == 1 and self._connect.dtype == bool:
index = np.empty((len(self._connect), 2), dtype=np.uint32)
index[:] = np.arange(len(self._connect))[:, np.newaxis]
index[:, 1] += 1
return index[self._connect]
elif self._connect.ndim == 2 and self._connect.shape[1] == 2:
return self._connect.astype(np.uint32)
else:
raise TypeError("Got invalid connect array of shape %r and "
"dtype %r" % (self._connect.shape,
self._connect.dtype))
else:
return self._connect
def _interpret_color(self):
if isinstance(self._color, string_types):
try:
colormap = get_colormap(self._color)
color = Function(colormap.glsl_map)
except KeyError:
color = Color(self._color).rgba
elif isinstance(self._color, Function):
color = Function(self._color)
else:
color = ColorArray(self._color).rgba
if len(color) == 1:
color = color[0]
return color
def _compute_bounds(self, axis, view):
"""Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
def _prepare_draw(self, view):
if self._width == 0:
return False
CompoundVisual._prepare_draw(self, view)
class _GLLineVisual(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void) {
gl_Position = $transform($to_vec4($position));
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
def __init__(self, parent):
self._parent = parent
self._pos_vbo = gloo.VertexBuffer()
self._color_vbo = gloo.VertexBuffer()
self._connect_ibo = gloo.IndexBuffer()
self._connect = None
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent')
def _prepare_transforms(self, view):
xform = view.transforms.get_transform()
view.view_program.vert['transform'] = xform
def _prepare_draw(self, view):
prof = Profiler()
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))
self._pos_vbo.set_data(pos)
self._program.vert['position'] = self._pos_vbo
if pos.shape[-1] == 2:
self._program.vert['to_vec4'] = vec2to4
elif pos.shape[-1] == 3:
self._program.vert['to_vec4'] = vec3to4
else:
raise TypeError("Got bad position array shape: %r"
% (pos.shape,))
if self._parent._changed['color']:
color = self._parent._interpret_color()
# If color is not visible, just quit now
if isinstance(color, Color) and color.is_blank:
return False
if isinstance(color, Function):
# TODO: Change to the parametric coordinate once that is done
self._program.vert['color'] = color(
'(gl_Position.x + 1.0) / 2.0')
else:
if color.ndim == 1:
self._program.vert['color'] = color
else:
self._color_vbo.set_data(color)
self._program.vert['color'] = self._color_vbo
# Do we want to use OpenGL, and can we?
GL = None
from ...app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
# Turn on line smooth and/or line width
if GL:
if self._parent._antialias:
GL.glEnable(GL.GL_LINE_SMOOTH)
else:
GL.glDisable(GL.GL_LINE_SMOOTH)
px_scale = self.transforms.pixel_scale
width = px_scale * self._parent._width
GL.glLineWidth(max(width, 1.))
if self._parent._changed['connect']:
self._connect = self._parent._interpret_connect()
if isinstance(self._connect, np.ndarray):
self._connect_ibo.set_data(self._connect)
if self._connect is None:
return False
prof('prepare')
# Draw
if self._connect == 'strip':
self._draw_mode = 'line_strip'
self._index_buffer = None
elif self._connect == 'segments':
self._draw_mode = 'lines'
self._index_buffer = None
elif isinstance(self._connect, np.ndarray):
self._draw_mode = 'lines'
self._index_buffer = self._connect_ibo
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
prof('draw')
class _AggLineVisual(Visual):
    """Anti-aliased ('agg' method) line visual.

    Mirrors the pos/color/width state of its ``parent`` line visual and
    bakes the polyline into per-segment quads (two triangles per segment,
    see ``_agg_bake``) so joins, caps and dashing are resolved in the
    lines/agg shaders.
    """
    # Interleaved per-vertex record layout consumed by lines/agg.vert.
    _agg_vtype = np.dtype([('a_position', 'f4', 2),
                           ('a_tangents', 'f4', 4),
                           ('a_segment', 'f4', 2),
                           ('a_angles', 'f4', 2),
                           ('a_texcoord', 'f4', 2),
                           ('alength', 'f4', 1),
                           ('color', 'f4', 4)])
    VERTEX_SHADER = glsl.get('lines/agg.vert')
    FRAGMENT_SHADER = glsl.get('lines/agg.frag')
    def __init__(self, parent):
        # Public line visual whose state this helper renders.
        self._parent = parent
        self._vbo = gloo.VertexBuffer()
        # Cached copies of the parent's data, refreshed in _prepare_draw.
        self._pos = None
        self._color = None
        # Dash-pattern atlas; only the 'solid' pattern is looked up here.
        self._da = DashAtlas()
        dash_index, dash_period = self._da['solid']
        # Uniforms shared by every draw call: round joins/caps, AA enabled.
        self._U = dict(dash_index=dash_index, dash_period=dash_period,
                       linejoin=joins['round'],
                       linecaps=(caps['round'], caps['round']),
                       dash_caps=(caps['round'], caps['round']),
                       antialias=1.0)
        self._dash_atlas = gloo.Texture2D(self._da._data)
        Visual.__init__(self, vcode=self.VERTEX_SHADER,
                        fcode=self.FRAGMENT_SHADER)
        self._index_buffer = gloo.IndexBuffer()
        self.set_gl_state('translucent', depth_test=False)
        self._draw_mode = 'triangles'
    def _prepare_transforms(self, view):
        """Wire the visual->document->framebuffer->render transform chain
        into the vertex shader's named transform hooks."""
        data_doc = view.get_transform('visual', 'document')
        doc_px = view.get_transform('document', 'framebuffer')
        px_ndc = view.get_transform('framebuffer', 'render')
        vert = view.view_program.vert
        vert['transform'] = data_doc
        vert['doc_px_transform'] = doc_px
        vert['px_ndc_transform'] = px_ndc
    def _prepare_draw(self, view):
        """Re-bake vertex/index data if the parent's pos or color changed,
        then (re)bind buffers and upload the per-draw uniforms.

        Returns False to abort the draw when there is no position data.
        """
        bake = False
        if self._parent._changed['pos']:
            if self._parent._pos is None:
                return False
            # todo: does this result in unnecessary copies?
            self._pos = np.ascontiguousarray(
                self._parent._pos.astype(np.float32))
            bake = True
        if self._parent._changed['color']:
            self._color = self._parent._interpret_color()
            bake = True
        if self._parent._changed['connect']:
            # agg baking assumes consecutive vertices form one strip.
            if self._parent._connect not in [None, 'strip']:
                raise NotImplementedError("Only 'strip' connection mode "
                                          "allowed for agg-method lines.")
        if bake:
            V, I = self._agg_bake(self._pos, self._color)
            self._vbo.set_data(V)
            self._index_buffer.set_data(I)
        #self._program.prepare()
        self.shared_program.bind(self._vbo)
        uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,
                        linewidth=self._parent._width)
        for n, v in uniforms.items():
            self.shared_program[n] = v
        for n, v in self._U.items():
            self.shared_program[n] = v
        self.shared_program['u_dash_atlas'] = self._dash_atlas
    @classmethod
    def _agg_bake(cls, vertices, color, closed=False):
        """
        Bake a list of 2D vertices for rendering them as thick line. Each line
        segment must have its own vertices because of antialias (this means no
        vertex sharing between two adjacent line segments).

        Returns ``(V, I)``: the structured vertex array (four vertices per
        segment after the two np.repeat expansions below) and a uint32
        triangle index array (two triangles, i.e. six indices, per segment).
        """
        n = len(vertices)
        P = np.array(vertices).reshape(n, 2).astype(float)
        idx = np.arange(n)  # used to eventually tile the color array
        # Distance between first and last point, used to decide whether a
        # 'closed' polyline still needs its first vertex appended at the end.
        dx, dy = P[0] - P[-1]
        d = np.sqrt(dx*dx+dy*dy)
        # If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
        if closed and d > 1e-10:
            P = np.append(P, P[0]).reshape(n+1, 2)
            idx = np.append(idx, idx[-1])
            n += 1
        V = np.zeros(len(P), dtype=cls._agg_vtype)
        V['a_position'] = P
        # Tangents & norms
        T = P[1:] - P[:-1]
        N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
        # T /= N.reshape(len(T),1)
        # Incoming tangent in columns [:2], outgoing in [2:]; endpoints reuse
        # the opposite end's tangent when the polyline is closed.
        V['a_tangents'][+1:, :2] = T
        V['a_tangents'][0, :2] = T[-1] if closed else T[0]
        V['a_tangents'][:-1, 2:] = T
        V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
        # Angles between incoming and outgoing tangents (signed, via atan2).
        T1 = V['a_tangents'][:, :2]
        T2 = V['a_tangents'][:, 2:]
        A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
                       T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
        V['a_angles'][:-1, 0] = A[:-1]
        V['a_angles'][:-1, 1] = A[+1:]
        # Segment: cumulative arc length at segment start/end.
        L = np.cumsum(N)
        V['a_segment'][+1:, 0] = L
        V['a_segment'][:-1, 1] = L
        # V['a_lengths'][:,2] = L[-1]
        # Step 1: A -- B -- C => A -- B, B' -- C  (duplicate interior vertices
        # so each segment owns both of its endpoints).
        V = np.repeat(V, 2, axis=0)[1:-1]
        V['a_segment'][1:] = V['a_segment'][:-1]
        V['a_angles'][1:] = V['a_angles'][:-1]
        V['a_texcoord'][0::2] = -1
        V['a_texcoord'][1::2] = +1
        idx = np.repeat(idx, 2)[1:-1]
        # Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
        # (duplicate again to give each segment a quad; texcoord.y = -1/+1
        # marks the two sides of the thick line).
        V = np.repeat(V, 2, axis=0)
        V['a_texcoord'][0::2, 1] = -1
        V['a_texcoord'][1::2, 1] = +1
        idx = np.repeat(idx, 2)
        # Two triangles (0,1,2) and (1,2,3) per 4-vertex segment quad.
        I = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
                      (n-1)*(2*3))
        I += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
        # Length: total arc length, broadcast to every vertex.
        V['alength'] = L[-1] * np.ones(len(V))
        # Color: a single RGBA is tiled; per-vertex colors follow the
        # duplication pattern recorded in idx.
        if color.ndim == 1:
            color = np.tile(color, (len(V), 1))
        elif color.ndim == 2 and len(color) == n:
            color = color[idx]
        else:
            raise ValueError('Color length %s does not match number of '
                             'vertices %s' % (len(color), n))
        V['color'] = color
        return V, I
| 33.504621 | 78 | 0.538894 |
3da841c408558ef9e411343cb7fb2fefe5145b2f | 7,197 | py | Python | python/pyspark/mllib/tests/test_feature.py | ChenWeiye83/spark | 1f1d98c6facd556b70f457184231b5af78de8d53 | [
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 11 | 2020-01-29T10:29:53.000Z | 2022-02-10T09:52:54.000Z | python/pyspark/mllib/tests/test_feature.py | ChenWeiye83/spark | 1f1d98c6facd556b70f457184231b5af78de8d53 | [
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 7 | 2017-05-08T23:53:03.000Z | 2020-11-25T01:31:17.000Z | python/pyspark/mllib/tests/test_feature.py | ChenWeiye83/spark | 1f1d98c6facd556b70f457184231b5af78de8d53 | [
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14 | 2015-10-31T14:19:10.000Z | 2022-01-31T05:52:41.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import sqrt
import unittest
from numpy import array, random, exp, abs, tile
from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec
from pyspark.testing.mllibutils import MLlibTestCase
class FeatureTest(MLlibTestCase):
    """Tests for the IDF feature transformer."""

    def test_idf_model(self):
        """Fitting IDF on four 11-term documents yields one weight per term."""
        rows = [
            [1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3],
            [1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1],
            [1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0],
            [2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9],
        ]
        term_freqs = [Vectors.dense(row) for row in rows]
        idf_model = IDF().fit(self.sc.parallelize(term_freqs, 2))
        weights = idf_model.idf()
        self.assertEqual(len(weights), 11)
class Word2VecTests(MLlibTestCase):
    """Tests for the Word2Vec estimator."""

    def test_word2vec_setters(self):
        """Every chained setter must be reflected in the model's attributes."""
        w2v = (Word2Vec()
               .setVectorSize(2)
               .setLearningRate(0.01)
               .setNumPartitions(2)
               .setNumIterations(10)
               .setSeed(1024)
               .setMinCount(3)
               .setWindowSize(6))
        self.assertEqual(w2v.vectorSize, 2)
        self.assertTrue(w2v.learningRate < 0.02)
        self.assertEqual(w2v.numPartitions, 2)
        self.assertEqual(w2v.numIterations, 10)
        self.assertEqual(w2v.seed, 1024)
        self.assertEqual(w2v.minCount, 3)
        self.assertEqual(w2v.windowSize, 6)

    def test_word2vec_get_vectors(self):
        """The fitted model keeps vectors for three of the seven words.

        Sentences are the length-7 down to length-1 prefixes of "a".."g",
        so word counts are a:7, b:6, c:5, d:4, e:3, f:2, g:1.
        """
        words = ["a", "b", "c", "d", "e", "f", "g"]
        sentences = [words[:k] for k in range(7, 0, -1)]
        model = Word2Vec().fit(self.sc.parallelize(sentences))
        self.assertEqual(len(model.getVectors()), 3)
class StandardScalerTests(MLlibTestCase):
    """Tests for the StandardScaler model."""

    def _fit_model(self):
        # Three evenly spaced rows -> mean (2,3,4), identical spread per column.
        rows = [
            [1.0, 2.0, 3.0],
            [2.0, 3.0, 4.0],
            [3.0, 4.0, 5.0],
        ]
        return StandardScaler().fit(self.sc.parallelize(rows))

    def test_model_setters(self):
        """setWithMean/setWithStd return non-None and change the transform."""
        model = self._fit_model()
        self.assertIsNotNone(model.setWithMean(True))
        self.assertIsNotNone(model.setWithStd(True))
        self.assertEqual(model.transform([1.0, 2.0, 3.0]),
                         DenseVector([-1.0, -1.0, -1.0]))

    def test_model_transform(self):
        """With default settings the transform leaves this vector unchanged."""
        model = self._fit_model()
        self.assertEqual(model.transform([1.0, 2.0, 3.0]),
                         DenseVector([1.0, 2.0, 3.0]))
class ElementwiseProductTests(MLlibTestCase):
    """Tests for the ElementwiseProduct transformer."""

    def test_model_transform(self):
        """Dense and sparse inputs are scaled component-wise by the weight."""
        scaling = Vectors.dense([3, 2, 1])
        transformer = ElementwiseProduct(scaling)
        dense_in = Vectors.dense([4, 5, 6])
        self.assertEqual(transformer.transform(dense_in),
                         DenseVector([12, 10, 6]))
        sparse_in = Vectors.sparse(3, [0], [1])
        self.assertEqual(transformer.transform(sparse_in),
                         SparseVector(3, [0], [3]))
class HashingTFTest(MLlibTestCase):
    """Tests for HashingTF's binary term-frequency mode."""

    def test_binary_term_freqs(self):
        """With binary=True, repeated terms still hash to frequency 1."""
        tf = HashingTF(100).setBinary(True)
        tokens = "a a b c c c".split(" ")
        n = tf.numFeatures
        output = tf.transform(tokens).toArray()
        # Expected: exactly 1.0 at each distinct term's hash bucket.
        expected = Vectors.sparse(
            n, {tf.indexOf(term): 1.0 for term in ("a", "b", "c")}).toArray()
        for i in range(0, n):
            self.assertAlmostEqual(output[i], expected[i], 14, "Error at " + str(i) +
                                   ": expected " + str(expected[i]) + ", got " + str(output[i]))
class DimensionalityReductionTests(MLlibTestCase):
    """Tests for RowMatrix SVD and PCA on equivalent dense/sparse data."""
    # 4x3 matrix, once as dense rows and once as the same values sparse.
    denseData = [
        Vectors.dense([0.0, 1.0, 2.0]),
        Vectors.dense([3.0, 4.0, 5.0]),
        Vectors.dense([6.0, 7.0, 8.0]),
        Vectors.dense([9.0, 0.0, 1.0])
    ]
    sparseData = [
        Vectors.sparse(3, [(1, 1.0), (2, 2.0)]),
        Vectors.sparse(3, [(0, 3.0), (1, 4.0), (2, 5.0)]),
        Vectors.sparse(3, [(0, 6.0), (1, 7.0), (2, 8.0)]),
        Vectors.sparse(3, [(0, 9.0), (2, 1.0)])
    ]
    def assertEqualUpToSign(self, vecA, vecB):
        """Assert two vectors are equal up to an overall sign flip
        (singular/principal vectors are only defined up to sign)."""
        eq1 = vecA - vecB
        eq2 = vecA + vecB
        self.assertTrue(sum(abs(eq1)) < 1e-6 or sum(abs(eq2)) < 1e-6)
    def test_svd(self):
        """computeSVD returns factors with the requested shapes."""
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        m = 4  # number of rows
        n = 3  # number of columns
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                rm = mat.computeSVD(k, computeU=True)
                self.assertEqual(rm.s.size, k)
                self.assertEqual(rm.U.numRows(), m)
                self.assertEqual(rm.U.numCols(), k)
                self.assertEqual(rm.V.numRows, n)
                self.assertEqual(rm.V.numCols, k)
        # Test that U returned is None if computeU is set to False.
        self.assertEqual(mat.computeSVD(1).U, None)
        # Test that low rank matrices cannot have number of singular values
        # greater than a limit (rank-1 matrix of repeated rows).
        rm = RowMatrix(self.sc.parallelize(tile([1, 2, 3], (3, 1))))
        self.assertEqual(rm.computeSVD(3, False, 1e-6).s.size, 1)
    def test_pca(self):
        """computePrincipalComponents matches hand-computed components."""
        expected_pcs = array([
            [0.0, 1.0, 0.0],
            [sqrt(2.0) / 2.0, 0.0, sqrt(2.0) / 2.0],
            [sqrt(2.0) / 2.0, 0.0, -sqrt(2.0) / 2.0]
        ])
        n = 3
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                pcs = mat.computePrincipalComponents(k)
                self.assertEqual(pcs.numRows, n)
                self.assertEqual(pcs.numCols, k)
                # We can just test the updated principal component for equality.
                self.assertEqualUpToSign(pcs.toArray()[:, k - 1], expected_pcs[:, k - 1])
if __name__ == "__main__":
    # Re-import so unittest discovery sees the test classes by module path.
    from pyspark.mllib.tests.test_feature import *
    try:
        # Use the XML reporter when available (for CI report collection)...
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # ...otherwise fall back to unittest's default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| 37.290155 | 96 | 0.575657 |
1f5aec8ca76efa6d7c1a543fd5da9d15f87af8c6 | 4,405 | py | Python | DNA_mRNA_Protein.py | KapileshP/DNA_to_mRNA_to_Protein | a478ee07159a001dcd3f3c6101a86b76f63d5fd1 | [
"BSD-3-Clause"
] | 1 | 2021-03-23T17:02:14.000Z | 2021-03-23T17:02:14.000Z | DNA_mRNA_Protein.py | KapileshP/DNA_to_mRNA_to_Protein | a478ee07159a001dcd3f3c6101a86b76f63d5fd1 | [
"BSD-3-Clause"
] | null | null | null | DNA_mRNA_Protein.py | KapileshP/DNA_to_mRNA_to_Protein | a478ee07159a001dcd3f3c6101a86b76f63d5fd1 | [
"BSD-3-Clause"
] | null | null | null | # ---------- About this Program ----------
"""
Name: DNA to mRNA to Protein
Creator: Kapilesh Pennichetty
Description: DNA to mRNA to Protein is a program created by Kapilesh Pennichetty to assist in converting a DNA sequence to an mRNA sequence and amino acids (protein) through the processes of transcription and translation. This program takes user input (DNA sequence) via the Python shell, performs transcription and translation, and returns the corresponding amino acid and mRNA codon as output.
"""
# ---------- Import Statements ----------
import sys
# ---------- Dictionaries for Transcription and Translation ----------
# DNA base -> complementary mRNA base.  Spaces pass through unchanged and
# are stripped out again before codon grouping.
transcription = {
    "A": "U",
    "T": "A",
    "C": "G",
    "G": "C",
    " ": " ",
}

# mRNA codon -> amino acid.  Keys are the str() of a 3-character list,
# which is exactly how codons are formatted by the transcription step below.
translation = {
    "['U', 'U', 'U']": "Phenylalanine",
    "['U', 'U', 'C']": "Phenylalanine",
    "['U', 'U', 'A']": "Leucine",
    "['U', 'U', 'G']": "Leucine",
    "['U', 'C', 'U']": "Serine",
    "['U', 'C', 'C']": "Serine",
    "['U', 'C', 'A']": "Serine",
    "['U', 'C', 'G']": "Serine",
    "['U', 'A', 'U']": "Tyrosine",
    "['U', 'A', 'C']": "Tyrosine",
    "['U', 'A', 'A']": "STOP",
    "['U', 'A', 'G']": "STOP",
    "['U', 'G', 'U']": "Cysteine",
    "['U', 'G', 'C']": "Cysteine",
    "['U', 'G', 'A']": "STOP",
    "['U', 'G', 'G']": "Tryptophan",
    "['C', 'U', 'U']": "Leucine",
    "['C', 'U', 'C']": "Leucine",
    "['C', 'U', 'A']": "Leucine",
    "['C', 'U', 'G']": "Leucine",
    "['C', 'C', 'U']": "Proline",
    "['C', 'C', 'C']": "Proline",
    "['C', 'C', 'A']": "Proline",
    "['C', 'C', 'G']": "Proline",
    "['C', 'A', 'U']": "Histidine",
    "['C', 'A', 'C']": "Histidine",
    "['C', 'A', 'A']": "Glutamine",
    "['C', 'A', 'G']": "Glutamine",
    "['C', 'G', 'U']": "Arginine",
    "['C', 'G', 'C']": "Arginine",
    "['C', 'G', 'A']": "Arginine",
    "['C', 'G', 'G']": "Arginine",
    "['A', 'U', 'U']": "Isoleucine",
    "['A', 'U', 'C']": "Isoleucine",
    "['A', 'U', 'A']": "Isoleucine",
    "['A', 'U', 'G']": "Methionine (START)",
    "['A', 'C', 'U']": "Threonine",
    "['A', 'C', 'C']": "Threonine",
    "['A', 'C', 'A']": "Threonine",
    "['A', 'C', 'G']": "Threonine",
    "['A', 'A', 'U']": "Asparagine",
    "['A', 'A', 'C']": "Asparagine",
    "['A', 'A', 'A']": "Lysine",
    "['A', 'A', 'G']": "Lysine",
    "['A', 'G', 'U']": "Serine",
    "['A', 'G', 'C']": "Serine",
    "['A', 'G', 'A']": "Arginine",
    "['A', 'G', 'G']": "Arginine",
    "['G', 'U', 'U']": "Valine",
    "['G', 'U', 'C']": "Valine",
    "['G', 'U', 'A']": "Valine",
    "['G', 'U', 'G']": "Valine",
    "['G', 'C', 'U']": "Alanine",
    "['G', 'C', 'C']": "Alanine",
    "['G', 'C', 'A']": "Alanine",
    "['G', 'C', 'G']": "Alanine",
    "['G', 'A', 'U']": "Aspartate",
    "['G', 'A', 'C']": "Aspartate",
    "['G', 'A', 'A']": "Glutamate",
    "['G', 'A', 'G']": "Glutamate",
    "['G', 'G', 'U']": "Glycine",
    "['G', 'G', 'C']": "Glycine",
    "['G', 'G', 'A']": "Glycine",
    "['G', 'G', 'G']": "Glycine",
}
# ---------- User Input (DNA Sequence) ----------
# Read a DNA sequence, transcribe it base-by-base into mRNA, then split the
# mRNA into codons (triplets) ready for translation below.
input_prompt = input(
    "Please enter the DNA sequence to be converted to mRNA. Please make sure that your DNA sequence starts with the start codon for accurate results: "
)
user_input = input_prompt.upper()  # the transcription table keys are upper-case
dna_sequence = user_input
list_dna_sequence = list(dna_sequence)

mRNA_sequence = ""
try:
    for i in dna_sequence:
        mRNA_sequence += transcription[i]
except KeyError:
    # Only an unknown base can raise here.  The previous bare `except:` would
    # also have swallowed SystemExit and KeyboardInterrupt.
    print(
        "Please make sure that your DNA sequence is valid. Re-run this program to re-enter a valid sequence."
    )
    sys.exit()

# Drop the spaces that pass through transcription, then group into codons.
modified_mRNA_sequence = mRNA_sequence.replace(" ", "")
list_mRNA_sequence = list(modified_mRNA_sequence)
# Each codon is stored as the str() of a 3-element list, matching the key
# format of the `translation` table.
formatted_mRNA_list = [
    str(list_mRNA_sequence[x:x + 3])
    for x in range(0, len(list_mRNA_sequence), 3)
]
# ---------- Output (Amino Acids and mRNA Codons) ----------
# Walk the codon list, printing one amino acid per codon; translation ends at
# the first STOP codon.  An unknown or incomplete codon (e.g. trailing bases)
# aborts with an error message.
#
# Fixes over the previous version:
# * The bare `except:` used to catch the SystemExit raised by sys.exit() on a
#   STOP codon, so even a successful run printed the "invalid sequence" error.
# * Unknown codons used to print "None" instead of being reported as invalid.
amino_acid_number = 0
print(
    "After transcription and translation, here are the amino acids and their respective mRNA codons:"
)
for i in formatted_mRNA_list:
    amino_acid_number += 1
    amino_acid = translation.get(i)
    if amino_acid is None:
        print(
            "Please make sure that your DNA sequence is valid. Re-run this program to re-enter a valid sequence."
        )
        sys.exit()
    print(amino_acid_number, ".", amino_acid, i)
    if amino_acid == "STOP":
        print("\n")
        sys.exit()
e00aa77cde159d454b7a79cbef95975588f6f984 | 8,779 | py | Python | public/data/userguides/v0.1.0/_downloads/8bbb158ade27efed48c2b55ccd020566/tutorial5.py | libcellml/website-src | a9563941e0dd3b5dcfee922ab53f4adeb891047c | [
"CC0-1.0"
] | null | null | null | public/data/userguides/v0.1.0/_downloads/8bbb158ade27efed48c2b55ccd020566/tutorial5.py | libcellml/website-src | a9563941e0dd3b5dcfee922ab53f4adeb891047c | [
"CC0-1.0"
] | 39 | 2020-06-04T01:20:53.000Z | 2021-11-03T10:06:44.000Z | public/data/userguides/v0.1.0/_downloads/8bbb158ade27efed48c2b55ccd020566/tutorial5.py | libcellml/website-src | a9563941e0dd3b5dcfee922ab53f4adeb891047c | [
"CC0-1.0"
] | 3 | 2020-11-24T21:54:09.000Z | 2021-01-25T20:41:27.000Z | """
TUTORIAL 6: Annotating a mystery model
This tutorial is a guide to playing Marco Polo using libCellML.
By the time you have worked through this tutorial you will be able to:
- Parse a CellML file into a Model instance
- Determine the type of item with a given id
- Use the Annotator class to retrieve an item using only its id string
- Repair duplicated id strings within the model scope and
- Automatically generate and assign unique ids to any or all items.
Background:
'Marco Polo' is a game played with many people in a swimming pool. One person
calls 'Marco' with their eyes closed. Others answer 'Polo' and the first person
must find them by following the sound. In this tutorial you are given two id
strings - 'marco' and 'polo' - and a mystery CellML model file. We will work
through how the Annotator class can be used to locate the desired objects.
"""
from libcellml import Annotator, CellmlElementType, Component, Importer, Model, Parser, Units, Variable
from utilities import print_issues, print_model, get_cellml_element_type_from_enum, get_issue_level_from_enum
if __name__ == '__main__':
    # Tutorial scaffold: each numbered step below is an exercise for the
    # reader; only the section banners are executed.
    print('----------------------------------------------------------')
    print(' STEP 1: Parse a mystery model ')
    print('----------------------------------------------------------')
    # 1.a
    # Read the mystery file, MysteryModel.cellml.
    # 1.b
    # Create a Parser item.
    # 1.c
    # Use the parser to deserialise the contents of the string you've read
    # and return the model.
    # 1.d
    # Check that the parser has not raised any issues.
    print('----------------------------------------------------------')
    print(' STEP 2: Find "marco" ')
    print('----------------------------------------------------------')
    # 2.a
    # Create an Annotator item and use the setModel function to pass in the
    # parsed mystery model.
    # The item function returns an AnyItem, a tuple containing:
    # - a CellmlElementType enumeration; and
    # - the item itself.
    # 2.b
    # Retrieve the item with an id of 'marco'. Use the helper function
    # get_cellml_element_type_from_enum to convert the enumeration of its type
    # into a string for printing to the terminal.
    # The item with ID 'marco' is a VARIABLE.
    # 2.c
    # Check that the annotator has not reported any issues.
    # 2.d
    # Now that we know the marco item's type from its first attribute (it should
    # be a CellmlElementType.VARIABLE) we can name its second attribute so we know
    # what it is.
    print('----------------------------------------------------------')
    print(' STEP 3: Find "polo" ')
    print('----------------------------------------------------------')
    # 3.a
    # Now try the same procedure to find the item with id of 'polo'.
    # Retrieve the item and print its type to the terminal.
    # 3.b
    # The item type returned is CellmlElementType.UNDEFINED ... so we
    # need to check what the annotator has to say about it.
    # Retrieve the issues from the annotator and print to the terminal.
    # Expected output:
    # Recorded 1 issues:
    # Issue [0] is a WARNING:
    # description: The id 'polo' occurs 6 times in the model so a unique item cannot be located.
    # stored item type: UNDEFINED
    # Since the id is not unique, we need to retrieve a vector of all items
    # with that id to investigate them.
    # 3.c
    # Use the items function to retrieve the vector of items with id 'polo',
    # and iterate through it printing the different types to the terminal.
    # The items with an id of 'polo' have types of:
    # - [0] UNITS
    # - [1] UNITS
    # - [2] UNIT
    # - [3] VARIABLE
    # - [4] RESET
    # - [5] RESET_VALUE
    # The item we want has type CellmlElementType.UNIT, and we'd like it
    # to be unique. We need to change the other items to have other (also unique)
    # ids. The Annotator class can create a unique id for an item using the assignId function.
    # This is overloaded so that you can pass in any libCellML item, as well as an AnyItem
    # pair. NB: You need to be aware of the default types assigned when passing in CellML items
    # without a corresponding item type. These are listed in the documentation.
    # 3.d
    # Assign an automatic id to all of the items with id 'polo', except for the one whose
    # type is UNIT.
    # 3.e
    # Check that the id of 'polo' is now unique in the model by calling the
    # isUnique function.
    # Now we know that there is only one item in the model with id 'polo', and we also know
    # that it has type UNIT. This means that we can retrieve a Unit item directly from the
    # annotator without needing any casting. Instead of calling
    # the annotator's item function, call the Annotator.unit function with the id 'polo' to return the
    # unit item directly.
    # 3.f
    # Retrieve the Unit with id 'polo' without casting.
    print('----------------------------------------------------------')
    print(' STEP 4: See who else is lurking in this pool ')
    print('----------------------------------------------------------')
    # Now that we've found Marco and fixed the duplicates of Polo, we'd like to know
    # what other ids are being used in this model.
    # 4.a
    # Use the Annotator.ids function to return a vector of id strings used in the model, and
    # print them to the terminal.
    # The hex strings printed are those which have been automatically generated by the assignId
    # function; we can also see the 'marco' and 'polo' ids as expected.
    # 4.b
    # Use the duplicateIds function to return a vector of those ids which have been duplicated in
    # the model, and print them to the terminal.
    print('----------------------------------------------------------')
    print(' STEP 5: See who else is lurking around the corner ')
    print('----------------------------------------------------------')
    # The final step is to make sure that imported items can have their annotations
    # tracked back to their sources too.
    # 5.a
    # Retrieve an item with id of 'whoAmIAndWhereDidIComeFrom' and print its item type
    # to the terminal.
    # 5.b
    # Cast it into a CellML item of the appropriate type.
    # 5.c
    # Use the Component.isImport() function to verify that it is imported.
    # 5.d
    # Create an Importer instance and use it to resolve this model's imports.
    # Check that it has not raised any issues.
    # 5.e
    # Retrieve all the information needed to locate any annotations on the
    # original item:
    # - the URL from which it was imported and
    # - the id of the item in the original model.
    # Print these to the terminal.
    print('----------------------------------------------------------')
    print(' STEP 6: Give up and go home ')
    print('----------------------------------------------------------')
    # 6.a
    # Loop through all of the model's components and print their id to the terminal.
    # Use the assignIds function with an item type (CellmlElementType.COMPONENT)
    # to give all of the items of that type a new unique id. Print the ids again and
    # notice that the blanks have been filled with automatically generated strings,
    # but existing ids are unchanged.
    # 6.b
    # Finally, we decide that it's too cold for swimming, and want to nuke all the ids
    # and go home.
    # Use the clearAllIds function to completely remove all id strings from the model.
    # Check that they have gone by repeating step 4.a to print any ids to the terminal.
    # 6.c
    # Go looking for Marco, but he's gone home already.
    # Try and retrieve an item with id 'marco' and check that a null pointer is returned.
    # Retrieve and print any issues to the terminal.
    # 6.d
    # Regret nuking our friends and make plans to return tomorrow and
    # annotate everything. Use the assignAllIds function to give an automatic
    # id to everything in the model.
    # 6.e
    # Try to retrieve duplicated ids from the annotator as in step 4.b, and
    # check that it returns an empty list.
| 43.034314 | 109 | 0.581729 |
1340ad2e5ae4bf633cfdbb1ca3cca321b7b72c71 | 4,568 | py | Python | software/multifluids_icferst/examples/rotating_channel/channel_tools.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-11T02:39:46.000Z | 2020-05-11T03:08:38.000Z | software/multifluids_icferst/examples/rotating_channel/channel_tools.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | null | null | null | software/multifluids_icferst/examples/rotating_channel/channel_tools.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-21T22:50:19.000Z | 2020-10-28T17:16:31.000Z | import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs
# Gmsh .geo template for a unit-square channel; <dx> and <layers> are
# substituted by generate_meshfile below.
meshtemplate='''
Point(1) = {0, 0, 0, <dx>};
Extrude {0, 1, 0} {
Point{1};Layers{<layers>};
}
Point(3) = {1, 0, 0, <dx>};
Extrude {0, 1, 0} {
Point{3};Layers{<layers>};
}
Line(3)={1,3};
Line(4)={2,4};
Line Loop(5) = {4, -2, -3, 1};
Plane Surface(6) = {5};
Physical Line(1) = {1};
Physical Line(2) = {2};
Physical Line(3) = {4, 3};
Physical Surface(1) = {6};
'''

def generate_meshfile(name, layers):
    """Write <name>.geo from the channel template and mesh it with gmsh.

    The template's <dx> and <layers> placeholders are filled so the unit
    square is meshed with `layers` divisions per side.
    """
    geo = meshtemplate.replace('<dx>', str(1. / layers)
                               ).replace('<layers>', str(layers))
    # `file(...)` was the (Python 2 only) builtin and leaked the handle;
    # a context manager guarantees the .geo file is closed before gmsh runs.
    with open(name + ".geo", 'w') as geo_file:
        geo_file.write(geo)
    os.system("gmsh -2 " + name + ".geo")
def forcing(X):
    '''Analytic momentum source; a function of the y coordinate X[1] only.'''
    y = X[1]
    return (y ** 3, 0)
# Viscosity
mu=1.0
# Note that because Coriolis can't be set from Python, the user has to ensure
# that this matches what is in the flml.
coriolis=1.0
def analytic_solution(forcing):
    '''Solve the ode d^2u/dx^2 = F/mu subject to u(0)=0, u(1)=0'''
    x = Symbol('x')
    # Constants of integration, fixed by the two boundary conditions.
    c1, c2 = Symbol('c_1'), Symbol('c_2')
    rhs = -forcing((0, x))[0] / mu
    general = integrate(integrate(rhs, x) + c1, x) + c2
    boundary_conditions = (Eq(general.subs(x, 0), 0),
                           Eq(general.subs(x, 1), 0))
    return general.subs(solve(boundary_conditions, c1, c2))
def solution(forcing):
    '''Return a function which is the solution to:
    d^2u/dx^2 = F/mu subject to u(0)=0, u(1)=0'''
    def evaluate(pos):
        # Only the y coordinate (pos[1]) enters the analytic expression.
        return analytic_solution(forcing).subs(Symbol('x'), pos[1])
    return evaluate
def analytic_pressure_solution(forcing):
    '''Integrate dp/dx = -coriolis*u + F_y symbolically.
    The constant of integration is taken as 0.'''
    x = Symbol('x')
    u = analytic_solution(forcing)
    return integrate(-coriolis * u + forcing((0, x))[1], x)
def pressure_solution(forcing):
    '''Return a function which is the solution to:
    dp/dx = f x u  The constant of integration is set to 0.'''
    def evaluate(pos):
        # Only the y coordinate (pos[1]) enters the analytic expression.
        return analytic_pressure_solution(forcing).subs(Symbol('x'), pos[1])
    return evaluate
def plot_theory():
    '''Produce a three-panel plot showing the forcing, the analytic velocity
    solution and the analytic pressure solution, sampled on y in [0, 1].

    Returns (uf, us, ps): the sampled forcing, velocity and pressure arrays.
    '''
    # NOTE(review): pylab.frange is from the old (Python 2 era) pylab API and
    # is absent from modern matplotlib -- confirm the pinned version.
    from pylab import \
        plot,figure,quiver,frange,subplot,xticks,yticks,axis,xlabel,ylabel, \
        subplots_adjust
    figure()
    y=frange(0.0,1,0.05)
    psol=pressure_solution(forcing)
    usol=solution(forcing)
    # Zero arrays for the x coordinate and the v velocity component.
    v=0*y
    x=0*y
    us=array([float(usol(pos)) for pos in zip(x,y)])
    ps=array([float(psol(pos)) for pos in zip(x,y)])
    uf=array([forcing(pos) for pos in zip(x,y)])[:,0]
    subplots_adjust(wspace=0.25)
    # Panel 1: the u-momentum source term.
    subplot(1,3,1)
    quiver(x[1:-1],y[1:-1],uf[1:-1],v[1:-1], scale=1)
    plot(uf,y)
    xticks([0,0.5,1],map(str,[0,0.5,1]))
    yticks([ 0 , 0.2, 0.4, 0.6, 0.8, 1 ],map(str,[ 0 , 0.2, 0.4, 0.6, 0.8, 1 ]))
    ylabel("y")
    xlabel("u source")
    # Panel 2: the analytic velocity profile.
    subplot(1,3,2)
    plot(us,y)
    quiver(x[1:-1],y[1:-1],us[1:-1],v[1:-1], scale=.03)
    xticks([0,0.01,0.02,0.03],map(str,[0,0.01,0.02,0.03]))
    yticks([])
    xlabel("u solution")
    # Panel 3: the analytic pressure profile.
    subplot(1,3,3)
    plot(ps,y)
    xticks([-0.02,-0.01,0],map(str,[-0.02,-0.01,0]))
    yticks([])
    xlabel("p solution")
    return uf,us,ps
def plot_results(dx, error):
    '''plot_results(dx, error)

    Produce a log-log convergence plot of the errors in `error` against the
    mesh spacings in `dx`.  `error` should be a two column matrix with the
    first column being the velocity error and the second column the pressure
    error.  A reference O(dx^2) line is drawn for comparison.
    '''
    from pylab import \
        figure,xticks,yticks,axis,xlabel,ylabel,loglog,legend,title
    figure()
    loglog(dx,error)
    # Reference second-order convergence line.
    loglog(dx,0.03*dx**2)
    yticks(yticks()[0], map(lambda x: "%3.1e"%x, yticks()[0]))
    xticks(xticks()[0], map(lambda x: "%3.1e"%x, xticks()[0]))
    xlabel("dx")
    title("Convergence of the rotating channel")
    legend(("u error","p error","O(dx^2)"))
def retrieve_results(layers):
    '''retrieve_results(layers)

    For each layer count in the sequence `layers`, read the velocity and
    pressure error norms from the simulation results in the matching
    channel-n directory.

    Column 0 of the result is the l2 norm of the error in the u component of
    velocity at the final timestep; column 1 is the l2 norm of the pressure
    error.
    '''
    from numpy import zeros
    errors = zeros((len(layers), 2))
    for row, layer in enumerate(layers):
        stats = stat_parser("channel-%d/rotating_channel.stat" % layer)
        water = stats["Water"]
        errors[row, 0] = water['AnalyticUVelocitySolutionError']['l2norm'][-1]
        errors[row, 1] = water['AnalyticPressureSolutionError']['l2norm'][-1]
    return errors
| 23.91623 | 90 | 0.618651 |
d22a85ba4ce9958f33e5b4028a2f13fd0087570a | 247 | py | Python | app/__init__.py | JenBanks8585/Music-Recommender | 01145671ac71c9c711b659fce43cac9cca08df25 | [
"MIT"
] | null | null | null | app/__init__.py | JenBanks8585/Music-Recommender | 01145671ac71c9c711b659fce43cac9cca08df25 | [
"MIT"
] | null | null | null | app/__init__.py | JenBanks8585/Music-Recommender | 01145671ac71c9c711b659fce43cac9cca08df25 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from app.appli import appli
def create_app():
    """Application factory: build the Flask app and attach its routes."""
    flask_app = Flask(__name__)
    flask_app.register_blueprint(appli)
    return flask_app
if __name__ == '__main__':
    # Run the development server directly.
    # NOTE(review): debug=True is for local development only -- it must not
    # be enabled in production.
    my_app = create_app()
    my_app.run(debug=True)
c2f78477f6b57f03d20bac4f3b02ccd87416c9af | 4,753 | py | Python | cnn.py | zhongxinghong/PKUElectiveCaptcha | 1152dfee5b451c203799952b19fad9918ed96a41 | [
"MIT"
] | 20 | 2019-04-05T11:20:24.000Z | 2022-02-22T02:41:58.000Z | cnn.py | zhongxinghong/PKUElectiveCaptcha | 1152dfee5b451c203799952b19fad9918ed96a41 | [
"MIT"
] | 5 | 2020-02-15T11:06:55.000Z | 2022-03-11T23:40:05.000Z | cnn.py | zhongxinghong/PKUElectiveCaptcha | 1152dfee5b451c203799952b19fad9918ed96a41 | [
"MIT"
] | 8 | 2019-07-03T15:00:52.000Z | 2021-09-28T06:19:29.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: cnn.py
# Created Date: Wednesday, January 8th 2020, 6:12:20 pm
# Author: Rabbit
# -------------------------
# Copyright (c) 2020 Rabbit
# --------------------------------------------------------------------
###
import os
import joblib
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, SubsetRandomSampler
from utils import u
from const import LOG_DIR, CNN_MODEL_FILE, LABELS_NUM
from dataset import ElectiveCaptchaDatasetFromPackage
# Per-epoch confusion-matrix CSV path; "{}" is filled with the epoch number.
CONFUSION_MATRIX_LOG_FILE = os.path.join(LOG_DIR, r"cnn.confusion_matrix.epoch_{}.csv")
class ElectiveCaptchaCNN(nn.Module):
    """Three-block CNN classifying single captcha characters.

    Expects a single-channel input; the per-layer shape comments below imply
    a 22x22 image (conv 3x3 -> 20x20 after the first block).  Outputs
    log-probabilities over LABELS_NUM classes.
    """
    def __init__(self):
        super(ElectiveCaptchaCNN, self).__init__()
        # Batch-norm layers, one per conv block.
        self.bn1 = nn.BatchNorm2d(32)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        # 3x3 convolutions, channel progression 1 -> 32 -> 64 -> 128.
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        # Classifier head: 128*2*2 = 512 features after flattening.
        self.fc1 = nn.Linear(512, 128)
        self.fc2 = nn.Linear(128, LABELS_NUM)  # 55
    def forward(self, x):
        """Return per-class log-probabilities (log_softmax output)."""
        x = self.conv1(x)  # batch*32*20*20
        x = self.bn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)  # batch*32*10*10
        x = self.conv2(x)  # batch*64*8*8
        x = self.bn2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)  # batch*64*4*4
        x = self.conv3(x)  # batch*128*2*2
        x = self.bn3(x)
        x = F.relu(x)
        x = torch.flatten(x, 1)  # batch*512
        x = self.fc1(x)  # batch*128
        x = F.relu(x)
        x = self.fc2(x)  # batch*55
        x = F.log_softmax(x, dim=1)
        return x
def train(model, train_loader, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    The model must output log-probabilities (nll_loss is used).  Progress is
    printed roughly every 5% of batches.

    Fix: for loaders with fewer than 20 batches, int(len * 0.05) is 0 and
    `ix % log_interval` raised ZeroDivisionError; clamp the interval to >= 1.
    """
    log_interval = max(1, int(len(train_loader) * 0.05))
    model.train()
    for ix, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if ix % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                ix * len(data),
                len(train_loader.sampler),
                100.0 * ix / len(train_loader),
                loss.item()
            ))
def validate(model, validation_loader, epoch):
    """Evaluate `model` on the validation set for one epoch.

    Prints the average loss and accuracy, and writes the per-class confusion
    matrix to CONFUSION_MATRIX_LOG_FILE (formatted with `epoch`).
    """
    model.eval()
    validation_loss = 0
    correct = 0
    # confusion_matrix[true_label, predicted_label] counts.
    # NOTE(review): np.int is deprecated in recent NumPy; fine for the
    # pinned version -- confirm before upgrading.
    confusion_matrix = np.zeros((LABELS_NUM, LABELS_NUM), dtype=np.int)
    with torch.no_grad():
        for Xlist, ylist in validation_loader:
            output = model(Xlist)
            # NOTE(review): nll_loss defaults to a per-batch mean, so dividing
            # by the total sample count sums batch means / N rather than a
            # true per-sample average -- confirm this is the intended metric.
            validation_loss += F.nll_loss(output, ylist).item() / len(validation_loader.sampler)
            # Predicted class = index of the highest log-probability.
            ypred = output.argmax(dim=1, keepdim=True)
            correct += ypred.eq(ylist.view_as(ypred)).sum().item()
            for t, p in zip(ylist.view(-1), ypred.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
    print('\nValidation set: Average loss: {:.6f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
        validation_loss,
        correct,
        len(validation_loader.sampler),
        100.0 * correct / len(validation_loader.sampler)
    ))
    # Persist the confusion matrix with human-readable label headers.
    df = pd.DataFrame(
        data=confusion_matrix,
        index=validation_loader.dataset.labels,
        columns=validation_loader.dataset.labels,
    )
    df.to_csv(CONFUSION_MATRIX_LOG_FILE.format(epoch))
def main():
    """Train the captcha CNN end to end and persist its weights."""
    seed = 42
    train_fraction = 0.7
    batch_size = 128
    n_epochs = 5
    base_lr = 0.1
    lr_step = 1
    lr_gamma = 0.15

    dataset = ElectiveCaptchaDatasetFromPackage()

    # Deterministic shuffled train/validation split of the sample indices.
    all_idx = np.arange(len(dataset))
    np.random.seed(seed)
    np.random.shuffle(all_idx)
    cut = int(len(dataset) * train_fraction)
    train_idx, val_idx = all_idx[:cut], all_idx[cut:]

    train_loader = DataLoader(dataset, batch_size=batch_size,
                              sampler=SubsetRandomSampler(train_idx))
    validation_loader = DataLoader(dataset, batch_size=batch_size,
                                   sampler=SubsetRandomSampler(val_idx))

    model = ElectiveCaptchaCNN()
    optimizer = optim.SGD(model.parameters(), lr=base_lr)
    scheduler = StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)

    for epoch in range(1, n_epochs + 1):
        train(model, train_loader, optimizer, epoch)
        validate(model, validation_loader, epoch)
        scheduler.step()

    # Persist the trained weights (compressed) for the inference side.
    joblib.dump(model.state_dict(), CNN_MODEL_FILE, compress=9)
# Entry point: run the full training pipeline when executed as a script.
if __name__ == '__main__':
    main()
| 28.981707 | 96 | 0.605723 |
1f15a29ba709af52537148ab0aae29ad516ac9b0 | 1,420 | py | Python | configs/lunarlander_v2/dqn.py | FurkanArslan/rl_algorithms | f6c61e02e181510c212a6ef7b4598338205e4bf7 | [
"MIT"
] | null | null | null | configs/lunarlander_v2/dqn.py | FurkanArslan/rl_algorithms | f6c61e02e181510c212a6ef7b4598338205e4bf7 | [
"MIT"
] | null | null | null | configs/lunarlander_v2/dqn.py | FurkanArslan/rl_algorithms | f6c61e02e181510c212a6ef7b4598338205e4bf7 | [
"MIT"
] | null | null | null | """Config for DQN on LunarLander-v2.
- Author: Kyunghwan Kim
- Contact: kh.kim@medipixel.io
"""
from rl_algorithms.common.helper_functions import identity
# DQN agent configuration: C51 distributional head, prioritized replay
# (PER), n-step returns and an epsilon-greedy exploration schedule.
agent = {
    "type": "DQNAgent",
    "hyper_params": {
        "gamma": 0.99,
        "tau": 5e-3,
        "buffer_size": int(1e5),  # openai baselines: int(1e4)
        "batch_size": 64,  # openai baselines: 32
        "update_starts_from": int(1e4),  # openai baselines: int(1e4)
        "multiple_update": 1,  # multiple learning updates
        "train_freq": 1,  # in openai baselines, train_freq = 4
        "gradient_clip": 10.0,  # dueling: 10.0
        "n_step": 3,
        "w_n_step": 1.0,
        "w_q_reg": 1e-7,
        "per_alpha": 0.6,  # openai baselines: 0.6
        "per_beta": 0.4,
        "per_eps": 1e-6,
        "loss_type": {"type": "C51Loss"},
        # Epsilon-greedy schedule
        "max_epsilon": 1.0,
        "min_epsilon": 0.01,  # openai baselines: 0.01
        "epsilon_decay": 1e-5,  # openai baselines: 1e-7 / 1e-1
    },
    "learner_cfg": {
        "type": "DQNLearner",
        "backbone": {},
        "head": {
            "type": "C51DuelingMLP",
            "configs": {
                "hidden_sizes": [128, 64],
                "use_noisy_net": False,
                "v_min": -300,
                "v_max": 300,
                "atom_size": 1530,
                "output_activation": identity,
            },
        },
        "optim_cfg": {"lr_dqn": 1e-4, "weight_decay": 1e-7, "adam_eps": 1e-8},
    },
}
| 29.583333 | 70 | 0.560563 |
02a1a95cc7c83e93ec4d01be5426e8b7701bef90 | 521 | py | Python | user/migrations/0011_auto_20200706_2345.py | sa-y-an/Qriosity2.0 | f0a46533881a6a7f8cd548eadbc72570396b1141 | [
"Apache-2.0"
] | null | null | null | user/migrations/0011_auto_20200706_2345.py | sa-y-an/Qriosity2.0 | f0a46533881a6a7f8cd548eadbc72570396b1141 | [
"Apache-2.0"
] | 2 | 2020-06-30T16:28:26.000Z | 2020-07-25T21:35:31.000Z | user/migrations/0011_auto_20200706_2345.py | sa-y-an/Qriosity2.0 | f0a46533881a6a7f8cd548eadbc72570396b1141 | [
"Apache-2.0"
] | 4 | 2021-06-16T09:53:15.000Z | 2021-09-18T07:40:31.000Z | # Generated by Django 3.0.7 on 2020-07-06 18:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``Solved.player`` to ``gamer`` and relax ``level_on``."""
    # Must run after the migration that created the Solved model.
    dependencies = [
        ('user', '0010_solved'),
    ]
    operations = [
        # Pure column rename: player -> gamer.
        migrations.RenameField(
            model_name='solved',
            old_name='player',
            new_name='gamer',
        ),
        # level_on: allow blank in forms, default to 0.
        migrations.AlterField(
            model_name='solved',
            name='level_on',
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
| 21.708333 | 61 | 0.556622 |
dff6bd171abec46756ab8e3e679d7b052224f7d8 | 942 | py | Python | test_suite.py | tdcosim/TDcoSim | 0fd0cd1eea1136c82c9de982a88ca640e5e6a935 | [
"BSD-3-Clause"
] | 18 | 2019-06-21T17:43:17.000Z | 2022-02-27T21:14:50.000Z | test_suite.py | alkaidone/TDcoSim | 19519d54550bc68b28e43f95495a81aa2ef4164f | [
"BSD-3-Clause"
] | 33 | 2019-09-26T17:14:58.000Z | 2022-02-02T20:28:34.000Z | test_suite.py | alkaidone/TDcoSim | 19519d54550bc68b28e43f95495a81aa2ef4164f | [
"BSD-3-Clause"
] | 7 | 2019-09-10T20:15:05.000Z | 2022-02-21T05:03:37.000Z | import unittest
import os
import sys
from tests.model.psse.test_psse_model import TestPSSEModel
from tests.model.opendss.model.test_opendss_interface import TestOpenDSSInterface
from tests.model.opendss.model.pvderaggregation.model.test_pvder_aggregated_model import TestPVDERAggregatedModel
from tests.model.opendss.model.pvderaggregation.model.test_pvder_model import TestPVDERModel
def suite():
    """Build the unittest suite of all model-level test cases.

    TODO: Include the procedure test
    """
    cases = (
        TestPSSEModel,
        TestOpenDSSInterface,
        TestPVDERAggregatedModel,
        TestPVDERModel,
    )
    bundle = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in cases:
        bundle.addTest(loader.loadTestsFromTestCase(case))
    return bundle
if __name__ == '__main__':
    # Execute the suite with the plain text console runner.
    unittest.TextTestRunner().run(suite())
| 28.545455 | 113 | 0.809979 |
cbb63db27f0c56ff2b879c1fed2b2523e3f70970 | 1,771 | py | Python | TEST/progress_bar.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | TEST/progress_bar.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | TEST/progress_bar.py | louisyoungx/tcp_transfer_server | e6f0e639a884caa65daa218bc32b9ef7711d6d31 | [
"MIT"
] | null | null | null | # import time
# import datetime
#
#
# class Progress(object):
# startTime = time.time()
# left_sign = '█'
# # left_sign = '░'
# # right_sign = '-'
# right_sign = ' '
# lens = 20
# delay = 0.05
#
# def __init__(self, total, name='Progress'):
# self.count = 0
# self.total = total
# self.name = name
# self.mutiple = self.lens / self.total
#
# def update(self):
# self.count += 1
# self.progress = int(self.count*self.mutiple)
# self.percent = self.progress*int(100/self.lens)
# percentChar = str(self.percent) + "%"
# doneSign = self.progress*self.left_sign
# dontSign = (self.lens-self.progress)*self.right_sign
# leftTime = self.getLeftTime()
# print("\r{}: {:<4} |{}{}| [{}/{}]({})".format(
# self.name, percentChar, doneSign, dontSign, self.count, self.total, leftTime), end="", flush=True)
#
# def done(self):
# print("\r{}: {:<4} |{}{}| [{}/{}]({})".format(
# self.name, '100%', self.lens*self.left_sign, '', self.count, self.total, '00:00:00'), flush=True)
#
# def getNowTime(self):
# return int(time.time() - self.startTime)
#
# def getLeftTime(self):
# nowTime = self.getNowTime()
# leftTimeSecs = int(nowTime/(self.percent/100)) - nowTime if self.percent > 0 else 0
# leftTime = str(datetime.timedelta(seconds=leftTimeSecs))
# leftTime = leftTime if len(leftTime) > 7 else '0' + leftTime
# return leftTime if leftTimeSecs > 0 else '00:00:00'
#
#
# def progress(num):
# p = Progress(num, "Start")
# for i in range(num):
# p.update()
# time.sleep(0.5)
# p.done()
#
# if __name__ == "__main__":
# progress(10) | 32.796296 | 112 | 0.549972 |
9bdc5311eb4d2c465cadab88d5d2411167eb711d | 577 | py | Python | flask/flask_fundamentals/hello_flask/test.py | fatimaalheeh/python_stack | 9ba84e6dc030a65494f105152a97f0a38aa2e4f3 | [
"MIT"
] | null | null | null | flask/flask_fundamentals/hello_flask/test.py | fatimaalheeh/python_stack | 9ba84e6dc030a65494f105152a97f0a38aa2e4f3 | [
"MIT"
] | null | null | null | flask/flask_fundamentals/hello_flask/test.py | fatimaalheeh/python_stack | 9ba84e6dc030a65494f105152a97f0a38aa2e4f3 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)  # shared application object; the route decorators below register on it
@app.route('/')
def hello_world():
    """Default route: respond with a fixed plain-text greeting."""
    return 'hello!'
@app.route('/success/<name>')  # the separator before <name> can be any character: -, $, ., ...
def success(name):
    """Greet the caller using the value captured from the URL path."""
    # Removed the leftover debug print(name); it only polluted the server log.
    return 'Hello, ' + name
@app.route('/page')#request HTML page
def pagego():
    """Render the static index.html template with no context."""
    return render_template('index.html')
@app.route('/addpage')
def index():
    """Render index.html with the template variables ``phrase`` and ``times``."""
    return render_template("index.html", phrase="hello", times=5)
if __name__=="__main__":
    # Development server only; debug=True enables the reloader/debugger.
    # (Dropped the trailing extraction artifact that made this line invalid.)
    app.run(debug=True)
c07a446e3cf6dc6938e22665b2d01c689ff6ce6f | 495 | py | Python | menus/typeIDs.py | fsanges/neMenuManager | 733a281b1e0217ff24bc2fe9adf74c97a4715a2b | [
"Apache-2.0"
] | 1 | 2021-01-28T05:11:55.000Z | 2021-01-28T05:11:55.000Z | menus/typeIDs.py | fsanges/neMenuManager | 733a281b1e0217ff24bc2fe9adf74c97a4715a2b | [
"Apache-2.0"
] | null | null | null | menus/typeIDs.py | fsanges/neMenuManager | 733a281b1e0217ff24bc2fe9adf74c97a4715a2b | [
"Apache-2.0"
] | null | null | null | ###########################
## COMMON
# Identifiers shared across node menus.  NOTE(review): the consumer of
# these constants lives outside this file -- presumably a radial/marking
# menu manager; confirm before changing any value.
COMMONNODENAME = None
UTILS_MENUNAME = "UTILS"
UTILSID = 1
UTILS_ISRADIAL = True  # entry is shown on the radial menu
UTILS_RADIALPOS = "N"  # radial slot given as a compass direction
COPYID = 10
COPY_MENUNAME = "copyNodes"
PASTEID = 11
PASTE_MENUNAME = "pasteNodes"
###########################
## HERMITE
# Menu entries specific to the jd_hermiteArrayCrv node type.
HA_NODENAME = "jd_hermiteArrayCrv"
HASOUTH = 100
HASOUTH_MENUNAME = "Create OutputJoints"
HASOUTH_ISRADIAL = True
HASOUTH_RADIALPOS = "S"
HANORTH = 101
HANORTH_MENUNAME = "TEST"  # placeholder label -- TODO confirm intent
HANORTH_ISRADIAL = True
HANORTH_RADIALPOS = "N"
| 17.068966 | 40 | 0.674747 |
5c617bb8021316e0a627997fab45f2d1e7fddf5b | 10,396 | py | Python | notify/drivers/sfdc.py | boris-42/notify | d13f4840f1c6f8b888ea906c107e37f2607872b1 | [
"Apache-2.0"
] | 1 | 2016-12-06T08:24:58.000Z | 2016-12-06T08:24:58.000Z | notify/drivers/sfdc.py | boris-42/notify | d13f4840f1c6f8b888ea906c107e37f2607872b1 | [
"Apache-2.0"
] | 21 | 2016-12-06T05:27:34.000Z | 2016-12-30T16:28:22.000Z | notify/drivers/sfdc.py | boris-42/notify | d13f4840f1c6f8b888ea906c107e37f2607872b1 | [
"Apache-2.0"
] | 3 | 2016-12-05T09:17:16.000Z | 2017-01-10T12:15:36.000Z | # Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from xml.dom import minidom
import requests
from requests.packages.urllib3 import exceptions as urllib_exc
from notify import driver
# Silence urllib3's InsecureRequestWarning for the HTTPS calls made below.
requests.packages.urllib3.disable_warnings(urllib_exc.InsecureRequestWarning)
# Module-level logger used by every class in this driver.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class OAuth2(object):
    """Username/password authentication against Salesforce.

    Two mechanisms are supported: the REST OAuth2 password grant and, when
    an organization id is configured, a SOAP partner-API login.
    """
    def __init__(self,
                 client_id,
                 client_secret,
                 username,
                 password,
                 auth_url=None,
                 organizationId=None):
        # Default to the public Salesforce login endpoint.
        self.auth_url = auth_url or "https://login.salesforce.com"
        self.client_id = client_id
        self.client_secret = client_secret
        self.username = username
        self.password = password
        self.organization = organizationId
    def authenticate_soap(self):
        """Log in via the SOAP partner API; return token + instance URL."""
        LOG.debug("Making SFDC SOAP auth for {}".format(self.username))
        # Build the SOAP envelope with minidom: Header carries CallOptions
        # and LoginScopeHeader, Body carries the login credentials.
        doc = minidom.Document()
        env = doc.appendChild(doc.createElement("soapenv:Envelope"))
        env.setAttribute("xmlns:soapenv",
                         "http://schemas.xmlsoap.org/soap/envelope/")
        env.setAttribute("xmlns:urn", "urn:partner.soap.sforce.com")
        # (tag, children) trees; serialized below in insertion order.
        head = ("Header", [("CallOptions", [("client", "RestForce"),
                                            ("defaultNamespace", "sf")]),
                           ("LoginScopeHeader", [("organizationId",
                                                  self.organization)])])
        body = ("Body", [("login", [("username", self.username),
                                    ("password", self.password)])])
        for name1, nested1 in head, body:
            e1 = env.appendChild(doc.createElement("soapenv:" + name1))
            for name2, nested2 in nested1:
                e2 = e1.appendChild(doc.createElement("urn:" + name2))
                for name3, value in nested2:
                    e3 = e2.appendChild(doc.createElement("urn:" + name3))
                    e3.appendChild(doc.createTextNode(value))
        envelope = doc.toxml(encoding="utf-8").decode("utf-8")
        url = "{}/services/Soap/u/36.0".format(self.auth_url)
        headers = {"Charset": "UTF-8",
                   "SOAPAction": "login",
                   "Content-Type": "text/xml"}
        resp = requests.post(url, envelope, verify=None, headers=headers)
        LOG.debug(("SFDC OAuth2 SOAP Response "
                   "({}): {}").format(resp.status_code, resp.text))
        resp.raise_for_status()
        # Extract the session id from the XML response (None when absent).
        resp_xml = minidom.parseString(resp.text)
        elements = resp_xml.getElementsByTagName("sessionId")
        token = elements and elements[0].firstChild.nodeValue or None
        return {"access_token": token, "instance_url": self.auth_url}
    def authenticate_rest(self):
        """Log in via the OAuth2 password grant; return the token payload."""
        LOG.debug("Making SFDC REST auth for {}".format(self.client_id))
        data = {"grant_type": "password",
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "username": self.username,
                "password": self.password}
        url = "{}/services/oauth2/token".format(self.auth_url)
        resp = requests.post(url, data=data, verify=None)
        LOG.debug(("SFDC OAuth2 REST Response "
                   "({}): {}").format(resp.status_code, resp.text))
        resp.raise_for_status()
        return resp.json()
    def authenticate(self):
        """Dispatch to SOAP auth when an organization id is set, else REST."""
        if self.organization:
            return self.authenticate_soap()
        return self.authenticate_rest()
class Client(object):
    """Thin SFDC REST client.

    Lazily authenticates on first use and retries a request once when the
    cached session token has expired.
    """
    def __init__(self, oauth2, base_path="/services/data/v36.0"):
        self.oauth2 = oauth2
        self.base_path = base_path
        self.path = "{}/sobjects".format(base_path)
        self.access_token = None
        self.instance_url = None
    def authenticate(self):
        """Fetch and cache a fresh access token and instance URL."""
        result = self.oauth2.authenticate()
        self.access_token = result["access_token"]
        self.instance_url = result["instance_url"]
    def _request(self, method, url, headers=None, repeat=True, **kwargs):
        """Perform an authenticated request against the SFDC API.

        Returns a ``(status_code, data, sfdc_error)`` triple where
        ``sfdc_error`` is ``(errorCode, message)`` or ``None``.  On a
        transport failure all three are ``None``.  An expired session is
        retried once; ``repeat`` guards against retry loops.
        """
        if not self.access_token:
            self.authenticate()
        headers = headers or {}
        headers["Authorization"] = "Bearer {}".format(self.access_token)
        if method in ("POST", "PUT", "PATCH"):
            headers["Content-Type"] = "application/json"
        request_url = self.instance_url + url
        LOG.debug("SFDC {} Request: {} {} {}".format(method, url, headers,
                                                     kwargs))
        # Broad catch is deliberate: this is the transport boundary and the
        # failure is logged before the sentinel triple is returned.
        try:
            resp = requests.request(
                method, request_url, headers=headers, verify=None, **kwargs)
        except Exception as e:
            LOG.error("SFDC Request has failed: {}: {}".format(type(e), e))
            return None, None, None
        LOG.debug("SFDC ({}) Response: {}".format(resp.status_code,
                                                  resp.text))
        if not resp.text:
            return resp.status_code, {}, None
        try:
            data = resp.json()
        except Exception as e:
            LOG.error("SFDC Response JSON error: {}: {}".format(type(e), e))
            return resp.status_code, {}, None
        # NOTE(maretskiy): this simplifies further error checks
        # isinstance() rather than type() ==: also accepts list subclasses.
        if data and isinstance(data, list) and "errorCode" in data[0]:
            sfdc_error = (data[0]["errorCode"], data[0]["message"])
            LOG.error("SFDC ({}) Response: {}".format(resp.status_code, data))
        else:
            sfdc_error = None
        if repeat and sfdc_error and sfdc_error[0] == "INVALID_SESSION_ID":
            LOG.debug("SFDC token has expired, authenticating...")
            self.authenticate()
            return self._request(method, url, headers=headers, repeat=False,
                                 **kwargs)
        return resp.status_code, data, sfdc_error
    def create_feeditem(self, data):
        """POST a new FeedItem sobject."""
        url = "{}/FeedItem".format(self.path)
        return self._request("POST", url, data=json.dumps(data))
    def create_case(self, data):
        """POST a new Case sobject."""
        url = "{}/Case".format(self.path)
        return self._request("POST", url, data=json.dumps(data))
    def update_case(self, id_, data):
        """PATCH an existing Case by id."""
        url = "{}/Case/{}".format(self.path, id_)
        return self._request("PATCH", url, data=json.dumps(data))
    def get_case(self, id_):
        """GET an existing Case by id."""
        return self._request("GET", "{}/Case/{}".format(self.path, id_))
class Driver(driver.Driver):
    """SalesForce notification driver."""
    # JSON schema validating this driver's configuration section.
    CONFIG_SCHEMA = {
        "$schema": "http://json-schema.org/draft-04/schema",
        "type": "object",
        "properties": {
            "username": {"type": "string"},
            "password": {"type": "string"},
            "client_id": {"type": "string"},
            "client_secret": {"type": "string"},
            "auth_url": {"type": "string"},
            "organization_id": {"type": "string"},
        },
        "required": ["username", "password", "client_id", "client_secret"],
        "additionalProperties": False
    }
    # Maps incoming severity labels to SFDC Alert_Priority picklist values.
    SEVERITY = {
        "OK": "060 Informational",
        "INFO": "060 Informational",
        "UNKNOWN": "070 Unknown",
        "WARNING": "080 Warning",
        "CRITICAL": "090 Critical",
        "DOWN": "090 Critical"}
    def __init__(self, config):
        """Build the authenticated SFDC client from the validated config."""
        super(Driver, self).__init__(config)
        oauth2 = OAuth2(username=config["username"],
                        password=config["password"],
                        client_id=config["client_id"],
                        client_secret=config["client_secret"],
                        auth_url=config.get("auth_url"),
                        organizationId=config.get("organization_id"))
        self.client = Client(oauth2)
    def notify(self, payload):
        """Create (or update a duplicate) SFDC Case and attach a FeedItem.

        Returns True only when the final FeedItem POST succeeds.
        """
        region = payload["region"]
        priority = self.SEVERITY[payload["severity"]]
        # Stable alert identity: region|what|who (used for de-duplication).
        payload_id = "|".join([region, payload["what"], payload["who"]])
        if payload.get("affected_hosts"):
            subject = payload_id + "|" + ",".join(payload["affected_hosts"])
        else:
            subject = payload_id
        case = {"Subject": subject,
                "Description": payload["description"],
                "IsMosAlert__c": "true",
                "Alert_ID__c": payload_id,
                "Environment2__c": region,
                "Alert_Priority__c": priority,
                "Alert_Host__c": payload["who"],
                "Alert_Service__c": payload["what"]}
        item = {"Description": payload["description"],
                "Alert_Id": payload_id,
                "Cloud_ID": region,
                "Alert_Priority": priority,
                "Status": "New"}
        code, resp, sfdc_error = self.client.create_case(case)
        if resp and code in (200, 201):
            case_id = resp["id"]
        elif sfdc_error and sfdc_error[0] == "DUPLICATE_VALUE":
            # SFDC reports an existing case via DUPLICATE_VALUE; the
            # existing case id is the last token of the error message.
            LOG.info("SFDC ({}): Case is a duplicate: {}".format(code, resp))
            # NOTE(maretskiy): this parsing looks ugly, ideas?
            case_id = resp[0]["message"].strip().split(" ")[-1]
            code, resp, error = self.client.get_case(case_id)
            if code not in (200, 201, 202, 204):
                return False
            # Mirror the live case's status/subject before updating it.
            item["Status"] = resp["Status"]
            case["Subject"] = resp["Subject"]
            code, resp, error = self.client.update_case(case_id, data=case)
            if code not in (200, 201, 202, 204):
                return False
        else:
            LOG.error("SFDC ({}) Unexpected Case: {}".format(code, resp))
            return False
        # Attach the alert details to the case as a feed item.
        body = json.dumps(item, sort_keys=True, indent=2)
        code, resp, error = self.client.create_feeditem(
            {"ParentId": case_id, "Visibility": "AllUsers", "Body": body})
        return code in (200, 201)
| 37.803636 | 78 | 0.564833 |
92a8f3b12ecd974ca347db051b9ab338d4d858a4 | 1,295 | py | Python | freeze_graph.py | smartinfrastructurelab/yolov3_marking | 7485695e1d168e1550d2b7beeb470088f716ab65 | [
"MIT"
] | null | null | null | freeze_graph.py | smartinfrastructurelab/yolov3_marking | 7485695e1d168e1550d2b7beeb470088f716ab65 | [
"MIT"
] | null | null | null | freeze_graph.py | smartinfrastructurelab/yolov3_marking | 7485695e1d168e1550d2b7beeb470088f716ab65 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : freeze_graph.py
# Author : YunYang1994
# Created date: 2019-03-20 15:57:33
# Description :
#
#================================================================
import tensorflow as tf
from core.yolov3 import YOLOV3
from core.config import cfg
# Frozen-graph output path and the checkpoint to freeze (TF1 graph mode).
pb_file = "./yolov3_mark_no_manualFlip_no_codeFlip.pb"
ckpt_file = cfg.TEST.WEIGHT_FILE
# Graph nodes to keep: the input placeholder plus the three YOLOv3
# prediction branches (sbbox/mbbox/lbbox -- presumably small/medium/large
# scale outputs; confirm against core.yolov3).
output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')
# Build the network in inference mode (trainable=False).
model = YOLOV3(input_data, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)
# Replace variables with constants so the exported graph is self-contained.
converted_graph_def = tf.graph_util.convert_variables_to_constants(sess,
                            input_graph_def  = sess.graph.as_graph_def(),
                            output_node_names = output_node_names)
with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
| 30.116279 | 109 | 0.641699 |
f2f1ba4eeb291db85d118c86c2e8bf2638aa983a | 1,714 | py | Python | mod/units/eat_handler.py | HeraldStudio/wechat | b023b7460a6b4284ea782333e13f24d169ddaff4 | [
"MIT"
] | 1 | 2015-06-28T15:26:52.000Z | 2015-06-28T15:26:52.000Z | mod/units/eat_handler.py | HeraldStudio/wechat | b023b7460a6b4284ea782333e13f24d169ddaff4 | [
"MIT"
] | null | null | null | mod/units/eat_handler.py | HeraldStudio/wechat | b023b7460a6b4284ea782333e13f24d169ddaff4 | [
"MIT"
] | 6 | 2015-03-20T16:36:22.000Z | 2021-08-28T07:58:18.000Z | # -*- coding: utf-8 -*-
# @Date : 2015-05-28
import tornado.web
from ..models.eat import Eat
from config import eat_token
import datetime,time
from sqlalchemy.orm.exc import NoResultFound
class EatHandler(tornado.web.RequestHandler):
    """Tornado handler for the daily 'eat' status record.

    NOTE: this module uses Python 2 syntax (``except Exception,e`` and the
    ``print`` statement) and must stay on a Python 2 interpreter.
    """
    @property
    def db(self):
        # SQLAlchemy session shared via the tornado Application object.
        return self.application.db
    def get(self):
        # Render the submission form.
        self.render('eat.html')
    def post(self):
        """Create or update today's Eat row; requires the shared token."""
        status = self.get_argument('status',default = None)
        token = self.get_argument('token',default = None)
        if not status or not token:
            # Missing fields; the response strings are user-facing Chinese.
            self.write('请填写完整信息哦')
            self.finish()
        else:
            if not token==eat_token:
                self.write('token不正确')
                self.finish()
            else:
                # Records are keyed by calendar day; 'time' also stores the hour.
                day = time.strftime('%Y-%m-%d',time.localtime(time.time()))
                today = time.strftime('%Y-%m-%d-%H',time.localtime(time.time()))
                try:
                    # Update today's existing row when present...
                    item = self.db.query(Eat).filter(Eat.day == day).one()
                    item.status = status
                    item.time = today
                except NoResultFound:
                    # ...otherwise stage a new row for insertion.
                    eat = Eat(
                        day = day,
                        time = today,
                        status = status)
                    self.db.add(eat)
                try:
                    self.db.commit()
                    self.write('success')
                    self.finish()
                except Exception,e:
                    # Roll back on any commit failure (logged to stdout).
                    print str(e)
                    self.db.rollback()
                    self.write('发布失败T T')
                    self.finish()
                # Session is closed on both the success and failure paths.
                self.db.close()
| 32.961538 | 81 | 0.446908 |
a3aca4d7ee9aae5bc4c2a6fab6a68d398be58be0 | 87 | py | Python | act/sampledownloader.py | RiS3-Lab/FICS- | 82c8abef52ca943946b7e82a16998cf67f1d2049 | [
"Apache-2.0"
] | 37 | 2020-12-04T09:15:50.000Z | 2022-03-28T13:33:29.000Z | act/sampledownloader.py | RiS3-Lab/FICS- | 82c8abef52ca943946b7e82a16998cf67f1d2049 | [
"Apache-2.0"
] | 7 | 2020-12-03T08:14:31.000Z | 2021-11-24T14:14:03.000Z | act/sampledownloader.py | RiS3-Lab/FICS- | 82c8abef52ca943946b7e82a16998cf67f1d2049 | [
"Apache-2.0"
] | 19 | 2020-12-04T08:43:31.000Z | 2022-03-28T13:33:27.000Z |
from act import Act
class SampleDownloader(Act):
    """Act subclass for downloading samples; the hook is not implemented yet."""
    def start(self):
        # Intentional no-op placeholder for the Act 'start' hook.
        pass
| 9.666667 | 28 | 0.643678 |
17abf275726b66bfe331eb8b87bb16994c6426fc | 12,512 | py | Python | archive/QT_GUI.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | null | null | null | archive/QT_GUI.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | null | null | null | archive/QT_GUI.py | MikeDT/bdm-whack-a-mole | 33b52008b2fae231b604c0af959df57e25dee61f | [
"MIT"
] | 1 | 2021-09-26T14:12:20.000Z | 2021-09-26T14:12:20.000Z | # -*- coding: utf-8 -*-
"""
main
====
Typical GUI screen, adapted from prior pyqt work
Attributes:
na
Todo:
* clean up docstrings (ideally to sphinx format, but to numpy/scipy
minimally)
Related projects:
Adapted from initial toy project https://github.com/sonlexqt/whack-a-mole
which is under MIT license
@author: DZLR3
"""
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import random
import sys
from os import listdir
from wam.game import GameManager
import pygame
from time import time
class QT_GUI(QtWidgets.QMainWindow):
    """Main experiment window for the BDM Whack-A-Mole study.

    Wraps a Qt-Designer .ui file whose tabs walk the participant through
    introduction, disclaimer (consent), instructions, game launch and the
    debrief/save screens, logging consent + demographics to a CSV file.
    """

    def __init__(self, *args, **kwargs):
        """Load the .ui file, wire up widgets and show the window."""
        super().__init__(*args, **kwargs)
        # File locations
        self.ui_file_loc = 'ui\\QT_Screen.ui'
        self.intro_text_file_loc = 'text\\Introduction.txt'
        self.disclaimer_text_file_loc = 'text\\Disclaimer.txt'
        self.instruct_text_file_loc = 'text\\Instructions.txt'
        self.debrief_text_file_loc = 'text\\Debrief.txt'
        # Import the QT designer UI and name the window
        self.window = uic.loadUi(self.ui_file_loc, self)
        self.setWindowTitle('BDM Whack-A-Mole')
        # Import images & set front screen image
        self.pixmap_dict = {}
        self.set_image_dict()
        self.window.front_screen.setPixmap(self.pixmap_dict["Front_Screen"])
        # Adjust the combobox content to support the valid values
        self.set_gender_types()
        self.set_edu_types()
        # Connect the buttons and tabs to the relevant functions
        self.window.back_btn.clicked.connect(self.back_button_clicked)
        self.window.next_btn.clicked.connect(self.next_button_clicked)
        self.window.launch_btn.clicked.connect(self.launch_button_clicked)
        self.window.save_btn.clicked.connect(self.save_button_clicked)
        self.window.tabs.currentChanged.connect(self.refresh_nav_buttons)
        self.window.tabs.currentChanged.connect(self.check_disclaimer_nav)
        # Open the 'database' table in append mode; it stays open for the
        # session and is closed in save_button_clicked on a valid save.
        self.csv_user_log_db = open('logs\\csv_user_log_db.csv', 'a')
        # Import all the text from external sources (simplifies future
        # changes) and fill the read-only text boxes.
        self.get_set_text()
        # Set the default visibility for the nav buttons and show the screen
        self.launched = False
        self.window.back_btn.hide()
        self.window.launch_btn.hide()
        self.window.save_btn.hide()
        self.window.error_textbox.hide()
        self.window.show()

    def set_image_dict(self):
        """
        Import the front-screen image into the pixmap_dict dictionary for
        use in the gui.
        """
        files = listdir('images')
        for file in files:
            if file == 'Front_Screen.png':
                pixmap = QPixmap('images\\' + file)
                pixmap = pixmap.scaled(1001, 811,
                                       Qt.KeepAspectRatio,
                                       Qt.SmoothTransformation)
                self.pixmap_dict['Front_Screen'] = pixmap
            else:
                # NOTE(review): this fires for *any* file that is not
                # Front_Screen.png, pngs included -- message is misleading.
                print('FYI - Non png file detected in image folder - ', file)

    def set_gender_types(self):
        """
        Sets the gender types for the combobox. Presumed to be relatively
        static, but could be altered to support imports for more non-code
        adjustability
        """
        gender_list = ['', 'Prefer Not To Say', 'Female', 'Male', 'Other']
        for gender in gender_list:
            self.window.gender_combobox.addItem(gender)

    def set_edu_types(self):
        """
        Sets the education types for the combobox. Presumed to be relatively
        static, but could be altered to support imports for more non-code
        adjustability
        """
        education_list = ['', 'High School', 'Bachelors', 'Masters', 'PhD',
                          'Other']
        for education in education_list:
            self.window.edu_combobox.addItem(education)

    def set_cond_all(self):
        """
        tbc
        """
        pass

    def back_button_clicked(self):
        """
        Moves to the previous tab and refreshes which navigation buttons
        are visible for that tab.
        """
        self.window.tabs.setCurrentIndex(self.window.tabs.currentIndex() - 1)
        self.window.next_btn.show()
        self.window.back_btn.show()
        self.window.launch_btn.hide()
        if self.window.tabs.currentIndex() == 0:
            self.window.back_btn.hide()

    def next_button_clicked(self):
        """
        Moves to the next tab and refreshes which navigation buttons are
        visible for that tab (launch on tab 3, save on tab 4).
        """
        self.window.tabs.setCurrentIndex(self.window.tabs.currentIndex() + 1)
        self.window.next_btn.show()
        self.window.back_btn.show()
        if self.window.tabs.currentIndex() == 3:
            self.window.next_btn.hide()
            self.show_launch_check()
        if self.window.tabs.currentIndex() == 4:
            self.window.next_btn.hide()
            self.show_save_check()

    def check_disclaimer_nav(self):
        """
        Ensures navigation cannot happen past the disclaimer screen unless
        consent has been provided via the consent_checkbox
        """
        if self.window.consent_checkbox.isChecked() is False:
            if self.window.tabs.currentIndex() > 1:
                # Snap back to the disclaimer tab until consent is given.
                self.window.tabs.setCurrentIndex(1)
                self.window.launch_btn.hide()
                self.window.back_btn.show()
                self.window.next_btn.show()
                self.refresh_nav_buttons()
            else:
                self.refresh_nav_buttons()
        else:
            self.refresh_nav_buttons()

    def refresh_nav_buttons(self):
        """
        Refreshes the navigation buttons upon tab clicks to ensure only the
        relevant buttons are shown
        """
        if self.window.tabs.currentIndex() == 0:
            self.window.launch_btn.hide()
            self.window.back_btn.hide()
        elif self.window.tabs.currentIndex() == 3:
            self.show_launch_check()
            self.window.next_btn.hide()
            self.window.back_btn.show()
        else:
            self.window.next_btn.show()
            self.window.back_btn.show()
            self.show_debrief_check()
            self.window.launch_btn.hide()

    def check_task_complete(self):
        """
        Checks all activities, demographics etc have been submitted prior to
        allowing the participant to save and exit.

        Returns:
            (complete, error_message) tuple; ``error_message`` lists every
            failed requirement for display to the user.
        """
        complete = True
        error_message = 'The following errors are preventing saving: '
        # Check all the required attributes have been captured; each failed
        # check clears the flag and appends its reason.
        if len(self.window.username_textbox.text()) == 0:
            complete = False
            error_message += 'username is blank, '
        if not self.window.consent_checkbox.isChecked():
            complete = False
            error_message += 'consent was not provided, '
        if not self.window.age_spinbox.value() > 17:
            complete = False
            error_message += 'must be an adult (18+) to participate, '
        if str(self.window.edu_combobox.currentText()) == '':
            complete = False
            error_message += 'education level was not provided, '
        if str(self.window.gender_combobox.currentText()) == '':
            complete = False
            error_message += 'gender was not provided, '
        return (complete, error_message)

    def get_save_details(self):
        """
        Get the all the details from the experiment (incl. demographics and
        consent), and cast them into a csv ready string, then return the
        content as a list
        """
        self.username = str(self.window.username_textbox.text())
        self.consent = str(self.window.consent_checkbox.isChecked())
        self.age = str(self.window.age_spinbox.value())
        self.education = str(self.window.edu_combobox.currentText())
        self.gender = str(self.window.gender_combobox.currentText())
        save_details = [(self.username + ', ' +
                         self.consent + ', ' +
                         self.age + ', ' +
                         self.education + ', ' +
                         self.gender)]
        return save_details

    def show_launch_check(self):
        """
        Show the launch button only once consent has been provided.
        """
        if self.window.consent_checkbox.isChecked():
            self.window.launch_btn.show()
        else:
            self.window.launch_btn.hide()

    def show_save_check(self):
        """
        Show the save button only once consent has been provided and the
        game has been launched.
        """
        if (self.window.consent_checkbox.isChecked() * self.launched):
            self.window.save_btn.show()
        else:
            self.window.save_btn.hide()

    def show_debrief_check(self):
        """
        Reveal the debrief text only once consent has been provided and the
        game has been launched.
        """
        if (self.window.consent_checkbox.isChecked() * self.launched):
            self.window.debrief_textbox.setText(self.debrief_text)

    def launch_button_clicked(self):
        """
        Marks the experiment as launched, swaps the launch button for the
        next button and starts the game.
        """
        self.launched = True
        self.launch_btn.hide()
        self.next_btn.show()
        self.launch_game()

    def launch_game(self):
        """
        Runs the pygame whack-a-mole session under a username_timestamp id.
        """
        pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=512)
        pygame.init()
        usr_timestamp = (str(self.window.username_textbox.text()) + '_' +
                         str(time()))
        # Run the main loop
        my_game = GameManager(usr_timestamp)
        my_game.play_game()
        # Exit the game if the main loop ends
        pygame.quit()

    def save_button_clicked(self):
        """
        Saves the demographics to csv, closes the csv and exits the
        application; otherwise surfaces the validation errors.
        """
        results = self.get_save_details()
        (validity, error_message) = self.check_task_complete()
        if validity:
            for result in results:
                self.csv_user_log_db.write(result)
                self.csv_user_log_db.write('\n')
            self.csv_user_log_db.close()
            # NOTE(review): constructing a throwaway QApplication here looks
            # suspect -- presumably intended to exit the existing app loop.
            sys.exit(QtWidgets.QApplication([]).exec_())
        else:
            self.window.error_textbox.show()
            self.window.error_textbox.setText(error_message)
            self.window.error_textbox.setReadOnly(True)

    def get_set_text(self):
        """
        Gets the text from the file locations and embeds it into the gui
        text boxes (made read only to prevent user edits)
        """
        # Context managers close the handles (the bare open().read() calls
        # previously leaked them).
        with open(self.intro_text_file_loc, 'r') as text_file:
            self.intro_text = text_file.read()
        self.window.intro_textbox.setText(self.intro_text)
        self.window.intro_textbox.setReadOnly(True)
        with open(self.disclaimer_text_file_loc, 'r') as text_file:
            self.disclaimer_text = text_file.read()
        self.window.disclaimer_textbox.setText(self.disclaimer_text)
        self.window.disclaimer_textbox.setReadOnly(True)
        with open(self.instruct_text_file_loc, 'r') as text_file:
            self.instruction_text = text_file.read()
        self.window.instr_textbox.setText(self.instruction_text)
        self.window.instr_textbox.setReadOnly(True)
        with open(self.debrief_text_file_loc, 'r') as text_file:
            self.debrief_text = text_file.read()
        self.window.debrief_textbox.setText('Experiment not yet complete...')
        # Bug fix: previously set instr_textbox read-only a second time and
        # left the debrief textbox editable.
        self.window.debrief_textbox.setReadOnly(True)
| 37.573574 | 79 | 0.618047 |
88e26be885932305fd1477f5ad0bcd6952a67a57 | 31,074 | py | Python | airflow/executors/kubernetes_executor.py | alexlshon/airflow | 8eddc8b5019890a712810b8e5b1185997adb9bf4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-30T17:14:05.000Z | 2021-08-03T13:51:25.000Z | airflow/executors/kubernetes_executor.py | alexlshon/airflow | 8eddc8b5019890a712810b8e5b1185997adb9bf4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2021-06-28T20:57:42.000Z | 2022-02-26T02:11:11.000Z | airflow/executors/kubernetes_executor.py | alexlshon/airflow | 8eddc8b5019890a712810b8e5b1185997adb9bf4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2021-05-21T21:26:34.000Z | 2021-10-05T16:57:57.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
KubernetesExecutor
.. seealso::
For more information on how the KubernetesExecutor works, take a look at the guide:
:ref:`executor:KubernetesExecutor`
"""
import functools
import json
import multiprocessing
import time
from queue import Empty, Queue # pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple
import kubernetes
from dateutil import parser
from kubernetes import client, watch
from kubernetes.client import Configuration, models as k8s
from kubernetes.client.rest import ApiException
from urllib3.exceptions import ReadTimeoutError
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, BaseExecutor, CommandType
from airflow.kubernetes import pod_generator
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.kube_config import KubeConfig
from airflow.kubernetes.kubernetes_helper_functions import create_pod_id
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.models import TaskInstance
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
# Type aliases for the tuples flowing through the executor's internal queues.
# TaskInstance key, command, configuration, pod_template_file
KubernetesJobType = Tuple[TaskInstanceKey, CommandType, Any, Optional[str]]
# key, state, pod_id, namespace, resource_version
KubernetesResultsType = Tuple[TaskInstanceKey, Optional[str], str, str, str]
# pod_id, namespace, state, annotations, resource_version
KubernetesWatchType = Tuple[str, str, Optional[str], Dict[str, str], str]
class ResourceVersion:
    """Singleton for tracking resourceVersion from Kubernetes"""
    _instance = None
    resource_version = "0"
    def __new__(cls):
        # Lazily create the single shared instance on first construction;
        # every later call returns the same object.
        existing = cls._instance
        if existing is None:
            existing = super().__new__(cls)
            cls._instance = existing
        return existing
class KubernetesJobWatcher(multiprocessing.Process, LoggingMixin):
    """Watches for Kubernetes jobs.

    Runs as a separate process, streams pod events from the Kubernetes API
    for pods labelled with this scheduler's job id, and forwards relevant
    status changes onto ``watcher_queue`` for the scheduler to consume.
    """
    def __init__(
        self,
        namespace: Optional[str],
        multi_namespace_mode: bool,
        watcher_queue: 'Queue[KubernetesWatchType]',
        resource_version: Optional[str],
        scheduler_job_id: Optional[str],
        kube_config: Configuration,
    ):
        super().__init__()
        self.namespace = namespace
        self.multi_namespace_mode = multi_namespace_mode
        self.scheduler_job_id = scheduler_job_id
        self.watcher_queue = watcher_queue
        self.resource_version = resource_version
        self.kube_config = kube_config
    def run(self) -> None:
        """Performs watching"""
        kube_client: client.CoreV1Api = get_kube_client()
        if not self.scheduler_job_id:
            raise AirflowException(NOT_STARTED_MESSAGE)
        # Loop forever, restarting the watch from the last seen
        # resource_version whenever the stream ends or times out.
        while True:
            try:
                self.resource_version = self._run(
                    kube_client, self.resource_version, self.scheduler_job_id, self.kube_config
                )
            except ReadTimeoutError:
                # Transient API timeout: back off briefly and retry.
                self.log.warning(
                    "There was a timeout error accessing the Kube API. Retrying request.", exc_info=True
                )
                time.sleep(1)
            except Exception:
                self.log.exception('Unknown error in KubernetesJobWatcher. Failing')
                raise
            else:
                # The watch stream ended normally; resume from where we left off.
                self.log.warning(
                    'Watch died gracefully, starting back up with: last resource_version: %s',
                    self.resource_version,
                )
    def _run(
        self,
        kube_client: client.CoreV1Api,
        resource_version: Optional[str],
        scheduler_job_id: str,
        kube_config: Any,
    ) -> Optional[str]:
        """Stream pod events once and return the last resource_version seen.

        Returns the resource_version of the last processed event so the
        caller can resume the watch, or the value from process_error on an
        ERROR event.
        """
        self.log.info('Event: and now my watch begins starting at resource_version: %s', resource_version)
        watcher = watch.Watch()
        # Only watch pods launched by this scheduler instance.
        kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
        if resource_version:
            kwargs['resource_version'] = resource_version
        if kube_config.kube_client_request_args:
            for key, value in kube_config.kube_client_request_args.items():
                kwargs[key] = value
        last_resource_version: Optional[str] = None
        if self.multi_namespace_mode:
            list_worker_pods = functools.partial(
                watcher.stream, kube_client.list_pod_for_all_namespaces, **kwargs
            )
        else:
            list_worker_pods = functools.partial(
                watcher.stream, kube_client.list_namespaced_pod, self.namespace, **kwargs
            )
        for event in list_worker_pods():
            task = event['object']
            self.log.info('Event: %s had an event of type %s', task.metadata.name, event['type'])
            if event['type'] == 'ERROR':
                return self.process_error(event)
            annotations = task.metadata.annotations
            # Only the annotations needed to rebuild a TaskInstanceKey are
            # forwarded to the scheduler.
            task_instance_related_annotations = {
                'dag_id': annotations['dag_id'],
                'task_id': annotations['task_id'],
                'execution_date': annotations['execution_date'],
                'try_number': annotations['try_number'],
            }
            self.process_status(
                pod_id=task.metadata.name,
                namespace=task.metadata.namespace,
                status=task.status.phase,
                annotations=task_instance_related_annotations,
                resource_version=task.metadata.resource_version,
                event=event,
            )
            last_resource_version = task.metadata.resource_version
        return last_resource_version
    def process_error(self, event: Any) -> str:
        """Process error response"""
        self.log.error('Encountered Error response from k8s list namespaced pod stream => %s', event)
        raw_object = event['raw_object']
        if raw_object['code'] == 410:
            # HTTP 410 Gone: the requested resource_version has been
            # compacted away; restart the watch from scratch.
            self.log.info(
                'Kubernetes resource version is too old, must reset to 0 => %s', (raw_object['message'],)
            )
            # Return resource version 0
            return '0'
        raise AirflowException(
            'Kubernetes failure for %s with code %s and message: %s'
            % (raw_object['reason'], raw_object['code'], raw_object['message'])
        )
    def process_status(
        self,
        pod_id: str,
        namespace: str,
        status: str,
        annotations: Dict[str, str],
        resource_version: str,
        event: Any,
    ) -> None:
        """Process status response"""
        if status == 'Pending':
            if event['type'] == 'DELETED':
                # Pod was deleted before it ever started: ask the scheduler
                # to reschedule the task.
                self.log.info('Event: Failed to start pod %s, will reschedule', pod_id)
                self.watcher_queue.put(
                    (pod_id, namespace, State.UP_FOR_RESCHEDULE, annotations, resource_version)
                )
            else:
                self.log.info('Event: %s Pending', pod_id)
        elif status == 'Failed':
            self.log.error('Event: %s Failed', pod_id)
            self.watcher_queue.put((pod_id, namespace, State.FAILED, annotations, resource_version))
        elif status == 'Succeeded':
            # A None state tells the scheduler to trust the task's own
            # reported state rather than forcing one.
            self.log.info('Event: %s Succeeded', pod_id)
            self.watcher_queue.put((pod_id, namespace, None, annotations, resource_version))
        elif status == 'Running':
            self.log.info('Event: %s is Running', pod_id)
        else:
            self.log.warning(
                'Event: Invalid state: %s on pod: %s in namespace %s with annotations: %s with '
                'resource_version: %s',
                status,
                pod_id,
                namespace,
                annotations,
                resource_version,
            )
class AirflowKubernetesScheduler(LoggingMixin):
    """Airflow Scheduler for Kubernetes.

    Bridges the executor and the Kubernetes API: launches worker pods for
    queued jobs, owns the KubernetesJobWatcher child process, and relays
    pod-status events from the watcher queue onto the result queue.
    """
    def __init__(
        self,
        kube_config: Any,
        task_queue: 'Queue[KubernetesJobType]',
        result_queue: 'Queue[KubernetesResultsType]',
        kube_client: client.CoreV1Api,
        scheduler_job_id: str,
    ):
        super().__init__()
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        # Manager queue so the watcher child process can push events back.
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.scheduler_job_id = scheduler_job_id
        self.kube_watcher = self._make_kube_watcher()
    def _make_kube_watcher(self) -> KubernetesJobWatcher:
        """Create and start a watcher process resuming from the tracked resource_version."""
        resource_version = ResourceVersion().resource_version
        watcher = KubernetesJobWatcher(
            watcher_queue=self.watcher_queue,
            namespace=self.kube_config.kube_namespace,
            multi_namespace_mode=self.kube_config.multi_namespace_mode,
            resource_version=resource_version,
            scheduler_job_id=self.scheduler_job_id,
            kube_config=self.kube_config,
        )
        watcher.start()
        return watcher
    def _health_check_kube_watcher(self):
        """Restart the watcher process if it has died."""
        if self.kube_watcher.is_alive():
            self.log.debug("KubeJobWatcher alive, continuing")
        else:
            self.log.error(
                'Error while health checking kube watcher process. Process died for unknown reasons'
            )
            self.kube_watcher = self._make_kube_watcher()
    def run_next(self, next_job: KubernetesJobType) -> None:
        """
        The run_next command will check the task_queue for any un-run jobs.
        It will then create a unique job-id, launch that job in the cluster,
        and store relevant info in the current_jobs map so we can track the job's
        status
        """
        self.log.info('Kubernetes job is %s', str(next_job))
        key, command, kube_executor_config, pod_template_file = next_job
        dag_id, task_id, execution_date, try_number = key
        if command[0:3] != ["airflow", "tasks", "run"]:
            raise ValueError('The command must start with ["airflow", "tasks", "run"].')
        base_worker_pod = get_base_pod_from_template(pod_template_file, self.kube_config)
        if not base_worker_pod:
            raise AirflowException(
                f"could not find a valid worker template yaml at {self.kube_config.pod_template_file}"
            )
        pod = PodGenerator.construct_pod(
            namespace=self.namespace,
            scheduler_job_id=self.scheduler_job_id,
            pod_id=create_pod_id(dag_id, task_id),
            dag_id=dag_id,
            task_id=task_id,
            kube_image=self.kube_config.kube_image,
            try_number=try_number,
            date=execution_date,
            args=command,
            pod_override_object=kube_executor_config,
            base_worker_pod=base_worker_pod,
        )
        # Reconcile the pod generated by the Operator and the Pod
        # generated by the .cfg file
        self.log.debug("Kubernetes running for command %s", command)
        self.log.debug("Kubernetes launching image %s", pod.spec.containers[0].image)
        # the watcher will monitor pods, so we do not block.
        self.launcher.run_pod_async(pod, **self.kube_config.kube_client_request_args)
        self.log.debug("Kubernetes Job created!")
    def delete_pod(self, pod_id: str, namespace: str) -> None:
        """Deletes POD"""
        try:
            self.log.debug("Deleting pod %s in namespace %s", pod_id, namespace)
            self.kube_client.delete_namespaced_pod(
                pod_id,
                namespace,
                body=client.V1DeleteOptions(**self.kube_config.delete_option_kwargs),
                **self.kube_config.kube_client_request_args,
            )
        except ApiException as e:
            # If the pod is already deleted
            if e.status != 404:
                raise
    def sync(self) -> None:
        """
        The sync function checks the status of all currently running kubernetes jobs.
        If a job is completed, its status is placed in the result queue to
        be sent back to the scheduler.
        :return:
        """
        self.log.debug("Syncing KubernetesExecutor")
        self._health_check_kube_watcher()
        # Drain everything the watcher has produced since the last sync.
        while True:
            try:
                task = self.watcher_queue.get_nowait()
                try:
                    self.log.debug("Processing task %s", task)
                    self.process_watcher_task(task)
                finally:
                    self.watcher_queue.task_done()
            except Empty:
                break
    def process_watcher_task(self, task: KubernetesWatchType) -> None:
        """Process the task by watcher."""
        pod_id, namespace, state, annotations, resource_version = task
        self.log.info(
            'Attempting to finish pod; pod_id: %s; state: %s; annotations: %s', pod_id, state, annotations
        )
        key = self._annotations_to_key(annotations=annotations)
        if key:
            self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
            self.result_queue.put((key, state, pod_id, namespace, resource_version))
    def _annotations_to_key(self, annotations: Dict[str, str]) -> Optional[TaskInstanceKey]:
        """Rebuild a TaskInstanceKey from the pod's identifying annotations."""
        self.log.debug("Creating task key for annotations %s", annotations)
        dag_id = annotations['dag_id']
        task_id = annotations['task_id']
        try_number = int(annotations['try_number'])
        execution_date = parser.parse(annotations['execution_date'])
        return TaskInstanceKey(dag_id, task_id, execution_date, try_number)
    def _flush_watcher_queue(self) -> None:
        """Discard any remaining watcher events during shutdown."""
        self.log.debug('Executor shutting down, watcher_queue approx. size=%d', self.watcher_queue.qsize())
        while True:
            try:
                task = self.watcher_queue.get_nowait()
                # Ignoring it since it can only have either FAILED or SUCCEEDED pods
                self.log.warning('Executor shutting down, IGNORING watcher task=%s', task)
                self.watcher_queue.task_done()
            except Empty:
                break
    def terminate(self) -> None:
        """Terminates the watcher."""
        self.log.debug("Terminating kube_watcher...")
        self.kube_watcher.terminate()
        self.kube_watcher.join()
        self.log.debug("kube_watcher=%s", self.kube_watcher)
        self.log.debug("Flushing watcher_queue...")
        self._flush_watcher_queue()
        # Queue should be empty...
        self.watcher_queue.join()
        self.log.debug("Shutting down manager...")
        self._manager.shutdown()
def get_base_pod_from_template(pod_template_file: Optional[str], kube_config: Any) -> k8s.V1Pod:
    """
    Read either the pod_template_file set in the executor_config or the base
    pod_template_file set in airflow.cfg to craft the "base pod" used by the
    KubernetesExecutor.
    :param pod_template_file: absolute path to a pod_template_file.yaml or None
    :param kube_config: the KubeConfig class generated by airflow containing all kube metadata
    :return: a V1Pod that can be used as the base pod for k8s tasks
    """
    # A per-task template override takes precedence over the global config.
    template_path = pod_template_file if pod_template_file else kube_config.pod_template_file
    return PodGenerator.deserialize_model_file(template_path)
class KubernetesExecutor(BaseExecutor, LoggingMixin):
    """Executor for Kubernetes.

    Queues tasks for the AirflowKubernetesScheduler to launch as pods and
    relays pod results back into the scheduler's event buffer.
    """
    def __init__(self):
        self.kube_config = KubeConfig()
        self._manager = multiprocessing.Manager()
        self.task_queue: 'Queue[KubernetesJobType]' = self._manager.Queue()
        self.result_queue: 'Queue[KubernetesResultsType]' = self._manager.Queue()
        self.kube_scheduler: Optional[AirflowKubernetesScheduler] = None
        self.kube_client: Optional[client.CoreV1Api] = None
        self.scheduler_job_id: Optional[str] = None
        super().__init__(parallelism=self.kube_config.parallelism)
    @provide_session
    def clear_not_launched_queued_tasks(self, session=None) -> None:
        """
        If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or
        may not
        have been launched. Thus on starting up the scheduler let's check every
        "Queued" task to
        see if it has been launched (ie: if there is a corresponding pod on kubernetes)
        If it has been launched then do nothing, otherwise reset the state to "None" so
        the task
        will be rescheduled
        This will not be necessary in a future version of airflow in which there is
        proper support
        for State.LAUNCHED
        """
        self.log.debug("Clearing tasks that have not been launched")
        if not self.kube_client:
            raise AirflowException(NOT_STARTED_MESSAGE)
        queued_tasks = session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all()
        self.log.info('When executor started up, found %s queued task instances', len(queued_tasks))
        for task in queued_tasks:
            # pylint: disable=protected-access
            self.log.debug("Checking task %s", task)
            # Label selector matching the pod this task instance would have.
            dict_string = "dag_id={},task_id={},execution_date={},airflow-worker={}".format(
                pod_generator.make_safe_label_value(task.dag_id),
                pod_generator.make_safe_label_value(task.task_id),
                pod_generator.datetime_to_label_safe_datestring(task.execution_date),
                pod_generator.make_safe_label_value(str(self.scheduler_job_id)),
            )
            # pylint: enable=protected-access
            kwargs = dict(label_selector=dict_string)
            if self.kube_config.kube_client_request_args:
                for key, value in self.kube_config.kube_client_request_args.items():
                    kwargs[key] = value
            pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)
            if not pod_list.items:
                # No pod exists for this queued task: reset it so the
                # scheduler will queue it again.
                self.log.info(
                    'TaskInstance: %s found in queued state but was not launched, rescheduling', task
                )
                session.query(TaskInstance).filter(
                    TaskInstance.dag_id == task.dag_id,
                    TaskInstance.task_id == task.task_id,
                    TaskInstance.execution_date == task.execution_date,
                ).update({TaskInstance.state: State.NONE})
    def start(self) -> None:
        """Starts the executor"""
        self.log.info('Start Kubernetes executor')
        if not self.job_id:
            raise AirflowException("Could not get scheduler_job_id")
        self.scheduler_job_id = self.job_id
        self.log.debug('Start with scheduler_job_id: %s', self.scheduler_job_id)
        self.kube_client = get_kube_client()
        self.kube_scheduler = AirflowKubernetesScheduler(
            self.kube_config, self.task_queue, self.result_queue, self.kube_client, self.scheduler_job_id
        )
        self.clear_not_launched_queued_tasks()
    def execute_async(
        self,
        key: TaskInstanceKey,
        command: CommandType,
        queue: Optional[str] = None,
        executor_config: Optional[Any] = None,
    ) -> None:
        """Executes task asynchronously"""
        self.log.info('Add task %s with command %s with executor_config %s', key, command, executor_config)
        kube_executor_config = PodGenerator.from_obj(executor_config)
        if executor_config:
            pod_template_file = executor_config.get("pod_template_override", None)
        else:
            pod_template_file = None
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
        self.task_queue.put((key, command, kube_executor_config, pod_template_file))
    def sync(self) -> None:
        """Synchronize task state."""
        if self.running:
            self.log.debug('self.running: %s', self.running)
        if self.queued_tasks:
            self.log.debug('self.queued: %s', self.queued_tasks)
        if not self.scheduler_job_id:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_config:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.kube_scheduler.sync()
        last_resource_version = None
        # Drain the result queue, applying each state change; a failed
        # state change re-queues the result for the next sync.
        while True:  # pylint: disable=too-many-nested-blocks
            try:
                results = self.result_queue.get_nowait()
                try:
                    key, state, pod_id, namespace, resource_version = results
                    last_resource_version = resource_version
                    self.log.info('Changing state of %s to %s', results, state)
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:  # pylint: disable=broad-except
                        self.log.exception(
                            "Exception: %s when attempting to change state of %s to %s, re-queueing.",
                            e,
                            results,
                            state,
                        )
                        self.result_queue.put(results)
                finally:
                    self.result_queue.task_done()
            except Empty:
                break
        resource_instance = ResourceVersion()
        resource_instance.resource_version = last_resource_version or resource_instance.resource_version
        # Launch up to worker_pods_creation_batch_size queued jobs per sync.
        # pylint: disable=too-many-nested-blocks
        for _ in range(self.kube_config.worker_pods_creation_batch_size):
            try:
                task = self.task_queue.get_nowait()
                try:
                    self.kube_scheduler.run_next(task)
                except ApiException as e:
                    if e.reason == "BadRequest":
                        self.log.error("Request was invalid. Failing task")
                        key, _, _, _ = task
                        self.change_state(key, State.FAILED, e)
                    else:
                        self.log.warning(
                            'ApiException when attempting to run task, re-queueing. Message: %s',
                            json.loads(e.body)['message'],
                        )
                        self.task_queue.put(task)
                finally:
                    self.task_queue.task_done()
            except Empty:
                break
        # pylint: enable=too-many-nested-blocks
    def _change_state(self, key: TaskInstanceKey, state: Optional[str], pod_id: str, namespace: str) -> None:
        """Record a pod's terminal state and optionally delete the pod."""
        if state != State.RUNNING:
            if self.kube_config.delete_worker_pods:
                if not self.kube_scheduler:
                    raise AirflowException(NOT_STARTED_MESSAGE)
                # Bug fix: compare by equality, not identity. The state has
                # round-tripped through a pickling Manager queue, so `is not
                # State.FAILED` could be True for an equal-but-distinct
                # string, deleting failed pods despite
                # delete_worker_pods_on_failure being False.
                if state != State.FAILED or self.kube_config.delete_worker_pods_on_failure:
                    self.kube_scheduler.delete_pod(pod_id, namespace)
                    self.log.info('Deleted pod: %s in namespace %s', str(key), str(namespace))
            try:
                self.running.remove(key)
            except KeyError:
                self.log.debug('Could not find key: %s', str(key))
        self.event_buffer[key] = state, None
    def try_adopt_task_instances(self, tis: List[TaskInstance]) -> List[TaskInstance]:
        """Adopt pods belonging to dead schedulers; return tis that could not be adopted."""
        tis_to_flush = [ti for ti in tis if not ti.external_executor_id]
        # Skip tis without an external id (nothing to adopt) and deduplicate
        # so each old scheduler is queried only once.
        scheduler_job_ids = {ti.external_executor_id for ti in tis if ti.external_executor_id}
        pod_ids = {
            create_pod_id(
                dag_id=pod_generator.make_safe_label_value(ti.dag_id),
                task_id=pod_generator.make_safe_label_value(ti.task_id),
            ): ti
            for ti in tis
            if ti.external_executor_id
        }
        kube_client: client.CoreV1Api = self.kube_client
        for scheduler_job_id in scheduler_job_ids:
            scheduler_job_id = pod_generator.make_safe_label_value(str(scheduler_job_id))
            kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
            pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
            for pod in pod_list.items:
                self.adopt_launched_task(kube_client, pod, pod_ids)
        self._adopt_completed_pods(kube_client)
        # Anything left in pod_ids had no matching pod and must be flushed.
        tis_to_flush.extend(pod_ids.values())
        return tis_to_flush
    def adopt_launched_task(self, kube_client, pod, pod_ids: dict):
        """
        Patch existing pod so that the current KubernetesJobWatcher can monitor it via label selectors
        :param kube_client: kubernetes client for speaking to kube API
        :param pod: V1Pod spec that we will patch with new label
        :param pod_ids: pod_ids we expect to patch.
        """
        self.log.info("attempting to adopt pod %s", pod.metadata.name)
        pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
            str(self.scheduler_job_id)
        )
        dag_id = pod.metadata.labels['dag_id']
        task_id = pod.metadata.labels['task_id']
        pod_id = create_pod_id(dag_id=dag_id, task_id=task_id)
        if pod_id not in pod_ids:
            self.log.error(
                "attempting to adopt task %s in dag %s which was not specified by database",
                task_id,
                dag_id,
            )
        else:
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
                pod_ids.pop(pod_id)
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
    def _adopt_completed_pods(self, kube_client: kubernetes.client.CoreV1Api):
        """
        Patch completed pod so that the KubernetesJobWatcher can delete it.
        :param kube_client: kubernetes client for speaking to kube API
        """
        kwargs = {
            'field_selector': "status.phase=Succeeded",
            'label_selector': 'kubernetes_executor=True',
        }
        pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
        for pod in pod_list.items:
            self.log.info("Attempting to adopt pod %s", pod.metadata.name)
            pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
                str(self.scheduler_job_id)
            )
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)
    def _flush_task_queue(self) -> None:
        """Discard any queued-but-unlaunched tasks during shutdown."""
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug('Executor shutting down, task_queue approximate size=%d', self.task_queue.qsize())
        while True:
            try:
                task = self.task_queue.get_nowait()
                # This is a new task to run thus ok to ignore.
                self.log.warning('Executor shutting down, will NOT run task=%s', task)
                self.task_queue.task_done()
            except Empty:
                break
    def _flush_result_queue(self) -> None:
        """Apply any remaining pod results before shutting down."""
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug('Executor shutting down, result_queue approximate size=%d', self.result_queue.qsize())
        while True:  # pylint: disable=too-many-nested-blocks
            try:
                results = self.result_queue.get_nowait()
                self.log.warning('Executor shutting down, flushing results=%s', results)
                try:
                    key, state, pod_id, namespace, resource_version = results
                    # Bug fix: resource_version is a string; %d raised a
                    # formatting error inside the logging machinery.
                    self.log.info(
                        'Changing state of %s to %s : resource_version=%s', results, state, resource_version
                    )
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:  # pylint: disable=broad-except
                        self.log.exception(
                            'Ignoring exception: %s when attempting to change state of %s to %s.',
                            e,
                            results,
                            state,
                        )
                finally:
                    self.result_queue.task_done()
            except Empty:
                break
    def end(self) -> None:
        """Called when the executor shuts down"""
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.info('Shutting down Kubernetes executor')
        self.log.debug('Flushing task_queue...')
        self._flush_task_queue()
        self.log.debug('Flushing result_queue...')
        self._flush_result_queue()
        # Both queues should be empty...
        self.task_queue.join()
        self.result_queue.join()
        if self.kube_scheduler:
            self.kube_scheduler.terminate()
        self._manager.shutdown()
    def terminate(self):
        """Terminate the executor; currently a no-op."""
e83fcfeda19ba29af519ff0ecbd798cdefc5ddbb | 395 | py | Python | garrus/metrics/brier.py | sleep3r/garrus | 28096ca0d6166117be23e740a68831396ba92a7e | [
"Apache-2.0"
] | 13 | 2021-04-06T15:00:41.000Z | 2021-06-12T21:27:46.000Z | garrus/metrics/brier.py | sleep3r/garrus | 28096ca0d6166117be23e740a68831396ba92a7e | [
"Apache-2.0"
] | null | null | null | garrus/metrics/brier.py | sleep3r/garrus | 28096ca0d6166117be23e740a68831396ba92a7e | [
"Apache-2.0"
] | 1 | 2021-04-26T04:25:59.000Z | 2021-04-26T04:25:59.000Z | import numpy as np
from garrus.core import BaseMetric
class Brier(BaseMetric):
    r"""
    Brier score: mean over samples of the summed squared difference between
    predicted confidences and the accuracy targets.

    $$ Brier = \frac{1}{m} \sum_{j=1}^{m} (y_{j}-b_{j})^{2} $$

    (Raw docstring so the LaTeX backslashes are preserved literally; the
    original formula's leading minus sign and trailing paren were typos --
    the code computes a plain, non-negated mean of squared errors.)
    """
    def _compute(self, confidences: np.ndarray, accuracies: np.ndarray, **kwargs) -> float:
        # assumes 2-D inputs (n_samples, n_classes): sum over classes
        # (axis=1), then mean over samples -- TODO confirm against callers
        brier_score = np.mean(np.sum((confidences - accuracies) ** 2, axis=1))  # noqa
        return float(brier_score)
0599058c61373022c61e25e8e27299d0be79f9ff | 1,106 | py | Python | graph/measures/core/edge_based.py | mazlo/lodcc | dcc3403fe7785c9dc73f09154f397c0ff42f1920 | [
"MIT"
] | 2 | 2018-12-09T16:34:22.000Z | 2021-02-18T23:45:29.000Z | graph/measures/core/edge_based.py | mazlo/lodcc | dcc3403fe7785c9dc73f09154f397c0ff42f1920 | [
"MIT"
] | null | null | null | graph/measures/core/edge_based.py | mazlo/lodcc | dcc3403fe7785c9dc73f09154f397c0ff42f1920 | [
"MIT"
] | 1 | 2018-04-30T08:25:09.000Z | 2018-04-30T08:25:09.000Z | import logging
from graph_tool import GraphView
from graph_tool.topology import edge_reciprocity, label_largest_component, pseudo_diameter
log = logging.getLogger( __name__ )
def f_reciprocity( D, stats, options={ 'features': [] } ):
    """Record the edge reciprocity of ``D`` in ``stats`` when the
    'reciprocity' feature is enabled in ``options``."""
    if 'reciprocity' not in options['features']:
        return
    stats['reciprocity'] = edge_reciprocity( D )
    log.debug( 'done reciprocity' )
def f_pseudo_diameter( D, stats, options={ 'features': [] } ):
    """Compute the pseudo diameter of ``D``'s largest component and record
    it in ``stats`` together with the names of the two end vertices.

    Falls back to the whole graph when the largest component is empty or
    consists of a single vertex.
    """
    if 'diameter' in options['features']:
        # Only label the largest component when the feature is actually
        # requested -- the original computed it unconditionally, paying the
        # cost even when 'diameter' was disabled.
        LC = label_largest_component(D)
        LCD = GraphView( D, vfilt=LC )
        if LCD.num_vertices() == 0 or LCD.num_vertices() == 1:
            # if largest component does practically not exist, use the whole graph
            dist, ends = pseudo_diameter(D)
        else:
            dist, ends = pseudo_diameter(LCD)
        stats['pseudo_diameter'] = dist
        # D may be used in both cases
        stats['pseudo_diameter_src_vertex'] = D.vertex_properties['name'][ends[0]]
        stats['pseudo_diameter_trg_vertex'] = D.vertex_properties['name'][ends[1]]
        log.debug( 'done pseudo_diameter' )
b3357a82d8b5e91f593eefcfe0b448703fb2dbca | 12,756 | py | Python | applications/tensorflow2/fastspeech2/preprocessor/text.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 260 | 2019-11-18T01:50:00.000Z | 2022-03-28T23:08:53.000Z | applications/tensorflow2/fastspeech2/preprocessor/text.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 27 | 2020-01-28T23:07:50.000Z | 2022-02-14T15:37:06.000Z | applications/tensorflow2/fastspeech2/preprocessor/text.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 56 | 2019-11-18T02:13:12.000Z | 2022-02-28T14:36:09.000Z | # Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file has been modified by Graphcore Ltd.
"""
This script has been adapated from the original TensorSpeech/TensorFlowTTS repo found here:
[
https://github.com/TensorSpeech/TensorFlowTTS/blob/v1.8/tensorflow_tts/processor/base_processor.py,
https://github.com/TensorSpeech/TensorFlowTTS/blob/v1.8/tensorflow_tts/processor/ljspeech.py
]
Main changes:
Combine two files.
"""
import os
import re
import abc
import json
import numpy as np
import soundfile as sf
from typing import Dict, List, Union
from dataclasses import dataclass, field
from cleaner import english_cleaners
class DataProcessorError(Exception):
    """Raised when a text/dataset processor is misconfigured or incomplete."""
valid_symbols = [
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
]
# Special tokens: padding and end-of-sequence.
_pad = "pad"
_eos = "eos"
# Character-level symbols: punctuation, the hyphen, and plain letters.
_punctuation = "!'(),.:;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ["@" + s for s in valid_symbols]
# Export all symbols:
# Order matters: the index of each symbol in this list is its id.
LJSPEECH_SYMBOLS = (
    [_pad] + list(_special) + list(_punctuation) +
    list(_letters) + _arpabet + [_eos]
)
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class BaseProcessor(abc.ABC):
    """Base class for dataset processors.

    A processor reads a dataset's metadata/training file, builds the
    (text, wav_path, speaker_name) item list, and maintains the mappings
    between text symbols / speaker names and integer ids.  Dataset specific
    behavior (sample loading, text-to-id conversion, eos token) is supplied
    by subclasses via the abstract methods below.
    """

    data_dir: str
    symbols: List[str]
    speakers_map: Dict[str, int]
    train_f_name: str = "train.txt"
    delimiter: str = "|"
    positions = {
        "file": 0,
        "text": 1,
        "speaker_name": 2,
    }  # positions of file,text,speaker_name after split line
    f_extension: str = ".wav"
    saved_mapper_path: Union[str, None] = None
    loaded_mapper_path: Union[str, None] = None
    # extras
    # each item is [text, wav_path, speaker_name] (speaker_name must be last)
    items: List[List[str]] = field(default_factory=list)
    symbol_to_id: Dict[str, int] = field(default_factory=dict)
    id_to_symbol: Dict[int, str] = field(default_factory=dict)

    def __post_init__(self):
        # Fast path: restore all mappings from a previously saved mapper file.
        if self.loaded_mapper_path is not None:
            self._load_mapper(loaded_path=self.loaded_mapper_path)
            if self.setup_eos_token():
                self.add_symbol(
                    self.setup_eos_token()
                )  # if this eos token not yet present in symbols list.
                self.eos_id = self.symbol_to_id[self.setup_eos_token()]
            return
        if len(self.symbols) < 1:
            raise DataProcessorError(
                "Symbols list is empty but mapper isn't loaded")
        self.create_items()
        self.create_speaker_map()
        self.reverse_speaker = {v: k for k, v in self.speakers_map.items()}
        self.create_symbols()
        if self.saved_mapper_path is not None:
            self._save_mapper(saved_path=self.saved_mapper_path)
        # processor name. useful to use it for AutoProcessor
        self._processor_name = type(self).__name__
        if self.setup_eos_token():
            self.add_symbol(
                self.setup_eos_token()
            )  # if this eos token not yet present in symbols list.
            self.eos_id = self.symbol_to_id[self.setup_eos_token()]

    def __getattr__(self, name: str) -> Union[str, int]:
        # NOTE: fallback attribute lookup resolves through the symbol table,
        # e.g. ``self.pad_id`` -> id of the symbol "pad".  A truly missing
        # attribute therefore raises KeyError rather than AttributeError.
        if "_id" in name:  # map symbol to id
            return self.symbol_to_id[name.replace("_id", "")]
        return self.symbol_to_id[name]  # map symbol to value

    def create_speaker_map(self):
        """Assign consecutive integer ids to speaker names in item order."""
        sp_id = 0
        for i in self.items:
            speaker_name = i[-1]
            if speaker_name not in self.speakers_map:
                self.speakers_map[speaker_name] = sp_id
                sp_id += 1

    def get_speaker_id(self, name: str) -> int:
        """Return the integer id for a speaker name."""
        return self.speakers_map[name]

    def get_speaker_name(self, speaker_id: int) -> str:
        """Return the speaker name for an integer id.

        BUG FIX: ``speakers_map`` maps name -> id, so indexing it with an id
        was wrong.  A reverse lookup is used instead of ``reverse_speaker``
        because the latter is not populated when a mapper file was loaded.
        """
        for name, sp_id in self.speakers_map.items():
            if sp_id == speaker_id:
                return name
        raise KeyError(speaker_id)

    def create_symbols(self):
        """(Re)build symbol<->id mappings from the current symbol list."""
        self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
        self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}

    def create_items(self):
        """
        Method used to create items from training file
        items struct example => text, wav_file_path, speaker_name.
        Note that the speaker_name should be a last.
        """
        with open(
            os.path.join(self.data_dir, self.train_f_name), mode="r", encoding="utf-8"
        ) as f:
            for line in f:
                parts = line.strip().split(self.delimiter)
                wav_path = os.path.join(
                    self.data_dir, parts[self.positions["file"]])
                # Append the extension only when it is not already present.
                wav_path = (
                    wav_path + self.f_extension
                    if wav_path[-len(self.f_extension):] != self.f_extension
                    else wav_path
                )
                text = parts[self.positions["text"]]
                speaker_name = parts[self.positions["speaker_name"]]
                self.items.append([text, wav_path, speaker_name])

    def add_symbol(self, symbol: Union[str, list]):
        """Add one symbol (or a list of symbols) to the vocabulary, skipping duplicates."""
        if isinstance(symbol, str):
            if symbol in self.symbol_to_id:
                return
            self.symbols.append(symbol)
            symbol_id = len(self.symbol_to_id)
            self.symbol_to_id[symbol] = symbol_id
            self.id_to_symbol[symbol_id] = symbol
        elif isinstance(symbol, list):
            for i in symbol:
                self.add_symbol(i)
        else:
            raise ValueError(
                "A new_symbols must be a string or list of string.")

    @abc.abstractmethod
    def get_one_sample(self, item):
        """Get one sample from dataset items.
        Args:
            item: one item in Dataset items.
                Dataset items may include (raw_text, speaker_id, wav_path, ...)
        Returns:
            sample (dict): sample dictionary return all feature used for preprocessing later.
        """
        sample = {
            "raw_text": None,
            "text_ids": None,
            "audio": None,
            "utt_id": None,
            "speaker_name": None,
            "rate": None,
        }
        return sample

    @abc.abstractmethod
    def text_to_sequence(self, text: str):
        """Convert input text to a list of symbol ids."""
        return []

    @abc.abstractmethod
    def setup_eos_token(self):
        """Return eos symbol of type string."""
        return "eos"

    def convert_symbols_to_ids(self, symbols: Union[str, list]):
        """Map a symbol or a list of symbols to their integer ids.

        BUG FIX: the original referenced the nonexistent ``self._symbol_to_id``,
        which the ``__getattr__`` fallback turned into a spurious KeyError.
        """
        sequence = []
        if isinstance(symbols, str):
            sequence.append(self.symbol_to_id[symbols])
            return sequence
        elif isinstance(symbols, list):
            for s in symbols:
                if isinstance(s, str):
                    sequence.append(self.symbol_to_id[s])
                else:
                    raise ValueError(
                        "All elements of symbols must be a string.")
        else:
            raise ValueError("A symbols must be a string or list of string.")
        return sequence

    def _load_mapper(self, loaded_path: str = None):
        """
        Load all needed mappers from file (speakers, symbol tables, extras).
        """
        loaded_path = (
            os.path.join(self.data_dir, "mapper.json")
            if loaded_path is None
            else loaded_path
        )
        with open(loaded_path, "r") as f:
            data = json.load(f)
        self.speakers_map = data["speakers_map"]
        self.symbol_to_id = data["symbol_to_id"]
        # JSON keys are always strings; restore the integer ids.
        self.id_to_symbol = {
            int(k): v for k, v in data["id_to_symbol"].items()}
        self._processor_name = data["processor_name"]
        # Any extra keys saved alongside the mappers become attributes.
        all_data_keys = data.keys()
        for key in all_data_keys:
            if key not in ["speakers_map", "symbol_to_id", "id_to_symbol"]:
                setattr(self, key, data[key])

    def _save_mapper(self, saved_path: str = None, extra_attrs_to_save: dict = None):
        """
        Save all needed mappers to file
        """
        saved_path = (
            os.path.join(self.data_dir, "mapper.json")
            if saved_path is None
            else saved_path
        )
        with open(saved_path, "w") as f:
            full_mapper = {
                "symbol_to_id": self.symbol_to_id,
                "id_to_symbol": self.id_to_symbol,
                "speakers_map": self.speakers_map,
                "processor_name": self._processor_name,
            }
            if extra_attrs_to_save:
                full_mapper = {**full_mapper, **extra_attrs_to_save}
            json.dump(full_mapper, f)

    @abc.abstractmethod
    def save_pretrained(self, saved_path):
        """Save mappers to file"""
        pass
@dataclass
class LJSpeechProcessor(BaseProcessor):
    """Processor for the LJSpeech dataset (metadata.csv + wavs/ directory layout)."""

    positions = {
        "wave_file": 0,
        "text": 1,
        "text_norm": 2,
    }
    train_f_name: str = "metadata.csv"

    def create_items(self):
        # Build (text, wav_path, speaker) items from metadata.csv, one per row.
        if not self.data_dir:
            return
        metadata_path = os.path.join(self.data_dir, self.train_f_name)
        with open(metadata_path, encoding="utf-8") as f:
            self.items = [self.split_line(self.data_dir, row, "|") for row in f]

    def split_line(self, data_dir, line, split):
        # One metadata row: <wave file stem> | <raw text> | <normalized text>.
        fields = line.strip().split(split)
        stem = fields[self.positions["wave_file"]]
        normalized_text = fields[self.positions["text_norm"]]
        audio_path = os.path.join(data_dir, "wavs", f"{stem}.wav")
        speaker_name = "ljspeech"
        return normalized_text, audio_path, speaker_name

    def setup_eos_token(self):
        return _eos

    def save_pretrained(self, saved_path):
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, "processor.json"), {})

    def get_one_sample(self, item):
        raw_text, audio_path, speaker = item
        # soundfile already returns audio normalized to [-1, 1].
        signal, sampling_rate = sf.read(audio_path)
        utt_id = os.path.split(audio_path)[-1].split(".")[0]
        return {
            "raw_text": raw_text,
            "text_ids": np.asarray(self.text_to_sequence(raw_text), np.int32),
            "audio": signal.astype(np.float32),
            "utt_id": utt_id,
            "speaker_name": speaker,
            "rate": sampling_rate,
        }

    def text_to_sequence(self, text):
        ids = []
        # Text wrapped in {...} is treated as ARPAbet; everything else is run
        # through the English cleaner and mapped character by character.
        while text:
            match = _curly_re.match(text)
            if match is None:
                ids.extend(self._symbols_to_sequence(english_cleaners(text)))
                break
            ids.extend(self._symbols_to_sequence(english_cleaners(match.group(1))))
            ids.extend(self._arpabet_to_sequence(match.group(2)))
            text = match.group(3)
        # Terminate every sequence with the eos token.
        ids.append(self.eos_id)
        return ids

    def _symbols_to_sequence(self, symbols):
        keep = self._should_keep_symbol
        return [self.symbol_to_id[s] for s in symbols if keep(s)]

    def _arpabet_to_sequence(self, text):
        return self._symbols_to_sequence(["@" + token for token in text.split()])

    def _should_keep_symbol(self, s):
        return s not in ("_", "~") and s in self.symbol_to_id
| 28.72973 | 101 | 0.570947 |
2a4aa82aae79a672f3f411f0e355cf082e8cfed4 | 836 | py | Python | application/utils.py | sisayie/goeasy-project | df8f3fafd17e92fd5638854f15628c1d447e5198 | [
"MIT"
] | 1 | 2020-01-21T15:03:10.000Z | 2020-01-21T15:03:10.000Z | application/utils.py | sisayie/goeasy-project | df8f3fafd17e92fd5638854f15628c1d447e5198 | [
"MIT"
] | 1 | 2019-10-31T16:01:12.000Z | 2019-10-31T16:01:12.000Z | application/utils.py | sisayie/goeasy-project | df8f3fafd17e92fd5638854f15628c1d447e5198 | [
"MIT"
] | 1 | 2019-10-15T09:54:57.000Z | 2019-10-15T09:54:57.000Z | #from flask import json
import datetime as dtm
from datetime import datetime
'''
response = current_app.response_class(
json.dumps(new_sorted, sort_keys=False),
mimetype=current_app.config['JSONIFY_MIMETYPE'])
'''
def date_format(value: str) -> int:
    """Convert ``value`` to a unix timestamp in seconds.

    ``value`` is either a string of digits (already a timestamp) or a naive
    ``YYYY-MM-DD HH:MM:SS`` string, interpreted in the machine's local
    timezone (unchanged behavior).
    """
    if str.isdigit(value):
        # BUG FIX: the declared return type is int, but the digit string
        # itself used to be returned; convert it.
        return int(value)
    dt = dtm.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
    return int(dt.timestamp())
def date_format1(value: str) -> int:
    """Convert ``value`` to a unix timestamp in seconds.

    ``value`` is either a string of digits (already a timestamp) or a
    timezone-aware ``%Y-%m-%dT%H:%M:%S%z`` string.
    """
    if str.isdigit(value):
        # BUG FIX: the declared return type is int, but the digit string
        # itself used to be returned; convert it.
        return int(value)
    dt = dtm.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')
    return int(dt.timestamp())
def date_format2(value: str) -> int:
    """Convert a ``%Y-%m-%dT%H:%M:%S.%f%z`` string to a unix timestamp in seconds.

    BUG FIX: the original parsed the string, then passed the resulting
    ``datetime`` object back into ``strptime`` (which requires a string),
    so every call raised ``TypeError``.  Parse exactly once.
    """
    dt = dtm.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f%z')
    return int(dt.timestamp())
2c766db4b91ddf71dcf7051911ce70a68f09ec49 | 1,569 | py | Python | src/aks-preview/setup.py | ganga1980/azure-cli-extensions | cf3c2660a92aa349576f440365d6e65570287c12 | [
"MIT"
] | null | null | null | src/aks-preview/setup.py | ganga1980/azure-cli-extensions | cf3c2660a92aa349576f440365d6e65570287c12 | [
"MIT"
] | null | null | null | src/aks-preview/setup.py | ganga1980/azure-cli-extensions | cf3c2660a92aa349576f440365d6e65570287c12 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open as open1
from setuptools import setup, find_packages
# Version of the aks-preview CLI extension; bumped on every release.
VERSION = "0.5.44"
# Trove classifiers describing maturity, audience and supported Pythons.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'License :: OSI Approved :: MIT License',
]
# No extra runtime dependencies beyond what azure-cli itself provides.
DEPENDENCIES = []
# The long description shown on the package page is README + changelog.
with open1('README.md', 'r', encoding='utf-8') as f:
    README = f.read()
with open1('HISTORY.md', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='aks-preview',
    version=VERSION,
    description='Provides a preview for upcoming AKS features',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli-extensions/tree/master/src/aks-preview',
    classifiers=CLASSIFIERS,
    packages=find_packages(exclude=["tests"]),
    # azext_metadata.json marks this package as a CLI extension.
    package_data={'azext_aks_preview': ['azext_metadata.json']},
    install_requires=DEPENDENCIES
| 34.866667 | 94 | 0.601657 |
92e798cb1ff189b3bdd4446b5b35ffa71a932669 | 4,056 | py | Python | openbb_terminal/cryptocurrency/defi/coindix_model.py | 23errg/GamestonkTerminal | 826cd8a723d8e2b810c51bf8266c09e8e55059c4 | [
"MIT"
] | null | null | null | openbb_terminal/cryptocurrency/defi/coindix_model.py | 23errg/GamestonkTerminal | 826cd8a723d8e2b810c51bf8266c09e8e55059c4 | [
"MIT"
] | null | null | null | openbb_terminal/cryptocurrency/defi/coindix_model.py | 23errg/GamestonkTerminal | 826cd8a723d8e2b810c51bf8266c09e8e55059c4 | [
"MIT"
] | null | null | null | """Coindix model"""
__docformat__ = "numpy"
import logging
from typing import Optional
import pandas as pd
import requests
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# Columns (and their order) kept in the vaults DataFrame returned to the user.
VAULTS_FILTERS = ["name", "chain", "protocol", "apy", "tvl", "risk", "link"]
# Blockchains accepted by the Coindix API `chain` query parameter.
CHAINS = [
    "ethereum",
    "polygon",
    "avalanche",
    "bsc",
    "terra",
    "fantom",
    "moonriver",
    "celo",
    "heco",
    "okex",
    "cronos",
    "arbitrum",
    "eth",
    "harmony",
    "fuse",
    "defichain",
    "solana",
    "optimism",
]
# DeFi protocols accepted by the `protocol` query parameter.
PROTOCOLS = [
    "aave",
    "acryptos",
    "alpaca",
    "anchor",
    "autofarm",
    "balancer",
    "bancor",
    "beefy",
    "belt",
    "compound",
    "convex",
    "cream",
    "curve",
    "defichain",
    "geist",
    "lido",
    "liquity",
    "mirror",
    "pancakeswap",
    "raydium",
    "sushi",
    "tarot",
    "traderjoe",
    "tulip",
    "ubeswap",
    "uniswap",
    "venus",
    "yearn",
]
# Vault kinds accepted by the `kind` query parameter.
VAULT_KINDS = [
    "lp",
    "single",
    "noimploss",
    "stable",
]
def _lambda_risk_mapper(risk_level: int) -> str:
"""Helper methods
Parameters
----------
risk_level: int
number from range 0-4 represents risk factor for given vault
Returns
-------
string:
text representation of risk
"""
mappings = {0: "Non Eligible", 1: "Least", 2: "Low", 3: "Medium", 4: "High"}
return mappings.get(risk_level, "Non Eligible")
@log_start_end(log=logger)
def _prepare_params(**kwargs) -> dict:
    """Build the query parameters for requests to the Coindix API.

    Keyword values are kept only when they belong to the known whitelist for
    their key (chain / protocol / kind); all values are lower-cased.

    Returns
    -------
    dict:
        Prepared parameters for the request.
    """
    allowed = {"chain": CHAINS, "protocol": PROTOCOLS, "kind": VAULT_KINDS}
    params = {"sort": "-apy", "tvl": "1m", "kind": "all"}
    for key, value in kwargs.items():
        if value in allowed.get(key, []):
            params[key] = value
    return {key: val.lower() for key, val in params.items()}
@log_start_end(log=logger)
def get_defi_vaults(
    chain: Optional[str] = None,
    protocol: Optional[str] = None,
    kind: Optional[str] = None,
) -> pd.DataFrame:
    """Get DeFi Vaults Information. DeFi Vaults are pools of funds with an assigned strategy which main goal is to
    maximize returns of its crypto assets. [Source: https://coindix.com/]

    Parameters
    ----------
    chain: str
        Blockchain - one from list [
        'ethereum', 'polygon', 'avalanche', 'bsc', 'terra', 'fantom',
        'moonriver', 'celo', 'heco', 'okex', 'cronos', 'arbitrum', 'eth',
        'harmony', 'fuse', 'defichain', 'solana', 'optimism'
        ]
    protocol: str
        DeFi protocol - one from list: [
        'aave', 'acryptos', 'alpaca', 'anchor', 'autofarm', 'balancer', 'bancor',
        'beefy', 'belt', 'compound', 'convex', 'cream', 'curve', 'defichain', 'geist',
        'lido', 'liquity', 'mirror', 'pancakeswap', 'raydium', 'sushi', 'tarot', 'traderjoe',
        'tulip', 'ubeswap', 'uniswap', 'venus', 'yearn'
        ]
    kind: str
        Kind/type of vault - one from list: ['lp','single','noimploss','stable']

    Returns
    -------
    pd.DataFrame
        Top 100 DeFi Vaults for given chain/protocol sorted by APY.

    Raises
    ------
    Exception
        If the Coindix API responds with a non-2xx status code.
    ValueError
        If the response payload cannot be parsed into a DataFrame.
    """
    params = _prepare_params(chain=chain, protocol=protocol, kind=kind)
    # BUG FIX: without a timeout a stalled API call would hang forever.
    response = requests.get("https://apiv2.coindix.com/search", params=params, timeout=30)
    if not 200 <= response.status_code < 300:
        raise Exception(f"Coindix api exception: {response.text}")
    try:
        data = response.json()["data"]
        if not data:
            return pd.DataFrame()
        df = pd.DataFrame(data)[VAULTS_FILTERS].fillna("NA")
        # Pass the mapper directly; no need to wrap it in a lambda.
        df["risk"] = df["risk"].apply(_lambda_risk_mapper)
        return df
    except Exception as e:
        logger.exception(e)
        raise ValueError(f"Invalid Response: {response.text}") from e
| 25.35 | 114 | 0.57643 |
0a31cb8c9acf0ba8043889b9e23f25c08f962356 | 4,793 | py | Python | third_party/graphy/graphy/backends/google_chart_api/util_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/graphy/graphy/backends/google_chart_api/util_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/graphy/graphy/backends/google_chart_api/util_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for Graphy and Google Chart API backend."""
import string
import unittest
from graphy import graphy_test
from graphy.backends.google_chart_api import util
class SimpleEncoderTest(graphy_test.GraphyTest):
    """Tests for util.SimpleDataEncoder (the 62-symbol 'simple' chart encoding)."""

    def setUp(self):
        self.simple = util.SimpleDataEncoder()

    def testEmpty(self):
        self.assertEqual('', self.simple.Encode([]))

    def testSingle(self):
        self.assertEqual('A', self.simple.Encode([0]))

    def testFull(self):
        # The simple encoding alphabet is A-Z, a-z, 0-9 (62 symbols total).
        full = string.ascii_uppercase + string.ascii_lowercase + string.digits
        self.assertEqual(full, self.simple.Encode(range(0, 62)))

    def testRoundingError(self):
        """Scaling might give us some rounding error.  Make sure that the encoder
        deals with it properly.
        """
        a = [-1, 0, 0, 1, 60, 61, 61, 62]
        b = [-0.999999, -0.00001, 0.00001, 0.99998,
             60.00001, 60.99999, 61.00001, 61.99998]
        self.assertEqual(self.simple.Encode(a), self.simple.Encode(b))

    def testFloats(self):
        # Floats are truncated toward their integer encoding buckets.
        ints = [1, 2, 3, 4]
        floats = [1.1, 2.1, 3.1, 4.1]
        self.assertEqual(self.simple.Encode(ints), self.simple.Encode(floats))

    def testOutOfRangeDropped(self):
        """Confirm that values outside of min/max are left blank."""
        nums = [-79, -1, 0, 1, 61, 62, 1012]
        self.assertEqual('__AB9__', self.simple.Encode(nums))

    def testNoneDropped(self):
        """Confirm that the value None is left blank."""
        self.assertEqual('_JI_H', self.simple.Encode([None, 9, 8, None, 7]))
class EnhandedEncoderTest(graphy_test.GraphyTest):
    """Tests for util.EnhancedDataEncoder (the 4096-value, 2-chars-per-point encoding).

    NOTE(review): the class name has a typo ("Enhanded" vs "Enhanced"); kept
    as-is since renaming would change the externally visible test id.
    """

    def setUp(self):
        self.encoder = util.EnhancedDataEncoder()

    def testEmpty(self):
        self.assertEqual('', self.encoder.Encode([]))

    def testFull(self):
        # Encoding the full 0..4095 range must emit the whole code alphabet.
        full = ''.join(self.encoder.code)
        self.assertEqual(full, self.encoder.Encode(range(0, 4096)))

    def testOutOfRangeDropped(self):
        # Each point takes two characters, so an out-of-range blank is '__'.
        nums = [-79, -1, 0, 1, 61, 4096, 10012]
        self.assertEqual('____AAABA9____', self.encoder.Encode(nums))

    def testNoneDropped(self):
        self.assertEqual('__AJAI__AH', self.encoder.Encode([None, 9, 8, None, 7]))
class ScaleTest(graphy_test.GraphyTest):
    """Test scaling."""

    def testScaleIntegerData(self):
        # ScaleData(data, old_min, old_max, new_min, new_max)
        scale = util.ScaleData
        # Identity
        self.assertEqual([1, 2, 3], scale([1, 2, 3], 1, 3, 1, 3))
        self.assertEqual([-1, 0, 1], scale([-1, 0, 1], -1, 1, -1, 1))
        # Translate
        self.assertEqual([4, 5, 6], scale([1, 2, 3], 1, 3, 4, 6))
        self.assertEqual([-3, -2, -1], scale([1, 2, 3], 1, 3, -3, -1))
        # Scale
        self.assertEqual([1, 3.5, 6], scale([1, 2, 3], 1, 3, 1, 6))
        self.assertEqual([-6, 0, 6], scale([1, 2, 3], 1, 3, -6, 6))
        # Scale and Translate
        self.assertEqual([100, 200, 300], scale([1, 2, 3], 1, 3, 100, 300))

    def testScaleDataWithDifferentMinMax(self):
        # Input values need not span the full [old_min, old_max] range.
        scale = util.ScaleData
        self.assertEqual([1.5, 2, 2.5], scale([1, 2, 3], 0, 4, 1, 3))
        self.assertEqual([-2, 2, 6], scale([0, 2, 4], 1, 3, 0, 4))

    def testScaleFloatingPointData(self):
        scale = util.ScaleData
        data = [-3.14, -2.72, 0, 2.72, 3.14]
        scaled_e = 5 + 5 * 2.72 / 3.14
        expected_data = [0, 10 - scaled_e, 5, scaled_e, 10]
        actual_data = scale(data, -3.14, 3.14, 0, 10)
        # Element-wise approximate comparison because of float rounding.
        for expected, actual in zip(expected_data, actual_data):
            self.assertAlmostEqual(expected, actual)

    def testScaleDataOverRealRange(self):
        scale = util.ScaleData
        self.assertEqual([0, 30.5, 61], scale([1, 2, 3], 1, 3, 0, 61))

    def testScalingLotsOfData(self):
        data = range(0, 100)
        expected = range(-100, 100, 2)
        actual = util.ScaleData(data, 0, 100, -100, 100)
        self.assertEqual(expected, actual)
class NameTest(graphy_test.GraphyTest):
    """Test long/short parameter names."""

    def testLongNames(self):
        # Long names (size, data) are shortened to chs/chd; short names pass through.
        params = dict(size='S', data='D', chg='G')
        params = util.ShortenParameterNames(params)
        self.assertEqual(dict(chs='S', chd='D', chg='G'), params)

    def testCantUseBothLongAndShortName(self):
        """Make sure we don't let the user specify both the long and the short
        version of a parameter.  (If we did, which one would we pick?)
        """
        params = dict(size='long', chs='short')
        self.assertRaises(KeyError, util.ShortenParameterNames, params)
if __name__ == '__main__':
    # Run all test cases in this module via the standard unittest runner.
    unittest.main()
| 31.953333 | 78 | 0.660129 |
7d0e47d032f890656380d2c2f4771fdac6df7be8 | 18,243 | py | Python | cirq-core/cirq/ops/gateset.py | allen91wu/Cirq | c33bd9bd6d08650f41b0db5cf69abb3daed72a8f | [
"Apache-2.0"
] | null | null | null | cirq-core/cirq/ops/gateset.py | allen91wu/Cirq | c33bd9bd6d08650f41b0db5cf69abb3daed72a8f | [
"Apache-2.0"
] | null | null | null | cirq-core/cirq/ops/gateset.py | allen91wu/Cirq | c33bd9bd6d08650f41b0db5cf69abb3daed72a8f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for grouping and validating Cirq Gates"""
from typing import Any, Callable, cast, Dict, FrozenSet, List, Optional, Type, TYPE_CHECKING, Union
from cirq.ops import global_phase_op, op_tree, raw_types
from cirq import protocols, value
if TYPE_CHECKING:
import cirq
def _gate_str(
    gate: Union[raw_types.Gate, Type[raw_types.Gate], 'cirq.GateFamily'],
    gettr: Callable[[Any], str] = str,
) -> str:
    """Display string for a gate: ``gettr(gate)`` for instances/families,
    fully-qualified ``module.ClassName`` for gate types."""
    if isinstance(gate, type):
        return f'{gate.__module__}.{gate.__name__}'
    return gettr(gate)
@value.value_equality(distinct_child_types=True)
class GateFamily:
    """Wrapper around gate instances/types describing a set of accepted gates.

    GateFamily supports initialization via
    a) Non-parameterized instances of `cirq.Gate` (Instance Family).
    b) Python types inheriting from `cirq.Gate` (Type Family).

    By default, the containment checks depend on the initialization type:
    a) Instance Family: Containment check is done via `cirq.equal_up_to_global_phase`.
    b) Type Family: Containment check is done by type comparison.

    For example:
    a) Instance Family:
        >>> gate_family = cirq.GateFamily(cirq.X)
        >>> assert cirq.X in gate_family
        >>> assert cirq.Rx(rads=np.pi) in gate_family
        >>> assert cirq.X ** sympy.Symbol("theta") not in gate_family

    b) Type Family:
        >>> gate_family = cirq.GateFamily(cirq.XPowGate)
        >>> assert cirq.X in gate_family
        >>> assert cirq.Rx(rads=np.pi) in gate_family
        >>> assert cirq.X ** sympy.Symbol("theta") in gate_family

    In order to create gate families with constraints on parameters of a gate
    type, users should derive from the `cirq.GateFamily` class and override the
    `_predicate` method used to check for gate containment.
    """

    def __init__(
        self,
        gate: Union[Type[raw_types.Gate], raw_types.Gate],
        *,
        name: Optional[str] = None,
        description: Optional[str] = None,
        ignore_global_phase: bool = True,
    ) -> None:
        """Init GateFamily.

        Args:
            gate: A python `type` inheriting from `cirq.Gate` for type based membership checks, or
                a non-parameterized instance of a `cirq.Gate` for equality based membership checks.
            name: The name of the gate family.
            description: Human readable description of the gate family.
            ignore_global_phase: If True, value equality is checked via
                `cirq.equal_up_to_global_phase`.

        Raises:
            ValueError: if `gate` is not a `cirq.Gate` instance or subclass.
            ValueError: if `gate` is a parameterized instance of `cirq.Gate`.
        """
        if not (
            isinstance(gate, raw_types.Gate)
            or (isinstance(gate, type) and issubclass(gate, raw_types.Gate))
        ):
            raise ValueError(f'Gate {gate} must be an instance or subclass of `cirq.Gate`.')
        if isinstance(gate, raw_types.Gate) and protocols.is_parameterized(gate):
            raise ValueError(f'Gate {gate} must be a non-parameterized instance of `cirq.Gate`.')
        self._gate = gate
        # Fall back to generated name/description when none is supplied.
        self._name = name if name else self._default_name()
        self._description = description if description else self._default_description()
        self._ignore_global_phase = ignore_global_phase

    def _gate_str(self, gettr: Callable[[Any], str] = str) -> str:
        return _gate_str(self.gate, gettr)

    def _gate_json(self) -> Union[raw_types.Gate, str]:
        # Gate types are serialized as their cirq type name; instances as-is.
        return self.gate if not isinstance(self.gate, type) else protocols.json_cirq_type(self.gate)

    def _default_name(self) -> str:
        family_type = 'Instance' if isinstance(self.gate, raw_types.Gate) else 'Type'
        return f'{family_type} GateFamily: {self._gate_str()}'

    def _default_description(self) -> str:
        check_type = r'g == {}' if isinstance(self.gate, raw_types.Gate) else r'isinstance(g, {})'
        return f'Accepts `cirq.Gate` instances `g` s.t. `{check_type.format(self._gate_str())}`'

    @property
    def gate(self) -> Union[Type[raw_types.Gate], raw_types.Gate]:
        return self._gate

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return self._description

    def _predicate(self, gate: raw_types.Gate) -> bool:
        """Checks whether `cirq.Gate` instance `gate` belongs to this GateFamily.

        The default predicate depends on the gate family initialization type:
        a) Instance Family: `cirq.equal_up_to_global_phase(gate, self.gate)`
            if self._ignore_global_phase else `gate == self.gate`.
        b) Type Family: `isinstance(gate, self.gate)`.

        Args:
            gate: `cirq.Gate` instance which should be checked for containment.
        """
        if isinstance(self.gate, raw_types.Gate):
            return (
                protocols.equal_up_to_global_phase(gate, self.gate)
                if self._ignore_global_phase
                else gate == self._gate
            )
        return isinstance(gate, self.gate)

    def __contains__(self, item: Union[raw_types.Gate, raw_types.Operation]) -> bool:
        # Operations are unwrapped to their underlying gate before checking.
        if isinstance(item, raw_types.Operation):
            if item.gate is None:
                return False
            item = item.gate
        return self._predicate(item)

    def __str__(self) -> str:
        return f'{self.name}\n{self.description}'

    def __repr__(self) -> str:
        # Only include name/description when they differ from the generated
        # defaults, keeping the repr round-trippable and short.
        name_and_description = ''
        if self.name != self._default_name() or self.description != self._default_description():
            name_and_description = f'name="{self.name}", description="{self.description}", '
        return (
            f'cirq.GateFamily('
            f'gate={self._gate_str(repr)}, '
            f'{name_and_description}'
            f'ignore_global_phase={self._ignore_global_phase})'
        )

    def _value_equality_values_(self) -> Any:
        # `isinstance` is used to ensure the a gate type and gate instance is not compared.
        return (
            isinstance(self.gate, raw_types.Gate),
            self.gate,
            self.name,
            self.description,
            self._ignore_global_phase,
        )

    def _json_dict_(self) -> Dict[str, Any]:
        return {
            'gate': self._gate_json(),
            'name': self.name,
            'description': self.description,
            'ignore_global_phase': self._ignore_global_phase,
        }

    @classmethod
    def _from_json_dict_(
        cls, gate, name, description, ignore_global_phase, **kwargs
    ) -> 'GateFamily':
        # Gate types were serialized as type-name strings; resolve them back.
        if isinstance(gate, str):
            gate = protocols.cirq_type_from_json(gate)
        return cls(
            gate, name=name, description=description, ignore_global_phase=ignore_global_phase
        )
@value.value_equality()
class Gateset:
"""Gatesets represent a collection of `cirq.GateFamily` objects.
Gatesets are useful for
a) Describing the set of allowed gates in a human readable format
b) Validating a given gate / optree against the set of allowed gates
Gatesets rely on the underlying `cirq.GateFamily` for both description and
validation purposes.
"""
    def __init__(
        self,
        *gates: Union[Type[raw_types.Gate], raw_types.Gate, GateFamily],
        name: Optional[str] = None,
        unroll_circuit_op: bool = True,
        accept_global_phase_op: bool = True,
    ) -> None:
        """Init Gateset.

        Accepts a list of gates, each of which should be either
        a) `cirq.Gate` subclass
        b) `cirq.Gate` instance
        c) `cirq.GateFamily` instance

        `cirq.Gate` subclasses and instances are converted to the default
        `cirq.GateFamily(gate=g)` instance and thus a default name and
        description is populated.

        Args:
            *gates: A list of `cirq.Gate` subclasses / `cirq.Gate` instances /
                `cirq.GateFamily` instances to initialize the Gateset.
            name: (Optional) Name for the Gateset. Useful for description.
            unroll_circuit_op: If True, `cirq.CircuitOperation` is recursively
                validated by validating the underlying `cirq.Circuit`.
            accept_global_phase_op: If True, `cirq.GlobalPhaseOperation` is accepted.
        """
        self._name = name
        self._unroll_circuit_op = unroll_circuit_op
        self._accept_global_phase_op = accept_global_phase_op
        self._instance_gate_families: Dict[raw_types.Gate, GateFamily] = {}
        self._type_gate_families: Dict[Type[raw_types.Gate], GateFamily] = {}
        self._gates_repr_str = ", ".join([_gate_str(g, repr) for g in gates])
        # dict.fromkeys deduplicates while preserving insertion order, so the
        # first occurrence of each (wrapped) gate family wins.
        unique_gate_list: List[GateFamily] = list(
            dict.fromkeys(g if isinstance(g, GateFamily) else GateFamily(gate=g) for g in gates)
        )
        for g in unique_gate_list:
            # Only *default* GateFamily instances (not subclasses, which may
            # override _predicate) are eligible for the O(1) instance/type
            # lookup tables used by __contains__.
            if type(g) == GateFamily:
                if isinstance(g.gate, raw_types.Gate):
                    self._instance_gate_families[g.gate] = g
                else:
                    self._type_gate_families[g.gate] = g
        self._unique_gate_list = unique_gate_list
        self._gates = frozenset(unique_gate_list)
    @property
    def name(self) -> Optional[str]:
        """Name of the gateset, if one was provided at construction (else None)."""
        return self._name
    @property
    def gates(self) -> FrozenSet[GateFamily]:
        """The frozen set of unique `cirq.GateFamily` objects in this gateset."""
        return self._gates
def with_params(
self,
*,
name: Optional[str] = None,
unroll_circuit_op: Optional[bool] = None,
accept_global_phase_op: Optional[bool] = None,
) -> 'Gateset':
"""Returns a copy of this Gateset with identical gates and new values for named arguments.
If a named argument is None then corresponding value of this Gateset is used instead.
Args:
name: New name for the Gateset.
unroll_circuit_op: If True, new Gateset will recursively validate
`cirq.CircuitOperation` by validating the underlying `cirq.Circuit`.
accept_global_phase_op: If True, new Gateset will accept `cirq.GlobalPhaseOperation`.
Returns:
`self` if all new values are None or identical to the values of current Gateset.
else a new Gateset with identical gates and new values for named arguments.
"""
def val_if_none(var: Any, val: Any) -> Any:
return var if var is not None else val
name = val_if_none(name, self._name)
unroll_circuit_op = val_if_none(unroll_circuit_op, self._unroll_circuit_op)
accept_global_phase_op = val_if_none(accept_global_phase_op, self._accept_global_phase_op)
if (
name == self._name
and unroll_circuit_op == self._unroll_circuit_op
and accept_global_phase_op == self._accept_global_phase_op
):
return self
return Gateset(
*self.gates,
name=name,
unroll_circuit_op=cast(bool, unroll_circuit_op),
accept_global_phase_op=cast(bool, accept_global_phase_op),
)
    def __contains__(self, item: Union[raw_types.Gate, raw_types.Operation]) -> bool:
        """Check for containment of a given Gate/Operation in this Gateset.

        Containment checks are handled as follows:

        a) For Gates or Operations that have an underlying gate (i.e. op.gate is not None):
            - Forwards the containment check to the underlying `cirq.GateFamily` objects.
            - Examples of such operations include `cirq.GateOperations` and their controlled
                and tagged variants (i.e. instances of `cirq.TaggedOperation`,
                `cirq.ControlledOperation` where `op.gate` is not None) etc.

        b) For Operations that do not have an underlying gate:
            - Forwards the containment check to `self._validate_operation(item)`.
            - Examples of such operations include `cirq.CircuitOperations` and their controlled
                and tagged variants (i.e. instances of `cirq.TaggedOperation`,
                `cirq.ControlledOperation` where `op.gate` is None) etc.

        The complexity of the method in terms of the number of `gates`, n, is
        a) O(1) when any default `cirq.GateFamily` instance accepts the given item, except
            for an Instance GateFamily trying to match an item with a different global phase.
        b) O(n) for all other cases: matching against custom gate families, matching across
            global phase for the default Instance GateFamily, no match against any underlying
            gate family.

        Args:
            item: The `cirq.Gate` or `cirq.Operation` instance to check containment for.
        """
        if isinstance(item, raw_types.Operation) and item.gate is None:
            return self._validate_operation(item)
        g = item if isinstance(item, raw_types.Gate) else item.gate
        assert g is not None, f'`item`: {item} must be a gate or have a valid `item.gate`'
        if isinstance(g, global_phase_op.GlobalPhaseGate):
            return self._accept_global_phase_op
        # O(1) fast path: exact instance match in a default Instance GateFamily.
        if g in self._instance_gate_families:
            assert item in self._instance_gate_families[g], (
                f"{item} instance matches {self._instance_gate_families[g]} but "
                f"is not accepted by it."
            )
            return True
        # O(1) fast path: match any Type GateFamily along the gate's MRO.
        for gate_mro_type in type(g).mro():
            if gate_mro_type in self._type_gate_families:
                assert item in self._type_gate_families[gate_mro_type], (
                    f"{g} type {gate_mro_type} matches Type GateFamily:"
                    f"{self._type_gate_families[gate_mro_type]} but is not accepted by it."
                )
                return True
        # Slow path: check every (possibly custom) gate family one by one.
        return any(item in gate_family for gate_family in self._gates)
def validate(
    self,
    circuit_or_optree: Union['cirq.AbstractCircuit', op_tree.OP_TREE],
) -> bool:
    """Checks that every operation forming `circuit_or_optree` is contained in this Gateset.

    Args:
        circuit_or_optree: The `cirq.Circuit` or `cirq.OP_TREE` to validate.

    Returns:
        True iff every flattened operation passes `self._validate_operation`.
    """
    # Imported locally to avoid a circular import at module load time.
    from cirq.circuits import circuit

    if isinstance(circuit_or_optree, circuit.AbstractCircuit):
        operations = circuit_or_optree.all_operations()
    else:
        operations = circuit_or_optree
    for operation in op_tree.flatten_to_ops(operations):
        if not self._validate_operation(operation):
            return False
    return True
def _validate_operation(self, op: raw_types.Operation) -> bool:
    """Validates whether the given `cirq.Operation` is contained in this Gateset.

    The containment checks are handled as follows:

    a) For any operation which has an underlying gate (i.e. `op.gate` is not None):
        - Containment is checked via `self.__contains__` which further checks for
          containment in any of the underlying gate families.

    b) For all other types of operations (eg: `cirq.CircuitOperation`,
       `cirq.GlobalPhaseOperation` etc):
        - The behavior is controlled via flags passed to the constructor.

    Users should override this method to define custom behavior for operations that
    do not have an underlying `cirq.Gate`.

    Args:
        op: The `cirq.Operation` instance to check containment for.
    """
    # Imported here rather than at module level to avoid a circular import.
    from cirq.circuits import circuit_operation
    if op.gate is not None:
        # Gate-backed operation: defer to gate containment (`__contains__`).
        return op in self
    if isinstance(op, raw_types.TaggedOperation):
        # Tags are transparent for validation: validate the wrapped operation.
        return self._validate_operation(op.sub_operation)
    elif isinstance(op, circuit_operation.CircuitOperation) and self._unroll_circuit_op:
        # Unroll the sub-circuit: resolve its parameters one level deep and remap
        # its qubits per the operation's qubit_map, then validate recursively.
        op_circuit = protocols.resolve_parameters(
            op.circuit.unfreeze(), op.param_resolver, recursive=False
        )
        op_circuit = op_circuit.transform_qubits(
            lambda q: cast(circuit_operation.CircuitOperation, op).qubit_map.get(q, q)
        )
        return self.validate(op_circuit)
    else:
        # Gate-less operation of a kind this Gateset is not configured to accept.
        return False
def _value_equality_values_(self) -> Any:
    """Attributes that determine equality and hashing for this Gateset."""
    return self.gates, self.name, self._unroll_circuit_op, self._accept_global_phase_op
def __repr__(self) -> str:
    """Machine-readable representation mirroring the constructor call."""
    if self.name is None:
        name_str = ''
    else:
        name_str = f'name = "{self.name}", '
    # Spacing and commas below intentionally reproduce the historical output.
    return (
        f'cirq.Gateset({self._gates_repr_str}, {name_str}'
        f'unroll_circuit_op = {self._unroll_circuit_op},'
        f'accept_global_phase_op = {self._accept_global_phase_op})'
    )
def __str__(self) -> str:
    """Human-readable listing: a header line followed by one entry per unique gate."""
    header = 'Gateset: '
    if self.name:
        header = header + self.name
    body = "\n\n".join(str(gate) for gate in self._unique_gate_list)
    return header + '\n' + body
def _json_dict_(self) -> Dict[str, Any]:
    """Serializes this Gateset for cirq's JSON protocol."""
    return dict(
        gates=self._unique_gate_list,
        name=self.name,
        unroll_circuit_op=self._unroll_circuit_op,
        accept_global_phase_op=self._accept_global_phase_op,
    )
@classmethod
def _from_json_dict_(
    cls, gates, name, unroll_circuit_op, accept_global_phase_op, **kwargs
) -> 'Gateset':
    """Deserializes a Gateset from cirq's JSON protocol (extra keys are ignored)."""
    options = dict(
        name=name,
        unroll_circuit_op=unroll_circuit_op,
        accept_global_phase_op=accept_global_phase_op,
    )
    return cls(*gates, **options)
| 40.812081 | 100 | 0.634654 |
c9fba3fcb2ba9c221ea83bbe9ecefb9c07dd8e5d | 5,841 | py | Python | src/sentry/api/endpoints/project_key_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2019-08-28T11:03:13.000Z | 2019-08-28T11:03:13.000Z | src/sentry/api/endpoints/project_key_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2021-05-09T11:43:43.000Z | 2021-05-09T11:43:43.000Z | src/sentry/api/endpoints/project_key_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.db.models import F
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry import features
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import AuditLogEntryEvent, ProjectKey, ProjectKeyStatus
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.loader.browsersdkversion import (
DEFAULT_VERSION,
get_browser_sdk_version_choices
)
@scenario('DeleteClientKey')
def delete_key_scenario(runner):
    """API-docs scenario: create a client key, then delete it via the API."""
    client_key = runner.utils.create_client_key(runner.default_project)
    path = '/projects/%s/%s/keys/%s/' % (
        runner.org.slug, runner.default_project.slug, client_key.public_key
    )
    runner.request(method='DELETE', path=path)
@scenario('UpdateClientKey')
def update_key_scenario(runner):
    """API-docs scenario: create a client key, then rename it via PUT."""
    client_key = runner.utils.create_client_key(runner.default_project)
    path = '/projects/%s/%s/keys/%s/' % (
        runner.org.slug, runner.default_project.slug, client_key.public_key
    )
    runner.request(method='PUT', path=path, data={'name': 'Quite Positive Key'})
class RateLimitSerializer(serializers.Serializer):
    """Validates the nested rateLimit payload on client-key updates."""
    # Non-negative event count for the rate-limit window.
    count = serializers.IntegerField(min_value=0, required=False)
    # Window length in seconds, capped at one day (86400s).
    window = serializers.IntegerField(min_value=0, max_value=60 * 60 * 24, required=False)
class KeySerializer(serializers.Serializer):
    """Validates the client-key update payload (all fields optional)."""
    name = serializers.CharField(max_length=200, required=False)
    isActive = serializers.BooleanField(required=False)
    rateLimit = RateLimitSerializer(required=False)
    # Restricted to versions known to the loader.
    browserSdkVersion = serializers.ChoiceField(
        choices=get_browser_sdk_version_choices(), required=False
    )
class ProjectKeyDetailsEndpoint(ProjectEndpoint):
    """CRUD endpoint for a single client key, addressed by its public key."""

    doc_section = DocSection.PROJECTS

    @staticmethod
    def _get_store_key(project, key_id):
        """Return the store-capable `ProjectKey` for ``project`` / ``key_id``.

        The identical lookup was previously duplicated in get/put/delete.

        Raises:
            ResourceDoesNotExist: if no matching key exists.
        """
        try:
            return ProjectKey.objects.get(
                project=project,
                public_key=key_id,
                roles=F('roles').bitor(ProjectKey.roles.store),
            )
        except ProjectKey.DoesNotExist:
            raise ResourceDoesNotExist

    def get(self, request, project, key_id):
        """Retrieve a single client key by its public key."""
        key = self._get_store_key(project, key_id)
        return Response(serialize(key, request.user), status=200)

    # Consistency fix: this scenario was defined above but never attached,
    # unlike delete_key_scenario on delete().
    @attach_scenarios([update_key_scenario])
    def put(self, request, project, key_id):
        """
        Update a Client Key
        ```````````````````

        Update a client key. This can be used to rename a key.

        :pparam string organization_slug: the slug of the organization the
                                          client keys belong to.
        :pparam string project_slug: the slug of the project the client keys
                                     belong to.
        :pparam string key_id: the ID of the key to update.
        :param string name: the new name for the client key.
        :auth: required
        """
        key = self._get_store_key(project, key_id)
        serializer = KeySerializer(data=request.DATA, partial=True)
        if serializer.is_valid():
            result = serializer.object
            if result.get('name'):
                key.label = result['name']
            # An explicit empty string resets the pinned browser SDK version to
            # the default; any other submitted value is pinned as-is.
            if result.get('browserSdkVersion') == '':
                key.data = {'browserSdkVersion': DEFAULT_VERSION}
            else:
                key.data = {'browserSdkVersion': result.get('browserSdkVersion', DEFAULT_VERSION)}
            if result.get('isActive') is True:
                key.status = ProjectKeyStatus.ACTIVE
            elif result.get('isActive') is False:
                key.status = ProjectKeyStatus.INACTIVE
            if features.has('projects:rate-limits', project):
                # Sentinel -1 distinguishes "rateLimit explicitly null" (clear
                # the limit) from "rateLimit absent" (leave it unchanged).
                if result.get('rateLimit', -1) is None:
                    key.rate_limit_count = None
                    key.rate_limit_window = None
                elif result.get('rateLimit'):
                    key.rate_limit_count = result['rateLimit']['count']
                    key.rate_limit_window = result['rateLimit']['window']
            key.save()
            self.create_audit_entry(
                request=request,
                organization=project.organization,
                target_object=key.id,
                event=AuditLogEntryEvent.PROJECTKEY_EDIT,
                data=key.get_audit_log_data(),
            )
            return Response(serialize(key, request.user), status=200)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @attach_scenarios([delete_key_scenario])
    def delete(self, request, project, key_id):
        """
        Delete a Client Key
        ```````````````````

        Delete a client key.

        :pparam string organization_slug: the slug of the organization the
                                          client keys belong to.
        :pparam string project_slug: the slug of the project the client keys
                                     belong to.
        :pparam string key_id: the ID of the key to delete.
        :auth: required
        """
        key = self._get_store_key(project, key_id)
        self.create_audit_entry(
            request=request,
            organization=project.organization,
            target_object=key.id,
            event=AuditLogEntryEvent.PROJECTKEY_REMOVE,
            data=key.get_audit_log_data(),
        )
        key.delete()
        return Response(status=204)
| 34.767857 | 98 | 0.617189 |
e5de1e4bc2ab8e6333611c82c68decb9a633d0dd | 9,920 | py | Python | sketchpy/vijay.py | itsamansharmahub/sketchpy | 24974bc24283b4fa5863ed18104f62e514719006 | [
"MIT"
] | 12 | 2022-02-13T07:15:55.000Z | 2022-03-29T10:43:40.000Z | sketchpy/vijay.py | itsamansharmahub/sketchpy | 24974bc24283b4fa5863ed18104f62e514719006 | [
"MIT"
] | 2 | 2022-03-29T10:43:28.000Z | 2022-03-31T08:45:17.000Z | sketchpy/vijay.py | itsamansharmahub/sketchpy | 24974bc24283b4fa5863ed18104f62e514719006 | [
"MIT"
] | 6 | 2022-03-01T14:47:55.000Z | 2022-03-30T03:44:58.000Z | import turtle as tu
class vijay:
def __init__(self):
self.dress = [(149, 348),(152, 344),(151, 333),(144, 335),(137, 337),(103, 372),(101, 378),(67, 389),(59, 389),(53, 391),(8, 404),(17, 440),(37, 487),(65, 523),(80, 544),(98, 570),(124, 601),(164, 620),(201, 646),(222, 660),(236, 665),(262, 663),(301, 658),(336, 649),(364, 639),(399, 612),(415, 591),(420, 585),(414, 562),(405, 542),(389, 523),(375, 508),(378, 501),(378, 494),(380, 483),(370, 470),(336, 440),(300, 408),(281, 443),(292, 459),(296, 470),(299, 474),(297, 493),(300, 513),(286, 527),(276, 542),(262, 587),(244, 640),(231, 598),(204, 557),(191, 524),(186, 520),(186, 514),(185, 508),(180, 501),(177, 484),(168, 478),(160, 467),(156, 430),(153, 409),(147, 394),(142, 376),(142, 372),(142, 366),(145, 358),(148, 352),(150, 349),(151, 343),(149, 335),(146, 335)]
self.glass_frame = [(156, 223),(158, 214),(202, 220),(282, 244),(337, 270),(345, 275),(358, 284),(357, 292),(337, 278),(331, 278),(329, 279),(324, 290),(318, 300),(312, 307),(307, 311),(302, 314),(297, 315),(291, 315),(286, 315),(280, 314),(272, 311),(264, 306),(258, 300),(254, 293),(250, 281),(251, 264),(253, 251),(244, 247),(235, 245),(230, 259),(224, 271),(209, 284),(202, 285),(192, 285),(181, 283),(172, 279),(164, 272),(160, 262),(159, 251),(159, 238),(160, 226),(156, 222),(158, 215)]
self.hair = [(156, 220),(159, 214),(178, 215),(205, 163),(209, 157),(211, 155),(217, 155),(229, 157),(256, 164),(251, 165),(259, 171),(253, 170),(259, 175),(253, 175),(256, 177),(251, 179),(257, 182),(271, 182),(265, 180),(275, 180),(267, 175),(278, 179),(272, 173),(301, 180),(313, 187),(310, 178),(316, 181),(324, 186),(325, 183),(334, 190),(347, 198),(353, 203),(353, 210),(350, 216),(346, 227),(341, 238),(340, 243),(337, 255),(339, 266),(341, 254),(342, 266),(343, 261),(348, 259),(350, 260),(350, 270),(348, 278),(351, 275),(351, 279),(353, 278),(354, 280),(354, 282),(356, 279),(357, 284),(358, 281),(358, 285),(357, 286),(356, 295),(355, 291),(354, 293),(353, 299),(351, 296),(350, 300),(350, 309),(348, 305),(348, 312),(347, 314),(346, 318),(345, 315),(344, 321),(343, 330),(341, 337),(333, 346),(327, 359),(327, 354),(325, 359),(324, 356),(319, 361),(321, 355),(316, 361),(316, 356),(313, 361),(314, 353),(307, 361),(311, 353),(305, 359),(306, 353),(290, 370),(294, 364),(278, 380),(275, 382),(268, 384),(266, 380),(266, 369),(269, 364),(273, 357),(274, 351),(272, 343),(267, 332),(266, 335),(262, 331),(262, 333),(258, 327),(258, 329),(255, 325),(255, 328),(251, 322),(250, 324),(246, 321),(246, 323),(241, 319),(238, 317),(232, 315),(228, 319),(222, 317),(220, 313),(217, 309),(210, 309),(203, 308),(203, 310),(194, 312),(187, 313),(181, 316),(177, 321),(174, 329),(172, 335),(172, 344),(175, 341),(167, 351),(162, 344),(162, 337),(160, 341),(160, 333),(158, 336),(157, 329),(155, 321),(153, 313),(150, 307),(150, 300),(150, 291),(146, 305),(146, 316),(145, 324),(146, 334),(146, 345),(153, 354),(158, 367),(163, 375),(168, 388),(170, 395),(174, 401),(176, 398),(178, 404),(178, 404),(181, 404),(187, 410),(195, 411),(204, 418),(211, 424),(214, 422),(221, 423),(225, 426),(230, 424),(233, 428),(237, 425),(245, 425),(250, 423),(256, 420),(266, 415),(272, 412),(277, 415),(283, 409),(291, 405),(297, 401),(305, 397),(313, 391),(318, 386),(321, 381),(328, 373),(334, 365),(337, 359),(344, 
341),(351, 330),(352, 322),(356, 314),(360, 307),(365, 312),(373, 317),(382, 318),(383, 317),(390, 306),(391, 311),(404, 285),(403, 294),(415, 267),(422, 239),(424, 249),(432, 229),(432, 217),(428, 203),(424, 195),(429, 201),(427, 188),(423, 178),(430, 188),(428, 177),(424, 168),(421, 163),(412, 157),(406, 150),(397, 141),(391, 132),(390, 123),(394, 128),(386, 118),(371, 110),(365, 102),(355, 90),(363, 94),(353, 87),(335, 86),(322, 81),(333, 84),(323, 77),(314, 77),(302, 77),(295, 74),(304, 75),(281, 67),(269, 66),(254, 69),(244, 74),(247, 71),(240, 74),(233, 74),(230, 74),(223, 71),(231, 70),(225, 69),(214, 69),(207, 73),(202, 78),(198, 83),(193, 93),(185, 120),(190, 87),(181, 105),(179, 111),(174, 142),(171, 132),(168, 138),(174, 156),(161, 205),(157, 208),(157, 211),(156, 221),(158, 214),(177, 215),(206, 162)]
self.l_glass = [(172, 224),(167, 232),(164, 243),(163, 255),(164, 263),(167, 269),(173, 275),(180, 279),(188, 281),(199, 281),(207, 279),(213, 276),(217, 271),(224, 261),(227, 251),(228, 244),(225, 238),(217, 233),(208, 229),(200, 226),(191, 223),(182, 221),(175, 222),(170, 225),(168, 230)]
self.lips = [(190, 334),(196, 349),(225, 360),(239, 359),(254, 351),(254, 346),(244, 337),(214, 326),(195, 325),(188, 332),(191, 336),(198, 336),(208, 335),(217, 338),(226, 339),(232, 342),(239, 345),(245, 347),(250, 347),(252, 348),(253, 352),(248, 352),(236, 350),(192, 338),(194, 344),(198, 350)]
self.neck = [(149, 349),(144, 358),(142, 370),(144, 377),(146, 387),(150, 397),(152, 404),(154, 418),(156, 433),(157, 450),(158, 462),(162, 471),(168, 479),(176, 485),(178, 494),(187, 514),(186, 518),(186, 521),(189, 523),(193, 529),(196, 539),(204, 559),(231, 596),(243, 641),(275, 546),(282, 531),(300, 514),(297, 493),(297, 485),(299, 477),(299, 473),(294, 472),(294, 465),(289, 456),(281, 443),(301, 409),(310, 391),(298, 400),(278, 408),(259, 415),(236, 421),(209, 418),(190, 408),(172, 391),(168, 381),(158, 368),(154, 358),(149, 349),(147, 355),(143, 362)]
self.teeth = [(201, 337),(213, 342),(214, 337),(203, 335),(201, 337),(226, 347),(228, 346),(230, 341),(235, 343),(233, 347),(228, 346),(229, 342),(237, 344),(238, 348),(240, 349),(243, 347),(237, 344)]
self.inner_beard = [(201, 380),(198, 381),(195, 383),(193, 384),(191, 386),(187, 386),(186, 383),(184, 381),(182, 380),(179, 380),(179, 378),(178, 375),(178, 371),(176, 369),(178, 365),(178, 364),(179, 360),(179, 358),(179, 355),(179, 354),(182, 350),(182, 348),(182, 345),(182, 344),(184, 342),(186, 340),(186, 337),(187, 336),(190, 331),(193, 334),(193, 330),(196, 333),(196, 328),(198, 331),(200, 328),(201, 331),(202, 327),(207, 330),(207, 326),(208, 329),(210, 326),(211, 330),(213, 325),(214, 331),(217, 328),(219, 333),(223, 327),(223, 333),(224, 329),(224, 334),(228, 331),(227, 336),(229, 333),(230, 336),(232, 332),(232, 335),(234, 332),(232, 337),(236, 335),(236, 338),(238, 335),(238, 338),(243, 338),(239, 340),(244, 338),(242, 341),(248, 341),(246, 342),(250, 343),(247, 345),(249, 345),(252, 346),(253, 349),(256, 353),(256, 350),(258, 353),(258, 361),(258, 365),(259, 368),(253, 378),(257, 377),(252, 380),(252, 390),(249, 390),(249, 394),(247, 394),(246, 395),(243, 396),(242, 394),(241, 398),(238, 395),(238, 397),(236, 394),(235, 398),(235, 395),(231, 398),(231, 392),(229, 395),(226, 389),(226, 391),(222, 387),(218, 381),(216, 386),(216, 380),(220, 378),(221, 375),(223, 376),(224, 372),(226, 376),(228, 371),(229, 374),(231, 370),(232, 372),(232, 366),(230, 364),(221, 363),(203, 357),(201, 354),(196, 359),(195, 362),(199, 359),(197, 363),(200, 362),(197, 366),(200, 364),(199, 367),(201, 367),(200, 372),(202, 368),(200, 377),(202, 374),(200, 380),(194, 385),(193, 383),(188, 385),(188, 384),(186, 383),(182, 380),(177, 378),(176, 368),(178, 365),(178, 361),(179, 358),(178, 357),(179, 350),(182, 347),(182, 344),(185, 341),(189, 335),(189, 329)]
self.r_glass = [(272, 249),(262, 252),(258, 259),(256, 264),(254, 272),(254, 280),(256, 289),(260, 298),(263, 301),(271, 307),(276, 309),(283, 311),(289, 312),(296, 313),(301, 311),(308, 306),(313, 299),(319, 289),(321, 282),(323, 274),(322, 270),(319, 266),(310, 260),(299, 255),(292, 252),(278, 249),(273, 249),(267, 250),(263, 251),(260, 254)]
self.pen = tu.Turtle()
self.pen.speed(0)
self.x_offset = 270
self.y_offset = 300
def go(self, x, y):
self.pen.penup()
self.pen.goto(x-self.x_offset,(y*-1)+self.y_offset)
self.pen.pendown()
def paint(self, coord, co=(0,0,0)):
    """Draw and fill the polygon(s) described by `coord` in color `co`.

    `coord` is a list of (x, y) image coordinates. The sentinel pair
    (-1, -1) ends the current filled polygon; the coordinate after it
    starts a new one. `co` is used for both outline and fill.
    """
    self.pen.color(co)
    t_x, t_y = coord[0]
    # Jump to the first vertex and start filling.
    self.go(t_x, t_y)
    self.pen.fillcolor(co)
    self.pen.begin_fill()
    # t == 1 means the previous point was the (-1, -1) sentinel, so the next
    # point begins a new polygon.
    t = 0
    for i in coord[1:]:
        print(i)  # debug trace of every vertex processed
        x, y = i
        if t:
            # First vertex of a new polygon: jump (pen up) and restart fill.
            self.go(x, y)
            t = 0
            self.pen.begin_fill()
            continue
        if x == -1 and y == -1:
            # Sentinel: close and fill the polygon drawn so far.
            t = 1
            self.pen.end_fill()
            continue
        else:
            # Regular vertex: draw a line to it (offset + y-flip as in go()).
            self.pen.goto(x-self.x_offset, (y*-1)+self.y_offset)
    # Fill whatever polygon is still open at the end of the list.
    self.pen.end_fill()
def draw_fn(self, coord, mode = 1, co = (0,0,0), thickness = 1):
    """Render `coord` either as an outline (mode=1) or a filled shape (mode=0).

    Args:
        coord: list of (x, y) vertices; (-1, -1) separates sub-paths.
        mode: 1 draws an unfilled outline of the given thickness,
              0 delegates to paint() for a filled rendering.
        co: RGB color given in 0-255 components; converted to turtle's
            0.0-1.0 range below.
        thickness: pen width used in outline mode.
    """
    # Convert 0-255 RGB to turtle's 0.0-1.0 color components.
    co = (co[0]/255, co[1]/255, co[2]/255)
    self.pen.color(co)
    if mode:
        self.pen.width(thickness)
        t_x, t_y = coord[0]
        self.go(t_x, t_y)
        # t == 1 means the previous point was the (-1, -1) sentinel, so the
        # next point starts a new sub-path (jump without drawing).
        t = 0
        for i in coord[1:]:
            print(i)  # debug trace of every vertex processed
            x, y = i
            if t:
                self.go(x, y)
                t = 0
                continue
            if x == -1 and y == -1:
                t = 1
                continue
            else:
                # Regular vertex: draw a line to it (offset + y-flip as in go()).
                self.pen.goto(x-self.x_offset, (y*-1)+self.y_offset)
    else:
        # Filled mode: paint() handles outline + fill with the converted color.
        self.paint(coord=coord, co=co)
def draw(self, retain=True):
    """Render the full portrait, painting filled layers back-to-front.

    Args:
        retain: when True, keep the turtle window open (blocks in tu.done()).
    """
    # (outline coordinates, 0-255 RGB fill color), ordered so that later
    # layers are painted on top of earlier ones.
    layers = (
        (self.neck, (247, 164, 130)),
        (self.dress, (75, 91, 153)),
        (self.hair, (0, 0, 0)),
        (self.glass_frame, (56, 53, 48)),
        (self.l_glass, (7, 96, 148)),
        (self.r_glass, (7, 96, 148)),
        (self.inner_beard, (241, 152, 112)),
        (self.lips, (238, 104, 114)),
        (self.teeth, (0, 0, 0)),
    )
    for outline, rgb in layers:
        self.draw_fn(outline, co=rgb, mode=0)
    if retain:
        tu.done()
| 107.826087 | 2,832 | 0.51371 |
6b124b91a85415ca48455c1aa848ee45cf190c37 | 2,099 | bzl | Python | tools/cpp/toolchain_utils.bzl | obruns/bazel | 654f36408319719d3a90849b2bd21bd3efd62d7a | [
"Apache-2.0"
] | 16,989 | 2015-09-01T19:57:15.000Z | 2022-03-31T23:54:00.000Z | tools/cpp/toolchain_utils.bzl | FreyaVPN/bazel | b4cc44c978bc0e25b6652b66018b6aad12bff820 | [
"Apache-2.0"
] | 12,562 | 2015-09-01T09:06:01.000Z | 2022-03-31T22:26:20.000Z | tools/cpp/toolchain_utils.bzl | FreyaVPN/bazel | b4cc44c978bc0e25b6652b66018b6aad12bff820 | [
"Apache-2.0"
] | 3,707 | 2015-09-02T19:20:01.000Z | 2022-03-31T17:06:14.000Z | # pylint: disable=g-bad-file-header
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Finds the c++ toolchain.
Returns the toolchain if enabled, and falls back to a toolchain constructed from
the CppConfiguration.
"""
def find_cpp_toolchain(ctx):
    """
    Finds the c++ toolchain.

    If the c++ toolchain is in use, returns it. Otherwise, returns a c++
    toolchain derived from legacy toolchain selection.

    Args:
      ctx: The rule context for which to find a toolchain.

    Returns:
      A CcToolchainProvider.
    """

    # Check the incompatible flag for toolchain resolution. The hasattr()
    # guard keeps this file loadable on Bazel versions that predate the
    # is_cc_toolchain_resolution_enabled_do_not_use API.
    if hasattr(cc_common, "is_cc_toolchain_resolution_enabled_do_not_use") and cc_common.is_cc_toolchain_resolution_enabled_do_not_use(ctx = ctx):
        if not "@bazel_tools//tools/cpp:toolchain_type" in ctx.toolchains:
            fail("In order to use find_cpp_toolchain, you must include the '@bazel_tools//tools/cpp:toolchain_type' in the toolchains argument to your rule.")
        toolchain_info = ctx.toolchains["@bazel_tools//tools/cpp:toolchain_type"]

        # Some toolchains wrap the actual CcToolchainInfo in a `cc` field;
        # unwrap it when both marker attributes are present.
        if hasattr(toolchain_info, "cc_provider_in_toolchain") and hasattr(toolchain_info, "cc"):
            return toolchain_info.cc
        return toolchain_info

    # Fall back to the legacy implicit attribute lookup.
    if hasattr(ctx.attr, "_cc_toolchain"):
        return ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]

    # We didn't find anything.
    fail("In order to use find_cpp_toolchain, you must define the '_cc_toolchain' attribute on your rule or aspect.")
cff1655afc0969c1d89b5c9960a3a07ffd808580 | 2,753 | py | Python | app/model/tecnico.py | MacosPrintes001/webservice-paem | fa992e4bda40eaae3b585cee2ad2b65685104cc3 | [
"Apache-2.0"
] | null | null | null | app/model/tecnico.py | MacosPrintes001/webservice-paem | fa992e4bda40eaae3b585cee2ad2b65685104cc3 | [
"Apache-2.0"
] | null | null | null | app/model/tecnico.py | MacosPrintes001/webservice-paem | fa992e4bda40eaae3b585cee2ad2b65685104cc3 | [
"Apache-2.0"
] | null | null | null | from ..database import db
from .usuario import UsuarioModel
from .campus import CampusModel
from .base_model import BaseHasNameModel
from datetime import date
from app.model import campus
class TecnicoModel(BaseHasNameModel, db.Model):
    """SQLAlchemy model for a technician ("tecnico"), optionally linked to a
    user account and a campus."""

    __tablename__ = "tecnico"

    id_tecnico = db.Column(db.Integer, primary_key=True)
    # SIAPE registration number; unique per technician.
    siape = db.Column(db.String(45), unique=True, nullable=False)
    nome = db.Column(db.String(45), nullable=False)
    # Name-mangled private column; exposed through the data_nascimento property.
    __data_nascimento = db.Column('data_nascimento', db.Date, nullable=True)
    cargo = db.Column(db.String(45), nullable=True)
    status_covid = db.Column(db.SmallInteger, nullable=True)
    status_afastamento = db.Column(db.SmallInteger, nullable=True)
    usuario_id_usuario = db.Column(db.Integer, db.ForeignKey('usuario.id_usuario'), nullable=True)
    usuario = db.relationship('UsuarioModel', lazy='select', uselist=False)
    campus_id_campus = db.Column(db.Integer, db.ForeignKey('campus.id_campus'), nullable=True)
    campus = db.relationship('CampusModel', uselist=False, lazy='noload')

    @property
    def data_nascimento(self):
        # Always returns a string; yields the literal "None" when unset.
        return str(self.__data_nascimento)

    @data_nascimento.setter
    def data_nascimento(self, data):
        # Accepts either a datetime.date or a day-month-year string separated
        # by '-' (the split below binds day, month, year in that order).
        # NOTE(review): confirm callers actually send 'DD-MM-YYYY', not ISO.
        if isinstance(data, str):
            day, month, year = data.split('-')
            data = date(day=int(day), month=int(month), year=int(year))
        self.__data_nascimento = data

    def serialize(self):
        """Return a JSON-serializable dict of this technician.

        Uses the string "null" as placeholder when the related user or
        campus is missing.
        """
        try:
            usuario_dict = self.usuario.serialize()
        except AttributeError as msg:
            # self.usuario is None when no user account is linked yet.
            print("Warning: Usuário não cadatrado para este trécnico")
            usuario_dict = None
        finally:
            # NOTE(review): returning from `finally` suppresses any exception
            # other than the handled AttributeError — confirm this is intended.
            campus = db.session.query(
                CampusModel.nome
            ).filter_by(id_campus=self.campus_id_campus).first()  # row tuple with just the name, or None
            return {
                'id_tecnico':self.id_tecnico,
                'siape':self.siape,
                'nome':self.nome,
                'data_nascimento':self.data_nascimento,
                "cargo":self.cargo,
                'status_covid':self.status_covid,
                'status_afastamento':self.status_afastamento,
                'usuario_id_usuario':self.usuario_id_usuario,
                'usuario': usuario_dict if usuario_dict else "null",
                'campus_id_campus':self.campus_id_campus,
                'campus': campus.nome if campus else "null"
            }

    @classmethod
    def query_all_names(cls):
        # Delegate to the base implementation with labeled columns.
        return super().query_all_names(
            cls.nome.label("nome"),
            cls.id_tecnico.label("id"),
            cls.siape.label("other_id")
        )

    def __repr__(self):
        return '<tecnico %r>' % self.nome
| 36.223684 | 101 | 0.626952 |
ab1ff678be9132753baa3bfdecbc7e9abd6e0fbc | 3,160 | py | Python | dataset/grid_dataset.py | archettialberto/neural_weighted_a_star | a7172f1de81ad5cc7e301031f271ded3e93a2283 | [
"MIT"
] | 2 | 2021-09-21T10:22:07.000Z | 2021-09-22T08:35:28.000Z | dataset/grid_dataset.py | archettialberto/neural_weighted_a_star | a7172f1de81ad5cc7e301031f271ded3e93a2283 | [
"MIT"
] | null | null | null | dataset/grid_dataset.py | archettialberto/neural_weighted_a_star | a7172f1de81ad5cc7e301031f271ded3e93a2283 | [
"MIT"
] | null | null | null | import os
from abc import ABC, abstractmethod
from pathlib import Path
import numpy as np
import torch
class GridDataset(torch.utils.data.Dataset, ABC):
    """Base class for grid datasets stored as ``<prefix>_<name>.npy`` files.

    Subclasses implement ``__len__`` and ``__getitem__``; this base loads the
    image tensor from disk and can normalize each sample to [0, 1].
    """

    def __init__(self, path, prefix, normalize_input=False):
        """Load ``<prefix>_images.npy`` from ``path``.

        Args:
            path: directory holding the .npy files.
            prefix: one of "train", "val", "test".
            normalize_input: if True, scale each image sample to [0, 1].

        Raises:
            ValueError: if prefix is not a recognized split name.
        """
        super().__init__()
        self.path = Path(path)
        if prefix not in ("train", "val", "test"):
            raise ValueError(prefix)
        self.prefix = prefix
        images = self.load_from_file("images")
        self.images = self.normalize(images) if normalize_input else images

    @abstractmethod
    def __len__(self):
        """Number of samples in the dataset."""

    @abstractmethod
    def __getitem__(self, index):
        """Return the sample at ``index``."""

    def load_from_file(self, name):
        """Load ``<prefix>_<name>.npy`` from ``self.path`` as a float tensor.

        Raises:
            FileNotFoundError: if the file is missing.
        """
        full_path = os.path.join(self.path, self.prefix + "_" + name + ".npy")
        if not os.path.isfile(full_path):
            raise FileNotFoundError("File " + str(full_path) + " does not exist.")
        return torch.from_numpy(np.load(full_path)).float()

    @staticmethod
    def normalize(i):
        """Scale each sample of a (batch, rows, cols, channels) tensor to [0, 1].

        Each sample is shifted by its own minimum and divided by the resulting
        maximum. Note: operates in place on the tensor's storage.
        """
        assert len(i.shape) == 4
        original_shape = i.shape
        flat = i.reshape((i.shape[0], -1))
        flat -= flat.min(1, keepdim=True)[0]
        flat /= flat.max(1, keepdim=True)[0]
        return flat.reshape(original_shape)
class WarcraftDataset(GridDataset):
    """Grid dataset pairing terrain images with shortest-path supervision.

    Loads weights, sources, targets, paths, expanded-node maps and a
    heuristic from the corresponding ``<prefix>_*.npy`` files.
    """

    def __init__(self, path, prefix, normalize_input=True):
        super().__init__(path, prefix, normalize_input)
        self.weights = self.load_from_file("weights")
        # Grid coordinates are used as indices, hence .long().
        self.sources = self.load_from_file("sources").long()
        self.targets = self.load_from_file("targets").long()
        self.paths = self.load_from_file("paths")
        self.exp_nodes = self.load_from_file("exp_nodes")
        self.opt_exp_nodes = self.load_from_file("opt_exp_nodes")
        self.heuristic = self.load_from_file("heuristic")

    def __len__(self):
        # One sample per (image, target, source) combination.
        return self.sources.shape[0] * self.sources.shape[1] * self.sources.shape[2]

    def __getitem__(self, index):
        # Decompose the flat index into (image b, target i, source j).
        t_per_i = self.sources.shape[1]
        s_per_t = self.sources.shape[2]
        b = index // (t_per_i * s_per_t)
        # NOTE(review): both the // and the % below use t_per_i; if the data is
        # ever generated with t_per_i != s_per_t, confirm the intended ordering
        # of dims 1 (targets) and 2 (sources).
        i = index % (t_per_i * s_per_t) // t_per_i
        j = index % (t_per_i * s_per_t) % t_per_i
        source = self.sources[b, i, j]
        path = self.paths[b, i, j]
        exp_nodes = self.exp_nodes[b, i, j]
        opt_exp_nodes = self.opt_exp_nodes[b, i, j]
        image = self.images[b]
        weights = self.weights[b]
        target = self.targets[b, i]
        heuristic = self.heuristic[b, i]
        # Build a 5-channel image: RGB plus one-hot tile masks for the target
        # (channel 3) and the source (channel 4).
        image_st = torch.zeros((image.shape[0], image.shape[1], 5))
        image_st[:, :, 0:3] = image
        # Pixels per grid cell along each axis (image is a scaled-up grid).
        dx = image.shape[0] // path.shape[0]
        dy = image.shape[1] // path.shape[1]
        tx = target[0] * dx
        ty = target[1] * dy
        image_st[tx:tx + dx, ty:ty + dy, 3] = 1.0
        sx = source[0] * dx
        sy = source[1] * dy
        image_st[sx:sx + dx, sy:sy + dy, 4] = 1.0
        return (
            image,
            image_st,
            weights,
            heuristic,
            source,
            target,
            path,
            exp_nodes,
            opt_exp_nodes
        )
| 31.287129 | 84 | 0.573101 |
09600d4e623e9ff68379179aeacabd3a91d19005 | 18,821 | py | Python | nltk-drt/nltk_drt/temporaldrt.py | prodotiscus/nltk-drt | 6029f1357369b124758e86020734a55574a6a15a | [
"Apache-2.0"
] | 2 | 2021-12-28T09:02:57.000Z | 2022-01-04T07:02:31.000Z | nltk-drt/nltk_drt/temporaldrt.py | prodotiscus/nltk-drt | 6029f1357369b124758e86020734a55574a6a15a | [
"Apache-2.0"
] | 5 | 2022-03-20T23:11:29.000Z | 2022-03-20T23:30:03.000Z | nltk-drt/nltk_drt/temporaldrt.py | prodotiscus/nltk-drt | 6029f1357369b124758e86020734a55574a6a15a | [
"Apache-2.0"
] | null | null | null | """
Temporal extension of presuppdrt
"""
__author__ = "Peter Makarov, Alex Kislev, Emma Li"
__version__ = "1.0"
__date__ = "Tue, 24 Aug 2010"
#import presuppdrt as drt
from . import presuppdrt as drt
from nltk.sem.logic import Variable
from .presuppdrt import DrsDrawer
from .presuppdrt import AnaphoraResolutionException
from .presuppdrt import DrtApplicationExpression
from .presuppdrt import DrtTimeVariableExpression
from .presuppdrt import DRS
from .presuppdrt import DrtExpression
from .presuppdrt import DrtVariableExpression
from .presuppdrt import DrtStateVariableExpression
from .presuppdrt import Binding
from .presuppdrt import DrtEventVariableExpression
from .presuppdrt import VariableReplacer
from .presuppdrt import ConditionReplacer
from .presuppdrt import ConditionRemover
from .presuppdrt import DrtEqualityExpression
from .presuppdrt import DrtConstantExpression
from .presuppdrt import unique_variable
from .presuppdrt import DrtNegatedExpression
from .presuppdrt import is_statevar
from .presuppdrt import is_eventvar
from .presuppdrt import is_timevar
from .presuppdrt import is_uttervar
from .presuppdrt import DrtAbstractVariableExpression
from .presuppdrt import DrtUtterVariableExpression
from .presuppdrt import DrtIndividualVariableExpression
from .presuppdrt import DefiniteDescriptionDRS
from .presuppdrt import DrtEventualityApplicationExpression
from .presuppdrt import DrtLambdaExpression
from .presuppdrt import DrtBooleanExpression
from .presuppdrt import DrtConcatenation
from .presuppdrt import DrtImpExpression
from .presuppdrt import DrtOrExpression
from .presuppdrt import DrtFeatureConstantExpression
class DrtTokens(drt.DrtTokens):
    """String constants used by the temporal DRT extension."""
    NEWINFO_DRS = 'NEWINFO'
    # Location-time placeholder introduced by a non-finite verb (resolved later).
    LOCATION_TIME = 'LOCPRO'
    UTTER_TIME = 'UTTER'
    REFER_TIME = 'REFER'
    # Perfect-aspect condition.
    PERF = 'PERF'
    UTTER = "UTTER"
    REFER = "REFER"
    # Temporal relations between referents.
    OVERLAP = "overlap"
    EARLIER = "earlier"
    INCLUDE = "include"
    ABUT = "abut"
    # Operator on states yielding their terminating event.
    END = "end"
    TEMP_CONDS = [OVERLAP, EARLIER, INCLUDE]
    # Tense markers.
    PAST = "PAST"
    PRES = "PRES"
    FUT = "FUT"
    TENSE = [PAST, PRES, FUT]
    # NOTE(review): purpose of these numeric ids is not evident from this
    # module (original comment was '# ???').
    OLD_NLTK = 0
    NLTK = 1
    PROVER9 = 2
class DrtTimeApplicationExpression(DrtApplicationExpression):
    """Application expression whose argument is a time referent."""
    pass
class LocationTimeResolutionException(Exception):
    """Raised when a LOCPRO location-time condition cannot be resolved."""
    pass
class DrtLocationTimeApplicationExpression(DrtTimeApplicationExpression):
    """LOCPRO(t) condition from a non-finite verb. Gets resolved
    to the closest location time referent introduced by a finite auxiliary."""

    def readings(self, trail=[]):
        """Resolve the location-time argument against referents on ``trail``.

        ``trail`` is only read here, so the mutable default is harmless.
        Returns ([Binding], bool) or raises LocationTimeResolutionException.
        """
        utter_time_search = False
        # Walk the enclosing DRSs from the innermost outwards.
        for drs in (ancestor for ancestor in reversed(trail) if isinstance(ancestor, DRS)):
            search_list = drs.refs
            if self.argument.variable in drs.refs:
                # Only consider referents introduced before the LOCPRO argument.
                search_list = drs.refs[:drs.refs.index(self.argument.variable)]
            for ref in reversed(search_list):
                refex = DrtVariableExpression(ref)
                if isinstance(refex, DrtUtterVariableExpression):
                    # In case there is no location time referent that has not yet been used
                    # to relate some eventuality to utterance time, use utterance time as
                    # the location time.
                    return [Binding([(trail[-1], VariableReplacer(self.argument.variable, refex))])], True
                elif not utter_time_search and isinstance(refex, DrtTimeVariableExpression) and \
                not (refex == self.argument):
                    if any(isinstance(c, DrtApplicationExpression) and isinstance(c.function, DrtApplicationExpression) and \
                    c.function.argument == refex and (c.function.function.variable.name == DrtTokens.OVERLAP or \
                    c.function.function.variable.name == DrtTokens.INCLUDE) for c in drs.conds):
                        # This time referent already locates some eventuality, so
                        # skip it and fall back to the utterance time instead.
                        utter_time_search = True
                    else:
                        # Return the first suitable antecedent expression.
                        return [Binding([(trail[-1], VariableReplacer(self.argument.variable, refex))])], True
        raise LocationTimeResolutionException("Variable '%s' does not "
                                              "resolve to anything." % self.argument)
class DrtFindUtterTimeExpression(DrtApplicationExpression):
    """Type of application expression looking to equate its argument with utterance time."""

    def readings(self, trail=[]):
        """Bind the argument to the first utterance-time referent on ``trail``.

        ``trail`` is only read here, so the mutable default is harmless.
        """
        for ancestor in trail:
            for ref in ancestor.get_refs():
                refex = DrtVariableExpression(ref)
                if isinstance(refex, DrtUtterVariableExpression):
                    return [Binding([(trail[-1], VariableReplacer(self.argument.variable, refex))])], True
        raise UtteranceTimeTimeResolutionException("Variable '%s' does not "
                                                   "resolve to anything." % self.argument)
class UtteranceTimeTimeResolutionException(Exception):
    """Raised when no utterance-time referent can be found on the trail."""
    pass
class DrtFindEventualityExpression(DrtApplicationExpression):
    """Comprises reference point REFER condition and aspectual PERF condition.
    DRS-condition REFER(e) or REFER(s) returns a temporal condition that
    relates given eventuality and some previous event or state. In the simplified
    version of the reference point selection algorithm, the condition picks out the
    most recent event and, depending on the type of its argument, returns either an
    earlier(e*,e) or include(s,e*), where e* is the reference point and e/s is the given
    eventuality. In case there is no event in the previous discourse, the most recent
    state is taken as the reference point and overlap(s*,s) or include(s*,e) is introduced
    depending on the type of the given eventuality.
    PERF(e) locates the most recent state referent s and resolves to a condition abut(e,s).
    PERF(s) locates the most recent state referent s* and resolves to a condition abut(e*,s*),
    e* = end(s) and adds a new event referent e*. Note that end(.) is an operator on states
    that returns events."""
    def readings(self, trail=[]):
        # Resolve this REFER/PERF condition against the discourse in `trail`.
        # Returns ([Binding], False): the binding replaces this condition
        # (found by `index`) in the innermost DRS with concrete temporal
        # condition(s), or removes it when no reference point exists.
        state_reference_point = None
        # Position of this condition in the innermost DRS's condition list.
        index = trail[-1].conds.index(self)
        #state reference point in case there are no previous events
        # Scan ancestor DRSs from innermost to outermost.
        for drs in (ancestor for ancestor in reversed(trail) if isinstance(ancestor, DRS)):
            search_list = drs.refs
            if drs is trail[-1]:
                #Described eventuality in the object's referents?
                #Take refs' list up to described eventuality
                search_list = drs.refs[:drs.refs.index(self.argument.variable)]
            for ref in reversed(search_list):
                #search for the most recent reference
                refex = DrtVariableExpression(ref)
                if isinstance(refex, DrtEventVariableExpression) and \
                not (refex == self.argument) and not self.function.variable.name == DrtTokens.PERF:
                    # Most recent EVENT found (REFER only; PERF ignores events).
                    if isinstance(self.argument, DrtEventVariableExpression):
                        #In case given eventuality is an event, return earlier
                        return [Binding([(trail[-1], ConditionReplacer(index,
                        [self._combine(DrtTokens.EARLIER, refex, self.argument)]))])], False
                    elif isinstance(self.argument, DrtStateVariableExpression):
                        #In case given eventuality is a state, return include
                        return [Binding([(trail[-1], ConditionReplacer(index,
                        [self._combine(DrtTokens.INCLUDE, self.argument, refex)]))])], False
                elif not state_reference_point and \
                isinstance(refex, DrtStateVariableExpression) and \
                not (refex == self.argument):
                    #In case no event is found, locate the most recent state
                    state_reference_point = refex
        if state_reference_point:
            if self.function.variable.name == DrtTokens.PERF:
                #in case we are dealing with PERF
                if isinstance(self.argument, DrtEventVariableExpression):
                    #Reference point is a state and described eventuality an event,
                    #return event abuts on state
                    return [Binding([(trail[-1], ConditionReplacer(index,
                    [self._combine(DrtTokens.ABUT, self.argument, state_reference_point)]))])], False
                elif isinstance(self.argument, DrtStateVariableExpression):
                    #Reference point is a state and described eventuality a state,
                    #then add an event referent to the ancestor's refs list and two conditions
                    #that that event is the end of eventuality and
                    #that event abuts on ref.state. Function object needed.
                    termination_point = unique_variable(Variable("e"))
                    conds = [DrtEqualityExpression(DrtEventVariableExpression(termination_point), DrtApplicationExpression(self.make_ConstantExpression(DrtTokens.END), self.argument)),
                    self._combine(DrtTokens.ABUT, DrtEventVariableExpression(termination_point), state_reference_point)]
                    # NOTE(review): qualified as DrtFindEventualityExpression.ConditionReplacer
                    # here but referenced as bare ConditionReplacer everywhere else in this
                    # method - confirm whether an inner-class helper is intended.
                    return [Binding([(trail[-1], DrtFindEventualityExpression.ConditionReplacer(index, conds, termination_point))])], False
            elif isinstance(self.argument, DrtStateVariableExpression):
                #Reference point is a state and given eventuality is also a state,
                #return overlap
                return [Binding([(trail[-1], ConditionReplacer(index,
                [self._combine(DrtTokens.OVERLAP, state_reference_point, self.argument)]))])], False
            elif isinstance(self.argument, DrtEventVariableExpression):
                #Reference point is a state and given eventuality is an event,
                #return include
                return [Binding([(trail[-1], ConditionReplacer(index,
                [self._combine(DrtTokens.INCLUDE, state_reference_point, self.argument)]))])], False
        else:
            #no suitable reference found
            return [Binding([(trail[-1], ConditionRemover(index))])], False
    def make_ConstantExpression(self, name):
        # Helper: wrap a token name into a DRT constant expression.
        return DrtConstantExpression(Variable(name))
    def _combine(self, cond, arg1, arg2):
        """Combines two arguments into a DrtEventualityApplicationExpression
        that has another DrtEventualityApplicationExpression as its functor"""
        return DrtEventualityApplicationExpression(DrtEventualityApplicationExpression(self.make_ConstantExpression(cond), arg1), arg2)
class NewInfoDRS(DRS):
    """Marker subclass of DRS with no behaviour of its own; presumably used to
    tag the new-information part of the discourse — confirm with callers."""
    pass
class PresuppositionDRS(drt.PresuppositionDRS):
    def collect_event_data(self, cond, event_data_map, event_strings_map, individuals=None):
        # Classify one DRS-condition into the given accumulators:
        # - event_data_map: individual variable -> [(eventuality, role name)]
        # - event_strings_map: event/state expression -> verb string
        # - individuals (optional): individual variable -> [noun/adjective conds]
        # The elif chain is order-sensitive: the first matching branch wins.
        if isinstance(cond.function, DrtApplicationExpression) and \
                not isinstance(cond.function, DrtTimeApplicationExpression) and \
                isinstance(cond.argument, DrtIndividualVariableExpression) and \
                not isinstance(cond.argument, DrtTimeVariableExpression):
            # Two-place, non-temporal application on an individual: record the
            # (eventuality, role) pair for that individual.
            event_data_map.setdefault(cond.argument.variable, []).append((cond.function.argument, cond.function.function.variable.name))
        elif cond.__class__ == DrtEventualityApplicationExpression and \
                (isinstance(cond.argument, DrtEventVariableExpression) or \
                 isinstance(cond.argument, DrtStateVariableExpression)) and \
                not isinstance(cond.function, DrtApplicationExpression):
            # One-place predicate applied directly to an event/state: its
            # functor name is the verb string for that eventuality.
            assert cond.argument not in event_strings_map
            event_strings_map[cond.argument] = cond.function.variable.name
        # The rest are nouns and attributive adjectives
        elif individuals is not None and cond.__class__ == DrtApplicationExpression and \
                not isinstance(cond.function, DrtApplicationExpression):
            individuals.setdefault(cond.argument.variable, []).append(cond)
class DefiniteDescriptionDRS(drt.DefiniteDescriptionDRS):
    def _get_free(self):
        """Return a (free, temporal_conditions) pair for this presuppositional DRS.

        ``free`` is the set of free variables with utterance-time and
        eventuality variables of temporal conditions removed;
        ``temporal_conditions`` are the eventuality-level temporal conditions
        ('overlap', 'earlier', 'include') that are detached from this DRS so
        they can be moved to the local DRS.
        """
        free = self.free(True)
        temporal_conditions = []
        # If there are free variables that stem from conditions like 'overlap',
        # 'earlier', 'include', those conditions will be moved to the local DRS.
        # Iterate over a snapshot of the condition list: the loop body removes
        # items from self.conds, and mutating a list while iterating it makes
        # Python skip the element following each removal.
        for cond in list(self.conds):
            if isinstance(cond, DrtTimeApplicationExpression) and isinstance(cond.function, DrtTimeApplicationExpression):
                assert cond.function.function.variable.name in DrtTokens.TEMP_CONDS
                # Utterance-time variables are never truly free; drop them.
                for expression in [cond.argument, cond.function.argument]:
                    expression_variable = expression.variable
                    if expression_variable in free and isinstance(expression, DrtUtterVariableExpression):
                        free.remove(expression_variable)
            if isinstance(cond, DrtEventualityApplicationExpression) and \
                    isinstance(cond.function, DrtEventualityApplicationExpression):
                assert cond.function.function.variable.name in DrtTokens.TEMP_CONDS
                for expression_variable in [cond.argument.variable, cond.function.argument.variable]:
                    if expression_variable in free:
                        free.remove(expression_variable)
                # Detach the temporal condition so it can be re-homed locally.
                temporal_conditions.append(cond)
                self.conds.remove(cond)
        return free, temporal_conditions
class DrtParser(drt.DrtParser):
    """DrtParser producing conditions and referents for temporal logic"""
    def handle_PresuppositionDRS(self, tok, context):
        """Parse all the Presuppositon DRSs"""
        if tok == DrtTokens.DEFINITE_DESCRIPTION_DRS:
            # Definite descriptions get the temporal-logic subclass.
            self.assertNextToken(DrtTokens.OPEN)
            drs = self.handle_DRS(tok, context)
            return DefiniteDescriptionDRS(drs.refs, drs.conds)
        else:
            return drt.DrtParser.handle_PresuppositionDRS(self, tok, context)
    def handle_DRS(self, tok, context):
        # Parse a DRS, then post-process it: add REFER conditions for
        # eventuality referents, relate them to the location time, and turn
        # TENSE conditions into UTTER/earlier conditions.
        drs = drt.DrtParser.handle_DRS(self, tok, context)
        location_time = None
        for cond in drs.conds:
            if isinstance(cond, DrtFindEventualityExpression):
                #PERF(.) gives rise to a DrtFindEventualityExpression;
                #in case it is among the DRS-conditions, the eventuality carried by
                #this DRS does not give rise to a REFER(.) condition
                return DRS(drs.refs, drs.conds)
            if not location_time and isinstance(cond, DrtLocationTimeApplicationExpression):
                location_time = cond.argument
        for ref in drs.refs:
            #Change DRS: introduce REFER(s/e) condition, add INCLUDE/OVERLAP
            #conditions to verbs (triggered by LOCPRO) and given some trigger
            #from DrtTokens.TENSE put UTTER(.) condition and,for PAST and FUT,
            #earlier(.,.) condition w.r.t. to some new discourse
            #referent bound to utterance time.
            if is_statevar(ref.name):
                #Adds REFER(s) condition.
                if location_time:
                    #Relates location time and eventuality
                    drs.conds.append(DrtTimeApplicationExpression(DrtTimeApplicationExpression(self.make_ConstantExpression(DrtTokens.OVERLAP), location_time), DrtStateVariableExpression(ref)))
                drs.conds.append(DrtFindEventualityExpression(self.make_ConstantExpression(DrtTokens.REFER), DrtVariableExpression(ref)))
            if is_eventvar(ref.name):
                #Adds REFER(e) condition.
                if location_time:
                    #Relates location time and eventuality
                    # NOTE(review): this wraps an EVENT referent in
                    # DrtStateVariableExpression (same as the state branch above);
                    # confirm that DrtEventVariableExpression is not intended here.
                    drs.conds.append(DrtTimeApplicationExpression(DrtTimeApplicationExpression(self.make_ConstantExpression(DrtTokens.INCLUDE), location_time), DrtStateVariableExpression(ref)))
                drs.conds.append(DrtFindEventualityExpression(self.make_ConstantExpression(DrtTokens.REFER), DrtVariableExpression(ref)))
            if is_timevar(ref.name) and not is_uttervar(ref.name):
                #Relates location time with utterance time
                tense_cond = [c for c in drs.conds if isinstance(c, DrtApplicationExpression) and \
                isinstance(c.function, DrtConstantExpression) and \
                c.function.variable.name in DrtTokens.TENSE and DrtVariableExpression(ref) == c.argument]
                if not tense_cond == []:
                    if tense_cond[0].function.variable.name == DrtTokens.PRES:
                        #Put UTTER(t) instead
                        #drs.conds.remove(drs.conds.index(tense_cond[0]))
                        drs.conds[drs.conds.index(tense_cond[0])] = DrtFindUtterTimeExpression(self.make_ConstantExpression(DrtTokens.UTTER), DrtTimeVariableExpression(ref))
                    else:
                        #Put new discourse referent and bind it to utterance time
                        #by UTTER(.) and also add earlier(.,.) condition
                        utter_time = unique_variable(ref)
                        drs.refs.insert(0, utter_time)
                        drs.conds[drs.conds.index(tense_cond[0])] = DrtFindUtterTimeExpression(self.make_ConstantExpression(DrtTokens.UTTER), DrtTimeVariableExpression(utter_time))
                        # PAST: location time earlier than utterance time;
                        # otherwise (FUT): utterance time earlier than location time.
                        if tense_cond[0].function.variable.name == DrtTokens.PAST:
                            drs.conds.append(DrtTimeApplicationExpression(DrtTimeApplicationExpression(self.make_ConstantExpression(DrtTokens.EARLIER), DrtTimeVariableExpression(ref)), DrtTimeVariableExpression(utter_time)))
                        else:
                            drs.conds.append(DrtTimeApplicationExpression(DrtTimeApplicationExpression(self.make_ConstantExpression(DrtTokens.EARLIER), DrtTimeVariableExpression(utter_time)), DrtTimeVariableExpression(ref)))
        return DRS(drs.refs, drs.conds)
    def make_VariableExpression(self, name):
        # Dispatches on the variable-name convention (event/state/time/...).
        return DrtVariableExpression(Variable(name))
    def make_ApplicationExpression(self, function, argument):
        # Choose the specialised application-expression class by inspecting
        # the functor name and the argument's variable sort; order matters.
        if isinstance(function, DrtAbstractVariableExpression) and \
        function.variable.name == DrtTokens.LOCATION_TIME and \
        isinstance(argument, DrtTimeVariableExpression):
            return DrtLocationTimeApplicationExpression(function, argument)
        elif isinstance(function, DrtAbstractVariableExpression) and \
        function.variable.name == DrtTokens.PERF:
            return DrtFindEventualityExpression(function, argument)
        elif isinstance(argument, DrtStateVariableExpression) or \
        isinstance(argument, DrtEventVariableExpression):
            return DrtEventualityApplicationExpression(function, argument)
        elif isinstance(argument, DrtTimeVariableExpression):
            return DrtTimeApplicationExpression(function, argument)
        else:
            return DrtApplicationExpression(function, argument)
| 49.528947 | 224 | 0.673397 |
81bf9bf3789a01465ed8ba22520b4eb32d25f97c | 2,530 | py | Python | dataview/base.py | joshloyal/DataView | 28fa57ff421115638244d59dccfaf5b3403be765 | [
"MIT"
] | null | null | null | dataview/base.py | joshloyal/DataView | 28fa57ff421115638244d59dccfaf5b3403be765 | [
"MIT"
] | null | null | null | dataview/base.py | joshloyal/DataView | 28fa57ff421115638244d59dccfaf5b3403be765 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from dataview import data_types as types
from dataview.wrappers import DataViewMeta, registry
import dataview.data_utils as data_utils
class DataSchema(object):
    """Maps column names to ``dataview.data_types.DataTypes`` values.

    Built either by analyzing a dataset (:meth:`analyze`) or directly from a
    mapping (:meth:`from_dict`).
    """
    def __init__(self):
        # column name -> DataTypes member
        self._schema = {}
    @property
    def columns(self):
        """Column names currently in the schema, as a list."""
        # list() keeps the Python 2 behaviour of dict.keys() so callers may
        # index or len() the result.
        return list(self._schema.keys())
    def analyze(self, data):
        """Infer column types from ``data`` via data_utils; returns self."""
        for column in data_utils.numeric_columns(data):
            self._schema[column] = types.DataTypes.NUMERIC
        for column in data_utils.categorical_columns(data):
            self._schema[column] = types.DataTypes.CATEGORICAL
        return self
    def select(self, data_types):
        """Return a new DataSchema restricted to columns of ``data_types``
        (a single DataTypes member or an iterable of them)."""
        if isinstance(data_types, types.DataTypes):
            data_types = [data_types]
        # .items() works on Python 2 and 3; .iteritems() was removed in Python 3
        # and raised AttributeError here.
        return DataSchema.from_dict(
            {k: v for k, v in self._schema.items() if v in data_types})
    def update_type(self, column_name, data_type):
        """Override the stored type for ``column_name``.

        Raises ValueError if ``data_type`` is not a DataTypes member.
        """
        if not isinstance(data_type, types.DataTypes):
            raise ValueError('Must be DataType')
        self._schema[column_name] = data_type
    def to_dict(self):
        """Return the underlying {column: DataTypes} mapping."""
        return self._schema
    @classmethod
    def from_dict(cls, data):
        """Alternate constructor: wrap an existing {column: type} mapping."""
        new_schema = cls()
        new_schema._schema = data
        return new_schema
class DataView(object):
    """DataView
    An object to manipulate datasets.
    """
    __metaclass__ = DataViewMeta
    def __init__(self):
        # Underlying dataframe and its lazily-computed schema.
        self._data = None
        self._schema = None
    @property
    def schema(self):
        """DataSchema for the underlying data, computed on first access."""
        if self._schema is None:
            self._schema = DataSchema().analyze(self._data)
        return self._schema
    @property
    def data(self):
        """A defensive copy of the underlying dataframe."""
        return self._data.copy()
    def view(self, partition_method, pipeline):
        """Apply each ``(data_type, name, transformer)`` instruction in
        ``pipeline`` to the matching column subset.

        NOTE(review): ``partition_method`` is currently unused - confirm the
        intended partitioning behaviour with callers.
        """
        for instruction in pipeline:
            if instruction[0] == types.DataTypes.ALL:
                # Fixed two NameError bugs from the original: a stray
                # `dv.values` expression (undefined name `dv`) and the
                # misspelled `intruction`.
                instruction[2].fit_transform(
                    self.select(instruction[0]).data.values)
    def fetch(self):
        """Load the underlying data; subclasses must implement."""
        raise NotImplementedError()
    def select(self, data_types):
        """Return a new view over only the columns of ``data_types``."""
        subset = self._data[self.schema.select(data_types).columns]
        return self.__class__.from_dataframe(subset)
    @classmethod
    def from_dataframe(cls, dataframe):
        """Alternate constructor: wrap a copy of ``dataframe``."""
        new_view = cls()
        new_view._data = dataframe.copy()
        return new_view
def fetch_view(view_name):
    """Instantiate the registered DataView called ``view_name``, fetch its
    data, and return it. Raises ValueError for unknown view names."""
    if view_name not in registry:
        raise ValueError('Not recognized view')
    view_cls = registry[view_name]
    view = view_cls()
    view.fetch()
    return view
| 25.816327 | 80 | 0.652569 |
3a56f8b6053685f14e23dc861c1b60df270e79ee | 1,263 | py | Python | tests/test_exceptions.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | 1 | 2020-08-09T19:45:14.000Z | 2020-08-09T19:45:14.000Z | tests/test_exceptions.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | null | null | null | tests/test_exceptions.py | SmartManoj/quart | 317562ea660edb7159efc20fa57b95223d408ea0 | [
"MIT"
] | null | null | null | import pytest
from quart import Response
from quart.exceptions import (
abort, HTTPException, HTTPStatusException, MethodNotAllowed, RedirectRequired,
)
def test_abort() -> None:
    """abort() with a bare status code raises HTTPStatusException."""
    pytest.raises(HTTPStatusException, abort, 400)
def test_abort_with_arguments() -> None:
    """The description passed to abort() is preserved on the raised exception."""
    exc_info = pytest.raises(HTTPException, abort, 400, "A description", "A name")
    assert exc_info.value.description == "A description"
def test_abort_with_response() -> None:
    """abort() with a Response keeps that response's status code."""
    exc_info = pytest.raises(HTTPException, abort, Response("Message", 205))
    assert exc_info.value.get_response().status_code == 205
@pytest.mark.asyncio
async def test_http_exception() -> None:
    """The generated response carries the status code, name and description."""
    exc = HTTPException(205, 'Description', 'Name')
    assert exc.get_response().status_code == 205
    body = await exc.get_response().get_data()  # type: ignore
    assert b'Name' in body
    assert b'Description' in body
def test_method_not_allowed() -> None:
    """The Allow header lists the permitted methods, comma separated."""
    allow_header = MethodNotAllowed(['GET', 'POST']).get_headers()['Allow']
    assert allow_header == 'GET, POST'
def test_redirect_required() -> None:
    """The redirect target ends up in the Location header."""
    response = RedirectRequired('/redirect').get_response()
    assert '/redirect' in response.headers['Location']
| 30.071429 | 84 | 0.710214 |
e1a465054049af20afafe140f6baf9a5b62d8d2f | 3,583 | py | Python | app/migrations/0001_initial.py | wzy916/wzy | 5e491cc45c896fb1da79c63bae0e3fc3414a916e | [
"Apache-2.0"
] | null | null | null | app/migrations/0001_initial.py | wzy916/wzy | 5e491cc45c896fb1da79c63bae0e3fc3414a916e | [
"Apache-2.0"
] | null | null | null | app/migrations/0001_initial.py | wzy916/wzy | 5e491cc45c896fb1da79c63bae0e3fc3414a916e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-02 14:55
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``MyUser`` auth model and the
    ``Wheel`` table. Auto-generated by Django; do not hand-edit field lists."""
    initial = True
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]
    operations = [
        # Custom user model extending Django's standard user fields with
        # email (unique login key), address and phone.
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.CharField(max_length=100, unique=True, verbose_name='邮箱')),
                ('address', models.CharField(max_length=251, null=True, verbose_name='地址')),
                ('phone', models.CharField(max_length=13, null=True, verbose_name='电话')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Carousel/banner entries; stored in the legacy table 'axf_whell'
        # (table name spelling kept as-is for compatibility).
        migrations.CreateModel(
            name='Wheel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('img', models.CharField(max_length=251)),
                ('name', models.CharField(max_length=40)),
                ('trackid', models.CharField(max_length=30)),
            ],
            options={
                'db_table': 'axf_whell',
            },
        ),
    ]
| 58.737705 | 329 | 0.636896 |
c2250eae7426360d53301a6dd4d1d8eef075d052 | 9,910 | py | Python | src/twisted/python/logfile.py | muelli/twisted | eacc5964187aebf5c34fa255c7e0a3700eaab15a | [
"MIT",
"Unlicense"
] | null | null | null | src/twisted/python/logfile.py | muelli/twisted | eacc5964187aebf5c34fa255c7e0a3700eaab15a | [
"MIT",
"Unlicense"
] | null | null | null | src/twisted/python/logfile.py | muelli/twisted | eacc5964187aebf5c34fa255c7e0a3700eaab15a | [
"MIT",
"Unlicense"
] | null | null | null | # -*- test-case-name: twisted.test.test_logfile -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A rotating, browsable log file.
"""
# System Imports
import os
import glob
import time
import stat
from twisted.python import threadable
class BaseLogFile:
    """
    The base class for a log file that can be rotated.
    """
    # Methods wrapped by threadable.synchronize() in subclasses.
    synchronized = ["write", "rotate"]
    def __init__(self, name, directory, defaultMode=None):
        """
        Create a log file.
        @param name: name of the file
        @param directory: directory holding the file
        @param defaultMode: permissions used to create the file. Default to
        current permissions of the file if the file exists.
        """
        self.directory = directory
        self.name = name
        self.path = os.path.join(directory, name)
        if defaultMode is None and os.path.exists(self.path):
            # Inherit the existing file's permission bits.
            self.defaultMode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])
        else:
            self.defaultMode = defaultMode
        self._openFile()
    @classmethod
    def fromFullPath(cls, filename, *args, **kwargs):
        """
        Construct a log file from a full file path.
        """
        logPath = os.path.abspath(filename)
        return cls(os.path.basename(logPath),
                   os.path.dirname(logPath), *args, **kwargs)
    def shouldRotate(self):
        """
        Override with a method to that returns true if the log
        should be rotated.
        """
        raise NotImplementedError
    def _openFile(self):
        """
        Open the log file.
        The log file is always opened in binary mode.
        """
        self.closed = False
        if os.path.exists(self.path):
            # Unbuffered (buffering=0) so writes hit disk immediately;
            # seek to the end to append.
            self._file = open(self.path, "rb+", 0)
            self._file.seek(0, 2)
        else:
            if self.defaultMode is not None:
                # Set the lowest permissions
                # Create with no permission bits, then chmod below - avoids a
                # window where the file exists with wider permissions.
                oldUmask = os.umask(0o777)
                try:
                    self._file = open(self.path, "wb+", 0)
                finally:
                    os.umask(oldUmask)
            else:
                self._file = open(self.path, "wb+", 0)
        if self.defaultMode is not None:
            try:
                os.chmod(self.path, self.defaultMode)
            except OSError:
                # Probably /dev/null or something?
                pass
    def write(self, data):
        """
        Write some data to the file.
        @param data: The data to write. Text will be encoded as UTF-8.
        @type data: L{bytes} or L{unicode}
        """
        if self.shouldRotate():
            self.flush()
            self.rotate()
        if isinstance(data, str):
            data = data.encode('utf8')
        self._file.write(data)
    def flush(self):
        """
        Flush the file.
        """
        self._file.flush()
    def close(self):
        """
        Close the file.
        The file cannot be used once it has been closed.
        """
        self.closed = True
        self._file.close()
        self._file = None
    def reopen(self):
        """
        Reopen the log file. This is mainly useful if you use an external log
        rotation tool, which moves under your feet.
        Note that on Windows you probably need a specific API to rename the
        file, as it's not supported to simply use os.rename, for example.
        """
        self.close()
        self._openFile()
    def getCurrentLog(self):
        """
        Return a LogReader for the current log file.
        """
        return LogReader(self.path)
class LogFile(BaseLogFile):
    """
    A log file that can be rotated.
    A rotateLength of None disables automatic log rotation.
    """
    def __init__(self, name, directory, rotateLength=1000000, defaultMode=None,
                 maxRotatedFiles=None):
        """
        Create a log file rotating on length.
        @param name: file name.
        @type name: C{str}
        @param directory: path of the log file.
        @type directory: C{str}
        @param rotateLength: size of the log file where it rotates. Default to
            1M.
        @type rotateLength: C{int}
        @param defaultMode: mode used to create the file.
        @type defaultMode: C{int}
        @param maxRotatedFiles: if not None, max number of log files the class
            creates. Warning: it removes all log files above this number.
        @type maxRotatedFiles: C{int}
        """
        BaseLogFile.__init__(self, name, directory, defaultMode)
        self.rotateLength = rotateLength
        self.maxRotatedFiles = maxRotatedFiles
    def _openFile(self):
        BaseLogFile._openFile(self)
        # Track the current size; file position is at end after open.
        self.size = self._file.tell()
    def shouldRotate(self):
        """
        Rotate when the log file size is larger than rotateLength.
        """
        return self.rotateLength and self.size >= self.rotateLength
    def getLog(self, identifier):
        """
        Given an integer, return a LogReader for an old log file.
        """
        filename = "%s.%d" % (self.path, identifier)
        if not os.path.exists(filename):
            raise ValueError("no such logfile exists")
        return LogReader(filename)
    def write(self, data):
        """
        Write some data to the file.
        """
        # Encode here (instead of relying on BaseLogFile.write) so that
        # self.size advances by the number of BYTES written. The original
        # added len() of the un-encoded text, which under-counts for
        # non-ASCII data and delays rotation.
        if isinstance(data, str):
            data = data.encode('utf8')
        BaseLogFile.write(self, data)
        self.size += len(data)
    def rotate(self):
        """
        Rotate the file and create a new one.
        If it's not possible to open new logfile, this will fail silently,
        and continue logging to old logfile.
        """
        if not (os.access(self.directory, os.W_OK) and os.access(self.path, os.W_OK)):
            return
        logs = self.listLogs()
        logs.reverse()
        # Shift every old log up by one; drop logs past maxRotatedFiles.
        for i in logs:
            if self.maxRotatedFiles is not None and i >= self.maxRotatedFiles:
                os.remove("%s.%d" % (self.path, i))
            else:
                os.rename("%s.%d" % (self.path, i), "%s.%d" % (self.path, i + 1))
        self._file.close()
        os.rename(self.path, "%s.1" % self.path)
        self._openFile()
    def listLogs(self):
        """
        Return sorted list of integers - the old logs' identifiers.
        """
        result = []
        for name in glob.glob("%s.*" % self.path):
            try:
                counter = int(name.split('.')[-1])
                if counter:
                    result.append(counter)
            except ValueError:
                # Non-numeric suffix (e.g. foo.log.bak) - not one of ours.
                pass
        result.sort()
        return result
    def __getstate__(self):
        # The size is recomputed on _openFile(); do not pickle it.
        state = BaseLogFile.__getstate__(self)
        del state["size"]
        return state
threadable.synchronize(LogFile)
class DailyLogFile(BaseLogFile):
    """A log file that is rotated daily (at or after midnight localtime)
    """
    def _openFile(self):
        BaseLogFile._openFile(self)
        # Remember the (year, month, day) of the file's last modification.
        self.lastDate = self.toDate(os.stat(self.path)[8])
    def shouldRotate(self):
        """Rotate when the date has changed since last write"""
        return self.toDate() > self.lastDate
    def toDate(self, *args):
        """Convert a unixtime to (year, month, day) localtime tuple,
        or return the current (year, month, day) localtime tuple.
        This function primarily exists so you may overload it with
        gmtime, or some cruft to make unit testing possible.
        """
        # primarily so this can be unit tested easily
        return time.localtime(*args)[:3]
    def suffix(self, tupledate):
        """Return the suffix given a (year, month, day) tuple or unixtime"""
        try:
            return '_'.join(map(str, tupledate))
        except TypeError:
            # try taking a float unixtime
            # A bare `except:` previously swallowed every error here; only a
            # non-iterable (a unixtime number) legitimately reaches this path,
            # and iterating it raises TypeError.
            return '_'.join(map(str, self.toDate(tupledate)))
    def getLog(self, identifier):
        """Given a unix time, return a LogReader for an old log file."""
        if self.toDate(identifier) == self.lastDate:
            return self.getCurrentLog()
        filename = "%s.%s" % (self.path, self.suffix(identifier))
        if not os.path.exists(filename):
            raise ValueError("no such logfile exists")
        return LogReader(filename)
    def write(self, data):
        """Write some data to the log file"""
        BaseLogFile.write(self, data)
        # Guard against a corner case where time.time()
        # could potentially run backwards to yesterday.
        # Primarily due to network time.
        self.lastDate = max(self.lastDate, self.toDate())
    def rotate(self):
        """Rotate the file and create a new one.
        If it's not possible to open new logfile, this will fail silently,
        and continue logging to old logfile.
        """
        if not (os.access(self.directory, os.W_OK) and os.access(self.path, os.W_OK)):
            return
        newpath = "%s.%s" % (self.path, self.suffix(self.lastDate))
        if os.path.exists(newpath):
            # Today's rotated file already exists; keep writing to it.
            return
        self._file.close()
        os.rename(self.path, newpath)
        self._openFile()
    def __getstate__(self):
        # lastDate is recomputed on _openFile(); do not pickle it.
        state = BaseLogFile.__getstate__(self)
        del state["lastDate"]
        return state
threadable.synchronize(DailyLogFile)
class LogReader:
    """Sequential reader over a log file."""
    def __init__(self, name):
        """
        Open the log file for reading.
        The comments about binary-mode for L{BaseLogFile._openFile} also apply
        here.
        """
        self._file = open(name, "r")
    def readLines(self, lines=10):
        """Read up to ``lines`` lines from the log file.
        This doesn't return all of the file's lines - call it multiple times.
        """
        collected = []
        while len(collected) < lines:
            current = self._file.readline()
            if not current:
                # End of file reached; return what we have so far.
                break
            collected.append(current)
        return collected
    def close(self):
        """Close the underlying file object."""
        self._file.close()
| 29.147059 | 86 | 0.571443 |
1ffddb09b20613df8c2908659e50fdb99e83aaf5 | 154 | py | Python | discoursesimplification/utils/ner/ner_string_parse_exception.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | discoursesimplification/utils/ner/ner_string_parse_exception.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | discoursesimplification/utils/ner/ner_string_parse_exception.py | kkatsamaktsis/PyDiscourseSimplification | 18d247894355b4b51f5abcced86e7a7292b17ac0 | [
"MIT"
] | null | null | null | class NERStringParseException(Exception):
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return repr(self.msg)
| 19.25 | 41 | 0.642857 |
e5a39e62eb6f9e077c98e8ae8ea5ae8624d9d248 | 6,602 | py | Python | GUI/Dialog/DMachineSetup/SWSlot/PWSlot22/PWSlot22.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | GUI/Dialog/DMachineSetup/SWSlot/PWSlot22/PWSlot22.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | GUI/Dialog/DMachineSetup/SWSlot/PWSlot22/PWSlot22.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""@package pyleecan.GUI.Dialog.DMachineSetup.SWSlot.PWSlot22.PWSlot22
SlotW22 Setup Page
@date Created on Wed Jul 15 14:30:54 2015
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
@todo unittest it
"""
import PyQt5.QtCore
from numpy import pi
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget
from pyleecan.Classes.SlotW22 import SlotW22
from pyleecan.GUI import gui_option
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.PWSlot22.Gen_PWSlot22 import Gen_PWSlot22
from pyleecan.Methods.Slot.Slot.check import SlotCheckError
translate = PyQt5.QtCore.QCoreApplication.translate
class PWSlot22(Gen_PWSlot22, QWidget):
    """Page to set the Slot Type 22
    """
    # Signal to DMachineSetup to know that the save popup is needed
    saveNeeded = pyqtSignal()
    # Information for Slot combobox
    slot_name = "Slot Type 22"
    slot_type = SlotW22
    def __init__(self, lamination=None):
        """Initialize the GUI according to current lamination
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 widget
        lamination : Lamination
            current lamination to edit
        """
        # Build the interface according to the .ui file
        QWidget.__init__(self)
        self.setupUi(self)
        self.lamination = lamination
        self.slot = lamination.slot
        # Set FloatEdit unit
        self.lf_H0.unit = "m"
        self.lf_H2.unit = "m"
        # Set unit name (m ou mm)
        wid_list = [self.unit_H0, self.unit_H2]
        for wid in wid_list:
            wid.setText(gui_option.unit.get_m_name())
        # Fill the fields with the machine values (if they're filled)
        self.lf_W0.setValue(self.slot.W0)
        self.lf_W2.setValue(self.slot.W2)
        self.lf_H0.setValue(self.slot.H0)
        self.lf_H2.setValue(self.slot.H2)
        # Angle widths default to radians (combobox index 0 = rad, 1 = deg).
        self.c_W0_unit.setCurrentIndex(0)  # rad
        self.c_W2_unit.setCurrentIndex(0)  # rad
        # Display the main output of the slot (surface, height...)
        self.w_out.comp_output()
        # Connect the signal/slot
        self.lf_W0.editingFinished.connect(self.set_W0)
        self.lf_W2.editingFinished.connect(self.set_W2)
        self.lf_H0.editingFinished.connect(self.set_H0)
        self.lf_H2.editingFinished.connect(self.set_H2)
        self.c_W0_unit.currentIndexChanged.connect(self.set_W0_unit)
        self.c_W2_unit.currentIndexChanged.connect(self.set_W2_unit)
    def set_W0(self):
        """Signal to update the value of W0 according to the line edit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        """
        if self.c_W0_unit.currentIndex() == 0:  # Rad
            self.slot.W0 = self.lf_W0.value()
        else:
            # Combobox set to degrees: convert to radians for storage.
            self.slot.W0 = self.lf_W0.value() / 180 * pi
        self.w_out.comp_output()
        # Notify the machine GUI that the machine has changed
        self.saveNeeded.emit()
    def set_W2(self):
        """Signal to update the value of W2 according to the line edit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        """
        if self.c_W2_unit.currentIndex() == 0:  # Rad
            self.slot.W2 = self.lf_W2.value()
        else:
            # Combobox set to degrees: convert to radians for storage.
            self.slot.W2 = self.lf_W2.value() / 180 * pi
        self.w_out.comp_output()
        # Notify the machine GUI that the machine has changed
        self.saveNeeded.emit()
    def set_H0(self):
        """Signal to update the value of H0 according to the line edit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        """
        self.slot.H0 = self.lf_H0.value()
        self.w_out.comp_output()
        # Notify the machine GUI that the machine has changed
        self.saveNeeded.emit()
    def set_H2(self):
        """Signal to update the value of H2 according to the line edit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        """
        self.slot.H2 = self.lf_H2.value()
        self.w_out.comp_output()
        # Notify the machine GUI that the machine has changed
        self.saveNeeded.emit()
    def set_W0_unit(self, value):
        """Signal to convert the value of W0 according to the combobox unit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        value : int
            Current index of combobox
        """
        # Only re-interpret the field when it actually holds a value.
        if self.lf_W0.text() != "":
            self.set_W0()  # Update for deg if needed and call comp_output
            # Notify the machine GUI that the machine has changed
            self.saveNeeded.emit()
    def set_W2_unit(self, value):
        """Signal to convert the value of W2 according to the combobox unit
        Parameters
        ----------
        self : PWSlot22
            A PWSlot22 object
        value : int
            Current index of combobox
        """
        # Only re-interpret the field when it actually holds a value.
        if self.lf_W2.text() != "":
            self.set_W2()  # Update for deg if needed and call comp_output
            # Notify the machine GUI that the machine has changed
            self.saveNeeded.emit()
    @staticmethod
    def check(lam):
        """Check that the current lamination have all the needed field set
        Parameters
        ----------
        lam: LamSlotWind
            Lamination to check
        Returns
        -------
        error: str
            Error message (return None if no error)
        """
        # Check that everything is set
        if lam.slot.Zs is None:
            return translate("You must set Zs !", "PWSlot22 check")
        elif lam.slot.W0 is None:
            return translate("You must set W0 !", "PWSlot22 check")
        elif lam.slot.W2 is None:
            return translate("You must set W2 !", "PWSlot22 check")
        elif lam.slot.H0 is None:
            return translate("You must set H0 !", "PWSlot22 check")
        elif lam.slot.H2 is None:
            return translate("You must set H2 !", "PWSlot22 check")
        # Check that everything is set right
        # Constraints
        try:
            lam.slot.check()
        except SlotCheckError as error:
            return str(error)
        # Output
        try:
            yoke_height = lam.comp_height_yoke()
        except Exception as error:
            return translate("Unable to compute yoke height:", "PWSlot22 check") + str(
                error
            )
        if yoke_height <= 0:
            return translate(
                "The slot height is greater than the lamination !", "PWSlot22 check"
            )
| 31.438095 | 87 | 0.59694 |
39e28d42fe135820bba22eeaff4cc916538f20af | 6,084 | py | Python | tests/core/test_db_validation.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | [
"Apache-2.0"
] | null | null | null | tests/core/test_db_validation.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | [
"Apache-2.0"
] | null | null | null | tests/core/test_db_validation.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | [
"Apache-2.0"
] | null | null | null | import random
import sqlite3
from contextlib import closing
from pathlib import Path
from typing import List
import aiosqlite
import pytest
from chia.cmds.db_validate_func import validate_v2
from chia.consensus.blockchain import Blockchain
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.hint_store import HintStore
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32, uint64
from tests.setup_nodes import test_constants
from tests.util.temp_file import TempFile
def rand_hash() -> bytes32:
    """Return 32 random bytes wrapped in a bytes32 (test helper; not crypto-safe)."""
    raw = bytearray(random.getrandbits(8) for _ in range(32))
    return bytes32(raw)
def make_version(conn: sqlite3.Connection, version: int) -> None:
    """Create the database_version table and record *version* in it."""
    statements = (
        ("CREATE TABLE database_version(version int)", ()),
        ("INSERT INTO database_version VALUES (?)", (version,)),
    )
    for sql, params in statements:
        conn.execute(sql, params)
    conn.commit()
def make_peak(conn: sqlite3.Connection, peak_hash: bytes32) -> None:
    """Ensure the current_peak table exists and point key 0 at *peak_hash*."""
    create_sql = "CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)"
    upsert_sql = "INSERT OR REPLACE INTO current_peak VALUES(?, ?)"
    conn.execute(create_sql)
    conn.execute(upsert_sql, (0, peak_hash))
    conn.commit()
def make_block_table(conn: sqlite3.Connection) -> None:
    """Create the full_blocks table (v2 schema) if it does not exist yet."""
    create_sql = (
        "CREATE TABLE IF NOT EXISTS full_blocks("
        "header_hash blob PRIMARY KEY,"
        "prev_hash blob,"
        "height bigint,"
        "sub_epoch_summary blob,"
        "is_fully_compactified tinyint,"
        "in_main_chain tinyint,"
        "block blob,"
        "block_record blob)"
    )
    conn.execute(create_sql)
def add_block(
    conn: sqlite3.Connection, header_hash: bytes32, prev_hash: bytes32, height: int, in_main_chain: bool
) -> None:
    """Insert a minimal full_blocks row; all blob payload columns stay NULL."""
    row = (header_hash, prev_hash, height, in_main_chain)
    conn.execute("INSERT INTO full_blocks VALUES(?, ?, ?, NULL, 0, ?, NULL, NULL)", row)
def test_db_validate_wrong_version() -> None:
    """validate_v2 must reject a database stamped with schema version 3."""
    expected = "Database has the wrong version (3 expected 2)"
    with TempFile() as db_file:
        with closing(sqlite3.connect(db_file)) as conn:
            make_version(conn, 3)
        with pytest.raises(RuntimeError) as excinfo:
            validate_v2(db_file, validate_blocks=False)
        assert expected in str(excinfo.value)
def test_db_validate_missing_peak_table() -> None:
    """A correctly-versioned database without a current_peak table must fail."""
    expected = "Database is missing current_peak table"
    with TempFile() as db_file:
        with closing(sqlite3.connect(db_file)) as conn:
            make_version(conn, 2)
        with pytest.raises(RuntimeError) as excinfo:
            validate_v2(db_file, validate_blocks=False)
        assert expected in str(excinfo.value)
def test_db_validate_missing_peak_block() -> None:
    """A peak pointer without a matching full_blocks row must fail validation."""
    peak = bytes32.fromhex("fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa")
    with TempFile() as db_file:
        with closing(sqlite3.connect(db_file)) as conn:
            make_version(conn, 2)
            make_peak(conn, peak)
            # The table exists but deliberately contains no row for the peak.
            make_block_table(conn)
        with pytest.raises(RuntimeError) as excinfo:
            validate_v2(db_file, validate_blocks=False)
        assert "Database is missing the peak block" in str(excinfo.value)
@pytest.mark.parametrize("invalid_in_chain", [True, False])
def test_db_validate_in_main_chain(invalid_in_chain: bool) -> None:
    """An orphaned block flagged as in_main_chain must make validation fail."""
    with TempFile() as db_file:
        with closing(sqlite3.connect(db_file)) as conn:
            make_version(conn, 2)
            make_block_table(conn)
            prev = bytes32(DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA)
            for height in range(100):
                header_hash = rand_hash()
                add_block(conn, header_hash, prev, height, True)
                if height % 4 == 0:
                    # Insert a competing (orphaned) sibling at the same height.
                    add_block(conn, rand_hash(), prev, height, invalid_in_chain)
                prev = header_hash
            make_peak(conn, header_hash)
        if invalid_in_chain:
            with pytest.raises(RuntimeError) as excinfo:
                validate_v2(db_file, validate_blocks=False)
            assert " (height: 96) is orphaned, but in_main_chain is set" in str(excinfo.value)
        else:
            validate_v2(db_file, validate_blocks=False)
async def make_db(db_file: Path, blocks: List[FullBlock]) -> None:
    """Build a schema-v2 blockchain database at *db_file* containing *blocks*.

    Statement order matters: pragmas and the version stamp are written before
    the stores are created, and the wrapper transaction is committed before
    blocks are ingested.
    """
    async with aiosqlite.connect(db_file) as conn:
        # Speed over durability: this database is a throwaway test fixture.
        await conn.execute("pragma journal_mode=OFF")
        await conn.execute("pragma synchronous=OFF")
        await conn.execute("pragma locking_mode=exclusive")
        # this is done by chia init normally
        await conn.execute("CREATE TABLE database_version(version int)")
        await conn.execute("INSERT INTO database_version VALUES (2)")
        await conn.commit()
        db_wrapper = DBWrapper(conn, 2)
        block_store = await BlockStore.create(db_wrapper)
        coin_store = await CoinStore.create(db_wrapper, uint32(0))
        hint_store = await HintStore.create(db_wrapper)
        bc = await Blockchain.create(coin_store, block_store, test_constants, hint_store, Path("."), reserved_cores=0)
        await db_wrapper.commit_transaction()
        for block in blocks:
            # PreValidationResult(None, ...) marks the block as not pre-validated;
            # receive_block then performs full validation itself.
            results = PreValidationResult(None, uint64(1), None, False)
            result, err, _, _ = await bc.receive_block(block, results)
            assert err is None
@pytest.mark.asyncio
async def test_db_validate_default_1000_blocks(default_1000_blocks: List[FullBlock]) -> None:
    """A freshly-built fixture chain validates except for its genesis challenge."""
    with TempFile() as db_file:
        await make_db(db_file, default_1000_blocks)
        # The fixture chain is built against test constants rather than
        # mainnet's, so only the genesis-challenge check is expected to fail.
        with pytest.raises(RuntimeError) as excinfo:
            validate_v2(db_file, validate_blocks=True)
        assert "Blockchain has invalid genesis challenge" in str(excinfo.value)
| 36.214286 | 118 | 0.686391 |
1e0782ac0b99c9becdfe0cb3df4dfe5172d9bc26 | 5,537 | py | Python | spinup/algos/sac/core.py | mksmsrkn/spinningup_pytorch | 1b1176126f293e44e0c2990cfda409b1e42409c9 | [
"MIT"
] | null | null | null | spinup/algos/sac/core.py | mksmsrkn/spinningup_pytorch | 1b1176126f293e44e0c2990cfda409b1e42409c9 | [
"MIT"
] | null | null | null | spinup/algos/sac/core.py | mksmsrkn/spinningup_pytorch | 1b1176126f293e44e0c2990cfda409b1e42409c9 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import nn
from torch.distributions import Normal
from gym.spaces import Box
EPS = 1e-8  # numerical fudge factor to keep log() arguments strictly positive
LOG_STD_MAX = 2  # upper clamp for the policy's log standard deviation
LOG_STD_MIN = -20  # lower clamp for the policy's log standard deviation
class MLP(nn.Module):
    """Simple fully-connected network.

    Parameters
    ----------
    in_dim : int
        Size of the input feature vector.
    hidden_sizes : sequence of int
        Widths of the layers; the last entry is the output width.
    activation : nn.Module class
        Activation inserted after every hidden layer.
    output_activation : nn.Module class or None
        Optional activation appended after the output layer.
    output_scaler : float
        Constant multiplier applied to the network output.
    do_squeeze : bool
        If True, squeeze singleton dimensions from the output in-place.
    """

    def __init__(self, in_dim, hidden_sizes=(64, 64), activation=nn.Tanh,
                 output_activation=None, output_scaler=1, do_squeeze=False):
        super(MLP, self).__init__()
        self.output_scaler = output_scaler
        self.do_squeeze = do_squeeze
        layers = []
        prev_h = in_dim
        for h in hidden_sizes[:-1]:
            layers.append(nn.Linear(prev_h, h))
            layers.append(activation())
            prev_h = h
        # BUG FIX: use prev_h, not the loop variable h — with a single entry
        # in hidden_sizes the loop never runs and h would be unbound (NameError).
        layers.append(nn.Linear(prev_h, hidden_sizes[-1]))
        if output_activation:
            try:
                # Some activations (e.g. Softmax) take a dim argument.
                out = output_activation(-1)
            except TypeError:
                # Narrowed from a bare except: only a signature mismatch
                # (activation takes no dim argument) is expected here.
                out = output_activation()
            layers.append(out)
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        y = self.model(x)
        if self.do_squeeze:
            # NOTE(review): in-place squeeze drops *all* singleton dims, so a
            # batch of size 1 also loses its batch dimension — confirm intended.
            y.squeeze_()
        return y * self.output_scaler
# Credit: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/9
def count_vars(model):
    """Total number of trainable scalar parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def clip_but_pass_gradient(x, l=-1., u=1.):
    """Clamp *x* to [l, u] in the forward pass while letting gradients flow
    through unchanged (the correction term is detached from the graph)."""
    above = (x > u).float()
    below = (x < l).float()
    correction = (u - x) * above + (l - x) * below
    return x + correction.detach()
"""
Policies
"""
def apply_squashing_func(mu, pi, logp_pi):
    """Squash mean and sampled action through tanh and apply the matching
    change-of-variables correction to the log-probability."""
    mu = torch.tanh(mu)
    pi = torch.tanh(pi)
    # Strictly clip 1 - pi**2 into [0, 1] (gradient-preserving) so machine
    # precision can never push the log argument negative.
    jacobian = clip_but_pass_gradient(1 - pi ** 2, l=0, u=1) + EPS
    logp_pi -= torch.log(jacobian).sum(dim=1)
    return mu, pi, logp_pi
class MLPGaussian(nn.Module):
    """Squashed-Gaussian MLP policy head (SAC-style).

    A shared trunk (`net`) feeds two heads: `mu` (mean) and `log_sigma`
    (tanh-bounded log standard deviation). `forward` returns the squashed
    deterministic mean, a reparameterized sample, and its log-probability.
    """
    def __init__(self, in_dim, out_dim, hidden_sizes=(64,64),
                 activation=nn.Tanh, output_activation=None, act_limit=1.0):
        super(MLPGaussian, self).__init__()
        self.act_limit = act_limit
        # Trunk: note the hidden activation is also used as the trunk's
        # output activation here (second `activation` argument).
        self.net = MLP(in_dim, list(hidden_sizes), activation, activation, do_squeeze = False)
        self.mu = [nn.Linear(hidden_sizes[-1], out_dim)]
        if output_activation is not None: self.mu.append(output_activation())
        # Tanh bounds the raw log-std to [-1, 1]; it is rescaled in forward().
        self.log_sigma = [nn.Linear(hidden_sizes[-1], out_dim), nn.Tanh()]
        self.mu = nn.Sequential(*self.mu)
        self.log_sigma = nn.Sequential(*self.log_sigma)
    def forward(self, x, a = None):
        x = self.net(x)
        mu = self.mu(x)
        log_sigma = self.log_sigma(x)
        """ Note from Josh Achiam @ OpenAI
        Because algorithm maximizes trade-off of reward and entropy,
        entropy must be unique to state---and therefore log_stds need
        to be a neural network output instead of a shared-across-states
        learnable parameter vector. But for deep Relu and other nets,
        simply sticking an activationless dense layer at the end would
        be quite bad---at the beginning of training, a randomly initialized
        net could produce extremely large values for the log_stds, which
        would result in some actions being either entirely deterministic
        or too random to come back to earth. Either of these introduces
        numerical instability which could break the algorithm. To
        protect against that, we'll constrain the output range of the
        log_stds, to lie within [LOG_STD_MIN, LOG_STD_MAX]. This is
        slightly different from the trick used by the original authors of
        SAC---they used tf.clip_by_value instead of squashing and rescaling.
        I prefer this approach because it allows gradient propagation
        through log_std where clipping wouldn't, but I don't know if
        it makes much of a difference.
        """
        # Rescale tanh output from [-1, 1] into [LOG_STD_MIN, LOG_STD_MAX].
        log_sigma = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_sigma + 1)
        sigma = torch.exp(log_sigma)
        dist = Normal(mu, sigma)
        # rsample() - https://pytorch.org/docs/stable/distributions.html#pathwise-derivative
        pi = dist.rsample() # reparametrization
        logp_pi = dist.log_prob(pi).sum(dim=1)
        # NOTE(review): mu/pi are scaled by act_limit *before* the tanh squash,
        # so the returned actions lie in [-1, 1] rather than [-act_limit,
        # act_limit]; the reference SpinningUp code scales after squashing.
        # Confirm this ordering is intended before changing it.
        mu *= self.act_limit
        pi *= self.act_limit
        mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi)
        return mu, pi, logp_pi
"""
Actor-Critics
"""
class ActorCritic(nn.Module):
    """SAC actor-critic bundle: squashed-Gaussian policy, twin Q networks
    and a state-value network."""

    def __init__(self, state_dim, hidden_sizes=(400, 300), activation=nn.ReLU,
                 output_activation=None, action_space=None, policy=MLPGaussian):
        super(ActorCritic, self).__init__()
        assert isinstance(action_space, Box)
        act_dim = action_space.shape[0]
        act_limit = action_space.high[0]
        widths = list(hidden_sizes)
        self.policy = policy(state_dim, act_dim, widths,
                             activation, output_activation, act_limit)

        # Twin Q critics take (state, action); V takes the state only.
        # Each emits a single squeezed scalar per sample.
        def scalar_net(in_dim):
            return MLP(in_dim, widths + [1], activation, do_squeeze=True)

        self.q1 = scalar_net(state_dim + act_dim)
        self.q2 = scalar_net(state_dim + act_dim)
        self.v = scalar_net(state_dim)

    def forward(self, x, a=None):
        mu, pi, logp_pi = self.policy(x)
        if a is None:
            return mu, pi, logp_pi
        sa = torch.cat([x, a], dim=1)
        s_pi = torch.cat([x, pi], dim=1)
        q1, q1_pi = self.q1(sa), self.q1(s_pi)
        q2, q2_pi = self.q2(sa), self.q2(s_pi)
        v = self.v(x)
        return mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v
9bce82e2e3685f04cd4f12d2d573cfc0cf576253 | 9,737 | py | Python | docsrc/conf.py | markusritschel/oceanpack | 53028431babda6fbea9d691ee6a4a94c99ada0c0 | [
"MIT"
] | null | null | null | docsrc/conf.py | markusritschel/oceanpack | 53028431babda6fbea9d691ee6a4a94c99ada0c0 | [
"MIT"
] | 5 | 2021-09-22T08:18:14.000Z | 2021-10-20T23:44:58.000Z | docsrc/conf.py | markusritschel/oceanpack | 53028431babda6fbea9d691ee6a4a94c99ada0c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
# Resolve the directory containing this conf.py so relative paths work no
# matter where sphinx-build is invoked from.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
# # TODO: check if this can stay commented or even be erased
# try: # for Sphinx >= 1.7
# from sphinx.ext import apidoc
# except ImportError:
# from sphinx import apidoc
#
# output_dir = os.path.join(__location__, "api")
# module_dir = os.path.join(__location__, "../src/oceanpack")
# try:
# shutil.rmtree(output_dir)
# except FileNotFoundError:
# pass
#
# try:
# import sphinx
# from pkg_resources import parse_version
#
# cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
# cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
#
# args = cmd_line.split(" ")
# if parse_version(sphinx.__version__) >= parse_version('1.7'):
# args = args[1:]
#
# apidoc.main(args)
# except Exception as e:
# print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['nbsphinx', 'myst_parser', 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
              'sphinx.ext.napoleon', 'sphinx_rtd_theme', 'sphinx.ext.githubpages', 'sphinx_issues', 'sphinxcontrib.bibtex'
              ]
# Keep MyST from overriding the MathJax configuration set up elsewhere.
myst_update_mathjax = False
# Never execute notebooks at build time; render their stored outputs instead.
nbsphinx_execute = 'never'
# BibTeX database(s) used by sphinxcontrib-bibtex citations.
bibtex_bibfiles = ['refs.bib']
# bibtex_reference_style = 'author_year'
def setup(app):
app.add_stylesheet('custom.css')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'oceanpack'
copyright = u'2020, markusritschel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# NOTE: both placeholders may later be overwritten by the `from oceanpack
# import __version__` block further down in this file.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints', 'Thumbs.db', '.DS_Store']  # never scan build output or notebook checkpoints
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # default light highlighting style shipped with Sphinx
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'  # Read the Docs theme; matches the extension listed above
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'sidebar_width': '300px',
# 'page_width': '1200px'
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Pull the real version string from the installed package when available;
# keep the placeholders when the docs are built before installation.
try:
    from oceanpack import __version__ as version
except ImportError:
    pass
else:
    release = version
# NOTE: the import above rebinds the module-level `version` to the full
# version string and mirrors it into `release`.
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # custom.css (registered in setup()) lives here
# Enable HTML5 writer support
# NOTE(review): `html_experimental_html5_writer` was removed in newer Sphinx
# releases (HTML5 output became the default) — confirm against the pinned
# Sphinx version before relying on it.
html_experimental_html5_writer = True
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'oceanpack-doc'  # base name for the htmlhelp builder's output files
# -- Options for LaTeX output --------------------------------------------------
# All LaTeX knobs left at their Sphinx defaults; uncomment entries to override.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    # (start document, target .tex name, title, author, document class)
    ('index', 'user_guide.tex', u'oceanpack Documentation',
     u'markusritschel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# Map the running interpreter's major.minor onto the matching docs URL below.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org', None),
    'numpy': ('https://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    # NOTE(review): several of these inventory URLs have since moved (e.g.
    # numpy now lives under numpy.org/doc) — verify the links still resolve.
}
| 33.926829 | 122 | 0.701243 |
5ae35ac86029b8ada9c2fe8afaa3450b20066cbb | 586 | py | Python | Largest Rectangle in Histogram.py | AmanCSE-1/Campus-Coding-Test | 46c84d12353ac9628826e9e1f792f24ff3b37689 | [
"MIT"
] | null | null | null | Largest Rectangle in Histogram.py | AmanCSE-1/Campus-Coding-Test | 46c84d12353ac9628826e9e1f792f24ff3b37689 | [
"MIT"
] | null | null | null | Largest Rectangle in Histogram.py | AmanCSE-1/Campus-Coding-Test | 46c84d12353ac9628826e9e1f792f24ff3b37689 | [
"MIT"
] | null | null | null | ''' Given an array of integers heights representing the histogram's bar height where the width of each bar is 1,
return the area of the largest rectangle in the histogram. '''
## Public Test Case : Input: heights = [2,1,5,6,2,3]
# Output: 10
## Public Test Case : Input: heights = [2,4]
# Output: 4
def largestRectangleArea(heights):
    """Return the area of the largest rectangle that fits under the histogram.

    The function body was missing in the template; this implements the
    classic monotonic-stack solution. Each bar index is pushed and popped at
    most once, so the scan is O(n) time and O(n) space.

    >>> largestRectangleArea([2, 1, 5, 6, 2, 3])
    10
    """
    stack = []  # indices of bars with strictly increasing heights
    best = 0
    # A trailing sentinel of height 0 flushes every bar left on the stack.
    for i, bar in enumerate(list(heights) + [0]):
        while stack and heights[stack[-1]] >= bar:
            height = heights[stack.pop()]
            # This rectangle spans from just after the previous (shorter)
            # stacked bar up to, but excluding, index i.
            left = stack[-1] + 1 if stack else 0
            best = max(best, height * (i - left))
        stack.append(i)
    return best
if __name__ == "__main__":
    # First stdin line: number of test cases; each following line: one
    # histogram's bar heights, space-separated integers.
    test_cases = int(input())
    for _ in range(test_cases):
        heights = list(map(int, input().split()))
        print(largestRectangleArea(heights))
| 26.636364 | 113 | 0.609215 |
1a436cebcf9557154a13f98baba8b64f33119dbf | 1,318 | py | Python | backend/group/migrations/0002_auto_20200812_2011.py | cjc7373/hackergame | 86971b4cf8a2761044d417b4c8bd934c3309d6fd | [
"MIT"
] | 2 | 2020-07-12T13:11:43.000Z | 2020-07-14T08:12:17.000Z | backend/group/migrations/0002_auto_20200812_2011.py | cjc7373/hackergame | 86971b4cf8a2761044d417b4c8bd934c3309d6fd | [
"MIT"
] | 1 | 2020-08-13T13:56:18.000Z | 2020-09-29T12:39:08.000Z | backend/group/migrations/0002_auto_20200812_2011.py | cjc7373/hackergame | 86971b4cf8a2761044d417b4c8bd934c3309d6fd | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-12 12:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1; operation order matters, edit with care.

    # NOTE(review): `initial = True` is unusual on an 0002 migration —
    # presumably part of a split initial set; confirm before squashing.
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('group', '0001_initial'),
    ]

    operations = [
        # Optional group administrator; deleting the user nulls the FK.
        migrations.AddField(
            model_name='group',
            name='admin',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='group_admin', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='application',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='group.group'),
        ),
        migrations.AddField(
            model_name='application',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Partial unique constraint: a user may hold at most one
        # pending-or-accepted application per group (other statuses are
        # not constrained).
        migrations.AddConstraint(
            model_name='application',
            constraint=models.UniqueConstraint(condition=models.Q(('status', 'pending'), ('status', 'accepted'), _connector='OR'), fields=('user', 'group'), name='unique_application'),
        ),
    ]
| 34.684211 | 184 | 0.640364 |
36a010f2afd265dca702e0d08bbd791ca2be7e3e | 3,335 | py | Python | bring_container_to_current_workspace.py | sainathadapa/sway-wm-multi-disp-scripts | 7106852596046434acf4c98b29c8e1258351d7a1 | [
"MIT"
] | null | null | null | bring_container_to_current_workspace.py | sainathadapa/sway-wm-multi-disp-scripts | 7106852596046434acf4c98b29c8e1258351d7a1 | [
"MIT"
] | null | null | null | bring_container_to_current_workspace.py | sainathadapa/sway-wm-multi-disp-scripts | 7106852596046434acf4c98b29c8e1258351d7a1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import subprocess
import sys
import anytree as at
import necessaryFuncs as nf
def create_tree(root_json, root_node):
    """Mirror one node of the sway/i3 layout tree as an anytree node.

    Unnamed nodes become 'container'; sway's internal dock/scratch
    containers are skipped entirely. Recurses over the node's children.
    """
    name = root_json['name']
    if name is None:
        name = 'container'
    # Skip sway's internal scratchpad/dock containers entirely.
    if name in ('__i3', 'topdock', 'bottomdock'):
        return None
    node = at.AnyNode(
        id=name,
        parent=root_node,
        con_id=root_json['id'],
        workspace=False,
        container=name == 'container',
    )
    for child in root_json['nodes']:
        create_tree(child, node)
def fix_container_names(node):
    """Give an anonymous 'container' node a name listing its children's ids."""
    if node.id != 'container':
        return
    child_ids = ', '.join(child.id for child in node.children)
    node.id = 'container[' + child_ids + ']'
def rofi(options, program):
    """Pipe *options* (one per line) to *program* via a shell and return its
    stdout with surrounding newlines stripped."""
    proc = subprocess.Popen(
        program,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    payload = '\n'.join(options).encode('utf-8')
    out, _ = proc.communicate(payload)
    return out.decode('utf-8').strip('\n')
# Ask sway for its full layout tree (JSON on stdout).
proc_out = subprocess.run(['swaymsg', '-t', 'get_tree'], stdout=subprocess.PIPE)
i3tree = json.loads(proc_out.stdout.decode('utf-8'))
# Mirror the layout tree as an anytree structure under a synthetic root.
root = at.AnyNode(id='r')
create_tree(i3tree, root)
root = root.children[0]
# Identify the workspaces: assumes each display's first child holds the
# workspace list — TODO confirm this holds for all sway output layouts.
for display in root.children:
    for wk in display.children[0].children:
        wk.workspace = True
# Get the name of the currently focused workspace.
proc_out = subprocess.run(['swaymsg', '-t', 'get_workspaces'], stdout=subprocess.PIPE)
wkList = json.loads(proc_out.stdout.decode('utf-8'))
focWkName = nf.getFocusedWK(wkList)
# Flatten the tree so every workspace (except the focused one) hangs
# directly off the root.
root.children = [node
                 for node in at.PostOrderIter(root, filter_=lambda x: x.workspace)
                 if node.id != focWkName]
# If a workspace contains only one container, splice that container out.
for node in at.PostOrderIter(root, filter_=lambda x: x.workspace):
    if len(node.children) == 1:
        node.children = node.children[0].children
# Likewise remove containers that wrap a single element.
for node in at.PreOrderIter(root, filter_=lambda x: x.container):
    if len(node.children) == 1:
        node.children[0].parent = node.parent
        node.parent = None
# Give the remaining anonymous containers readable names.
for node in at.PreOrderIter(root, filter_=lambda x: x.container):
    fix_container_names(node)
# Build [display label, container id] pairs for the rofi menu (the render
# prefix x carries the tree-drawing indentation).
names_id_map = [[x+y.id, y.con_id] for x, _, y in at.RenderTree(root)]
# Show the menu; -format i makes rofi print the selected row index.
selected = rofi([x[0] for x in names_id_map[1:]], 'rofi -dmenu -i -format i')
if selected == '':
    sys.exit(0)
# +1 compensates for the root row that was skipped when building the menu.
selected = int(selected)+1
command_to_run = ['swaymsg',
                  '[con_id=' + str(names_id_map[selected][1]) + '] ' +
                  'move --no-auto-back-and-forth container to workspace ' +
                  focWkName]
# print(command_to_run)
subprocess.call(command_to_run)
| 31.462264 | 86 | 0.629685 |
1ec6f3cd1be5fdefd1236b9bb0677f1e8daec04a | 5,664 | py | Python | install/linux_x86_64/pt/fteproxy/Crypto/Random/Fortuna/FortunaAccumulator.py | getlantern/lantern-archive | 8d311928e8ab38fb1b206b0156b90c82e67a4d87 | [
"Apache-2.0"
] | 4 | 2015-08-14T17:34:32.000Z | 2017-03-18T16:52:46.000Z | install/linux_x86_64/pt/fteproxy/Crypto/Random/Fortuna/FortunaAccumulator.py | getlantern/lantern-archive | 8d311928e8ab38fb1b206b0156b90c82e67a4d87 | [
"Apache-2.0"
] | 1 | 2015-04-21T19:54:40.000Z | 2015-04-21T19:54:40.000Z | install/linux_x86_64/pt/fteproxy/Crypto/Random/Fortuna/FortunaAccumulator.py | getlantern/lantern-archive | 8d311928e8ab38fb1b206b0156b90c82e67a4d87 | [
"Apache-2.0"
] | 7 | 2015-11-28T02:36:40.000Z | 2020-09-27T23:19:24.000Z | # -*- coding: ascii -*-
#
# FortunaAccumulator.py : Fortuna's internal accumulator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"  # legacy CVS/SVN keyword; kept for API compatibility
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
from binascii import b2a_hex
import time
import warnings
from Crypto.pct_warnings import ClockRewindWarning
import SHAd256
import FortunaGenerator
class FortunaPool(object):
    """Fortuna entropy pool.

    Behaves like a hash object except that:
    - it tracks how many bytes have been fed in (the .length attribute),
    - it can be reinitialized in place via .reset(),
    - bytes are added with .append() rather than .update().
    """

    digest_size = SHAd256.digest_size

    def __init__(self):
        self.reset()

    def append(self, data):
        """Feed *data* into the pool's hash and count its length."""
        self._h.update(data)
        self.length += len(data)

    def digest(self):
        return self._h.digest()

    def hexdigest(self):
        raw = b2a_hex(self.digest())
        if sys.version_info[0] == 2:
            return raw
        return raw.decode()

    def reset(self):
        # Fresh double-SHA256 state and a zeroed byte counter.
        self._h = SHAd256.new()
        self.length = 0
def which_pools(r):
    """Return the list of pool indexes (in range(32)) to include during reseed number r.

    According to _Practical Cryptography_, chapter 10.5.2 "Pools":

        "Pool P_i is included if 2**i is a divisor of r. Thus P_0 is used
        every reseed, P_1 every other reseed, P_2 every fourth reseed, etc."

    Parameters
    ----------
    r : int
        Reseed counter; must be >= 1.
    """
    # Kept as a module-level function so it can be unit-tested on its own.
    assert r >= 1
    retval = []
    mask = 0
    for i in range(32):
        # "Pool P_i is included if 2**i is a divisor of [reseed_count]"
        if (r & mask) == 0:
            retval.append(i)
        else:
            break  # optimization. once this fails, it always fails
        # BUG FIX: "1L" is Python 2-only syntax and a SyntaxError on
        # Python 3; a plain 1 behaves identically on both interpreters.
        mask = (mask << 1) | 1
    return retval
class FortunaAccumulator(object):
    """Fortuna entropy accumulator: 32 entropy pools feeding an AES generator.

    Pools are drained on a power-of-two schedule (see which_pools) and a
    reseed is rate-limited to at most once per reseed_interval. The timing
    and ordering of the statements below are security-relevant; do not
    reorder them casually.
    """

    min_pool_size = 64 # TODO: explain why
    reseed_interval = 0.100 # 100 ms TODO: explain why

    def __init__(self):
        self.reseed_count = 0
        self.generator = FortunaGenerator.AESGenerator()
        self.last_reseed = None
        # Initialize 32 FortunaPool instances.
        # NB: This is _not_ equivalent to [FortunaPool()]*32, which would give
        # us 32 references to the _same_ FortunaPool instance (and cause the
        # assertion below to fail).
        self.pools = [FortunaPool() for i in range(32)] # 32 pools
        assert(self.pools[0] is not self.pools[1])

    def _forget_last_reseed(self):
        # This is not part of the standard Fortuna definition, and using this
        # function frequently can weaken Fortuna's ability to resist a state
        # compromise extension attack, but we need this in order to properly
        # implement Crypto.Random.atfork(). Otherwise, forked child processes
        # might continue to use their parent's PRNG state for up to 100ms in
        # some cases. (e.g. CVE-2013-1445)
        self.last_reseed = None

    def random_data(self, bytes):
        """Return *bytes* pseudo-random bytes, reseeding first when allowed.

        NOTE: the parameter name shadows the builtin ``bytes``; kept for
        API compatibility.
        """
        current_time = time.time()
        if (self.last_reseed is not None and self.last_reseed > current_time): # Avoid float comparison to None to make Py3k happy
            warnings.warn("Clock rewind detected. Resetting last_reseed.", ClockRewindWarning)
            self.last_reseed = None
        # Reseed only when pool 0 holds enough entropy AND the rate limit
        # (reseed_interval) has elapsed or no reseed has happened yet.
        if (self.pools[0].length >= self.min_pool_size and
            (self.last_reseed is None or
             current_time > self.last_reseed + self.reseed_interval)):
            self._reseed(current_time)
        # The following should fail if we haven't seeded the pool yet.
        return self.generator.pseudo_random_data(bytes)

    def _reseed(self, current_time=None):
        """Drain the scheduled pools and feed their digests to the generator."""
        if current_time is None:
            current_time = time.time()
        seed = []
        self.reseed_count += 1
        self.last_reseed = current_time
        # which_pools selects pools on the power-of-two schedule; each
        # contributing pool is reset after its digest is taken.
        for i in which_pools(self.reseed_count):
            seed.append(self.pools[i].digest())
            self.pools[i].reset()
        seed = b("").join(seed)
        self.generator.reseed(seed)

    def add_random_event(self, source_number, pool_number, data):
        """Append an entropy event (source byte, length byte, payload) to a pool."""
        assert 1 <= len(data) <= 32
        assert 0 <= source_number <= 255
        assert 0 <= pool_number <= 31
        self.pools[pool_number].append(bchr(source_number))
        self.pools[pool_number].append(bchr(len(data)))
        self.pools[pool_number].append(data)
# vim:set ts=4 sw=4 sts=4 expandtab:
| 36.541935 | 130 | 0.644951 |
6a8e765b09ea0be03f643bfc64049368d6cb7090 | 1,594 | py | Python | blueapps/account/views.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | 1 | 2021-05-19T04:31:34.000Z | 2021-05-19T04:31:34.000Z | blueapps/account/views.py | ZhuoZhuoCrayon/bk-sops | d1475d53c19729915727ce7adc24e3226f15e332 | [
"Apache-2.0"
] | null | null | null | blueapps/account/views.py | ZhuoZhuoCrayon/bk-sops | d1475d53c19729915727ce7adc24e3226f15e332 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
from django.shortcuts import render
from django.http import JsonResponse
from blueapps.account.decorators import login_exempt
@login_exempt
def login_success(request):
    """Render the page shown after a successful popup login."""
    template_name = 'account/login_success.html'
    return render(request, template_name)
@login_exempt
def login_page(request):
    """Render the fixed landing page that triggers the login popup.

    The optional ``refer_url`` query parameter is forwarded to the template.
    """
    context = {'refer_url': request.GET.get('refer_url')}
    return render(request, 'account/login_page.html', context)
def send_code_view(request):
    """Ask the current user object to send a verification code; return the result as JSON."""
    return JsonResponse(request.user.send_code())
def get_user_info(request):
    """Return the logged-in user's id and username plus a server timestamp as JSON."""
    user = request.user
    payload = {
        "code": 0,
        "data": {
            "id": user.id,
            "username": user.username,
            "timestamp": time.time()
        },
        "message": 'ok'
    }
    return JsonResponse(payload)
| 26.566667 | 115 | 0.702008 |
67ceb22f6fc15ceaabad393c5275b72bc6eebc2a | 2,640 | py | Python | Stack/test4.py | CrazyIdeaDream/DataStructuresandAlgorithms | 5a102a6b82c30f1190e68523356a7d31224fe086 | [
"MIT"
] | null | null | null | Stack/test4.py | CrazyIdeaDream/DataStructuresandAlgorithms | 5a102a6b82c30f1190e68523356a7d31224fe086 | [
"MIT"
] | null | null | null | Stack/test4.py | CrazyIdeaDream/DataStructuresandAlgorithms | 5a102a6b82c30f1190e68523356a7d31224fe086 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-#
# -------------------------------------------------------------------------------
# Name: ch5
# Author: xiaohuo
# Date: 2020/3/29
# Prohect_Name: data_structure
# IEDA_Name: PyCharm
# Create_Time: 21:57
# -------------------------------------------------------------------------------
"""
python实现计算后序表达式
"""
from Stack.stack import Stack
def postfixEval(operandStrack, tokenList, operand, operateSymbol):
    """
    Evaluate a postfix (RPN) expression using a stack.

    :param operandStrack: stack object (push/pop/isEmpty) used as workspace
    :param tokenList: token list produced by simplification()
    :param operand: list of accepted operand strings ('0'..'9')
    :param operateSymbol: list of accepted operator strings
    :return: the value of the expression
    """
    for token in tokenList:
        if token in operand:  # operand: push its numeric value
            operandStrack.push(int(token))
        if token in operateSymbol:  # operator: apply it to the top two values
            # NOTE(review): this only guards one pop; a malformed expression
            # with a single stacked value still pops twice -- confirm intent.
            if not operandStrack.isEmpty():
                # Top of stack is the RIGHT-hand operand, the one below it
                # is the LEFT-hand operand.
                right = operandStrack.pop()
                left = operandStrack.pop()
                # Bug fix: the original passed (right, left) to calculate(),
                # which evaluated e.g. "5 3 -" as 3 - 5 = -2 instead of 2.
                result = calculate(left, right, token)
                operandStrack.push(result)
    return operandStrack.pop()
def calculate(num1, num2, symbol):
    """
    Apply a binary arithmetic operator to two numbers.

    :param num1: left operand
    :param num2: right operand
    :param symbol: one of '+', '-', '*', '/'
    :return: the arithmetic result, or None for an unrecognized symbol
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    op = operations.get(symbol)
    if op is None:
        # Unknown operator: mirror the original behaviour of returning None.
        return None
    return op(num1, num2)
def simplification(postfixExpr, operand):
    """
    Validate the input expression and split it into a token list.

    :param postfixExpr: raw expression string typed by the user
    :param operand: list of accepted operand characters ('0'..'9')
    :return: list of operand/operator tokens with whitespace removed
    :raises Exception: if a character is neither an operand, an operator,
        a parenthesis, nor a space
    """
    tokenLists = []
    operateSymbol_list = ['+', '-', '*', '/', '(', ')']
    for ch in postfixExpr:
        if ch in operand or ch in operateSymbol_list:
            tokenLists.append(ch)
        elif ch == ' ':
            # Whitespace only separates tokens; drop it.
            pass
        else:
            # Bug fix: the original called Exception("...%s", format(ch)),
            # i.e. passed two arguments, so the message was never
            # %-formatted.  Interpolate the offending character properly.
            raise Exception("你输入的不是操作数(0-9)或者操作符(+-*/),你的输入为:%s" % ch)
    return tokenLists
def create_Target():
    """
    Instantiate the working stack.

    :return: a fresh Stack instance
    """
    stack_instance = Stack()
    return stack_instance
def create_operand():
    """
    Build the operand set and define the operator symbols.

    :return: tuple (operand, operateSymbol): digit strings '0'..'9' and the
        four arithmetic operator strings
    """
    digits = [str(d) for d in range(10)]  # operands 0 through 9
    operators = ['+', '-', '*', '/']  # operator symbols
    return digits, operators
if __name__ == '__main__':
    # Interactive entry point: read a postfix expression from the user,
    # tokenize it, evaluate it and print "<expression>=<result>".
    postfixExpr = input("请输入要计算的后序表达式:")
    operandStrack = create_Target()  # the working operand stack
    operand, operateSymbol = create_operand()
    tokenLists = simplification(postfixExpr, operand)
    print(str("".join(tokenLists)) + "=" + str(postfixEval(operandStrack, tokenLists, operand, operateSymbol)))
| 25.631068 | 111 | 0.55303 |
a5c23d0d28d2204f5b49ddb1619089cf5895c5a4 | 5,592 | py | Python | UINotifier.py | tom66/scopeapp | b3364d2cebc0e6b8c5eb2ae5befdd29d15655a36 | [
"MIT"
] | 6 | 2020-11-29T21:13:37.000Z | 2022-03-19T23:57:39.000Z | UINotifier.py | tom66/scopeapp | b3364d2cebc0e6b8c5eb2ae5befdd29d15655a36 | [
"MIT"
] | null | null | null | UINotifier.py | tom66/scopeapp | b3364d2cebc0e6b8c5eb2ae5befdd29d15655a36 | [
"MIT"
] | 11 | 2021-12-13T01:03:19.000Z | 2022-02-21T03:35:43.000Z | """
This file is part of YAOS and is licenced under the MIT licence.
"""
import gettext
gettext.bindtextdomain('yaosapp', '/lang')
gettext.textdomain('yaosapp')
_ = gettext.gettext
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import time
# Vertical offset (pixels) at which notification banners are placed.
NOTIFY_YPOS = 0
# Supported notification classes (higher number appears to mean higher
# severity; see NotifyMessage.get_widget CSS class selection).
NOTIFY_WARNING = 2
NOTIFY_INFO = 1
# Notification show time & fadeout period, in seconds.
NOTIFY_SHOW_AGE = 5
NOTIFY_FADEOUT_TIME = 1
# Widgets allocated at or below this width (pixels) are hidden for one frame
# to avoid a visible snap into place (see NotifyController.update_overlay).
NOTIFY_SMALL_WIDGET = 16
# Module-level debug logger (root logger).
import logging
log = logging.getLogger()
class NotifyController(object):
    """Owns the two notification slots and positions the active notification
    widget inside a GtkFixed overlay."""
    def __init__(self):
        # Slot 0 holds the current notification, slot 1 a secondary one.
        self.notifiers = [None, None]
        self.fixed = None
        self.cur_wdg = None
        self.last_computed_x = None
    def push_notification(self, notify):
        """Queue a NotifyMessage, destroying whichever one it replaces."""
        # We used to have a complex sorting logic that picked out the newest notification,
        # but this is better. We only show the newest notification, except if that notification
        # has a lower priority than the current notification. If so, it gets put into slot 1
        # of the notification queue.
        if self.notifiers[1] == None or notify.cls < self.notifiers[1].cls:
            log.info("Replacing secondary notification with %r" % notify)
            if self.notifiers[1] is not None:
                self.notifiers[1].destroy()
            self.notifiers[1] = notify
        else:
            log.info("Replacing current notification with %r" % notify)
            if self.notifiers[0] is not None:
                self.notifiers[0].destroy()
            self.notifiers[0] = notify
    def set_fixed_container(self, fixed):
        """Attach the GtkFixed container that notification widgets go into."""
        self.fixed = fixed
    def update_overlay(self, screen_width):
        """Per-frame update: horizontally centre the active widget at NOTIFY_YPOS."""
        wdg = self.get_next_notify_widget()
        if wdg == None:
            self.last_computed_x = None
            return
        # If allocated_width is small, hide the widget for now; we'll show it on the next frame
        # (This is used to avoid the widget snapping into place after it is attached to the GtkFixed)
        if wdg.get_allocated_width() <= NOTIFY_SMALL_WIDGET:
            wdg.set_opacity(0)
        computed_x = (screen_width / 2) - (wdg.get_allocated_width() / 2)
        if self.cur_wdg != wdg:
            # A different widget is now active: swap it into the container.
            if self.cur_wdg != None:
                self.fixed.remove(self.cur_wdg)
            self.fixed.put(wdg, computed_x, NOTIFY_YPOS)
        else:
            # Same widget: only move it when the centred position changed.
            if computed_x != self.last_computed_x:
                self.fixed.move(wdg, computed_x, NOTIFY_YPOS)
        self.last_computed_x = computed_x
        self.cur_wdg = wdg
    def get_next_notify_widget(self):
        """Determines which notifier to show from the two available slots."""
        for n in range(2):
            if self.notifiers[n] != None:
                wdg = self.notifiers[n].get_widget()
                if wdg == False:
                    # The notification finished fading out; drop it.
                    self.notifiers[n] = None
                else:
                    return wdg
        return None
class NotifyMessage(object):
    """One notification banner: a GtkLabel plus show/fade-out timing state."""
    def __init__(self, cls_, message):
        # cls_ is one of NOTIFY_WARNING / NOTIFY_INFO; message is Pango markup
        # (it is passed to set_markup in get_widget).
        self.cls = cls_
        self.message = message
        self.label = Gtk.Label()
        self.label.set_xalign(0.5)
        self.label.set_yalign(0.5)
        self.label.set_hexpand(False)
        self.label.set_vexpand(False)
        self.label.set_opacity(1.0)
        self.last_opacity = None
        self.label_ctx = self.label.get_style_context()
        self.label_ctx.add_class("notify_global")
        self.t_created = time.time()
        self.t_started = None
        self.lbl_visible = False
    def __lt__(self, other):
        # Sort by priority first, then age
        #print("__lt__ %r %r" % (self, other))
        if self.cls == other.cls:
            # younger age wins?
            return self.t_created > other.t_created
        else:
            return self.cls > other.cls
    def get_widget(self):
        """Return the label widget, or False once the fade-out has completed.

        First call starts the show timer; the widget is shown for
        NOTIFY_SHOW_AGE seconds and then fades over NOTIFY_FADEOUT_TIME.
        """
        if self.t_started == None:
            self.t_started = time.time()
        if not self.lbl_visible:
            self.label.show_all()
            self.label.set_markup(self.message)
            self.lbl_visible = True
        # Reset both style classes, then apply the one matching our class.
        self.label_ctx.remove_class("notify_info")
        self.label_ctx.remove_class("notify_warning")
        if self.cls == NOTIFY_WARNING:
            self.label_ctx.add_class("notify_warning")
            self.label_ctx.remove_class("notify_info")
        elif self.cls == NOTIFY_INFO:
            self.label_ctx.add_class("notify_info")
            self.label_ctx.remove_class("notify_warning")
        # once shown, opacity goes to 0 over course of NOTIFY_FADEOUT_TIME seconds
        age = time.time() - self.t_started
        if age >= NOTIFY_SHOW_AGE:
            age -= NOTIFY_SHOW_AGE
            fade = 1.0 - (age / NOTIFY_FADEOUT_TIME)
            #self.label.set_opacity(fade)
            self.last_opacity = fade
            if fade <= 0:
                self.label.destroy() # Kill the widget
                self.lbl_visible = False
                return False
        else:
            #print("setOpacity...")
            self.label.set_opacity(1.0)
            self.last_opacity = 1.0
        return self.label
    def destroy(self):
        """Quick cleanup before we're killed."""
        self.label.set_opacity(0.0)
| 33.48503 | 102 | 0.574034 |
9e69293922399cc77129d71c42c0f32db30d3ba2 | 9,574 | py | Python | twisted/web/proxy.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | twisted/web/proxy.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 1 | 2022-03-04T17:40:22.000Z | 2022-03-04T17:40:22.000Z | twisted/web/proxy.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 3 | 2018-11-09T03:38:09.000Z | 2020-02-24T06:26:10.000Z | # -*- test-case-name: twisted.web.test.test_proxy -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Simplistic HTTP proxy support.
This comes in two main variants - the Proxy and the ReverseProxy.
When a Proxy is in use, a browser trying to connect to a server (say,
www.yahoo.com) will be intercepted by the Proxy, and the proxy will covertly
connect to the server, and return the result.
When a ReverseProxy is in use, the client connects directly to the ReverseProxy
(say, www.yahoo.com) which farms off the request to one of a pool of servers,
and returns the result.
Normally, a Proxy is used on the client end of an Internet connection, while a
ReverseProxy is used on the server end.
"""
import urlparse
from urllib import quote as urlquote
from twisted.internet import reactor
from twisted.internet.protocol import ClientFactory
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.http import HTTPClient, Request, HTTPChannel
class ProxyClient(HTTPClient):
    """
    Used by ProxyClientFactory to implement a simple web proxy.
    @ivar _finished: A flag which indicates whether or not the original request
        has been finished yet.
    """
    _finished = False
    def __init__(self, command, rest, version, headers, data, father):
        self.father = father
        self.command = command
        self.rest = rest
        # Strip hop-by-hop headers and force the upstream connection closed.
        # NOTE(review): this mutates the headers dict passed in by the caller
        # and assumes the keys are already lower-cased -- confirm.
        if "proxy-connection" in headers:
            del headers["proxy-connection"]
        headers["connection"] = "close"
        headers.pop('keep-alive', None)
        self.headers = headers
        self.data = data
    def connectionMade(self):
        # Replay the original request line, headers and body upstream.
        self.sendCommand(self.command, self.rest)
        for header, value in self.headers.items():
            self.sendHeader(header, value)
        self.endHeaders()
        self.transport.write(self.data)
    def handleStatus(self, version, code, message):
        # Mirror the upstream status onto the waiting client request.
        self.father.setResponseCode(int(code), message)
    def handleHeader(self, key, value):
        # t.web.server.Request sets default values for these headers in its
        # 'process' method. When these headers are received from the remote
        # server, they ought to override the defaults, rather than append to
        # them.
        if key.lower() in ['server', 'date', 'content-type']:
            self.father.responseHeaders.setRawHeaders(key, [value])
        else:
            self.father.responseHeaders.addRawHeader(key, value)
    def handleResponsePart(self, buffer):
        # Stream response body chunks straight through to the original request.
        self.father.write(buffer)
    def handleResponseEnd(self):
        """
        Finish the original request, indicating that the response has been
        completely written to it, and disconnect the outgoing transport.
        """
        # _finished guards against finishing the father request twice.
        if not self._finished:
            self._finished = True
            self.father.finish()
        self.transport.loseConnection()
class ProxyClientFactory(ClientFactory):
    """
    Used by ProxyRequest to implement a simple web proxy.
    """
    protocol = ProxyClient
    def __init__(self, command, rest, version, headers, data, father):
        # Stash everything needed to replay the request once connected.
        self.father = father
        self.command = command
        self.rest = rest
        self.version = version
        self.headers = headers
        self.data = data
    def buildProtocol(self, addr):
        # Hand the saved request details to a fresh client protocol instance.
        return self.protocol(
            self.command, self.rest, self.version,
            self.headers, self.data, self.father)
    def clientConnectionFailed(self, connector, reason):
        """
        Report a connection failure in a response to the incoming request as
        an error.
        """
        self.father.setResponseCode(501, "Gateway error")
        self.father.responseHeaders.addRawHeader("Content-Type", "text/html")
        self.father.write("<H1>Could not connect</H1>")
        self.father.finish()
class ProxyRequest(Request):
    """
    Used by Proxy to implement a simple web proxy.
    @ivar reactor: the reactor used to create connections.
    @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
    """
    protocols = {'http': ProxyClientFactory}
    ports = {'http': 80}
    def __init__(self, channel, queued, reactor=reactor):
        Request.__init__(self, channel, queued)
        self.reactor = reactor
    def process(self):
        """Forward this absolute-URI request (as sent to proxies) upstream."""
        parsed = urlparse.urlparse(self.uri)
        protocol = parsed[0]
        host = parsed[1]
        # Default port for the scheme; an explicit port in the netloc
        # overrides it below.
        port = self.ports[protocol]
        if ':' in host:
            host, port = host.split(':')
            port = int(port)
        # Rebuild the origin-form request target (path + query etc.).
        rest = urlparse.urlunparse(('', '') + parsed[2:])
        if not rest:
            rest = rest + '/'
        class_ = self.protocols[protocol]
        headers = self.getAllHeaders().copy()
        # Supply a Host header if the client did not send one.
        if 'host' not in headers:
            headers['host'] = host
        self.content.seek(0, 0)
        s = self.content.read()
        clientFactory = class_(self.method, rest, self.clientproto, headers,
                               s, self)
        self.reactor.connectTCP(host, port, clientFactory)
class Proxy(HTTPChannel):
    """
    This class implements a simple web proxy.
    Since it inherits from L{twisted.web.http.HTTPChannel}, to use it you
    should do something like this::
        from twisted.web import http
        f = http.HTTPFactory()
        f.protocol = Proxy
    Make the HTTPFactory a listener on a port as per usual, and you have
    a fully-functioning web proxy!
    """
    # Each incoming request on this channel is handled by a ProxyRequest.
    requestFactory = ProxyRequest
class ReverseProxyRequest(Request):
    """
    Used by ReverseProxy to implement a simple reverse proxy.
    @ivar proxyClientFactoryClass: a proxy client factory class, used to create
        new connections.
    @type proxyClientFactoryClass: L{ClientFactory}
    @ivar reactor: the reactor used to create connections.
    @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
    """
    proxyClientFactoryClass = ProxyClientFactory
    def __init__(self, channel, queued, reactor=reactor):
        Request.__init__(self, channel, queued)
        self.reactor = reactor
    def process(self):
        """
        Handle this request by connecting to the proxied server and forwarding
        it there, then forwarding the response back as the response to this
        request.
        """
        # Rewrite the Host header so the backend sees its own hostname.
        self.requestHeaders.setRawHeaders(b"host", [self.factory.host])
        clientFactory = self.proxyClientFactoryClass(
            self.method, self.uri, self.clientproto, self.getAllHeaders(),
            self.content.read(), self)
        self.reactor.connectTCP(self.factory.host, self.factory.port,
                                clientFactory)
class ReverseProxy(HTTPChannel):
    """
    Implements a simple reverse proxy.
    For details of usage, see the file examples/reverse-proxy.py.
    """
    # Each incoming request is handled by a ReverseProxyRequest.
    requestFactory = ReverseProxyRequest
class ReverseProxyResource(Resource):
    """
    Resource that renders the results gotten from another server
    Put this resource in the tree to cause everything below it to be relayed
    to a different server.
    @ivar proxyClientFactoryClass: a proxy client factory class, used to create
        new connections.
    @type proxyClientFactoryClass: L{ClientFactory}
    @ivar reactor: the reactor used to create connections.
    @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
    """
    proxyClientFactoryClass = ProxyClientFactory
    def __init__(self, host, port, path, reactor=reactor):
        """
        @param host: the host of the web server to proxy.
        @type host: C{str}
        @param port: the port of the web server to proxy.
        @type port: C{port}
        @param path: the base path to fetch data from. Note that you shouldn't
            put any trailing slashes in it, it will be added automatically in
            request. For example, if you put B{/foo}, a request on B{/bar} will
            be proxied to B{/foo/bar}. Any required encoding of special
            characters (such as " " or "/") should have been done already.
        @type path: C{str}
        """
        Resource.__init__(self)
        self.host = host
        self.port = port
        self.path = path
        self.reactor = reactor
    def getChild(self, path, request):
        """
        Create and return a proxy resource with the same proxy configuration
        as this one, except that its path also contains the segment given by
        C{path} at the end.
        """
        # safe="" ensures "/" inside a segment is percent-encoded as well.
        return ReverseProxyResource(
            self.host, self.port, self.path + '/' + urlquote(path, safe=""),
            self.reactor)
    def render(self, request):
        """
        Render a request by forwarding it to the proxied server.
        """
        # RFC 2616 tells us that we can omit the port if it's the default port,
        # but we have to provide it otherwise
        if self.port == 80:
            host = self.host
        else:
            host = "%s:%d" % (self.host, self.port)
        request.requestHeaders.setRawHeaders(b"host", [host])
        request.content.seek(0, 0)
        # Preserve the original query string, if any.
        qs = urlparse.urlparse(request.uri)[4]
        if qs:
            rest = self.path + '?' + qs
        else:
            rest = self.path
        clientFactory = self.proxyClientFactoryClass(
            request.method, rest, request.clientproto,
            request.getAllHeaders(), request.content.read(), request)
        self.reactor.connectTCP(self.host, self.port, clientFactory)
        # The response is delivered asynchronously by the proxy client.
        return NOT_DONE_YET
| 31.493421 | 79 | 0.644454 |
ba6637585495e5bc4217d21e9ccf27c9e52accab | 308 | py | Python | cycada/models/models.py | peterzcc/cycada_release | bfd1a9dd01bdb2a956cad13b01f3e305930b7d09 | [
"BSD-2-Clause"
] | 532 | 2018-07-09T00:37:32.000Z | 2022-03-09T15:10:07.000Z | cycada/models/models.py | ckevin4747/cycada_review | aac0c4724d704165738bfad9684fbffa9337c211 | [
"BSD-2-Clause"
] | 41 | 2018-07-16T07:20:34.000Z | 2021-12-10T21:20:23.000Z | cycada/models/models.py | ckevin4747/cycada_review | aac0c4724d704165738bfad9684fbffa9337c211 | [
"BSD-2-Clause"
] | 143 | 2018-07-09T13:10:17.000Z | 2022-02-15T14:24:29.000Z | import torch
# Global registry mapping model names to model classes.
models = {}
def register_model(name):
    """Class decorator factory: register the decorated class under ``name``."""
    def _register(cls):
        # Record the class and return it unchanged so decoration is transparent.
        models[name] = cls
        return cls
    return _register
def get_model(name, num_cls=10, **args):
    """Instantiate the registered model ``name`` (moved to GPU when available).

    Extra keyword arguments are forwarded to the model constructor.
    """
    net = models[name](num_cls=num_cls, **args)
    if torch.cuda.is_available():
        return net.cuda()
    return net
| 19.25 | 47 | 0.623377 |
119717f67ac7909897b317e8efee531437b001d5 | 2,144 | py | Python | src/alice_bob_unfair.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | null | null | null | src/alice_bob_unfair.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | null | null | null | src/alice_bob_unfair.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | 1 | 2020-01-28T13:44:52.000Z | 2020-01-28T13:44:52.000Z | from soup_language import *
from language_server import server
def alice_bob_unfair():
    """Build the unfair Alice/Bob mutual-exclusion model as a behavior soup.

    Unfair because Bob may give up waiting (bob_givesUp) while Alice never
    does.  Returns a LanguageModule wrapping the soup's transition relation.
    NOTE(review): Behavior/Environment/BehaviorSoup come from soup_language
    (star-imported); semantics assumed from usage here -- confirm.
    """
    # Process states: initial, waiting for the critical section, inside it.
    init, wait, critical = 0, 1, 2
    def alice():
        # Alice's three behaviors: request entry, enter, leave.
        def i2wa(env):
            env['flag_alice'] = True
            env['alice'] = wait
        i2w = Behavior(lambda env: env['alice'] == init, i2wa, "alice_wantsIn")
        def w2ca(env):
            env['alice'] = critical
        w2c = Behavior(
            lambda env: env['alice'] == wait and (not env['flag_bob']),
            w2ca,
            "alice_goesIn")
        def c2ia(env):
            env['flag_alice'] = False
            env['alice'] = init
        c2i = Behavior(
            lambda env: env['alice'] == critical,
            c2ia,
            "alice_getsOut")
        return [i2w, w2c, c2i]
    def bob():
        # Bob's four behaviors: request entry, enter, give up, leave.
        def i2wa(env):
            env['flag_bob'] = True
            env['bob'] = wait
        i2w = Behavior(lambda env: env['bob'] == init, i2wa, "bob_wantsIn")
        def w2ca(env):
            env['bob'] = critical
        w2c = Behavior(
            lambda env: env['bob'] == wait and (not env['flag_alice']),
            w2ca,
            "bob_goesIn")
        def w2ia(env):
            env['flag_bob'] = False
            env['bob'] = init
        # Extra transition that makes the protocol unfair to Bob: he backs
        # off whenever Alice has raised her flag.
        w2i = Behavior(
            lambda env: env['bob'] == wait and env['flag_alice'],
            w2ia,
            "bob_givesUp"
        )
        def c2ia(env):
            env['flag_bob'] = False
            env['bob'] = init
        c2i = Behavior(
            lambda env: env['bob'] == critical,
            c2ia,
            "bob_getsOut")
        return [i2w, w2c, w2i, c2i]
    # make the soup
    # The dict maps variable names to slot indices in the initial-value list.
    soup = BehaviorSoup(
        Environment(
            {'alice': 0, 'flag_alice': 1,
             'bob': 2, 'flag_bob': 3},
            [init, False, init, False]),
        alice() + bob())
    # instantiate the TransitionRelation for the soup
    return LanguageModule(
        BehaviorSoupTransitionRelation(soup),
        BehaviorSoupRuntimeView(soup),
        BehaviorSoupAtomEvaluator(soup),
        BehaviorSoupMarshaller(soup)
    )
if __name__ == "__main__":
server(alice_bob_unfair)
| 25.52381 | 79 | 0.5 |
88a69fbefb737fd3a11fb8fbdb0bd3b73fc2de77 | 894 | py | Python | isi_sdk_8_2_1/test/test_auth_wellknowns.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_1/test/test_auth_wellknowns.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_1/test/test_auth_wellknowns.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.auth_wellknowns import AuthWellknowns # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestAuthWellknowns(unittest.TestCase):
    """Unit-test stubs for the AuthWellknowns model."""
    def setUp(self):
        # No fixtures are needed for these placeholder tests.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testAuthWellknowns(self):
        """Test AuthWellknowns"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_1.models.auth_wellknowns.AuthWellknowns()  # noqa: E501
        pass
if __name__ == '__main__':
    # Run the stub tests with the stdlib unittest runner when executed directly.
    unittest.main()
| 21.804878 | 85 | 0.702461 |
9b5c034738d6fdc5105fc3ccc6cd8c300161401f | 179,840 | py | Python | pyuvdata/utils.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/utils.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/utils.py | e-koch/pyuvdata | ac36067f195c75127b28f02479eda1eb7a3400ed | [
"BSD-2-Clause"
] | null | null | null | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
import re
import copy
import warnings
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
from scipy.spatial.distance import cdist
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from astropy.coordinates import SkyCoord, Distance, EarthLocation
from astropy import units
import erfa
from . import _utils
# Explicit public API of this module; names not listed here are considered
# private helpers.
__all__ = [
    "POL_STR2NUM_DICT",
    "POL_NUM2STR_DICT",
    "CONJ_POL_DICT",
    "JONES_STR2NUM_DICT",
    "JONES_NUM2STR_DICT",
    "LatLonAlt_from_XYZ",
    "XYZ_from_LatLonAlt",
    "rotECEF_from_ECEF",
    "ECEF_from_rotECEF",
    "ENU_from_ECEF",
    "ECEF_from_ENU",
    "phase_uvw",
    "unphase_uvw",
    "uvcalibrate",
    "apply_uvflag",
    "get_lst_for_time",
    "polstr2num",
    "polnum2str",
    "jstr2num",
    "jnum2str",
    "parse_polstr",
    "parse_jpolstr",
    "conj_pol",
    "reorder_conj_pols",
    "baseline_to_antnums",
    "antnums_to_baseline",
    "baseline_index_flip",
    "get_baseline_redundancies",
    "get_antenna_redundancies",
    "collapse",
    "mean_collapse",
    "absmean_collapse",
    "quadmean_collapse",
    "or_collapse",
    "and_collapse",
]
# fmt: off
# polarization constants
# Convention: positive integers are (pseudo-)Stokes parameters, negative
# integers are correlation products (circular -1..-4, linear -5..-8).
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4,
                    "I": 1, "Q": 2, "U": 3, "V": 4,  # support straight stokes names
                    "rr": -1, "ll": -2, "rl": -3, "lr": -4,
                    "xx": -5, "yy": -6, "xy": -7, "yx": -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV",
                    -1: "rr", -2: "ll", -3: "rl", -4: "lr",
                    -5: "xx", -6: "yy", -7: "xy", -8: "yx"}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy",
                 "ee": "ee", "nn": "nn", "en": "ne", "ne": "en",
                 "rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl",
                 "I": "I", "Q": "Q", "U": "U", "V": "V",
                 "pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"}
# maps jones matrix element strings to jones integers
# Add entries that don't start with "J" to allow shorthand versions
JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
                      "xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
                      "Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
                      "rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
                      -5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
# maps uvdata pols to input feed polarizations
POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
                    "xy": ["x", "y"], "yx": ["y", "x"],
                    "ee": ["e", "e"], "nn": ["n", "n"],
                    "en": ["e", "n"], "ne": ["n", "e"],
                    "rr": ["r", "r"], "ll": ["l", "l"],
                    "rl": ["r", "l"], "lr": ["l", "r"]}
# fmt: on
def _get_iterable(x):
"""Return iterable version of input."""
if isinstance(x, Iterable):
return x
else:
return (x,)
def _fits_gethduaxis(hdu, axis):
"""
Make axis arrays for fits files.
Parameters
----------
hdu : astropy.io.fits HDU object
The HDU to make an axis array for.
axis : int
The axis number of interest (1-based).
Returns
-------
ndarray of float
Array of values for the specified axis.
"""
ax = str(axis)
axis_num = hdu.header["NAXIS" + ax]
val = hdu.header["CRVAL" + ax]
delta = hdu.header["CDELT" + ax]
index = hdu.header["CRPIX" + ax] - 1
return delta * (np.arange(axis_num) - index) + val
def _fits_indexhdus(hdulist):
"""
Get a dict of table names and HDU numbers from a FITS HDU list.
Parameters
----------
hdulist : list of astropy.io.fits HDU objects
List of HDUs to get names for
Returns
-------
dict
dictionary with table names as keys and HDU number as values.
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header["EXTNAME"]] = i
except (KeyError):
continue
return tablenames
def _get_fits_extra_keywords(header, keywords_to_skip=None):
"""
Get any extra keywords and return as dict.
Parameters
----------
header : FITS header object
header object to get extra_keywords from.
keywords_to_skip : list of str
list of keywords to not include in extra keywords in addition to standard
FITS keywords.
Returns
-------
dict
dict of extra keywords.
"""
# List standard FITS header items that are still should not be included in
# extra_keywords
# These are the beginnings of FITS keywords to ignore, the actual keywords
# often include integers following these names (e.g. NAXIS1, CTYPE3)
std_fits_substrings = [
"HISTORY",
"SIMPLE",
"BITPIX",
"EXTEND",
"BLOCKED",
"GROUPS",
"PCOUNT",
"BSCALE",
"BZERO",
"NAXIS",
"PTYPE",
"PSCAL",
"PZERO",
"CTYPE",
"CRVAL",
"CRPIX",
"CDELT",
"CROTA",
"CUNIT",
]
if keywords_to_skip is not None:
std_fits_substrings.extend(keywords_to_skip)
extra_keywords = {}
# find all the other header items and keep them as extra_keywords
for key in header:
# check if key contains any of the standard FITS substrings
if np.any([sub in key for sub in std_fits_substrings]):
continue
if key == "COMMENT":
extra_keywords[key] = str(header.get(key))
elif key != "":
extra_keywords[key] = header.get(key)
return extra_keywords
def _check_history_version(history, version_string):
"""Check if version_string is present in history string."""
if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""):
return True
else:
return False
def _check_histories(history1, history2):
"""Check if two histories are the same."""
if history1.replace("\n", "").replace(" ", "") == history2.replace(
"\n", ""
).replace(" ", ""):
return True
else:
return False
def _combine_history_addition(history1, history2):
    """
    Find extra history to add to have minimal repeats.

    Parameters
    ----------
    history1 : str
        First history.
    history2 : str
        Second history

    Returns
    -------
    str
        Extra history to add to first history.  None if history2 contains
        nothing that is not already in history1.
    """
    # first check if they're the same to avoid more complicated processing.
    if _check_histories(history1, history2):
        return None
    hist2_words = history2.split(" ")
    add_hist = ""
    # Pad history1 with spaces so whole-word containment tests work at the
    # string boundaries.
    test_hist1 = " " + history1 + " "
    for i, word in enumerate(hist2_words):
        if " " + word + " " not in test_hist1:
            add_hist += " " + word
            # NOTE: the inner loop deletes from hist2_words while the outer
            # enumerate() is iterating it, deliberately consuming the run of
            # consecutive not-in-history1 words that follows position i.
            keep_going = i + 1 < len(hist2_words)
            while keep_going:
                if (hist2_words[i + 1] == " ") or (
                    " " + hist2_words[i + 1] + " " not in test_hist1
                ):
                    add_hist += " " + hist2_words[i + 1]
                    del hist2_words[i + 1]
                    keep_going = i + 1 < len(hist2_words)
                else:
                    keep_going = False
    if add_hist == "":
        add_hist = None
    return add_hist
def baseline_to_antnums(baseline, Nants_telescope):
    """
    Get the antenna numbers corresponding to a given baseline number.

    Parameters
    ----------
    baseline : int or array_like of ints
        baseline number
    Nants_telescope : int
        number of antennas

    Returns
    -------
    int or array_like of int
        first antenna number(s)
    int or array_like of int
        second antenna number(s)
    """
    # The baseline encoding only supports up to 2048 antennas.
    if Nants_telescope > 2048:
        raise Exception(
            "error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope)
        )
    is_sequence = isinstance(baseline, (np.ndarray, list, tuple))
    # The compiled helper always works on (and returns) int64 arrays.
    ant1, ant2 = _utils.baseline_to_antnums(
        np.ascontiguousarray(baseline, dtype=np.int64)
    )
    if not is_sequence:
        # Scalar input: unwrap the single-element result arrays.
        return ant1.item(0), ant2.item(0)
    return ant1, ant2
def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):
    """
    Get the baseline number corresponding to two given antenna numbers.

    Parameters
    ----------
    ant1 : int or array_like of int
        first antenna number
    ant2 : int or array_like of int
        second antenna number
    Nants_telescope : int
        number of antennas
    attempt256 : bool
        Option to try to use the older 256 standard used in
        many uvfits files (will use 2048 standard if there are more
        than 256 antennas). Default is False.

    Returns
    -------
    int or array of int
        baseline number corresponding to the two antenna numbers.
    """
    # The baseline encoding only supports up to 2048 antennas.
    if Nants_telescope is not None and Nants_telescope > 2048:
        raise Exception(
            "cannot convert ant1, ant2 to a baseline index "
            "with Nants={Nants}>2048.".format(Nants=Nants_telescope)
        )
    is_sequence = isinstance(ant1, (np.ndarray, list, tuple))
    # Delegate the actual encoding to the compiled helper (int64 arrays).
    baseline = _utils.antnums_to_baseline(
        np.ascontiguousarray(ant1, dtype=np.int64),
        np.ascontiguousarray(ant2, dtype=np.int64),
        attempt256=attempt256,
    )
    if is_sequence:
        return baseline
    # Scalar input: unwrap the single-element result array.
    return baseline.item(0)
def baseline_index_flip(baseline, Nants_telescope):
    """Change baseline number to reverse antenna order."""
    ant_a, ant_b = baseline_to_antnums(baseline, Nants_telescope)
    # Re-encode with the antenna order swapped.
    return antnums_to_baseline(ant_b, ant_a, Nants_telescope)
def _x_orientation_rep_dict(x_orientation):
    """Create replacement dict based on x_orientation."""
    orientation = x_orientation.lower()
    if orientation in ("east", "e"):
        return {"x": "e", "y": "n"}
    if orientation in ("north", "n"):
        return {"x": "n", "y": "e"}
    raise ValueError("x_orientation not recognized.")
def polstr2num(pol, x_orientation=None):
    """
    Convert polarization str to number according to AIPS Memo 117.

    Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
    not true Stokes, but also supports 'I', 'Q', 'U', 'V'.

    Parameters
    ----------
    pol : str
        polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresonding parameter on UVData
        for more details.

    Returns
    -------
    int
        Number corresponding to string

    Raises
    ------
    ValueError
        If the pol string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(POL_STR2NUM_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
            # Add E/N-style aliases (e.g. "en", "ne") alongside the x/y keys.
            for key, value in POL_STR2NUM_DICT.items():
                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[new_key] = value
        except ValueError:
            warnings.warn("x_orientation not recognized.")
    # Case-insensitive lookup table.
    poldict = {k.lower(): v for k, v in dict_use.items()}
    try:
        if isinstance(pol, str):
            out = poldict[pol.lower()]
        elif isinstance(pol, Iterable):
            out = [poldict[key.lower()] for key in pol]
        else:
            # Unsupported input type; funnel through the same error path below.
            raise KeyError(pol)
    except KeyError as err:
        # Raise the documented ValueError rather than leaking a KeyError
        # from the dictionary lookup for unknown polarization strings.
        raise ValueError(
            "Polarization {p} cannot be converted to a polarization number.".format(
                p=pol
            )
        ) from err
    return out
def polnum2str(num, x_orientation=None):
    """
    Convert polarization number to str according to AIPS Memo 117.

    Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
    not true Stokes

    Parameters
    ----------
    num : int
        polarization number
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to convert to
        E/N strings. See corresonding parameter on UVData for more details.

    Returns
    -------
    str
        String corresponding to polarization number

    Raises
    ------
    ValueError
        If the polarization number cannot be converted to a polarization string.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(POL_NUM2STR_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
            # Rewrite the x/y-based values as E/N-based strings.
            for key, value in POL_NUM2STR_DICT.items():
                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[key] = new_val
        except ValueError:
            warnings.warn("x_orientation not recognized.")
    try:
        # np.integer covers all numpy integer widths (int8..int64, uints),
        # generalizing the previous explicit (np.int32, np.int64) check.
        if isinstance(num, (int, np.integer)):
            out = dict_use[num]
        elif isinstance(num, Iterable):
            out = [dict_use[i] for i in num]
        else:
            # Unsupported input type; funnel through the same error path below.
            raise KeyError(num)
    except KeyError as err:
        # Raise the documented ValueError rather than leaking a KeyError
        # from the dictionary lookup for unknown polarization numbers.
        raise ValueError(
            "Polarization {p} cannot be converted to string.".format(p=num)
        ) from err
    return out
def jstr2num(jstr, x_orientation=None):
    """
    Convert jones polarization str to number according to calfits memo.

    Parameters
    ----------
    jstr : str
        antenna (jones) polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresonding parameter on UVData
        for more details.

    Returns
    -------
    int
        antenna (jones) polarization number corresponding to string

    Raises
    ------
    ValueError
        If the jones string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
            # Add E/N-style aliases alongside the x/y keys.
            for key, value in JONES_STR2NUM_DICT.items():
                new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[new_key] = value
        except ValueError:
            warnings.warn("x_orientation not recognized.")
    # Case-insensitive lookup table.
    jdict = {k.lower(): v for k, v in dict_use.items()}
    try:
        if isinstance(jstr, str):
            out = jdict[jstr.lower()]
        elif isinstance(jstr, Iterable):
            out = [jdict[key.lower()] for key in jstr]
        else:
            # Unsupported input type; funnel through the same error path below.
            raise KeyError(jstr)
    except KeyError as err:
        # Raise the documented ValueError rather than leaking a KeyError
        # from the dictionary lookup for unknown jones strings.
        raise ValueError(
            "Jones polarization {j} cannot be converted to index.".format(j=jstr)
        ) from err
    return out
def jnum2str(jnum, x_orientation=None):
    """
    Convert jones polarization number to str according to calfits memo.

    Parameters
    ----------
    jnum : int
        antenna (jones) polarization number
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to convert to
        E/N strings. See corresonding parameter on UVData for more details.

    Returns
    -------
    str
        antenna (jones) polarization string corresponding to number

    Raises
    ------
    ValueError
        If the jones polarization number cannot be converted to a jones
        polarization string.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
            # Rewrite the x/y-based values as E/N-based strings.
            for key, value in JONES_NUM2STR_DICT.items():
                new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[key] = new_val
        except ValueError:
            warnings.warn("x_orientation not recognized.")
    try:
        # np.integer covers all numpy integer widths, generalizing the
        # previous explicit (np.int32, np.int64) check.
        if isinstance(jnum, (int, np.integer)):
            out = dict_use[jnum]
        elif isinstance(jnum, Iterable):
            out = [dict_use[i] for i in jnum]
        else:
            # Unsupported input type; funnel through the same error path below.
            raise KeyError(jnum)
    except KeyError as err:
        # Raise the documented ValueError rather than leaking a KeyError
        # from the dictionary lookup for unknown jones numbers.
        raise ValueError(
            "Jones polarization {j} cannot be converted to string.".format(j=jnum)
        ) from err
    return out
def parse_polstr(polstr, x_orientation=None):
    """
    Parse a polarization string and return pyuvdata standard polarization string.

    See utils.POL_STR2NUM_DICT for options.

    Parameters
    ----------
    polstr : str
        polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresonding parameter on UVData
        for more details.

    Returns
    -------
    str
        AIPS Memo 117 standard string

    Raises
    ------
    ValueError
        If the pol string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    # Round-trip through the numeric code to normalize the string form.
    pol_num = polstr2num(polstr, x_orientation=x_orientation)
    return polnum2str(pol_num, x_orientation=x_orientation)
def parse_jpolstr(jpolstr, x_orientation=None):
    """
    Parse a Jones polarization string and return pyuvdata standard jones string.

    See utils.JONES_STR2NUM_DICT for options.

    Parameters
    ----------
    jpolstr : str
        Jones polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresonding parameter on UVData
        for more details.

    Returns
    -------
    str
        calfits memo standard string

    Raises
    ------
    ValueError
        If the jones string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    # Round-trip through the numeric code to normalize the string form.
    jones_num = jstr2num(jpolstr, x_orientation=x_orientation)
    return jnum2str(jones_num, x_orientation=x_orientation)
def conj_pol(pol):
    """
    Return the polarization for the conjugate baseline.

    For example, (1, 2, 'xy') = conj(2, 1, 'yx').
    The returned polarization is determined by assuming the antenna pair is
    reversed in the data, and finding the correct polarization correlation
    which will yield the requested baseline when conjugated. Note this means
    changing the polarization for linear cross-pols, but keeping auto-pol
    (e.g. xx) and Stokes the same.

    Parameters
    ----------
    pol : str or int
        Polarization string or integer.

    Returns
    -------
    cpol : str or int
        Polarization as if antennas are swapped (type matches input)
    """
    # Case-insensitive view of the conjugation map.
    lower_conj = {key.lower(): val for key, val in CONJ_POL_DICT.items()}
    # NOTE: the str check must come before the Iterable check, since
    # strings are themselves iterable.
    if isinstance(pol, str):
        return lower_conj[pol.lower()]
    if isinstance(pol, Iterable):
        return [conj_pol(entry) for entry in pol]
    if isinstance(pol, (int, np.int32, np.int64)):
        # Convert to string, conjugate, and convert back to a number.
        return polstr2num(lower_conj[polnum2str(pol).lower()])
    raise ValueError("Polarization not recognized, cannot be conjugated.")
def reorder_conj_pols(pols):
    """
    Reorder multiple pols, swapping pols that are conjugates of one another.

    For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
    This is useful for the _key2inds function in the case where an antenna
    pair is specified but the conjugate pair exists in the data. The conjugated
    data should be returned in the order of the polarization axis, so after
    conjugating the data, the pols need to be reordered.
    For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
    the user requests antpair (1, 0), they should get:
    [(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]

    Parameters
    ----------
    pols : array_like of str or int
        Polarization array (strings or ints).

    Returns
    -------
    conj_order : ndarray of int
        Indices to reorder polarization array.
    """
    if not isinstance(pols, Iterable):
        raise ValueError("reorder_conj_pols must be given an array of polarizations.")
    # ndarray needed so np.where can do elementwise comparison.
    cpols = np.array([conj_pol(entry) for entry in pols])
    conj_order = []
    for entry in pols:
        matches = np.where(cpols == entry)[0]
        # -1 flags a pol whose conjugate is missing from the input.
        conj_order.append(matches[0] if entry in cpols else -1)
    if -1 in conj_order:
        raise ValueError(
            "Not all conjugate pols exist in the polarization array provided."
        )
    return conj_order
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
    """
    Calculate lat/lon/alt from ECEF x,y,z.

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    check_acceptability : bool
        Flag to check XYZ coordinates are reasonable.

    Returns
    -------
    latitude : ndarray or float
        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    longitude : ndarray or float
        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    altitude : ndarray or float
        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
    """
    xyz = np.asarray(xyz)
    if xyz.ndim > 1 and xyz.shape[1] != 3:
        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
    # A 1D input means a single point; return scalars in that case.
    single_pt = xyz.ndim == 1
    if single_pt:
        xyz = xyz[np.newaxis, :]
    # The cython helper wants contiguous float64 with shape (3, Npts).
    xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
    if check_acceptability:
        # Vector magnitudes should be near Earth's radius for valid ECEF.
        radii = np.linalg.norm(xyz, axis=0)
        in_range = (radii >= 6.35e6) & (radii <= 6.39e6)
        if not in_range.all():
            raise ValueError("xyz values should be ECEF x, y, z coordinates in meters")
    # The helper returns one 2D array because it is less overhead for cython.
    lla = _utils._lla_from_xyz(xyz)
    if single_pt:
        return lla[0, 0], lla[1, 0], lla[2, 0]
    return lla[0], lla[1], lla[2]
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
    """
    Calculate ECEF x,y,z from lat/lon/alt values.

    Parameters
    ----------
    latitude : ndarray or float
        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    longitude : ndarray or float
        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    altitude : ndarray or float
        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters

    Returns
    -------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    """
    # The cython helper requires contiguous float64 arrays.
    latitude = np.ascontiguousarray(latitude, dtype=np.float64)
    longitude = np.ascontiguousarray(longitude, dtype=np.float64)
    altitude = np.ascontiguousarray(altitude, dtype=np.float64)
    n_pts = latitude.size
    if longitude.size != n_pts or altitude.size != n_pts:
        raise ValueError(
            "latitude, longitude and altitude must all have the same length"
        )
    # Helper returns (3, Npts); transpose to the documented (Npts, 3).
    xyz = _utils._xyz_from_latlonalt(latitude, longitude, altitude).T
    # Single point in -> single 3-vector out.
    return xyz[0] if n_pts == 1 else xyz
def rotECEF_from_ECEF(xyz, longitude):
    """
    Get rotated ECEF positions such that the x-axis goes through the longitude.

    Miriad and uvfits expect antenna positions in this frame
    (with longitude of the array center/telescope location)

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    longitude : float
        longitude in radians to rotate coordinates to
        (usually the array center/telescope location).

    Returns
    -------
    ndarray of float
        Rotated ECEF coordinates, shape (Npts, 3).
    """
    # Rotate about the z-axis by -longitude so the x-axis passes through
    # the given longitude.
    cos_a = np.cos(-longitude)
    sin_a = np.sin(-longitude)
    rotation = np.array(
        [
            [cos_a, -sin_a, 0],
            [sin_a, cos_a, 0],
            [0, 0, 1],
        ]
    )
    return rotation.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
    """
    Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
    longitude : float
        longitude in radians giving the x direction of the rotated coordinates
        (usually the array center/telescope location).

    Returns
    -------
    ndarray of float
        ECEF coordinates, shape (Npts, 3).
    """
    # Rotate about the z-axis by +longitude, undoing rotECEF_from_ECEF.
    cos_a = np.cos(longitude)
    sin_a = np.sin(longitude)
    rotation = np.array(
        [
            [cos_a, -sin_a, 0],
            [sin_a, cos_a, 0],
            [0, 0, 1],
        ]
    )
    return rotation.dot(xyz.T).T
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
    """
    Calculate local ENU (east, north, up) coordinates from ECEF coordinates.

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    latitude : float
        Latitude of center of ENU coordinates in radians.
    longitude : float
        Longitude of center of ENU coordinates in radians.
    altitude : float
        Altitude of center of ENU coordinates in meters.

    Returns
    -------
    ndarray of float
        numpy array, shape (Npts, 3), with local ENU coordinates
    """
    xyz = np.asarray(xyz)
    if xyz.ndim > 1 and xyz.shape[1] != 3:
        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
    # A 1D input means a single point; squeeze the output back down later.
    single_pt = xyz.ndim == 1
    if single_pt:
        xyz = xyz[np.newaxis, :]
    # The cython utility expects contiguous float64 of shape (3, Npts).
    xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
    # Sanity check: valid ECEF magnitudes are on the order of Earth's radius.
    low_radius, high_radius = 6.35e6, 6.39e6
    magnitudes = np.linalg.norm(xyz, axis=0)
    if np.any(magnitudes <= low_radius) or np.any(magnitudes >= high_radius):
        raise ValueError(
            "ECEF vector magnitudes must be on the order of the radius of the earth"
        )
    # Transpose after the call to return the documented (Npts, 3) shape.
    enu = _utils._ENU_from_ECEF(
        xyz,
        np.ascontiguousarray(latitude, dtype=np.float64),
        np.ascontiguousarray(longitude, dtype=np.float64),
        np.ascontiguousarray(altitude, dtype=np.float64),
    ).T
    return np.squeeze(enu) if single_pt else enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
    """
    Calculate ECEF coordinates from local ENU (east, north, up) coordinates.

    Parameters
    ----------
    enu : ndarray of float
        numpy array, shape (Npts, 3), with local ENU coordinates.
    latitude : float
        Latitude of center of ENU coordinates in radians.
    longitude : float
        Longitude of center of ENU coordinates in radians.
    altitude : float
        Altitude of center of ENU coordinates in meters.

    Returns
    -------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    """
    enu = np.asarray(enu)
    if enu.ndim > 1 and enu.shape[1] != 3:
        raise ValueError("The expected shape of the ENU array is (Npts, 3).")
    # A 1D input means a single point; squeeze the output back down later.
    single_pt = enu.ndim == 1
    if single_pt:
        enu = enu[np.newaxis, :]
    # The cython utility expects contiguous float64 of shape (3, Npts).
    enu = np.ascontiguousarray(enu.T, dtype=np.float64)
    # Transpose after the call to return the documented (Npts, 3) shape.
    xyz = _utils._ECEF_from_ENU(
        enu,
        np.ascontiguousarray(latitude, dtype=np.float64),
        np.ascontiguousarray(longitude, dtype=np.float64),
        np.ascontiguousarray(altitude, dtype=np.float64),
    ).T
    return np.squeeze(xyz) if single_pt else xyz
def phase_uvw(ra, dec, initial_uvw):
    """
    Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.

    This code expects input uvws or positions relative to the telescope
    location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
    returns phased ones in the same frame.

    Note that this code is nearly identical to ENU_from_ECEF, except that it
    uses an arbitrary phasing center rather than a coordinate center.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    initial_uvw : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    uvw : ndarray of float
        uvw array in the same frame as initial_uvws, ra and dec.
    """
    # Promote a single 3-vector to the (Nlocs, 3) shape the helper expects.
    uvw_in = initial_uvw
    if uvw_in.ndim == 1:
        uvw_in = uvw_in[np.newaxis, :]
    phased = _utils._phase_uvw(
        np.float64(ra),
        np.float64(dec),
        np.ascontiguousarray(uvw_in.T, dtype=np.float64),
    )
    # Transpose back to (Nlocs, 3).
    return phased.T
def unphase_uvw(ra, dec, uvw):
    """
    Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.

    This code expects phased uvws or positions in the same frame that ra/dec
    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    uvw : ndarray of float
        Phased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    unphased_uvws : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).
    """
    # Promote a single 3-vector to the (Nlocs, 3) shape the helper expects.
    uvw_in = uvw
    if uvw_in.ndim == 1:
        uvw_in = uvw_in[np.newaxis, :]
    unphased = _utils._unphase_uvw(
        np.float64(ra),
        np.float64(dec),
        np.ascontiguousarray(uvw_in.T, dtype=np.float64),
    )
    # Transpose back to (Nlocs, 3).
    return unphased.T
def polar2_to_cart3(lon_array, lat_array):
    """
    Convert 2D polar coordinates into 3D cartesian coordinates.

    This is a simple routine for converting a set of spherical angular coordinates
    into a 3D cartesian vectors, where the x-direction is set by the position (0, 0).

    Parameters
    ----------
    lon_array : float or ndarray
        Longitude coordinates, which increases in the counter-clockwise direction.
        Units of radians. Can either be a float or ndarray -- if the latter, must have
        the same shape as lat_array.
    lat_array : float or ndarray
        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
        radians. Can either be a float or ndarray -- if the latter, must have the same
        shape as lon_array.

    Returns
    -------
    xyz_array : ndarray of float
        Cartesian coordinates of the given longitude and latitude on a unit sphere.
        Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
        lat_array if they were provided as type ndarray, otherwise (3,).
    """
    # Refuse mixed float/ndarray inputs up front.
    if type(lon_array) is not type(lat_array):
        raise ValueError(
            "lon_array and lat_array must either both be floats or ndarrays."
        )
    if isinstance(lon_array, np.ndarray) and lon_array.shape != lat_array.shape:
        raise ValueError("lon_array and lat_array must have the same shape.")
    # Standard spherical-to-cartesian mapping on the unit sphere; the
    # cos(lat) factor is shared between the x and y components.
    cos_lat = np.cos(lat_array)
    return np.array(
        [
            np.cos(lon_array) * cos_lat,
            np.sin(lon_array) * cos_lat,
            np.sin(lat_array),
        ],
        dtype=float,
    )
def cart3_to_polar2(xyz_array):
    """
    Convert 3D cartesian coordinates into 2D polar coordinates.

    This is a simple routine for converting a set of 3D cartesian vectors into
    spherical coordinates, where the position (0, 0) lies along the x-direction.

    Parameters
    ----------
    xyz_array : ndarray of float
        Cartesian coordinates, need not be of unit vector length. Shape is
        (3, coord_shape).

    Returns
    -------
    lon_array : ndarray of float
        Longitude coordinates, which increases in the counter-clockwise direction.
        Units of radians, shape is (coord_shape,).
    lat_array : ndarray of float
        Latitude coordinates, where 0 falls on the equator of the sphere. Units of
        radians, shape is (coord_shape,).
    """
    if not isinstance(xyz_array, np.ndarray):
        raise ValueError("xyz_array must be an ndarray.")
    if xyz_array.ndim == 0:
        raise ValueError("xyz_array must have ndim > 0")
    if xyz_array.shape[0] != 3:
        raise ValueError("xyz_array must be length 3 across the zeroth axis.")
    # Longitude from the x and y components, wrapped into [0, 2*pi).
    lon_array = np.mod(
        np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float
    )
    # Latitude via arctan2 against the XY-plane length rather than arcsin,
    # which keeps this correct for non-unit-length vectors.
    xy_len = np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0))
    lat_array = np.arctan2(xyz_array[2], xy_len, dtype=float)
    return lon_array, lat_array
def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot):
    """
    Apply a rotation matrix to a series of vectors.

    This is a simple convenience function which wraps numpy's matmul function for use
    with various vector rotation functions in this module. This code could, in
    principle, be replaced by a cythonized piece of code, although the matmul function
    is _pretty_ well optimized already. This function is not meant to be called by
    users, but is instead used by multiple higher-level utility functions (namely those
    that perform rotations).

    Parameters
    ----------
    xyz_array : ndarray of floats
        Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec)
        or (1, 3, n_vec), the latter is useful for when performing multiple rotations
        on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec),
        or (3,).
    rot_matrix : ndarray of floats
        Series of rotation matricies to be applied to the stack of vectors. Must be
        of shape (n_rot, 3, 3)
    n_rot : int
        Number of individual rotation matricies to be applied.

    Returns
    -------
    rotated_xyz : ndarray of floats
        Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
    """
    # Validate shapes before handing off to matmul.
    if rot_matrix.shape != (n_rot, 3, 3):
        raise ValueError(
            "rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot
        )
    if xyz_array.ndim == 3:
        # Stacked input: leading axis must broadcast against n_rot.
        if (xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3):
            raise ValueError(
                "Misshaped xyz_array - expected shape (n_rot, 3, n_vectors)."
            )
    elif xyz_array.ndim < 3 and xyz_array.shape[0] != 3:
        raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).")
    # matmul broadcasts the (n_rot, 3, 3) stack across the vector array.
    return np.matmul(rot_matrix, xyz_array)
def _rotate_one_axis(xyz_array, rot_amount, rot_axis):
    """
    Rotate an array of 3D positions around a single axis (x, y, or z).

    This function performs a basic rotation of 3D vectors about one of the priciple
    axes -- the x-axis, the y-axis, or the z-axis.

    Note that the rotations here obey the right-hand rule -- that is to say, from the
    perspective of the positive side of the axis of rotation, a positive rotation will
    cause points on the plane intersecting this axis to move in a counter-clockwise
    fashion.

    Parameters
    ----------
    xyz_array : ndarray of float
        Set of 3-dimensional vectors be rotated, in typical right-handed cartesian
        order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
    rot_amount : float or ndarray of float
        Amount (in radians) to rotate the given set of coordinates. Can either be a
        single float (or ndarray of shape (1,)) if rotating all vectors by the same
        amount, otherwise expected to be shape (Nrot,).
    rot_axis : int
        Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis,
        and 2 is the z-axis.

    Returns
    -------
    rotated_xyz : ndarray of float
        Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
    """
    # If rot_amount is None or all zeros, then this is just one big old no-op.
    # Even for the no-op we still promote the output to 3 dims (Nrot, 3, Nvec)
    # so callers always get a consistently-shaped result.
    if (rot_amount is None) or np.all(rot_amount == 0.0):
        if np.ndim(xyz_array) == 1:
            return deepcopy(xyz_array[np.newaxis, :, np.newaxis])
        elif np.ndim(xyz_array) == 2:
            return deepcopy(xyz_array[np.newaxis, :, :])
        else:
            return deepcopy(xyz_array)
    # Check and see how big of a rotation matrix we need
    n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0])
    n_vec = xyz_array.shape[-1]
    # The promotion of values to float64 is to suppress numerical precision issues,
    # since the matrix math can - in limited circumstances - introduce precision errors
    # of order 10x the limiting numerical precision of the float. For a float32/single,
    # thats a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
    # a part in 1e15.
    rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64)
    # Figure out which pieces of the matrix we need to update.
    # temp_idx/temp_jdx are the two axes orthogonal to rot_axis (mod-3 cycle).
    temp_jdx = (rot_axis + 1) % 3
    temp_idx = (rot_axis + 2) % 3
    # Fill in the rotation matricies accordingly.
    # The rotation axis itself is left unchanged (diagonal entry of 1);
    # the 2x2 sub-block over the other two axes gets the cos/sin terms.
    rot_matrix[rot_axis, rot_axis] = 1
    rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64)
    rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx]
    rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64)
    rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx]
    # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
    # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
    rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
    if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
        # This is a special case where we allow the rotation axis to "expand" along
        # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
        # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
        # swap the n_vector and n_rot axes, and then swap them back once everything
        # else is done.
        return np.transpose(
            _rotate_matmul_wrapper(
                np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
            ),
            axes=[2, 1, 0],
        )
    else:
        return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
    """
    Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).

    This function performs a sequential pair of basic rotations of 3D vectors about
    the priciple axes -- the x-axis, the y-axis, or the z-axis.

    Note that the rotations here obey the right-hand rule -- that is to say, from the
    perspective of the positive side of the axis of rotation, a positive rotation will
    cause points on the plane intersecting this axis to move in a counter-clockwise
    fashion.

    Parameters
    ----------
    xyz_array : ndarray of float
        Set of 3-dimensional vectors be rotated, in typical right-handed cartesian
        order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
    rot_amount1 : float or ndarray of float
        Amount (in radians) of rotatation to apply during the first rotation of the
        sequence, to the given set of coordinates. Can either be a single float (or
        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
        expected to be shape (Nrot,).
    rot_amount2 : float or ndarray of float
        Amount (in radians) of rotatation to apply during the second rotation of the
        sequence, to the given set of coordinates. Can either be a single float (or
        ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
        expected to be shape (Nrot,).
    rot_axis1 : int
        Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
        y-axis, and 2 is the z-axis.
    rot_axis2 : int
        Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
        y-axis, and 2 is the z-axis.

    Returns
    -------
    rotated_xyz : ndarray of float
        Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
    """
    # Capture some special cases upfront, where we can save ourselves a bit of work
    no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
    no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
    if no_rot1 and no_rot2:
        # If rot_amount is None, then this is just one big old no-op.
        return deepcopy(xyz_array)
    elif no_rot1:
        # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
        return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2)
    elif no_rot2:
        # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
        return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1)
    elif rot_axis1 == rot_axis2:
        # Capture the case where someone wants to do a sequence of rotations on the same
        # axis. Also known as just rotating a single axis.
        return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1)
    # Figure out how many individual rotation matricies we need, accounting for the
    # fact that these can either be floats or ndarrays.
    n_rot = max(
        rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
        rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
    )
    n_vec = xyz_array.shape[-1]
    # The promotion of values to float64 is to suppress numerical precision issues,
    # since the matrix math can - in limited circumstances - introduce precision errors
    # of order 10x the limiting numerical precision of the float. For a float32/single,
    # thats a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
    # a part in 1e15.
    rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
    # There are two permulations per pair of axes -- when the pair is right-hand
    # oriented vs left-hand oriented. Check here which one it is. For example,
    # rotating first on the x-axis, second on the y-axis is considered a
    # "right-handed" pair, whereas z-axis first, then y-axis would be considered
    # a "left-handed" pair.
    lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1
    # temp_idx maps matrix rows/columns through a mod-3 cycle anchored on
    # rot_axis1, shifted by one when the pair is left-hand oriented, so the
    # same fill-in code below works for either orientation.
    temp_idx = [
        np.mod(rot_axis1 - lhd_order, 3),
        np.mod(rot_axis1 + 1 - lhd_order, 3),
        np.mod(rot_axis1 + 2 - lhd_order, 3),
    ]
    # We're using lots of sin and cos calculations -- doing them once upfront saves
    # quite a bit of time by eliminating redundant calculations.
    # "lo" is the first rotation applied in right-handed order, "hi" the second.
    sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
    cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
    sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
    cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
    # Take care of the diagonal terms first, since they aren't actually affected by the
    # order of rotational opertations
    rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi
    rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo
    rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi
    # Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix
    # for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just
    # a transpose of the right-hand orientation of the same pair (e.g., y-rot, then
    # x-rot). The (-1)**lhd_order factors implement exactly that sign flip.
    rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi
    rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = (
        cos_lo * sin_hi * ((-1.0) ** lhd_order)
    )
    rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0
    rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * (
        (-1.0) ** (1 + lhd_order)
    )
    rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * (
        (-1.0) ** (1 + lhd_order)
    )
    rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = (
        sin_lo * cos_hi * ((-1.0) ** (lhd_order))
    )
    # The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
    # of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
    rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
    if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
        # This is a special case where we allow the rotation axis to "expand" along
        # the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
        # but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
        # swap the n_vector and n_rot axes, and then swap them back once everything
        # else is done.
        return np.transpose(
            _rotate_matmul_wrapper(
                np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
            ),
            axes=[2, 1, 0],
        )
    else:
        return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
def calc_uvw(
    app_ra=None,
    app_dec=None,
    frame_pa=None,
    lst_array=None,
    use_ant_pos=True,
    uvw_array=None,
    antenna_positions=None,
    antenna_numbers=None,
    ant_1_array=None,
    ant_2_array=None,
    old_app_ra=None,
    old_app_dec=None,
    old_frame_pa=None,
    telescope_lat=None,
    telescope_lon=None,
    from_enu=False,
    to_enu=False,
):
    """
    Calculate an array of baseline coordinates, in either uvw or ENU.

    This routine is meant as a convenience function for producing baseline coordinates
    based under a few different circumstances:

    1) Calculating ENU coordinates using antenna positions
    2) Calculating uvw coordinates at a given sky position using antenna positions
    3) Converting from ENU coordinates to uvw coordinates
    4) Converting from uvw coordinates to ENU coordinates
    5) Converting from uvw coordinates at one sky position to another sky position

    Different conversion pathways have different parameters that are required.

    Parameters
    ----------
    app_ra : ndarray of float
        Apparent RA of the target phase center, required if calculating baseline
        coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
        radians.
    app_dec : ndarray of float
        Apparent declination of the target phase center, required if calculating
        baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
        units are radians.
    frame_pa : ndarray of float
        Position angle between the great circle of declination in the apparent frame
        versus that of the reference frame, used for making sure that "North" on
        the derived maps points towards a particular celestial pole (not just the
        topocentric one). Required if not deriving baseline coordinates from antenna
        positions, from_enu=False, and a value for old_frame_pa is given. Shape is
        (Nblts,), units are radians.
    old_app_ra : ndarray of float
        Apparent RA of the previous phase center, required if not deriving baseline
        coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
        units are radians.
    old_app_dec : ndarray of float
        Apparent declination of the previous phase center, required if not deriving
        baseline coordinates from antenna positions and from_enu=False. Shape is
        (Nblts,), units are radians.
    old_frame_pa : ndarray of float
        Frame position angle of the previous phase center, required if not deriving
        baseline coordinates from antenna positions, from_enu=False, and a value
        for frame_pa is supplied. Shape is (Nblts,), units are radians.
    lst_array : ndarray of float
        Local apparent sidereal time, required if deriving baseline coordinates from
        antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
    use_ant_pos : bool
        Switch to determine whether to derive uvw values from the antenna positions
        (if set to True), or to use the previously calculated uvw coordinates to
        derive the new baseline vectors (if set to False). Default is True.
    uvw_array : ndarray of float
        Array of previous baseline coordinates (in either uvw or ENU), required if
        not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
    antenna_positions : ndarray of float
        List of antenna positions relative to array center in ECEF coordinates,
        required if not providing `uvw_array`. Shape is (Nants, 3).
    antenna_numbers: ndarray of int
        List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
        `antenna_numbers[0]` should give the number of the antenna that resides at
        ECEF position given by `antenna_positions[0]`). Shape is (Nants,), required
        if not providing `uvw_array`. Contains all unique entries of the joint set of
        `ant_1_array` and `ant_2_array`.
    ant_1_array : ndarray of int
        Antenna number of the first antenna in the baseline pair, for all baselines
        Required if not providing `uvw_array`, shape is (Nblts,).
    ant_2_array : ndarray of int
        Antenna number of the second antenna in the baseline pair, for all baselines
        Required if not providing `uvw_array`, shape is (Nblts,).
    telescope_lat : float
        Latitude of the phase center, units radians, required if deriving baseline
        coordinates from antenna positions, or converting to/from ENU coordinates.
    telescope_lon : float
        Longitude of the phase center, units radians, required if deriving baseline
        coordinates from antenna positions, or converting to/from ENU coordinates.
    from_enu : boolean
        Set to True if uvw_array is expressed in ENU coordinates. Default is False.
    to_enu : boolean
        Set to True if you would like the output expressed in ENU coordinates.
        Default is False.

    Returns
    -------
    new_coords : ndarray of float64
        Set of baseline coordinates, shape (Nblts, 3).
    """
    # Argument validation up front: each conversion pathway requires a different
    # subset of the optional parameters, so fail early with a specific message.
    if to_enu:
        if lst_array is None and not use_ant_pos:
            raise ValueError(
                "Must include lst_array to calculate baselines in ENU coordinates!"
            )
        if telescope_lat is None:
            raise ValueError(
                "Must include telescope_lat to calculate baselines "
                "in ENU coordinates!"
            )
    else:
        if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
            raise ValueError(
                "Must include both app_ra and app_dec, or frame_pa to calculate "
                "baselines in uvw coordinates!"
            )
    if use_ant_pos:
        # Assume at this point we are dealing w/ antenna positions
        if antenna_positions is None:
            raise ValueError("Must include antenna_positions if use_ant_pos=True.")
        if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
            raise ValueError(
                "Must include ant_1_array, ant_2_array, and antenna_numbers "
                "setting use_ant_pos=True."
            )
        if lst_array is None and not to_enu:
            raise ValueError(
                "Must include lst_array if use_ant_pos=True and not calculating "
                "baselines in ENU coordinates."
            )
        if telescope_lon is None:
            raise ValueError("Must include telescope_lon if use_ant_pos=True.")
        # Map antenna numbers to row indices of antenna_positions.
        ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
        ant_1_index = np.array([ant_dict[idx] for idx in ant_1_array], dtype=int)
        ant_2_index = np.array([ant_dict[idx] for idx in ant_2_array], dtype=int)
        N_ants = antenna_positions.shape[0]
        # Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
        # rotations are actually needed. If the ratio of Nblts to number of unique
        # entries is favorable, we can just rotate the antenna positions and save
        # ourselves a bit of work.
        if to_enu:
            # If to_enu, skip all this -- there's only one unique ha + dec combo
            unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
            unique_mask[0] = True
        else:
            # A baseline-time needs a new rotation only when its hour angle
            # (lst - ra) or declination differs from that of the previous entry.
            unique_mask = np.append(
                True,
                (
                    ((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
                    | (app_dec[:-1] != app_dec[1:])
                ),
            )
        # GHA -> Hour Angle as measured at Greenwich (because antenna coords are
        # centered such that x-plane intersects the meridian at longitude 0).
        if to_enu:
            # Unphased coordinates appear to be stored in ENU coordinates -- that's
            # equivalent to calculating uvw's based on zenith. We can use that to our
            # advantage and spoof the gha and dec based on telescope lon and lat
            unique_gha = np.zeros(1) - telescope_lon
            unique_dec = np.zeros(1) + telescope_lat
            unique_pa = None
        else:
            unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon
            unique_dec = app_dec[unique_mask]
            unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask]
        # Transpose the ant vectors so that they are in the proper shape
        ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :]
        # Apply rotations, and then reorganize the ndarray so that you can access
        # individual antenna vectors quickly.
        ant_rot_vectors = np.reshape(
            np.transpose(
                _rotate_one_axis(
                    _rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1),
                    unique_pa,
                    0,
                ),
                axes=[0, 2, 1],
            ),
            (-1, 3),
        )
        # With the first entry zeroed out, cumsum(unique_mask) maps each
        # baseline-time to the index of the unique rotation it belongs to;
        # scaling by N_ants turns that into a row offset into the flattened
        # (n_unique * N_ants, 3) array of rotated antenna vectors.
        unique_mask[0] = False
        unique_map = np.cumsum(unique_mask) * N_ants
        new_coords = (
            ant_rot_vectors[unique_map + ant_2_index]
            - ant_rot_vectors[unique_map + ant_1_index]
        )
    else:
        if uvw_array is None:
            raise ValueError("Must include uvw_array if use_ant_pos=False.")
        if from_enu:
            if to_enu:
                # Well this was pointless... returning your uvws unharmed
                return uvw_array
            # Unphased coordinates appear to be stored in ENU coordinates -- that's
            # equivalent to calculating uvw's based on zenith. We can use that to our
            # advantage and spoof old_app_ra and old_app_dec based on lst_array and
            # telescope_lat
            if telescope_lat is None:
                raise ValueError(
                    "Must include telescope_lat if moving between "
                    'ENU (i.e., "unphased") and uvw coordinates!'
                )
            if lst_array is None:
                raise ValueError(
                    'Must include lst_array if moving between ENU (i.e., "unphased") '
                    "and uvw coordinates!"
                )
        else:
            if (old_frame_pa is None) and not (frame_pa is None or to_enu):
                raise ValueError(
                    "Must include old_frame_pa values if data are phased and "
                    "applying new position angle values (frame_pa)."
                )
            if ((old_app_ra is None) and not (app_ra is None or to_enu)) or (
                (old_app_dec is None) and not (app_dec is None or to_enu)
            ):
                raise ValueError(
                    "Must include old_app_ra and old_app_dec values when data are "
                    "already phased and phasing to a new position."
                )
        # For this operation, all we need is the delta-ha coverage, which _should_ be
        # entirely encapsulated by the change in RA.
        if (app_ra is None) and (old_app_ra is None):
            gha_delta_array = 0.0
        else:
            gha_delta_array = (lst_array if from_enu else old_app_ra) - (
                lst_array if to_enu else app_ra
            )
        # Notice below there's an axis re-orientation here, to go from uvw -> XYZ,
        # where X is pointing in the direction of the source. This is mostly here
        # for convenience and code legibility -- a slightly different pair of
        # rotations would give you the same result w/o needing to cycle the axes.
        # Up front, we want to trap the corner-case where the sky position you are
        # phasing up to hasn't changed, just the position angle (i.e., which way is
        # up on the map). This is a much easier transform to handle.
        if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
            new_coords = _rotate_one_axis(
                uvw_array[:, [2, 0, 1], np.newaxis],
                frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
                0,
            )[:, :, 0]
        else:
            new_coords = _rotate_two_axis(
                _rotate_two_axis(  # Yo dawg, I heard you like rotation matrices...
                    uvw_array[:, [2, 0, 1], np.newaxis],
                    0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa),
                    (-telescope_lat) if from_enu else (-old_app_dec),
                    0,
                    1,
                ),
                gha_delta_array,
                telescope_lat if to_enu else app_dec,
                2,
                1,
            )
            # One final rotation applied here, to compensate for the fact that we want
            # the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
            # the chosen frame, if we not in ENU coordinates
            if not to_enu:
                new_coords = _rotate_one_axis(new_coords, frame_pa, 0)
            # Finally drop the now-vestigial last axis of the array
            new_coords = new_coords[:, :, 0]
    # There's one last task to do, which is to re-align the axes from projected
    # XYZ -> uvw, where X (which points towards the source) falls on the w axis,
    # and Y and Z fall on the u and v axes, respectively.
    return new_coords[:, [1, 2, 0]]
def transform_sidereal_coords(
    lon,
    lat,
    in_coord_frame,
    out_coord_frame,
    in_coord_epoch=None,
    out_coord_epoch=None,
    time_array=None,
):
    """
    Transform a given set of coordinates from one sidereal coordinate frame to another.

    Uses astropy SkyCoord machinery to move coordinates between sidereal frames.
    Supports any frame that does not require an observer location on Earth or
    another celestial body -- e.g. GCRS, FK5 (J2000), FK4 (B1950), Galactic,
    Supergalactic, CIRS, and HCRS.

    Parameters
    ----------
    lon : float or ndarray of floats
        Longitudinal coordinate to be transformed, typically the right ascension,
        in units of radians. Either a float or an ndarray of floats with shape
        (Ncoords,). Must match the shape of `lat`.
    lat : float or ndarray of floats
        Latitudinal coordinate to be transformed, typically the declination, in
        units of radians. Either a float or an ndarray of floats with shape
        (Ncoords,). Must match the shape of `lon`.
    in_coord_frame : string
        Reference frame of the provided coordinates, matching a frame supported by
        the astropy SkyCoord object (e.g. 'gcrs', 'fk4', 'fk5', 'galactic',
        'supergalactic', 'cirs', 'hcrs').
    out_coord_frame : string
        Reference frame to output coordinates in, matching a frame supported by
        the astropy SkyCoord object (e.g. 'gcrs', 'fk4', 'fk5', 'galactic',
        'supergalactic', 'cirs', 'hcrs').
    in_coord_epoch : float
        Epoch of the input coordinate frame, only required for epoch-dependent
        frames such as FK4 (B1950) or FK5 (J2000). Units are fractional years
        (Besselian years for FK4-type frames, Julian years otherwise).
    out_coord_epoch : float
        Epoch of the output coordinate frame, only required for epoch-dependent
        frames such as FK4 (B1950) or FK5 (J2000). Units are fractional years
        (Besselian years for FK4-type frames, Julian years otherwise).
    time_array : float or ndarray of floats
        Julian date(s) the coordinates correspond to, only used by frames with
        annual motion terms (e.g. aberration in GCRS). Either a float or an
        ndarray with shape (Ntimes,); when both coordinates and times are arrays,
        Ntimes must equal Ncoords.

    Returns
    -------
    new_lon : float or ndarray of floats
        Longitudinal coordinates in the output frame, units of radians. An
        ndarray of shape (Ncoords,) or (Ntimes,) if any inputs were arrays.
    new_lat : float or ndarray of floats
        Latitudinal coordinates in the output frame, units of radians. An
        ndarray of shape (Ncoords,) or (Ntimes,) if any inputs were arrays.
    """
    lon_coord = lon * units.rad
    lat_coord = lat * units.rad

    # Shapes must agree -- either both scalars, or same-shaped arrays.
    if lat_coord.shape != lon_coord.shape:
        raise ValueError("lon and lat must be the same shape.")

    if lon_coord.ndim == 0:
        # Promote scalar inputs to length-1 arrays so downstream indexing and
        # broadcasting logic only has to handle one case.
        lon_coord.shape += (1,)
        lat_coord.shape += (1,)

    def _build_epoch(epoch, frame_name):
        # Normalize an epoch spec into a Time object (or None). Strings and Time
        # objects go straight through; numeric epochs are interpreted as
        # Besselian years for FK4-style frames and Julian years otherwise.
        if isinstance(epoch, (str, Time)):
            return Time(epoch)
        if epoch is None:
            return None
        is_fk4 = frame_name.lower() in ["fk4", "fk4noeterms"]
        return Time(epoch, format="byear" if is_fk4 else "jyear")

    in_epoch = _build_epoch(in_coord_epoch, in_coord_frame)
    out_epoch = _build_epoch(out_coord_epoch, out_coord_frame)

    # Astropy quirk: obstime must be the same length as the coordinate arrays,
    # so we may need to tile either the times or the coordinates.
    rep_time = False
    rep_crds = False
    if time_array is None:
        time_obj_array = None
    else:
        if isinstance(time_array, Time):
            time_obj_array = time_array
        else:
            time_obj_array = Time(time_array, format="jd", scale="utc")
        n_times = time_obj_array.size
        n_coords = lon_coord.size
        if (n_times != 1) and (n_coords != 1):
            if time_obj_array.shape != lon_coord.shape:
                raise ValueError(
                    "Shape of time_array must be either that of "
                    " lat_coord/lon_coord if len(time_array) > 1."
                )
        else:
            rep_crds = (n_times != 1) and (n_coords == 1)
            rep_time = (n_times == 1) and (n_coords != 1)

    if rep_crds:
        lon_coord = np.repeat(lon_coord, len(time_array))
        lat_coord = np.repeat(lat_coord, len(time_array))
    if rep_time:
        time_obj_array = Time(
            np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc",
        )

    coord_object = SkyCoord(
        lon_coord,
        lat_coord,
        frame=in_coord_frame,
        equinox=in_epoch,
        obstime=time_obj_array,
    )

    # Easiest, most general way to transform to the new frame is to create a dummy
    # SkyCoord with all the attributes needed -- note that we particularly need this
    # in order to use a non-standard equinox/epoch
    target_template = SkyCoord(
        0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch
    )
    new_coord = coord_object.transform_to(target_template)

    return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad
def transform_icrs_to_app(
    time_array,
    ra,
    dec,
    telescope_loc,
    epoch=2000.0,
    pm_ra=None,
    pm_dec=None,
    vrad=None,
    dist=None,
    astrometry_library="erfa",
):
    """
    Transform a set of coordinates in ICRS to topocentric/apparent coordinates.

    This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate
    the apparent (i.e., topocentric) coordinates of a source at a given time and
    location, given a set of coordinates expressed in the ICRS frame. These coordinates
    are most typically used for defining the phase center of the array (i.e, calculating
    baseline vectors).

    As of astropy v4.2, the agreement between the three libraries is consistent down to
    the level of better than 1 mas, with the values produced by astropy and pyERFA
    consistent to better than 10 µas (this is not surprising, given that astropy uses
    pyERFA under the hood for astrometry). ERFA is the default as it outputs
    coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as
    well as the fact that of the three libraries, it produces results the fastest.

    Parameters
    ----------
    time_array : float or array-like of float
        Julian dates to calculate coordinate positions for. Can either be a single
        float, or an array-like of shape (Ntimes,).
    ra : float or array-like of float
        ICRS RA of the celestial target, expressed in units of radians. Can either
        be a single float or array of shape (Ntimes,), although this must be consistent
        with other parameters (with the exception of telescope location parameters).
    dec : float or array-like of float
        ICRS Dec of the celestial target, expressed in units of radians. Can either
        be a single float or array of shape (Ntimes,), although this must be consistent
        with other parameters (with the exception of telescope location parameters).
    telescope_loc : array-like of floats or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
        of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
        in units of radians, radians, and meters, respectively.
    epoch : int or float or str or Time object
        Epoch of the coordinate data supplied, only used when supplying proper motion
        values. If supplying a number, it will be assumed to be in Julian years.
        Default is J2000.0.
    pm_ra : float or array-like of float
        Proper motion in RA of the source, expressed in units of milliarcsec / year.
        Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
        values should be set to their expected values when the epoch is 2000.0).
        Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely ra_coord and dec_coord). Note that
        units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
    pm_dec : float or array-like of float
        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
        Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
        values should be set to their expected values when the epoch is 2000.0).
        Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely ra_coord and dec_coord). Not
        required.
    vrad : float or array-like of float
        Radial velocity of the source, expressed in units of km / sec. Can either be
        a single float or array of shape (Ntimes,), although this must be consistent
        with other parameters (namely ra_coord and dec_coord). Not required.
    dist : float or array-like of float
        Distance of the source, expressed in units of parsecs (used internally to
        derive the parallax). Can either be a single float or array of shape
        (Ntimes,), although this must be consistent with other parameters (namely
        ra_coord and dec_coord). Not required.
    astrometry_library : str
        Library used for running the coordinate conversions. Allowed options are
        'erfa' (which uses the pyERFA), 'novas' (which uses the python-novas library),
        and 'astropy' (which uses the astropy utilities). Default is erfa.

    Returns
    -------
    app_ra : ndarray of floats
        Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
    app_dec : ndarray of floats
        Apparent declination coordinates, in units of radians, of shape (Ntimes,).
    """
    # Make sure that the library requested is actually permitted
    if astrometry_library not in ["erfa", "novas", "astropy"]:
        raise ValueError(
            "Requested coordinate transformation library is not supported, please "
            "select either 'erfa', 'novas', or 'astropy' for astrometry_library."
        )
    ra_coord = ra * units.rad
    dec_coord = dec * units.rad
    # Check here to make sure that ra_coord and dec_coord are the same length,
    # either 1 or len(time_array)
    multi_coord = ra_coord.size != 1
    if ra_coord.shape != dec_coord.shape:
        raise ValueError("ra and dec must be the same shape.")
    pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
    pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
    d_coord = (
        None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
    )
    v_coord = None if vrad is None else vrad * (units.km / units.s)
    opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
    opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
    # Check the optional inputs, make sure that they're sensible
    for item, name in zip(opt_list, opt_names):
        if item is not None:
            if ra_coord.shape != item.shape:
                raise ValueError("%s must be the same shape as ra and dec." % name)
    if isinstance(telescope_loc, EarthLocation):
        site_loc = telescope_loc
    else:
        site_loc = EarthLocation.from_geodetic(
            telescope_loc[1] * (180.0 / np.pi),
            telescope_loc[0] * (180.0 / np.pi),
            height=telescope_loc[2],
        )
    # Useful for both astropy and novas methods, the latter of which gives easy
    # access to the IERS data that we want.
    if isinstance(time_array, Time):
        time_obj_array = time_array
    else:
        time_obj_array = Time(time_array, format="jd", scale="utc")
    if time_obj_array.size != 1:
        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
            raise ValueError(
                "time_array must be of either of length 1 (single "
                "float) or same length as ra and dec."
            )
    elif time_obj_array.ndim == 0:
        # Make the array at least 1-dimensional so we don't run into indexing
        # issues later.
        time_obj_array = Time([time_obj_array])
    # Check to make sure that we have a properly formatted epoch for our in-bound
    # coordinate frame
    coord_epoch = None
    if isinstance(epoch, str) or isinstance(epoch, Time):
        # If its a string or a Time object, we don't need to do anything more
        coord_epoch = Time(epoch)
    elif epoch is not None:
        coord_epoch = Time(epoch, format="jyear")
    # Note if time_array is a single element
    multi_time = time_obj_array.size != 1
    # Get IERS data, which is needed for NOVAS and ERFA
    polar_motion_data = iers.earth_orientation_table.get()
    pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
    delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
    pm_x_array = pm_x_array.to_value("arcsec")
    pm_y_array = pm_y_array.to_value("arcsec")
    delta_x_array = delta_x_array.to_value("marcsec")
    delta_y_array = delta_y_array.to_value("marcsec")
    # Catch the case where we don't have CIP delta values yet (they don't typically have
    # predictive values like the polar motion does)
    delta_x_array[np.isnan(delta_x_array)] = 0.0
    delta_y_array[np.isnan(delta_y_array)] = 0.0
    # If the source was instantiated w/ floats, it'll be a 0-dim object, which will
    # throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
    # so that all the calls can be uniform
    if ra_coord.ndim == 0:
        ra_coord.shape += (1,)
        dec_coord.shape += (1,)
        # Bug fix: the original code evaluated the bare name `pm_ra` here (a
        # no-op), leaving 0-dim proper-motion quantities unreshaped. Reshape
        # them the same way as the other per-source quantities.
        if pm_ra_coord is not None:
            pm_ra_coord.shape += (1,)
        if pm_dec_coord is not None:
            pm_dec_coord.shape += (1,)
        if d_coord is not None:
            d_coord.shape += (1,)
        if v_coord is not None:
            v_coord.shape += (1,)
    # If there is an epoch and a proper motion, apply that motion now
    if astrometry_library == "astropy":
        # Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
        # directly, but we can cheat this by going to AltAz, and then coverting back
        # to apparent RA/Dec using the telescope lat and LAST.
        if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
            # astropy is a bit weird in how it handles proper motion, so rather than
            # fight with it to do it all in one step, we separate it into two: first
            # apply proper motion to ICRS, then transform to topocentric.
            sky_coord = SkyCoord(
                ra=ra_coord,
                dec=dec_coord,
                pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
                pm_dec=pm_dec_coord,
                frame="icrs",
            )
            sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
            ra_coord = sky_coord.ra
            dec_coord = sky_coord.dec
            if d_coord is not None:
                d_coord = d_coord.repeat(ra_coord.size)
            if v_coord is not None:
                v_coord = v_coord.repeat(ra_coord.size)
        sky_coord = SkyCoord(
            ra=ra_coord,
            dec=dec_coord,
            distance=d_coord,
            radial_velocity=v_coord,
            frame="icrs",
        )
        azel_data = sky_coord.transform_to(
            SkyCoord(
                np.zeros_like(time_obj_array) * units.rad,
                np.zeros_like(time_obj_array) * units.rad,
                location=site_loc,
                obstime=time_obj_array,
                frame="altaz",
            )
        )
        # Convert az/el back to hour angle/dec, then to apparent RA via the LAST.
        app_ha, app_dec = erfa.ae2hd(
            azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad,
        )
        app_ra = np.mod(
            time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad
            - app_ha,
            2 * np.pi,
        )
    elif astrometry_library == "novas":
        # Import the NOVAS library only if it's needed/available.
        try:
            from novas import compat as novas
            from novas.compat import eph_manager
            import novas_de405  # noqa
        except ImportError as e:  # pragma: no cover
            raise ImportError(
                "novas and/or novas_de405 are not installed but is required for "
                "NOVAS functionality"
            ) from e
        # Call is needed to load high-precision ephem data in NOVAS
        jd_start, jd_end, number = eph_manager.ephem_open()
        # Define the obs location, which is needed to calculate diurnal abb term
        # and polar wobble corrections
        site_loc = novas.make_on_surface(
            site_loc.lat.deg,  # latitude in deg
            site_loc.lon.deg,  # Longitude in deg
            site_loc.height.to_value("m"),  # Height in meters
            0.0,  # Temperature, set to 0 for now (no atm refrac)
            0.0,  # Pressure, set to 0 for now (no atm refrac)
        )
        # NOVAS wants things in terrestrial time and UT1
        tt_time_array = time_obj_array.tt.jd
        ut1_time_array = time_obj_array.ut1.jd
        gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad
        if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end):
            raise ValueError(
                "No current support for JPL ephems outside of 1700 - 2300 AD. "
                "Check back later (or possibly earlier)..."
            )
        # Broadcast output arrays against both time and coordinate shapes.
        app_ra = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
        app_dec = np.zeros(tt_time_array.shape) + np.zeros(ra_coord.shape)
        for idx in range(len(app_ra)):
            if multi_coord or (idx == 0):
                # Create a catalog entry for the source in question
                cat_entry = novas.make_cat_entry(
                    "dummy_name",  # Dummy source name
                    "GKK",  # Catalog ID, fixed for now
                    156,  # Star ID number, fixed for now
                    ra_coord[idx].to_value("hourangle"),
                    dec_coord[idx].to_value("deg"),
                    0.0
                    if pm_ra is None
                    else (
                        pm_ra_coord.to_value("mas/yr")
                        * np.cos(dec_coord[idx].to_value("rad"))
                    ),
                    0.0 if pm_dec is None else pm_dec_coord.to_value("mas/yr"),
                    0.0
                    if (dist is None or np.any(dist == 0.0))
                    else (d_coord.kiloparsec ** -1.0),  # parallax in mas
                    0.0 if (vrad is None) else v_coord.to_value("km/s"),
                )
            # Update polar wobble parameters for a given timestamp
            if multi_time or (idx == 0):
                gast = gast_array[idx]
                pm_x = pm_x_array[idx] * np.cos(gast) + pm_y_array[idx] * np.sin(gast)
                pm_y = pm_y_array[idx] * np.cos(gast) - pm_x_array[idx] * np.sin(gast)
                tt_time = tt_time_array[idx]
                ut1_time = ut1_time_array[idx]
                novas.cel_pole(
                    tt_time, 2, delta_x_array[idx], delta_y_array[idx],
                )
            # Calculate topocentric RA/Dec values
            [temp_ra, temp_dec] = novas.topo_star(
                tt_time,
                (tt_time - ut1_time) * 86400.0,
                cat_entry,
                site_loc,
                accuracy=0,
            )
            xyz_array = polar2_to_cart3(
                temp_ra * (np.pi / 12.0), temp_dec * (np.pi / 180.0)
            )
            xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1)
            app_ra[idx], app_dec[idx] = cart3_to_polar2(np.array(xyz_array))
    elif astrometry_library == "erfa":
        # liberfa wants things in radians
        pm_x_array *= np.pi / (3600.0 * 180.0)
        pm_y_array *= np.pi / (3600.0 * 180.0)
        # atco13 returns (azimuth, zenith dist, hour angle, dec, RA, eqn of origins);
        # we only need the apparent dec/RA and the equation of the origins.
        [_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13(
            ra_coord.to_value("rad"),
            dec_coord.to_value("rad"),
            0.0 if (pm_ra is None) else pm_ra_coord.to_value("rad/yr"),
            0.0 if (pm_dec is None) else pm_dec_coord.to_value("rad/yr"),
            0.0 if (dist is None or np.any(dist == 0.0)) else (d_coord.pc ** -1.0),
            0.0 if (vrad is None) else v_coord.to_value("km/s"),
            time_obj_array.utc.jd,
            0.0,
            time_obj_array.delta_ut1_utc,
            site_loc.lon.rad,
            site_loc.lat.rad,
            site_loc.height.to_value("m"),
            pm_x_array,
            pm_y_array,
            0,  # ait pressure, used for refraction (ignored)
            0,  # amb temperature, used for refraction (ignored)
            0,  # rel humidity, used for refraction (ignored)
            0,  # wavelength, used for refraction (ignored)
        )
        # Shift CIO-based RA to equinox-based apparent RA.
        app_ra = np.mod(app_ra - eqn_org, 2 * np.pi)
    return app_ra, app_dec
def transform_app_to_icrs(
    time_array, app_ra, app_dec, telescope_loc, astrometry_library="erfa",
):
    """
    Transform a set of coordinates in topocentric/apparent to ICRS coordinates.

    This utility uses either astropy or erfa to calculate the ICRS coordinates of
    a given set of apparent source coordinates. These coordinates are most typically
    used for defining the celestial/catalog position of a source. Note that at present,
    this is only implemented in astropy and pyERFA, although it could hypothetically
    be extended to NOVAS at some point.

    Parameters
    ----------
    time_array : float or ndarray of float
        Julian dates to calculate coordinate positions for. Can either be a single
        float, or an ndarray of shape (Ntimes,).
    app_ra : float or ndarray of float
        Apparent RA of the celestial target, expressed in units of radians. Can
        either be a single float or array of shape (Ncoord,). Note that if time_array
        is not a singleton value, then Ncoord must be equal to Ntimes.
    app_dec : float or ndarray of float
        Apparent Dec of the celestial target, expressed in units of radians. Can
        either be a single float or array of shape (Ncoord,). Note that if time_array
        is not a singleton value, then Ncoord must be equal to Ntimes.
    telescope_loc : tuple of floats or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
        of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
        in units of radians, radians, and meters, respectively.
    astrometry_library : str
        Library used for running the coordinate conversions. Allowed options are
        'erfa' (which uses pyERFA) and 'astropy' (which uses the astropy utilities).
        Default is erfa.

    Returns
    -------
    icrs_ra : ndarray of floats
        ICRS right ascension coordinates, in units of radians, of either shape
        (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
    icrs_dec : ndarray of floats
        ICRS declination coordinates, in units of radians, of either shape
        (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
    """
    # Make sure that the library requested is actually permitted
    if astrometry_library not in ["erfa", "astropy"]:
        raise ValueError(
            "Requested coordinate transformation library is not supported, please "
            "select either 'erfa' or 'astropy' for astrometry_library."
        )
    ra_coord = app_ra * units.rad
    dec_coord = app_dec * units.rad
    # Check here to make sure that ra_coord and dec_coord are the same length,
    # either 1 or len(time_array)
    multi_coord = ra_coord.size != 1
    if ra_coord.shape != dec_coord.shape:
        raise ValueError("app_ra and app_dec must be the same shape.")
    if isinstance(telescope_loc, EarthLocation):
        site_loc = telescope_loc
    else:
        # Tuple input is (lat, lon, alt) in radians/meters; EarthLocation wants
        # (lon, lat) in degrees, hence the swap and rad->deg conversion.
        site_loc = EarthLocation.from_geodetic(
            telescope_loc[1] * (180.0 / np.pi),
            telescope_loc[0] * (180.0 / np.pi),
            height=telescope_loc[2],
        )
    if isinstance(time_array, Time):
        time_obj_array = time_array
    else:
        time_obj_array = Time(time_array, format="jd", scale="utc")
    if time_obj_array.size != 1:
        if (time_obj_array.shape != ra_coord.shape) and multi_coord:
            raise ValueError(
                "time_array must be of either of length 1 (single "
                "float) or same length as ra and dec."
            )
    elif time_obj_array.ndim == 0:
        # Make the array at least 1-dimensional so we don't run into indexing
        # issues later.
        time_obj_array = Time([time_obj_array])
    if astrometry_library == "astropy":
        # Astropy has no direct apparent-RA/Dec frame, so go via AltAz: convert
        # (hour angle, dec) -> (az, el) with ERFA, then let astropy transform
        # the AltAz coordinates back to ICRS.
        az_coord, el_coord = erfa.hd2ae(
            np.mod(
                time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad
                - ra_coord.to_value("rad"),
                2 * np.pi,
            ),
            dec_coord.to_value("rad"),
            site_loc.lat.rad,
        )
        sky_coord = SkyCoord(
            az_coord * units.rad,
            el_coord * units.rad,
            frame="altaz",
            location=site_loc,
            obstime=time_obj_array,
        )
        coord_data = sky_coord.transform_to("icrs")
        icrs_ra = coord_data.ra.rad
        icrs_dec = coord_data.dec.rad
    elif astrometry_library == "erfa":
        # Get IERS data, which is needed for highest precision
        polar_motion_data = iers.earth_orientation_table.get()
        pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
        pm_x_array = pm_x_array.to_value("rad")
        pm_y_array = pm_y_array.to_value("rad")
        # Bias-precession-nutation matrix -> CIP coordinates -> CIO locator,
        # used to derive the equation of the origins (equinox vs CIO RA offset).
        bpn_matrix = erfa.pnm06a(time_obj_array.tt.jd, 0.0)
        cip_x, cip_y = erfa.bpn2xy(bpn_matrix)
        cio_s = erfa.s06(time_obj_array.tt.jd, 0.0, cip_x, cip_y)
        eqn_org = erfa.eors(bpn_matrix, cio_s)
        # Observed to ICRS via ERFA
        icrs_ra, icrs_dec = erfa.atoc13(
            "r",
            ra_coord.to_value("rad") + eqn_org,
            dec_coord.to_value("rad"),
            time_obj_array.utc.jd,
            0.0,  # Second half of the UT date, not needed
            time_obj_array.delta_ut1_utc,
            site_loc.lon.rad,
            site_loc.lat.rad,
            site_loc.height.value,  # NOTE(review): assumed meters — confirm unit
            pm_x_array,
            pm_y_array,
            0,  # ait pressure, used for refraction (ignored)
            0,  # amb temperature, used for refraction (ignored)
            0,  # rel humidity, used for refraction (ignored)
            0,  # wavelength, used for refraction (ignored)
        )
    # Return back the two RA/Dec arrays
    return icrs_ra, icrs_dec
def calc_parallactic_angle(
    app_ra, app_dec, lst_array, telescope_lat,
):
    """
    Calculate the parallactic angle between RA/Dec and the AltAz frame.
    Parameters
    ----------
    app_ra : ndarray of floats
        Array of apparent RA values in units of radians, shape (Ntimes,).
    app_dec : ndarray of floats
        Array of apparent dec values in units of radians, shape (Ntimes,).
    lst_array : float or ndarray of float
        Array of local apparent sidereal times to calculate position angle values
        for, in units of radians. Can either be a single float or an array of shape
        (Ntimes,).
    telescope_lat : float
        Latitude of the observatory, in units of radians.
    Returns
    -------
    float or ndarray of float
        Parallactic angle(s) in radians, shaped per the broadcast of the inputs
        (erfa.pas broadcasts its arguments).
    """
    # This is just a simple wrapper around the pas function in ERFA
    return erfa.pas(app_ra, app_dec, lst_array, telescope_lat)
def calc_frame_pos_angle(
    time_array,
    app_ra,
    app_dec,
    telescope_loc,
    ref_frame,
    ref_epoch=None,
    offset_pos=(np.pi / 360.0),
):
    """
    Calculate a position angle given apparent position and reference frame.
    This function is used to determine the position angle between the great
    circle of declination in apparent coordinates, versus that in a given
    reference frame. Note that this is slightly different than parallactic
    angle, which is the difference between apparent declination and elevation.
    Parameters
    ----------
    time_array : float or ndarray of floats
        Array of julian dates to calculate position angle values for, of shape
        (Ntimes,).
    app_ra : ndarray of floats
        Array of apparent RA values in units of radians, shape (Ntimes,).
    app_dec : ndarray of floats
        Array of apparent dec values in units of radians, shape (Ntimes,).
    telescope_loc : tuple of floats or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the observer.
        Can either be provided as an astropy EarthLocation, or an array-like of shape
        (3,) containing the latitude, longitude, and altitude, in that order, with
        units of radians, radians, and meters, respectively.
    ref_frame : str
        Coordinate frame to calculate position angles for. Can be any of the
        several supported frames in astropy (a limited list: fk4, fk5, icrs,
        gcrs, cirs, galactic). If None or "topo", an array of zeros is returned.
    ref_epoch : str or flt
        Epoch of the coordinates, only used when ref_frame = fk4 or fk5. Given
        in units of fractional years, either as a float or as a string with
        the epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
    offset_pos : float
        Distance of the offset position used to calculate the frame PA, in
        radians. Default is 0.5 degrees (pi / 360), which should be sufficient
        for most applications.
    Returns
    -------
    frame_pa : ndarray of floats
        Array of position angles, in units of radians.
    """
    # Check to see if the position angles should default to zero
    if (ref_frame is None) or (ref_frame == "topo"):
        # No-op detected, ENGAGE MAXIMUM SNARK!
        return np.zeros_like(time_array)
    # This creates an array of unique entries of ra + dec + time, since the processing
    # time for each element can be non-negligible, and entries along the Nblt axis can
    # be highly redundant.
    unique_mask = np.union1d(
        np.union1d(
            np.unique(app_ra, return_index=True)[1],
            np.unique(app_dec, return_index=True)[1],
        ),
        np.unique(time_array, return_index=True)[1],
    )
    # Pluck out the unique entries for each
    unique_ra = app_ra[unique_mask]
    unique_dec = app_dec[unique_mask]
    unique_time = time_array[unique_mask]
    # Figure out how many elements we need to transform
    n_coord = len(unique_mask)
    # Offset the north/south positions by offset_pos (bug fix: this parameter was
    # previously ignored in favor of a hard-coded pi/360), so the PA is measured
    # over an arc of 2 * offset_pos.
    up_dec = unique_dec + offset_pos
    dn_dec = unique_dec - offset_pos
    # Explicit copies are required here: the pole-wrapping below mutates these
    # arrays in place, and the previous `up_ra = dn_ra = unique_ra` aliased all
    # three names to a single array, corrupting unique_ra (and thus the
    # back-fill matching at the end of this function) for near-pole sources.
    up_ra = unique_ra.copy()
    dn_ra = unique_ra.copy()
    # Wrap the positions if they happen to go over the poles. Mirroring over the
    # north pole maps dec -> pi - dec; over the south pole, dec -> -pi - dec; in
    # both cases RA picks up a half-turn. (Bug fix: the southern branch
    # previously applied mismatched masks on the two sides of the assignment
    # and used the northern mirror formula.)
    north_mask = up_dec > (np.pi / 2.0)
    up_ra[north_mask] = np.mod(up_ra[north_mask] + np.pi, 2.0 * np.pi)
    up_dec[north_mask] = np.pi - up_dec[north_mask]
    south_mask = (-dn_dec) > (np.pi / 2.0)
    dn_ra[south_mask] = np.mod(dn_ra[south_mask] + np.pi, 2.0 * np.pi)
    dn_dec[south_mask] = -np.pi - dn_dec[south_mask]
    # Run the set of offset coordinates through the "reverse" transform. The two offset
    # positions are concat'd together to help reduce overheads
    ref_ra, ref_dec = calc_sidereal_coords(
        np.tile(unique_time, 2),
        np.concatenate((dn_ra, up_ra)),
        np.concatenate((dn_dec, up_dec)),
        telescope_loc,
        ref_frame,
        coord_epoch=ref_epoch,
    )
    # Use the pas function from ERFA to calculate the position angle. The negative sign
    # is here because we're measuring PA of app -> frame, but we want frame -> app.
    unique_pa = -erfa.pas(
        ref_ra[:n_coord], ref_dec[:n_coord], ref_ra[n_coord:], ref_dec[n_coord:]
    )
    # Finally, we have to go back through and "fill in" the redundant entries
    frame_pa = np.zeros_like(app_ra)
    for idx in range(n_coord):
        select_mask = np.logical_and(
            np.logical_and(unique_ra[idx] == app_ra, unique_dec[idx] == app_dec),
            unique_time[idx] == time_array,
        )
        frame_pa[select_mask] = unique_pa[idx]
    return frame_pa
def lookup_jplhorizons(
    target_name,
    time_array,
    telescope_loc=None,
    high_cadence=False,
    force_indv_lookup=None,
):
    """
    Lookup solar system body coordinates via the JPL-Horizons service.
    This utility is useful for generating ephemerides, which can then be interpolated in
    order to provide positional data for a target which is moving, such as planetary
    bodies and other solar system objects. Use of this function requires the
    installation of the `astroquery` module.
    Parameters
    ----------
    target_name : str
        Name of the target to gather an ephemeris for. Must match the name
        in the JPL-Horizons database.
    time_array : array-like of float
        Times in UTC Julian days to gather an ephemeris for.
    telescope_loc : array-like of float or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the observer. Must
        be an array-like of shape (3,) containing the latitude, longitude, and
        altitude, in that order, with units of radians, radians, and meters,
        respectively, or an astropy EarthLocation. If None, the geocentric
        position is reported.
    high_cadence : bool
        If set to True, will calculate ephemeris points every 3 minutes in time, as
        opposed to the default of every 3 hours.
    force_indv_lookup : bool
        If set to True, will calculate coordinate values for each value found within
        `time_array`. If False, a regularized time grid is sampled that encloses the
        values contained within `time_array`. Default is False, unless `time_array` is
        of length 1, in which the default is set to True.
    Returns
    -------
    ephem_times : ndarray of float
        Times for which the ephemeris values were calculated, in UTC Julian days.
    ephem_ra : ndarray of float
        ICRS Right ascension of the target at the values within `ephem_times`, in
        units of radians.
    ephem_dec : ndarray of float
        ICRS Declination of the target at the values within `ephem_times`, in units
        of radians.
    ephem_dist : ndarray of float
        Distance of the target relative to the observer, at the values within
        `ephem_times`. Values are taken directly from the JPL-Horizons "delta"
        column, which is in units of AU. NOTE(review): an earlier version of
        this docstring said parsecs -- confirm downstream unit handling.
    ephem_vel : ndarray of float
        Velocity of the targets relative to the observer, at the values within
        `ephem_times`, in units of km/sec.
    """
    try:
        from astroquery.jplhorizons import Horizons
    except ImportError as err:  # pragma: no cover
        raise ImportError(
            "astroquery is not installed but is required for "
            "planet ephemeris functionality"
        ) from err
    from pyuvdata.data import DATA_PATH
    from os.path import join as path_join
    from json import load as json_load
    # Get the telescope location into a format that JPL-Horizons can understand,
    # which is nominally a dict w/ entries for lon (units of deg), lat (units of
    # deg), and elevation (units of km).
    if isinstance(telescope_loc, EarthLocation):
        site_loc = {
            "lon": telescope_loc.lon.deg,
            "lat": telescope_loc.lat.deg,
            "elevation": telescope_loc.height.to_value(unit=units.km),
        }
    elif telescope_loc is None:
        # Setting to None will report the geocentric position
        site_loc = None
    else:
        # Assume (lat, lon, alt) in (rad, rad, m) and convert to deg/deg/km
        site_loc = {
            "lon": telescope_loc[1] * (180.0 / np.pi),
            "lat": telescope_loc[0] * (180.0 / np.pi),
            "elevation": telescope_loc[2] * (0.001),  # m -> km
        }
    # If force_indv_lookup is True, or unset but only providing a single value, then
    # just calculate the RA/Dec for the times requested rather than creating a table
    # to interpolate from.
    if force_indv_lookup or (
        (np.array(time_array).size == 1) and (force_indv_lookup is None)
    ):
        epoch_list = np.unique(time_array)
        if len(epoch_list) > 50:
            raise ValueError(
                "Requesting too many individual ephem points from JPL-Horizons. This "
                "can be remedied by setting force_indv_lookup=False or limiting the "
                "number of values in time_array."
            )
    else:
        # When querying for multiple times, its faster (and kinder to the
        # good folks at JPL) to create a range to query, and then interpolate
        # between values. The extra buffer of 0.001 or 0.25 days for high and
        # low cadence is to give enough data points to allow for spline
        # interpolation of the data.
        if high_cadence:
            start_time = np.min(time_array) - 0.001
            stop_time = np.max(time_array) + 0.001
            step_time = "3m"
            n_entries = (stop_time - start_time) * (1440.0 / 3.0)
        else:
            # The start/stop time here are setup to maximize reusability of the
            # data, since astroquery appears to cache the results from previous
            # queries.
            start_time = (0.25 * np.floor(4.0 * np.min(time_array))) - 0.25
            stop_time = (0.25 * np.ceil(4.0 * np.max(time_array))) + 0.25
            step_time = "3h"
            n_entries = (stop_time - start_time) * (24.0 / 3.0)
        # We don't want to overtax the JPL service, so limit ourselves to 1000
        # individual queries at a time. Note that this is likely a conservative
        # cap for JPL-Horizons, but there should be exceptionally few applications
        # that actually require more than this.
        if n_entries > 1000:
            if (len(np.unique(time_array)) <= 50) and (force_indv_lookup is None):
                # If we have a _very_ sparse set of epochs, pass that along instead
                epoch_list = np.unique(time_array)
            else:
                # Otherwise, time to raise an error
                raise ValueError(
                    "Too many ephem points requested from JPL-Horizons. This "
                    "can be remedied by setting high_cadance=False or limiting "
                    "the number of values in time_array."
                )
        else:
            epoch_list = {
                "start": Time(start_time, format="jd").isot,
                "stop": Time(stop_time, format="jd").isot,
                "step": step_time,
            }
    # Check to make sure dates are within the 1700-2200 time range,
    # since not all targets are supported outside of this range.
    # NOTE(review): the JD bounds below correspond to roughly 1700-2200 AD,
    # while the error message text says 2300 -- confirm which is intended.
    if (np.min(time_array) < 2341973.0) or (np.max(time_array) > 2524593.0):
        raise ValueError(
            "No current support for JPL ephems outside of 1700 - 2300 AD. "
            "Check back later (or possibly earlier)..."
        )
    # JPL-Horizons has a separate catalog with what it calls 'major bodies',
    # and will throw an error if you use the wrong catalog when calling for
    # astrometry. We'll use the dict below to capture this behavior.
    with open(path_join(DATA_PATH, "jpl_major_bodies.json"), "r") as fhandle:
        major_body_dict = json_load(fhandle)
    target_id = target_name
    id_type = "smallbody"
    # If we find the target in the major body database, then we can extract the
    # target ID to make the query a bit more robust (otherwise JPL-Horizons will fail
    # on account that id will find multiple partial matches: e.g., "Mars" will be
    # matched with "Mars", "Mars Explorer", "Mars Barycenter"..., and JPL-Horizons will
    # not know which to choose).
    if target_name in major_body_dict.keys():
        target_id = major_body_dict[target_name]
        id_type = "majorbody"
    query_obj = Horizons(
        id=target_id, location=site_loc, epochs=epoch_list, id_type=id_type,
    )
    # If not in the major bodies catalog, try the minor bodies list, and if
    # still not found, throw an error.
    try:
        ephem_data = query_obj.ephemerides(extra_precision=True)
    except KeyError:
        # This is a fix for an astroquery + JPL-Horizons bug, that's related to
        # API change on JPL's side. In this case, the source is identified, but
        # astroquery can't correctly parse the return message from JPL-Horizons.
        # See astroquery issue #2169.
        ephem_data = query_obj.ephemerides(extra_precision=False)  # pragma: no cover
    except ValueError as err:
        query_obj._session.close()
        raise ValueError(
            "Target ID is not recognized in either the small or major bodies "
            "catalogs, please consult the JPL-Horizons database for supported "
            "targets (https://ssd.jpl.nasa.gov/?horizons)."
        ) from err
    # This is explicitly closed here to trap a bug that occasionally throws an
    # unexpected warning, see astroquery issue #1807
    query_obj._session.close()
    # Now that we have the ephem data, extract out the relevant data
    ephem_times = np.array(ephem_data["datetime_jd"])
    ephem_ra = np.array(ephem_data["RA"]) * (np.pi / 180.0)
    ephem_dec = np.array(ephem_data["DEC"]) * (np.pi / 180.0)
    ephem_dist = np.array(ephem_data["delta"])  # AU (JPL "delta" column)
    ephem_vel = np.array(ephem_data["delta_rate"])  # km/s
    return ephem_times, ephem_ra, ephem_dec, ephem_dist, ephem_vel
def interpolate_ephem(
    time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None,
):
    """
    Interpolate ephemerides to give positions for requested times.
    This is a simple tool for calculating interpolated RA and Dec positions, as well
    as distances and velocities, for a given ephemeris. Under the hood, the method
    uses a cubic spline interpolation to calculate values at the requested times,
    provided that there are enough values to interpolate over to do so (requires
    >= 4 points), otherwise a linear interpolation is used.
    Parameters
    ----------
    time_array : array-like of floats
        Times to interpolate positions for, in UTC Julian days.
    ephem_times : array-like of floats
        Times in UTC Julian days which match the recorded positions of the target.
        Must be array-like, of shape (Npts,), where Npts is the number of
        ephemeris points.
    ephem_ra : array-like of floats
        Right ascension of the target, at the times given in `ephem_times`. Units are
        in radians, must have the same shape as `ephem_times`.
    ephem_dec : array-like of floats
        Declination of the target, at the times given in `ephem_times`. Units are
        in radians, must have the same shape as `ephem_times`.
    ephem_dist : array-like of floats
        Distance of the target from the observer, at the times given in `ephem_times`.
        Optional argument, in units of parsecs. Must have the same shape as
        `ephem_times`.
    ephem_vel : array-like of floats
        Velocities of the target, at the times given in `ephem_times`. Optional
        argument, in units of km/sec. Must have the same shape as `ephem_times`.
    Returns
    -------
    ra_vals : ndarray of float
        Interpolated RA values, returned as an ndarray of floats with
        units of radians, and the same shape as `time_array`.
    dec_vals : ndarray of float
        Interpolated declination values, returned as an ndarray of floats with
        units of radians, and the same shape as `time_array`.
    dist_vals : None or ndarray of float
        If `ephem_dist` was provided, an ndarray of floats (with same shape as
        `time_array`) with the interpolated target distances, in units of parsecs.
        If `ephem_dist` was not provided, this returns as None.
    vel_vals : None or ndarray of float
        If `ephem_vel` was provided, an ndarray of floats (with same shape as
        `time_array`) with the interpolated target velocities, in units of km/sec.
        If `ephem_vel` was not provided, this returns as None.
    Raises
    ------
    ValueError
        If any supplied ephemeris array does not match the shape of `ephem_times`.
    """
    # We're importing this here since it's only used for this one function
    from scipy.interpolate import interp1d

    ephem_shape = np.array(ephem_times).shape
    # Make sure that things look reasonable
    if np.array(ephem_ra).shape != ephem_shape:
        raise ValueError("ephem_ra must have the same shape as ephem_times.")
    if np.array(ephem_dec).shape != ephem_shape:
        raise ValueError("ephem_dec must have the same shape as ephem_times.")
    # Check the optional arrays only when they're supplied. (The previous check
    # order evaluated np.array(None) first and so could never raise for an
    # absent argument anyway -- the None test belongs in front.)
    if ephem_dist is not None and np.array(ephem_dist).shape != ephem_shape:
        raise ValueError("ephem_dist must have the same shape as ephem_times.")
    if ephem_vel is not None and np.array(ephem_vel).shape != ephem_shape:
        raise ValueError("ephem_vel must have the same shape as ephem_times.")

    ra_vals = np.zeros_like(time_array, dtype=float)
    dec_vals = np.zeros_like(time_array, dtype=float)
    dist_vals = None if ephem_dist is None else np.zeros_like(time_array, dtype=float)
    vel_vals = None if ephem_vel is None else np.zeros_like(time_array, dtype=float)

    if len(ephem_times) == 1:
        # Single-point ephemeris: treat the target position as constant in time
        ra_vals += ephem_ra
        dec_vals += ephem_dec
        if ephem_dist is not None:
            dist_vals += ephem_dist
        if ephem_vel is not None:
            vel_vals += ephem_vel
    else:
        # Cubic splines require >= 4 points; otherwise fall back to linear
        interp_kind = "cubic" if len(ephem_times) > 3 else "linear"

        def _interp_into(out_vals, tab_vals, mask, kind):
            # Fill out_vals[mask] by interpolating tab_vals at time_array[mask].
            # Skipped when the ephemeris quantity was not supplied or no times
            # fall in this mask.
            if tab_vals is not None and np.any(mask):
                out_vals[mask] = interp1d(ephem_times, tab_vals, kind=kind)(
                    time_array[mask]
                )

        # Times lining up exactly with tabulated points take the tabulated value
        # directly (via "nearest"); everything else is spline/linear interpolated.
        exact_mask = np.isin(time_array, ephem_times)
        for kind, mask in (("nearest", exact_mask), (interp_kind, ~exact_mask)):
            _interp_into(ra_vals, ephem_ra, mask, kind)
            _interp_into(dec_vals, ephem_dec, mask, kind)
            _interp_into(dist_vals, ephem_dist, mask, kind)
            _interp_into(vel_vals, ephem_vel, mask, kind)
    return (ra_vals, dec_vals, dist_vals, vel_vals)
def calc_app_coords(
    lon_coord,
    lat_coord,
    coord_frame="icrs",
    coord_epoch=None,
    coord_times=None,
    coord_type="sidereal",
    time_array=None,
    lst_array=None,
    telescope_loc=None,
    pm_ra=None,
    pm_dec=None,
    vrad=None,
    dist=None,
):
    """
    Calculate apparent coordinates for several different coordinate types.
    This function calculates apparent positions at the current epoch.
    Parameters
    ----------
    lon_coord : float or ndarray of float
        Longitudinal (e.g., RA) coordinates, units of radians. Must match the same
        shape as lat_coord.
    lat_coord : float or ndarray of float
        Latitudinal (e.g., Dec) coordinates, units of radians. Must match the same
        shape as lon_coord.
    coord_frame : string
        Reference frame of the input coordinates, can be any frame that is
        presently supported by astropy. Default is ICRS.
    coord_epoch : float or str or Time object
        Epoch for coord_frame, nominally only used if converting to either the FK4 or
        FK5 frames, in units of fractional years. If provided as a float and the
        coord_frame is an FK4-variant, value will assumed to be given in Besselian
        years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be
        in Julian years.
    coord_times : float or ndarray of float
        Only used when `coord_type="ephem"`, the JD UTC time for each value of
        `lon_coord` and `lat_coord`. These values are used to interpolate `lon_coord`
        and `lat_coord` values to those times listed in `time_array`.
    coord_type : str
        Type of source to calculate coordinates for. Must be one of:
        "sidereal" (fixed RA/Dec),
        "ephem" (RA/Dec that moves with time),
        "driftscan" (fixed az/el position),
        "unphased" (alias for "driftscan" with (Az, Alt) = (0 deg, 90 deg)).
    time_array : float or ndarray of float or Time object
        Times for which the apparent coordinates are calculated, in UTC JD. If more
        than a single element, must be the same shape as lon_coord and lat_coord if
        both of those are arrays (instead of single floats).
    lst_array : float or ndarray of float
        Local apparent sidereal times matching `time_array`, in units of radians.
        Only used for the "driftscan" and "unphased" coordinate types; if not
        supplied, LSTs are computed from `time_array` and `telescope_loc`.
    telescope_loc : array-like of floats or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
        of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
        in units of radians, radians, and meters, respectively.
    pm_ra : float or ndarray of float
        Proper motion in RA of the source, expressed in units of milliarcsec / year.
        Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely ra_coord and dec_coord). Not
        required, motion is calculated relative to the value of `coord_epoch`.
    pm_dec : float or ndarray of float
        Proper motion in Dec of the source, expressed in units of milliarcsec / year.
        Can either be a single float or array of shape (Ntimes,), although this must
        be consistent with other parameters (namely ra_coord and dec_coord). Not
        required, motion is calculated relative to the value of `coord_epoch`.
    vrad : float or ndarray of float
        Radial velocity of the source, expressed in units of km / sec. Can either be
        a single float or array of shape (Ntimes,), although this must be consistent
        with other parameters (namely ra_coord and dec_coord). Not required.
    dist : float or ndarray of float
        Distance of the source, expressed in milliarcseconds. Can either be a single
        float or array of shape (Ntimes,), although this must be consistent with other
        parameters (namely ra_coord and dec_coord). Not required.
    Returns
    -------
    app_ra : ndarray of floats
        Apparent right ascension coordinates, in units of radians.
    app_dec : ndarray of floats
        Apparent declination coordinates, in units of radians.
    """
    # Normalize the telescope location into an EarthLocation
    if isinstance(telescope_loc, EarthLocation):
        site_loc = telescope_loc
    else:
        site_loc = EarthLocation.from_geodetic(
            telescope_loc[1] * (180.0 / np.pi),
            telescope_loc[0] * (180.0 / np.pi),
            height=telescope_loc[2],
        )
    # Time objects and unique don't seem to play well together, so we break apart
    # their handling here
    if isinstance(time_array, Time):
        unique_time_array, unique_mask = np.unique(time_array.utc.jd, return_index=True)
    else:
        unique_time_array, unique_mask = np.unique(time_array, return_index=True)
    # LSTs are only needed for the az/el-style coordinate types
    if coord_type in ["driftscan", "unphased"]:
        if lst_array is None:
            unique_lst = get_lst_for_time(
                unique_time_array,
                site_loc.lat.deg,
                site_loc.lon.deg,
                site_loc.height.to_value("m"),
            )
        else:
            unique_lst = lst_array[unique_mask]
    if coord_type == "sidereal":
        # If the coordinates are not in the ICRS frame, go ahead and transform them now
        if coord_frame != "icrs":
            icrs_ra, icrs_dec = transform_sidereal_coords(
                lon_coord,
                lat_coord,
                coord_frame,
                "icrs",
                in_coord_epoch=coord_epoch,
                time_array=unique_time_array,
            )
        else:
            icrs_ra = lon_coord
            icrs_dec = lat_coord
        unique_app_ra, unique_app_dec = transform_icrs_to_app(
            unique_time_array,
            icrs_ra,
            icrs_dec,
            site_loc,
            pm_ra=pm_ra,
            pm_dec=pm_dec,
            vrad=vrad,
            dist=dist,
        )
    elif coord_type == "driftscan":
        # Use the ERFA function ae2hd, which will do all the heavy
        # lifting for us
        unique_app_ha, unique_app_dec = erfa.ae2hd(
            lon_coord, lat_coord, site_loc.lat.rad
        )
        # The above returns HA/Dec, so we just need to rotate by
        # the LST to get back app RA and Dec
        unique_app_ra = np.mod(unique_app_ha + unique_lst, 2 * np.pi)
        unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra)
    elif coord_type == "ephem":
        # Interpolate the tabulated ephemeris onto the unique times first
        interp_ra, interp_dec, _, _ = interpolate_ephem(
            unique_time_array, coord_times, lon_coord, lat_coord,
        )
        if coord_frame != "icrs":
            icrs_ra, icrs_dec = transform_sidereal_coords(
                interp_ra,
                interp_dec,
                coord_frame,
                "icrs",
                in_coord_epoch=coord_epoch,
                time_array=unique_time_array,
            )
        else:
            icrs_ra = interp_ra
            icrs_dec = interp_dec
        # TODO: Vel and distance handling to be integrated here, once they are
        # needed for velocity frame tracking
        unique_app_ra, unique_app_dec = transform_icrs_to_app(
            unique_time_array, icrs_ra, icrs_dec, site_loc, pm_ra=pm_ra, pm_dec=pm_dec,
        )
    elif coord_type == "unphased":
        # This is the easiest one - this is just supposed to be ENU, so set the
        # apparent coords to the current lst and telescope_lon.
        unique_app_ra = unique_lst.copy()
        unique_app_dec = np.zeros_like(unique_app_ra) + site_loc.lat.rad
    else:
        raise ValueError("Object type %s is not recognized." % coord_type)
    # Now that we've calculated all the unique values, time to backfill through the
    # "redundant" entries in the Nblt axis.
    app_ra = np.zeros(np.array(time_array).shape)
    app_dec = np.zeros(np.array(time_array).shape)
    # Need this promotion in order to match entries
    if isinstance(time_array, Time):
        unique_time_array = Time(unique_time_array, format="jd", scale="utc")
    # Scatter each unique result back to every matching time entry
    for idx, unique_time in enumerate(unique_time_array):
        select_mask = time_array == unique_time
        app_ra[select_mask] = unique_app_ra[idx]
        app_dec[select_mask] = unique_app_dec[idx]
    return app_ra, app_dec
def calc_sidereal_coords(
    time_array, app_ra, app_dec, telescope_loc, coord_frame, coord_epoch=None,
):
    """
    Calculate sidereal coordinates given apparent coordinates.
    This function calculates coordinates in the requested frame (at a given epoch)
    from a set of apparent coordinates.
    Parameters
    ----------
    time_array : float or ndarray of float or Time object
        Times for which the apparent coordinates were calculated, in UTC JD. Must
        match the shape of app_ra and app_dec.
    app_ra : float or ndarray of float
        Array of apparent right ascension coordinates, units of radians. Must match
        the shape of time_array and app_dec.
    app_dec : float or ndarray of float
        Array of apparent declination coordinates, units of radians. Must match
        the shape of time_array and app_ra.
    telescope_loc : tuple of floats or EarthLocation
        ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
        of the array. Can either be provided as an astropy EarthLocation, or a tuple
        of shape (3,) containing (in order) the latitude, longitude, and altitude,
        in units of radians, radians, and meters, respectively.
    coord_frame : string
        The requested reference frame for the output coordinates, can be any frame
        that is presently supported by astropy. Default is ICRS.
    coord_epoch : float or str or Time object
        Epoch for ref_frame, nominally only used if converting to either the FK4 or
        FK5 frames, in units of fractional years. If provided as a float and the
        ref_frame is an FK4-variant, value will assumed to be given in Besselian
        years (i.e., 1950 would be 'B1950'), otherwise the year is assumed to be
        in Julian years.
    Returns
    -------
    ref_ra : ndarray of floats
        Right ascension coordinates in the requested frame, in units of radians.
        Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
    ref_dec : ndarray of floats
        Declination coordinates in the requested frame, in units of radians.
        Either shape (Ntimes,) if Ntimes >1, otherwise (Ncoord,).
    """
    # Normalize the supplied epoch into an astropy Time object (or None)
    if coord_epoch is None:
        epoch = None
    elif isinstance(coord_epoch, (str, Time)):
        # Strings and Time objects need no further interpretation
        epoch = Time(coord_epoch)
    elif coord_frame.lower() in ["fk4", "fk4noeterms"]:
        # FK4-variant frames take Besselian years
        epoch = Time(coord_epoch, format="byear")
    else:
        epoch = Time(coord_epoch, format="jyear")
    # First hop: apparent coordinates -> ICRS
    icrs_ra, icrs_dec = transform_app_to_icrs(
        time_array, app_ra, app_dec, telescope_loc
    )
    # Second hop (only if needed): ICRS -> requested frame at requested epoch
    if coord_frame == "icrs":
        return icrs_ra, icrs_dec
    return transform_sidereal_coords(
        icrs_ra,
        icrs_dec,
        "icrs",
        coord_frame,
        out_coord_epoch=epoch,
        time_array=time_array,
    )
def get_lst_for_time(
    jd_array, latitude, longitude, altitude, astrometry_library="erfa"
):
    """
    Get the local apparent sidereal time for a set of jd times at an earth location.
    This function calculates the local apparent sidereal time (LAST), given a UTC time
    and a position on the Earth, using either the pyERFA, astropy, or NOVAS
    libraries. It is important to note that there is an approximate 20 microsecond
    difference between the methods, presumably due to small differences in the
    apparent reference frame. These differences will cancel out when calculating
    coordinates in the TOPO frame, so long as apparent coordinates are calculated
    using the same library (i.e., astropy or NOVAS). Failing to do so can introduce
    errors up to ~1 mas in the horizontal coordinate system (i.e., AltAz).
    Parameters
    ----------
    jd_array : ndarray of float
        JD times to get lsts for.
    latitude : float
        Latitude of location to get lst for in degrees.
    longitude : float
        Longitude of location to get lst for in degrees.
    altitude : float
        Altitude of location to get lst for in meters.
    astrometry_library : str
        Library used for running the LST calculations. Allowed options are 'erfa'
        (which uses the pyERFA), 'novas' (which uses the python-novas library),
        and 'astropy' (which uses the astropy utilities). Default is erfa.
    Returns
    -------
    ndarray of float
        LASTs in radians corresponding to the jd_array.
    Raises
    ------
    ValueError
        If astrometry_library is not one of 'erfa', 'astropy', or 'novas'.
    """
    # Fail fast on a bad library name -- previously an unrecognized value fell
    # through every branch and silently returned an array of zeros.
    if astrometry_library not in ("erfa", "astropy", "novas"):
        raise ValueError(
            "Requested astrometry library is not supported, please select one "
            "of 'erfa', 'astropy', or 'novas' for astrometry_library."
        )
    if isinstance(jd_array, np.ndarray):
        lst_array = np.zeros_like(jd_array)
    else:
        lst_array = np.zeros(1)
    # Only compute each unique JD once, then scatter results back via reverse_inds
    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
    times = Time(
        jd,
        format="jd",
        scale="utc",
        location=(Angle(longitude, unit="deg"), Angle(latitude, unit="deg"), altitude),
    )
    if iers.conf.auto_max_age is None:  # pragma: no cover
        delta, status = times.get_delta_ut1_utc(return_status=True)
        if np.any(
            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
        ):
            warnings.warn(
                "time is out of IERS range, setting delta ut1 utc to "
                "extrapolated value"
            )
        times.delta_ut1_utc = delta
    if astrometry_library == "erfa":
        # This appears to be what astropy is using under the hood,
        # so it _should_ be totally consistent.
        gast_array = erfa.gst06a(times.ut1.jd, 0.0, times.tt.jd, 0.0)
        # GAST -> LAST requires adding the (east) longitude, converted to radians
        lst_array = np.mod(gast_array + (longitude * (np.pi / 180.0)), 2.0 * np.pi)[
            reverse_inds
        ]
    elif astrometry_library == "astropy":
        lst_array = times.sidereal_time("apparent").radian[reverse_inds]
    elif astrometry_library == "novas":
        # Import the NOVAS library only if it's needed/available.
        try:
            from novas import compat as novas
            from novas.compat import eph_manager
            import novas_de405  # noqa
        except ImportError as e:  # pragma: no cover
            raise ImportError(
                "novas and/or novas_de405 are not installed but is required for "
                "NOVAS functionality"
            ) from e
        jd_start, jd_end, number = eph_manager.ephem_open()
        tt_time_array = times.tt.value
        ut1_time_array = times.ut1.value
        # Interpolate the CIP offsets from the IERS tables onto our times
        polar_motion_data = iers.earth_orientation_table.get()
        delta_x_array = np.interp(
            times.mjd,
            polar_motion_data["MJD"].value,
            polar_motion_data["dX_2000A_B"].value,
            left=0.0,
            right=0.0,
        )
        delta_y_array = np.interp(
            times.mjd,
            polar_motion_data["MJD"].value,
            polar_motion_data["dY_2000A_B"].value,
            left=0.0,
            right=0.0,
        )
        # Catch the case where we don't have CIP delta values yet (they don't typically
        # have predictive values like the polar motion does)
        delta_x_array[np.isnan(delta_x_array)] = 0.0
        delta_y_array[np.isnan(delta_y_array)] = 0.0
        for idx in range(len(times)):
            novas.cel_pole(
                tt_time_array[idx], 2, delta_x_array[idx], delta_y_array[idx]
            )
            # The NOVAS routine will return Greenwich Apparent Sidereal Time (GAST),
            # in units of hours
            lst_array[reverse_inds == idx] = novas.sidereal_time(
                ut1_time_array[idx],
                0.0,
                (tt_time_array[idx] - ut1_time_array[idx]) * 86400.0,
            )
        # Add the telescope lon to convert from GAST to LAST (local)
        lst_array = np.mod(lst_array + (longitude / 15.0), 24.0)
        # Convert from hours back to rad
        lst_array *= np.pi / 12.0
    return lst_array
def _adj_list(vecs, tol, n_blocks=None):
"""Identify neighbors of each vec in vecs, to distance tol."""
n_items = len(vecs)
max_items = 2 ** 10 # Max array size used is max_items**2. Avoid using > 1 GiB
if n_blocks is None:
n_blocks = max(n_items // max_items, 1)
# We may sort blocks so that some pairs of blocks may be skipped.
# Reorder vectors by x.
order = np.argsort(vecs[:, 0])
blocks = np.array_split(order, n_blocks)
adj = [{k} for k in range(n_items)] # Adjacency lists
for b1 in blocks:
for b2 in blocks:
v1, v2 = vecs[b1], vecs[b2]
# Check for no overlap, with tolerance.
xmin1 = v1[0, 0] - tol
xmax1 = v1[-1, 0] + tol
xmin2 = v2[0, 0] - tol
xmax2 = v2[-1, 0] + tol
if max(xmin1, xmin2) > min(xmax1, xmax2):
continue
adj_mat = cdist(vecs[b1], vecs[b2]) < tol
for bi, col in enumerate(adj_mat):
adj[b1[bi]] = adj[b1[bi]].union(b2[col])
return [frozenset(g) for g in adj]
def _find_cliques(adj, strict=False):
n_items = len(adj)
loc_gps = []
visited = np.zeros(n_items, dtype=bool)
for k in range(n_items):
if visited[k]:
continue
a0 = adj[k]
visited[k] = True
if all(adj[it].__hash__() == a0.__hash__() for it in a0):
group = list(a0)
group.sort()
visited[list(a0)] = True
loc_gps.append(group)
# Require all adjacency lists to be isolated maximal cliques:
if strict:
if not all(sorted(st) in loc_gps for st in adj):
raise ValueError("Non-isolated cliques found in graph.")
return loc_gps
def find_clusters(location_ids, location_vectors, tol, strict=False):
    """
    Find clusters of vectors (e.g. redundant baselines, times).

    Parameters
    ----------
    location_ids : array_like of int
        ID labels for locations.
    location_vectors : array_like of float
        location vectors, can be multidimensional
    tol : float
        tolerance for clusters
    strict : bool
        Require that all adjacency lists be isolated maximal cliques.
        This ensures that vectors do not fall into multiple clusters.
        Default: False

    Returns
    -------
    list of list of location_ids
    """
    vecs = np.asarray(location_vectors)
    ids = np.asarray(location_ids)
    # Promote 1-D input to a column vector so pairwise distances are defined.
    if vecs.ndim == 1:
        vecs = vecs[:, np.newaxis]
    # Adjacency sets within tol, then isolated maximal cliques.
    cliques = _find_cliques(_adj_list(vecs, tol), strict=strict)
    # Map clique member indices back to the caller's ID labels, sorted.
    return [np.sort(ids[gp]).tolist() for gp in cliques]
def get_baseline_redundancies(baselines, baseline_vecs, tol=1.0, with_conjugates=False):
    """
    Find redundant baseline groups.
    Parameters
    ----------
    baselines : array_like of int
        Baseline numbers, shape (Nbls,)
    baseline_vecs : array_like of float
        Baseline vectors in meters, shape (Nbls, 3)
    tol : float
        Absolute tolerance of redundancy, in meters.
    with_conjugates : bool
        Option to include baselines that are redundant when flipped.
    Returns
    -------
    baseline_groups : list of lists of int
        list of lists of redundant baseline numbers
    vec_bin_centers : list of array_like of float
        List of vectors describing redundant group centers
    lengths : list of float
        List of redundant group baseline lengths in meters
    baseline_ind_conj : list of int
        List of baselines that are redundant when reversed. Only returned if
        with_conjugates is True
    """
    Nbls = baselines.shape[0]
    if not baseline_vecs.shape == (Nbls, 3):
        raise ValueError("Baseline vectors must be shape (Nbls, 3)")
    baseline_vecs = copy.copy(baseline_vecs)  # Protect the vectors passed in.
    if with_conjugates:
        conjugates = []
        # Flag vectors pointing into the negative-u half-space (with tol-sized
        # slack on the u == 0 and v == 0 planes) so they can be flipped into a
        # u > 0 convention before grouping.
        for bv in baseline_vecs:
            uneg = bv[0] < -tol
            uzer = np.isclose(bv[0], 0.0, atol=tol)
            vneg = bv[1] < -tol
            vzer = np.isclose(bv[1], 0.0, atol=tol)
            wneg = bv[2] < -tol
            conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg))
        conjugates = np.array(conjugates, dtype=bool)
        baseline_vecs[conjugates] *= -1
        baseline_ind_conj = baselines[conjugates]
        # Recurse on the flipped vectors; report the conjugated baseline
        # numbers back to the caller alongside the groups.
        bl_gps, vec_bin_centers, lens = get_baseline_redundancies(
            baselines, baseline_vecs, tol=tol, with_conjugates=False
        )
        return bl_gps, vec_bin_centers, lens, baseline_ind_conj
    try:
        # strict=True: every baseline must fall into exactly one group.
        bl_gps = find_clusters(baselines, baseline_vecs, tol, strict=True)
    except ValueError as exc:
        raise ValueError(
            "Some baselines are falling into multiple"
            " redundant groups. Lower the tolerance to resolve ambiguity."
        ) from exc
    n_unique = len(bl_gps)
    vec_bin_centers = np.zeros((n_unique, 3))
    # Group center vector = mean of the member baseline vectors.
    for gi, gp in enumerate(bl_gps):
        inds = [np.where(i == baselines)[0] for i in gp]
        vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0)
    lens = np.sqrt(np.sum(vec_bin_centers ** 2, axis=1))
    return bl_gps, vec_bin_centers, lens
def get_antenna_redundancies(
    antenna_numbers, antenna_positions, tol=1.0, include_autos=False
):
    """
    Find redundant baseline groups based on antenna positions.
    Parameters
    ----------
    antenna_numbers : array_like of int
        Antenna numbers, shape (Nants,).
    antenna_positions : array_like of float
        Antenna position vectors in the ENU (topocentric) frame in meters,
        shape (Nants, 3).
    tol : float
        Redundancy tolerance in meters.
    include_autos : bool
        Option to include autocorrelations.
    Returns
    -------
    baseline_groups : list of lists of int
        list of lists of redundant baseline numbers
    vec_bin_centers : list of array_like of float
        List of vectors describing redundant group centers
    lengths : list of float
        List of redundant group baseline lengths in meters
    Notes
    -----
    The baseline numbers refer to antenna pairs (a1, a2) such that
    the baseline vector formed from ENU antenna positions,
    blvec = enu[a1] - enu[a2]
    is close to the other baselines in the group.
    This is achieved by putting baselines in a form of the u>0
    convention, but with a tolerance in defining the signs of
    vector components.
    To guarantee that the same baseline numbers are present in a UVData
    object, ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is
    the tolerance used here.
    """
    Nants = antenna_numbers.size
    bls = []
    bl_vecs = []
    # Enumerate each antenna pair once (aj < ai, or aj <= ai with autos).
    for aj in range(Nants):
        mini = aj + 1
        if include_autos:
            mini = aj
        for ai in range(mini, Nants):
            anti, antj = antenna_numbers[ai], antenna_numbers[aj]
            bidx = antnums_to_baseline(antj, anti, Nants)
            bv = antenna_positions[ai] - antenna_positions[aj]
            bl_vecs.append(bv)
            bls.append(bidx)
    bls = np.array(bls)
    bl_vecs = np.array(bl_vecs)
    gps, vecs, lens, conjs = get_baseline_redundancies(
        bls, bl_vecs, tol=tol, with_conjugates=True
    )
    # Flip the baselines in the groups. Use a set for O(1) membership tests
    # instead of scanning the conjugated-baseline array for every member.
    conj_set = set(conjs)
    for gi, gp in enumerate(gps):
        for bi, bl in enumerate(gp):
            if bl in conj_set:
                gps[gi][bi] = baseline_index_flip(bl, Nants)
    return gps, vecs, lens
def mean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging data.

    Like np.average, except infinite data values are given zero weight, and
    axes whose total weight is zero yield inf with zero output weight.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        Whether to return the sum of the square of the weights. Default is False.
    """
    arr = copy.deepcopy(arr)  # do not mutate the caller's array
    if weights is None:
        weights = np.ones_like(arr)
    else:
        weights = copy.deepcopy(weights)
    # Zero the weight of infinite entries, then neutralize those entries so
    # they cannot contaminate the weighted sum.
    weights = weights * np.logical_not(np.isinf(arr))
    arr[np.isinf(arr)] = 0
    weight_out = np.sum(weights, axis=axis)
    if return_weights_square:
        weights_square_out = np.sum(weights ** 2, axis=axis)
    weighted_sum = np.sum(weights * arr, axis=axis)
    # Entries with (essentially) zero total weight are forced to inf.
    nonzero = weight_out > 1e-10
    out = np.true_divide(weighted_sum, weight_out, where=nonzero)
    out = np.where(nonzero, out, np.inf)
    if return_weights and return_weights_square:
        return out, weight_out, weights_square_out
    if return_weights:
        return out, weight_out
    if return_weights_square:
        return out, weights_square_out
    return out
def absmean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging absolute value of data.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        whether to return the sum of the squares of the weights. Default is False.
    """
    # Delegate to mean_collapse on |arr|; all options pass straight through.
    opts = dict(
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
    return mean_collapse(np.abs(arr), **opts)
def quadmean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging in quadrature.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        whether to return the sum of the squares of the weights. Default is False.
    """
    # Mean of |arr|**2, then take the square root of the collapsed values.
    collapsed = mean_collapse(
        np.abs(arr) ** 2,
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
    # mean_collapse returns a tuple when any weight outputs were requested.
    if return_weights and return_weights_square:
        return np.sqrt(collapsed[0]), collapsed[1], collapsed[2]
    if return_weights or return_weights_square:
        return np.sqrt(collapsed[0]), collapsed[1]
    return np.sqrt(collapsed)
def or_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse using OR operation.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        NOT USED, but kept for symmetry with other collapsing functions.
    axis : int or tuple, optional
        Axis or axes to collapse (take OR over). Default is all.
    return_weights : bool
        Whether to return dummy weights array.
        NOTE: the dummy weights will simply be an array of ones
    return_weights_square: bool
        NOT USED, but kept for symmetry with other collapsing functions.
    """
    if arr.dtype != np.bool_:
        raise ValueError("Input to or_collapse function must be boolean array")
    collapsed = np.any(arr, axis=axis)
    # Weights are ignored for boolean OR; warn when non-uniform weights given.
    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
        warnings.warn("Currently weights are not handled when OR-ing boolean arrays.")
    if return_weights:
        return collapsed, np.ones_like(collapsed, dtype=np.float64)
    return collapsed
def and_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse using AND operation.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        NOT USED, but kept for symmetry with other collapsing functions.
    axis : int or tuple, optional
        Axis or axes to collapse (take AND over). Default is all.
    return_weights : bool
        Whether to return dummy weights array.
        NOTE: the dummy weights will simply be an array of ones
    return_weights_square: bool
        NOT USED, but kept for symmetry with other collapsing functions.
    """
    if arr.dtype != np.bool_:
        raise ValueError("Input to and_collapse function must be boolean array")
    collapsed = np.all(arr, axis=axis)
    # Weights are ignored for boolean AND; warn when non-uniform weights given.
    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
        warnings.warn("Currently weights are not handled when AND-ing boolean arrays.")
    if return_weights:
        return collapsed, np.ones_like(collapsed, dtype=np.float64)
    return collapsed
def collapse(
    arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Parent function to collapse an array with a given algorithm.

    Parameters
    ----------
    arr : array
        Input array to process.
    alg : str
        Algorithm to use. Must be defined in this function with
        corresponding subfunction above.
    weights: ndarray, optional
        weights for collapse operation (e.g. weighted mean).
        NOTE: Some subfunctions do not use the weights. See corresponding
        doc strings.
    axis : int or tuple, optional
        Axis or axes to collapse. Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        Whether to return the sum of the squares of the weights. Default is False.

    Returns
    -------
    array or tuple
        The collapsed array, optionally followed by the summed weights
        and/or summed squared weights (see the individual collapse
        functions for details).
    """
    collapse_dict = {
        "mean": mean_collapse,
        "absmean": absmean_collapse,
        "quadmean": quadmean_collapse,
        "or": or_collapse,
        "and": and_collapse,
    }
    # Resolve the algorithm first so a KeyError raised *inside* the collapse
    # function itself is not misreported as an unknown algorithm name.
    try:
        collapse_func = collapse_dict[alg]
    except KeyError as err:
        raise ValueError(
            "Collapse algorithm must be one of: "
            + ", ".join(collapse_dict.keys())
            + "."
        ) from err
    return collapse_func(
        arr,
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
def uvcalibrate(
    uvdata,
    uvcal,
    inplace=True,
    prop_flags=True,
    Dterm_cal=False,
    flip_gain_conj=False,
    delay_convention="minus",
    undo=False,
    time_check=True,
    ant_check=True,
):
    """
    Calibrate a UVData object with a UVCal object.
    Parameters
    ----------
    uvdata : UVData object
        UVData object to calibrate.
    uvcal : UVCal object
        UVCal object containing the calibration.
    inplace : bool, optional
        if True edit uvdata in place, else return a calibrated copy
    prop_flags : bool, optional
        if True, propagate calibration flags to data flags
        and doesn't use flagged gains. Otherwise, uses flagged gains and
        does not propagate calibration flags to data flags.
    Dterm_cal : bool, optional
        Calibrate the off-diagonal terms in the Jones matrix if present
        in uvcal. Default is False. Currently not implemented.
    flip_gain_conj : bool, optional
        This function uses the UVData ant_1_array and ant_2_array to specify the
        antennas in the UVCal object. By default, the conjugation convention, which
        follows the UVData convention (i.e. ant2 - ant1), is that the applied
        gain = ant1_gain * conjugate(ant2_gain). If the other convention is required,
        set flip_gain_conj=True.
    delay_convention : str, optional
        Exponent sign to use in conversion of 'delay' to 'gain' cal_type
        if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'.
    undo : bool, optional
        If True, undo the provided calibration. i.e. apply the calibration with
        flipped gain_convention. Flag propagation rules apply the same.
    time_check : bool
        Option to check that times match between the UVCal and UVData
        objects if UVCal has a single time or time range. Times are always
        checked if UVCal has multiple times.
    ant_check : bool
        Option to check that all antennas with data on the UVData
        object have calibration solutions in the UVCal object. If this option is
        set to False, uvcalibrate will proceed without erroring and data for
        antennas without calibrations will be flagged.
    Returns
    -------
    UVData, optional
        Returns if not inplace
    """
    if not inplace:
        uvdata = uvdata.copy()
    # check both objects
    uvdata.check()
    uvcal.check()
    # Check whether the UVData antennas *that have data associated with them*
    # have associated data in the UVCal object
    uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array))
    uvdata.antenna_names = np.asarray(uvdata.antenna_names)
    # Matching between the objects is done by antenna *name*, looked up from
    # each object's own number-to-name mapping.
    uvdata_used_antnames = np.array(
        [
            uvdata.antenna_names[np.where(uvdata.antenna_numbers == antnum)][0]
            for antnum in uvdata_unique_nums
        ]
    )
    uvcal_unique_nums = np.unique(uvcal.ant_array)
    uvcal.antenna_names = np.asarray(uvcal.antenna_names)
    uvcal_used_antnames = np.array(
        [
            uvcal.antenna_names[np.where(uvcal.antenna_numbers == antnum)][0]
            for antnum in uvcal_unique_nums
        ]
    )
    ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist()
    if not ant_arr_match:
        # check more carefully
        name_missing = []
        for this_ant_name in uvdata_used_antnames:
            wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name)
            if wh_ant_match[0].size == 0:
                name_missing.append(this_ant_name)
        if len(name_missing) > 0:
            if len(name_missing) == uvdata_used_antnames.size:
                # all antenna_names with data on UVData are missing on UVCal.
                if not ant_check:
                    warnings.warn(
                        "All antenna names with data on UVData are missing "
                        "on UVCal. Since ant_check is False, calibration will "
                        "proceed but all data will be flagged."
                    )
                else:
                    raise ValueError(
                        "All antenna names with data on UVData are missing "
                        "on UVCal. To continue with calibration "
                        "(and flag all the data), set ant_check=False."
                    )
            else:
                # Only some antenna_names with data on UVData are missing on UVCal
                if not ant_check:
                    warnings.warn(
                        f"Antennas {name_missing} have data on UVData but are missing "
                        "on UVCal. Since ant_check is False, calibration will "
                        "proceed and the data for these antennas will be flagged."
                    )
                else:
                    raise ValueError(
                        f"Antennas {name_missing} have data on UVData but "
                        "are missing on UVCal. To continue calibration and "
                        "flag the data from missing antennas, set ant_check=False."
                    )
    # Check that UVCal times match (or can be downselected to) the UVData times.
    uvdata_times = np.unique(uvdata.time_array)
    downselect_cal_times = False
    if uvcal.Ntimes > 1:
        if uvcal.Ntimes < uvdata.Ntimes:
            raise ValueError(
                "The uvcal object has more than one time but fewer than the "
                "number of unique times on the uvdata object."
            )
        uvcal_times = np.unique(uvcal.time_array)
        try:
            time_arr_match = np.allclose(
                uvcal_times,
                uvdata_times,
                atol=uvdata._time_array.tols[1],
                rtol=uvdata._time_array.tols[0],
            )
        except ValueError:
            # np.allclose raises when the two arrays have incompatible shapes
            time_arr_match = False
        if not time_arr_match:
            # check more carefully
            uvcal_times_to_keep = []
            for this_time in uvdata_times:
                wh_time_match = np.nonzero(
                    np.isclose(
                        uvcal.time_array - this_time,
                        0,
                        atol=uvdata._time_array.tols[1],
                        rtol=uvdata._time_array.tols[0],
                    )
                )
                if wh_time_match[0].size > 0:
                    uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0])
                else:
                    raise ValueError(
                        f"Time {this_time} exists on UVData but not on UVCal."
                    )
            if len(uvcal_times_to_keep) < uvcal.Ntimes:
                downselect_cal_times = True
    elif uvcal.time_range is None:
        # only one UVCal time, no time_range.
        # This cannot match if UVData.Ntimes > 1.
        # If they are both NTimes = 1, then check if they're close.
        if uvdata.Ntimes > 1 or not np.isclose(
            uvdata_times,
            uvcal.time_array,
            atol=uvdata._time_array.tols[1],
            rtol=uvdata._time_array.tols[0],
        ):
            if not time_check:
                warnings.warn(
                    "Times do not match between UVData and UVCal "
                    "but time_check is False, so calibration "
                    "will be applied anyway."
                )
            else:
                raise ValueError(
                    "Times do not match between UVData and UVCal. "
                    "Set time_check=False to apply calibration anyway."
                )
    else:
        # time_array is length 1 and time_range exists: check uvdata_times in time_range
        if (
            np.min(uvdata_times) < uvcal.time_range[0]
            or np.max(uvdata_times) > uvcal.time_range[1]
        ):
            if not time_check:
                warnings.warn(
                    "Times do not match between UVData and UVCal "
                    "but time_check is False, so calibration "
                    "will be applied anyway."
                )
            else:
                raise ValueError(
                    "Times do not match between UVData and UVCal. "
                    "Set time_check=False to apply calibration anyway. "
                )
    # Check that UVCal frequencies match (or can be downselected to) UVData's.
    downselect_cal_freq = False
    if uvdata.future_array_shapes:
        uvdata_freq_arr_use = uvdata.freq_array
    else:
        uvdata_freq_arr_use = uvdata.freq_array[0, :]
    try:
        freq_arr_match = np.allclose(
            np.sort(uvcal.freq_array[0, :]),
            np.sort(uvdata_freq_arr_use),
            atol=uvdata._freq_array.tols[1],
            rtol=uvdata._freq_array.tols[0],
        )
    except ValueError:
        # np.allclose raises when the two arrays have incompatible shapes
        freq_arr_match = False
    if freq_arr_match is False:
        # check more carefully
        uvcal_freqs_to_keep = []
        for this_freq in uvdata_freq_arr_use:
            wh_freq_match = np.nonzero(
                np.isclose(
                    uvcal.freq_array - this_freq,
                    0,
                    atol=uvdata._freq_array.tols[1],
                    rtol=uvdata._freq_array.tols[0],
                )
            )
            if wh_freq_match[0].size > 0:
                uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0])
            else:
                raise ValueError(
                    f"Frequency {this_freq} exists on UVData but not on UVCal."
                )
        if len(uvcal_freqs_to_keep) < uvcal.Nfreqs:
            downselect_cal_freq = True
    # check if uvdata.x_orientation isn't set (it's required for uvcal)
    uvd_x = uvdata.x_orientation
    if uvd_x is None:
        # use the uvcal x_orientation throughout
        uvd_x = uvcal.x_orientation
        warnings.warn(
            "UVData object does not have `x_orientation` specified but UVCal does. "
            "Matching based on `x` and `y` only "
        )
    # Every feed polarization present in the data must have a matching
    # diagonal Jones term on the UVCal object.
    uvdata_pol_strs = polnum2str(uvdata.polarization_array, x_orientation=uvd_x)
    uvcal_pol_strs = jnum2str(uvcal.jones_array, x_orientation=uvcal.x_orientation)
    uvdata_feed_pols = {
        feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol]
    }
    for feed in uvdata_feed_pols:
        # get diagonal jones str
        jones_str = parse_jpolstr(feed, x_orientation=uvcal.x_orientation)
        if jones_str not in uvcal_pol_strs:
            raise ValueError(
                f"Feed polarization {feed} exists on UVData but not on UVCal. "
            )
    # downselect UVCal times, frequencies
    if downselect_cal_freq or downselect_cal_times:
        if not downselect_cal_times:
            uvcal_times_to_keep = None
        elif not downselect_cal_freq:
            uvcal_freqs_to_keep = None
        uvcal_use = uvcal.select(
            times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False
        )
        new_uvcal = True
    else:
        uvcal_use = uvcal
        new_uvcal = False
    # input checks
    if uvcal_use.cal_type == "delay":
        if not new_uvcal:
            # make a copy to convert to gain
            uvcal_use = uvcal_use.copy()
            new_uvcal = True
        uvcal_use.convert_to_gain(delay_convention=delay_convention)
    # D-term calibration
    if Dterm_cal:
        # check for D-terms
        if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array:
            raise ValueError(
                "Cannot apply D-term calibration without -7 or -8"
                "Jones polarization in uvcal object."
            )
        raise NotImplementedError("D-term calibration is not yet implemented.")
    # No D-term calibration
    else:
        # key is number, value is name
        uvdata_ant_dict = dict(zip(uvdata.antenna_numbers, uvdata.antenna_names))
        # opposite: key is name, value is number
        uvcal_ant_dict = dict(zip(uvcal.antenna_names, uvcal.antenna_numbers))
        # iterate over keys: one (ant1, ant2, pol) selection of the data at a time
        for key in uvdata.get_antpairpols():
            # get indices for this key
            blt_inds = uvdata.antpair2ind(key)
            pol_ind = np.argmin(
                np.abs(uvdata.polarization_array - polstr2num(key[2], uvd_x))
            )
            # try to get gains for each antenna
            ant1_num = key[0]
            ant2_num = key[1]
            feed1, feed2 = POL_TO_FEED_DICT[key[2]]
            # Translate UVData antenna numbers to UVCal numbers via names.
            try:
                uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]]
            except KeyError:
                uvcal_ant1_num = None
            try:
                uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]]
            except KeyError:
                uvcal_ant2_num = None
            uvcal_key1 = (uvcal_ant1_num, feed1)
            uvcal_key2 = (uvcal_ant2_num, feed2)
            # No calibration solution for either antenna: flag this selection.
            if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not (
                uvcal_use._has_key(*uvcal_key1) and uvcal_use._has_key(*uvcal_key2)
            ):
                if uvdata.future_array_shapes:
                    uvdata.flag_array[blt_inds, :, pol_ind] = True
                else:
                    uvdata.flag_array[blt_inds, 0, :, pol_ind] = True
                continue
            if flip_gain_conj:
                gain = (
                    np.conj(uvcal_use.get_gains(uvcal_key1))
                    * uvcal_use.get_gains(uvcal_key2)
                ).T  # transpose to match uvdata shape
            else:
                gain = (
                    uvcal_use.get_gains(uvcal_key1)
                    * np.conj(uvcal_use.get_gains(uvcal_key2))
                ).T  # transpose to match uvdata shape
            flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T
            # propagate flags
            if prop_flags:
                # Flagged or zero gains are replaced by unity and flagged in
                # the data rather than applied.
                mask = np.isclose(gain, 0.0) | flag
                gain[mask] = 1.0
                if uvdata.future_array_shapes:
                    uvdata.flag_array[blt_inds, :, pol_ind] += mask
                else:
                    uvdata.flag_array[blt_inds, 0, :, pol_ind] += mask
            # apply to data
            mult_gains = uvcal_use.gain_convention == "multiply"
            if undo:
                # undoing a calibration means applying the inverse convention
                mult_gains = not mult_gains
            if uvdata.future_array_shapes:
                if mult_gains:
                    uvdata.data_array[blt_inds, :, pol_ind] *= gain
                else:
                    uvdata.data_array[blt_inds, :, pol_ind] /= gain
            else:
                if mult_gains:
                    uvdata.data_array[blt_inds, 0, :, pol_ind] *= gain
                else:
                    uvdata.data_array[blt_inds, 0, :, pol_ind] /= gain
    # update attributes
    uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate."
    if undo:
        uvdata.vis_units = "uncalib"
    else:
        if uvcal_use.gain_scale is not None:
            uvdata.vis_units = uvcal_use.gain_scale
    if not inplace:
        return uvdata
def apply_uvflag(
    uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True
):
    """
    Apply flags from a UVFlag to a UVData instantiation.
    Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across
    that axis.
    Parameters
    ----------
    uvd : UVData object
        UVData object to add flags to.
    uvf : UVFlag object
        A UVFlag object in flag mode.
    inplace : bool
        If True overwrite flags in uvd, otherwise return new object
    unflag_first : bool
        If True, completely unflag the UVData before applying flags.
        Else, OR the inherent uvd flags with uvf flags.
    flag_missing : bool
        If input uvf is a baseline type and antpairs in uvd do not exist in uvf,
        flag them in uvd. Otherwise leave them untouched.
    force_pol : bool
        If True, broadcast flags to all polarizations if they do not match.
        Only works if uvf.Npols == 1.
    Returns
    -------
    UVData
        If not inplace, returns new UVData object with flags applied
    """
    # assertions
    if uvf.mode != "flag":
        raise ValueError("UVFlag must be flag mode")
    if not inplace:
        uvd = uvd.copy()
    # make a deepcopy by default b/c it is generally edited inplace downstream
    uvf = uvf.copy()
    # convert to baseline type
    if uvf.type != "baseline":
        # edits inplace
        uvf.to_baseline(uvd, force_pol=force_pol)
    else:
        # make sure polarizations match or force_pol
        uvd_pols, uvf_pols = (
            uvd.polarization_array.tolist(),
            uvf.polarization_array.tolist(),
        )
        if set(uvd_pols) != set(uvf_pols):
            if uvf.Npols == 1 and force_pol:
                # if uvf is 1pol we can make them match: also edits inplace
                uvf.polarization_array = uvd.polarization_array
                uvf.Npols = len(uvf.polarization_array)
                uvf_pols = uvf.polarization_array.tolist()
            else:
                raise ValueError("Input uvf and uvd polarizations do not match")
        # make sure polarization ordering is correct: also edits inplace
        uvf.polarization_array = uvf.polarization_array[
            [uvd_pols.index(pol) for pol in uvf_pols]
        ]
    # check time and freq shapes match: if Ntimes or Nfreqs is 1, allow
    # implicit broadcasting
    if uvf.Ntimes == 1:
        mismatch_times = False
    elif uvf.Ntimes == uvd.Ntimes:
        # compare the sorted unique times within the UVFlag time tolerance
        tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array)
        mismatch_times = np.any(tdiff > np.max(np.abs(uvf._time_array.tols)))
    else:
        mismatch_times = True
    if mismatch_times:
        raise ValueError("UVFlag and UVData have mismatched time arrays.")
    if uvf.Nfreqs == 1:
        mismatch_freqs = False
    elif uvf.Nfreqs == uvd.Nfreqs:
        # compare the sorted unique frequencies within the UVFlag freq tolerance
        fdiff = np.unique(uvf.freq_array) - np.unique(uvd.freq_array)
        mismatch_freqs = np.any(fdiff > np.max(np.abs(uvf._freq_array.tols)))
    else:
        mismatch_freqs = True
    if mismatch_freqs:
        raise ValueError("UVFlag and UVData have mismatched frequency arrays.")
    # unflag if desired
    if unflag_first:
        uvd.flag_array[:] = False
    # iterate over antpairs and apply flags: TODO need to be able to handle
    # conjugated antpairs
    uvf_antpairs = uvf.get_antpairs()
    for ap in uvd.get_antpairs():
        uvd_ap_inds = uvd.antpair2ind(ap)
        if ap not in uvf_antpairs:
            if flag_missing:
                uvd.flag_array[uvd_ap_inds] = True
            continue
        uvf_ap_inds = uvf.antpair2ind(*ap)
        # addition of boolean is OR
        if uvd.future_array_shapes:
            uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds, 0, :, :]
        else:
            uvd.flag_array[uvd_ap_inds] += uvf.flag_array[uvf_ap_inds]
    uvd.history += "\nFlagged with pyuvdata.utils.apply_uvflags."
    if not inplace:
        return uvd
def parse_ants(uv, ant_str, print_toggle=False, x_orientation=None):
    """
    Get antpair and polarization from parsing an aipy-style ant string.
    Used to support the select function. Generates two lists of antenna pair
    tuples and polarization indices based on parsing of the string ant_str.
    If no valid polarizations (pseudo-Stokes params, or combinations of [lr]
    or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
    polarizations are returned as None.
    Parameters
    ----------
    uv : UVBase Object
        A UVBased object that supports the following functions and parameters:
        - get_ants
        - get_antpairs
        - get_pols
        These are used to construct the baseline ant_pair_nums
        and polarizations returned.
    ant_str : str
        String containing antenna information to parse. Can be 'all',
        'auto', 'cross', or combinations of antenna numbers and polarization
        indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
        in front of an antenna number or baseline to exclude it from being
        output in ant_pairs_nums. If ant_str has a minus sign as the first
        character, 'all,' will be appended to the beginning of the string.
        See the tutorial for examples of valid strings and their behavior.
    print_toggle : bool
        Boolean for printing parsed baselines for a visual user check.
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. If input uv object has an `x_orientation`
        parameter and the input to this function is `None`, the value from the
        object will be used. Any input given to this function will override the
        value on the uv object. See corresonding parameter on UVData
        for more details.
    Returns
    -------
    ant_pairs_nums : list of tuples of int or None
        List of tuples containing the parsed pairs of antenna numbers, or
        None if ant_str is 'all' or a pseudo-Stokes polarizations.
    polarizations : list of int or None
        List of desired polarizations or None if ant_str does not contain a
        polarization specification.
    """
    required_attrs = ["get_ants", "get_antpairs", "get_pols"]
    if not all(hasattr(uv, attr) for attr in required_attrs):
        raise ValueError(
            "UVBased objects must have all the following attributes in order "
            f"to call 'parse_ants': {required_attrs}."
        )
    if x_orientation is None and (
        hasattr(uv, "x_orientation") and uv.x_orientation is not None
    ):
        x_orientation = uv.x_orientation
    # ant_re matches a single antenna spec: either a parenthesized,
    # comma-separated list or a single (possibly negated) number with an
    # optional polarization letter [lrxy].
    ant_re = r"(\(((-?\d+[lrxy]?,?)+)\)|-?\d+[lrxy]?)"
    # bl_re matches either "ant_ant" (groups 2-7) or a lone antenna spec
    # (group 8), anchored at the start of the remaining string.
    bl_re = "(^(%s_%s|%s),?)" % (ant_re, ant_re, ant_re)
    str_pos = 0
    ant_pairs_nums = []
    polarizations = []
    ants_data = uv.get_ants()
    ant_pairs_data = uv.get_antpairs()
    pols_data = uv.get_pols()
    warned_ants = []
    warned_pols = []
    # A leading minus means "exclude from everything": prepend 'all,'.
    if ant_str.startswith("-"):
        ant_str = "all," + ant_str
    # Consume ant_str left to right, one comma-separated token at a time.
    while str_pos < len(ant_str):
        m = re.search(bl_re, ant_str[str_pos:])
        if m is None:
            # Not a baseline token: try the keyword and pseudo-Stokes forms.
            if ant_str[str_pos:].upper().startswith("ALL"):
                if len(ant_str[str_pos:].split(",")) > 1:
                    ant_pairs_nums = uv.get_antpairs()
            elif ant_str[str_pos:].upper().startswith("AUTO"):
                for pair in ant_pairs_data:
                    if pair[0] == pair[1] and pair not in ant_pairs_nums:
                        ant_pairs_nums.append(pair)
            elif ant_str[str_pos:].upper().startswith("CROSS"):
                for pair in ant_pairs_data:
                    if not (pair[0] == pair[1] or pair in ant_pairs_nums):
                        ant_pairs_nums.append(pair)
            elif ant_str[str_pos:].upper().startswith("PI"):
                polarizations.append(polstr2num("pI"))
            elif ant_str[str_pos:].upper().startswith("PQ"):
                polarizations.append(polstr2num("pQ"))
            elif ant_str[str_pos:].upper().startswith("PU"):
                polarizations.append(polstr2num("pU"))
            elif ant_str[str_pos:].upper().startswith("PV"):
                polarizations.append(polstr2num("pV"))
            else:
                raise ValueError("Unparsible argument {s}".format(s=ant_str))
            comma_cnt = ant_str[str_pos:].find(",")
            if comma_cnt >= 0:
                str_pos += comma_cnt + 1
            else:
                str_pos = len(ant_str)
        else:
            m = m.groups()
            str_pos += len(m[0])
            if m[2] is None:
                # Lone antenna spec (group 8): pair it with every antenna.
                ant_i_list = [m[8]]
                ant_j_list = list(uv.get_ants())
            else:
                # "ant_ant" form: expand each side, which may be a
                # parenthesized list (groups 3 / 6) or a single spec.
                if m[3] is None:
                    ant_i_list = [m[2]]
                else:
                    ant_i_list = m[3].split(",")
                if m[6] is None:
                    ant_j_list = [m[5]]
                else:
                    ant_j_list = m[6].split(",")
            for ant_i in ant_i_list:
                include_i = True
                if type(ant_i) == str and ant_i.startswith("-"):
                    ant_i = ant_i[1:]  # nibble the - off the string
                    include_i = False
                for ant_j in ant_j_list:
                    include_j = True
                    if type(ant_j) == str and ant_j.startswith("-"):
                        ant_j = ant_j[1:]
                        include_j = False
                    pols = None
                    ant_i, ant_j = str(ant_i), str(ant_j)
                    # Split each side into (number, polarization letter).
                    if not ant_i.isdigit():
                        ai = re.search(r"(\d+)([x,y,l,r])", ant_i).groups()
                    if not ant_j.isdigit():
                        aj = re.search(r"(\d+)([x,y,l,r])", ant_j).groups()
                    # Build the implied polarization list for this pair.
                    if ant_i.isdigit() and ant_j.isdigit():
                        ai = [ant_i, ""]
                        aj = [ant_j, ""]
                    elif ant_i.isdigit() and not ant_j.isdigit():
                        if "x" in ant_j or "y" in ant_j:
                            pols = ["x" + aj[1], "y" + aj[1]]
                        else:
                            pols = ["l" + aj[1], "r" + aj[1]]
                        ai = [ant_i, ""]
                    elif not ant_i.isdigit() and ant_j.isdigit():
                        if "x" in ant_i or "y" in ant_i:
                            pols = [ai[1] + "x", ai[1] + "y"]
                        else:
                            pols = [ai[1] + "l", ai[1] + "r"]
                        aj = [ant_j, ""]
                    elif not ant_i.isdigit() and not ant_j.isdigit():
                        pols = [ai[1] + aj[1]]
                    ant_tuple = (abs(int(ai[0])), abs(int(aj[0])))
                    # Order tuple according to order in object
                    if ant_tuple in ant_pairs_data:
                        pass
                    elif ant_tuple[::-1] in ant_pairs_data:
                        ant_tuple = ant_tuple[::-1]
                    else:
                        # Pair not present in the data: record the unknown
                        # antennas/pols once each for the warnings below.
                        if not (
                            ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants
                        ):
                            warned_ants.append(ant_tuple[0])
                        if not (
                            ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants
                        ):
                            warned_ants.append(ant_tuple[1])
                        if pols is not None:
                            for pol in pols:
                                if not (pol.lower() in pols_data or pol in warned_pols):
                                    warned_pols.append(pol)
                        continue
                    if include_i and include_j:
                        if ant_tuple not in ant_pairs_nums:
                            ant_pairs_nums.append(ant_tuple)
                        if pols is not None:
                            for pol in pols:
                                if (
                                    pol.lower() in pols_data
                                    and polstr2num(pol, x_orientation=x_orientation)
                                    not in polarizations
                                ):
                                    polarizations.append(
                                        polstr2num(pol, x_orientation=x_orientation)
                                    )
                                elif not (
                                    pol.lower() in pols_data or pol in warned_pols
                                ):
                                    warned_pols.append(pol)
                    else:
                        # Exclusion: a minus-prefixed antenna on either side
                        # removes the matching pair (and possibly pols).
                        if pols is not None:
                            for pol in pols:
                                if pol.lower() in pols_data:
                                    if uv.Npols == 1 and [pol.lower()] == pols_data:
                                        ant_pairs_nums.remove(ant_tuple)
                                    if (
                                        polstr2num(pol, x_orientation=x_orientation)
                                        in polarizations
                                    ):
                                        polarizations.remove(
                                            polstr2num(
                                                pol, x_orientation=x_orientation,
                                            )
                                        )
                                elif not (
                                    pol.lower() in pols_data or pol in warned_pols
                                ):
                                    warned_pols.append(pol)
                        elif ant_tuple in ant_pairs_nums:
                            ant_pairs_nums.remove(ant_tuple)
    # Normalize empty results to None per the documented return contract.
    if ant_str.upper() == "ALL":
        ant_pairs_nums = None
    elif len(ant_pairs_nums) == 0:
        if not ant_str.upper() in ["AUTO", "CROSS"]:
            ant_pairs_nums = None
    if len(polarizations) == 0:
        polarizations = None
    else:
        polarizations.sort(reverse=True)
    if print_toggle:
        print("\nParsed antenna pairs:")
        if ant_pairs_nums is not None:
            for pair in ant_pairs_nums:
                print(pair)
        print("\nParsed polarizations:")
        if polarizations is not None:
            for pol in polarizations:
                print(polnum2str(pol, x_orientation=x_orientation))
    if len(warned_ants) > 0:
        warnings.warn(
            "Warning: Antenna number {a} passed, but not present "
            "in the ant_1_array or ant_2_array".format(
                a=(",").join(map(str, warned_ants))
            )
        )
    if len(warned_pols) > 0:
        warnings.warn(
            "Warning: Polarization {p} is not present in "
            "the polarization_array".format(p=(",").join(warned_pols).upper())
        )
    return ant_pairs_nums, polarizations
def _combine_filenames(filename1, filename2):
"""Combine the filename attribute from multiple UVBase objects.
The 4 cases are:
1. `filename1` has been set, `filename2` has not
2. `filename1` has not been set, `filename2` has
3. `filename1` and `filename2` both have been set
4. `filename1` and `filename2` both have not been set
In case (1), we do not want to update the attribute, because it is
already set correctly. In case (2), we want to replace `filename1`
with the value from `filename2. In case (3), we want to take the union of
the sets of the filenames. In case (4), we want the filename attribute
to still be `None`.
Parameters
----------
filename1 : list of str or None
The list of filenames for the first UVBase object. If it is not set, it
should be `None`.
filename2 : list of str or None
The list of filenames for the second UVData object. If it is not set, it
should be `None`.
Returns
-------
combined_filenames : list of str or None
The combined list, with potentially duplicate entries removed.
"""
combined_filenames = filename1
if filename1 is not None:
if filename2 is not None:
combined_filenames = sorted(set(filename1).union(set(filename2)))
elif filename2 is not None:
combined_filenames = filename2
return combined_filenames
def _get_dset_shape(dset, indices):
"""
Given a 3-tuple of indices, determine the indexed array shape.
Parameters
----------
dset : numpy array or h5py dataset
A numpy array or a reference to an HDF5 dataset on disk. Requires the
`dset.shape` attribute exists and returns a tuple.
indices : tuple
A 3-tuple with the indices to extract along each dimension of dset.
Each element should contain a list of indices, a slice element,
or a list of slice elements that will be concatenated after slicing.
For data arrays with 4 dimensions, the second dimension (the old spw axis)
should not be included because it can only be length one.
Returns
-------
tuple
a 3- or 4-tuple with the shape of the indexed array
tuple
a 3- or 4-tuple with indices used (will be different than input if dset has
4 dimensions)
"""
dset_shape = list(dset.shape)
if len(dset_shape) == 4 and len(indices) == 3:
indices = (indices[0], np.s_[:], indices[1], indices[2])
for i, inds in enumerate(indices):
# check for integer
if isinstance(inds, (int, np.integer)):
dset_shape[i] = 1
# check for slice object
if isinstance(inds, slice):
dset_shape[i] = _get_slice_len(inds, dset_shape[i])
# check for list
if isinstance(inds, list):
# check for list of integers
if isinstance(inds[0], (int, np.integer)):
dset_shape[i] = len(inds)
elif isinstance(inds[0], slice):
dset_shape[i] = sum((_get_slice_len(s, dset_shape[i]) for s in inds))
return dset_shape, indices
def _convert_to_slices(indices, max_nslice_frac=0.1):
"""
Convert list of indices to a list of slices.
Parameters
----------
indices : list
A 1D list of integers for array indexing.
max_nslice_frac : float
A float from 0 -- 1. If the number of slices
needed to represent input 'indices' divided by len(indices)
exceeds this fraction, then we determine that we cannot
easily represent 'indices' with a list of slices.
Returns
-------
list
list of slice objects used to represent indices
bool
If True, indices is easily represented by slices
(max_nslice_frac condition met), otherwise False
Notes
-----
Example:
if: indices = [1, 2, 3, 4, 10, 11, 12, 13, 14]
then: slices = [slice(1, 5, 1), slice(11, 15, 1)]
"""
# check for integer index
if isinstance(indices, (int, np.integer)):
indices = [indices]
# check for already a slice
if isinstance(indices, slice):
return [indices], True
# assert indices is longer than 2, or return trivial solutions
if len(indices) == 0:
return [slice(0, 0, 0)], False
elif len(indices) == 1:
return [slice(indices[0], indices[0] + 1, 1)], True
elif len(indices) == 2:
return [slice(indices[0], indices[1] + 1, indices[1] - indices[0])], True
# setup empty slices list
Ninds = len(indices)
slices = []
# iterate over indices
for i, ind in enumerate(indices):
if i == 0:
# start the first slice object
start = ind
last_step = indices[i + 1] - ind
continue
# calculate step from previous index
step = ind - indices[i - 1]
# if step != last_step, this ends the slice
if step != last_step:
# append to list
slices.append(slice(start, indices[i - 1] + 1, last_step))
# check if this is the last element
if i == Ninds - 1:
# append last element
slices.append(slice(ind, ind + 1, 1))
continue
# setup next step
start = ind
last_step = indices[i + 1] - ind
# check if this is the last element
elif i == Ninds - 1:
# end slice and append
slices.append(slice(start, ind + 1, step))
# determine whether slices are a reasonable representation
Nslices = len(slices)
passed = (float(Nslices) / len(indices)) < max_nslice_frac
return slices, passed
def _get_slice_len(s, axlen):
"""
Get length of a slice s into array of len axlen.
Parameters
----------
s : slice object
Slice object to index with
axlen : int
Length of axis s slices into
Returns
-------
int
Length of slice object
"""
if s.start is None:
start = 0
else:
start = s.start
if s.stop is None:
stop = axlen
else:
stop = np.min([s.stop, axlen])
if s.step is None:
step = 1
else:
step = s.step
return ((stop - 1 - start) // step) + 1
def _index_dset(dset, indices, input_array=None):
    """
    Index a UVH5 data, flags or nsamples h5py dataset.
    Parameters
    ----------
    dset : h5py dataset
        A reference to an HDF5 dataset on disk.
    indices : tuple
        A 3-tuple with the indices to extract along each dimension of dset.
        Each element should contain a list of indices, a slice element,
        or a list of slice elements that will be concatenated after slicing.
        Indices must be provided such that all dimensions can be indexed
        simultaneously. For data arrays with 4 dimensions, the second dimension
        (the old spw axis) should not be included because it can only be length one.
    input_array : ndarray, optional
        When given, the data flow is reversed: values are read from this
        array and written *into* ``dset`` at ``indices``, and None is
        returned. When omitted, a new array is allocated and filled from
        ``dset``.
    Returns
    -------
    ndarray or None
        The indexed dset when ``input_array`` is None, otherwise None
        (the write happens in place on ``dset``).
    Notes
    -----
    This makes and fills an empty array with dset indices.
    For trivial indexing, (e.g. a trivial slice), constructing
    a new array and filling it is suboptimal over direct
    indexing, e.g. dset[indices].
    This function specializes in repeated slices over the same axis,
    e.g. if indices is [[slice(0, 5), slice(10, 15), ...], ..., ]
    """
    # get dset and arr shape
    dset_shape = dset.shape
    # _get_dset_shape also expands ``indices`` with a full slice for the
    # length-1 spw axis when dset is 4-dimensional and only 3 were given.
    arr_shape, indices = _get_dset_shape(dset, indices)
    if input_array is None:
        # create empty array of dset dtype
        arr = np.empty(arr_shape, dtype=dset.dtype)
    else:
        arr = input_array
    # get arr and dset indices for each dimension in indices
    # arr_indices[i] and dset_indices[i] are parallel lists of selectors;
    # the nested loops below zip over them pairwise per axis.
    dset_indices = []
    arr_indices = []
    for i, dset_inds in enumerate(indices):
        if isinstance(dset_inds, (int, np.integer)):
            # this dimension is len 1, so slice is fine
            arr_indices.append([slice(None)])
            dset_indices.append([[dset_inds]])
        elif isinstance(dset_inds, slice):
            # this dimension is just a slice, so slice is fine
            arr_indices.append([slice(None)])
            dset_indices.append([dset_inds])
        elif isinstance(dset_inds, (list, np.ndarray)):
            if isinstance(dset_inds[0], (int, np.integer)):
                # this is a list of integers, append slice
                arr_indices.append([slice(None)])
                dset_indices.append([dset_inds])
            elif isinstance(dset_inds[0], slice):
                # this is a list of slices, need list of slice lens
                slens = [_get_slice_len(s, dset_shape[i]) for s in dset_inds]
                # running offsets into the output array for each sub-slice
                ssums = [sum(slens[:j]) for j in range(len(slens))]
                arr_inds = [slice(s, s + l) for s, l in zip(ssums, slens)]
                arr_indices.append(arr_inds)
                dset_indices.append(dset_inds)
    # Locate the freq/pol axes: a 4-dimensional dataset keeps the old
    # length-1 spw axis at position 1, shifting freq/pol right by one.
    if len(dset_shape) == 3:
        freq_dim = 1
        pol_dim = 2
    else:
        freq_dim = 2
        pol_dim = 3
    # iterate over each of the 3 axes and fill the array
    for blt_arr, blt_dset in zip(arr_indices[0], dset_indices[0]):
        for freq_arr, freq_dset in zip(arr_indices[freq_dim], dset_indices[freq_dim]):
            for pol_arr, pol_dset in zip(arr_indices[pol_dim], dset_indices[pol_dim]):
                if input_array is None:
                    # index dset and assign to arr
                    if len(dset_shape) == 3:
                        arr[blt_arr, freq_arr, pol_arr] = dset[
                            blt_dset, freq_dset, pol_dset
                        ]
                    else:
                        arr[blt_arr, :, freq_arr, pol_arr] = dset[
                            blt_dset, :, freq_dset, pol_dset
                        ]
                else:
                    # index arr and assign to dset
                    if len(dset_shape) == 3:
                        dset[blt_dset, freq_dset, pol_dset] = arr[
                            blt_arr, freq_arr, pol_arr
                        ]
                    else:
                        dset[blt_dset, :, freq_dset, pol_dset] = arr[
                            blt_arr, :, freq_arr, pol_arr
                        ]
    if input_array is None:
        return arr
    else:
        return
| 38.190699 | 88 | 0.616454 |
48999c30f6ce3553bc46c4455cd9c9d9aeb16c39 | 1,244 | py | Python | gui menubarandtabs.py | Annonymous-error/general-codes | 06c8833a92e73331e5269b44e57c3f5efa284d7a | [
"Apache-2.0"
] | 1 | 2020-11-07T14:48:25.000Z | 2020-11-07T14:48:25.000Z | gui menubarandtabs.py | Annonymous-error/general-codes | 06c8833a92e73331e5269b44e57c3f5efa284d7a | [
"Apache-2.0"
] | null | null | null | gui menubarandtabs.py | Annonymous-error/general-codes | 06c8833a92e73331e5269b44e57c3f5efa284d7a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 5 00:05:41 2020
tabs in gui app
menu baar tutuorial
@author: Ayush Gupta
"""
import tkinter as tk
from tkinter import ttk
win=tk.Tk()
win.title('menubar with tabs')
############################
nb=ttk.Notebook(win)
page1=ttk.Frame(nb)
page2=ttk.Frame(nb)
nb.add(page1,text='one')
nb.add(page2,text='Two')
# nb.grid(row=0,column=0)
nb.pack(expand=True,fill='both')
label1=ttk.Label(page1,text='this is tabbed app')
label1.grid(row=0,column=0)
###############################
def func():
print('func called')
# menubar=tk.Menu(win) simple menu bar
# menubar.add_command(label='File',command=func)
# menubar.add_command(label='Edit',command=func)
# win.config(menu=menubar)
main_menu = tk.Menu(page1)
file_menu = tk.Menu(main_menu,tearoff=0)
file_menu.add_command(label='New file',command=func)
file_menu.add_separator()
file_menu.add_command(label='New window',command=func)
main_menu.add_cascade(label='File',menu=file_menu)
edit_menu=tk.Menu(main_menu,tearoff=0)
edit_menu.add_cascade(label='undo',command=func)
main_menu.add_cascade(label='Edit', menu=edit_menu)
win.config(menu=main_menu)
win.mainloop()
| 22.618182 | 57 | 0.663987 |
cd47ae390791dbd92ab2b6ff6007182b0cdd5681 | 519 | py | Python | source/plugins/Patches/patch_to_cpp.py | supahas/PDA-Loader | ced7fa54ce3e82be7d93e5ffe3725a1f2d402830 | [
"MIT"
] | null | null | null | source/plugins/Patches/patch_to_cpp.py | supahas/PDA-Loader | ced7fa54ce3e82be7d93e5ffe3725a1f2d402830 | [
"MIT"
] | null | null | null | source/plugins/Patches/patch_to_cpp.py | supahas/PDA-Loader | ced7fa54ce3e82be7d93e5ffe3725a1f2d402830 | [
"MIT"
] | null | null | null | import os
from sys import argv
from textwrap import wrap

# Convert a plain-text patch listing (lines of "address:?:?:hexbytes") into
# C++ aggregate-initializer entries: { (void*)ADDRESS,{ 0xAA, 0xBB } },
# Usage: python patch_to_cpp.py <patch-file>
run, filename = argv
with open(filename, 'r') as f:
    lines = f.readlines()
for line in lines:
    # Skip comment lines and blank lines. Fix: use boolean `or` instead of
    # the bitwise `|` operator (which happened to work on bools but is not
    # short-circuiting boolean logic).
    if line.startswith("#") or line.startswith("\n") or line.startswith("\r"):
        continue
    if line.startswith("//"):
        # C++-style comments are passed through, indented.
        print(" " + line)
        continue
    line = line.replace(" ", "")
    address = line.split(':')[0]
    # Field 3 holds the replacement bytes as a hex string.
    value = line.split(':')[3]
    # Split into two-character byte pairs and join as "0xAA, 0xBB, ...".
    value = wrap(value, 2)
    value = ', 0x'.join(value)
    value = "0x" + value
    print(" { (void*)" + address + ",{ " + value + " } },")
0a838ff39522937305d0403755825aba6fc50232 | 27 | py | Python | to_nwb/extensions/buzsaki_meta/__init__.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 1 | 2020-03-31T20:02:01.000Z | 2020-03-31T20:02:01.000Z | to_nwb/extensions/buzsaki_meta/__init__.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 2 | 2020-08-27T18:16:04.000Z | 2020-09-08T18:43:34.000Z | to_nwb/extensions/buzsaki_meta/__init__.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 5 | 2018-04-04T21:27:23.000Z | 2019-04-01T13:40:00.000Z | from .buzsaki_meta import * | 27 | 27 | 0.814815 |
ea3e9e055b5acce4350c24000227b29c84761f1f | 593 | py | Python | Python Files/timeit_code3.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
] | 28 | 2019-07-05T04:00:45.000Z | 2022-02-16T09:43:50.000Z | Python Files/timeit_code3.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
] | null | null | null | Python Files/timeit_code3.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
] | 22 | 2018-10-24T04:42:05.000Z | 2022-02-04T08:17:27.000Z | from timeit import Timer
def build_list(n):
    """Return the list [0, 1, ..., n-1]."""
    return [*range(n)]
def access(l):
    """Read the first, middle and last element of list ``l``.

    Three accesses are timed together so the caller can average them for a
    more stable per-access time. Fix: use ``len(l)`` instead of the
    module-level global ``n`` that the original relied on, so the function
    works for any list, not just the one built with the global size.
    """
    size = len(l)
    l[0]
    l[size // 2]
    l[size - 1]
# Time access() on a list of n elements built once in the Timer setup.
n = 100
t1 = Timer("access(l1)", # side note, don't need timeit. prefix
        "from __main__ import access,build_list,n; l1 = build_list(n)" )
# 25 independent runs of a single call each.
times = t1.repeat(25,1)
# access() touches three elements, so divide by 3 for the per-access time.
secs = [ x/3 for x in times]
for t in secs:
    print("%.10f secs" % (t))
# min() is the conventional timeit summary: the least-interfered-with run.
print("best time %.8f" % (min(secs)))
| 21.178571 | 77 | 0.615514 |
d10190acc4b9fba9ef9b16aa49796a2eb17d2413 | 643 | py | Python | src/tests/test_pass.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | src/tests/test_pass.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | src/tests/test_pass.py | bspeagle/py_git_diff | 1674afc1dfac0408372e11945f4a36b297b77e66 | [
"MIT"
] | null | null | null | '''
Passing tests
'''
import os
from typing import Any
import pytest
from helpers.github import API
# Shared GitHub API client used by all of the tests below.
api = API()
# NOTE(review): this binds the typing.Any *object* as a placeholder value;
# the auth test rebinds a local of the same name — confirm this is intended.
pass_token = Any
# Expected organization / repository names, supplied via the environment.
pass_org = os.getenv('PASS_ORG')
pass_repo = os.getenv('PASS_REPO')
def test_pass_auth(token):
    '''
    Pass 'auth' to Github
    '''
    # Hand the fixture-provided token straight to the shared client and
    # verify that authentication populated the current user.
    api.authenticate(token)
    assert api._current_user is not None
def test_pass_org():
    '''
    Pass 'get organization'
    '''
    # Fetch the organization named by PASS_ORG and confirm its login matches.
    org_name = pass_org
    api.get_organization(org_name)
    assert api._org.login == org_name
def test_pass_repo():
    '''
    Pass 'get repo'
    '''
    api.get_organization(pass_org)
    # Bug fix: the comparison result was previously discarded (no assert),
    # so this test could never fail regardless of the repo name.
    assert api._repo.full_name == pass_repo
| 14.953488 | 40 | 0.659409 |
4941df9f3a7fa873ac43e8a2222e5c2fb6c2f6b7 | 596 | py | Python | unrest/settings.py | chriscauley/django-unrest | dd1078afe2333654d60f57d35ff5f5e990587155 | [
"MIT"
] | null | null | null | unrest/settings.py | chriscauley/django-unrest | dd1078afe2333654d60f57d35ff5f5e990587155 | [
"MIT"
] | null | null | null | unrest/settings.py | chriscauley/django-unrest | dd1078afe2333654d60f57d35ff5f5e990587155 | [
"MIT"
] | null | null | null | # usage: (in project settings file)
# from unrest.settings import get_secret_key
# SECRET_KEY = get_secret_key(BASE_DIR)
import os
from django.core.management.utils import get_random_secret_key
def get_secret_key(BASE_DIR):
    """Return the Django SECRET_KEY persisted under settings/.secret_key.

    Reads the key from ``<BASE_DIR>/settings/.secret_key`` when the file
    exists; otherwise generates a fresh random key, writes it there for
    future runs, and returns it.
    """
    key_path = os.path.join(BASE_DIR, 'settings', '.secret_key')
    if os.path.exists(key_path):
        # Reuse the previously persisted key.
        with open(key_path, 'r') as f:
            return f.read()
    # First run: generate, persist, then return the new key.
    secret = get_random_secret_key()
    with open(key_path, 'w') as f:
        f.write(secret)
    print('wrote secret key to', key_path)
    return secret
63bec2f2c75a4cde03f846d677da00ef2c00fed3 | 212 | py | Python | source/guiComponents/tkinterImage.py | staujd02/Pi-RFID-Video-Player | 613d5a9355b660afb5414b3f4a9dad219b69fc36 | [
"Apache-2.0"
] | 1 | 2020-02-15T15:21:03.000Z | 2020-02-15T15:21:03.000Z | source/guiComponents/tkinterImage.py | staujd02/Pi-RFID-Video-Player | 613d5a9355b660afb5414b3f4a9dad219b69fc36 | [
"Apache-2.0"
] | 8 | 2019-12-14T16:31:13.000Z | 2021-05-22T23:06:35.000Z | source/guiComponents/tkinterImage.py | staujd02/Pi-RFID-Video-Player | 613d5a9355b660afb5414b3f4a9dad219b69fc36 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
from PIL import ImageTk
class TkinterImage(object):
    """Associate an image file path with a Tk-displayable photo image.

    Loading is deferred: the file is only opened when getImage() is called.
    """

    def __init__(self, path):
        # Filesystem location of the image to load later.
        self.path = path

    def getImage(self):
        """Open the file at ``self.path`` with PIL and return it wrapped
        as an ``ImageTk.PhotoImage`` suitable for tkinter widgets."""
        source = Image.open(self.path)
        return ImageTk.PhotoImage(source)
99692f7ef577cebc4657de1c0837a81201c08b11 | 4,307 | py | Python | combine.py | online-behaviour/2017-election | b6c0b8a52336c26909b8c852de55d18d38a4cbfb | [
"Apache-2.0"
] | null | null | null | combine.py | online-behaviour/2017-election | b6c0b8a52336c26909b8c852de55d18d38a4cbfb | [
"Apache-2.0"
] | null | null | null | combine.py | online-behaviour/2017-election | b6c0b8a52336c26909b8c852de55d18d38a4cbfb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -W all
"""
combine: combine results of different machine learners
usage: combine -T train-file [ -t test-file] [-m]
note: expected input line format: gold-label label-1 label-2 ...
20180118 erikt(at)xs4all.nl
"""
import getopt
import sys
# Program name used as a prefix in error messages.
COMMAND = sys.argv[0]
# Fix: include the optional -m (print model) flag, which the parser accepts
# and the module docstring documents but the usage string omitted.
USAGE = "usage: "+COMMAND+" -T train-file [ -t test-file ] [-m]"
def processOpts(argv):
    """Parse the command line.

    Parameters
    ----------
    argv : list of str
        Full argument vector; argv[0] (the program name) is discarded.

    Returns
    -------
    (trainFile, testFile, printModel)
        trainFile/testFile are strings ("" when absent); printModel is a
        bool (fix: it was previously initialized to "" and used as a flag).

    Exits with USAGE on unknown options or when -T is missing.
    """
    argv.pop(0)
    try:
        options = getopt.getopt(argv, "mT:t:", [])
    except getopt.GetoptError:
        # Fix: catch only option-parsing errors instead of a bare except,
        # which also swallowed KeyboardInterrupt/SystemExit.
        sys.exit(USAGE)
    printModel = False
    trainFile = ""
    testFile = ""
    for option, value in options[0]:
        if option == "-T":
            trainFile = value
        elif option == "-t":
            testFile = value
        elif option == "-m":
            printModel = True
    if trainFile == "":
        sys.exit(USAGE)
    return trainFile, testFile, printModel
def applyModel(inFileName,model):
    """Apply a combination model to a labeled file and print its accuracy.

    inFileName: file whose lines are "gold-label label-1 label-2 ...".
    model: dict with keys "best system" (column index of the default
        system) and "exceptions" (mapping from a space-joined system-label
        string to the label to predict instead).
    Prints the combined accuracy next to the best individual system and
    returns 0.
    """
    try: inFile = open(inFileName,"r")
    except: sys.exit(COMMAND+": cannot open file "+inFileName)
    nbrOfFields = -1
    lineCount = 0
    correctCount = []
    correct = 0
    for line in inFile:
        lineCount += 1
        line = line.rstrip()
        fields = line.split()
        # Every line must have the same number of columns as the first.
        if nbrOfFields < 0: nbrOfFields = len(fields)
        if len(fields) != nbrOfFields:
            sys.exit(COMMAND+": unexpected line "+line)
        goldLabel = fields.pop(0)
        # Grow per-system correct counts lazily to the number of systems.
        for i in range(0,len(fields)):
            while len(correctCount) < i+1: correctCount.append(0)
            if fields[i] == goldLabel: correctCount[i] += 1
        # NOTE(review): the best system is recomputed for every line even
        # though only the final values are printed; harmless but wasteful.
        bestSystem = ""
        bestCount = -1
        for i in range(0,len(correctCount)):
            if correctCount[i] > bestCount:
                bestCount = correctCount[i]
                bestSystem = i
        dataWithoutLabel = " ".join(fields)
        bestLabel = ""
        # Exceptions override the default choice of the best single system.
        if dataWithoutLabel in model["exceptions"]:
            bestLabel = model["exceptions"][dataWithoutLabel]
        else: bestLabel = fields[model["best system"]]
        if bestLabel == goldLabel: correct += 1
    print("# correct: {0:0.1f}%; best individual system: {1:0.1f}% (system {2})".format(100*correct/lineCount,100*bestCount/lineCount,bestSystem+1))
    inFile.close()
    return(0)
def makeModel(inFileName,printModel):
    """Build a combination model from a labeled training file.

    inFileName: file whose lines are "gold-label label-1 label-2 ...".
    printModel: truthy to print each exception candidate while building.
    Returns {"best system": column index of the most accurate system,
             "exceptions": {system-label string -> majority gold label}}
    where an exception is recorded only when the majority label disagrees
    with the best system's prediction and occurs at least 5 times.
    """
    nbrOfFields = -1
    correctCount = []
    dataWithLabels = {}
    dataWithoutLabels = {}
    labels = {}
    lineCount = 0
    try: inFile = open(inFileName,"r")
    except: sys.exit(COMMAND+": cannot open file "+inFileName)
    for line in inFile:
        lineCount += 1
        line = line.rstrip()
        fields = line.split()
        # Every line must have the same number of columns as the first.
        if nbrOfFields < 0: nbrOfFields = len(fields)
        if len(fields) != nbrOfFields:
            sys.exit(COMMAND+": unexpected line "+line)
        goldLabel = fields.pop(0)
        # Per-system correct counts, grown lazily.
        for i in range(0,len(fields)):
            while len(correctCount) < i+1: correctCount.append(0)
            if fields[i] == goldLabel: correctCount[i] += 1
        # Count full lines (gold + system labels) and remember the
        # distinct system-label combinations and gold labels seen.
        if not line in dataWithLabels: dataWithLabels[line] = 1
        else: dataWithLabels[line] += 1
        dataWithoutLabel = " ".join(fields)
        dataWithoutLabels[dataWithoutLabel] = 1
        labels[goldLabel] = 1
    inFile.close()
    # Pick the single system with the highest correct count.
    bestSystem = ""
    bestCount = -1
    for i in range(0,len(correctCount)):
        if correctCount[i] > bestCount:
            bestCount = correctCount[i]
            bestSystem = i
    print("# best system: {0} ({1:0.1f}%)".format(bestSystem+1,100*bestCount/lineCount))
    # For each distinct combination of system labels, find the majority
    # gold label; record it as an exception when it beats the best system.
    exceptions = {}
    for dataWithoutLabel in dataWithoutLabels:
        bestLabel = "???"
        bestCount = -1
        for label in labels:
            key = label+" "+dataWithoutLabel
            if key in dataWithLabels and dataWithLabels[key] > bestCount:
                bestCount = dataWithLabels[key]
                bestLabel = label
        systemLabels = dataWithoutLabel.split()
        if systemLabels[bestSystem] != bestLabel and bestCount >= 5:
            exceptions[dataWithoutLabel] = bestLabel
        if printModel: print("{0} {1} {2}".format(bestCount,bestLabel,dataWithoutLabel))
    return({"best system":bestSystem,"exceptions":exceptions})
def main(argv):
    """Build a combination model from the training file and, when a test
    file was given, report the model's accuracy on it. Returns 0."""
    train_file, test_file, print_model = processOpts(argv)
    model = makeModel(train_file, print_model)
    if test_file != "":
        applyModel(test_file, model)
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 35.891667 | 148 | 0.604829 |
66cd9dc73bd4b8abd066dbd225712f192e4a22a0 | 6,187 | py | Python | pr_copula/main_copula_density.py | edfong/MP | 276ed8d7bb36d635ff2647c8a45622b5636b6087 | [
"MIT"
] | 5 | 2021-05-03T20:48:05.000Z | 2022-03-17T10:38:13.000Z | pr_copula/main_copula_density.py | edfong/MP | 276ed8d7bb36d635ff2647c8a45622b5636b6087 | [
"MIT"
] | null | null | null | pr_copula/main_copula_density.py | edfong/MP | 276ed8d7bb36d635ff2647c8a45622b5636b6087 | [
"MIT"
] | null | null | null | from scipy.optimize import minimize
from collections import namedtuple
import time
import numpy as np
from tqdm import tqdm
#import jax
import jax.numpy as jnp
from jax import vmap
from jax.random import permutation,PRNGKey,split
#import package functions
from . import copula_density_functions as mvcd
from . import sample_copula_density_functions as samp_mvcd
### Fitting ###
#Compute overhead v_{1:n}, return fit copula object for prediction
def fit_copula_density(y,n_perm = 10, seed = 20,n_perm_optim = None, single_bandwidth = True):
    """Fit the copula density: optimize the bandwidth rho and precompute v_{1:n}.

    Parameters
    ----------
    y : array of shape (n, d) — data to fit (d inferred from axis 1).
    n_perm : number of random permutations of the data to average over.
    seed : seed for both scipy (optimizer) and JAX (permutations).
    n_perm_optim : if not None, only this many permutations are used when
        optimizing the bandwidth (all n_perm are used for the final fit).
    single_bandwidth : one shared bandwidth (True) or one per dimension.

    Returns a namedtuple with fields vn_perm, rho_opt and preq_loglik.
    """
    #Set seed for scipy
    np.random.seed(seed)
    #Generate random permutations
    key = PRNGKey(seed)
    key,*subkey = split(key,n_perm +1 )
    subkey = jnp.array(subkey)
    y_perm = vmap(permutation,(0,None))(subkey,y)
    #Initialize parameter and put on correct scale to lie in [0,1]
    # rho is optimized through the logit-style transform below, so the
    # unconstrained hyperparameter maps back to (0, 1).
    d = jnp.shape(y)[1]
    if single_bandwidth == True:
        rho_init = 0.9*jnp.ones(1)
    else:
        rho_init = 0.9*jnp.ones(d)
    hyperparam_init = jnp.log(1/rho_init - 1)
    #calculate rho_opt
    #either use all permutations or a selected number to fit bandwidth
    if n_perm_optim is None:
        y_perm_opt = y_perm
    else:
        y_perm_opt = y_perm[0:n_perm_optim]
    #Compiling
    # Trigger JAX jit compilation up front so the timed phases below
    # measure computation rather than tracing/compilation.
    print('Compiling...')
    start = time.time()
    temp = mvcd.fun_jll_perm_sp(hyperparam_init,y_perm_opt)
    temp = mvcd.grad_jll_perm_sp(hyperparam_init,y_perm_opt)
    temp = mvcd.update_pn_loop_perm(rho_init,y_perm)[0].block_until_ready()
    end = time.time()
    print('Compilation time: {}s'.format(round(end-start, 3)))
    print('Optimizing...')
    start = time.time()
    opt = minimize(fun = mvcd.fun_jll_perm_sp, x0= hyperparam_init,\
                   args = (y_perm_opt),jac =mvcd.grad_jll_perm_sp,method = 'SLSQP')
    #check optimization succeeded
    if opt.success == False:
        print('Optimization failed')
    #unscale hyperparameter
    # Inverse of the log(1/rho - 1) transform applied above.
    hyperparam_opt = opt.x
    rho_opt = 1/(1+jnp.exp(hyperparam_opt))
    end = time.time()
    print('Optimization time: {}s'.format(round(end-start, 3)))
    print('Fitting...')
    start = time.time()
    vn_perm= mvcd.update_pn_loop_perm(rho_opt,y_perm)[0].block_until_ready()
    end = time.time()
    print('Fit time: {}s'.format(round(end-start, 3)))
    # preq_loglik is the negated optimizer objective at the optimum.
    copula_density_obj = namedtuple('copula_density_obj',['vn_perm','rho_opt','preq_loglik'])
    return copula_density_obj(vn_perm,rho_opt,-opt.fun)
#Predict on test data using copula object
def predict_copula_density(copula_density_obj, y_test):
    """Evaluate the fitted, permutation-averaged copula density on ``y_test``.

    Returns (logcdf_conditionals, logpdf_joints) as produced by
    mvcd.update_ptest_loop_perm_av, printing the elapsed wall time.
    """
    print('Predicting...')
    t_start = time.time()
    logcdf_conditionals, logpdf_joints = mvcd.update_ptest_loop_perm_av(
        copula_density_obj.vn_perm, copula_density_obj.rho_opt, y_test
    )
    # Block on the asynchronous JAX computation so the timing is accurate.
    logcdf_conditionals = logcdf_conditionals.block_until_ready()
    print('Prediction time: {}s'.format(round(time.time() - t_start, 3)))
    return logcdf_conditionals, logpdf_joints
#Sample from predictive density p_n
def sample_copula_density(copula_density_obj,B_samples,seed = 100):
    """Draw B_samples points from the predictive density p_n by inverting
    the conditional cdfs at uniform random variates.

    Returns (y_samp, err, n_iter): samples of shape (B_samples, d), the
    per-sample cdf inversion error, and the iteration counts.
    """
    d = np.shape(copula_density_obj.vn_perm)[2]
    #Compiling
    # Warm-up call so JAX compilation is excluded from the sampling timing.
    print('Compiling...')
    start = time.time()
    temp = samp_mvcd.compute_quantile_pn_av(copula_density_obj.vn_perm,copula_density_obj.rho_opt,0.5*np.ones(d))
    end = time.time()
    print('Compilation time: {}s'.format(round(end-start, 3)))
    #Initialize
    y_samp = np.zeros((B_samples,d))
    err = np.zeros(B_samples)
    n_iter = np.zeros(B_samples)
    #Simulate uniform random variables
    np.random.seed(seed)
    un = np.random.rand(B_samples,d)
    #Sampling
    # Inverse-cdf sampling: one quantile computation per uniform draw.
    print('Sampling...')
    start = time.time()
    for i in tqdm(range(B_samples)):
        y_samp[i],err[i],n_iter[i] = samp_mvcd.compute_quantile_pn_av(copula_density_obj.vn_perm,copula_density_obj.rho_opt,un[i])
    end = time.time()
    print('Sampling time: {}s'.format(round(end-start, 3)))
    print(f'Max abs error in cdf: {np.sqrt(np.max(err)):.2e}')
    return y_samp,err,n_iter
### ###
### Predictive Resampling ###
#Forward sampling without diagnostics for speed
def predictive_resample_density(copula_density_obj, y_test, B_postsamples, T_fwdsamples=5000, seed=100):
    """Draw B_postsamples posterior samples of the density by predictive
    resampling: start from the permutation-averaged predictive fitted on
    the observed data, then forward-sample T_fwdsamples steps per chain.

    Returns per-sample (logcdf_conditionals, logpdf_joints) arrays.
    """
    # Predictive cdf/pdf on the test grid, averaged over permutations.
    logcdf_conditionals, logpdf_joints = predict_copula_density(copula_density_obj, y_test)
    # One independent PRNG key per posterior sample.
    root_key = PRNGKey(seed)
    root_key, *chain_keys = split(root_key, B_postsamples + 1)
    chain_keys = jnp.array(chain_keys)
    # Number of observations the copula object was fitted on.
    n_obs = jnp.shape(copula_density_obj.vn_perm)[1]
    print('Predictive resampling...')
    t_start = time.time()
    logcdf_conditionals_pr, logpdf_joints_pr = samp_mvcd.predictive_resample_loop_B(
        chain_keys, logcdf_conditionals, logpdf_joints,
        copula_density_obj.rho_opt, n_obs, T_fwdsamples
    )
    # Block on the asynchronous JAX computation so the timing is accurate.
    logcdf_conditionals_pr = logcdf_conditionals_pr.block_until_ready()
    print('Predictive resampling time: {}s'.format(round(time.time() - t_start, 3)))
    return logcdf_conditionals_pr, logpdf_joints_pr
#Check convergence by running 1 long forward sample chain
def check_convergence_pr(copula_density_obj,y_test,B_postsamples,T_fwdsamples = 10000, seed = 100):
    """Diagnose predictive-resampling convergence with long forward chains.

    Same setup as predictive_resample_density, but uses the convergence
    variant of the forward loop, which additionally returns the pdf/cdf
    increments (pdiff, cdiff) along the chains.
    """
    #Fit permutation averaged cdf/pdf
    logcdf_conditionals,logpdf_joints = predict_copula_density(copula_density_obj,y_test)
    # #Initialize random seeds
    # One independent PRNG key per posterior sample.
    key = PRNGKey(seed)
    key,*subkey = split(key,B_postsamples+1)
    subkey = jnp.array(subkey)
    #Forward sample
    n = jnp.shape(copula_density_obj.vn_perm)[1] #get original data size
    print('Predictive resampling...')
    start = time.time()
    logcdf_conditionals_pr,logpdf_joints_pr,pdiff,cdiff = samp_mvcd.pr_loop_conv_B(subkey,logcdf_conditionals,logpdf_joints,\
        copula_density_obj.rho_opt,n,T_fwdsamples)
    logcdf_conditionals_pr = logcdf_conditionals_pr.block_until_ready() #for accurate timing
    end = time.time()
    print('Predictive resampling time: {}s'.format(round(end-start, 3)))
    return logcdf_conditionals_pr,logpdf_joints_pr,pdiff,cdiff
### ###
| 37.271084 | 142 | 0.700501 |
e4d8dda3bd4f6cca99d7c3f442ebbf596879b689 | 10,802 | py | Python | docker/docker.py | 1105042987/Dominant-Patterns | 713b535e80aff0f04e20d1ef56d005e183a5d8a5 | [
"MIT"
] | 1 | 2021-06-14T12:01:24.000Z | 2021-06-14T12:01:24.000Z | docker/docker.py | 1105042987/Dominant-Patterns | 713b535e80aff0f04e20d1ef56d005e183a5d8a5 | [
"MIT"
] | null | null | null | docker/docker.py | 1105042987/Dominant-Patterns | 713b535e80aff0f04e20d1ef56d005e183a5d8a5 | [
"MIT"
] | null | null | null | import os,sys
base = sys.path[0]
sys.path.append(os.path.abspath(os.path.join(base, "..")))
import torch
import shutil
import importlib
import traceback
from tqdm import tqdm
from os.path import join as PJOIN
from tensorboardX import SummaryWriter
from collections import Iterator
from docker.tool import meter,yellow
import torch.nn as nn
import gc
class Docker(object):
def __init__(self,cfg):
super(Docker,self).__init__()
print(yellow('Compiling the model ...'))
network_file = 'model.{}'.format(cfg.system['net'][0])
dataset_file = 'dataset.{}'.format(cfg.dataset['file_name'])
network_module = importlib.import_module(network_file)
dataset_module = importlib.import_module(dataset_file)
self.dev = torch.device('cuda', cfg.system['gpu'][0]) if len(cfg.system['gpu'])>=1 and \
torch.cuda.is_available() else torch.device('cpu')
self.multi_dev = len(cfg.system['gpu'])>1
self.epoch = 'test'
self.net = getattr(network_module,cfg.system['net'][1])(**cfg.system['net_param'])
self.load_param(cfg,'net')
self.net = self.net.to(self.dev)
if self.multi_dev and torch.cuda.device_count()>1:
self.net = nn.DataParallel(self.net,cfg.system['gpu'])
self.criterion = network_module.loss(**cfg.system['loss_param'])
if cfg.mode == 'train':
self.best = None
self.epoch_start = 1
self.eval_on_train = cfg.optimizer['eval_on_train']
self.epoch_end = cfg.optimizer['max_epoch'] + 1
self.save_epoch = cfg.optimizer['save_epoch']
self.max_batch = cfg.optimizer['max_batch']
if cfg.optimizer['type'] == 'adam':
self.opt = torch.optim.Adam(self.net.parameters(), # filter(lambda p:p.requires_grad, self.net.parameters()),
lr=cfg.optimizer['learning_rate'], **cfg.optimizer['adam'])
elif cfg.optimizer['type'] == 'sgd':
self.opt = torch.optim.SGD(self.net.parameters(), #filter(lambda p:p.requires_grad, self.net.parameters()),
lr=cfg.optimizer['learning_rate'], **cfg.optimizer['sgd'])
self.sch = torch.optim.lr_scheduler.MultiStepLR(self.opt, cfg.optimizer['milestones'],
gamma=cfg.optimizer['decay_rate'], last_epoch=-1)
self.load_param(cfg,'others')
print(yellow('Loading the dataset ...'))
if cfg.mode == 'train':
self.trainloader = dataset_module.dataloader(cfg.dataset[cfg.mode],cfg.mode)
if self.max_batch is None: self.max_batch = len(self.trainloader)
self.testloader = dataset_module.dataloader(cfg.dataset['test'],
'test') if cfg.optimizer['test_on_train'] else None
else:
self.testloader = dataset_module.dataloader(cfg.dataset[cfg.mode],cfg.mode)
self.result_dir = cfg.system['result_dir']
self.evaluate = network_module.evaluate(**cfg.system['evaluate_param'])
self.evaluate.result_dir = PJOIN(self.result_dir,'save')
self.writer = SummaryWriter(PJOIN(self.result_dir,'tensorboard')) if cfg.mode == 'train' else None
def load_param(self,cfg,obj):
direct = cfg.system['load_path']
if direct is None: return
if obj == 'net':
weight = torch.load(PJOIN(direct,'weight.pth'), map_location=lambda storage, loc:storage)
self.net.load_state_dict(weight)
else:
other = torch.load(PJOIN(direct,'others.pth'), map_location=lambda storage, loc:storage)
if cfg.mode == 'test':
print('Test at position: {}, Epoch: {}'.format(yellow(direct),yellow(other.get('epoch','Unknow'))))
else:
self.opt.load_state_dict(other['opt'])
self.sch.load_state_dict(other['sch'])
self.best = other.get('cur_loss', None)
self.epoch_start += other.get('epoch',0)
self.epoch_end += other.get('epoch',0)
def save(self,loss_now):
best_save = PJOIN(self.result_dir,'ckp','best')
if self.epoch == self.epoch_start:
os.makedirs(best_save)
if self.best is None: self.best = loss_now
self.__save_param(best_save,self.best)
return
if loss_now < self.best:
self.best = loss_now
self.__save_param(best_save, self.best)
if self.epoch != self.epoch_end-1:
if self.save_epoch == 0: return # Just save the best
if self.epoch % self.save_epoch != 0: return # Save every save epoch
now_save = PJOIN(self.result_dir,'ckp',str(self.epoch))
os.makedirs(now_save)
self.__save_param(now_save, loss_now)
def log_record(self,dic,board_name,additional={}):
log = 'Epoch:{:0>4} '.format(self.epoch)
for key,val in dic.items():
log+='{}:{} '.format(key,val)
for key,val in additional.items():
log+='{}:{} '.format(key,val)
if len(dic)>4:
print(board_name)
print(log.replace(' ','\n\r'))
else:
print(board_name,log)
if self.writer is not None:
with open(PJOIN(self.result_dir,board_name+'_log.txt'),'a+') as f:
f.write(log+'\n')
self.writer.add_scalars(board_name, dic, self.epoch)
else:
with open(PJOIN(self.result_dir,'FinalTest.txt'),'w') as f:
f.write(log.replace(' ','\n\r'))
def train(self,with_tqdm=True):
print(yellow('Training begin:'))
try:
loss_meter = meter()
main_loss_meter = meter()
eval_meter = meter()
for self.epoch in range(self.epoch_start, self.epoch_end):
self.net.train()
loss_meter.reset()
main_loss_meter.reset()
eval_meter.reset()
pbar = tqdm(total=self.max_batch, desc='Training Epoch {}'.format(self.epoch), ascii=True, ncols=130)
for idx,data in enumerate(self.trainloader):
if idx >= self.max_batch: break
self.opt.zero_grad()
if isinstance(data, Iterator):
preds, targets = [], []
for d in tqdm(data, ascii=True, leave=False, ncols=130):
inputs, sub_pred, sub_tar = self.__step(d)
preds.append(sub_pred)
targets.append(sub_tar)
preds = torch.cat(preds,0)
targets = torch.cat(targets,0)
else:
inputs, preds, targets = self.__step(data)
loss, record_dic = self.criterion(preds, targets)
loss.backward()
self.opt.step()
if self.eval_on_train:
eval_dic = self.evaluate(inputs, preds, targets, False, False)
eval_meter.add(eval_dic)
loss_meter.add(record_dic)
main_loss_meter.add({'main':loss.item()})
if with_tqdm:
pbar.set_postfix(record_dic)
pbar.update()
pbar.close()
self.sch.step()
self.log_record(loss_meter.mean(), 'Train_Loss')
if self.eval_on_train:
log_dic = eval_meter.mean()
self.log_record(log_dic, 'Train_Eval', self.evaluate.final_call())
if self.testloader is not None:
self.test(False,False,with_tqdm)
self.save(main_loss_meter.mean()['main'])
self.writer.close()
except:
pbar.close()
self.writer.close()
traceback.print_exc()
sys.stdout.flush()
key = input(yellow('\nDo you want to reserve this train (Default No)? y/n: '))
if key != 'y':
shutil.rmtree(self.result_dir)
    def test(self,visualize=False,save_result=False,with_tqdm=True):
        """Evaluate self.net on self.testloader without gradients.

        Logs per-batch criterion losses and evaluation metrics, records the
        epoch means via log_record, and returns the mean evaluation dict.
        """
        self.net.eval()
        loss_meter = meter()
        eval_meter = meter()
        pbar = tqdm(total=len(self.testloader), desc='Testing', ascii=True, ncols=130)
        with torch.no_grad():
            for idx, data in enumerate(self.testloader):
                if isinstance(data, Iterator):
                    # Loader yielded an iterator of sub-batches: forward each
                    # one and concatenate before computing the loss.
                    preds, targets = [], []
                    for d in tqdm(data, ascii=True, leave=False, ncols=130):
                        inputs, sub_pred, sub_tar = self.__step(d)
                        preds.append(sub_pred)
                        targets.append(sub_tar)
                        # Aggressively release GPU memory between sub-batches.
                        gc.collect()
                        torch.cuda.empty_cache()
                    preds = torch.cat(preds,0)
                    targets = torch.cat(targets,0)
                else:
                    inputs,preds,targets = self.__step(data)
                    gc.collect()
                    torch.cuda.empty_cache()
                loss, loss_dic = self.criterion(preds, targets)
                eval_dic = self.evaluate(inputs, preds, targets, visualize, save_result)
                loss_meter.add(loss_dic)
                eval_meter.add(eval_dic)
                if with_tqdm:
                    pbar.set_postfix(loss_dic)
                pbar.update()
        pbar.close()
        self.log_record(loss_meter.mean(),'Test_Loss')
        log_dic = eval_meter.mean()
        self.log_record(log_dic, 'Test_Eval', self.evaluate.final_call())
        return eval_meter.mean()
def __save_param(self,_dir,_loss):
if self.multi_dev:
torch.save(self.net.module.state_dict(), PJOIN(_dir,'weight.pth'))
else:
torch.save(self.net.state_dict(), PJOIN(_dir,'weight.pth'))
torch.save({
'opt': self.opt.state_dict(),
'sch': self.sch.state_dict(),
'epoch':self.epoch,
'cur_loss': _loss,
}, PJOIN(_dir,'others.pth'))
def __step(self,data):
if self.multi_dev:
inputs, targets = data[0], to_dev(data[1], self.dev)
else:
inputs, targets = to_dev(data,self.dev)
preds = to_dev(self.net(inputs), self.dev) if self.multi_dev else self.net(inputs)
return inputs,preds,targets
def to_dev(data, dev):
    """Recursively move `data` onto device `dev`.

    Tuples/lists are traversed recursively and always come back as lists
    (matching the original behavior); strings pass through unchanged;
    anything else is assumed to support `.to(dev)` (e.g. torch tensors).

    :param data: tensor, string, or (nested) tuple/list of them.
    :param dev: target device accepted by ``Tensor.to``.
    """
    # isinstance (instead of `type(data) in [tuple, list]`) also accepts
    # subclasses and avoids the linear scan over a list of types.
    if isinstance(data, (tuple, list)):
        return [to_dev(x, dev) for x in data]
    if isinstance(data, str):
        return data
    return data.to(dev)
| 43.732794 | 126 | 0.548324 |
66800340d7cba567183c954d93c4fc6b8b67f7d6 | 4,743 | py | Python | src/gen_types.py | mdda/libgpuarray | 5e9d33b3ad80684158938c2937a81161939992eb | [
"0BSD"
] | null | null | null | src/gen_types.py | mdda/libgpuarray | 5e9d33b3ad80684158938c2937a81161939992eb | [
"0BSD"
] | null | null | null | src/gen_types.py | mdda/libgpuarray | 5e9d33b3ad80684158938c2937a81161939992eb | [
"0BSD"
] | null | null | null | import sys
from mako import exceptions
from mako.template import Template
TYPEMAP = {}
i = 0
def add_type(name, C, sz):
    """Register scalar type `name` (C type `C`, `sz` bytes) under the next code.

    Appends to the module-level TYPEMAP using the running counter `i`; the
    stored entry is (("ga_" + name, size), name, C type).
    """
    global i
    TYPEMAP[i] = (("ga_" + name, sz), name, C)
    i = i + 1
# Scalar type registration: codes assigned in order, kept in sync with
# numpy's dtype numbering (hence the assert and the jump to 23 below).
add_type("bool", "uint8_t", 1)
add_type("byte", "int8_t", 1)
add_type("ubyte", "uint8_t", 1)
# Signed/unsigned fixed-width integers: short/int/long at 2/4/8 bytes.
for name, sz in [("short", 2), ("int", 4), ("long", 8)]:
    add_type(name, "int%s_t"%(sz*8,), sz)
    add_type("u"+name, "uint%s_t"%(sz*8,), sz)
add_type("longlong", "int128_t", 16)
add_type("ulonglong", "uint128_t", 16)
add_type("float", "float", 4)
add_type("double", "double", 8)
add_type("quad", "ga_quad", 16)
add_type("cfloat", "ga_cfloat", 8)
add_type("cdouble", "ga_cdouble", 16)
add_type("cquad", "ga_cquad", 32)
assert i <= 23
i=23 # to sync with numpy.
# "size" has a platform-dependent size, so the C expression is stored instead
# of an integer literal.
add_type("half", "half_t", 2);
add_type("size", "size_t", "sizeof(size_t)");
decls = """
#ifdef _MSC_VER
typedef signed __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef signed __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef signed __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
typedef struct _int128 {
union int128_u {
int8_t as_int8[16];
int16_t as_int16[8];
int32_t as_int32[4];
int64_t as_int64[2];
} value;
} int128_t;
typedef struct _uint128 {
union uint128_u {
uint8_t as_uint8[16];
uint16_t as_uint16[8];
uint32_t as_uint32[4];
uint64_t as_uint64[2];
} value;
} uint128_t;
typedef struct _quad {
union {
struct {
int16_t exp;
uint16_t hi;
uint32_t lo;
};
uint128_t raw;
} u;
} ga_quad;
typedef uint16_t half_t;
typedef struct _cfloat {
float r;
float i;
} ga_cfloat;
typedef struct _cdouble {
double r;
double i;
} ga_cdouble;
typedef struct _cquad {
ga_quad r;
ga_quad i;
} ga_cquad;
"""
ntypes = i
VECTORMAP = {}
i = 0
def add_type(name, sz):
    """Register vector type `name` of `sz` total bytes under the next code.

    Appends to the module-level VECTORMAP using the running counter `i`; the
    stored entry is (("ga_" + name, size, "GA_" + NAME), name).
    """
    global i
    VECTORMAP[i] = (("ga_" + name, sz, "GA_" + name.upper()), name)
    i = i + 1
# Vector type registration: byte/short/int/long in widths 2,3,4,8,16 and
# float/double/half in widths 2,4,8,16; the stored size is element size * width.
for s in [2, 3, 4, 8, 16]:
    add_type("byte"+str(s), s)
    add_type("ubyte"+str(s), s)
for name, sz in [("short", 2), ("int", 4), ("long", 8)]:
    for s in [2, 3, 4, 8, 16]:
        add_type(name+str(s), sz*s)
        add_type("u"+name+str(s), sz*s)
for name, sz in [("float", 4), ("double", 8), ("half", 2)]:
    for s in [2, 4, 8, 16]:
        add_type(name+str(s), sz*s)
nvec = i
head_tmpl = Template(""" /* This file is generated by gen_types.py */
/** \\file types.h
* \\brief Type declarations and access.
*/
#ifndef GPUARRAY_TYPES_H
#define GPUARRAY_TYPES_H
#include <sys/types.h>
#include <stddef.h>
#include <gpuarray/config.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CONFUSE_EMACS
}
#endif
/**
* Structure that holds the properties of a type.
*/
typedef struct _gpuarray_type {
/**
* Type name to use in the buffers.
*/
const char *cluda_name;
/**
* Size of one element (in bytes).
*/
size_t size;
/**
* Alignement requirement for the type.
*/
size_t align;
/**
* Code for the type.
*/
int typecode;
} gpuarray_type;
/**
* List of all built-in types.
*/
enum GPUARRAY_TYPES {
GA_POINTER = -2,
GA_BUFFER = -1,
% for i, v in sorted(TYPEMAP.items()):
GA_${v[1].upper()} = ${i},
% endfor
/** \\cond INTERNAL_DOCS */
GA_NBASE = ${ntypes},
GA_DELIM = 255, /* To be forward-compatible with numpy */
/** \\endcond */
% for i, v in sorted(VECTORMAP.items()):
GA_${v[1].upper()},
% endfor
/** \\cond INTERNAL_DOCS */
GA_NVEC,
GA_ENDVEC = 512
/** \\endcond */
};
#ifdef __cplusplus
}
#endif
#endif /* GPUARRAY_TYPES */
""")
impl_tmpl = Template(""" /* This file is generated by gen_types.py */
#include "gpuarray/types.h"
#include <stdlib.h> /* For NULL */
${decls}
% for k, v in TYPEMAP.items():
typedef struct {char c; ${v[2]} x; } st_${v[1]};
#define ${v[1].upper()}_ALIGN (sizeof(st_${v[1]}) - sizeof(${v[2]}))
% endfor
const gpuarray_type scalar_types[] = {
% for i in range(ntypes):
% if i in TYPEMAP:
{"${TYPEMAP[i][0][0]}", ${TYPEMAP[i][0][1]}, ${TYPEMAP[i][1].upper()}_ALIGN, GA_${TYPEMAP[i][1].upper()}},
% else:
{NULL, 0, 0, -1},
% endif
% endfor
};
const gpuarray_type vector_types[] = {
% for i, v in sorted(VECTORMAP.items()):
{"${v[0][0]}", ${v[0][1]}, 0, GA_${v[1].upper()}},
% endfor
};
""")
# Render both mako templates; on a template error, print mako's annotated
# traceback (Python 2 print statement -- this script is py2) and abort.
try:
    header = head_tmpl.render(TYPEMAP=TYPEMAP, VECTORMAP=VECTORMAP, ntypes=ntypes)
    impl = impl_tmpl.render(TYPEMAP=TYPEMAP, VECTORMAP=VECTORMAP, ntypes=ntypes, decls=decls)
except Exception:
    print exceptions.text_error_template().render()
    sys.exit(1)
# Write the generated public header and the C implementation tables.
with open("gpuarray/types.h", "w") as f:
    f.write(header)
with open("gpuarray_types.c", "w") as f:
    f.write(impl)
| 20.097458 | 108 | 0.623445 |
8fa24ae6d5eb659820e0717b905bfaa4ebcc1041 | 2,855 | py | Python | bluebrain/repo-patches/packages/highfive/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | bluebrain/repo-patches/packages/highfive/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | bluebrain/repo-patches/packages/highfive/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Highfive(CMakePackage):
    """HighFive - Header only C++ HDF5 interface"""
    homepage = "https://github.com/BlueBrain/HighFive"
    url = "https://github.com/BlueBrain/HighFive/archive/v2.0.tar.gz"
    git = "https://github.com/BlueBrain/HighFive.git"
    # Known releases, newest first; tarball-based versions are pinned by sha256,
    # newer ones track git tags.
    version('master', branch='master')
    version('2.4.1', tag='v2.4.1')
    version('2.4.0', tag='v2.4.0')
    version('2.3.1', tag='v2.3.1')
    version('2.3', tag='v2.3')
    version('2.2.2', tag='v2.2.2')
    version('2.2.1', tag='v2.2.1')
    version('2.1.1', tag='v2.1.1')
    version('2.0', sha256='deee33d7f578e33dccb5d04771f4e01b89a980dd9a3ff449dd79156901ee8d25')
    version('1.5', sha256='f194bda482ab15efa7c577ecc4fb7ee519f6d4bf83470acdb3fb455c8accb407')
    version('1.2', sha256='4d8f84ee1002e8fd6269b62c21d6232aea3d56ce4171609e39eb0171589aab31')
    version('1.1', sha256='430fc312fc1961605ffadbfad82b9753a5e59482e9fbc64425fb2c184123d395')
    version('1.0', sha256='d867fe73d00817f686d286f3c69a23731c962c3e2496ca1657ea7302cd0bb944')
    # This is a header-only lib so dependencies shall be specified in the
    # target project directly and never specified here since they get truncated
    # when installed as external packages (which makes sense to improve reuse)
    variant('boost', default=True, description='Support Boost')
    variant('mpi', default=True, description='Support MPI')
    variant('eigen', default=False, description='Support Eigen')
    variant('xtensor', default=False, description='Support xtensor')
    # Develop builds tests which require boost
    conflicts('~boost', when='@develop')
    depends_on('boost @1.41:', when='+boost')
    # hdf5's MPI support must match the variant chosen for this package.
    depends_on('hdf5 ~mpi', when='~mpi')
    depends_on('hdf5 +mpi', when='+mpi')
    depends_on('eigen', when='+eigen')
    depends_on('xtensor', when='+xtensor')
    depends_on('mpi', when='+mpi')
    def cmake_args(self):
        """Translate the enabled variants into CMake cache definitions."""
        return [
            '-DUSE_BOOST:Bool=' + str(self.spec.satisfies('+boost')),
            '-DUSE_EIGEN:Bool=' + str(self.spec.satisfies('+eigen')),
            '-DUSE_XTENSOR:Bool=' + str(self.spec.satisfies('+xtensor')),
            '-DHIGHFIVE_PARALLEL_HDF5:Bool='
            + str(self.spec.satisfies('+mpi')),
            # Examples/tests are only built on the develop (@develop) version.
            '-DHIGHFIVE_EXAMPLES:Bool='
            + str(self.spec.satisfies('@develop')),
            '-DHIGHFIVE_UNIT_TESTS:Bool='
            + str(self.spec.satisfies('@develop')),
            '-DHIGHFIVE_TEST_SINGLE_INCLUDES:Bool='
            + str(self.spec.satisfies('@develop')),
            '-DHDF5_NO_FIND_PACKAGE_CONFIG_FILE=1', # Dont use targets
            '-DHIGHFIVE_USE_INSTALL_DEPS:Bool=ON',
        ]
| 43.923077 | 93 | 0.663398 |
87f104de1a8187ad8950a53a846e75efe66643e9 | 8,822 | py | Python | test/master_buffering_test.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | [
"BSD-3-Clause"
] | 1 | 2021-03-14T10:04:18.000Z | 2021-03-14T10:04:18.000Z | test/master_buffering_test.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | [
"BSD-3-Clause"
] | null | null | null | test/master_buffering_test.py | AndyDiamondstein/vitess | 295c300cd22c109f8be7a454c03c96c6b8e3b55c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
"""Tests that VTGate buffers master traffic when expected."""
import logging
import struct
import unittest
from vtdb import keyrange
from vtdb import vtgate_client
import environment
import tablet
import utils
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
KEYSPACE_NAME = 'test_keyspace'
SHARD_NAMES = ['0']
SHARD_KID_MAP = {
'0': [
527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533,
9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
CREATE_VT_INSERT_TEST = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
keyspace_id bigint(20) unsigned NOT NULL,
primary key (id)
) Engine=InnoDB'''
create_tables = [
CREATE_VT_INSERT_TEST,
]
pack_kid = struct.Struct('!Q').pack
def setUpModule():
  """Module-level setup: start MySQL, bring up tablets/vtgate and seed data."""
  logging.debug('in setUpModule')
  try:
    environment.topo_server().setup()
    # start mysql instance external to the test
    setup_procs = [shard_0_master.init_mysql(),
                   shard_0_replica1.init_mysql(),
                  ]
    utils.wait_procs(setup_procs)
    setup_tablets()
    setup_vtgate()
    # After VTGate comes up, populate it with some initial data
    initial_writes(0, keyrange.KeyRange(''))
  except Exception, e:
    # Tear down whatever was started so a failed setup does not leak processes.
    logging.exception('error during set up: %s', e)
    tearDownModule()
    raise
def tearDownModule():
  """Module-level teardown: stop tablets/MySQL and clean temporary state."""
  logging.debug('in tearDownModule')
  utils.required_teardown()
  # Honor --skip-teardown so failures can be inspected post-mortem.
  if utils.options.skip_teardown:
    return
  logging.debug('Tearing down the servers and setup')
  tablet.kill_tablets([shard_0_master,
                       shard_0_replica1])
  teardown_procs = [shard_0_master.teardown_mysql(),
                    shard_0_replica1.teardown_mysql(),
                   ]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  shard_0_master.remove_tree()
  shard_0_replica1.remove_tree()
def setup_tablets():
  """Create the keyspace, init master/replica tablets and verify serving state."""
  # Start up a master mysql and vttablet
  logging.debug('Setting up tablets')
  utils.run_vtctl(['CreateKeyspace', KEYSPACE_NAME])
  utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', KEYSPACE_NAME,
                   'keyspace_id', 'uint64'])
  shard_0_master.init_tablet(
      'master',
      keyspace=KEYSPACE_NAME,
      shard='0',
      tablet_index=0)
  shard_0_replica1.init_tablet(
      'replica',
      keyspace=KEYSPACE_NAME,
      shard='0',
      tablet_index=1)
  utils.run_vtctl(['RebuildKeyspaceGraph', KEYSPACE_NAME], auto_log=True)
  # Create the test schema on every tablet before starting vttablet.
  for t in [shard_0_master, shard_0_replica1]:
    t.create_db('vt_test_keyspace')
    for create_table in create_tables:
      t.mquery(shard_0_master.dbname, create_table)
    t.start_vttablet(wait_for_state=None, target_tablet_type='replica')
  for t in [shard_0_master]:
    t.wait_for_vttablet_state('SERVING')
  for t in [shard_0_replica1]:
    t.wait_for_vttablet_state('NOT_SERVING')
  # Electing the master makes the replica start serving as well.
  utils.run_vtctl(['InitShardMaster', KEYSPACE_NAME+'/0',
                   shard_0_master.tablet_alias], auto_log=True)
  for t in [shard_0_replica1]:
    utils.wait_for_tablet_type(t.tablet_alias, 'replica')
  for t in [shard_0_master, shard_0_replica1]:
    t.wait_for_vttablet_state('SERVING')
  utils.run_vtctl(
      ['RebuildKeyspaceGraph', KEYSPACE_NAME], auto_log=True)
  utils.check_srv_keyspace(
      'test_nj', KEYSPACE_NAME,
      'Partitions(master): -\n'
      'Partitions(rdonly): -\n'
      'Partitions(replica): -\n')
def setup_vtgate(port=None, extra_args=None):
  """Start a vtgate process and wait until it sees both shard endpoints."""
  utils.VtGate(port=port).start(
      extra_args=extra_args,
      tablets=[shard_0_master, shard_0_replica1])
  utils.vtgate.wait_for_endpoints(
      '%s.%s.master' % (KEYSPACE_NAME, SHARD_NAMES[0]),
      1)
  utils.vtgate.wait_for_endpoints(
      '%s.%s.replica' % (KEYSPACE_NAME, SHARD_NAMES[0]),
      1)
def initial_writes(shard_index, writes_keyrange):
  """Insert 10 rows through vtgate and verify a master read returns them."""
  vtgate_conn = get_connection()
  _delete_all('vt_insert_test')
  count = 10
  kid_list = SHARD_KID_MAP[SHARD_NAMES[shard_index]]
  for x in xrange(count):
    # NOTE(review): indexes with `count` (a constant), not `x`, so every row
    # gets the same keyspace_id -- looks unintended; confirm before changing.
    keyspace_id = kid_list[count%len(kid_list)]
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=KEYSPACE_NAME,
        keyspace_ids=[pack_kid(keyspace_id)],
        writable=True)
    cursor.begin()
    cursor.execute(
        'insert into vt_insert_test (msg, keyspace_id) '
        'values (:msg, :keyspace_id)',
        {'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
    cursor.commit()
  # Read back over the whole keyrange from the master to confirm the writes.
  cursor = vtgate_conn.cursor(
      tablet_type='master', keyspace=KEYSPACE_NAME,
      keyranges=[writes_keyrange])
  rowcount = cursor.execute('select * from vt_insert_test', {})
  assert rowcount == count, 'master fetch works'
def get_connection(timeout=10.0):
  """Open a vtgate client connection, logging and re-raising on failure."""
  protocol, endpoint = utils.vtgate.rpc_endpoint(python=True)
  try:
    conn = vtgate_client.connect(protocol, endpoint, timeout)
  except Exception:
    logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
    raise
  return conn
def _delete_all(table_name):
  """Empty `table_name` via a master transaction over the full keyrange."""
  vtgate_conn = get_connection()
  # This write is to set up the test with fresh insert
  # and hence performing it directly on the connection.
  vtgate_conn.begin()
  vtgate_conn._execute(
      'delete from %s' % table_name, {},
      tablet_type='master', keyspace_name=KEYSPACE_NAME,
      keyranges=[keyrange.KeyRange('')])
  vtgate_conn.commit()
def restart_vtgate(extra_args=None):
  """Kill the running vtgate and start a fresh one on the same port."""
  args = extra_args if extra_args is not None else []
  port = utils.vtgate.port
  utils.vtgate.kill()
  setup_vtgate(port=port, extra_args=args)
class BaseTestCase(unittest.TestCase):
  """Base test case that logs each test's start for easier log correlation."""
  def setUp(self):
    super(BaseTestCase, self).setUp()
    # Log "Class.method" (last two components of the full test id).
    logging.info('Start: %s.', '.'.join(self.id().split('.')[-2:]))
# TODO(liguo): once we have the final master buffering code in place, these
# tests should verify that we only buffer when the master is unavailable.
class TestMasterBuffering(BaseTestCase):
  """Checks which request types vtgate buffers for the configured shard."""
  shard_index = 0
  keyrange = keyrange.KeyRange('')
  def setUp(self):
    super(TestMasterBuffering, self).setUp()
    # Restart vtgate with fake buffering enabled on our keyspace/shard so
    # the Buffered* vars get populated.
    restart_vtgate(extra_args=[
        '-enable_fake_master_buffer',
        '-buffer_keyspace', KEYSPACE_NAME,
        '-buffer_shard', SHARD_NAMES[self.shard_index],
        '-fake_buffer_delay', '1ms',
    ])
  def get_sucessful_buffered_requests(self):
    # NOTE(review): "sucessful" is a typo, kept because tests reference it.
    return utils.vtgate.get_vars()['BufferedRequestsSuccessful']
  def test_tx_is_buffered(self):
    """Tests that for a transaction, we buffer exactly one request."""
    vtgate_conn = get_connection()
    kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
    keyspace_id = kid_list[0]
    initial_buffered = self.get_sucessful_buffered_requests()
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=KEYSPACE_NAME,
        keyspace_ids=[pack_kid(keyspace_id)],
        writable=True)
    cursor.begin()
    cursor.execute(
        'insert into vt_insert_test (msg, keyspace_id) '
        'values (:msg, :keyspace_id)',
        {'msg': 'test %s' % 1000, 'keyspace_id': keyspace_id})
    cursor.execute('select * from vt_insert_test', {})
    cursor.rollback()
    num_buffered = self.get_sucessful_buffered_requests() - initial_buffered
    # No matter how many requests there were in the transaction, we should only
    # buffer one request (the Begin to the vttablet).
    self.assertEqual(num_buffered, 1)
  def test_master_read_is_buffered(self):
    """Tests that we buffer master reads."""
    vtgate_conn = get_connection()
    kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
    keyspace_id = kid_list[0]
    initial_buffered = self.get_sucessful_buffered_requests()
    cursor = vtgate_conn.cursor(
        tablet_type='master', keyspace=KEYSPACE_NAME,
        keyspace_ids=[pack_kid(keyspace_id)])
    cursor.execute('select * from vt_insert_test', {})
    num_buffered = self.get_sucessful_buffered_requests() - initial_buffered
    self.assertEqual(num_buffered, 1)
  def test_replica_read_is_not_buffered(self):
    """Tests that we do not buffer replica reads."""
    vtgate_conn = get_connection()
    initial_buffered = self.get_sucessful_buffered_requests()
    vtgate_conn._execute(
        'select * from vt_insert_test', {},
        tablet_type='replica', keyspace_name=KEYSPACE_NAME,
        keyranges=[self.keyrange]
    )
    num_buffered = self.get_sucessful_buffered_requests() - initial_buffered
    self.assertEqual(num_buffered, 0)
utils.main()
| 30.525952 | 79 | 0.705622 |
d391615f7d107a542df2e89a464951c787492df4 | 2,031 | py | Python | dbd-course-recommender/course_recommender/users/views.py | singh-priyank/DBMS_Course | 6538cd7bc2172b8a54c6c71776a2f5ad4daeeb32 | [
"MIT"
] | 1 | 2020-11-13T12:37:28.000Z | 2020-11-13T12:37:28.000Z | dbd-course-recommender/course_recommender/users/views.py | singh-priyank/DBMS_Course | 6538cd7bc2172b8a54c6c71776a2f5ad4daeeb32 | [
"MIT"
] | 1 | 2020-11-17T07:17:29.000Z | 2021-04-23T20:39:59.000Z | dbd-course-recommender/course_recommender/users/views.py | singh-priyank/DBMS_Course | 6538cd7bc2172b8a54c6c71776a2f5ad4daeeb32 | [
"MIT"
] | null | null | null | import random
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from course.models import *
from course.services import get_enrolled_subjects, get_recommmendations
from .forms import *
from .models import Student
def register(request):
    """Handle user sign-up.

    POST: validate UserRegisterForm; on success create the account and
    redirect to login. GET (or invalid POST): render the form.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Bug fix: removed the unused `username` local that was fetched
            # from cleaned_data but never referenced.
            messages.success(
                request, f'Your account has been created! You are now able to log in')
            messages.success(
                request, f'Please Update your profile first.')
            return redirect('users-login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Render the logged-in student's profile page."""
    student = get_object_or_404(Student, account=request.user)
    return render(
        request,
        'users/profile.html',
        {'home_page': 'active', 'account': student},
    )
@login_required
def EditProfile(request):
    """Edit the logged-in student's profile and linked auth user.

    POST: validate both forms, save and redirect on success, otherwise
    surface the form errors. GET: render the forms.

    Bug fix: the GET branch previously built unbound forms, so the edit page
    showed empty fields; the forms are now bound to the existing records.
    """
    student = get_object_or_404(Student, account=request.user)
    if request.method == "POST":
        p_form = EditProfileForm(request.POST, request.FILES, instance=student)
        u_form = UserUpdateForm(request.POST, instance=request.user)
        if p_form.is_valid() and u_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'Your Profile has been updated!')
            return redirect('users-profile')
        else:
            messages.error(request, p_form.errors)
            messages.error(request, u_form.errors)
    else:
        # Pre-populate with the current data so the user edits existing values.
        p_form = EditProfileForm(instance=student)
        u_form = UserUpdateForm(instance=request.user)
    context = {'p_form': p_form, 'u_form': u_form}
    return render(request, 'users/update-profile.html', context)
| 34.423729 | 86 | 0.658296 |
d02e72b10c35810edfb8a5c886165edc4f25669c | 49 | py | Python | src/test/python/testSmvUserLib/library/submod/lib2.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | null | null | null | src/test/python/testSmvUserLib/library/submod/lib2.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | 34 | 2022-02-26T04:27:34.000Z | 2022-03-29T23:05:47.000Z | src/test/python/testSmvUserLib/library/submod/lib2.py | ninjapapa/SMV2 | 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | [
"Apache-2.0"
] | null | null | null | # dummy library file
def mylib2method():
    """Placeholder helper in the dummy submodule library; does nothing."""
    return None
| 9.8 | 20 | 0.714286 |
07981c9f0e60589441b17283bb042ff17a222e56 | 772 | py | Python | Gabaritos/Caderno-05-gabarito.py | AnabeatrizMacedo241/Python-101 | 3aca95ece3b81456d87c5b8e08937d585fd79845 | [
"MIT"
] | 3 | 2021-07-12T16:25:44.000Z | 2021-07-27T15:11:59.000Z | Gabaritos/Caderno-05-gabarito.py | AnabeatrizMacedo241/Python-101 | 3aca95ece3b81456d87c5b8e08937d585fd79845 | [
"MIT"
] | null | null | null | Gabaritos/Caderno-05-gabarito.py | AnabeatrizMacedo241/Python-101 | 3aca95ece3b81456d87c5b8e08937d585fd79845 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 25 15:42:54 2021
@author: anabeatrizmacedo
"""
# NOTE(review): this is an exercise answer key. The bare "1.", "2." ... lines
# are exercise markers (and "3. Lista = ..." is not valid Python), so the file
# is not runnable as a whole -- each snippet is meant to be read in isolation.
# Exercise 1: raise an exception for a negative number.
1.
x = -1
if x < 0:
    raise Exception("Número menor do que zero")
# Exercise 2: re-prompt once when the first input is not an integer.
2.
def num():
    try:
        num1 = int(input("Digite um número: "))
    except:
        print ("Você não digitou um número!")
        num2 = int(input("Tente novamente. Digite um número: "))
    finally:
        print ("Obrigado!")
num()
# Exercise 3: print only the first three vowels using enumerate.
3. Lista = ['a', 'e', 'i', 'o', 'u']
for index, res in enumerate(Lista):
    if index < 3:
        print(res)
# Exercise 4: element-wise product of three lists via map + lambda.
4.
listaA = [0, 2, 4]
listaB = [1, 3, 5]
listaC = [12, 8, 5]
list(map(lambda x, y, z: x*y*z, listaA, listaB, listaC))
# Exercise 5: keep only the negative numbers via filter + lambda.
5.
lista_num = [1, -1, -2, 2, 3, -3, -4, -5]
list(filter(lambda x: x<0, lista_num))
| 19.794872 | 68 | 0.536269 |
a22bb709482ef5114348f71048b11e4cea94cccf | 15,280 | py | Python | resources/lib/globals.py | vascobraga41/plugin.video.netflix | d0be74cc8c0d51c19c606751bd212ff09254e5d1 | [
"MIT"
] | null | null | null | resources/lib/globals.py | vascobraga41/plugin.video.netflix | d0be74cc8c0d51c19c606751bd212ff09254e5d1 | [
"MIT"
] | null | null | null | resources/lib/globals.py | vascobraga41/plugin.video.netflix | d0be74cc8c0d51c19c606751bd212ff09254e5d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Global addon constants
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Everything that is to be globally accessible must be defined in this module.
# Using the Kodi reuseLanguageInvoker feature, only the code in the addon.py or service.py module
# will be run every time the addon is called.
# All other modules (imports) are initialized only on the first invocation of the add-on.
import collections
import os
from urllib.parse import parse_qsl, unquote, urlparse
import xbmcaddon
from xbmcgui import Window
class GlobalVariables:
"""Encapsulation for global variables to work around quirks with
Kodi's reuseLanguageInvoker behavior"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name, too-many-instance-attributes
# Values in the variables VIEW_* stand for a partial menu id,
# contained in the settings xml, example 'profiles' stand for id 'viewmodeprofiles'
VIEW_PROFILES = 'profiles'
VIEW_MAINMENU = 'mainmenu'
VIEW_MYLIST = 'mylist'
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
VIEW_SEARCH = 'search'
VIEW_EXPORTED = 'exported'
CONTENT_IMAGES = 'images'
CONTENT_FOLDER = 'files'
CONTENT_MOVIE = 'movies'
CONTENT_SHOW = 'tvshows'
CONTENT_SEASON = 'seasons'
CONTENT_EPISODE = 'episodes'
'''
--Main Menu key infos--
path Passes information to the called method
generally structured as follows: [func. name, menu id, context id]
loco_contexts Contexts used to obtain the list of contents (use only one context when loco_known = True)
loco_known If True, keys label_id/description_id/icon are ignored, these values are obtained from LoCo list
label_id The ID for the menu title
description_id Description info text
icon Set a default image
view Override the default "partial menu id" of view
content_type Override the default content type (CONTENT_SHOW)
has_show_setting Means that the menu has the show/hide settings, by default is True
has_sort_setting Means that the menu has the sort settings, by default is False
no_use_cache The cache will not be used to store the contents of the menu
Explanation of function names in the 'path' key:
video_list Automatically gets the list_id by making a loco request,
the list_id search is made using the value specified on the loco_contexts key
video_list_sorted To work must have a third argument on the path that is the context_id
or instead specified the key request_context_name
'''
MAIN_MENU_ITEMS = collections.OrderedDict([
('myList', {'path': ['video_list_sorted', 'myList'],
'loco_contexts': ['queue'],
'loco_known': True,
'request_context_name': 'mylist',
'view': VIEW_MYLIST,
'has_sort_setting': True}),
('continueWatching', {'path': ['video_list', 'continueWatching'],
'loco_contexts': ['continueWatching'],
'loco_known': True}),
('newAndPopular', {'path': ['category_list', 'newAndPopular'],
'loco_contexts': ['comingSoon'],
'loco_known': False,
'label_id': 30700,
'description_id': 30146,
'icon': 'DefaultRecentlyAddedMovies.png'}),
('chosenForYou', {'path': ['video_list', 'chosenForYou'],
'loco_contexts': ['topTen'],
'loco_known': True}),
('recentlyAdded', {'path': ['video_list_sorted', 'recentlyAdded', '1592210'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'genres',
'label_id': 30145,
'description_id': 30146,
'icon': 'DefaultRecentlyAddedMovies.png',
'has_sort_setting': True}),
('newRelease', {'path': ['video_list_sorted', 'newRelease'],
'loco_contexts': ['newRelease'],
'loco_known': True,
'request_context_name': 'newrelease',
'has_sort_setting': True}),
('currentTitles', {'path': ['video_list', 'currentTitles'],
'loco_contexts': ['trendingNow'],
'loco_known': True}),
('mostViewed', {'path': ['video_list', 'mostViewed'],
'loco_contexts': ['popularTitles'],
'loco_known': True}),
('netflixOriginals', {'path': ['video_list_sorted', 'netflixOriginals', '839338'],
'loco_contexts': ['netflixOriginals'],
'loco_known': True,
'request_context_name': 'genres',
'has_sort_setting': True}),
('assistiveAudio', {'path': ['video_list_sorted', 'assistiveAudio', 'None'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'assistiveAudio',
'label_id': 30163,
'description_id': 30164,
'icon': 'DefaultTVShows.png',
'has_sort_setting': True}),
('recommendations', {'path': ['recommendations', 'recommendations'],
'loco_contexts': ['similars', 'becauseYouAdded', 'becauseYouLiked', 'watchAgain',
'bigRow'],
'loco_known': False,
'label_id': 30001,
'description_id': 30094,
'icon': 'DefaultUser.png'}),
('tvshowsGenres', {'path': ['subgenres', 'tvshowsGenres', '83'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30174,
'description_id': None,
'icon': 'DefaultTVShows.png',
'has_sort_setting': True}),
('moviesGenres', {'path': ['subgenres', 'moviesGenres', '34399'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30175,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE,
'has_sort_setting': True}),
('tvshows', {'path': ['genres', 'tvshows', '83'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30095,
'description_id': None,
'icon': 'DefaultTVShows.png',
'has_sort_setting': True}),
('movies', {'path': ['genres', 'movies', '34399'],
'loco_contexts': None,
'loco_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30096,
'description_id': None,
'icon': 'DefaultMovies.png',
'content_type': CONTENT_MOVIE,
'has_sort_setting': True}),
('genres', {'path': ['genres', 'genres'],
'loco_contexts': ['genre'],
'loco_known': False,
'request_context_name': 'genres', # Used for sub-menus
'label_id': 30010,
'description_id': 30093,
'icon': 'DefaultGenre.png',
'has_sort_setting': True}),
('search', {'path': ['search', 'search'],
'loco_contexts': None,
'loco_known': False,
'label_id': 30400,
'description_id': 30092,
'icon': 'DefaultAddonsSearch.png',
'view': VIEW_SEARCH,
'has_sort_setting': True}),
('exported', {'path': ['exported', 'exported'],
'loco_contexts': None,
'loco_known': False,
'label_id': 30048,
'description_id': 30091,
'icon': 'DefaultHardDisk.png',
'view': VIEW_EXPORTED})
])
MODE_DIRECTORY = 'directory'
MODE_ACTION = 'action'
MODE_PLAY = 'play'
MODE_PLAY_STRM = 'play_strm'
MODE_LIBRARY = 'library'
MODE_KEYMAPS = 'keymaps'
    def __init__(self):
        """Do nothing on constructing the object (see init_globals).

        Only placeholders are created here; real initialization happens in
        init_globals(), which is re-run on every plugin invocation.
        """
        # The class initialization (GlobalVariables) will only take place at the first initialization of this module
        # on subsequent add-on invocations (invoked by reuseLanguageInvoker) will have no effect.
        # Define here also any other variables necessary for the correct loading of the other project modules
        self.WND_KODI_HOME = Window(10000)  # Kodi home window
        self.IS_ADDON_FIRSTRUN = None
        self.ADDON = None
        self.ADDON_DATA_PATH = None
        self.DATA_PATH = None
        # Cache placeholders, populated in init_globals depending on service/plugin mode.
        self.CACHE_MANAGEMENT = None
        self.CACHE_TTL = None
        self.CACHE_MYLIST_TTL = None
        self.CACHE_METADATA_TTL = None
    def init_globals(self, argv):
        """Initialized globally used module variables. Needs to be called at start of each plugin instance!

        :param argv: sys.argv of the invocation (plugin URL, handle, query string).
        """
        # IS_ADDON_FIRSTRUN: specifies if the add-on has been initialized for the first time
        # (reuseLanguageInvoker not used yet)
        self.IS_ADDON_FIRSTRUN = self.IS_ADDON_FIRSTRUN is None
        self.IS_ADDON_EXTERNAL_CALL = False
        # xbmcaddon.Addon must be created at every instance otherwise it does not read any new changes to the settings
        self.ADDON = xbmcaddon.Addon()
        self.URL = urlparse(argv[0])
        self.REQUEST_PATH = unquote(self.URL[2][1:])
        try:
            self.PARAM_STRING = argv[2][1:]
        except IndexError:
            self.PARAM_STRING = ''
        self.REQUEST_PARAMS = dict(parse_qsl(self.PARAM_STRING))
        if self.IS_ADDON_FIRSTRUN:
            # Global variables that do not need to be generated at every instance
            self.ADDON_ID = self.ADDON.getAddonInfo('id')
            self.PLUGIN = self.ADDON.getAddonInfo('name')
            self.VERSION_RAW = self.ADDON.getAddonInfo('version')
            self.VERSION = remove_ver_suffix(self.VERSION_RAW)
            self.ICON = self.ADDON.getAddonInfo('icon')
            self.DEFAULT_FANART = self.ADDON.getAddonInfo('fanart')
            self.ADDON_DATA_PATH = self.ADDON.getAddonInfo('path')  # Add-on folder
            self.DATA_PATH = self.ADDON.getAddonInfo('profile')  # Add-on user data folder
            self.CACHE_PATH = os.path.join(self.DATA_PATH, 'cache')
            self.COOKIES_PATH = os.path.join(self.DATA_PATH, 'COOKIES')
        # argv[1] is the integer plugin handle when invoked as a plugin;
        # it is absent when running as the background service.
        try:
            self.PLUGIN_HANDLE = int(argv[1])
            self.IS_SERVICE = False
            self.BASE_URL = f'{self.URL[0]}://{self.URL[1]}'
        except IndexError:
            self.PLUGIN_HANDLE = 0
            self.IS_SERVICE = True
            self.BASE_URL = f'plugin://{self.ADDON_ID}'
        from resources.lib.common.kodi_ops import KodiVersion
        self.KODI_VERSION = KodiVersion()
        # Initialize the log
        from resources.lib.utils.logging import LOG
        LOG.initialize(self.ADDON_ID, self.PLUGIN_HANDLE,
                       self.ADDON.getSettingBool('enable_debug'),
                       self.ADDON.getSettingBool('enable_timing'))
        if self.IS_ADDON_FIRSTRUN:
            self.init_database()
        # Initialize the cache
        if self.IS_SERVICE:
            from resources.lib.services.cache_management import CacheManagement
            self.CACHE_MANAGEMENT = CacheManagement()
            self.CACHE = self.CACHE_MANAGEMENT
            from resources.lib.services.settings_monitor import SettingsMonitor
            self.SETTINGS_MONITOR = SettingsMonitor()
        else:
            from resources.lib.common.cache import Cache
            self.CACHE = Cache()
        self.IPC_OVER_HTTP = self.ADDON.getSettingBool('enable_ipc_over_http')
    def init_database(self):
        """Initialize the local and the shared database connections.

        The shared DB can be MySQL (user setting) with an automatic
        fallback to the local SQLite implementation when the MySQL
        server is unreachable or misconfigured.
        """
        # Initialize local database
        import resources.lib.database.db_local as db_local
        self.LOCAL_DB = db_local.NFLocalDatabase()
        # Initialize shared database
        use_mysql = G.ADDON.getSettingBool('use_mysql')
        import resources.lib.database.db_shared as db_shared
        from resources.lib.common.exceptions import DBMySQLConnectionError, DBMySQLError
        try:
            shared_db_class = db_shared.get_shareddb_class(use_mysql=use_mysql)
            self.SHARED_DB = shared_db_class()
        except (DBMySQLConnectionError, DBMySQLError) as exc:
            import resources.lib.kodi.ui as ui
            if isinstance(exc, DBMySQLError):
                # There is a problem with the database itself (not just connectivity)
                ui.show_addon_error_info(exc)
            # The MySQL database cannot be reached: fall back to the local SQLite database.
            # When this code is called from the add-on, the change must also be applied in
            # the service, so disabling the setting here triggers the SettingsMonitor.
            self.ADDON.setSettingBool('use_mysql', False)
            ui.show_notification(self.ADDON.getLocalizedString(30206), time=10000)
            shared_db_class = db_shared.get_shareddb_class()
            self.SHARED_DB = shared_db_class()
def is_known_menu_context(self, context):
"""Return true if context are one of the menu with loco_known=True"""
for _, data in self.MAIN_MENU_ITEMS.items():
if data['loco_known']:
if data['loco_contexts'][0] == context:
return True
return False
def remove_ver_suffix(version):
    """Remove the codename suffix from a version value.

    Example: '1.16.2+matrix.1' -> '1.16.2'. Versions without a suffix
    are returned unchanged.
    """
    import re
    # \d+ (instead of the original \d) also strips multi-digit build
    # numbers such as '+matrix.12'.
    pattern = re.compile(r'\+\w+\.\d+$')  # Example: +matrix.1
    return pattern.sub('', version)
# We initialize an importable singleton instance of GlobalVariables for
# run_addon.py and run_service.py, where G.init_globals() MUST be called
# before anything else is done with it.
G = GlobalVariables()
| 49.771987 | 120 | 0.561846 |
35cb851b2012138dbd38efc6b931fe490f309b19 | 3,193 | py | Python | createPriorities.py | AndrewBuck/cosmic | f59771eb4c5c8a8e6a940bb118d34bdeac278894 | [
"Unlicense"
] | null | null | null | createPriorities.py | AndrewBuck/cosmic | f59771eb4c5c8a8e6a940bb118d34bdeac278894 | [
"Unlicense"
] | null | null | null | createPriorities.py | AndrewBuck/cosmic | f59771eb4c5c8a8e6a940bb118d34bdeac278894 | [
"Unlicense"
] | null | null | null | import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cosmic.settings")
django.setup()
from cosmicapp.models import *
#--------------------------------------------------------------------------------
#TODO: This table needs a constraint that name/priority is unique for all rows.
# All (name, priority, priorityClass) rows that must exist in the
# ProcessPriority table. Kept as a data table so adding a new process
# type is a one-line change instead of another copy-pasted stanza.
# Order matches the original script.
PRIORITY_ENTRIES = [
    ('imagestats', 10000, 'batch'),
    ('parseHeaders', 10000, 'batch'),
    ('generateThumbnails', 10000, 'batch'),
    ('sextractor', 3010, 'batch'),
    ('sextractor', 100000, 'interactive'),
    ('image2xy', 3008, 'batch'),
    ('image2xy', 100000, 'interactive'),
    ('daofind', 3006, 'batch'),
    ('daofind', 100000, 'interactive'),
    ('starfind', 3004, 'batch'),
    ('starfind', 100000, 'interactive'),
    ('flagSources', 3002, 'batch'),
    ('starmatch', 3000, 'batch'),
    ('starmatch', 100000, 'interactive'),
    ('flagSources', 100000, 'interactive'),
    ('astrometryNet', 2975, 'batch'),
    ('astrometryNet', 100000, 'interactive'),
    ('imageCombine', 1000, 'batch'),
    ('imageCombine', 100000, 'interactive'),
    ('calculateUserCostTotals', 9999999, 'batch'),
]

for entry_name, entry_priority, entry_class in PRIORITY_ENTRIES:
    # get_or_create keeps the script idempotent: re-running it never
    # duplicates rows that already exist.
    priority, created = ProcessPriority.objects.get_or_create(
        name = entry_name,
        priority = entry_priority,
        priorityClass = entry_class
        )
| 24.007519 | 81 | 0.671156 |
488edec88defea0e937efa99117fa95b823ce223 | 40,523 | py | Python | src/generation/cil/cil_generator.py | harry1911/CoolCompiler | 0eb4636bb50341d94f757b36d2362e9d03959046 | [
"MIT"
] | null | null | null | src/generation/cil/cil_generator.py | harry1911/CoolCompiler | 0eb4636bb50341d94f757b36d2362e9d03959046 | [
"MIT"
] | null | null | null | src/generation/cil/cil_generator.py | harry1911/CoolCompiler | 0eb4636bb50341d94f757b36d2362e9d03959046 | [
"MIT"
] | null | null | null |
from general import visitor
from general import ast_hierarchy as ast_cool
from general import cil_hierarchy as ast_cil
from .dataTypesCollector import TypesCollector
from .context import Scope, ObjecContext, Defaults
from collections import OrderedDict
class CilGeneratorVisitor:
    def __init__(self, astCool, enviroment):
        """Prepare generator state for translating a COOL AST into CIL.

        astCool: root ProgramNode of the COOL AST.
        enviroment: semantic-analysis result providing types_dict and types_list.
        """
        self.astCool= astCool
        self.cilProgram = None  # ast_cil program; filled in by generate_code()
        self.types_dict = enviroment.types_dict
        # First entry of types_list is skipped -- presumably a sentinel/base
        # entry not managed by ObjecContext; TODO confirm
        self.objectContext = ObjecContext(enviroment.types_list[1:])
        self.current_type = None # type(current_type) = Type
        self.context = Scope()
        self.num_labels = 0  # monotonically increasing counter for unique CIL labels
        self.defaults = {} # { str(class_name) : Defaults }
    def generate_code(self):
        """Run the full COOL -> CIL translation and return the CIL program.

        First collects the type declarations (DATA/TYPES sections) with
        TypesCollector, then visits the AST to build the CODE section.
        """
        collector = TypesCollector(self.astCool, self.types_dict)
        self.defaults = collector.getTypes()
        self.cilProgram = collector.astCil # types added
        self.visit(self.astCool, self.context) # build Data and Code seccions
        return self.cilProgram
    def add_constructor(self, type_defaults, context):
        """Emit the '<class>.ctor' CIL function that initializes attributes.

        Two passes over the class attributes: first give every Int/Bool/String
        attribute its default boxed value, then run the user-supplied init
        expressions (which may read other attributes already defaulted).
        The constructor takes 'self' and returns it.
        """
        child_context = context.create_child()
        child_context.self = child_context.define_local()
        function_name = type_defaults.class_name + '.ctor'
        argument_list = [ast_cil.CILArgument('self', child_context.self)]
        # localvars = [child_context.self]
        localvars = []
        code = []
        # Pass 1: default values for built-in typed attributes
        for feature_attr in type_defaults.defaults:
            attr_offset = self.cilProgram.dotTYPES.types[self.current_type.name].attributes.get(feature_attr.name).offset
            if feature_attr.type_attribute in ['Int','Bool']:
                # Allocate a fresh box; its value slot defaults to 0/false
                local_var = child_context.define_local()
                localvars.append(local_var)
                local_tag = child_context.define_local(self.types_dict[feature_attr.type_attribute])
                localvars.append(local_tag)
                cil_allocate = ast_cil.CILAllocate(local_tag)
                cil_assign1 = ast_cil.CILAssignment(local_var, cil_allocate)
                code.append(cil_assign1)
                cil_setattr1 = ast_cil.CILSetAttr(child_context.self, attr_offset, local_var)
                code.append(cil_setattr1)
            elif feature_attr.type_attribute == 'String':
                # Default string attribute: the empty string
                local_var = child_context.define_local()
                localvars.append(local_var)
                cil_str = ast_cil.CILString()
                cil_assign2 = ast_cil.CILAssignment(local_var, cil_str)
                code.append(cil_assign2)
                cil_setattr2 = ast_cil.CILSetAttr(child_context.self, attr_offset, local_var)
                code.append(cil_setattr2)
        # Pass 2: explicit initialization expressions
        for feature_attr in type_defaults.defaults:
            if feature_attr.expression is not None:
                attr_offset = self.cilProgram.dotTYPES.types[self.current_type.name].attributes.get(feature_attr.name).offset
                self.visit(feature_attr.expression, child_context)
                localvars += feature_attr.expression.locals
                code += feature_attr.expression.code
                cil_setattr3 = ast_cil.CILSetAttr(child_context.self, attr_offset, feature_attr.expression.value)
                code.append(cil_setattr3)
        code.append(ast_cil.CILReturn(child_context.self))
        cil_func = ast_cil.CILFunction(function_name, argument_list, localvars, code)
        self.cilProgram.dotCODE.append(cil_func)
    @visitor.on('node')
    def visit(self, node, context):
        # Dispatch stub: the @visitor.when overloads below provide the
        # per-node-type implementations.
        pass
    @visitor.when(ast_cool.ProgramNode)
    def visit(self, node, context):
        """Visit every class of the program, each in its own child scope."""
        for _class in node.class_list:
            child_context = context.create_child()
            self.visit(_class, child_context)
    @visitor.when(ast_cool.ClassNode)
    def visit(self, node, context):
        """Emit the class constructor and all its method functions into dotCODE."""
        self.current_type = self.objectContext.get_type(node.name)
        self.add_constructor(self.defaults[node.name], context)
        for method in node.method_list:
            self.visit(method, context)
            self.cilProgram.dotCODE.append(method.code)
    @visitor.when(ast_cool.FeatureMethodNode)
    def visit(self, node, context):
        """Translate a COOL method into a CILFunction named '<class>.<method>'.

        'self' is always the first argument; the method body's value is
        returned. Stores the CILFunction in node.code.
        """
        child_context = context.create_child()
        child_context.self = child_context.define_local()
        function_name = self.current_type.name + '.' + node.name
        # localvars = [child_context.self]
        localvars = []
        argument_list = [ast_cil.CILArgument('self', child_context.self)]
        for param in node.formal_parameter_list:
            # Each formal parameter gets a CIL local and is bound in the scope
            local_arg = child_context.define_local()
            # localvars.append(local_arg)
            argument = ast_cil.CILArgument(param.name, local_arg)
            argument_list.append(argument)
            child_context.define_variable(param.name, param.type_parameter, local_arg)
        self.visit(node.expression, child_context)
        local_value = node.expression.value
        localvars += node.expression.locals
        code = node.expression.code
        code.append(ast_cil.CILReturn(local_value))
        cil_func = ast_cil.CILFunction(function_name, argument_list, localvars, code)
        node.value = local_value
        node.locals = localvars
        node.code = cil_func
    # [Assign]
    @visitor.when(ast_cool.AssignNode)
    def visit(self, node, context):
        """Emit CIL for 'id <- expr'.

        A lexical variable is assigned directly; otherwise the target must
        be an attribute of the current class and SETATTR is emitted. The
        value of the whole expression is the assigned value.
        """
        self.visit(node.expression, context)
        localvars = node.expression.locals
        code = node.expression.code
        vinfo = context.find_variable(node.instance.name)
        if vinfo is None:
            # check if this variable is an attr
            attr = self.cilProgram.dotTYPES.types[self.current_type.name].attributes.get(node.instance.name)
            if attr is None:
                # throw an error because var is not defined(this shouldn't happend)
                raise Exception('Attr is not defined.')
            cil_setattr = ast_cil.CILSetAttr(context.self, attr.offset, node.expression.value)
            code.append(cil_setattr)
        else:
            cil_assign = ast_cil.CILAssignment(vinfo.cil_name, ast_cil.CILVar(node.expression.value))
            code.append(cil_assign)
        node.value = node.expression.value
        node.locals = localvars
        node.code = code
    # [Self]
    @visitor.when(ast_cool.SelfNode)
    def visit(self, node, context):
        """'self' evaluates to the current instance local; emits no code."""
        node.value = context.self
        node.locals = []
        node.code = []
# [Var - Identifier]
@visitor.when(ast_cool.ObjectNode)
def visit(self, node, context):
localvars = []
code = []
vinfo = context.find_variable(node.name)
if vinfo is None:
# check if this variable is an attr
attr = self.cilProgram.dotTYPES.types[self.current_type.name].attributes.get(node.name)
if attr is None:
# throw an error because var is not defined(this shouldn't happend)
pass
local_value = context.define_local()
localvars.append(local_value)
cil_getattr = ast_cil.CILGetAttr(context.self, attr.offset)
cil_assign = ast_cil.CILAssignment(local_value, cil_getattr)
code.append(cil_assign)
else:
local_value = vinfo.cil_name
node.value = local_value
node.locals = localvars
node.code = code
    # [True]
    @visitor.when(ast_cool.TrueNode)
    def visit(self, node, context):
        """Box the constant 'true': allocate a Bool and set slot 0 to 1."""
        local_bool_content = context.define_local(1)
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        local_value = context.define_local()
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_bool_content)
        node.value = local_value
        node.locals = [local_value, local_bool_content, local_bool_tag]
        node.code = [cil_assign, cil_setattr]
    # [False]
    @visitor.when(ast_cool.FalseNode)
    def visit(self, node, context):
        """Box the constant 'false': a freshly allocated Bool (slot 0 left at its default -- presumably zero; TODO confirm allocator zero-fills)."""
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        local_value = context.define_local()
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign = ast_cil.CILAssignment(local_value, cil_allocate)
        node.value = local_value
        node.locals = [local_value, local_bool_tag]
        node.code = [cil_assign]
    # [Int]
    @visitor.when(ast_cool.IntegerNode)
    def visit(self, node, context):
        """Box an integer literal: allocate an Int and store the raw value in slot 0."""
        local_int_content = context.define_local(int(node.int_token))
        local_int_tag = context.define_local(self.types_dict['Int'])
        local_value = context.define_local()
        cil_allocate = ast_cil.CILAllocate(local_int_tag)
        cil_assign = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_int_content)
        node.value = local_value
        node.locals = [local_value, local_int_content, local_int_tag]
        node.code = [cil_assign, cil_setattr]
    # [String]
    @visitor.when(ast_cool.StringNode)
    def visit(self, node, context):
        """Create a CIL string constant from the literal token."""
        local_value = context.define_local()
        cil_str = ast_cil.CILString(node.str_token)
        cil_assign = ast_cil.CILAssignment(local_value, cil_str)
        node.value = local_value
        node.locals = [local_value]
        node.code = [cil_assign]
    # [New]
    @visitor.when(ast_cool.NewObjectNode)
    def visit(self, node, context):
        """Emit CIL for 'new T'.

        'new String' is an empty string constant; 'new SELF_TYPE' resolves
        the tag dynamically with TYPEOF(self); any other type uses its
        static tag. The resolved tag is then passed to ALLOCATE.
        """
        localvars = []
        code = []
        local_instance = context.define_local()
        localvars.append(local_instance)
        # TODO: code added
        if node.new_type == 'String':
            # Strings are immutable constants, no allocation/ctor needed
            cil_str = ast_cil.CILString()
            cil_assign3 = ast_cil.CILAssignment(local_instance, cil_str)
            code.append(cil_assign3)
            node.value = local_instance
            node.locals = localvars
            node.code = code
            return
        if node.new_type == 'SELF_TYPE':
            # Tag is only known at runtime: take it from the current instance
            local_tag = context.define_local()
            localvars.append(local_tag)
            cil_typeof = ast_cil.CILTypeOf(context.self)
            cil_assign1 = ast_cil.CILAssignment(local_tag, cil_typeof)
            code.append(cil_assign1)
        else:
            local_tag = context.define_local(self.types_dict[node.new_type])
            localvars.append(local_tag)
        cil_allocate = ast_cil.CILAllocate(local_tag)
        cil_assign2 = ast_cil.CILAssignment(local_instance, cil_allocate)
        code.append(cil_assign2)
        # NOTE(review): the constructor call below is commented out -- the
        # new object's attributes are apparently initialized elsewhere;
        # verify against the runtime.
        # local_value = context.define_local()
        # localvars.append(local_value)
        # cil_self_param = ast_cil.CILParam(local_instance)
        # code.append(cil_self_param)
        # cil_ctor = ast_cil.CILConstructor(local_tag)
        # cil_assign3 = ast_cil.CILAssignment(local_instance, cil_ctor)
        # code.append(cil_assign3)
        node.value = local_instance
        node.locals = localvars
        node.code = code
    # [DynamicDispatch]
    @visitor.when(ast_cool.DynamicDispatchNode)
    def visit(self, node, context):
        """Emit CIL for 'expr.method(args)' (virtual-table dispatch).

        Arguments are evaluated left-to-right, then the receiver; a void
        receiver jumps to the '_dispatch_abort' runtime routine. The
        method slot offset is looked up in the receiver's static type.
        """
        localvars = []
        code = []
        params = []
        for arg_expr in node.arguments:
            self.visit(arg_expr, context)
            localvars += arg_expr.locals
            code += arg_expr.code
            params.append(ast_cil.CILParam(arg_expr.value))
        self.visit(node.instance, context)
        localvars += node.instance.locals
        code += node.instance.code
        self.num_labels += 1
        cil_dispatchnotvoid_label = ast_cil.CILLabel('DISPATCH_NOT_VOID' + str(self.num_labels))
        cil_cond = ast_cil.CILCondition(node.instance.value, cil_dispatchnotvoid_label.label)
        code.append(cil_cond)
        cil_goto1 = ast_cil.CILGoTo('_dispatch_abort') # DISPATCH_ON_VOID
        code.append(cil_goto1)
        code.append(cil_dispatchnotvoid_label) # DISPATCH_NOT_VOID
        param_instance = ast_cil.CILParam(node.instance.value)
        # take the method offset
        type_name = node.instance.computed_type.name
        if type_name == 'SELF_TYPE':
            # SELF_TYPE has no method table of its own: use the enclosing type
            type_name = node.instance.computed_type.parent
        meth_offset = self.cilProgram.dotTYPES.types[type_name].methods.get(node.method).offset
        # check order of parameters: receiver first, then the arguments
        code.append(param_instance)
        for _param in params:
            code.append(_param)
        local_value = context.define_local()
        localvars.append(local_value)
        cil_dcall = ast_cil.CILDinamicCall(node.instance.value, meth_offset)
        cil_assign = ast_cil.CILAssignment(local_value, cil_dcall)
        code.append(cil_assign)
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [StaticDispatch]
    @visitor.when(ast_cool.StaticDispatchNode)
    def visit(self, node, context):
        """Emit CIL for 'expr@Type.method(args)' (static dispatch).

        Like dynamic dispatch, but the target function is resolved at
        compile time from the explicitly named type, so a direct call is
        emitted instead of a vtable lookup.
        """
        localvars = []
        code = []
        params = []
        for arg_expr in node.arguments:
            self.visit(arg_expr, context)
            localvars += arg_expr.locals
            code += arg_expr.code
            params.append(ast_cil.CILParam(arg_expr.value))
        self.visit(node.instance, context)
        localvars += node.instance.locals
        code += node.instance.code
        self.num_labels += 1
        cil_dispatchnotvoid_label = ast_cil.CILLabel('DISPATCH_NOT_VOID' + str(self.num_labels))
        cil_cond = ast_cil.CILCondition(node.instance.value, cil_dispatchnotvoid_label.label)
        code.append(cil_cond)
        cil_goto1 = ast_cil.CILGoTo('_dispatch_abort') # DISPATCH_ON_VOID
        code.append(cil_goto1)
        code.append(cil_dispatchnotvoid_label) # DISPATCH_NOT_VOID
        param_instance = ast_cil.CILParam(node.instance.value)
        # take the method function
        func_name = self.cilProgram.dotTYPES.types[node.type_dispatch].methods.get(node.method).func
        # check order of parameters: receiver first, then the arguments
        code.append(param_instance)
        for _param in params:
            code.append(_param)
        local_value = context.define_local()
        localvars.append(local_value)
        cil_scall = ast_cil.CILStaticCall(func_name)
        cil_assign = ast_cil.CILAssignment(local_value, cil_scall)
        code.append(cil_assign)
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [If-True]
    # [If-False]
    @visitor.when(ast_cool.IfNode)
    def visit(self, node, context):
        """Emit CIL for 'if c then t else e fi'.

        Unboxes the condition (slot 0 of the Bool), branches to THEN when
        non-zero, otherwise falls through to the else branch; both
        branches store their result in the same local.
        """
        self.visit(node.if_expression, context)
        localvars = node.if_expression.locals
        code = node.if_expression.code
        local_value = context.define_local()
        localvars.append(local_value)
        self.num_labels += 1
        cil_then_label = ast_cil.CILLabel('THEN' + str(self.num_labels))
        self.num_labels += 1
        cil_end_label = ast_cil.CILLabel('END' + str(self.num_labels))
        local_if_value = context.define_local()
        localvars.append(local_if_value)
        cil_getattr = ast_cil.CILGetAttr(node.if_expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_if_value, cil_getattr)
        code.append(cil_assign1)
        cil_condition = ast_cil.CILCondition(local_if_value, cil_then_label.label)
        code.append(cil_condition)
        # Fall-through: else branch
        self.visit(node.else_expression, context)
        localvars += node.else_expression.locals
        code += node.else_expression.code
        cil_assign2 = ast_cil.CILAssignment(local_value, ast_cil.CILVar(node.else_expression.value))
        code.append(cil_assign2)
        cil_goto = ast_cil.CILGoTo(cil_end_label.label)
        code.append(cil_goto)
        code.append(cil_then_label) # THEN
        self.visit(node.then_expression, context)
        localvars += node.then_expression.locals
        code += node.then_expression.code
        cil_assign3 = ast_cil.CILAssignment(local_value, ast_cil.CILVar(node.then_expression.value))
        code.append(cil_assign3)
        code.append(cil_end_label) # END
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Sequence]
    @visitor.when(ast_cool.BlockNode)
    def visit(self, node, context):
        """Emit CIL for '{ e1; ...; en; }': concatenate the sub-expressions;
        the block's value is the value of the last one."""
        local_value = None
        code = []
        localvars = []
        for expr in node.expression_list:
            self.visit(expr, context)
            local_value = expr.value
            code += expr.code
            localvars += expr.locals
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Let]
    @visitor.when(ast_cool.LetInNode)
    def visit(self, node, context):
        """Emit CIL for 'let x1:T1 <- e1, ... in body'.

        Each declaration opens a new child scope so later initializers can
        see earlier bindings. Declarations without an initializer get the
        COOL default for Int/Bool/String (boxed zero / empty string).
        """
        localvars = []
        code = []
        current_context = context
        for _decl in node.declaration_list:
            local_decl = current_context.define_local()
            localvars.append(local_decl)
            if _decl.expression is None:
                if _decl._type == 'String':
                    cil_str = ast_cil.CILString()
                    cil_assign1 = ast_cil.CILAssignment(local_decl, cil_str)
                    code.append(cil_assign1)
                elif _decl._type in ['Int','Bool']:
                    local_tag = current_context.define_local(self.types_dict[_decl._type])
                    localvars.append(local_tag)
                    cil_allocate = ast_cil.CILAllocate(local_tag)
                    cil_assign2 = ast_cil.CILAssignment(local_decl, cil_allocate)
                    code.append(cil_assign2)
            else:
                self.visit(_decl.expression, current_context)
                localvars += _decl.expression.locals
                code += _decl.expression.code
                cil_assign3 = ast_cil.CILAssignment(local_decl, ast_cil.CILVar(_decl.expression.value))
                code.append(cil_assign3)
            # Chain scopes: each declaration is visible to the following ones
            new_child_context = current_context.create_child()
            new_child_context.define_variable(_decl.name, _decl._type, local_decl)
            current_context = new_child_context
        self.visit(node.expression, current_context)
        localvars += node.expression.locals
        code += node.expression.code
        node.value = node.expression.value
        node.locals = localvars
        node.code = code
    # [Case]
    @visitor.when(ast_cool.CaseNode)
    def visit(self, node, context):
        """Emit CIL for 'case e of b1:T1 => e1; ... esac'.

        Runtime strategy: take the instance's type tag and walk up the
        type hierarchy (via GetIndex -> parent tag) until a branch type
        matches exactly. A void scrutinee jumps to '_case_abort2'; running
        out of ancestors without a match jumps to '_case_abort'.
        """
        self.visit(node.case_expression, context)
        localvars = node.case_expression.locals
        code = node.case_expression.code
        self.num_labels += 1
        cil_caseonvoid_label = ast_cil.CILLabel('CASE_ON_VOID' + str(self.num_labels))
        self.num_labels += 1
        cil_taketag_label = ast_cil.CILLabel('TAKE_TAG' + str(self.num_labels))
        self.num_labels += 1
        cil_while_label = ast_cil.CILLabel('WHILE' + str(self.num_labels))
        self.num_labels += 1
        cil_casenobranch_label = ast_cil.CILLabel('CASE_NO_BRANCH' + str(self.num_labels))
        self.num_labels += 1
        cil_takeparenttag_label = ast_cil.CILLabel('TAKE_PARENT_TAG' + str(self.num_labels))
        self.num_labels += 1
        cil_end_label = ast_cil.CILLabel('END' + str(self.num_labels))
        cil_condition1 = ast_cil.CILCondition(node.case_expression.value, cil_taketag_label.label)
        code.append(cil_condition1)
        code.append(cil_caseonvoid_label) # CASE_ON_VOID _case_abort2
        cil_goto1 = ast_cil.CILGoTo('_case_abort2')
        code.append(cil_goto1)
        # cil_goto1 = ast_cil.CILGoTo(cil_end_label.label)
        # code.append(cil_goto1)
        code.append(cil_taketag_label) # TAKE_TAG
        local_instance_tag = context.define_local()
        localvars.append(local_instance_tag)
        cil_typeof = ast_cil.CILTypeOf(node.case_expression.value)
        cil_assign1 = ast_cil.CILAssignment(local_instance_tag, cil_typeof)
        code.append(cil_assign1)
        local_value = context.define_local()
        localvars.append(local_value)
        code.append(cil_while_label) # WHILE
        for _branch in node.branch_list:
            # Match by tag equality: diff == 0 means this branch's type
            local_branch_tag = context.define_local(self.types_dict[_branch.type_branch])
            localvars.append(local_branch_tag)
            local_diff = context.define_local()
            localvars.append(local_diff)
            cil_minus = ast_cil.CILMinus(local_instance_tag, local_branch_tag)
            cil_assign2 = ast_cil.CILAssignment(local_diff, cil_minus)
            code.append(cil_assign2)
            self.num_labels += 1
            cil_typenotmatch_label = ast_cil.CILLabel('TYPE_NOT_MATCH' + str(self.num_labels))
            cil_condition = ast_cil.CILCondition(local_diff, cil_typenotmatch_label.label)
            code.append(cil_condition)
            # Branch body runs with the branch variable bound to the scrutinee
            child_context = context.create_child()
            child_context.define_variable(_branch.name, _branch.type_branch, node.case_expression.value)
            self.visit(_branch.expression, child_context)
            localvars += _branch.expression.locals
            code += _branch.expression.code
            cil_assign3 = ast_cil.CILAssignment(local_value, ast_cil.CILVar(_branch.expression.value))
            code.append(cil_assign3)
            cil_goto = ast_cil.CILGoTo(cil_end_label.label)
            code.append(cil_goto)
            code.append(cil_typenotmatch_label) # TYPE_NOT_MATCH
        # No branch matched at this tag: tag 0 presumably marks the root
        # of the hierarchy -- TODO confirm
        cil_condition2 = ast_cil.CILCondition(local_instance_tag, cil_takeparenttag_label.label)
        code.append(cil_condition2)
        code.append(cil_casenobranch_label) # CASE_NO_BRANCH _case_abort
        cil_goto2 = ast_cil.CILGoTo('_case_abort')
        code.append(cil_goto2)
        # cil_goto2 = ast_cil.CILGoTo(cil_end_label.label)
        # code.append(cil_goto2)
        code.append(cil_takeparenttag_label) # TAKE_PARENT_TAG
        cil_parent = ast_cil.CILGetIndex(local_instance_tag)
        cil_assign4 = ast_cil.CILAssignment(local_instance_tag, cil_parent)
        code.append(cil_assign4)
        cil_goto3 = ast_cil.CILGoTo(cil_while_label.label)
        code.append(cil_goto3)
        code.append(cil_end_label) # END
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Loop-True]
    # [Loop-False]
    @visitor.when(ast_cool.WhileLoopNode)
    def visit(self, node, context):
        """Emit CIL for 'while c loop body pool'.

        Standard WHILE/LOOP/POOL label structure; the condition's Bool is
        unboxed each iteration. The loop expression itself has value void
        (a fresh, never-assigned local).
        """
        localvars = []
        code = []
        self.num_labels += 1
        cil_while_label = ast_cil.CILLabel('WHILE' + str(self.num_labels))
        self.num_labels += 1
        cil_loop_label = ast_cil.CILLabel('LOOP' + str(self.num_labels))
        self.num_labels += 1
        cil_pool_label = ast_cil.CILLabel('POOL' + str(self.num_labels))
        local_while_value = context.define_local()
        localvars.append(local_while_value)
        code.append(cil_while_label) # WHILE
        self.visit(node.while_expression, context)
        localvars += node.while_expression.locals
        code += node.while_expression.code
        cil_getattr = ast_cil.CILGetAttr(node.while_expression.value, 0)
        cil_assign = ast_cil.CILAssignment(local_while_value, cil_getattr)
        code.append(cil_assign)
        cil_condition = ast_cil.CILCondition(local_while_value, cil_loop_label.label)
        code.append(cil_condition)
        cil_goto1 = ast_cil.CILGoTo(cil_pool_label.label)
        code.append(cil_goto1)
        code.append(cil_loop_label) # LOOP
        self.visit(node.loop_expression, context)
        localvars += node.loop_expression.locals
        code += node.loop_expression.code
        cil_goto2 = ast_cil.CILGoTo(cil_while_label.label)
        code.append(cil_goto2)
        code.append(cil_pool_label) # POOL
        # return void
        local_value = context.define_local()
        localvars.append(local_value)
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [IsVoid-True]
    # [IsVoid-False]
    @visitor.when(ast_cool.IsVoidNode)
    def visit(self, node, context):
        """Emit CIL for 'isvoid e': produce a boxed Bool that is 1 when the
        expression's value is void (falsy), 0 otherwise."""
        self.visit(node.expression, context)
        code = node.expression.code
        localvars = node.expression.locals
        self.num_labels += 1
        cil_false_label = ast_cil.CILLabel('FALSE' + str(self.num_labels))
        self.num_labels += 1
        cil_end_label = ast_cil.CILLabel('END' + str(self.num_labels))
        local_value = context.define_local()
        localvars.append(local_value)
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        localvars.append(local_bool_tag)
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign = ast_cil.CILAssignment(local_value, cil_allocate)
        code.append(cil_assign)
        # Non-void value jumps to FALSE; void falls through to TRUE
        cil_condition = ast_cil.CILCondition(node.expression.value, cil_false_label.label)
        code.append(cil_condition)
        # TRUE
        local_true_content = context.define_local(1)
        localvars.append(local_true_content)
        cil_setattr_true = ast_cil.CILSetAttr(local_value, 0, local_true_content)
        code.append(cil_setattr_true)
        cil_goto = ast_cil.CILGoTo(cil_end_label.label)
        code.append(cil_goto)
        code.append(cil_false_label) # FALSE
        local_false_content = context.define_local(0)
        localvars.append(local_false_content)
        cil_setattr_false = ast_cil.CILSetAttr(local_value, 0, local_false_content)
        code.append(cil_setattr_false)
        code.append(cil_end_label) #END
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Not]
    @visitor.when(ast_cool.ComplementNode)
    def visit(self, node, context):
        """Emit CIL for integer complement '~e': computed as 0 - value and re-boxed as Int."""
        self.visit(node.expression, context)
        code = node.expression.code
        localvars = node.expression.locals
        local_cero = context.define_local(0)
        localvars.append(local_cero)
        # Unbox the operand
        local_int_content = context.define_local()
        localvars.append(local_int_content)
        cil_getattr = ast_cil.CILGetAttr(node.expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_int_content, cil_getattr)
        code.append(cil_assign1)
        local_int_value = context.define_local()
        localvars.append(local_int_value)
        cil_neg = ast_cil.CILMinus(local_cero, local_int_content)
        cil_assign2 = ast_cil.CILAssignment(local_int_value, cil_neg)
        code.append(cil_assign2)
        # Box the result
        local_value = context.define_local()
        localvars.append(local_value)
        local_int_tag = context.define_local(self.types_dict['Int'])
        localvars.append(local_int_tag)
        cil_allocate = ast_cil.CILAllocate(local_int_tag)
        cil_assign3 = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_int_value)
        code += [cil_assign3, cil_setattr]
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Comp]
    @visitor.when(ast_cool.LessThanOrEqualNode)
    def visit(self, node, context):
        """Emit CIL for 'a <= b': unbox both Ints, compare, box the Bool result."""
        self.visit(node.left_expression, context)
        code = node.left_expression.code
        localvars = node.left_expression.locals
        self.visit(node.right_expression, context)
        code += node.right_expression.code
        localvars += node.right_expression.locals
        local_int_left = context.define_local()
        localvars.append(local_int_left)
        cil_getattr1 = ast_cil.CILGetAttr(node.left_expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_int_left, cil_getattr1)
        code.append(cil_assign1)
        local_int_right = context.define_local()
        localvars.append(local_int_right)
        cil_getattr2 = ast_cil.CILGetAttr(node.right_expression.value, 0)
        cil_assign2 = ast_cil.CILAssignment(local_int_right, cil_getattr2)
        code.append(cil_assign2)
        local_bool_content = context.define_local()
        localvars.append(local_bool_content)
        cil_lesseq = ast_cil.CILLessThanEq(local_int_left, local_int_right)
        cil_assign3 = ast_cil.CILAssignment(local_bool_content, cil_lesseq)
        code.append(cil_assign3)
        local_value = context.define_local()
        localvars.append(local_value)
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        localvars.append(local_bool_tag)
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign4 = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_bool_content)
        code += [cil_assign4, cil_setattr]
        node.value = local_value
        node.locals = localvars
        node.code = code
    @visitor.when(ast_cool.LessThanNode)
    def visit(self, node, context):
        """Emit CIL for 'a < b': unbox both Ints, compare, box the Bool result."""
        self.visit(node.left_expression, context)
        code = node.left_expression.code
        localvars = node.left_expression.locals
        self.visit(node.right_expression, context)
        code += node.right_expression.code
        localvars += node.right_expression.locals
        local_int_left = context.define_local()
        localvars.append(local_int_left)
        cil_getattr1 = ast_cil.CILGetAttr(node.left_expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_int_left, cil_getattr1)
        code.append(cil_assign1)
        local_int_right = context.define_local()
        localvars.append(local_int_right)
        cil_getattr2 = ast_cil.CILGetAttr(node.right_expression.value, 0)
        cil_assign2 = ast_cil.CILAssignment(local_int_right, cil_getattr2)
        code.append(cil_assign2)
        local_bool_content = context.define_local()
        localvars.append(local_bool_content)
        cil_less = ast_cil.CILLessThan(local_int_left, local_int_right)
        cil_assign3 = ast_cil.CILAssignment(local_bool_content, cil_less)
        code.append(cil_assign3)
        local_value = context.define_local()
        localvars.append(local_value)
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        localvars.append(local_bool_tag)
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign4 = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_bool_content)
        code += [cil_assign4, cil_setattr]
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Neg]
    @visitor.when(ast_cool.NegationNode)
    def visit(self, node, context):
        """Emit CIL for boolean 'not e': computed as 1 - value and re-boxed as Bool."""
        self.visit(node.expression, context)
        code = node.expression.code
        localvars = node.expression.locals
        local_one = context.define_local(1)
        localvars.append(local_one)
        # Unbox the operand
        local_bool_content = context.define_local()
        localvars.append(local_bool_content)
        cil_getattr = ast_cil.CILGetAttr(node.expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_bool_content, cil_getattr)
        code.append(cil_assign1)
        local_bool_value = context.define_local()
        localvars.append(local_bool_value)
        cil_neg = ast_cil.CILMinus(local_one, local_bool_content)
        cil_assign2 = ast_cil.CILAssignment(local_bool_value, cil_neg)
        code.append(cil_assign2)
        # Box the result
        local_value = context.define_local()
        localvars.append(local_value)
        local_bool_tag = context.define_local(self.types_dict['Bool'])
        localvars.append(local_bool_tag)
        cil_allocate = ast_cil.CILAllocate(local_bool_tag)
        cil_assign3 = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_bool_value)
        code += [cil_assign3, cil_setattr]
        node.value = local_value
        node.locals = localvars
        node.code = code
    # [Arith]
    @visitor.when(ast_cool.PlusNode)
    def visit(self, node, context):
        """Emit CIL for 'a + b': unbox both Ints, add, box the Int result."""
        self.visit(node.left_expression, context)
        code = node.left_expression.code
        localvars = node.left_expression.locals
        self.visit(node.right_expression, context)
        code += node.right_expression.code
        localvars += node.right_expression.locals
        local_int_left = context.define_local()
        localvars.append(local_int_left)
        cil_getattr1 = ast_cil.CILGetAttr(node.left_expression.value, 0)
        cil_assign1 = ast_cil.CILAssignment(local_int_left, cil_getattr1)
        code.append(cil_assign1)
        local_int_right = context.define_local()
        localvars.append(local_int_right)
        cil_getattr2 = ast_cil.CILGetAttr(node.right_expression.value, 0)
        cil_assign2 = ast_cil.CILAssignment(local_int_right, cil_getattr2)
        code.append(cil_assign2)
        local_int_content = context.define_local()
        localvars.append(local_int_content)
        cil_plus = ast_cil.CILPLus(local_int_left, local_int_right)
        cil_assign3 = ast_cil.CILAssignment(local_int_content, cil_plus)
        code.append(cil_assign3)
        local_value = context.define_local()
        localvars.append(local_value)
        local_int_tag = context.define_local(self.types_dict['Int'])
        localvars.append(local_int_tag)
        cil_allocate = ast_cil.CILAllocate(local_int_tag)
        cil_assign4 = ast_cil.CILAssignment(local_value, cil_allocate)
        cil_setattr = ast_cil.CILSetAttr(local_value, 0, local_int_content)
        code += [cil_assign4, cil_setattr]
        node.value = local_value
        node.locals = localvars
        node.code = code
@visitor.when(ast_cool.MinusNode)
def visit(self, node, context):
    """Emit CIL for '-': unbox both Int operands, subtract, box the result as a new Int."""
    self.visit(node.left_expression, context)
    code = node.left_expression.code
    locals_ = node.left_expression.locals
    self.visit(node.right_expression, context)
    code += node.right_expression.code
    locals_ += node.right_expression.locals

    def new_local(*init):
        slot = context.define_local(*init)
        locals_.append(slot)
        return slot

    lhs = new_local()
    code.append(ast_cil.CILAssignment(lhs, ast_cil.CILGetAttr(node.left_expression.value, 0)))
    rhs = new_local()
    code.append(ast_cil.CILAssignment(rhs, ast_cil.CILGetAttr(node.right_expression.value, 0)))
    difference = new_local()
    code.append(ast_cil.CILAssignment(difference, ast_cil.CILMinus(lhs, rhs)))
    boxed = new_local()
    int_type = new_local(self.types_dict['Int'])
    code.append(ast_cil.CILAssignment(boxed, ast_cil.CILAllocate(int_type)))
    code.append(ast_cil.CILSetAttr(boxed, 0, difference))

    node.value = boxed
    node.locals = locals_
    node.code = code
@visitor.when(ast_cool.StarNode)
def visit(self, node, context):
    """Emit CIL for '*': unbox both Int operands, multiply, box the result as a new Int."""
    self.visit(node.left_expression, context)
    code = node.left_expression.code
    locals_ = node.left_expression.locals
    self.visit(node.right_expression, context)
    code += node.right_expression.code
    locals_ += node.right_expression.locals

    def new_local(*init):
        slot = context.define_local(*init)
        locals_.append(slot)
        return slot

    lhs = new_local()
    code.append(ast_cil.CILAssignment(lhs, ast_cil.CILGetAttr(node.left_expression.value, 0)))
    rhs = new_local()
    code.append(ast_cil.CILAssignment(rhs, ast_cil.CILGetAttr(node.right_expression.value, 0)))
    product = new_local()
    code.append(ast_cil.CILAssignment(product, ast_cil.CILMult(lhs, rhs)))
    boxed = new_local()
    int_type = new_local(self.types_dict['Int'])
    code.append(ast_cil.CILAssignment(boxed, ast_cil.CILAllocate(int_type)))
    code.append(ast_cil.CILSetAttr(boxed, 0, product))

    node.value = boxed
    node.locals = locals_
    node.code = code
@visitor.when(ast_cool.DivNode)
def visit(self, node, context):
    """Emit CIL for '/': unbox operands, guard against a zero divisor, divide, box as Int."""
    self.visit(node.left_expression, context)
    code = node.left_expression.code
    locals_ = node.left_expression.locals
    self.visit(node.right_expression, context)
    code += node.right_expression.code
    locals_ += node.right_expression.locals

    def new_local(*init):
        slot = context.define_local(*init)
        locals_.append(slot)
        return slot

    lhs = new_local()
    code.append(ast_cil.CILAssignment(lhs, ast_cil.CILGetAttr(node.left_expression.value, 0)))
    rhs = new_local()
    code.append(ast_cil.CILAssignment(rhs, ast_cil.CILGetAttr(node.right_expression.value, 0)))

    # Runtime guard: jump over the error trampoline when the divisor is non-zero,
    # otherwise fall through to the '_divide_by_0' runtime routine.
    self.num_labels += 1
    ok_label = ast_cil.CILLabel('NOT_NONE' + str(self.num_labels))
    self.num_labels += 1
    zero_label = ast_cil.CILLabel('DIVISION_BY_0' + str(self.num_labels))
    code.append(ast_cil.CILCondition(rhs, ok_label.label))
    code.append(zero_label)
    code.append(ast_cil.CILGoTo('_divide_by_0'))
    code.append(ok_label)

    quotient = new_local()
    code.append(ast_cil.CILAssignment(quotient, ast_cil.CILDiv(lhs, rhs)))
    boxed = new_local()
    int_type = new_local(self.types_dict['Int'])
    code.append(ast_cil.CILAssignment(boxed, ast_cil.CILAllocate(int_type)))
    code.append(ast_cil.CILSetAttr(boxed, 0, quotient))

    node.value = boxed
    node.locals = locals_
    node.code = code
# [Equal]
@visitor.when(ast_cool.EqualNode)
def visit(self, node, context):
    """Emit CIL for '=': allocate a Bool box, then store the equality result into it."""
    self.visit(node.left_expression, context)
    code = node.left_expression.code
    locals_ = node.left_expression.locals
    self.visit(node.right_expression, context)
    code += node.right_expression.code
    locals_ += node.right_expression.locals

    def new_local(*init):
        slot = context.define_local(*init)
        locals_.append(slot)
        return slot

    boxed = new_local()
    bool_type = new_local(self.types_dict['Bool'])
    code.append(ast_cil.CILAssignment(boxed, ast_cil.CILAllocate(bool_type)))
    # The comparison is applied to the operands' value slots directly
    # (no unboxing happens here, unlike the arithmetic visitors).
    outcome = new_local()
    code.append(ast_cil.CILAssignment(outcome, ast_cil.CILEqual(node.left_expression.value, node.right_expression.value)))
    code.append(ast_cil.CILSetAttr(boxed, 0, outcome))

    node.value = boxed
    node.locals = locals_
    node.code = code
| 37.871963 | 125 | 0.665795 |
0c3538e560d86ee8d3781c58a35fffad78aa69e7 | 2,495 | py | Python | aries_cloudagent/protocols/introduction/messages/tests/test_invitation.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 2 | 2020-02-26T14:22:44.000Z | 2021-05-06T20:13:36.000Z | aries_cloudagent/protocols/introduction/messages/tests/test_invitation.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 6 | 2021-03-10T20:05:19.000Z | 2022-02-27T05:41:09.000Z | aries_cloudagent/protocols/introduction/messages/tests/test_invitation.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 4 | 2020-02-19T23:02:11.000Z | 2021-11-18T11:33:43.000Z | from unittest import mock, TestCase
from asynctest import TestCase as AsyncTestCase
from ....connections.messages.connection_invitation import ConnectionInvitation
from ..invitation import Invitation
from ...message_types import INVITATION, PROTOCOL_PACKAGE
class TestConfig:
    """Shared fixture values mixed into the test cases below."""

    label = "Label"
    # Example DID / verkey literals (did:sov and base58 forms).
    did = "did:sov:QmWbsNYhMrjHiqZDTUTEJs"
    endpoint_url = "https://example.com/endpoint"
    endpoint_did = "did:sov:A2wBhNYhMrjHiqZDTUYH7u"
    key = "8HH5gYEeNc3z7PYXmd54d4x6qAfCNrqQqEB3nS7Zfu7K"
    test_message = "test message"
class TestInvitation(TestCase, TestConfig):
    """Unit tests for the Invitation message model."""

    def setUp(self):
        # Fresh Invitation wrapping a ConnectionInvitation built from the
        # shared TestConfig fixture values.
        self.connection_invitation = ConnectionInvitation(
            label=self.label, recipient_keys=[self.key], endpoint=self.endpoint_url
        )
        self.invitation = Invitation(
            invitation=self.connection_invitation, message=self.test_message
        )

    def test_init(self):
        """Constructor stores the wrapped invitation and message verbatim."""
        assert self.invitation.invitation == self.connection_invitation
        assert self.invitation.message == self.test_message

    def test_type(self):
        """Message type constant matches the protocol's INVITATION type."""
        assert self.invitation._type == INVITATION

    @mock.patch(f"{PROTOCOL_PACKAGE}.messages.invitation.InvitationSchema.load")
    def test_deserialize(self, mock_invitation_schema_load):
        """deserialize() delegates to InvitationSchema.load and returns its result."""
        obj = {"obj": "obj"}
        invitation = Invitation.deserialize(obj)
        mock_invitation_schema_load.assert_called_once_with(obj)
        assert invitation is mock_invitation_schema_load.return_value

    @mock.patch(f"{PROTOCOL_PACKAGE}.messages.invitation.InvitationSchema.dump")
    def test_serialize(self, mock_invitation_schema_dump):
        """serialize() delegates to InvitationSchema.dump and returns its result."""
        invitation_dict = self.invitation.serialize()
        mock_invitation_schema_dump.assert_called_once_with(self.invitation)
        assert invitation_dict is mock_invitation_schema_dump.return_value
class TestInvitationSchema(AsyncTestCase, TestConfig):
    """Test invitation schema."""

    async def test_make_model(self):
        """A serialize/deserialize round trip yields a model of the same type."""
        invitation = Invitation(
            invitation=ConnectionInvitation(
                label=self.label, recipient_keys=[self.key], endpoint=self.endpoint_url
            ),
            message=self.test_message,
        )
        data = invitation.serialize()
        model_instance = Invitation.deserialize(data)
        assert type(model_instance) is type(invitation)
| 34.178082 | 87 | 0.703808 |
480467f865fae8ba1e2bc45837c26434d52917dc | 446 | py | Python | src/tagger.py | bamdadsabbagh/tagger | c34eb031dd4fe4a0c5ecb228eb32ddfd73983214 | [
"MIT"
] | 1 | 2020-11-30T15:40:36.000Z | 2020-11-30T15:40:36.000Z | src/tagger.py | bamdadsabbagh/tagger | c34eb031dd4fe4a0c5ecb228eb32ddfd73983214 | [
"MIT"
] | 30 | 2020-07-09T10:21:26.000Z | 2022-02-04T16:12:24.000Z | src/tagger.py | bamdadsabbagh/tagger | c34eb031dd4fe4a0c5ecb228eb32ddfd73983214 | [
"MIT"
] | null | null | null | # components
from env import *
from tagger_write_none import TaggerWriteNone
from tagger_write_data import TaggerWriteData
# packages
import style
def Tagger(files, discogs):
    """Tag `files` from Discogs metadata, or write empty tags when no match exists.

    `discogs` is either None (no match) or a dict with a parsed 'json'
    payload and a 'url'.
    """
    if discogs is None:
        TaggerWriteNone(files)
        return
    meta = discogs['json']
    # .get() returns None for a missing key, and `None + ' - '` raises
    # TypeError — default missing fields to '' so the banner always prints.
    artist = meta.get('artists_sort') or ''
    title = meta.get('title') or ''
    print(style.blue(artist + ' - ' + title))
    print(style.blue(discogs['url']))
    print()
    TaggerWriteData(files, discogs)
    return
| 20.272727 | 97 | 0.692825 |
f53592aec332b6e3174c1c82cd9c1c6d0bc00753 | 5,541 | py | Python | azure-mgmt-network/azure/mgmt/network/models/express_route_circuit.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2018-11-09T06:16:34.000Z | 2018-11-09T06:16:34.000Z | azure-mgmt-network/azure/mgmt/network/models/express_route_circuit.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | null | null | null | azure-mgmt-network/azure/mgmt/network/models/express_route_circuit.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2018-11-09T06:17:41.000Z | 2018-11-09T06:17:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteCircuit(Resource):
    """ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict
    :param sku: The SKU.
    :type sku: :class:`ExpressRouteCircuitSku
     <azure.mgmt.network.models.ExpressRouteCircuitSku>`
    :param allow_classic_operations: Allow classic operations
    :type allow_classic_operations: bool
    :param circuit_provisioning_state: The CircuitProvisioningState state of
     the resource.
    :type circuit_provisioning_state: str
    :param service_provider_provisioning_state: The
     ServiceProviderProvisioningState state of the resource. Possible values
     are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
     Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
     'Deprovisioning'
    :type service_provider_provisioning_state: str or
     :class:`ServiceProviderProvisioningState
     <azure.mgmt.network.models.ServiceProviderProvisioningState>`
    :param authorizations: The list of authorizations.
    :type authorizations: list of :class:`ExpressRouteCircuitAuthorization
     <azure.mgmt.network.models.ExpressRouteCircuitAuthorization>`
    :param peerings: The list of peerings.
    :type peerings: list of :class:`ExpressRouteCircuitPeering
     <azure.mgmt.network.models.ExpressRouteCircuitPeering>`
    :param service_key: The ServiceKey.
    :type service_key: str
    :param service_provider_notes: The ServiceProviderNotes.
    :type service_provider_notes: str
    :param service_provider_properties: The ServiceProviderProperties.
    :type service_provider_properties:
     :class:`ExpressRouteCircuitServiceProviderProperties
     <azure.mgmt.network.models.ExpressRouteCircuitServiceProviderProperties>`
    :param provisioning_state: Gets the provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    :param etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """

    # Server-populated fields: flagged read-only so they are rejected on input.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Attribute-name -> wire key path and type mapping consumed by the SDK's
    # generated serialization layer (see the AutoRest header in this file).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
        'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
        'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
        'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
        'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
        'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
        'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, location=None, tags=None, sku=None, allow_classic_operations=None, circuit_provisioning_state=None, service_provider_provisioning_state=None, authorizations=None, peerings=None, service_key=None, service_provider_notes=None, service_provider_properties=None, provisioning_state=None, gateway_manager_etag=None, etag=None):
        # The Resource base class handles the common id/location/tags fields.
        super(ExpressRouteCircuit, self).__init__(id=id, location=location, tags=tags)
        self.sku = sku
        self.allow_classic_operations = allow_classic_operations
        self.circuit_provisioning_state = circuit_provisioning_state
        self.service_provider_provisioning_state = service_provider_provisioning_state
        self.authorizations = authorizations
        self.peerings = peerings
        self.service_key = service_key
        self.service_provider_notes = service_provider_notes
        self.service_provider_properties = service_provider_properties
        self.provisioning_state = provisioning_state
        self.gateway_manager_etag = gateway_manager_etag
        self.etag = etag
| 50.372727 | 354 | 0.697708 |
7f4f23677f52a4a2abda5cc382a1b8d695e0fe04 | 1,361 | py | Python | services/rank.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 5 | 2021-03-02T09:04:07.000Z | 2022-01-25T09:58:16.000Z | services/rank.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | 229 | 2020-09-30T15:08:39.000Z | 2022-03-31T14:23:55.000Z | services/rank.py | openghg/openghg | 9a05dd6fe3cee6123898b8f390cfaded08dbb408 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from openghg.store.base import Datasource
from openghg.store import ObsSurface
def set_rank(args: Dict) -> None:
    """Apply a rank over the given date ranges to a Datasource in the ObsSurface store."""
    store = ObsSurface.load()
    # Keyword arguments are read in the same order as before so a missing
    # key raises the same KeyError.
    store.set_rank(
        rank=args["rank"],
        uuid=args["uuid"],
        date_range=args["dateranges"],
        overwrite=args["overwrite"],
    )
def clear_rank(args: Dict) -> None:
    """Remove any ranking from the Datasource identified by args['uuid']."""
    ObsSurface.load().clear_rank(uuid=args["uuid"])
def get_sources(args: Dict) -> Dict:
    """Return rank and date-range info for Datasources matching a site/species query.

    Result shape: {"user_info": {label: {...}}, "key_lookup": {label: uuid}},
    or {} when nothing matches.
    """
    store = ObsSurface.load()
    uuids = store.datasources()
    rank_table = store.rank_data()
    site = args["site"]
    species = args["species"]

    # Shallow-load each Datasource (JSON metadata only) and keep the matches.
    matches = []
    for uid in uuids:
        source = Datasource.load(uuid=uid, shallow=True)
        if source.search_metadata(site=site, species=species):
            matches.append(source)

    if not matches:
        return {}

    user_info = {}
    key_lookup = {}
    for source in matches:
        label = "_".join([source.species(), source.inlet(), source.instrument()])
        user_info[label] = {
            "rank_data": rank_table.get(source.uuid(), "NA"),
            "data_range": source.daterange_str(),
        }
        key_lookup[label] = source.uuid()

    return {"user_info": user_info, "key_lookup": key_lookup}
| 26.686275 | 125 | 0.67083 |
9aee0c7c10977c3c7dab04f0ca3dce82cf079338 | 903 | py | Python | swagger-iu/swagger-demo.py | donwany/momo | 1974169f6b62ecae54870da29a1114f493d7d03d | [
"Apache-2.0"
] | 2 | 2020-02-27T06:00:23.000Z | 2021-06-25T09:39:42.000Z | swagger-iu/swagger-demo.py | donwany/momo | 1974169f6b62ecae54870da29a1114f493d7d03d | [
"Apache-2.0"
] | null | null | null | swagger-iu/swagger-demo.py | donwany/momo | 1974169f6b62ecae54870da29a1114f493d7d03d | [
"Apache-2.0"
] | 3 | 2020-07-15T16:50:09.000Z | 2021-06-22T18:55:49.000Z | # http://127.0.0.1:5000/apidocs/
from flask import Flask
from flasgger import Swagger
from flask_restful import Api, Resource
# Application wiring: REST resources via Flask-RESTful, API docs via Flasgger.
app = Flask(__name__)
api = Api(app)
swagger = Swagger(app)  # serves the interactive UI at /apidocs/
class Username(Resource):
    # NOTE: the get() docstring below is not ordinary documentation — Flasgger
    # parses the YAML after '---' to build the Swagger spec, so its text and
    # indentation are load-bearing.  Do not edit casually.
    def get(self, username):
        """
        This examples uses FlaskRESTful Resource
        It works also with swag_from, schemas and spec_dict
        ---
        parameters:
          - in: path
            name: username
            type: string
            required: true
        responses:
          200:
            description: A single user item
            schema:
              id: User
              properties:
                username:
                  type: string
                  description: The name of the user
                  default: Steven Wilson
        """
        # Echo the path parameter back with an explicit 200 status.
        return {'username': username}, 200
# Route /username/<username> to the resource and start the development server.
api.add_resource(Username, '/username/<username>')
app.run(debug=True)  # debug server — not suitable for production
| 23.153846 | 58 | 0.568106 |
9ba4dd14143fc987bf7e6b489bdc2c0be3b46ad2 | 165 | py | Python | server/views.py | JBris/vue-python-graphql | ba208130f572f29c7f427784c1e6997f4168ee01 | [
"MIT"
] | 7 | 2020-06-08T02:57:33.000Z | 2021-05-06T12:03:29.000Z | server/views.py | JBris/vue-aiohttp-graphql | ba208130f572f29c7f427784c1e6997f4168ee01 | [
"MIT"
] | 2 | 2021-03-10T14:11:41.000Z | 2022-02-13T10:30:37.000Z | server/views.py | JBris/vue-aiohttp-graphql | ba208130f572f29c7f427784c1e6997f4168ee01 | [
"MIT"
] | 2 | 2020-06-21T09:38:28.000Z | 2020-07-15T03:29:19.000Z | from aiohttp import web
async def index(request):
    """Landing handler: points callers at the GraphQL endpoints."""
    payload = {"message": "Please use the /graphql and /graphiql endpoints."}
    return web.json_response(data=payload)
e49ea48691e81ca6ce760e0c1c94afe0fc152645 | 7,660 | py | Python | clientsForLIne/client5.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | 1 | 2015-05-23T00:07:36.000Z | 2015-05-23T00:07:36.000Z | clientsForLIne/client5.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | null | null | null | clientsForLIne/client5.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | null | null | null | import threading
from scapy.all import *
import os
import subprocess
import json
import argparse
import random
import socket
from struct import *
import datetime
import pcapy
import sys
import time
# Command-line configuration.  Defaults are supplied directly to argparse
# rather than patched in afterwards with a chain of `if x is None` checks;
# the resulting global values are identical to the old behaviour.
parser = argparse.ArgumentParser(description='Client')
parser.add_argument('-interface', dest='interface', action='store', default='h5-eth0',
                    help='Network card Interface connected to switch')
parser.add_argument('-id', dest='bidderID', action='store', default='cay',
                    help='ID for the bidder, e.g., mike')
parser.add_argument('-host', dest='host', action='store', default=4,
                    help='source host ID')
parser.add_argument('-all', dest='allhosts', action='store', default=5,
                    help='total number of hosts')
args = parser.parse_args()

interface = args.interface
bidderID = args.bidderID
host = args.host
allhosts = args.allhosts

# Number of completed bidding rounds (incremented by the sniffer thread).
bidRound = 0
def constructBidString(value, destID, minRate, data, start, end, latency):
    """Build the JSON bid payload that gets broadcast to the auction server.

    NOTE(review): the payload is assembled by %-interpolation, so a bidderID
    containing quotes or backslashes would corrupt it; json.dumps would be
    safer, but numeric fields are currently emitted unquoted — confirm the
    server's expected field types before switching.
    """
    global host
    global bidderID
    # NOTE(review): this local shadows the imported `json` module inside
    # this function.
    json = "{\"Bidder\":\"%s\", \"Value\":%s, \"SID\":%s, \"DID\":%s, \"MinRate\":%s, \"Data\":%s,\"Start\":%s, \"End\":%s, \"Latency\":%s}" % (bidderID, value, host, destID, minRate, data, start, end, latency)
    return json
def randomRequestGenerator():
    """Create one random bid (as a JSON string) addressed to a random host."""
    #random items to generate:
    value = random.randint(0,1000)
    # destination host index; allhosts - 2 keeps the range below this host's id
    destID = random.randint(0,allhosts - 2)
    minRate = random.randint(0, 10)
    #relative time..
    start = random.randint(10000,100000)
    end = random.randint(start, 200000)
    # data volume implied by holding minRate over the window (MB)
    data = (end - start) / 1000.0 * minRate
    latency = random.randint(1000000, 10000000)
    # NOTE(review): `latency` above is computed but never sent — the fixed
    # `latencyq` is what gets passed below.  Confirm which was intended.
    latencyq = 100000000
    randomJson = constructBidString(value, destID, minRate, data, start, end,latencyq)
    return randomJson
def eth_addr(a):
    """Render the first six bytes of `a` as a colon-separated lowercase hex MAC string."""
    return ':'.join('%.2x' % ord(a[i]) for i in range(6))
# Raw payload of the most recent control packet seen by the sniffer thread.
lastFetched = ''
def parse_packet(packet) :
    """Inspect a raw sniffed frame.

    When it is an IPv4 packet whose source is the control address 1.2.3.4 or
    1.2.3.5, copy the payload into the module global `lastFetched` and return
    that address string; otherwise return None implicitly.
    """
    #parse ethernet header
    eth_length = 14
    global lastFetched
    eth_header = packet[:eth_length]
    eth = unpack('!6s6sH' , eth_header)
    eth_protocol = socket.ntohs(eth[2])
    #Parse IP packets, IP Protocol number = 8
    if eth_protocol == 8 :
        #Parse IP header
        #take first 20 characters for the ip header
        ip_header = packet[eth_length:20+eth_length]
        #now unpack them :)
        iph = unpack('!BBHHHBBH4s4s' , ip_header)
        version_ihl = iph[0]
        ihl = version_ihl & 0xF
        # IHL is in 32-bit words, so multiply by 4 for a byte length
        iph_length = ihl * 4
        s_addr = socket.inet_ntoa(iph[8]);
        #we fetch the response packet
        if str(s_addr) == '1.2.3.4':
            h_size = eth_length + iph_length
            data = packet[h_size:]
            lastFetched = data[0:]
            return '1.2.3.4'
        #Reminder from the server that
        #the client could start to send packets
        elif str(s_addr) == '1.2.3.5':
            h_size = eth_length + iph_length
            data = packet[h_size:]
            lastFetched = data[0:]
            return '1.2.3.5'
def sniffing():
    """Capture loop (Python 2): on each control packet from 1.2.3.4, print the
    payload and broadcast a fresh random bid."""
    global lastFetched
    # NOTE(review): `localBiddingRound` is declared global but never assigned
    # or read anywhere visible in this file — possibly dead.
    global localBiddingRound
    global bidRound
    cap = pcapy.open_live(interface , 65536 , 0 , 0)
    #while bidRound < 5 :
    while True:
        (header, packet) = cap.next()
        #function to parse a packet
        result = parse_packet(packet)
        if result is not None:
            if result == '1.2.3.4':
                bidRound += 1
                print lastFetched
                # Broadcast the next bid to every host on the subnet.
                content = randomRequestGenerator()
                packet = Ether() / IP(dst="10.0.0.255") / content
                sendp(packet, iface=interface, count=1)
        '''
        if result == '1.2.3.5':
            #parse JSON first
            parsed_json = json.loads(lastFetched)
            print 'Get a reminder from the server, start transmitting..'
            destIP = parsed_json['destIP']
            bandwidth = parsed_json['bandwidth']
            duration = parsed_json['duration']
            data = parsed_json['data']
            pSend = threading.Thread(target=udpSend, args=(destIP,
                bandwidth, duration, data))
            pSend.start()
        '''
datablock = 'cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc'
# Pacing divisor for udpSend: each ~1 ms tick targets bandwidth / granuality MB.
# (Name is a typo for "granularity"; kept unchanged for compatibility.)
granuality = 1000.0
def udpSend(destIP, bandwidth, duration, data):
    # current method : send until all data ends
    """Send `data` (MB) of UDP datagrams to destIP:9999, pacing to roughly
    `bandwidth` MB/s in ~1 ms quanta.  `duration` is only printed, not enforced.
    """
    sock = socket.socket(socket.AF_INET, # Internet
                         socket.SOCK_DGRAM) # UDP
    start_time = time.time()
    left = data
    allData = 0
    count = 0
    # MB budget to push per 1 ms tick
    granulaityData = (float) (bandwidth / granuality)
    print 'Begin transmitting to ' + str(destIP) + ' at the speed of ' + str(bandwidth) + 'MB/s'
    print 'Duration: ' + str(duration) + 'ms, ', 'data: ' + str(data) + 'MB'
    while left > 0 :
        dataThisRound = 0
        startThisRound = time.time()
        while dataThisRound <= granulaityData:
            # Bookkeeping treats each sendto() as 0.001 MB of payload.
            allData += 0.001
            dataThisRound += 0.001
            count += 1
            sock.sendto(datablock, (destIP, 9999))
            left = left - 0.001
            if count == 1000:
                # NOTE(review): `rate` is computed but never used or printed.
                rate = allData / (time.time() - start_time)
                count = 0
        passedTime = time.time() - startThisRound
        if passedTime < 0.001:
            # sleep() returns None; assigning it to `t` has no effect.
            t = time.sleep(0.001 - passedTime)
    return
#a thread always receiving data for UDP
def udpListen():
    """Consume inbound UDP datagrams on port 9999 forever (keeps the OS buffer drained)."""
    drain = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    drain.bind(('0.0.0.0', 9999))
    while True:
        drain.recvfrom(5000)
if __name__ == "__main__":
    # Sniffer thread: reacts to auction responses by broadcasting new bids.
    p = threading.Thread(target=sniffing)
    p.start()
    # Fixed seed so every run produces the same bid sequence.
    random.seed(5)
    # Listener thread: drains incoming UDP traffic on port 9999.
    p2 = threading.Thread(target=udpListen)
    p2.start()
    #make the first bid
    content = randomRequestGenerator()
    print content
    packet = Ether() / IP(dst="10.0.0.255") / content
    # send
    sendp(packet, iface=interface, count=1)
    bidRound += 1
    p.join()
    '''
    while True:
        content = randomRequestGenerator()
        print content
        #send scapy packet ...
        #p = threading.Thread(target = replyListener)
        #p.daemon = True
        #p.start()
        p = threading.Thread(target=sniffing)
        p.start()
        packet = Ether() / IP(dst="10.0.0.255") / content
        # send
        sendp(packet, iface=interface, count=1)
        #sniffing()
        p.join()
        print lastFetched
        #parse the result
        if lastFetched[0:3] == 'Yes':
            print 'ready to send UDP flows'
    '''
| 34.349776 | 1,038 | 0.637598 |
b242016f050e49a669400dabcfb71654c057e11f | 2,026 | py | Python | test/pypendency/test_relations.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | test/pypendency/test_relations.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | 1 | 2021-06-23T15:05:40.000Z | 2021-06-23T15:05:40.000Z | test/pypendency/test_relations.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | import uuid
import pytest
from pypendency.models.generics import BaseNode, Relation, Direction
def test_relation_hash():
    """Mirrored edges between two external nodes yield relations that compare
    equal and collapse to a single set entry."""
    external_node_id_1 = str(uuid.uuid4())
    external_node_id_2 = str(uuid.uuid4())
    node1 = BaseNode("E1",
                     slug="e1",
                     type="Service",
                     description="a External Testnode",
                     id=external_node_id_1,
                     external=True)
    node2 = BaseNode("E2",
                     slug="e2",
                     type="Service",
                     description="a External Testnode",
                     id=external_node_id_2,
                     external=True)
    # Create the same logical relation from both endpoints.
    node1.edge_to(node2)
    node2.edge_from(node1)
    relation_set = set(node1.relations)
    relation_set.add(*node2.relations)
    set_length = len(relation_set)
    relations_equality = node1.relations[0] == node2.relations[0]
    pytest.assume(relations_equality)
    pytest.assume(set_length == 1)
@pytest.mark.parametrize(
    "direction,want",
    [
        [Direction.Bijection, 1],
        [Direction.Link, 1],
        [Direction.Injective, 2]
    ])
def test_relation_direction_hash(direction, want):
    """Two mirrored relations hash to one set entry for symmetric directions
    and to two entries for Injective.

    Renamed from ``test_relation_hash``: the original name redefined the
    earlier module-level test of the same name, so pytest silently dropped
    that first test from collection.
    """
    external_node_id_1 = str(uuid.uuid4())
    external_node_id_2 = str(uuid.uuid4())
    node1 = BaseNode("E1",
                     slug="e1",
                     type="Service",
                     description="a External Testnode",
                     id=external_node_id_1,
                     external=True)
    node2 = BaseNode("E2",
                     slug="e2",
                     type="Service",
                     description="a External Testnode",
                     id=external_node_id_2,
                     external=True)
    rel_1 = Relation(origin=node1, destination=node2, label="Depends", direction=direction)
    rel_2 = Relation(origin=node2, destination=node1, label="Depends", direction=direction)
    relation_set = {rel_1, rel_2}
    set_length = len(relation_set)
    pytest.assume(set_length == want)
| 31.169231 | 91 | 0.564166 |
e0bad462716adf9a18c7a6fdad8a8b9c1f5000c7 | 240 | py | Python | python/testData/refactoring/extractmethod/OutNotEmptyStatements2.after.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/refactoring/extractmethod/OutNotEmptyStatements2.after.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/refactoring/extractmethod/OutNotEmptyStatements2.after.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def f():
a = 1
a, result = sum_squares(a)
print("Sum of squares: " + a + " = " + result)
def sum_squares(a_new):
result = 0
while a_new < 10:
result += a_new * a_new
a_new += 1
return a_new, result
| 18.461538 | 50 | 0.525 |
a910d63f982eeeb59e30b7de828f8fb26a71917d | 680 | py | Python | Chapter09/combining/join.py | PacktPublishing/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 56 | 2018-06-28T05:04:36.000Z | 2022-02-06T18:36:29.000Z | Chapter09/combining/join.py | azataiot/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 2 | 2019-08-19T03:51:49.000Z | 2019-09-25T09:00:57.000Z | Chapter09/combining/join.py | azataiot/Hands-On-Reactive-Programming-with-Python | d9da4f3f070695508bb36ef9d97f1212ecaf6fab | [
"MIT"
] | 18 | 2018-09-16T05:50:13.000Z | 2022-01-02T19:59:04.000Z | import rx
import rx.operators as ops
from rx.subject import Subject
import time
# Two hot sources driven by hand below.
numbers1 = Subject()
numbers2 = Subject()

# join() pairs each numbers1 item with every overlapping numbers2 item; the
# two lambdas bound each item's "lifetime" window via a delayed signal.
# NOTE(review): in RxPY 3 ops.delay() takes SECONDS, so 200/300 are very long
# windows — confirm whether 0.2 s / 0.3 s was intended.
numbers1.pipe(
    ops.join(
        numbers2,
        lambda i: rx.just(True).pipe(ops.delay(200)),
        lambda i: rx.just(True).pipe(ops.delay(300)),
    ),
    # Each joined pair (i, j) is reduced to its sum.
    ops.starmap(lambda i, j: i + j),
).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed")
)

# Hand-fed test sequence; the sleeps separate the emission bursts.
numbers1.on_next(0)
numbers2.on_next(2)
numbers1.on_next(1)
time.sleep(0.4)
numbers1.on_next(2)
numbers2.on_next(5)
time.sleep(0.25)
numbers1.on_next(3)
numbers2.on_next(3)
| 21.935484 | 59 | 0.667647 |
d791f03f450e7dcaa0e4ac6b88bfc692434452fe | 1,542 | py | Python | YoutubeDownloader/putload/views.py | hilton-edeir/Youtube-Downloader | d2f013aa4cc28e40e6fd8be1af3693a8f2b7d174 | [
"MIT"
] | null | null | null | YoutubeDownloader/putload/views.py | hilton-edeir/Youtube-Downloader | d2f013aa4cc28e40e6fd8be1af3693a8f2b7d174 | [
"MIT"
] | null | null | null | YoutubeDownloader/putload/views.py | hilton-edeir/Youtube-Downloader | d2f013aa4cc28e40e6fd8be1af3693a8f2b7d174 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.shortcuts import render
from pytube import YouTube
def home(request):
if request.method == "POST":
try:
link = request.POST['yt_link']
video = YouTube(link)
return render(request, "video.html", {"video": video, "link": link})
except Exception as exception:
print(exception)
messages.add_message(request, messages.ERROR, "Something went wrong, please try again")
return render(request, "home.html")
def download_video(request):
    """On POST, download the highest-resolution stream of the submitted
    link into the 'Downloads' directory, flashing success or failure via
    the Django messages framework; always re-render the home page.
    """
    if request.method == "POST":
        video_link = request.POST['video_link']
        try:
            best_stream = YouTube(video_link).streams.get_highest_resolution()
            best_stream.download('Downloads')
            messages.add_message(request, messages.SUCCESS, "Download completed")
        except Exception as exception:
            # Any pytube/network failure is reported to the user rather
            # than surfacing a server error.
            print(exception)
            messages.add_message(request, messages.ERROR, "Download failed, please try again")
    return render(request, "home.html")
def download_audio(request):
    """On POST, download the audio-only stream of the submitted link,
    flashing success or failure via the Django messages framework; always
    re-render the home page.

    Fix: saves into the 'Downloads' directory for consistency with
    download_video (the original saved into the process's current working
    directory).
    """
    if request.method == "POST":
        link = request.POST['video_link']
        try:
            video = YouTube(link)
            # 'Downloads' matches download_video's output location.
            video.streams.get_audio_only().download('Downloads')
            messages.add_message(request, messages.SUCCESS, "Download completed")
        except Exception as exception:
            # Broad catch: pytube raises many exception types for invalid
            # or unavailable videos.
            print(exception)
            messages.add_message(request, messages.ERROR, "Download failed, please try again")
    return render(request, "home.html")
525c9d0e6428a6636ba78bbdc7f955b0ffd9b37c | 2,891 | py | Python | http_server_root/dashboard.py | andycavatorta/pinball | f718982ed76521090f5eee5fb5a25cd3e8ce5ce4 | [
"MIT"
] | 1 | 2021-04-01T17:33:48.000Z | 2021-04-01T17:33:48.000Z | http_server_root/dashboard.py | andycavatorta/pinball | f718982ed76521090f5eee5fb5a25cd3e8ce5ce4 | [
"MIT"
] | null | null | null | http_server_root/dashboard.py | andycavatorta/pinball | f718982ed76521090f5eee5fb5a25cd3e8ce5ce4 | [
"MIT"
] | null | null | null | import datetime
from http.server import HTTPServer, SimpleHTTPRequestHandler
import json
import os
import queue
import time
import threading
import socketserver
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
tb_path = os.path.dirname(os.path.realpath(__file__))
clients = []
class SimpleChat(WebSocket):
def handleMessage(self):
#print("got ws message", self.data)
print("handleMessage",self.data)
def handleConnected(self):
#print(self.address, 'connected')
for client in clients:
client.sendMessage(self.address[0] + u' - connected')
clients.append(self)
def handleClose(self):
clients.remove(self)
#print(self.address, 'closed')
for client in clients:
client.sendMessage(self.address[0] + u' - disconnected')
def sendToClients(self, message):
#print("Sending message to client : ", message)
for client in clients:
#print("client",client)
client.sendMessage(message)
class Message_Receiver(threading.Thread):
def __init__(
self,
_websocket
):
self.websocket = _websocket
threading.Thread.__init__(self)
self.queue = queue.Queue()
self.start()
def add_to_queue(self, topic, message,origin,destination):
self.queue.put((topic, message,origin,destination))
def run(self):
while True:
topic, message,origin,destination = self.queue.get(block=True)
#print("topic, message",topic, message)
message_json = json.dumps([str(topic), message, str(origin)])
self.websocket.sendToClients(self.websocket,message_json)
"""
try:
topic, message = self.queue.get(block=True, timeout=self.tb_ref.settings.Dashboard.refresh_interval)
print("topic, message",topic, message)
message_json = json.dumps([topic, message])
self.websocket.sendToClients(self.websocket,message_json)
# self.websocket.sendToClients(message_json)
except queue.Empty:
self.generate_system_status()
"""
def status_receiver(message):
    """Forward a status event to the dashboard broadcast queue.

    Fix: Message_Receiver.add_to_queue takes (topic, message, origin,
    destination); the original two-argument call raised TypeError. Pass
    explicit None placeholders for the unused origin/destination.
    """
    message_receiver.add_to_queue("status_event", message, None, None)
def exception_receiver(message):
    """Forward an exception event to the dashboard broadcast queue.

    Fix: Message_Receiver.add_to_queue takes (topic, message, origin,
    destination); the original two-argument call raised TypeError. Pass
    explicit None placeholders for the unused origin/destination.
    """
    message_receiver.add_to_queue("exception_event", message, None, None)
def init():
    """Start the dashboard's HTTP and websocket servers in background
    threads and return the callable used to enqueue broadcast messages
    (Message_Receiver.add_to_queue)."""
    global message_receiver

    # Static file server for the dashboard page.
    http_server = HTTPServer(('0.0.0.0', 8080), SimpleHTTPRequestHandler)
    threading.Thread(target=http_server.serve_forever).start()

    # Websocket server pushing live updates to connected browsers.
    ws_server = SimpleWebSocketServer('', 8001, SimpleChat)
    threading.Thread(target=ws_server.serveforever).start()

    # The receiver broadcasts through the SimpleChat class itself.
    message_receiver = Message_Receiver(ws_server.websocketclass)
    return message_receiver.add_to_queue
| 33.616279 | 116 | 0.665168 |
f59412f444a3cd43690841dd8e6e5e7768a13216 | 3,294 | py | Python | GeneTools/nanoarg/mapping_table.py | gaarangoa/genomic-scripts | e7ebcd61d6b1b7d8e89899fae19df6d6ebe311e0 | [
"BSD-2-Clause"
] | null | null | null | GeneTools/nanoarg/mapping_table.py | gaarangoa/genomic-scripts | e7ebcd61d6b1b7d8e89899fae19df6d6ebe311e0 | [
"BSD-2-Clause"
] | null | null | null | GeneTools/nanoarg/mapping_table.py | gaarangoa/genomic-scripts | e7ebcd61d6b1b7d8e89899fae19df6d6ebe311e0 | [
"BSD-2-Clause"
] | null | null | null | import click
import json
import logging
import pandas as pd
from tqdm import tqdm
import sys
# Maps the numeric 'origin' code attached to each gene hit to a
# human-readable group label.
origins = {
    1: 'ARGs',
    2: 'MGEs',
    4: 'MRGs',
    3: 'Functional Genes',
}

# NCBI taxonomy IDs of priority pathogens.
# Fix: the original literal repeated keys 1352 and 1280 (with identical
# values); duplicates are removed. Behavior is unchanged — Python keeps
# the last occurrence of a duplicated key.
pathogens = {
    1352: 'Enterococcus faecium',
    1280: 'Staphylococcus aureus',
    573: 'Klebsiella pneumonia',
    470: 'Acinetobacter baumannii',
    287: 'Pseudomonas aeruginosa',
    42895: 'Enterobacter spp.',
    543: 'Enterobacteriaceae',
    210: 'Helicobacter pylori',
    205: 'Campylobacter sp',
    590: 'Salmonellae',
    485: 'Neisseria gonorrhoeae',
    1313: 'Streptococcus pneumoniae',
    727: 'Haemophilus influenzae',
    625: 'Shigella sp',
}
def traverse_data(data):
    """Yield flattened gene-hit dicts from NanoARG read records.

    Each item in ``data`` carries a 'data' list of gene hits and a 'read'
    list whose first element holds the read's taxonomy assignment. Gene
    dicts are mutated in place: tabular columns (gene_id, group, species,
    is_pathogen, ...) are added and presentation-only keys are removed.
    """
    for read in tqdm(data):
        # Taxonomy assignment for the whole read, shared by its genes.
        taxa = read['read'][0]
        for gene in read['data']:
            gene['gene_id'] = gene['metadata'][0]
            gene['category'] = gene['metadata'][3]
            gene['gene_name'] = gene['metadata'][4]
            gene['read'] = gene['block_id']
            gene['group'] = origins[gene['origin']]
            # MRGs and functional genes carry no per-gene name in the
            # metadata; fall back to the category label.
            if gene['group'] in ('MRGs', 'Functional Genes'):
                gene['gene_name'] = gene['category']
            gene['NCBI_taxa_id'] = taxa['taxa_id']
            gene['taxa_centrifuge_score'] = taxa['taxa_score']
            gene['species'] = taxa['taxa_species']
            # Fix: the original used ``assert pathogens[int(...)]`` for
            # control flow; under ``python -O`` asserts are stripped and
            # every gene would be flagged as a pathogen. Use a membership
            # test; non-numeric taxa ids count as non-pathogen.
            try:
                gene['is_pathogen'] = int(int(gene['NCBI_taxa_id']) in pathogens)
            except (TypeError, ValueError):
                gene['is_pathogen'] = 0
            # Drop presentation-only fields used by the NanoARG viewer.
            for key in ('metadata', 'block_id', 'color', 'origin',
                        'stroke_width', 'total_reads', 'value',
                        'score', 'position'):
                del gene[key]
            yield gene
@click.command()
@click.option('--input-file', default='', help='JSON fil downloaded from NanoARG')
@click.option('--output-file', default='', help='file with the mapping table as shown in the genes mapped to nanopore reads')
def mapping_table(input_file: str, output_file: str) -> None:
    '''
    Generate table of genes mapped to nanopore reads
    This tool will generate the full table named "genes
    mapped to nanopore reads" under the NanoARG website.
    https://bench.cs.vt.edu/nanoarg/
    '''
    # NOTE: the docstring above is the click CLI help text — edit with care.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('loading input file ' + input_file)
    data = json.load(open(input_file))
    log.info('traversing file ' + input_file)
    # data[0] is passed to traverse_data — presumably the NanoARG export
    # wraps the read list in a one-element outer list; verify against a
    # real download.
    reads = pd.DataFrame(traverse_data(data[0]))
    # Select and order the columns of the final report table.
    dataset = reads[
        [
            'read',
            'gene_id',
            'gene_name',
            'group',
            'category',
            'start',
            'end',
            'strand',
            'identity',
            'bitscore',
            'evalue',
            'NCBI_taxa_id',
            'taxa_centrifuge_score',
            'species',
            'coverage',
            'is_pathogen'
        ]
    ]
    log.info('Storing table to '+ output_file)
    dataset.to_csv(output_file, index=False)
d1c76e27d4ca05a30048c993fb168157bce0d15f | 874 | py | Python | buggy_python_code.py | ppochop/pv080_buggy_python | a61f08ea3d0f9e632c3eacb3460f876a7117af9a | [
"MIT"
] | null | null | null | buggy_python_code.py | ppochop/pv080_buggy_python | a61f08ea3d0f9e632c3eacb3460f876a7117af9a | [
"MIT"
] | 1 | 2021-05-12T07:22:43.000Z | 2021-05-12T07:34:35.000Z | buggy_python_code.py | pernitz/kryptocviko | ac36930c41dea7ce6f3861c0f5e273c001a3c58b | [
"MIT"
] | null | null | null | # contains bunch of buggy examples
# taken from https://hackernoon.com/10-common-security-gotchas-in-python-and-how-to-avoid-them-e19fbe265e03
import cPickle
import subprocess
import base64
import subprocess
import flask
# Input injection
def transcode_file(request, filename):
command = 'ffmpeg -i "{source}" output_file.mpg'.format(source=filename)
subprocess.call(command, shell=True) # a bad idea!
# Assert statements
def foo(request, user):
assert user.is_admin, 'user does not have access'
# secure code...
# Pickles
class RunBinSh(object):
def __reduce__(self):
return (subprocess.Popen, (('/bin/sh',),))
@app.route('/')
def index():
module = flask.request.args.get("module")
exec("import urllib%s as urllib" % module) # Noncompliant
print(base64.b64encode(cPickle.dumps(RunBinSh())))
| 24.971429 | 108 | 0.688787 |
b2c6c0cda29db0893eaf7fa5e01cff592736cf89 | 123 | py | Python | padasip/misc/__init__.py | huangshunliang/padasip | b44c2815000dd4d1b855c49e469072e919df15cd | [
"MIT"
] | 194 | 2016-08-28T09:23:19.000Z | 2022-03-30T02:55:22.000Z | padasip/misc/__init__.py | huangshunliang/padasip | b44c2815000dd4d1b855c49e469072e919df15cd | [
"MIT"
] | 14 | 2016-11-15T13:33:53.000Z | 2022-02-04T13:41:12.000Z | padasip/misc/__init__.py | huangshunliang/padasip | b44c2815000dd4d1b855c49e469072e919df15cd | [
"MIT"
] | 41 | 2016-12-07T19:40:25.000Z | 2022-02-24T21:32:19.000Z | from padasip.misc.error_evaluation import MSE, RMSE, MAE, logSE
from padasip.misc.error_evaluation import get_mean_error
| 24.6 | 63 | 0.837398 |
842867ac8128e68e7708ca621b29fddc5a1441bb | 1,552 | py | Python | assets/img/posts/Resize.py | jmtorrente/sleek | 2a959260334ff025ace244be6e46ebda3de5e2ec | [
"MIT"
] | null | null | null | assets/img/posts/Resize.py | jmtorrente/sleek | 2a959260334ff025ace244be6e46ebda3de5e2ec | [
"MIT"
] | null | null | null | assets/img/posts/Resize.py | jmtorrente/sleek | 2a959260334ff025ace244be6e46ebda3de5e2ec | [
"MIT"
] | null | null | null | ################################################################################################
# Name: Image_Resize
# Desc: Compress image file using python
# Date: 2019-02-10
# Author: jmtorrented
################################################################################################
from PIL import Image
from resizeimage import resizeimage
# Source image file name (change per photo).
FotoName = 'Nikon.jpg' #Modify
# Candidate base directories for input and output.
Casa = 'C:/Users/jmtor/Documents/Projects/Web/assets/img/posts/'
Work = 'O:/Web/jmtorrente.github.io/assets/img/posts/'
# Base name (without extension) used for the generated variant files.
imName = "Nikon" #Modify
# Active directory: set to Casa or Work.
Location = Casa #Modify Casa or Work
# Target width / filename-suffix pairs for every generated variant.
# (1999, "_lg") was disabled in the original and stays commented out.
SIZES = [
    (230, "_placehold"),
    (535, "_thumb"),
    (1070, "_thumb@2x"),
    (575, "_xs"),
    (767, "_sm"),
    (991, "_md"),
    # (1999, "_lg"),
]

# Fix: open the source from Location — the original opened from Casa even
# when Location was set to Work. 'rb' instead of 'r+b': the source image
# is only read, never written.
with open(Location + FotoName, 'rb') as f:
    with Image.open(f) as image:
        for width, suffix in SIZES:
            cover = resizeimage.resize_width(image, width)
            cover.save(Location + imName + suffix + '.jpg', image.format)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.