hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a046f5eb9200705de487c8e1c936389e801af46
| 3,935
|
py
|
Python
|
games/arcade_pinball/arcade_button.py
|
bn102/surrortg-sdk
|
5f51515d0fd83741b3359b9a682c0a9afc38886f
|
[
"MIT"
] | 21
|
2020-11-03T23:41:56.000Z
|
2022-03-21T04:11:46.000Z
|
games/arcade_pinball/arcade_button.py
|
bn102/surrortg-sdk
|
5f51515d0fd83741b3359b9a682c0a9afc38886f
|
[
"MIT"
] | 5
|
2021-02-11T14:36:03.000Z
|
2021-07-20T11:45:07.000Z
|
games/arcade_pinball/arcade_button.py
|
bn102/surrortg-sdk
|
5f51515d0fd83741b3359b9a682c0a9afc38886f
|
[
"MIT"
] | 11
|
2020-11-13T11:14:33.000Z
|
2022-03-21T04:11:51.000Z
|
import asyncio
import logging
import pigpio
from games.arcade_pinball.config import (
BUTTON_PRESS_TIME,
MAX_HOLD_TIME,
MAX_INPUTS_PER_INPUT,
PER_SECONDS,
)
from surrortg.inputs import Switch
from surrortg.inputs.input_filters import SpamFilter
class ArcadeMultiButton(Switch):
    """Drive one logical arcade button backed by one or more GPIO pins.

    The pins are driven active-low: ``on()`` writes 0, ``off()`` writes 1.
    Inputs are rate-limited with a SpamFilter, and a watchdog task fires the
    abuse callback if the button is held longer than MAX_HOLD_TIME.
    """

    def __init__(
        self,
        pi,
        pins,
        name,
        abuse_function=None,
        button_press_time=BUTTON_PRESS_TIME,
    ):
        # pi: connected pigpio.pi() instance (owned and stopped by the caller).
        # pins: BCM pin numbers that are written together as one button.
        # abuse_function: optional coroutine awaited when MAX_HOLD_TIME is
        #     exceeded (e.g. to lock the controls).
        # button_press_time: hold duration (seconds) used by single_press().
        self.pi = pi
        self.pins = pins
        self.name = name
        self.button_press_time = button_press_time
        self.abuse_callback = abuse_function
        # Pending hold-watchdog task created by _reset_timer; None when idle.
        self.task = None
        self.spam_filter = SpamFilter(MAX_INPUTS_PER_INPUT, PER_SECONDS)
        for pin in self.pins:
            self.pi.set_mode(pin, pigpio.OUTPUT)

    async def on(self, seat=0):
        """Press the button unless the spam filter rejects this input."""
        if not self.spam_filter.too_much_spam():
            logging.debug(f"{self.name} on")
            for pin in self.pins:
                self.pi.write(pin, 0)  # active low: 0 == pressed
            self._reset_timer(True)
        else:
            logging.info(f"Too much spam for {self.name}")
            # Spamming forces the button released.
            await self.off()

    async def off(self, seat=0):
        """Release the button and cancel the hold watchdog."""
        logging.debug(f"{self.name} off")
        for pin in self.pins:
            self.pi.write(pin, 1)  # active low: 1 == released
        self._reset_timer(False)

    async def shutdown(self, seat=0):
        """Release the pins if the pigpio connection is still alive."""
        # ArcadePinballGame handles stopping pigpio
        if self.pi.connected:
            await self.off()

    async def single_press(self):
        """Press and release once, holding for button_press_time seconds."""
        await self.on()
        await asyncio.sleep(self.button_press_time)
        await self.off()

    def _reset_timer(self, start_new):
        """Cancel any running hold watchdog; start a fresh one if requested."""
        if self.task is not None and not self.task.cancelled():
            self.task.cancel()
        if start_new:
            self.task = asyncio.create_task(self._lock_controls())

    async def _lock_controls(self):
        """Watchdog body: fire the abuse callback after MAX_HOLD_TIME seconds."""
        await asyncio.sleep(MAX_HOLD_TIME)
        logging.info("Locking controls due to abuse")
        if self.abuse_callback is not None:
            await self.abuse_callback()
class ArcadeButton(ArcadeMultiButton):
    """Single-pin arcade button: a thin convenience wrapper over
    ArcadeMultiButton."""

    def __init__(
        self,
        pi,
        pin,
        name,
        abuse_function=None,
        button_press_time=BUTTON_PRESS_TIME,
    ):
        # A one-pin button is just a multi-button with a one-element pin list.
        super().__init__(
            pi, [pin], name,
            abuse_function=abuse_function,
            button_press_time=button_press_time,
        )
if __name__ == "__main__":
    from games.arcade_pinball.config import (
        LEFT_FLIPPER_PIN,
        MAGNET_BUTTON_PIN,
        RIGHT_FLIPPER_PINS,
        SERVICE_BUTTON_PIN,
        START_BUTTON_PIN,
    )

    async def test_buttons():
        """Manual hardware test: toggle every button on/off in a 5 s cycle
        until interrupted, then release all pins and stop pigpio."""
        pi = pigpio.pi()
        if not pi.connected:
            raise RuntimeError("Could not connect to pigpio")
        buttons = [
            ArcadeButton(pi, LEFT_FLIPPER_PIN, "left"),
            ArcadeMultiButton(pi, RIGHT_FLIPPER_PINS, "right"),
            ArcadeButton(pi, MAGNET_BUTTON_PIN, "magnet"),
            ArcadeButton(pi, START_BUTTON_PIN, "start"),
            ArcadeButton(pi, SERVICE_BUTTON_PIN, "service"),
        ]
        try:
            while True:
                for button in buttons:
                    await button.on()
                # BUG FIX: the original called asyncio.sleep(5) without
                # awaiting it, so the coroutine was never run and the loop
                # never actually paused between toggles.
                await asyncio.sleep(5)
                for button in buttons:
                    await button.off()
                await asyncio.sleep(5)
        finally:
            # Runs on Ctrl-C / task cancellation as well (the original's
            # `except KeyboardInterrupt` is unreliable under asyncio.run,
            # which cancels the task instead), so the pins are always
            # released and the pigpio connection is closed.
            for button in buttons:
                await button.off()
            pi.stop()

    asyncio.run(test_buttons())
| 28.722628
| 77
| 0.596696
|
4a0470a029c9a3c99094242233164502dd912201
| 1,009
|
py
|
Python
|
demo.py
|
mqingyn/tornask
|
35570301870c1192ed9985454d129b74286ae77b
|
[
"BSD-3-Clause"
] | 7
|
2015-01-15T08:00:10.000Z
|
2018-12-07T15:21:26.000Z
|
demo.py
|
mqingyn/tornask
|
35570301870c1192ed9985454d129b74286ae77b
|
[
"BSD-3-Clause"
] | 1
|
2015-06-05T14:19:35.000Z
|
2016-01-09T01:21:19.000Z
|
demo.py
|
mqingyn/tornask
|
35570301870c1192ed9985454d129b74286ae77b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by mqingyn on 2015/1/15.
import time
from datetime import datetime
from tornask.manager import taskmgr, run_tasks
from tornado.concurrent import run_on_executor
from tornado.netutil import ThreadedResolver, IOLoop
from tornado.httpclient import AsyncHTTPClient
from tornado.gen import coroutine
class Task(object):
    """Demo task container: one blocking thread-pool task and one coroutine.

    Note: this module uses Python 2 print statements and old Tornado APIs.
    """
    def __init__(self):
        # `executor` and `io_loop` attributes are required by the
        # @run_on_executor decorator below.
        self.executor = ThreadedResolver().executor
        self.io_loop = IOLoop.current()
    @run_on_executor
    def task_interval(self):
        """Blocking task executed on the thread pool (simulates ~3 s of work)."""
        print 'start at', datetime.now()
        time.sleep(3)
        print 'end at', datetime.now()
    @coroutine
    def task_callat(self):
        """Asynchronously fetch a page; intended for fixed wall-clock scheduling."""
        response = yield AsyncHTTPClient().fetch("http://www.baidu.com")
        print response.body
if __name__ == '__main__':
    tasks = Task()
    # Run task_interval repeatedly, every 1 second.
    taskmgr.task_register("task_at", tasks.task_interval, interval=1)
    # Run task_callat at the listed HH:MM wall-clock times.
    taskmgr.task_register("task_callat", tasks.task_callat, call_at=("23:25", "23:35"))
    # Start the tornask scheduler loop (blocks).
    run_tasks()
| 28.828571
| 87
| 0.701685
|
4a047265754689cf181b41db5cac8583477cd0c8
| 1,848
|
py
|
Python
|
python-base/base/network.py
|
lovelifeming/AI-Studies-Road
|
d92e234211f89cc92c74dd49e9e5b9394b7fa4ed
|
[
"Apache-2.0"
] | null | null | null |
python-base/base/network.py
|
lovelifeming/AI-Studies-Road
|
d92e234211f89cc92c74dd49e9e5b9394b7fa4ed
|
[
"Apache-2.0"
] | null | null | null |
python-base/base/network.py
|
lovelifeming/AI-Studies-Road
|
d92e234211f89cc92c74dd49e9e5b9394b7fa4ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/12 10:39
# @Author : zengsm
# @File : network
import datetime
import requests
from bs4 import BeautifulSoup
def add_github_host(sys: str = 'windows'):
    """Resolve current IPs for a set of GitHub domains and append them to the
    OS hosts file.

    GitHub often loads slowly because domains such as assets-cdn.github.com
    and avatars*.githubusercontent.com resolve poorly; pinning fresh IPs in
    the hosts file bypasses the DNS lookup entirely.  IPs are scraped from
    http://ip.tool.chinaz.com/.

    :param sys: ``'windows'`` selects ``C:\\Windows\\System32\\drivers\\etc\\hosts``,
        anything else selects ``/etc/hosts``.  (The parameter name shadows the
        builtin ``sys`` module; kept for backward compatibility with keyword
        callers.)
    """
    domain_list = """
    github.io
    github.com
    raw.github.com
    help.github.com
    status.github.com
    training.github.com
    nodeload.github.com
    assets-cdn.github.com
    github.githubassets.com
    documentcloud.github.com
    avatars0.githubusercontent.com
    avatars1.githubusercontent.com
    avatars2.githubusercontent.com"""
    ans = []
    for domain in domain_list.split():
        # Scrape the resolved IP from chinaz's lookup page.
        url = "http://ip.tool.chinaz.com/" + domain.strip()
        resp = requests.get(url)
        # FIX: name the parser explicitly; a bare BeautifulSoup(text) emits a
        # GuessedAtParserWarning and may pick a different parser per machine.
        soup = BeautifulSoup(resp.text, "html.parser")
        result_table = soup.find(class_="IcpMain02")
        # Fragile scraping: assumes the 6th "Whwtdhalf" span holds the IP —
        # this breaks if chinaz changes its page layout.
        spans = result_table.find_all("span", class_="Whwtdhalf")
        entry = "%s %s" % (spans[5].string.strip(), domain.strip())
        print(entry)
        ans.append(entry)
    # BUG FIX: the original used `sys is 'windows'` — identity comparison on
    # strings is implementation-dependent and can silently pick the wrong
    # hosts path; use equality instead.
    hosts = r"C:\Windows\System32\drivers\etc\hosts" if sys == 'windows' else '/etc/hosts'
    print('hosts file path:' + hosts)
    print(ans)
    with open(hosts, "a") as f:
        f.write('\n')
        f.write('### add new ip address and host name by time: '
                + str(datetime.datetime.now()) + '\n')
        f.writelines('\n'.join(ans))
        f.write('\n')
if __name__ == '__main__':
    # Refresh the GitHub host mappings, then signal completion.
    add_github_host()
    print('end...')
| 30.8
| 108
| 0.651515
|
4a0473985300d9368f6de4fcbddb3aaa723aa7a7
| 496
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/box/marker/line/_color.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/box/marker/line/_color.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/box/marker/line/_color.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for ``box.marker.line.color``; thin configuration shim over
    the shared plotly ColorValidator base."""

    def __init__(self, plotly_name="color", parent_name="box.marker.line", **kwargs):
        # Pull defaults out of kwargs first so explicit overrides still win.
        array_ok = kwargs.pop("array_ok", False)
        edit_type = kwargs.pop("edit_type", "style")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 35.428571
| 85
| 0.643145
|
4a04757df96c6f275c7408658ea501ba8141d6a7
| 2,211
|
py
|
Python
|
blog/test_models.py
|
FarmCodeGary/InspirationForge
|
2b78e9c388608ac19ed0ecb114ce5e0cc1f33213
|
[
"MIT"
] | 1
|
2015-09-16T17:14:36.000Z
|
2015-09-16T17:14:36.000Z
|
blog/test_models.py
|
FarmCodeGary/InspirationForge
|
2b78e9c388608ac19ed0ecb114ce5e0cc1f33213
|
[
"MIT"
] | null | null | null |
blog/test_models.py
|
FarmCodeGary/InspirationForge
|
2b78e9c388608ac19ed0ecb114ce5e0cc1f33213
|
[
"MIT"
] | null | null | null |
"""
Unit tests for the blog app's Django models.
"""
import os.path
import datetime
from django.test import TestCase
from django.utils import timezone
from .models import Article, image_filename
class ArticleMethodTests(TestCase):
    """
    Tests for methods of the Article model.
    """

    def test_save_generates_html(self):
        """
        save() should populate the article's text field with HTML.
        """
        content_source = "Test post text"
        article = Article(title="test article", content_source=content_source)
        article.save()
        # Saving renders the source into a single HTML paragraph.
        self.assertEqual(
            article.rendered_content.strip(),
            "<p>Test post text</p>")

    def test_published_articles(self):
        """
        published_articles() should include only articles with a pub_date in
        the past, with the most recent articles first.
        """
        # One article in the past, one in the future, one right now.
        Article.objects.create(
            title="Past Article",
            content_source="test",
            pub_date=(timezone.now() + datetime.timedelta(days=-1)))
        Article.objects.create(
            title="Future Article",
            content_source="test",
            pub_date=(timezone.now() + datetime.timedelta(days=1)))
        Article.objects.create(
            title="Current Article",
            content_source="test",
            pub_date=timezone.now())
        # The future article must be excluded; most recent first.
        self.assertQuerysetEqual(
            Article.published_articles(),
            ['<Article: Current Article>', '<Article: Past Article>'])
class ImageFilenameTests(TestCase):
    """
    Tests for fixing the filenames of images.
    """

    def _assert_hyphenated(self, original):
        # Shared check: image_filename should map `original` to the
        # hyphen-separated name.  The model-instance argument is unused by
        # image_filename, so None suffices.
        result = image_filename(None, original)
        self.assertEqual(os.path.basename(result), "test-image.png")

    def test_with_spaces(self):
        """
        image_filename should convert spaces to hyphens.
        """
        self._assert_hyphenated("test image.png")

    def test_with_underscores(self):
        """
        image_filename should convert underscores to hyphens.
        """
        self._assert_hyphenated("test_image.png")
| 31.140845
| 78
| 0.622795
|
4a0475a410dc8df71227778fcee5c8b68ed9f583
| 945
|
py
|
Python
|
chapter2/PLU.py
|
ElliotShang/numerical-analysis
|
769610dc45cc4498b49f8311d7023b725c1c7bc2
|
[
"MIT"
] | null | null | null |
chapter2/PLU.py
|
ElliotShang/numerical-analysis
|
769610dc45cc4498b49f8311d7023b725c1c7bc2
|
[
"MIT"
] | null | null | null |
chapter2/PLU.py
|
ElliotShang/numerical-analysis
|
769610dc45cc4498b49f8311d7023b725c1c7bc2
|
[
"MIT"
] | null | null | null |
import numpy as np
def LU_partial_decomposition(matrix):
    """Factor a square matrix with partial pivoting so that P @ A = L @ U.

    Parameters
    ----------
    matrix : (n, n) array_like
        Square coefficient matrix; it is not modified.

    Returns
    -------
    P : (n, n) ndarray
        Permutation matrix accumulating the row swaps.
    L : (n, n) ndarray
        Unit lower-triangular matrix of elimination multipliers.
    U : (n, n) ndarray
        Upper-triangular factor, satisfying ``P @ matrix == L @ U``.
    """
    n, m = matrix.shape
    # Work on a float copy: keeps the input untouched and makes the returned
    # U float even for integer input with n == 1 (the loop never runs then).
    U = matrix.astype(float)
    PF = np.identity(n)
    LF = np.zeros((n, n))
    for k in range(0, n - 1):
        # Pivot row: largest-magnitude entry in column k, rows k..n-1.
        index = k + np.argmax(abs(U[k:, k]))
        if index != k:
            # Swap rows k and index in U and mirror the swap in PF and LF.
            # (LF's columns >= k are still zero, so a full row swap is safe.)
            P = np.identity(n)
            P[[index, k], k:n] = P[[k, index], k:n]
            U[[index, k], k:n] = U[[k, index], k:n]
            PF = np.dot(P, PF)
            LF = np.dot(P, LF)
        # Eliminate below the diagonal in column k, recording multipliers.
        L = np.identity(n)
        for j in range(k + 1, n):
            L[j, k] = -(U[j, k] / U[k, k])
            LF[j, k] = (U[j, k] / U[k, k])
        U = np.dot(L, U)
    np.fill_diagonal(LF, 1)
    return PF, LF, U
# Demo: factor a 4x4 example and print the three factors.
A = np.array([[1, 8, 2, 3],
              [-6, -3, 8, 1],
              [2, 4, 4, 2],
              [10, 5, -5, 6]])
P1, L1, U1 = LU_partial_decomposition(A)
for factor in (P1, L1, U1):
    print(factor)
| 24.868421
| 52
| 0.424339
|
4a0475bb4d6d608070747e517200f2f2d59c33c0
| 908
|
py
|
Python
|
tests/complex_predictor.py
|
BuzzniAILab/mlserving
|
8b8add9dbe5cdd6392e0c87ee789492de0a1c70e
|
[
"MIT"
] | 13
|
2020-08-23T17:35:53.000Z
|
2022-02-10T14:14:03.000Z
|
tests/complex_predictor.py
|
orlevi111/ganesha
|
137cc388806fc98f7768298da01ebeddf03f9464
|
[
"MIT"
] | 3
|
2020-08-20T21:09:01.000Z
|
2021-06-25T15:33:54.000Z
|
tests/complex_predictor.py
|
orlevi111/ganesha
|
137cc388806fc98f7768298da01ebeddf03f9464
|
[
"MIT"
] | 3
|
2021-04-12T01:56:22.000Z
|
2021-10-05T12:50:12.000Z
|
from mlserving.api import Response, Request
from mlserving.api import request_schema
from mlserving.predictors import RESTPredictor
# Payload contract enforced by @request_schema: three float features.
REQUEST_SCHEMA = {f'feature{i}': 'float' for i in (1, 2, 3)}
@request_schema(REQUEST_SCHEMA)
class MyPredictor(RESTPredictor):
    """Toy predictor used by the tests: weights the three expected features
    and scores their sum."""

    def __init__(self):
        # Per-feature multipliers applied during pre-processing.
        self.weights = [1, 1, 1]

    def pre_process(self, input_data: dict, req: Request):
        """Extract the three features (defaulting to 0) scaled by weight."""
        names = ('feature1', 'feature2', 'feature3')
        return [
            input_data.get(name, 0) * weight
            for name, weight in zip(names, self.weights)
        ]

    def predict(self, features, req: Request) -> float:
        """Dummy model: the score is simply the sum of the scaled features."""
        return sum(features)

    def post_process(self, prediction, req: Request) -> Response:
        """Wrap the raw score in a Response payload."""
        return Response(
            data={'score': prediction}
        )
| 26.705882
| 65
| 0.629956
|
4a0475e3fc2a68ea3d69ee764ea979e9c55068f0
| 3,422
|
py
|
Python
|
designate/tests/test_zone_manager/test_tasks.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
designate/tests/test_zone_manager/test_tasks.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
designate/tests/test_zone_manager/test_tasks.py
|
cneill/designate-testing
|
7bf320062d85a12bff2aee8d26c133941a289fc4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_log import log as logging
from oslo_utils import timeutils
from designate.zone_manager import tasks
from designate.tests import TestCase
from designate.storage.impl_sqlalchemy import tables
from designate.tests import fixtures
LOG = logging.getLogger(__name__)
class TaskTest(TestCase):
    """Shared base for zone-manager task tests."""

    def setUp(self):
        super(TaskTest, self).setUp()

    def _enable_tasks(self, tasks):
        """Enable the given task names in the zone_manager service config."""
        self.config(group="service:zone_manager", enabled_tasks=tasks)
class DeletedDomainPurgeTest(TaskTest):
    """Tests for DeletedDomainPurgeTask: purging domains deleted long ago."""

    def setUp(self):
        super(DeletedDomainPurgeTest, self).setUp()
        # time_threshold: purge zones deleted more than 7 days (604800 s) ago.
        self.config(
            interval=3600,
            time_threshold=604800,
            batch_size=100,
            group="zone_manager_task:domain_purge"
        )
        self.purge_task_fixture = self.useFixture(
            fixtures.ZoneManagerTaskFixture(tasks.DeletedDomainPurgeTask)
        )

    def _create_deleted_zone(self, name, mock_deletion_time):
        # Create a domain and set it as deleted
        domain = self.create_domain(name=name)
        self._delete_domain(domain, mock_deletion_time)
        return domain

    def _fetch_all_domains(self):
        """Fetch all domains including deleted ones
        """
        query = tables.domains.select()
        return self.central_service.storage.session.execute(query).fetchall()

    def _delete_domain(self, domain, mock_deletion_time):
        # Set a domain as deleted
        # Deleted rows store the dashless zone id in the `deleted` column.
        zid = domain.id.replace('-', '')
        query = tables.domains.update().\
            where(tables.domains.c.id == zid).\
            values(
                action='NONE',
                deleted=zid,
                deleted_at=mock_deletion_time,
                status='DELETED',
            )
        pxy = self.central_service.storage.session.execute(query)
        # Exactly one row must have been marked as deleted.
        self.assertEqual(pxy.rowcount, 1)
        return domain

    def _create_deleted_zones(self):
        # Create a number of deleted zones in the past days
        # Ages run 0..17 days, one zone per day.
        zones = []
        now = timeutils.utcnow()
        for age in range(18):
            age *= (24 * 60 * 60)  # seconds
            delta = datetime.timedelta(seconds=age)
            deletion_time = now - delta
            name = "example%d.org." % len(zones)
            z = self._create_deleted_zone(name, deletion_time)
            zones.append(z)
        return zones

    def test_purge_zones(self):
        """Create 18 zones, run zone_manager, check if 7 zones are remaining
        """
        self.config(quota_domains=1000)
        self._create_deleted_zones()
        self.purge_task_fixture.task()
        zones = self._fetch_all_domains()
        LOG.info("Number of zones: %d", len(zones))
        # Zones deleted 0..6 days ago fall inside the 7-day threshold and
        # must survive the purge.
        self.assertEqual(len(zones), 7)
| 31.685185
| 77
| 0.651666
|
4a0476b53903af7e9fd3ea1829e95c7b43231660
| 9,301
|
py
|
Python
|
src/sentry/tasks/deletion.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/tasks/deletion.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/tasks/deletion.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.tasks.deletion
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from celery.utils.log import get_task_logger
from sentry.exceptions import DeleteAborted
from sentry.signals import pending_delete
from sentry.tasks.base import instrumented_task, retry
from sentry.utils.query import bulk_delete_objects
logger = get_task_logger(__name__)
@instrumented_task(name='sentry.tasks.deletion.delete_organization', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_organization(object_id, continuous=True, **kwargs):
    """Incrementally delete an organization and its dependent rows.

    Deletes one team per invocation and re-queues itself (when `continuous`)
    until no teams remain, then removes members and finally the org row.
    Raises DeleteAborted if the organization is still VISIBLE.
    """
    from sentry.models import (
        Organization, OrganizationMember, OrganizationStatus, Team, TeamStatus
    )
    try:
        o = Organization.objects.get(id=object_id)
    except Organization.DoesNotExist:
        # Already gone — nothing to do.
        return
    if o.status == OrganizationStatus.VISIBLE:
        raise DeleteAborted('Aborting organization deletion as status is invalid')
    if o.status != OrganizationStatus.DELETION_IN_PROGRESS:
        # First pass: mark in progress and notify listeners.
        o.update(status=OrganizationStatus.DELETION_IN_PROGRESS)
        pending_delete.send(sender=Organization, instance=o)
    # Delete teams one at a time; each team deletion is itself chunked work.
    for team in Team.objects.filter(organization=o).order_by('id')[:1]:
        logger.info('Removing Team id=%s where organization=%s', team.id, o.id)
        team.update(status=TeamStatus.DELETION_IN_PROGRESS)
        delete_team(team.id, continuous=False)
        if continuous:
            # NOTE(review): .delay() forwards `countdown` into the task's
            # **kwargs instead of scheduling a 15 s delay — apply_async with
            # countdown may have been intended; confirm before relying on it.
            delete_organization.delay(object_id=object_id, countdown=15)
        return
    model_list = (OrganizationMember,)
    has_more = delete_objects(model_list, relation={'organization': o}, logger=logger)
    if has_more:
        if continuous:
            delete_organization.delay(object_id=object_id, countdown=15)
        return
    o.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_team', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_team(object_id, continuous=True, **kwargs):
    """Incrementally delete a team: one project per pass, then the team row.

    Raises DeleteAborted if the team is still VISIBLE; re-queues itself while
    `continuous` and projects remain.
    """
    from sentry.models import Team, TeamStatus, Project, ProjectStatus
    try:
        t = Team.objects.get(id=object_id)
    except Team.DoesNotExist:
        # Already gone — nothing to do.
        return
    if t.status == TeamStatus.VISIBLE:
        raise DeleteAborted('Aborting team deletion as status is invalid')
    if t.status != TeamStatus.DELETION_IN_PROGRESS:
        # First pass: notify listeners and mark in progress.
        pending_delete.send(sender=Team, instance=t)
        t.update(status=TeamStatus.DELETION_IN_PROGRESS)
    # Delete 1 project at a time since this is expensive by itself
    for project in Project.objects.filter(team=t).order_by('id')[:1]:
        logger.info('Removing Project id=%s where team=%s', project.id, t.id)
        project.update(status=ProjectStatus.DELETION_IN_PROGRESS)
        delete_project(project.id, continuous=False)
        if continuous:
            delete_team.delay(object_id=object_id, countdown=15)
        return
    t.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_project', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_project(object_id, continuous=True, **kwargs):
    """Incrementally delete a project and every row that hangs off it.

    Works in bounded chunks: after any chunk that still had rows it re-queues
    itself (when `continuous`) and returns, so a single task run is never
    unbounded.  Raises DeleteAborted if the project is still VISIBLE.
    """
    from sentry.models import (
        Activity, EventMapping, Group, GroupAssignee, GroupBookmark,
        GroupEmailThread, GroupHash, GroupMeta, GroupResolution,
        GroupRuleStatus, GroupSeen, GroupTagKey, GroupTagValue, Project,
        ProjectBookmark, ProjectKey, ProjectStatus, Release, ReleaseFile,
        SavedSearchUserDefault, SavedSearch, TagKey, TagValue, UserReport
    )
    try:
        p = Project.objects.get(id=object_id)
    except Project.DoesNotExist:
        return
    if p.status == ProjectStatus.VISIBLE:
        raise DeleteAborted('Aborting project deletion as status is invalid')
    if p.status != ProjectStatus.DELETION_IN_PROGRESS:
        pending_delete.send(sender=Project, instance=p)
        p.update(status=ProjectStatus.DELETION_IN_PROGRESS)
        # Immediately revoke keys
        ProjectKey.objects.filter(project_id=object_id).delete()
    # Models that can be bulk-deleted directly by project_id.
    model_list = (
        Activity, EventMapping, GroupAssignee, GroupBookmark, GroupEmailThread,
        GroupHash, GroupSeen, GroupRuleStatus, GroupTagKey, GroupTagValue,
        ProjectBookmark, ProjectKey, TagKey, TagValue, SavedSearchUserDefault,
        SavedSearch, UserReport
    )
    for model in model_list:
        has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
        if has_more:
            if continuous:
                delete_project.delay(object_id=object_id, countdown=15)
            return
    # TODO(dcramer): no project relation so we cant easily bulk
    # delete today
    has_more = delete_objects([GroupMeta, GroupResolution],
                              relation={'group__project': p},
                              logger=logger)
    if has_more:
        if continuous:
            delete_project.delay(object_id=object_id, countdown=15)
        return
    has_more = delete_events(relation={'project_id': p.id}, logger=logger)
    if has_more:
        if continuous:
            delete_project.delay(object_id=object_id, countdown=15)
        return
    # Release needs to handle deletes after Group is cleaned up as the foreign
    # key is protected
    model_list = (Group, ReleaseFile, Release)
    for model in model_list:
        has_more = bulk_delete_objects(model, project_id=p.id, logger=logger)
        if has_more:
            if continuous:
                delete_project.delay(object_id=object_id, countdown=15)
            return
    p.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_group', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_group(object_id, continuous=True, **kwargs):
    """Incrementally delete a single group (issue) and its dependent rows,
    re-queueing itself while chunks remain (when `continuous`)."""
    from sentry.models import (
        EventMapping, Group, GroupAssignee, GroupBookmark, GroupHash, GroupMeta,
        GroupResolution, GroupRuleStatus, GroupStatus, GroupTagKey,
        GroupTagValue, GroupEmailThread, UserReport, GroupRedirect,
    )
    try:
        group = Group.objects.get(id=object_id)
    except Group.DoesNotExist:
        # Already gone — nothing to do.
        return
    if group.status != GroupStatus.DELETION_IN_PROGRESS:
        group.update(status=GroupStatus.DELETION_IN_PROGRESS)
    bulk_model_list = (
        # prioritize GroupHash
        GroupHash, GroupAssignee, GroupBookmark, GroupMeta, GroupResolution,
        GroupRuleStatus, GroupTagValue, GroupTagKey, EventMapping,
        GroupEmailThread, UserReport, GroupRedirect,
    )
    for model in bulk_model_list:
        has_more = bulk_delete_objects(model, group_id=object_id, logger=logger)
        if has_more:
            if continuous:
                delete_group.delay(object_id=object_id, countdown=15)
            return
    has_more = delete_events(relation={'group_id': object_id}, logger=logger)
    if has_more:
        if continuous:
            delete_group.delay(object_id=object_id, countdown=15)
        return
    group.delete()
@instrumented_task(name='sentry.tasks.deletion.delete_tag_key', queue='cleanup',
                   default_retry_delay=60 * 5, max_retries=None)
@retry(exclude=(DeleteAborted,))
def delete_tag_key(object_id, continuous=True, **kwargs):
    """Incrementally delete a tag key plus every value row referencing it,
    re-queueing itself while chunks remain (when `continuous`)."""
    from sentry.models import (
        GroupTagKey, GroupTagValue, TagKey, TagKeyStatus, TagValue
    )
    try:
        tagkey = TagKey.objects.get(id=object_id)
    except TagKey.DoesNotExist:
        # Already gone — nothing to do.
        return
    if tagkey.status != TagKeyStatus.DELETION_IN_PROGRESS:
        tagkey.update(status=TagKeyStatus.DELETION_IN_PROGRESS)
    bulk_model_list = (
        GroupTagValue, GroupTagKey, TagValue
    )
    for model in bulk_model_list:
        # Rows are matched by (project, key) rather than a direct FK.
        has_more = bulk_delete_objects(model, project_id=tagkey.project_id,
                                       key=tagkey.key, logger=logger)
        if has_more:
            if continuous:
                delete_tag_key.delay(object_id=object_id, countdown=15)
            return
    tagkey.delete()
def delete_events(relation, limit=100, logger=None):
    """Delete up to `limit` events matching `relation`.

    Returns True when a batch was removed (so the caller should re-queue),
    False when nothing matched.
    """
    from sentry.app import nodestore
    from sentry.models import Event, EventTag

    if logger is not None:
        logger.info('Removing %r objects where %r', Event, relation)

    batch = list(Event.objects.filter(**relation)[:limit])
    if not batch:
        return False

    # Drop the node payloads first, then tag rows, then the event rows.
    nodestore.delete_multi(set(event.data.id for event in batch))
    event_ids = [event.id for event in batch]
    EventTag.objects.filter(event_id__in=event_ids).delete()
    Event.objects.filter(id__in=event_ids).delete()
    return True
def delete_objects(models, relation, limit=100, logger=None):
    """Delete up to `limit` rows per model through the ORM so that
    per-object cascades run.

    Returns True as soon as one model had matching rows (caller should
    re-queue and come back); False once every model is empty.
    """
    for model in models:
        if logger is not None:
            logger.info('Removing %r objects where %r', model, relation)
        deleted_any = False
        for instance in model.objects.filter(**relation)[:limit]:
            instance.delete()
            deleted_any = True
        if deleted_any:
            return True
    return False
| 35.5
| 86
| 0.68498
|
4a0476f1a9bad507517307c4625123ace3969be6
| 10,387
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20160330/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20160330/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20160330/get_express_route_circuit.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public names re-exported by this generated module.
__all__ = [
    'GetExpressRouteCircuitResult',
    'AwaitableGetExpressRouteCircuitResult',
    'get_express_route_circuit',
]
@pulumi.output_type
class GetExpressRouteCircuitResult:
"""
ExpressRouteCircuit resource
"""
def __init__(__self__, allow_classic_operations=None, authorizations=None, circuit_provisioning_state=None, etag=None, id=None, location=None, name=None, peerings=None, provisioning_state=None, service_key=None, service_provider_notes=None, service_provider_properties=None, service_provider_provisioning_state=None, sku=None, tags=None, type=None):
if allow_classic_operations and not isinstance(allow_classic_operations, bool):
raise TypeError("Expected argument 'allow_classic_operations' to be a bool")
pulumi.set(__self__, "allow_classic_operations", allow_classic_operations)
if authorizations and not isinstance(authorizations, list):
raise TypeError("Expected argument 'authorizations' to be a list")
pulumi.set(__self__, "authorizations", authorizations)
if circuit_provisioning_state and not isinstance(circuit_provisioning_state, str):
raise TypeError("Expected argument 'circuit_provisioning_state' to be a str")
pulumi.set(__self__, "circuit_provisioning_state", circuit_provisioning_state)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peerings and not isinstance(peerings, list):
raise TypeError("Expected argument 'peerings' to be a list")
pulumi.set(__self__, "peerings", peerings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service_key and not isinstance(service_key, str):
raise TypeError("Expected argument 'service_key' to be a str")
pulumi.set(__self__, "service_key", service_key)
if service_provider_notes and not isinstance(service_provider_notes, str):
raise TypeError("Expected argument 'service_provider_notes' to be a str")
pulumi.set(__self__, "service_provider_notes", service_provider_notes)
if service_provider_properties and not isinstance(service_provider_properties, dict):
raise TypeError("Expected argument 'service_provider_properties' to be a dict")
pulumi.set(__self__, "service_provider_properties", service_provider_properties)
if service_provider_provisioning_state and not isinstance(service_provider_provisioning_state, str):
raise TypeError("Expected argument 'service_provider_provisioning_state' to be a str")
pulumi.set(__self__, "service_provider_provisioning_state", service_provider_provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> Optional[bool]:
"""
allow classic operations
"""
return pulumi.get(self, "allow_classic_operations")
@property
@pulumi.getter
def authorizations(self) -> Optional[Sequence['outputs.ExpressRouteCircuitAuthorizationResponse']]:
"""
Gets or sets list of authorizations
"""
return pulumi.get(self, "authorizations")
@property
@pulumi.getter(name="circuitProvisioningState")
def circuit_provisioning_state(self) -> Optional[str]:
"""
Gets or sets CircuitProvisioningState state of the resource
"""
return pulumi.get(self, "circuit_provisioning_state")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def peerings(self) -> Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        List of peerings for this circuit.
        """
        return pulumi.get(self, "peerings")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).

        NOTE(review): upstream doc text says "PublicIP resource", which looks
        like a codegen copy/paste artifact — confirm against the REST spec.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="serviceKey")
    def service_key(self) -> Optional[str]:
        """
        The ServiceKey of the circuit.
        """
        return pulumi.get(self, "service_key")
    @property
    @pulumi.getter(name="serviceProviderNotes")
    def service_provider_notes(self) -> Optional[str]:
        """
        ServiceProviderNotes for the circuit.
        """
        return pulumi.get(self, "service_provider_notes")
    @property
    @pulumi.getter(name="serviceProviderProperties")
    def service_provider_properties(self) -> Optional['outputs.ExpressRouteCircuitServiceProviderPropertiesResponse']:
        """
        ServiceProviderProperties for the circuit.
        """
        return pulumi.get(self, "service_provider_properties")
    @property
    @pulumi.getter(name="serviceProviderProvisioningState")
    def service_provider_provisioning_state(self) -> Optional[str]:
        """
        ServiceProviderProvisioningState state of the resource.
        """
        return pulumi.get(self, "service_provider_provisioning_state")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.ExpressRouteCircuitSkuResponse']:
        """
        The SKU of the circuit.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetExpressRouteCircuitResult(GetExpressRouteCircuitResult):
    """Awaitable wrapper so the (already-resolved) lookup result can be ``await``-ed."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function, which is
        # what `__await__` must return; the result is then produced
        # immediately without suspending.
        if False:
            yield self
        return GetExpressRouteCircuitResult(
            allow_classic_operations=self.allow_classic_operations,
            authorizations=self.authorizations,
            circuit_provisioning_state=self.circuit_provisioning_state,
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            peerings=self.peerings,
            provisioning_state=self.provisioning_state,
            service_key=self.service_key,
            service_provider_notes=self.service_provider_notes,
            service_provider_properties=self.service_provider_properties,
            service_provider_provisioning_state=self.service_provider_provisioning_state,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_express_route_circuit(circuit_name: Optional[str] = None,
                              resource_group_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteCircuitResult:
    """
    Look up an existing ExpressRouteCircuit resource (API version 2016-03-30).

    :param str circuit_name: The name of the circuit.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options for the invoke call.
    """
    __args__ = dict()
    __args__['circuitName'] = circuit_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # NOTE(review): mutates the caller-supplied `opts` in place — standard
        # pattern in generated Pulumi SDKs of this vintage.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20160330:getExpressRouteCircuit', __args__, opts=opts, typ=GetExpressRouteCircuitResult).value

    return AwaitableGetExpressRouteCircuitResult(
        allow_classic_operations=__ret__.allow_classic_operations,
        authorizations=__ret__.authorizations,
        circuit_provisioning_state=__ret__.circuit_provisioning_state,
        etag=__ret__.etag,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        peerings=__ret__.peerings,
        provisioning_state=__ret__.provisioning_state,
        service_key=__ret__.service_key,
        service_provider_notes=__ret__.service_provider_notes,
        service_provider_properties=__ret__.service_provider_properties,
        service_provider_provisioning_state=__ret__.service_provider_provisioning_state,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type)
| 39.645038
| 353
| 0.67623
|
4a04783d4ceeece8fdbdb130728ea34b76cdff57
| 1,395
|
py
|
Python
|
ooobuild/dyn/text/x_text_portion_append.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/text/x_text_portion_append.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/text/x_text_portion_append.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# This file was auto-generated by Cheetah.
# Libre Office Version: 7.3
# Namespace: com.sun.star.text
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# Decide at import time whether to bind the live UNO interface or the
# static typing stub.  `_DYNAMIC` is true only when running inside a real
# UNO runtime/environment (and not under a static type checker).
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True

if not TYPE_CHECKING and _DYNAMIC:
    # Live environment: re-export the real UNO interface and tag it with
    # the namespace metadata the ooobuild tooling expects.
    from com.sun.star.text import XTextPortionAppend as XTextPortionAppend
    setattr(XTextPortionAppend, '__ooo_ns__', 'com.sun.star.text')
    setattr(XTextPortionAppend, '__ooo_full_ns__', 'com.sun.star.text.XTextPortionAppend')
    setattr(XTextPortionAppend, '__ooo_type_name__', 'interface')
else:
    # Static analysis or no UNO runtime: fall back to the bundled stub.
    from ...lo.text.x_text_portion_append import XTextPortionAppend as XTextPortionAppend

__all__ = ['XTextPortionAppend']
| 37.702703
| 90
| 0.774194
|
4a0478f8bac47cb0367413d9b39f773ab918909f
| 124,589
|
py
|
Python
|
autotest/ogr/ogr_gml_read.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | 1
|
2018-12-19T14:08:20.000Z
|
2018-12-19T14:08:20.000Z
|
autotest/ogr/ogr_gml_read.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_gml_read.py
|
kammerer/gdal
|
a981e817543921ddc262f0ea2c137b52dea1e3f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: GML Reading Driver testing.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2006, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
import gdaltest
import ogrtest
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import pytest
###############################################################################
# Test reading geometry and attribute from ionic wfs gml file.
#
def test_ogr_gml_1():
    """Read geometry and an attribute from an Ionic WFS GML file.

    Also sets the module-global ``gdaltest.have_gml_reader`` flag that gates
    every subsequent test in this file.
    """
    gdaltest.have_gml_reader = 0

    gml_ds = ogr.Open('data/ionic_wfs.gml')
    if gml_ds is None:
        # A Xerces-related error means no GML reader is built in: skip the suite.
        if gdal.GetLastErrorMsg().find('Xerces') != -1:
            pytest.skip()
        pytest.fail('failed to open test file.')

    gdaltest.have_gml_reader = 1

    assert gml_ds.GetLayerCount() == 1, 'wrong number of layers'

    lyr = gml_ds.GetLayerByName('GEM')
    feat = lyr.GetNextFeature()

    assert feat.GetField('Name') == 'Aartselaar', 'Wrong name field value'

    wkt = 'POLYGON ((44038 511549,44015 511548,43994 511522,43941 511539,43844 511514,43754 511479,43685 511521,43594 511505,43619 511452,43645 511417,4363 511387,437 511346,43749 511298,43808 511229,43819 511205,4379 511185,43728 511167,43617 511175,43604 511151,43655 511125,43746 511143,43886 511154,43885 511178,43928 511186,43977 511217,4404 511223,44008 511229,44099 51131,44095 511335,44106 51135,44127 511379,44124 511435,44137 511455,44105 511467,44098 511484,44086 511499,4407 511506,44067 511535,44038 511549))'

    assert not ogrtest.check_feature_geometry(feat, wkt)

    feat = lyr.GetNextFeature()
    assert feat is None, 'got unexpected feature.'
###############################################################################
# Do the same test somewhere without a .gfs file.
def test_ogr_gml_2():
    """Read the same Ionic GML file from a directory with no pre-built .gfs."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # Copy the gml file (but deliberately not its .gfs sidecar) so the driver
    # must parse the schema from scratch.  shutil.copyfile closes its handles,
    # unlike the previous open(...).write(open(...).read()) idiom which leaked
    # two file objects.
    shutil.copyfile('data/ionic_wfs.gml', 'tmp/ionic_wfs.gml')

    gml_ds = ogr.Open('tmp/ionic_wfs.gml')

    assert gml_ds.GetLayerCount() == 1, 'wrong number of layers'

    lyr = gml_ds.GetLayerByName('GEM')
    feat = lyr.GetNextFeature()

    assert feat.GetField('Name') == 'Aartselaar', 'Wrong name field value'

    wkt = 'POLYGON ((44038 511549,44015 511548,43994 511522,43941 511539,43844 511514,43754 511479,43685 511521,43594 511505,43619 511452,43645 511417,4363 511387,437 511346,43749 511298,43808 511229,43819 511205,4379 511185,43728 511167,43617 511175,43604 511151,43655 511125,43746 511143,43886 511154,43885 511178,43928 511186,43977 511217,4404 511223,44008 511229,44099 51131,44095 511335,44106 51135,44127 511379,44124 511435,44137 511455,44105 511467,44098 511484,44086 511499,4407 511506,44067 511535,44038 511549))'

    assert not ogrtest.check_feature_geometry(feat, wkt)

    feat = lyr.GetNextFeature()
    assert feat is None, 'got unexpected feature.'
###############################################################################
# Similar test for RNF style line data.
def test_ogr_gml_3():
    """Read RNF-style line data (one LINESTRING road segment)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gml_ds = ogr.Open('data/rnf_eg.gml')

    assert gml_ds.GetLayerCount() == 1, 'wrong number of layers'

    lyr = gml_ds.GetLayerByName('RoadSegment')
    feat = lyr.GetNextFeature()

    assert feat.GetField('ngd_id') == 817792, 'Wrong ngd_id field value'

    assert feat.GetField('type') == 'HWY', 'Wrong type field value'

    wkt = 'LINESTRING (-63.500411040289066 46.240122507771368,-63.501009714909742 46.240344881690326,-63.502170462373471 46.241041855639622,-63.505862621395394 46.24195250605576,-63.506719184531178 46.242002742901576,-63.507197272602212 46.241931577811606,-63.508403092799554 46.241752283460158,-63.509946573455622 46.241745397977233)'

    assert not ogrtest.check_feature_geometry(feat, wkt)

    feat = lyr.GetNextFeature()
    assert feat is None, 'got unexpected feature.'
###############################################################################
# Test reading a GML file with a UTF-8 BOM indicator.
# Test also support for nested GML elements (#3680)
def test_ogr_gml_4():
    """Read a GML file with a UTF-8 BOM; also covers nested GML elements (#3680)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gml_ds = ogr.Open('data/bom.gml')

    assert gml_ds.GetLayerCount() == 1, 'wrong number of layers'

    lyr = gml_ds.GetLayerByName('CartographicText')

    assert lyr.GetFeatureCount() == 3, 'wrong number of features'

    # Test 1st feature
    feat = lyr.GetNextFeature()

    assert feat.GetField('featureCode') == 10198, 'Wrong featureCode field value'

    assert feat.GetField('anchorPosition') == 8, 'Wrong anchorPosition field value'

    wkt = 'POINT (347243.85 461299.5)'

    assert not ogrtest.check_feature_geometry(feat, wkt)

    # Test 2nd feature
    feat = lyr.GetNextFeature()

    assert feat.GetField('featureCode') == 10069, 'Wrong featureCode field value'

    wkt = 'POINT (347251.45 461250.85)'

    assert not ogrtest.check_feature_geometry(feat, wkt)
###############################################################################
# Test reading a GML file that triggered bug #2349
def test_ogr_gml_5():
    """Regression test for bug #2349: attribute filter on a GML layer."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.Open('data/ticket_2349_test_1.gml')
    layer = ds.GetLayerByName('MyPolyline')
    layer.SetAttributeFilter('height > 300')
    # Fetching a feature must not crash with the filter installed.
    layer.GetNextFeature()
###############################################################################
# Test of various FIDs (various prefixes and lengths) (Ticket#1017)
def test_ogr_gml_6():
    """FIDs with various prefixes/lengths map to unique non-negative values (#1017)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    for basename in ('test_point1', 'test_point2', 'test_point3', 'test_point4'):
        gml_path = os.path.join('data', basename + '.gml')
        gfs_path = os.path.join('data', basename + '.gfs')
        seen_fids = []
        ds = ogr.Open(gml_path)
        layer = ds.GetLayer()
        feature = layer.GetNextFeature()
        while feature is not None:
            fid = feature.GetFID()
            if fid < 0 or fid in seen_fids:
                # Clean up the generated .gfs before reporting the failure.
                ds = None
                os.remove(gfs_path)
                pytest.fail('Wrong FID value')
            seen_fids.append(fid)
            feature = layer.GetNextFeature()
        ds = None
        os.remove(gfs_path)
###############################################################################
# Test of colon terminated prefixes for attribute values (Ticket#2493)
def test_ogr_gml_7():
    """Colon-terminated prefixes for attribute values (#2493); also #2969 count fix."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gdal.SetConfigOption('GML_EXPOSE_FID', 'FALSE')
    gml_ds = ogr.Open('data/test_point.gml')
    gdal.SetConfigOption('GML_EXPOSE_FID', None)
    lyr = gml_ds.GetLayer()
    ldefn = lyr.GetLayerDefn()

    # Test fix for #2969
    assert lyr.GetFeatureCount() == 5, 'Bad feature count'

    # Narrowed from a bare `except:`: the probe only guards against old
    # bindings that lack GetFieldTypeName.
    try:
        ldefn.GetFieldDefn(0).GetFieldTypeName
    except AttributeError:
        pytest.skip()

    assert ldefn.GetFieldDefn(0).GetFieldTypeName(ldefn.GetFieldDefn(0).GetType()) == 'Real'
    assert ldefn.GetFieldDefn(1).GetFieldTypeName(ldefn.GetFieldDefn(1).GetType()) == 'Integer'
    assert ldefn.GetFieldDefn(2).GetFieldTypeName(ldefn.GetFieldDefn(2).GetType()) == 'String'
###############################################################################
# Test a GML file with some non-ASCII UTF-8 content that triggered a bug (Ticket#2948)
def test_ogr_gml_8():
    """Read non-ASCII UTF-8 content that used to crash the reader (#2948)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gml_ds = ogr.Open('data/utf8.gml')
    lyr = gml_ds.GetLayer()
    feat = lyr.GetNextFeature()

    # Python 3 returns decoded str; Python 2 returned raw UTF-8 bytes.
    if sys.version_info >= (3, 0, 0):
        assert feat.GetFieldAsString('name') == '\xc4\x80liamanu'.encode('latin1').decode('utf-8')
    else:
        assert feat.GetFieldAsString('name') == '\xc4\x80liamanu'
###############################################################################
# Test writing invalid UTF-8 content in a GML file (ticket #2971)
def test_ogr_gml_9():
    """Write invalid UTF-8 content; the driver must sanitize it to '?' (#2971)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    drv = ogr.GetDriverByName('GML')
    ds = drv.CreateDataSource('tmp/broken_utf8.gml')
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('test', ogr.OFTString))

    dst_feat = ogr.Feature(lyr.GetLayerDefn())
    # 0x80 is an invalid UTF-8 lead byte.
    dst_feat.SetFieldBinaryFromHexString('test', '80626164')  # \x80bad'

    # Avoid the warning
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = lyr.CreateFeature(dst_feat)
    gdal.PopErrorHandler()
    assert ret == 0, 'CreateFeature failed.'

    ds = None

    ds = ogr.Open('tmp/broken_utf8.gml')
    lyr = ds.GetLayerByName('test')
    feat = lyr.GetNextFeature()
    # The invalid byte must have been replaced by '?' on write.
    assert feat.GetField('test') == '?bad', 'Unexpected content.'
    ds = None

    os.remove('tmp/broken_utf8.gml')
    os.remove('tmp/broken_utf8.xsd')
###############################################################################
# Test writing different data types in a GML file (ticket #2857)
# TODO: Add test for other data types as they are added to the driver.
def test_ogr_gml_10():
    """Round-trip several field data types, widths, and precisions (#2857)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    drv = ogr.GetDriverByName('GML')
    ds = drv.CreateDataSource('tmp/fields.gml')
    lyr = ds.CreateLayer('test')
    field_defn = ogr.FieldDefn('string', ogr.OFTString)
    field_defn.SetWidth(100)
    lyr.CreateField(field_defn)
    lyr.CreateField(ogr.FieldDefn('date', ogr.OFTDate))
    field_defn = ogr.FieldDefn('real', ogr.OFTReal)
    field_defn.SetWidth(4)
    field_defn.SetPrecision(2)
    lyr.CreateField(field_defn)
    lyr.CreateField(ogr.FieldDefn('float', ogr.OFTReal))
    field_defn = ogr.FieldDefn('integer', ogr.OFTInteger)
    field_defn.SetWidth(5)
    lyr.CreateField(field_defn)

    dst_feat = ogr.Feature(lyr.GetLayerDefn())
    dst_feat.SetField('string', 'test string of length 24')
    dst_feat.SetField('date', '2003/04/22')
    dst_feat.SetField('real', 12.34)
    dst_feat.SetField('float', 1234.5678)
    dst_feat.SetField('integer', '1234')

    ret = lyr.CreateFeature(dst_feat)
    assert ret == 0, 'CreateFeature failed.'

    ds = None

    # Read the file back and verify types, values, widths, and precisions.
    ds = ogr.Open('tmp/fields.gml')
    lyr = ds.GetLayerByName('test')
    feat = lyr.GetNextFeature()

    assert feat.GetFieldDefnRef(feat.GetFieldIndex('string')).GetType() == ogr.OFTString, \
        ('String type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('string')).GetType()))
    # Note: OFTDate round-trips as OFTString in this driver version.
    assert feat.GetFieldDefnRef(feat.GetFieldIndex('date')).GetType() == ogr.OFTString, \
        ('Date type is not reported as OFTString. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('date')).GetType()))
    assert feat.GetFieldDefnRef(feat.GetFieldIndex('real')).GetType() == ogr.OFTReal, \
        ('Real type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('real')).GetType()))
    assert feat.GetFieldDefnRef(feat.GetFieldIndex('float')).GetType() == ogr.OFTReal, \
        ('Float type is not reported as OFTReal. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('float')).GetType()))
    assert feat.GetFieldDefnRef(feat.GetFieldIndex('integer')).GetType() == ogr.OFTInteger, \
        ('Integer type is reported wrong. Got ' + str(feat.GetFieldDefnRef(feat.GetFieldIndex('integer')).GetType()))

    assert feat.GetField('string') == 'test string of length 24', \
        ('Unexpected string content.' + feat.GetField('string'))
    assert feat.GetField('date') == '2003/04/22', \
        ('Unexpected string content.' + feat.GetField('date'))
    assert feat.GetFieldAsDouble('real') == 12.34, 'Unexpected real content.'
    assert feat.GetField('float') == 1234.5678, 'Unexpected float content.'
    assert feat.GetField('integer') == 1234, 'Unexpected integer content.'

    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('string')).GetWidth() == 100, \
        'Unexpected width of string field.'
    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('real')).GetWidth() == 4, \
        'Unexpected width of real field.'
    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('real')).GetPrecision() == 2, \
        'Unexpected precision of real field.'
    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('integer')).GetWidth() == 5, \
        'Unexpected width of integer field.'
    ds = None

    os.remove('tmp/fields.gml')
    os.remove('tmp/fields.xsd')
###############################################################################
# Test reading a geometry element specified with <GeometryElementPath>
def test_ogr_gml_11():
    """Read a geometry element specified with <GeometryElementPath> in the .gfs."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # Make sure the .gfs file is more recent than the .gml one, otherwise the
    # driver would regenerate it and ignore the GeometryElementPath setting.
    try:
        gml_mtime = os.stat('data/testgeometryelementpath.gml').st_mtime
        gfs_mtime = os.stat('data/testgeometryelementpath.gfs').st_mtime
        touch_gfs = gfs_mtime <= gml_mtime
    except OSError:
        # Narrowed from a bare `except:` — only missing files are expected here.
        touch_gfs = True
    if touch_gfs:
        print('Touching .gfs file')
        # Rewrite the first byte in place to bump the mtime; `with` guarantees
        # the handle is closed even if the write fails.
        with open('data/testgeometryelementpath.gfs', 'rb+') as f:
            data = f.read(1)
            f.seek(0, 0)
            f.write(data)

    ds = ogr.Open('data/testgeometryelementpath.gml')
    lyr = ds.GetLayer(0)
    assert lyr.GetGeometryColumn() == 'location1container|location1', \
        'did not get expected geometry column name'

    feat = lyr.GetNextFeature()
    assert feat.GetField('attrib1') == 'attrib1_value', \
        'did not get expected value for attrib1'
    assert feat.GetField('attrib2') == 'attrib2_value', \
        'did not get expected value for attrib2'
    geom = feat.GetGeometryRef()
    assert geom.ExportToWkt() == 'POINT (3 50)', 'did not get expected geometry'
    ds = None
###############################################################################
# Test reading a virtual GML file
def test_ogr_gml_12():
    """Read a GML file through the /vsizip/ virtual filesystem."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.Open('/vsizip/data/testgeometryelementpath.zip/testgeometryelementpath.gml')
    lyr = ds.GetLayer(0)
    assert lyr.GetGeometryColumn() == 'location1container|location1', \
        'did not get expected geometry column name'

    feat = lyr.GetNextFeature()
    assert feat.GetField('attrib1') == 'attrib1_value', \
        'did not get expected value for attrib1'
    assert feat.GetField('attrib2') == 'attrib2_value', \
        'did not get expected value for attrib2'
    geom = feat.GetGeometryRef()
    assert geom.ExportToWkt() == 'POINT (3 50)', 'did not get expected geometry'
    ds = None
###############################################################################
# Test reading GML with StringList, IntegerList and RealList fields
def test_ogr_gml_13():
    """Read StringList, IntegerList and RealList fields (twice: second pass uses the generated .gfs)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    for _ in range(2):
        ds = ogr.Open('data/testlistfields.gml')
        lyr = ds.GetLayer(0)
        feat = lyr.GetNextFeature()
        assert feat.GetFieldAsStringList(feat.GetFieldIndex('attrib1')) == ['value1', 'value2'], \
            'did not get expected value for attrib1'
        assert feat.GetField(feat.GetFieldIndex('attrib2')) == 'value3', \
            'did not get expected value for attrib2'
        assert feat.GetFieldAsIntegerList(feat.GetFieldIndex('attrib3')) == [4, 5], \
            'did not get expected value for attrib3'
        assert feat.GetFieldAsDoubleList(feat.GetFieldIndex('attrib4')) == [6.1, 7.1], \
            'did not get expected value for attrib4'
        ds = None
    gdal.Unlink('data/testlistfields.gfs')
###############################################################################
# Test xlink resolution
def test_ogr_gml_14():
    """xlink:href resolution against downloaded reference files.

    Requires CURL (detected via the WMS driver) and network access; the exact
    ordering of the GML_SKIP_RESOLVE_ELEMS / GML_SAVE_RESOLVED_TO config
    options around each Open() call is significant, so the logic is kept
    byte-identical.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # We need CURL for xlink resolution, and a sign that Curl is available
    # is the availability of the WMS driver
    gdaltest.wms_drv = gdal.GetDriverByName('WMS')
    if gdaltest.wms_drv is None:
        pytest.skip()

    if gdaltest.gdalurlopen('http://download.osgeo.org/gdal/data/gml/xlink3.gml') is None:
        pytest.skip('cannot open URL')

    files = ['xlink1.gml', 'xlink2.gml', 'expected1.gml', 'expected2.gml']
    for f in files:
        if not gdaltest.download_file('http://download.osgeo.org/gdal/data/gml/' + f, f):
            pytest.skip()

    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'NONE')
    gdal.SetConfigOption('GML_SAVE_RESOLVED_TO', 'tmp/cache/xlink1resolved.gml')
    with gdaltest.error_handler():
        gml_ds = ogr.Open('tmp/cache/xlink1.gml')
    gml_ds = None
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'gml:directedNode')
    gdal.SetConfigOption('GML_SAVE_RESOLVED_TO', 'tmp/cache/xlink2resolved.gml')
    gml_ds = ogr.Open('tmp/cache/xlink1.gml')
    del gml_ds
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
    gdal.SetConfigOption('GML_SAVE_RESOLVED_TO', None)

    # Compare each resolved output against its downloaded reference.
    try:
        fp = open('tmp/cache/xlink1resolved.gml', 'r')
        text = fp.read()
        fp.close()
        os.remove('tmp/cache/xlink1resolved.gml')
        fp = open('tmp/cache/expected1.gml', 'r')
        expectedtext = fp.read()
        fp.close()
    except (IOError, OSError):
        pytest.fail()

    assert text == expectedtext, 'Problem with file 1'

    try:
        fp = open('tmp/cache/xlink2resolved.gml', 'r')
        text = fp.read()
        fp.close()
        os.remove('tmp/cache/xlink2resolved.gml')
        fp = open('tmp/cache/expected2.gml', 'r')
        expectedtext = fp.read()
        fp.close()
    except (IOError, OSError):
        pytest.fail()

    assert text == expectedtext, 'Problem with file 2'
###############################################################################
# Run test_ogrsf
def test_ogr_gml_15():
    """Run the external test_ogrsf conformance tool against a GML file."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        pytest.skip()

    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/test_point.gml')

    # test_ogrsf reports "INFO" on success and "ERROR" lines on failure.
    assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
###############################################################################
# Read CityGML generic attributes
def test_ogr_gml_16():
    """Read CityGML generic attributes (string, int, and double)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.Open('data/citygml.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()

    if feat.GetField('Name_') != 'aname' or \
       feat.GetField('a_int_attr') != 2 or \
       feat.GetField('a_double_attr') != 3.45:
        # Dump the feature to aid debugging before failing.
        feat.DumpReadable()
        pytest.fail('did not get expected values')
###############################################################################
# Read layer SRS for WFS 1.0.0 return
def test_ogr_gml_17():
    """Layer SRS and geometry from a WFS 1.0.0 response."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    layer = ogr.Open('data/gnis_pop_100.gml').GetLayer(0)
    srs_wkt = layer.GetSpatialRef().ExportToWkt()
    assert srs_wkt.find('GEOGCS["WGS 84"') != -1, 'did not get expected SRS'

    geom_wkt = layer.GetNextFeature().GetGeometryRef().ExportToWkt()
    assert geom_wkt == 'POINT (2.09 34.12)', 'did not get expected geometry'
###############################################################################
# Read layer SRS for WFS 1.1.0 return
def test_ogr_gml_18():
    """Layer SRS and geometry from a WFS 1.1.0 response (axis order restored)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    layer = ogr.Open('data/gnis_pop_110.gml').GetLayer(0)
    srs_wkt = layer.GetSpatialRef().ExportToWkt()
    assert srs_wkt.find('GEOGCS["WGS 84"') != -1, 'did not get expected SRS'

    geom_wkt = layer.GetNextFeature().GetGeometryRef().ExportToWkt()
    assert geom_wkt == 'POINT (2.09 34.12)', 'did not get expected geometry'
###############################################################################
# Read layer SRS for WFS 1.1.0 return, but without trying to restore
# (long, lat) order. So we should get EPSGA:4326 and (lat, long) order
def test_ogr_gml_19():
    """WFS 1.1.0 read without (long, lat) restoration: expect lat/long axis order."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # Remove any stale .gfs so the option below takes effect on a fresh parse.
    try:
        os.remove('data/gnis_pop_110.gfs')
    except OSError:
        pass
    gdal.SetConfigOption('GML_INVERT_AXIS_ORDER_IF_LAT_LONG', 'NO')
    ds = ogr.Open('data/gnis_pop_110.gml')
    gdal.SetConfigOption('GML_INVERT_AXIS_ORDER_IF_LAT_LONG', None)

    lyr = ds.GetLayer(0)
    sr = lyr.GetSpatialRef()
    got_wkt = sr.ExportToWkt()
    assert (not (got_wkt.find('GEOGCS["WGS 84"') == -1 or \
       got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1)), \
        'did not get expected SRS'

    feat = lyr.GetNextFeature()
    geom = feat.GetGeometryRef()
    got_wkt = geom.ExportToWkt()
    # Coordinates come back swapped relative to test_ogr_gml_18.
    assert got_wkt == 'POINT (34.12 2.09)', 'did not get expected geometry'
###############################################################################
# Test parsing a .xsd where the type definition is before its reference
def test_ogr_gml_20():
    """Parse a .xsd where a type definition precedes its reference; no .gfs fallback."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # Remove any stale .gfs: the whole point is that the .xsd must be enough.
    try:
        os.remove('data/archsites.gfs')
    except OSError:
        pass
    ds = ogr.Open('data/archsites.gml')
    lyr = ds.GetLayer(0)
    ldefn = lyr.GetLayerDefn()

    # Narrowed from a bare `except:`: only old bindings lack GetFieldTypeName.
    try:
        ldefn.GetFieldDefn(0).GetFieldTypeName
    except AttributeError:
        pytest.skip()

    idx = ldefn.GetFieldIndex("gml_id")
    assert idx != -1, 'did not get expected column "gml_id"'

    idx = ldefn.GetFieldIndex("cat")
    fddefn = ldefn.GetFieldDefn(idx)
    assert fddefn.GetFieldTypeName(fddefn.GetType()) == 'Integer64', \
        'did not get expected column type for col "cat"'
    idx = ldefn.GetFieldIndex("str1")
    fddefn = ldefn.GetFieldDefn(idx)
    assert fddefn.GetFieldTypeName(fddefn.GetType()) == 'String', \
        'did not get expected column type for col "str1"'

    assert lyr.GetGeometryColumn() == 'the_geom', \
        'did not get expected geometry column name'

    assert ldefn.GetGeomType() == ogr.wkbPoint, 'did not get expected geometry type'

    ds = None

    # A .gfs would only exist if the .xsd parsing had failed.
    assert not os.path.exists('data/archsites.gfs'), \
        'did not expect .gfs -> XSD parsing failed'
###############################################################################
# Test writing GML3
def test_ogr_gml_21(frmt='GML3'):
    """Write a GML3(-variant) file and compare the .gml/.xsd against references.

    :param str frmt: value of the FORMAT dataset creation option
                     ('GML3', 'GML3Deegree' or 'GML3.2').
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()

    def _assert_files_match(got, expected):
        # Compare two text files line by line, ignoring leading/trailing
        # whitespace; dump the produced file on mismatch.  `with` guarantees
        # the handles are closed even on the failure path (the original left
        # them open when pytest.fail raised).
        with open(got, 'rt') as f1, open(expected, 'rt') as f2:
            for line1 in f1:
                line2 = f2.readline()
                if line1.strip() != line2.strip():
                    print(open(got, 'rt').read())
                    pytest.fail('%s not identical to expected %s' % (got, expected))

    # Create GML3 file
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)

    for filename in ['tmp/gml_21.gml', 'tmp/gml_21.xsd', 'tmp/gml_21.gfs']:
        try:
            os.remove(filename)
        except OSError:
            pass

    ds = ogr.GetDriverByName('GML').CreateDataSource('tmp/gml_21.gml', options=['FORMAT=' + frmt])
    lyr = ds.CreateLayer('firstlayer', srs=sr)
    lyr.CreateField(ogr.FieldDefn('string_field', ogr.OFTString))

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT (2 49)')
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetField(0, 'foo')
    geom = ogr.CreateGeometryFromWkt('POINT (3 48)')
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    ds = None

    # Reopen the file and sanity-check the first geometry.
    ds = ogr.Open('tmp/gml_21.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    assert feat.GetGeometryRef().ExportToWkt() == 'POINT (2 49)', \
        'did not get expected geometry'
    ds = None

    # Test that .gml and .xsd are identical to what is expected.
    if frmt == 'GML3.2':
        _assert_files_match('tmp/gml_21.gml', 'data/expected_gml_gml32.gml')
    else:
        _assert_files_match('tmp/gml_21.gml', 'data/expected_gml_21.gml')

    if frmt == 'GML3':
        _assert_files_match('tmp/gml_21.xsd', 'data/expected_gml_21.xsd')
    elif frmt == 'GML3.2':
        _assert_files_match('tmp/gml_21.xsd', 'data/expected_gml_gml32.xsd')
    else:
        _assert_files_match('tmp/gml_21.xsd', 'data/expected_gml_21_deegree3.xsd')
def test_ogr_gml_21_deegree3():
    # Same as test_ogr_gml_21, but with the Deegree3-flavoured GML3 schema.
    return test_ogr_gml_21('GML3Deegree')
def test_ogr_gml_21_gml32():
    # Same as test_ogr_gml_21, but writing GML 3.2.
    return test_ogr_gml_21('GML3.2')
###############################################################################
# Read a OpenLS DetermineRouteResponse document
def test_ogr_gml_22():
    """Read an OpenLS DetermineRouteResponse document."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.Open('data/paris_typical_strike_demonstration.xml')

    assert ds.GetLayerByName('RouteGeometry') is not None, 'cannot find RouteGeometry'

    instructions = ds.GetLayerByName('RouteInstruction')
    assert instructions is not None, 'cannot find RouteInstruction'
    assert instructions.GetFeatureCount() == 9, 'did not get expected feature count'

    ds = None
###############################################################################
# Test that use SRS defined in global gml:Envelope if no SRS is set for any
# feature geometry
def test_ogr_gml_23():
    """SRS from the global gml:Envelope is used when features carry no SRS (no .xsd)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    try:
        os.remove('tmp/global_geometry.gfs')
    except OSError:
        pass
    shutil.copy('data/global_geometry.xml', 'tmp/global_geometry.xml')

    # Here we use only the .xml file
    ds = ogr.Open('tmp/global_geometry.xml')

    lyr = ds.GetLayer(0)
    sr = lyr.GetSpatialRef()
    got_wkt = sr.ExportToWkt()
    assert (got_wkt.find('GEOGCS["WGS 84"') != -1 and \
       got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1), \
        'did not get expected SRS'

    feat = lyr.GetNextFeature()
    geom = feat.GetGeometryRef()
    got_wkt = geom.ExportToWkt()
    assert got_wkt == 'POINT (2 49)', 'did not get expected geometry'

    extent = lyr.GetExtent()
    assert extent == (2.0, 3.0, 49.0, 50.0), 'did not get expected layer extent'
###############################################################################
# Test that use SRS defined in global gml:Envelope if no SRS is set for any
# feature geometry
def test_ogr_gml_24():
    """Same as test_ogr_gml_23, but with the .xsd present alongside the .xml."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    try:
        os.remove('data/global_geometry.gfs')
    except OSError:
        pass

    # Here we use only the .xml file and the .xsd file
    ds = ogr.Open('data/global_geometry.xml')

    lyr = ds.GetLayer(0)

    # Because we read the .xsd, we (currently) don't find the SRS

    # sr = lyr.GetSpatialRef()
    # got_wkt = sr.ExportToWkt()
    # if got_wkt.find('GEOGCS["WGS 84"') == -1 or \
    #   got_wkt.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') != -1:
    #    gdaltest.post_reason('did not get expected SRS')
    #    print(got_wkt)
    #    return 'fail'

    feat = lyr.GetNextFeature()
    geom = feat.GetGeometryRef()
    got_wkt = geom.ExportToWkt()
    assert got_wkt == 'POINT (2 49)', 'did not get expected geometry'

    extent = lyr.GetExtent()
    assert extent == (2.0, 3.0, 49.0, 50.0), 'did not get expected layer extent'
###############################################################################
# Test fixes for #3934 and #3935
def test_ogr_gml_25():
    """Test fixes for #3934 and #3935 (curveProperty parsing with
    GML_FACE_HOLE_NEGATIVE=YES)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Older GDAL builds crashed on this input.
    if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
        pytest.skip('would crash')
    # Drop any stale .gfs so the document is re-parsed.
    try:
        os.remove('data/curveProperty.gfs')
    except OSError:
        pass
    # Config option under test; reset right after Open so it cannot leak
    # into other tests.
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', 'YES')
    ds = ogr.Open('data/curveProperty.xml')
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    geom = feat.GetGeometryRef()
    got_wkt = geom.ExportToWkt()
    assert got_wkt == 'POLYGON ((14 21,6 21,6 9,14 9,22 9,22 21,14 21))', \
        'did not get expected geometry'
###############################################################################
# Test writing and reading 3D geoms (GML2)
def test_ogr_gml_26():
    """Write 3D geometries via ogr2ogr (default GML2 output) and read them back."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    import test_cli_utilities
    ogr2ogr = test_cli_utilities.get_ogr2ogr_path()
    if ogr2ogr is None:
        pytest.skip()
    gdaltest.runexternal(ogr2ogr + ' -f GML tmp/ogr_gml_26.gml data/poly.shp -zfield eas_id')
    with open('tmp/ogr_gml_26.gml', 'rt') as f:
        content = f.read()
    # The Z value taken from eas_id must appear in the GML2 coordinate encoding.
    assert "<gml:coord><gml:X>478315.53125</gml:X><gml:Y>4762880.5</gml:Y><gml:Z>158</gml:Z></gml:coord>" in content
    ds = ogr.Open('tmp/ogr_gml_26.gml')
    assert ds.GetLayer(0).GetGeomType() == ogr.wkbPolygon25D
    ds = None
###############################################################################
# Test writing and reading 3D geoms (GML3)
def test_ogr_gml_27():
    """Write 3D geometries via ogr2ogr with FORMAT=GML3 and read them back."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    import test_cli_utilities
    ogr2ogr = test_cli_utilities.get_ogr2ogr_path()
    if ogr2ogr is None:
        pytest.skip()
    gdaltest.runexternal(ogr2ogr + ' -f GML tmp/ogr_gml_27.gml data/poly.shp -zfield eas_id -dsco FORMAT=GML3')
    with open('tmp/ogr_gml_27.gml', 'rt') as f:
        content = f.read()
    # In GML3 the Z value must show up in the 3D envelope corner.
    assert "<gml:lowerCorner>478315.53125 4762880.5 158</gml:lowerCorner>" in content
    ds = ogr.Open('tmp/ogr_gml_27.gml')
    assert ds.GetLayer(0).GetGeomType() == ogr.wkbPolygon25D
    ds = None
###############################################################################
# Test writing and reading layers of type wkbNone (#4154)
def test_ogr_gml_28():
    """Write and read back a layer of type wkbNone (#4154)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    import test_cli_utilities
    ogr2ogr = test_cli_utilities.get_ogr2ogr_path()
    if ogr2ogr is None:
        pytest.skip()
    gdaltest.runexternal(ogr2ogr + ' -f GML tmp/ogr_gml_28.gml data/idlink.dbf')

    def _check_geom_none():
        # The layer must report no geometry, whichever sidecar is consulted.
        ds = ogr.Open('tmp/ogr_gml_28.gml')
        assert ds.GetLayer(0).GetGeomType() == ogr.wkbNone

    # First with the generated .xsd ...
    _check_geom_none()
    os.unlink('tmp/ogr_gml_28.xsd')
    # ... then without it (this pass generates a .gfs) ...
    _check_geom_none()
    # ... and finally via the .gfs.
    _check_geom_none()
###############################################################################
# Test reading FME GMLs
def test_ogr_gml_29():
    """Read an FME-generated GML file exposing three layers of different
    geometry types.

    Also re-run by test_ogr_gml_31 under GML_READ_MODE=SEQUENTIAL_LAYERS.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    ds = ogr.Open('data/testfmegml.gml')
    # One entry per layer: [expected layer geometry type, expected feature WKT].
    expected_results = [[ogr.wkbMultiPoint, 'MULTIPOINT (2 49)'],
                        [ogr.wkbMultiPolygon, 'MULTIPOLYGON (((2 49,3 49,3 50,2 50,2 49)))'],
                        [ogr.wkbMultiLineString, 'MULTILINESTRING ((2 49,3 50))'],
                       ]
    for j, expected_result in enumerate(expected_results):
        lyr = ds.GetLayer(j)
        assert lyr.GetGeomType() == expected_result[0], \
            ('layer %d, did not get expected layer geometry type' % j)
        # Each layer is expected to hold two features with the same geometry.
        for _ in range(2):
            feat = lyr.GetNextFeature()
            geom = feat.GetGeometryRef()
            got_wkt = geom.ExportToWkt()
            assert got_wkt == expected_result[1], \
                ('layer %d, did not get expected geometry' % j)
    ds = None
###############################################################################
# Test reading a big field and a big geometry
def test_ogr_gml_30():
    """Read a big string field (2050 chars) and a big geometry (512 points)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Build a 2048-char string by repeated doubling of a single space.
    field1 = " "
    for _ in range(11):
        field1 = field1 + field1
    # 512 repetitions of a coordinate pair.
    geom = "0 1 " * 512
    data = """<FeatureCollection xmlns:gml="http://www.opengis.net/gml">
  <gml:featureMember>
    <layer1>
      <geometry><gml:LineString><gml:posList>%s</gml:posList></gml:LineString></geometry>
      <field1>A%sZ</field1>
    </layer1>
  </gml:featureMember>
</FeatureCollection>""" % (geom, field1)
    f = gdal.VSIFOpenL("/vsimem/ogr_gml_30.gml", "wb")
    gdal.VSIFWriteL(data, 1, len(data), f)
    gdal.VSIFCloseL(f)
    ds = ogr.Open("/vsimem/ogr_gml_30.gml")
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    field1 = feat.GetField(0)
    geom_wkt = feat.GetGeometryRef().ExportToWkt()
    ds = None
    gdal.Unlink("/vsimem/ogr_gml_30.gml")
    gdal.Unlink("/vsimem/ogr_gml_30.gfs")
    # 2048 chars + the 'A' and 'Z' delimiters.
    assert len(field1) == 2050, 'did not get expected len(field1)'
    assert len(geom_wkt) == 2060, 'did not get expected len(geom_wkt)'
###############################################################################
# Test SEQUENTIAL_LAYERS
def test_ogr_gml_31():
    """Test GML_READ_MODE=SEQUENTIAL_LAYERS (re-uses test_ogr_gml_29's checks)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Full re-read of the FME sample in sequential-layers mode.
    gdal.SetConfigOption('GML_READ_MODE', 'SEQUENTIAL_LAYERS')
    test_ogr_gml_29()
    gdal.SetConfigOption('GML_READ_MODE', None)
    # Test reading second layer and then first layer
    gdal.SetConfigOption('GML_READ_MODE', 'SEQUENTIAL_LAYERS')
    ds = ogr.Open('data/testfmegml.gml')
    gdal.SetConfigOption('GML_READ_MODE', None)
    lyr = ds.GetLayer(1)
    feat = lyr.GetNextFeature()
    feat = lyr.GetNextFeature()
    # Second feature of the layer has FID 1.
    assert feat.GetFID() == 1, 'did not get feature when reading directly second layer'
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    feat = lyr.GetNextFeature()
    # Going back to the first layer must rewind correctly.
    assert feat.GetFID() == 1, 'did not get feature when reading back first layer'
###############################################################################
# Test SEQUENTIAL_LAYERS without a .gfs
def test_ogr_gml_32():
    """Test SEQUENTIAL_LAYERS auto-detection without a pre-existing .gfs."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Test without .xsd or .gfs
    # Copy the sample into /vsimem so no sidecar file is found next to it.
    f = gdal.VSIFOpenL("data/testfmegml.gml", "rb")
    data = gdal.VSIFReadL(1, 10000, f)
    gdal.VSIFCloseL(f)
    f = gdal.VSIFOpenL("/vsimem/ogr_gml_31.gml", "wb")
    gdal.VSIFWriteL(data, 1, len(data), f)
    gdal.VSIFCloseL(f)
    ds = ogr.Open('/vsimem/ogr_gml_31.gml')
    lyr = ds.GetLayer(1)
    feat = lyr.GetNextFeature()
    feat = lyr.GetNextFeature()
    assert feat.GetFID() == 1, 'did not get feature when reading directly second layer'
    ds = None
    # The generated .gfs must record that layers are sequential.
    f = gdal.VSIFOpenL("/vsimem/ogr_gml_31.gfs", "rb")
    data = gdal.VSIFReadL(1, 10000, f)
    gdal.VSIFCloseL(f)
    data = str(data)
    assert data.find("<SequentialLayers>true</SequentialLayers>") != -1, \
        'did not find <SequentialLayers>true</SequentialLayers> in .gfs'
    gdal.Unlink("/vsimem/ogr_gml_31.gml")
    gdal.Unlink("/vsimem/ogr_gml_31.gfs")
###############################################################################
# Test INTERLEAVED_LAYERS
def test_ogr_gml_33():
    """Test GML_READ_MODE=INTERLEAVED_LAYERS with out-of-order layer access."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Test reading second layer and then first layer
    gdal.SetConfigOption('GML_READ_MODE', 'INTERLEAVED_LAYERS')
    ds = ogr.Open('data/testfmegml_interleaved.gml')
    gdal.SetConfigOption('GML_READ_MODE', None)
    # Pairs of (layer index to read from, expected FID; None = layer exhausted
    # at that point of the interleaved stream).
    read_sequence = [[0, 1],
                     [0, None],
                     [1, 3],
                     [2, 5],
                     [2, None],
                     [0, 2],
                     [1, 4],
                     [1, None],
                     [2, 6],
                     [2, None],
                     [0, None],
                     [1, None],
                     [2, None]]
    for i, read_seq in enumerate(read_sequence):
        lyr = ds.GetLayer(read_seq[0])
        feat = lyr.GetNextFeature()
        if feat is None:
            fid = None
        else:
            fid = feat.GetFID()
        expected_fid = read_seq[1]
        assert fid == expected_fid, ('failed at step %d' % i)
###############################################################################
# Test writing non-ASCII UTF-8 content (#4117, #4299)
def test_ogr_gml_34():
    """Write and read back non-ASCII UTF-8 content plus XML special chars
    (#4117, #4299)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    drv = ogr.GetDriverByName('GML')
    ds = drv.CreateDataSource('/vsimem/ogr_gml_34.gml')
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn("name", ogr.OFTString))
    feat = ogr.Feature(lyr.GetLayerDefn())
    # UTF-8 bytes for a non-ASCII char, followed by '<' and '&' which need
    # XML escaping on write.
    feat.SetField(0, '\xc4\x80liamanu<&')
    lyr.CreateFeature(feat)
    feat = None
    ds = None
    # Round-trip: the value must come back unescaped and byte-identical.
    ds = ogr.Open('/vsimem/ogr_gml_34.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    assert feat.GetFieldAsString('name') == '\xc4\x80liamanu<&'
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_34.gml')
    gdal.Unlink('/vsimem/ogr_gml_34.xsd')
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=HUGE (#4380)
def test_ogr_gml_35():
    """Test GML_SKIP_RESOLVE_ELEMS=HUGE (#4380).

    The HUGE resolver needs the SQLite driver, but its temporary .sqlite
    database must be cleaned up after resolution; the resolved geometry must
    be identical on a second open.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    if ogr.GetDriverByName('SQLite') is None:
        pytest.skip()
    if not ogrtest.have_geos():
        pytest.skip()

    # Start from a clean slate: no leftover resolver artefacts.
    try:
        os.remove('tmp/GmlTopo-sample.sqlite')
    except OSError:
        pass
    try:
        os.remove('tmp/GmlTopo-sample.gfs')
    except OSError:
        pass
    try:
        os.remove('tmp/GmlTopo-sample.resolved.gml')
    except OSError:
        pass

    shutil.copy('data/GmlTopo-sample.xml', 'tmp/GmlTopo-sample.xml')

    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'HUGE')
    ds = ogr.Open('tmp/GmlTopo-sample.xml')
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
    # The temporary SQLite db must have been removed, so os.stat() raises.
    # (pytest.raises(..., message=...) was removed in pytest 5.0; use the
    # plain form.)
    with pytest.raises(OSError):
        os.stat('tmp/GmlTopo-sample.sqlite')
    assert gdal.GetLastErrorMsg() == '', 'did not expect error'
    assert ds.GetLayerCount() == 3, ('expected 3 layers, got %d' % ds.GetLayerCount())

    lyr = ds.GetLayerByName('Suolo')
    feat = lyr.GetNextFeature()
    wkt = 'MULTIPOLYGON (((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6)))'
    assert not ogrtest.check_feature_geometry(feat, wkt), feat.GetGeometryRef()
    ds = None

    # Re-open: the resolved result (now cached on disk) must match.
    ds = ogr.Open('tmp/GmlTopo-sample.xml')
    lyr = ds.GetLayerByName('Suolo')
    feat = lyr.GetNextFeature()
    assert not ogrtest.check_feature_geometry(feat, wkt), feat.GetGeometryRef()
    ds = None
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=NONE (and new GMLTopoSurface interpretation)
def test_ogr_gml_36(GML_FACE_HOLE_NEGATIVE='NO'):
    """Test GML_SKIP_RESOLVE_ELEMS=NONE (new GMLTopoSurface interpretation).

    With GML_FACE_HOLE_NEGATIVE='NO' the result is a MULTIPOLYGON (requires
    GEOS); with 'YES' (see test_ogr_gml_37) it is a POLYGON with inner rings.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    if GML_FACE_HOLE_NEGATIVE == 'NO':
        if not ogrtest.have_geos():
            pytest.skip()
    # Drop resolver artefacts from previous runs.
    try:
        os.remove('tmp/GmlTopo-sample.gfs')
    except OSError:
        pass
    try:
        os.remove('tmp/GmlTopo-sample.resolved.gml')
    except OSError:
        pass
    shutil.copy('data/GmlTopo-sample.xml', 'tmp/GmlTopo-sample.xml')
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', 'NONE')
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', GML_FACE_HOLE_NEGATIVE)
    ds = ogr.Open('tmp/GmlTopo-sample.xml')
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
    assert gdal.GetLastErrorMsg() == '', 'did not expect error'
    lyr = ds.GetLayerByName('Suolo')
    feat = lyr.GetNextFeature()
    # Expected geometry depends on the hole-handling mode.
    if GML_FACE_HOLE_NEGATIVE == 'NO':
        wkt = 'MULTIPOLYGON (((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6)))'
    else:
        wkt = 'POLYGON ((-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6),(0.2 0.2,0.2 0.4,0.4 0.4,0.5 0.2,0.5 0.1,0.5 0.0,0.2 0.0,0.2 0.2),(0.6 0.1,0.8 0.1,0.8 -0.1,0.6 -0.1,0.6 0.1))'
    assert not ogrtest.check_feature_geometry(feat, wkt), feat.GetGeometryRef()
    ds = None
    # Re-open (resolution cached on disk) and check the same geometry again.
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', GML_FACE_HOLE_NEGATIVE)
    ds = ogr.Open('tmp/GmlTopo-sample.xml')
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
    lyr = ds.GetLayerByName('Suolo')
    feat = lyr.GetNextFeature()
    assert not ogrtest.check_feature_geometry(feat, wkt), feat.GetGeometryRef()
    ds = None
###############################################################################
# Test GML_SKIP_RESOLVE_ELEMS=NONE with old GMLTopoSurface interpretation
def test_ogr_gml_37():
    """Re-run test_ogr_gml_36 with the old GMLTopoSurface interpretation
    (GML_FACE_HOLE_NEGATIVE=YES)."""
    return test_ogr_gml_36('YES')
###############################################################################
# Test new GMLTopoSurface interpretation (#3934) with HUGE xlink resolver
def test_ogr_gml_38(resolver='HUGE'):
    """Test the new GMLTopoSurface interpretation (#3934) with the HUGE
    xlink resolver (default) or the standard one (see test_ogr_gml_39).
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    if resolver == 'HUGE':
        if ogr.GetDriverByName('SQLite') is None:
            pytest.skip()
    if not ogrtest.have_geos():
        pytest.skip()

    # Start from a clean slate: no leftover resolver artefacts.
    try:
        os.remove('tmp/sample_gml_face_hole_negative_no.sqlite')
    except OSError:
        pass
    try:
        os.remove('tmp/sample_gml_face_hole_negative_no.gfs')
    except OSError:
        pass
    try:
        os.remove('tmp/sample_gml_face_hole_negative_no.resolved.gml')
    except OSError:
        pass

    shutil.copy('data/sample_gml_face_hole_negative_no.xml', 'tmp/sample_gml_face_hole_negative_no.xml')

    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', resolver)
    ds = ogr.Open('tmp/sample_gml_face_hole_negative_no.xml')
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
    gdal.SetConfigOption('GML_FACE_HOLE_NEGATIVE', None)
    if resolver == 'HUGE':
        # The temporary SQLite db must have been removed, so os.stat() raises.
        # (pytest.raises(..., message=...) was removed in pytest 5.0; use the
        # plain form.)
        with pytest.raises(OSError):
            os.stat('tmp/sample_gml_face_hole_negative_no.sqlite')
    assert gdal.GetLastErrorMsg() == '', 'did not expect error'

    lyr = ds.GetLayerByName('Suolo')
    feat = lyr.GetNextFeature()
    wkt = 'MULTIPOLYGON (((0.9 0.6,0.9 0.4,0.7 0.3,0.7 0.2,0.9 0.1,0.9 -0.1,0.6 -0.2,0.3 -0.2,0.2 -0.2,-0.1 0.0,-0.1 0.1,-0.1 0.2,0.1 0.3,0.1 0.4,-0.0 0.4,-0.1 0.5,-0.1 0.6,-0.0 0.7,0.2 0.7,0.3 0.6,0.5 0.6,0.5 0.8,0.7 0.8,0.8 0.6,0.9 0.6),(0.6 0.1,0.6 -0.1,0.8 -0.1,0.8 0.1,0.6 0.1),(0.2 0.4,0.2 0.2,0.2 0.0,0.5 0.0,0.5 0.1,0.5 0.2,0.4 0.4,0.2 0.4)))'
    assert not ogrtest.check_feature_geometry(feat, wkt), feat.GetGeometryRef()
    ds = None
###############################################################################
# Test new GMLTopoSurface interpretation (#3934) with standard xlink resolver
def test_ogr_gml_39():
    """Re-run test_ogr_gml_38 with the standard xlink resolver
    (GML_SKIP_RESOLVE_ELEMS=NONE)."""
    return test_ogr_gml_38('NONE')
###############################################################################
# Test parsing XSD where simpleTypes not inlined, but defined elsewhere in the .xsd (#4328)
def test_ogr_gml_40():
    """Parse an .xsd whose simpleTypes are not inlined but defined elsewhere
    in the schema (#4328)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    ds = ogr.Open('data/testLookForSimpleType.xml')
    layer_defn = ds.GetLayer(0).GetLayerDefn()
    cityname_defn = layer_defn.GetFieldDefn(layer_defn.GetFieldIndex('CITYNAME'))
    # The field width must be picked up from the referenced simpleType.
    assert cityname_defn.GetWidth() == 26
###############################################################################
# Test validating against .xsd
def test_ogr_gml_41():
    """Probe schema-validation support and set gdaltest.have_gml_validation.

    Later validation tests skip when the flag stays False (no libxml2, or the
    OGC schema archive could not be downloaded).
    """
    gdaltest.have_gml_validation = False
    if not gdaltest.have_gml_reader:
        pytest.skip()
    if not gdaltest.download_file('http://schemas.opengis.net/SCHEMAS_OPENGIS_NET.zip', 'SCHEMAS_OPENGIS_NET.zip'):
        pytest.skip()
    ds = ogr.Open('data/expected_gml_21.gml')
    # Point the validator at the zipped schema archive for this one query.
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', '/vsizip/./tmp/cache/SCHEMAS_OPENGIS_NET.zip')
    lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
    feat = lyr.GetNextFeature()
    val = feat.GetFieldAsInteger(0)
    feat = None
    ds.ReleaseResultSet(lyr)
    if val == 0:
        # Validation only legitimately fails here when libxml2 is missing.
        assert gdal.GetLastErrorMsg().find('not implemented due to missing libxml2 support') != -1
        pytest.skip()
    gdaltest.have_gml_validation = True
###############################################################################
# Test validating against .xsd
def test_ogr_gml_42():
    """Validate a GML 3.2 document against the unzipped OGC schemas."""
    if not gdaltest.have_gml_validation:
        pytest.skip()
    # Unzip the schema archive once; later runs reuse the extracted tree.
    try:
        os.mkdir('tmp/cache/SCHEMAS_OPENGIS_NET')
    except OSError:
        pass
    try:
        os.stat('tmp/cache/SCHEMAS_OPENGIS_NET/gml')
    except OSError:
        gdaltest.unzip('tmp/cache/SCHEMAS_OPENGIS_NET', 'tmp/cache/SCHEMAS_OPENGIS_NET.zip')
    ds = ogr.Open('data/expected_gml_gml32.gml')
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
    lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
    feat = lyr.GetNextFeature()
    val = feat.GetFieldAsInteger(0)
    feat = None
    ds.ReleaseResultSet(lyr)
    # Non-zero means the document validated.
    assert val != 0
###############################################################################
# Test automated downloading of WFS schema
def test_ogr_gml_43():
    """Test automated downloading of a WFS schema (disabled: service times out)."""
    # The service times out
    pytest.skip()
    # pylint: disable=unreachable
    if not gdaltest.have_gml_reader:
        pytest.skip()
    ds = ogr.Open('data/wfs_typefeature.gml')
    assert ds is not None
    ds = None
    # If a .gfs was generated, the schema download must NOT have been possible
    # (otherwise an .xsd-driven open would have been used instead).
    try:
        os.stat('data/wfs_typefeature.gfs')
        gfs_found = True
    except OSError:
        gfs_found = False
    if gfs_found:
        if gdaltest.gdalurlopen('http://testing.deegree.org:80/deegree-wfs/services?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=app:Springs&NAMESPACE=xmlns(app=http://www.deegree.org/app)') is None:
            can_download_schema = False
        else:
            can_download_schema = gdal.GetDriverByName('HTTP') is not None
        assert not can_download_schema, '.gfs found, but schema could be downloaded'
###############################################################################
# Test providing a custom XSD filename
def test_ogr_gml_44():
    """Open a GML file with an explicit ',xsd=' override pointing at a custom schema."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Minimal schema declaring a geometry plus a single decimal field 'dbl'.
    xsd_content = """<?xml version="1.0" encoding="UTF-8"?>
<xs:schema targetNamespace="http://ogr.maptools.org/" xmlns:ogr="http://ogr.maptools.org/" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:gml="http://www.opengis.net/gml" elementFormDefault="qualified" version="1.0">
<xs:import namespace="http://www.opengis.net/gml" schemaLocation="http://schemas.opengeospatial.net/gml/2.1.2/feature.xsd"/><xs:element name="FeatureCollection" type="ogr:FeatureCollectionType" substitutionGroup="gml:_FeatureCollection"/>
<xs:complexType name="FeatureCollectionType">
  <xs:complexContent>
    <xs:extension base="gml:AbstractFeatureCollectionType">
      <xs:attribute name="lockId" type="xs:string" use="optional"/>
      <xs:attribute name="scope" type="xs:string" use="optional"/>
    </xs:extension>
  </xs:complexContent>
</xs:complexType>
<xs:element name="test_point" type="ogr:test_point_Type" substitutionGroup="gml:_Feature"/>
<xs:complexType name="test_point_Type">
  <xs:complexContent>
    <xs:extension base="gml:AbstractFeatureType">
      <xs:sequence>
        <xs:element name="geometryProperty" type="gml:GeometryPropertyType" nillable="true" minOccurs="1" maxOccurs="1"/>
        <xs:element name="dbl" nillable="true" minOccurs="0" maxOccurs="1">
          <xs:simpleType>
            <xs:restriction base="xs:decimal">
              <xs:totalDigits value="32"/>
              <xs:fractionDigits value="3"/>
            </xs:restriction>
          </xs:simpleType>
        </xs:element>
      </xs:sequence>
    </xs:extension>
  </xs:complexContent>
</xs:complexType>
</xs:schema>"""
    gdal.FileFromMemBuffer('/vsimem/ogr_gml_44.xsd', xsd_content)
    ds = ogr.Open('data/test_point.gml,xsd=/vsimem/ogr_gml_44.xsd')
    lyr = ds.GetLayer(0)
    # fid and dbl
    assert lyr.GetLayerDefn().GetFieldCount() == 2
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_44.xsd')
###############################################################################
# Test PREFIX and TARGET_NAMESPACE creation options
def test_ogr_gml_45():
    """Test the PREFIX and TARGET_NAMESPACE creation options, then validate
    the produced document (when validation support is available)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    drv = ogr.GetDriverByName('GML')
    ds = drv.CreateDataSource('/vsimem/ogr_gml_45.gml', options=['PREFIX=foo', 'TARGET_NAMESPACE=http://bar/'])
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('dbl', ogr.OFTReal))
    dst_feat = ogr.Feature(lyr.GetLayerDefn())
    dst_feat.SetField('str', 'str')
    dst_feat.SetField('int', 1)
    dst_feat.SetField('dbl', 2.34)
    lyr.CreateFeature(dst_feat)
    dst_feat = None
    ds = None
    # Without validation support, writing succeeded but there is nothing
    # further to check.
    if not gdaltest.have_gml_validation:
        gdal.Unlink('/vsimem/ogr_gml_45.gml')
        gdal.Unlink('/vsimem/ogr_gml_45.xsd')
        pytest.skip()
    # Validate document
    ds = ogr.Open('/vsimem/ogr_gml_45.gml')
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
    lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
    gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
    feat = lyr.GetNextFeature()
    val = feat.GetFieldAsInteger(0)
    feat = None
    ds.ReleaseResultSet(lyr)
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_45.gml')
    gdal.Unlink('/vsimem/ogr_gml_45.xsd')
    # Non-zero means the document validated.
    assert val != 0
###############################################################################
# Validate different kinds of GML files
def test_ogr_gml_46():
    """Write every WKT geometry kind in every GML flavor and validate the output.

    For each (wkt, format) combination: write a one-feature datasource,
    re-open it, compare the geometry, then run ValidateSchema() against the
    local OGC schema tree.
    """
    if not gdaltest.have_gml_validation:
        pytest.skip()
    wkt_list = ['',
                'POINT (0 1)',
                # 'POINT (0 1 2)',
                'LINESTRING (0 1,2 3)',
                # 'LINESTRING (0 1 2,3 4 5)',
                'POLYGON ((0 0,0 1,1 1,1 0,0 0))',
                # 'POLYGON ((0 0 10,0 1 10,1 1 10,1 0 10,0 0 10))',
                'MULTIPOINT (0 1)',
                # 'MULTIPOINT (0 1 2)',
                'MULTILINESTRING ((0 1,2 3))',
                # 'MULTILINESTRING ((0 1 2,3 4 5))',
                'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))',
                # 'MULTIPOLYGON (((0 0 10,0 1 10,1 1 10,1 0 10,0 0 10)))',
                'GEOMETRYCOLLECTION (POINT (0 1))',
                # 'GEOMETRYCOLLECTION (POINT (0 1 2))'
               ]
    format_list = ['GML2', 'GML3', 'GML3Deegree', 'GML3.2']
    for wkt in wkt_list:
        for frmt in format_list:
            drv = ogr.GetDriverByName('GML')
            ds = drv.CreateDataSource('/vsimem/ogr_gml_46.gml', options=['FORMAT=%s' % frmt])
            if wkt != '':
                geom = ogr.CreateGeometryFromWkt(wkt)
                geom_type = geom.GetGeometryType()
                srs = osr.SpatialReference()
                srs.ImportFromEPSG(4326)
            else:
                # Empty WKT exercises the geometry-less (wkbNone) case.
                geom = None
                geom_type = ogr.wkbNone
                srs = None
            lyr = ds.CreateLayer('test', geom_type=geom_type, srs=srs)
            lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
            lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
            lyr.CreateField(ogr.FieldDefn('dbl', ogr.OFTReal))
            dst_feat = ogr.Feature(lyr.GetLayerDefn())
            dst_feat.SetField('str', 'str')
            dst_feat.SetField('int', 1)
            dst_feat.SetField('dbl', 2.34)
            dst_feat.SetGeometry(geom)
            lyr.CreateFeature(dst_feat)
            dst_feat = None
            ds = None
            # Validate document
            ds = ogr.Open('/vsimem/ogr_gml_46.gml')
            lyr = ds.GetLayer(0)
            feat = lyr.GetNextFeature()
            got_geom = feat.GetGeometryRef()
            if got_geom is None:
                got_geom_wkt = ''
            else:
                got_geom_wkt = got_geom.ExportToWkt()
            if got_geom_wkt != wkt:
                # NOTE(review): a geometry mismatch is only reported, not
                # failed — presumably intentional leniency; confirm.
                gdaltest.post_reason('geometry do not match')
                print('got %s, expected %s' % (got_geom_wkt, wkt))
            feat = None
            gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
            lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
            gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
            feat = lyr.GetNextFeature()
            val = feat.GetFieldAsInteger(0)
            feat = None
            ds.ReleaseResultSet(lyr)
            ds = None
            if val == 0:
                # Dump the produced .gml/.xsd to ease debugging before failing.
                gdaltest.post_reason('validation failed for format=%s, wkt=%s' % (frmt, wkt))
                f = gdal.VSIFOpenL('/vsimem/ogr_gml_46.gml', 'rb')
                content = gdal.VSIFReadL(1, 10000, f)
                gdal.VSIFCloseL(f)
                print(content)
                f = gdal.VSIFOpenL('/vsimem/ogr_gml_46.xsd', 'rb')
                content = gdal.VSIFReadL(1, 10000, f)
                gdal.VSIFCloseL(f)
                print(content)
            gdal.Unlink('/vsimem/ogr_gml_46.gml')
            gdal.Unlink('/vsimem/ogr_gml_46.xsd')
            assert val != 0
            # Only minor schema changes
            if frmt == 'GML3Deegree':
                break
###############################################################################
# Test validation of WFS GML documents
def test_ogr_gml_47():
    """Validate a set of WFS GML documents against the local OGC schema tree."""
    if not gdaltest.have_gml_validation:
        pytest.skip()
    for filename in ('data/wfs10.xml', 'data/wfs11.xml', 'data/wfs20.xml'):
        # Validate document
        ds = ogr.Open(filename)
        gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', './tmp/cache/SCHEMAS_OPENGIS_NET')
        sql_lyr = ds.ExecuteSQL('SELECT ValidateSchema()')
        gdal.SetConfigOption('GDAL_OPENGIS_SCHEMAS', None)
        f = sql_lyr.GetNextFeature()
        val = f.GetFieldAsInteger(0)
        f = None
        ds.ReleaseResultSet(sql_lyr)
        ds = None
        # Non-zero means the document validated.
        assert val != 0, ('validation failed for file=%s' % filename)
###############################################################################
# Test that we can parse some particular .xsd files that have the geometry
# field declared as :
# <xsd:element name="geometry" minOccurs="0" maxOccurs="1">
# <xsd:complexType>
# <xsd:sequence>
# <xsd:element ref="gml:_Geometry"/>
# </xsd:sequence>
# </xsd:complexType>
# </xsd:element>
def test_ogr_gml_48():
    """Parse an .xsd declaring the geometry inside an anonymous complexType
    wrapping a gml:_Geometry reference."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.Unlink('data/schema_with_geom_in_complextype.gfs')
    ds = ogr.Open('data/schema_with_geom_in_complextype.xml')
    layer = ds.GetLayer(0)
    # gml:_Geometry cannot be narrowed down to a specific geometry type.
    assert layer.GetGeomType() == ogr.wkbUnknown
    assert layer.GetLayerDefn().GetFieldDefn(0).GetType() == ogr.OFTString
    ds = None
###############################################################################
# Test a pseudo Inspire GML file
def test_ogr_gml_49():
    """Read a pseudo-Inspire GML file with two geometry properties; the first
    (polygon) one must win, both on first parse and via the .gfs (#6247)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    xsd_content = """<ogr:FeatureCollection xmlns:gml="http://www.opengis.net/gml" xmlns:ogr="http://ogr.maptools.org/">
  <gml:featureMember>
    <ogr:test>
      <ogr:geometry><gml:Polygon><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>2,49 2,50 3,50 3,49 2,49</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon></ogr:geometry>
      <ogr:otherGeometry><gml:Point><gml:pos>-2 -49</gml:pos></gml:Point></ogr:otherGeometry>
    </ogr:test>
  </gml:featureMember>
</ogr:FeatureCollection>
"""
    gdal.FileFromMemBuffer('/vsimem/ogr_gml_49.gml', xsd_content)
    ds = ogr.Open('/vsimem/ogr_gml_49.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    assert feat.GetGeometryRef().GetGeometryType() == ogr.wkbPolygon
    ds = None
    # Now with .gfs file present (#6247)
    ds = ogr.Open('/vsimem/ogr_gml_49.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    assert feat.GetGeometryRef().GetGeometryType() == ogr.wkbPolygon
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_49.gml')
    gdal.Unlink('/vsimem/ogr_gml_49.gfs')
###############################################################################
# Test support for StringList, IntegerList, RealList
def test_ogr_gml_50():
    """Round-trip StringList, IntegerList and RealList field types."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    drv = ogr.GetDriverByName('GML')
    ds = drv.CreateDataSource('/vsimem/ogr_gml_50.gml')
    lyr = ds.CreateLayer('listlayer')
    field_defn = ogr.FieldDefn('stringlist', ogr.OFTStringList)
    lyr.CreateField(field_defn)
    field_defn = ogr.FieldDefn('intlist', ogr.OFTIntegerList)
    lyr.CreateField(field_defn)
    field_defn = ogr.FieldDefn('reallist', ogr.OFTRealList)
    lyr.CreateField(field_defn)
    feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    feat.SetFieldStringList(0, ['a', 'b'])
    feat.SetFieldIntegerList(1, [2, 3])
    feat.SetFieldDoubleList(2, [4.56, 5.67])
    lyr.CreateFeature(feat)
    ds = None
    # Read the values back; each must survive unchanged.
    ds = ogr.Open('/vsimem/ogr_gml_50.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    if feat.GetFieldAsStringList(lyr.GetLayerDefn().GetFieldIndex('stringlist')) != ['a', 'b']:
        feat.DumpReadable()
        pytest.fail()
    if feat.GetFieldAsIntegerList(lyr.GetLayerDefn().GetFieldIndex('intlist')) != [2, 3]:
        feat.DumpReadable()
        pytest.fail()
    if feat.GetFieldAsDoubleList(lyr.GetLayerDefn().GetFieldIndex('reallist')) != [4.56, 5.67]:
        feat.DumpReadable()
        pytest.fail()
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_50.gml')
    gdal.Unlink('/vsimem/ogr_gml_50.xsd')
###############################################################################
# Test -dsco WRITE_FEATURE_BOUNDED_BY=no -dsco STRIP_PREFIX=YES
def test_ogr_gml_51():
    """Test -dsco WRITE_FEATURE_BOUNDED_BY=no -dsco STRIP_PREFIX=YES for both
    GML2 and GML3 output."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    import test_cli_utilities
    if test_cli_utilities.get_ogr2ogr_path() is None:
        pytest.skip()
    for frmt in ['GML2', 'GML3']:
        gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f GML tmp/ogr_gml_51.gml data/poly.shp -dsco FORMAT=%s -dsco WRITE_FEATURE_BOUNDED_BY=no -dsco STRIP_PREFIX=YES' % frmt)
        f = open('tmp/ogr_gml_51.gml', 'rt')
        content = f.read()
        f.close()
        # STRIP_PREFIX: no ogr: prefix on the collection / feature elements.
        assert content.find("<FeatureCollection") != -1
        if frmt == 'GML3':
            assert content.find("<featureMember>") != -1
        assert content.find("""<poly""") != -1
        assert content.find("""<AREA>215229.266</AREA>""") != -1
        # WRITE_FEATURE_BOUNDED_BY=no: no per-feature gml:boundedBy element.
        assert content.find("""<gml:boundedBy><gml:Envelope><gml:lowerCorner>479647""") == -1
        # The output must still open and yield a feature.
        ds = ogr.Open('tmp/ogr_gml_51.gml')
        lyr = ds.GetLayer(0)
        feat = lyr.GetNextFeature()
        assert feat is not None
        ds = None
###############################################################################
# Test reading MTKGML files
def test_ogr_gml_52():
    """Read MTKGML (Finnish NLS) files, both on first parse and via the
    generated .gfs (hence the two passes)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    try:
        os.remove('data/fake_mtkgml.gfs')
    except OSError:
        pass
    # First pass parses the .xml, second pass goes through the .gfs.
    for _ in range(2):
        ds = ogr.Open('data/fake_mtkgml.xml')
        lyr = ds.GetLayerByName('A')
        assert lyr.GetGeomType() == ogr.wkbPoint25D
        srs = lyr.GetSpatialRef()
        assert srs is not None
        wkt = srs.ExportToWkt()
        # EPSG:3067 (the CRS referenced by the file) must be detected.
        assert '3067' in wkt
        feat = lyr.GetNextFeature()
        if feat.GetField('gid') != '1' or \
           feat.GetField('regular_attribute') != 5 or \
           feat.GetField('foo_href') != 'some_ref' or \
           feat.GetField('teksti') != 'En francais !' or \
           feat.GetField('teksti_kieli') != 'fr' or \
           ogrtest.check_feature_geometry(feat, 'POINT (280000 7000000 0)') != 0:
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('B')
        assert lyr.GetGeomType() == ogr.wkbPolygon25D
        srs = lyr.GetSpatialRef()
        assert srs is not None
        feat = lyr.GetNextFeature()
        if ogrtest.check_feature_geometry(feat, 'POLYGON ((280000 7000000 0,281000 7000000 0,281000 7001000 0,280000 7001000 0,280000 7000000 0))') != 0:
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('C')
        assert lyr.GetGeomType() == ogr.wkbLineString25D
        feat = lyr.GetNextFeature()
        if ogrtest.check_feature_geometry(feat, 'LINESTRING (280000 7000000 0,281000 7000000 0,281000 7001000 0,280000 7001000 0,280000 7000000 0)') != 0:
            feat.DumpReadable()
            pytest.fail()
        ds = None
    os.remove('data/fake_mtkgml.gfs')
###############################################################################
# Test that we don't recognize .xsd files themselves
def test_ogr_gml_53():
    """A bare .xsd file must not be recognized as a GML datasource."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    assert ogr.Open('data/archsites.xsd') is None
###############################################################################
# Test that we can open an empty GML datasource (#249, #5205)
def test_ogr_gml_54():
    """An empty GML datasource must still open (#249, #5205)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.Unlink('data/empty.gfs')
    # First open parses the bare .gml ...
    assert ogr.Open('data/empty.gml') is not None
    # ... the second open goes through the generated .gfs.
    assert ogr.Open('data/empty.gml') is not None
    gdal.Unlink('data/empty.gfs')
###############################################################################
# Test support for <xs:include> in schemas
# Necessary for Finnish NLS data
def test_ogr_gml_55():
    """Support <xs:include> in schemas (needed for Finnish NLS data)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    ds = ogr.Open('data/ogr_gml_55.gml')
    assert ds.GetLayer(0).GetLayerDefn().GetFieldDefn(0).GetType() == ogr.OFTString
    ds = None
    # The schema was usable, so no .gfs must have been generated: unlink fails.
    with pytest.raises(OSError):
        os.unlink('data/ogr_gml_55.gfs')
###############################################################################
# Test support for gml:FeaturePropertyType and multiple geometry field
# Necessary for Finnish NLS data
def test_ogr_gml_56():
    """Support gml:FeaturePropertyType and multiple geometry fields via a
    GML_REGISTRY mapping (needed for Finnish NLS data)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.Unlink('data/ogr_gml_56.gfs')
    # The registry file declares the feature classes of this document.
    gdal.SetConfigOption('GML_REGISTRY', 'data/ogr_gml_56_registry.xml')
    ds = ogr.Open('data/ogr_gml_56.gml')
    gdal.SetConfigOption('GML_REGISTRY', None)
    lyr = ds.GetLayerByName('mainFeature')
    assert lyr.GetSpatialRef() is not None
    feat = lyr.GetNextFeature()
    # xlink:href references to sub-features are exposed as *_href fields.
    assert feat.GetFieldAsString(feat.GetFieldIndex('subFeatureProperty_href')) == '#subFeature.0'
    assert feat.GetFieldAsStringList(feat.GetFieldIndex('subFeatureRepeatedProperty_href')) == ['#subFeatureRepeated.0', '#subFeatureRepeated.1']
    # Two geometry fields on the main feature.
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'POLYGON ((0 0,0 1,1 1,1 0,0 0))'
    assert feat.GetGeomFieldRef(1).ExportToWkt() == 'POINT (10 10)'
    lyr = ds.GetLayerByName('subFeature')
    assert lyr.GetLayerDefn().GetGeomFieldCount() == 0
    feat = lyr.GetNextFeature()
    assert feat.GetFieldAsStringList(feat.GetFieldIndex('subFeatureRepeatedProperty_href')) == ['#subFeatureRepeated.2']
    assert feat.GetField('foo') == 'bar'
    lyr = ds.GetLayerByName('subFeatureRepeated')
    feat = lyr.GetNextFeature()
    assert feat.GetField('gml_id') == 'subFeatureRepeated.2'
    assert feat.GetField('bar') == 'baz'
    feat = lyr.GetNextFeature()
    assert feat.GetField('gml_id') == 'subFeatureRepeated.0'
    feat = lyr.GetNextFeature()
    assert feat.GetField('gml_id') == 'subFeatureRepeated.1'
    ds = None
    # With the registry the schema was resolvable, so no .gfs must exist.
    with pytest.raises(OSError):
        os.unlink('data/ogr_gml_56.gfs')
###############################################################################
# Test write support for multiple geometry field
def test_ogr_gml_57():
    """Test write support for multiple geometry fields.

    Four passes: i == 0 plain GML3 without SRS, i == 1 both geometry fields
    in EPSG:32630, i == 2 mixed SRS (32630 / 32631), i == 3 GML 3.2 output.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    for i in range(4):
        options = []
        if i == 3:
            options = ['FORMAT=GML3.2']
        ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_57.gml', options=options)
        # The GML driver must allow adding geometry fields after layer creation.
        assert ds.TestCapability(ogr.ODsCCreateGeomFieldAfterCreateLayer) == 1
        lyr = ds.CreateLayer('myLayer', geom_type=ogr.wkbNone)
        assert lyr.TestCapability(ogr.OLCCreateGeomField) == 1
        geomfielddefn = ogr.GeomFieldDefn('first_geometry', ogr.wkbPoint)
        if i == 1 or i == 2:
            sr = osr.SpatialReference()
            sr.ImportFromEPSG(32630)
            geomfielddefn.SetSpatialRef(sr)
        lyr.CreateGeomField(geomfielddefn)
        geomfielddefn = ogr.GeomFieldDefn('second_geometry', ogr.wkbLineString)
        if i == 1:
            sr = osr.SpatialReference()
            sr.ImportFromEPSG(32630)
            geomfielddefn.SetSpatialRef(sr)
        elif i == 2:
            sr = osr.SpatialReference()
            sr.ImportFromEPSG(32631)
            geomfielddefn.SetSpatialRef(sr)
        lyr.CreateGeomField(geomfielddefn)
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeomFieldDirectly(0, ogr.CreateGeometryFromWkt('POINT (0 1)'))
        feat.SetGeomFieldDirectly(1, ogr.CreateGeometryFromWkt('LINESTRING (2 3,4 5)'))
        lyr.CreateFeature(feat)
        feat = None
        ds = None
        # Debug helper: dump the generated file content when enabled by hand.
        if False:  # pylint: disable=using-constant-test
            f = gdal.VSIFOpenL('/vsimem/ogr_gml_57.gml', 'rb')
            print(gdal.VSIFReadL(1, 1000, f))
            gdal.VSIFCloseL(f)
        # Read back: geometries must round-trip, and when an SRS was set it
        # must be recovered on the corresponding geometry field.
        ds = ogr.Open('/vsimem/ogr_gml_57.gml')
        lyr = ds.GetLayer(0)
        feat = lyr.GetNextFeature()
        assert not (i == 1 and feat.GetGeomFieldRef(0).GetSpatialReference().ExportToWkt().find('32630') < 0)
        assert not (i == 1 and feat.GetGeomFieldRef(1).GetSpatialReference().ExportToWkt().find('32630') < 0)
        assert not (i == 2 and feat.GetGeomFieldRef(1).GetSpatialReference().ExportToWkt().find('32631') < 0)
        assert feat.GetGeomFieldRef(0).ExportToWkt() == 'POINT (0 1)'
        assert feat.GetGeomFieldRef(1).ExportToWkt() == 'LINESTRING (2 3,4 5)'
        ds = None
        gdal.Unlink('/vsimem/ogr_gml_57.gml')
        gdal.Unlink('/vsimem/ogr_gml_57.xsd')
###############################################################################
# Test support for Inspire Cadastral schemas
def test_ogr_gml_58():
    """Test support for Inspire Cadastral schemas.

    Opens four Inspire sample documents (CadastralParcel, BasicPropertyUnit,
    CadastralBoundary, CadastralZoning) and checks layer definitions
    (geometry field count/name/type) and field values of the first two
    features of each, including features with most properties absent.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.Unlink('data/inspire_cadastralparcel.gfs')
    # CadastralParcel: two geometry fields (area + reference point).
    ds = ogr.Open('data/inspire_cadastralparcel.xml')
    lyr = ds.GetLayer(0)
    lyr_defn = lyr.GetLayerDefn()
    assert lyr_defn.GetGeomFieldCount() == 2
    assert lyr_defn.GetGeomFieldDefn(0).GetName() == 'geometry'
    assert lyr_defn.GetGeomFieldDefn(0).GetType() == ogr.wkbMultiPolygon
    assert lyr_defn.GetGeomFieldDefn(1).GetName() == 'referencePoint'
    assert lyr_defn.GetGeomFieldDefn(1).GetType() == ogr.wkbPoint
    # First parcel: all properties set.
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralParcel-01'),
                ('areaValue', 10.0),
                ('areaValue_uom', 'm2'),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
                ('inspireId_localId', 'CadastralParcel-01-localId'),
                ('inspireId_namespace', 'namespace'),
                ('label', 'label'),
                ('nationalCadastralReference', 'nationalCadastralReference'),
                ('validFrom', '2002-01-01T00:00:00.0Z'),
                ('validTo', '2003-01-01T00:00:00.0Z'),
                ('basicPropertyUnit_href', ['#BPU.1', '#BPU.2']),
                ('administrativeUnit_href', '#AU.1'),
                ('zoning_href', '#CZ.1')]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))'
    assert feat.GetGeomFieldRef(1).ExportToWkt() == 'POINT (2.5 49.5)'
    # Second parcel: optional properties absent (must read back as None).
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralParcel-02'),
                ('areaValue', None),
                ('areaValue_uom', None),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', None),
                ('inspireId_localId', 'CadastralParcel-02-localId'),
                ('inspireId_namespace', 'namespace'),
                ('label', 'label'),
                ('nationalCadastralReference', 'nationalCadastralReference'),
                ('validFrom', None),
                ('validTo', None),
                ('basicPropertyUnit_href', None),
                ('administrativeUnit_href', None),
                ('zoning_href', None)]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))'
    assert feat.GetGeomFieldRef(1) is None
    feat = None
    lyr = None
    ds = None
    # BasicPropertyUnit: no geometry field at all.
    ds = ogr.Open('data/inspire_basicpropertyunit.xml')
    lyr = ds.GetLayer(0)
    lyr_defn = lyr.GetLayerDefn()
    assert lyr_defn.GetGeomFieldCount() == 0
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'BasicPropertyUnit-01'),
                ('inspireId_localId', 'BasicPropertyUnit-01-localId'),
                ('inspireId_namespace', 'namespace'),
                ('nationalCadastralReference', 'nationalCadastralReference'),
                ('areaValue', 10.0),
                ('areaValue_uom', 'm2'),
                ('validFrom', '2000-01-01T00:00:00.0Z'),
                ('validTo', '2001-01-01T00:00:00.0Z'),
                ('beginLifespanVersion', '2002-01-01T00:00:00.0Z'),
                ('endLifespanVersion', '2003-01-01T00:00:00.0Z'),
                ('administrativeUnit_href', '#AU.1')]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'BasicPropertyUnit-02'),
                ('inspireId_localId', 'BasicPropertyUnit-02-localId'),
                ('inspireId_namespace', 'namespace'),
                ('nationalCadastralReference', 'nationalCadastralReference'),
                ('areaValue', None),
                ('areaValue_uom', None),
                ('validFrom', '2000-01-01T00:00:00.0Z'),
                ('validTo', None),
                ('beginLifespanVersion', '2002-01-01T00:00:00.0Z'),
                ('endLifespanVersion', None),
                ('administrativeUnit_href', None)]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    feat = None
    lyr = None
    ds = None
    # CadastralBoundary: single LineString geometry field.
    ds = ogr.Open('data/inspire_cadastralboundary.xml')
    lyr = ds.GetLayer(0)
    lyr_defn = lyr.GetLayerDefn()
    assert lyr_defn.GetGeomFieldCount() == 1
    assert lyr_defn.GetGeomFieldDefn(0).GetName() == 'geometry'
    assert lyr_defn.GetGeomFieldDefn(0).GetType() == ogr.wkbLineString
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralBoundary-01'),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
                ('estimatedAccuracy', 1.0),
                ('estimatedAccuracy_uom', 'm'),
                ('inspireId_localId', 'CadastralBoundary-01-localId'),
                ('inspireId_namespace', 'namespace'),
                ('validFrom', '2002-01-01T00:00:00.0Z'),
                ('validTo', '2003-01-01T00:00:00.0Z'),
                ('parcel_href', ['#Parcel.1', '#Parcel.2'])]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'LINESTRING (2 49,3 50)'
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralBoundary-02'),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', None),
                ('estimatedAccuracy', None),
                ('estimatedAccuracy_uom', None),
                ('inspireId_localId', 'CadastralBoundary-02-localId'),
                ('inspireId_namespace', 'namespace'),
                ('validFrom', None),
                ('validTo', None),
                ('parcel_href', None)]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'LINESTRING (2 49,3 50)'
    feat = None
    lyr = None
    ds = None
    # CadastralZoning: two geometry fields plus multilingual list fields.
    ds = ogr.Open('data/inspire_cadastralzoning.xml')
    lyr = ds.GetLayer(0)
    lyr_defn = lyr.GetLayerDefn()
    assert lyr_defn.GetGeomFieldCount() == 2
    assert lyr_defn.GetGeomFieldDefn(0).GetName() == 'geometry'
    assert lyr_defn.GetGeomFieldDefn(0).GetType() == ogr.wkbMultiPolygon
    assert lyr_defn.GetGeomFieldDefn(1).GetName() == 'referencePoint'
    assert lyr_defn.GetGeomFieldDefn(1).GetType() == ogr.wkbPoint
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralZoning-01'),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', '2001-01-01T00:00:00.0Z'),
                ('estimatedAccuracy', 1.0),
                ('estimatedAccuracy_uom', 'm'),
                ('inspireId_localId', 'CadastralZoning-01-localId'),
                ('inspireId_namespace', 'namespace'),
                ('label', 'label'),
                ('level', '3'),
                ('levelName', ['English', 'Francais', 'Deutsch']),
                ('levelName_locale', ['en', 'fr', 'de']),
                ('name_language', ['language']),
                ('name_nativeness', ['nativeness']),
                ('name_nameStatus', ['nameStatus']),
                ('name_pronunciation', None),
                ('name_spelling_text', ['text']),
                ('name_spelling_script', ['script']),
                ('nationalCadastalZoningReference', 'nationalCadastalZoningReference'),
                ('validFrom', '2002-01-01T00:00:00.0Z'),
                ('validTo', '2003-01-01T00:00:00.0Z'),
                ('upperLevelUnit_href', '#ulu.1')]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))'
    assert feat.GetGeomFieldRef(1).ExportToWkt() == 'POINT (2.5 49.5)'
    feat = lyr.GetNextFeature()
    expected = [('gml_id', 'CadastralZoning-02'),
                ('beginLifespanVersion', '2000-01-01T00:00:00.0Z'),
                ('endLifespanVersion', None),
                ('estimatedAccuracy', None),
                ('estimatedAccuracy_uom', None),
                ('inspireId_localId', None),
                ('inspireId_namespace', None),
                ('label', 'label'),
                ('level', '3'),
                ('levelName', ['English']),
                ('levelName_locale', ['en']),
                ('name_language', None),
                ('name_nativeness', None),
                ('name_nameStatus', None),
                ('name_pronunciation', None),
                ('name_spelling_text', None),
                ('name_spelling_script', None),
                ('nationalCadastalZoningReference', 'nationalCadastalZoningReference'),
                ('validFrom', None),
                ('validTo', None),
                ('upperLevelUnit_href', None)]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    assert feat.GetGeomFieldRef(0).ExportToWkt() == 'MULTIPOLYGON (((2 49,2 50,3 50,3 49)))'
    assert feat.GetGeomFieldRef(1) is None
    feat = None
    lyr = None
    ds = None
###############################################################################
# Test GFS conditions
def test_ogr_gml_59():
    """Test GFS conditions (<Condition> elements selecting fields).

    The driver only honours a pre-existing .gfs file when it is newer than
    the .gml, so the .gfs is "touched" first if needed.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Make sure the .gfs file is more recent than the .gml one.
    try:
        gml_mtime = os.stat('data/testcondition.gml').st_mtime
        gfs_mtime = os.stat('data/testcondition.gfs').st_mtime
        touch_gfs = gfs_mtime <= gml_mtime
    except OSError:
        # A file is missing or unreadable: attempt the touch anyway.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
        touch_gfs = True
    if touch_gfs:
        print('Touching .gfs file')
        # Rewrite the first byte in place: bumps the mtime without changing
        # the content.  `with` guarantees the handle is closed on error.
        with open('data/testcondition.gfs', 'rb+') as f:
            data = f.read(1)
            f.seek(0, 0)
            f.write(data)
    ds = ogr.Open('data/testcondition.gml')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()
    # en/fr names are mapped to dedicated fields; other languages land in
    # the name_others/name_others_lang list fields.
    expected = [('name_en', 'English name'),
                ('name_fr', 'Nom francais'),
                ('name_others_lang', ['de']),
                ('name_others', ['Deutsche name'])]
    for (key, val) in expected:
        assert feat.GetField(key) == val
    feat = None
    lyr = None
    ds = None
########################################################
# Test reading WFS 2.0 GetFeature documents with wfs:FeatureCollection
# as a wfs:member of the top wfs:FeatureCollection
def test_ogr_gml_60():
    """Read WFS 2.0 GetFeature documents where a wfs:FeatureCollection is
    itself a wfs:member of the top-level wfs:FeatureCollection."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Start from a clean state: first pass parses the GML directly, second
    # pass goes through the .gfs file generated by the first one.
    gdal.Unlink('data/wfs_200_multiplelayers.gfs')
    for _ in range(2):
        dataset = ogr.Open('data/wfs_200_multiplelayers.gml')
        for layer_name, expected_id in (('road', 'road.21'),
                                        ('popplace', 'popplace.BACMK')):
            layer = dataset.GetLayerByName(layer_name)
            assert layer.GetFeatureCount() == 1
            feature = layer.GetNextFeature()
            assert feature.GetField('gml_id') == expected_id
        dataset = None
    gdal.Unlink('data/wfs_200_multiplelayers.gfs')
###############################################################################
# Test reading a element specified with a full path in <ElementPath>
def test_ogr_gml_61():
    """Test reading an element specified with a full path in <ElementPath>.

    The driver only honours a pre-existing .gfs file when it is newer than
    the .gml, so the .gfs is "touched" first if needed.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Make sure the .gfs file is more recent than the .gml one.
    try:
        gml_mtime = os.stat('data/gmlsubfeature.gml').st_mtime
        gfs_mtime = os.stat('data/gmlsubfeature.gfs').st_mtime
        touch_gfs = gfs_mtime <= gml_mtime
    except OSError:
        # A file is missing or unreadable: attempt the touch anyway.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
        touch_gfs = True
    if touch_gfs:
        print('Touching .gfs file')
        # Rewrite the first byte in place: bumps the mtime without changing
        # the content.  `with` guarantees the handle is closed on error.
        with open('data/gmlsubfeature.gfs', 'rb+') as f:
            data = f.read(1)
            f.seek(0, 0)
            f.write(data)
    ds = ogr.Open('data/gmlsubfeature.gml')
    lyr = ds.GetLayer(0)
    assert lyr.GetFeatureCount() == 2, 'did not get expected geometry column name'
    feat = lyr.GetNextFeature()
    if feat.GetField('gml_id') != 'Object.1' or feat.GetField('foo') != 'bar':
        feat.DumpReadable()
        pytest.fail()
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POLYGON ((2 48,2 49,3 49,3 48,2 48))':
        feat.DumpReadable()
        pytest.fail()
    feat = lyr.GetNextFeature()
    if feat.GetField('gml_id') != 'Object.2' or feat.GetField('foo') != 'baz':
        feat.DumpReadable()
        pytest.fail()
    geom = feat.GetGeometryRef()
    if geom.ExportToWkt() != 'POLYGON ((2 -48,2 -49,3 -49,3 -48,2 -48))':
        feat.DumpReadable()
        pytest.fail()
    ds = None
###############################################################################
# Test GML_ATTRIBUTES_TO_OGR_FIELDS option
def test_ogr_gml_62():
    """Test the GML_ATTRIBUTES_TO_OGR_FIELDS configuration option.

    By default XML attributes are ignored; with the option set they become
    extra OGR fields named <element>_<attribute>.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.Unlink('tmp/gmlattributes.gfs')
    shutil.copy('data/gmlattributes.gml', 'tmp/gmlattributes.gml')
    # Default behaviour: attributes not exposed, only one field.
    ds = ogr.Open('tmp/gmlattributes.gml')
    lyr = ds.GetLayer(0)
    assert lyr.GetLayerDefn().GetFieldCount() == 1
    ds = None
    # Test GML_ATTRIBUTES_TO_OGR_FIELDS=YES
    gdal.Unlink('tmp/gmlattributes.gfs')
    # Pass 0 parses the GML with the option set (and writes a .gfs);
    # pass 1 re-opens through that .gfs, which must preserve the mapping.
    for i in range(2):
        if i == 0:
            gdal.SetConfigOption('GML_ATTRIBUTES_TO_OGR_FIELDS', 'YES')
        ds = ogr.Open('tmp/gmlattributes.gml')
        if i == 0:
            gdal.SetConfigOption('GML_ATTRIBUTES_TO_OGR_FIELDS', None)
        lyr = ds.GetLayer(0)
        assert lyr.GetLayerDefn().GetFieldCount() == 4, i
        # Feature 1: element/element2 attributes set, element3 absent.
        feat = lyr.GetNextFeature()
        if feat.GetField('element_attr1') != '1' or \
                feat.GetField('element2_attr1') != 'a' or \
                feat.GetField('element2') != 'foo' or \
                feat.IsFieldSet('element3_attr1'):
            feat.DumpReadable()
            pytest.fail(i)
        # Feature 2: only element3's attribute is set (integer-typed).
        feat = lyr.GetNextFeature()
        if feat.IsFieldSet('element_attr1') or \
                feat.IsFieldSet('element2_attr1') or \
                feat.IsFieldSet('element2') or \
                feat.GetField('element3_attr1') != 1:
            feat.DumpReadable()
            pytest.fail(i)
        # Feature 3: only element's attribute is set.
        feat = lyr.GetNextFeature()
        if feat.GetField('element_attr1') != 'a' or \
                feat.IsFieldSet('element2_attr1') or \
                feat.IsFieldSet('element2') or \
                feat.IsFieldSet('element3_attr1'):
            feat.DumpReadable()
            pytest.fail(i)
        feat = None
        ds = None
###############################################################################
# Test reading RUIAN VFR files
def test_ogr_gml_63():
    """Read RUIAN VFR files (gzipped XML, ST and OB file types)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # ST file type: check layer count, first layer name and geometry column.
    dataset = ogr.Open('data/ruian_st_v1.xml.gz')
    assert dataset.GetLayerCount() == 14
    first_layer = dataset.GetLayer(0)
    assert first_layer.GetName() == 'Staty'
    assert first_layer.GetGeometryColumn() == 'DefinicniBod'
    dataset = None

    # OB file type: check layer count and total feature count.
    dataset = ogr.Open('data/ruian_ob_v1.xml.gz')
    layer_count = dataset.GetLayerCount()
    assert layer_count == 11
    total_features = sum(dataset.GetLayer(idx).GetFeatureCount()
                         for idx in range(layer_count))
    assert total_features == 7
###############################################################################
# Test multiple instances of parsers (#5571)
def test_ogr_gml_64():
    """Open a GML file repeatedly with each XML parser backend (#5571)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Two consecutive opens per parser exercise multiple parser instances.
    for parser_name in ('XERCES', 'EXPAT'):
        for _ in range(2):
            gdal.SetConfigOption('GML_PARSER', parser_name)
            dataset = ogr.Open('data/rnf_eg.gml')
            gdal.SetConfigOption('GML_PARSER', None)
            feature = dataset.GetLayer(0).GetNextFeature()
            assert feature is not None, parser_name
###############################################################################
# Test SRSDIMENSION_LOC=GEOMETRY option (#5606)
def test_ogr_gml_65():
    """Test the SRSDIMENSION_LOC creation option (#5606).

    Controls where the srsDimension attribute is written: on the geometry
    element, on the posList element, or on both.  Each variant must also
    read back to the original 3D multipolygon.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    option_expected_list = [['SRSDIMENSION_LOC=GEOMETRY', '<ogr:geometryProperty><gml:MultiSurface srsDimension="3"><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList>0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
                            ['SRSDIMENSION_LOC=POSLIST', '<ogr:geometryProperty><gml:MultiSurface><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList srsDimension="3">0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
                            ['SRSDIMENSION_LOC=GEOMETRY,POSLIST', '<ogr:geometryProperty><gml:MultiSurface srsDimension="3"><gml:surfaceMember><gml:Polygon><gml:exterior><gml:LinearRing><gml:posList srsDimension="3">0 1 2 3 4 5 6 7 8 0 1 2</gml:posList></gml:LinearRing></gml:exterior></gml:Polygon></gml:surfaceMember></gml:MultiSurface></ogr:geometryProperty>'],
                           ]
    for (option, expected) in option_expected_list:
        filename = '/vsimem/ogr_gml_65.gml'
        # filename = 'ogr_gml_65.gml'
        ds = ogr.GetDriverByName('GML').CreateDataSource(filename, options=['FORMAT=GML3', option])
        lyr = ds.CreateLayer('lyr')
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometry(ogr.CreateGeometryFromWkt("MULTIPOLYGON (((0 1 2,3 4 5,6 7 8,0 1 2)))"))
        lyr.CreateFeature(feat)
        ds = None
        # Check that the serialized XML contains the expected snippet.
        f = gdal.VSIFOpenL(filename, 'rb')
        data = gdal.VSIFReadL(1, 10000, f).decode('ascii')
        gdal.VSIFCloseL(f)
        assert expected in data
        # Round-trip: the geometry must read back unchanged.
        ds = ogr.Open(filename)
        lyr = ds.GetLayer(0)
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != "MULTIPOLYGON (((0 1 2,3 4 5,6 7 8,0 1 2)))":
            feat.DumpReadable()
            pytest.fail()
        ds = None
        gdal.Unlink(filename)
        gdal.Unlink(filename[0:-3] + "xsd")
###############################################################################
# Test curve geometries
def test_ogr_gml_66():
    """Test curve geometries (GML3 write + read back).

    Writes one layer per (typed and untyped) curve/surface flavour, then
    re-opens the file three times: pass 0 with the .xsd present, pass 1
    without it (a .gfs gets generated), pass 2 through that .gfs.  Untyped
    layers only report a curve geometry type from pass 1 onwards.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    filename = '/vsimem/ogr_gml_66.gml'
    # filename = 'ogr_gml_66.gml'
    ds = ogr.GetDriverByName('GML').CreateDataSource(filename, options=['FORMAT=GML3'])
    # Typed layers: declared curve geometry types.
    lyr = ds.CreateLayer('compoundcurve', geom_type=ogr.wkbCompoundCurve)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('CIRCULARSTRING (0 0,1 1,2 0)'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('curvepolygon', geom_type=ogr.wkbCurvePolygon)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multisurface', geom_type=ogr.wkbMultiSurface)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTISURFACE (CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multicurve', geom_type=ogr.wkbMultiCurve)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE ( CIRCULARSTRING(0 0,1 0,0 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    # Non-curve layers: plain linear geometry types.
    lyr = ds.CreateLayer('polygon', geom_type=ogr.wkbPolygon)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('linestring', geom_type=ogr.wkbLineString)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,0 1,1 1,0 0)'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multipolygon', geom_type=ogr.wkbMultiPolygon)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multilinestring', geom_type=ogr.wkbMultiLineString)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    # Untyped layers: no declared geometry type; mixed linear/curve content.
    lyr = ds.CreateLayer('compoundcurve_untyped')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1,2 0)'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('curvepolygon_untyped')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multisurface_untyped')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTISURFACE (CURVEPOLYGON ( CIRCULARSTRING(0 0,1 0,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0,0 1,1 1,0 0)))'))
    lyr.CreateFeature(f)
    f = None
    lyr = ds.CreateLayer('multicurve_untyped')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTICURVE (CIRCULARSTRING (0 0,1 1,2 0))'))
    lyr.CreateFeature(f)
    f = None
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 0,0 1,1 1,0 0))'))
    lyr.CreateFeature(f)
    f = None
    ds = None
    # Test first with .xsd and then without
    for i in range(3):
        ds = ogr.Open(filename)
        # Typed layers must keep their declared type and promote linear
        # members to the curve container type on read.
        lyr = ds.GetLayerByName('compoundcurve')
        assert lyr.GetGeomType() == ogr.wkbCompoundCurve
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
            feat.DumpReadable()
            pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
            feat.DumpReadable()
            pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE ((0 0,1 1,2 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('curvepolygon')
        assert lyr.GetGeomType() == ogr.wkbCurvePolygon
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
            feat.DumpReadable()
            pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON ((0 0,0 1,1 1,0 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multisurface')
        assert lyr.GetGeomType() == ogr.wkbMultiSurface
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0)))':
            feat.DumpReadable()
            pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (((0 0,0 1,1 1,0 0)))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multicurve')
        assert lyr.GetGeomType() == ogr.wkbMultiCurve
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
            feat.DumpReadable()
            pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE ((0 0,0 1,1 1,0 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('polygon')
        assert lyr.GetGeomType() == ogr.wkbPolygon
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'POLYGON ((0 0,0 1,1 1,0 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('linestring')
        assert lyr.GetGeomType() == ogr.wkbLineString
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (0 0,0 1,1 1,0 0)':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multipolygon')
        assert lyr.GetGeomType() == ogr.wkbMultiPolygon
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((0 0,0 1,1 1,0 0)))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multilinestring')
        assert lyr.GetGeomType() == ogr.wkbMultiLineString
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((0 0,0 1,1 1,0 0))':
            feat.DumpReadable()
            pytest.fail()
        # Untyped layers: with the .xsd (i == 0) the first feature reads with
        # its plain linear type; once a .gfs exists (i != 0) the layer gets a
        # curve geometry type and linear features are promoted.
        lyr = ds.GetLayerByName('compoundcurve_untyped')
        if i != 0:
            assert lyr.GetGeomType() == ogr.wkbCompoundCurve
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE ((0 0,1 1,2 0))':
                feat.DumpReadable()
                pytest.fail()
        else:
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (0 0,1 1,2 0)':
                feat.DumpReadable()
                pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'COMPOUNDCURVE (CIRCULARSTRING (0 0,1 1,2 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('curvepolygon_untyped')
        if i != 0:
            assert lyr.GetGeomType() == ogr.wkbCurvePolygon
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON ((0 0,0 1,1 1,0 0))':
                feat.DumpReadable()
                pytest.fail()
        else:
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'POLYGON ((0 0,0 1,1 1,0 0))':
                feat.DumpReadable()
                pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multisurface_untyped')
        if i != 0:
            assert lyr.GetGeomType() == ogr.wkbMultiSurface
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (((0 0,0 1,1 1,0 0)))':
                feat.DumpReadable()
                pytest.fail()
        else:
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((0 0,0 1,1 1,0 0)))':
                feat.DumpReadable()
                pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (0 0,0.5 0.5,1 0,0.5 -0.5,0 0)))':
            feat.DumpReadable()
            pytest.fail()
        lyr = ds.GetLayerByName('multicurve_untyped')
        if i != 0:
            assert lyr.GetGeomType() == ogr.wkbMultiCurve
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE ((0 0,0 1,1 1,0 0))':
                feat.DumpReadable()
                pytest.fail()
        else:
            feat = lyr.GetNextFeature()
            if feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((0 0,0 1,1 1,0 0))':
                feat.DumpReadable()
                pytest.fail()
        feat = lyr.GetNextFeature()
        if feat.GetGeometryRef().ExportToWkt() != 'MULTICURVE (CIRCULARSTRING (0 0,1 1,2 0))':
            feat.DumpReadable()
            pytest.fail()
        ds = None
        # Remove the .xsd after pass 0 so pass 1 generates a .gfs.
        gdal.Unlink(filename[0:-3] + "xsd")
    gdal.Unlink(filename)
    gdal.Unlink(filename[0:-3] + "gfs")
###############################################################################
# Test boolean, int16, integer64 type
def test_ogr_gml_67():
    """Test boolean, int16, float32 and integer64 field types.

    Writes fields with OFSTBoolean/OFSTInt16/OFSTFloat32 subtypes and
    OFTInteger64(List) types, then re-opens three times (with .xsd, without,
    then via the generated .gfs).  The Int16/Float32 subtypes are only
    checked on pass 0, i.e. when the .xsd is still present.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    filename = '/vsimem/ogr_gml_67.gml'
    ds = ogr.GetDriverByName('GML').CreateDataSource(filename)
    lyr = ds.CreateLayer('test')
    fld_defn = ogr.FieldDefn('b1', ogr.OFTInteger)
    fld_defn.SetSubType(ogr.OFSTBoolean)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('b2', ogr.OFTInteger)
    fld_defn.SetSubType(ogr.OFSTBoolean)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('bool_list', ogr.OFTIntegerList)
    fld_defn.SetSubType(ogr.OFSTBoolean)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('short', ogr.OFTInteger)
    fld_defn.SetSubType(ogr.OFSTInt16)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('float', ogr.OFTReal)
    fld_defn.SetSubType(ogr.OFSTFloat32)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('int64', ogr.OFTInteger64)
    lyr.CreateField(fld_defn)
    fld_defn = ogr.FieldDefn('int64list', ogr.OFTInteger64List)
    lyr.CreateField(fld_defn)
    # Feature 1: representative values for every field.
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField(0, 1)
    f.SetField(1, 0)
    f.SetFieldIntegerList(2, [1, 0])
    f.SetField(3, -32768)
    f.SetField(4, 1.23)
    f.SetField(5, 1)
    f.SetFieldInteger64List(6, [1])
    lyr.CreateFeature(f)
    # Feature 2: 64-bit FID and 64-bit integer values.
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetFID(1234567890123)
    f.SetField(5, 1234567890123)
    f.SetFieldInteger64List(6, [1, 1234567890123])
    lyr.CreateFeature(f)
    f = None
    ds = None
    # Test first with .xsd and then without
    for i in range(3):
        ds = ogr.Open(filename)
        lyr = ds.GetLayer(0)
        assert (lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('b1')).GetType() == ogr.OFTInteger and \
                lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('b1')).GetSubType() == ogr.OFSTBoolean), \
            i
        assert (lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('bool_list')).GetType() == ogr.OFTIntegerList and \
                lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('bool_list')).GetSubType() == ogr.OFSTBoolean), \
            i
        if i == 0:
            assert (lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('short')).GetType() == ogr.OFTInteger and \
                    lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('short')).GetSubType() == ogr.OFSTInt16), \
                i
        if i == 0:
            assert (lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('float')).GetType() == ogr.OFTReal and \
                    lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('float')).GetSubType() == ogr.OFSTFloat32), \
                i
        assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('int64')).GetType() == ogr.OFTInteger64, \
            i
        assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('int64list')).GetType() == ogr.OFTInteger64List, \
            i
        f = lyr.GetNextFeature()
        if f.GetField('b1') != 1 or f.GetField('b2') != 0 or f.GetFieldAsString('bool_list') != '(2:1,0)' or f.GetField('short') != -32768 or f.GetField('float') != 1.23:
            f.DumpReadable()
            pytest.fail(i)
        f = lyr.GetNextFeature()
        if f.GetFID() != 1234567890123 or f.GetField('int64') != 1234567890123 or f.GetField('int64list') != [1, 1234567890123]:
            f.DumpReadable()
            pytest.fail(i)
        ds = None
        # Remove the .xsd after pass 0 so pass 1 generates a .gfs.
        gdal.Unlink(filename[0:-3] + "xsd")
    gdal.Unlink(filename)
    gdal.Unlink(filename[0:-3] + "gfs")
###############################################################################
# Test reading GML with an xsd offering a choice of geometry properties
def test_ogr_gml_68():
    """Read GML whose .xsd declares a choice of geometry properties."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    dataset = ogr.Open('data/choicepolygonmultipolygon.gml')
    layer = dataset.GetLayer(0)
    assert layer.GetGeomType() == ogr.wkbMultiPolygon, \
        ' did not get expected layer geometry type'
    expected_wkts = ['MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))',
                     'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)),((10 0,10 1,11 1,11 0,10 0)))']
    for expected_wkt in expected_wkts:
        feature = layer.GetNextFeature()
        assert feature.GetGeometryRef().ExportToWkt() == expected_wkt, \
            'did not get expected geometry'
    dataset = None
###############################################################################
# Test not nullable fields
def test_ogr_gml_69():
    """Test not-nullable fields and geometry fields.

    Creating a feature that leaves a not-nullable (geometry) field unset
    must fail, and the nullability flags must survive a round-trip.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()
    ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_69.gml')
    lyr = ds.CreateLayer('test', geom_type=ogr.wkbNone)
    field_defn = ogr.FieldDefn('field_not_nullable', ogr.OFTString)
    field_defn.SetNullable(0)
    lyr.CreateField(field_defn)
    field_defn = ogr.FieldDefn('field_nullable', ogr.OFTString)
    lyr.CreateField(field_defn)
    field_defn = ogr.GeomFieldDefn('geomfield_not_nullable', ogr.wkbPoint)
    field_defn.SetNullable(0)
    lyr.CreateGeomField(field_defn)
    field_defn = ogr.GeomFieldDefn('geomfield_nullable', ogr.wkbPoint)
    lyr.CreateGeomField(field_defn)
    # Valid feature: both not-nullable fields populated.
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField('field_not_nullable', 'not_null')
    f.SetGeomFieldDirectly('geomfield_not_nullable', ogr.CreateGeometryFromWkt('POINT(0 0)'))
    lyr.CreateFeature(f)
    f = None
    # Error case: missing geometry
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField('field_not_nullable', 'not_null')
    gdal.PushErrorHandler()
    ret = lyr.CreateFeature(f)
    gdal.PopErrorHandler()
    assert ret != 0
    f = None
    # Error case: missing non-nullable field
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
    gdal.PushErrorHandler()
    ret = lyr.CreateFeature(f)
    gdal.PopErrorHandler()
    assert ret != 0
    f = None
    ds = None
    # Re-open (EMPTY_AS_NULL=NO keeps the declared nullability) and verify
    # that the IsNullable flags round-tripped.
    ds = gdal.OpenEx('/vsimem/ogr_gml_69.gml', open_options=['EMPTY_AS_NULL=NO'])
    lyr = ds.GetLayerByName('test')
    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_not_nullable')).IsNullable() == 0
    assert lyr.GetLayerDefn().GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex('field_nullable')).IsNullable() == 1
    assert lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_not_nullable')).IsNullable() == 0
    assert lyr.GetLayerDefn().GetGeomFieldDefn(lyr.GetLayerDefn().GetGeomFieldIndex('geomfield_nullable')).IsNullable() == 1
    ds = None
    gdal.Unlink("/vsimem/ogr_gml_69.gml")
    gdal.Unlink("/vsimem/ogr_gml_69.xsd")
###############################################################################
# Test default fields (not really supported, but we must do something as we
# support not nullable fields)
def test_ogr_gml_70():
    """Default field values round-trip through GML.

    Not really supported by the format, but something must be done since
    not-nullable fields are supported.
    """
    if not gdaltest.have_gml_reader:
        pytest.skip()

    out_ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_70.gml')
    layer = out_ds.CreateLayer('test', geom_type=ogr.wkbNone)
    fld = ogr.FieldDefn('field_string', ogr.OFTString)
    fld.SetDefault("'a'")
    fld.SetNullable(0)
    layer.CreateField(fld)
    # Feature written without setting the field: the default must kick in.
    layer.CreateFeature(ogr.Feature(layer.GetLayerDefn()))
    out_ds = None

    in_ds = ogr.Open('/vsimem/ogr_gml_70.gml')
    feat = in_ds.GetLayerByName('test').GetNextFeature()
    if feat.GetField('field_string') != 'a':
        feat.DumpReadable()
        pytest.fail()
    in_ds = None

    gdal.Unlink("/vsimem/ogr_gml_70.gml")
    gdal.Unlink("/vsimem/ogr_gml_70.xsd")
###############################################################################
# Test reading WFS 2.0 layer resulting from a join operation
def ogr_gml_71_helper(ds):
    """Validate a WFS 2.0 join layer (table1 joined with table2).

    Checks the layer name, the flattened "<table>.<field>" schema, the two
    per-table geometry fields, and the content of the first two features.
    """
    assert ds.GetLayerCount() == 1
    lyr = ds.GetLayer(0)
    assert lyr.GetName() == 'join_table1_table2'
    # Expected (name, type) pairs, in declaration order.
    fields = [('table1.gml_id', ogr.OFTString),
              ('table1.foo', ogr.OFTInteger),
              ('table1.bar', ogr.OFTInteger),
              ('table2.gml_id', ogr.OFTString),
              ('table2.bar', ogr.OFTInteger),
              ('table2.baz', ogr.OFTString)]
    layer_defn = lyr.GetLayerDefn()
    assert layer_defn.GetFieldCount() == len(fields)
    for i, field in enumerate(fields):
        fld_defn = layer_defn.GetFieldDefn(i)
        assert fld_defn.GetName() == field[0], i
        assert fld_defn.GetType() == field[1], i
    # One geometry field per joined table.
    assert layer_defn.GetGeomFieldCount() == 2
    assert layer_defn.GetGeomFieldDefn(0).GetName() == 'table1.geometry'
    assert layer_defn.GetGeomFieldDefn(1).GetName() == 'table2.geometry'
    # First feature: table1 side has no geometry and 'bar' unset.
    f = lyr.GetNextFeature()
    if f.GetField('table1.gml_id') != 'table1-1' or \
       f.GetField('table1.foo') != 1 or \
       f.IsFieldSet('table1.bar') or \
       f.GetField('table2.gml_id') != 'table2-1' or \
       f.GetField('table2.bar') != 2 or \
       f.GetField('table2.baz') != 'foo' or \
       f.GetGeomFieldRef(0) is not None or \
       f.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2 49)':
        f.DumpReadable()
        pytest.fail()
    # Second feature: both geometries present, 'foo' unset on table1 side.
    f = lyr.GetNextFeature()
    if f.GetField('table1.gml_id') != 'table1-2' or \
       f.IsFieldSet('table1.foo') or \
       f.GetField('table1.bar') != 2 or \
       f.GetField('table2.gml_id') != 'table2-2' or \
       f.GetField('table2.bar') != 2 or \
       f.GetField('table2.baz') != 'bar' or \
       f.GetGeomFieldRef(0).ExportToWkt() != 'POINT (3 50)' or \
       f.GetGeomFieldRef(1).ExportToWkt() != 'POINT (2 50)':
        f.DumpReadable()
        pytest.fail()
def test_ogr_gml_71():
    """Read a WFS 2.0 join layer: with an .xsd, with a partially-understood
    .xsd, without any side file, and finally from a generated .gfs."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # With .xsd: no .gfs must be generated.
    gdal.Unlink('data/wfsjointlayer.gfs')
    ds = ogr.Open('data/wfsjointlayer.gml')
    ogr_gml_71_helper(ds)
    ds = None

    with pytest.raises(OSError):
        os.unlink('data/wfsjointlayer.gfs')

    # With an .xsd that is only partially understood: a .gfs IS generated.
    ds = gdal.OpenEx('data/wfsjointlayer.gml',
                     open_options=['XSD=data/wfsjointlayer_not_understood.xsd'])
    ogr_gml_71_helper(ds)
    ds = None

    try:
        os.unlink('data/wfsjointlayer.gfs')
    except OSError:
        pytest.fail()

    # Without .xsd nor .gfs: work from a copy so the original's side files
    # do not interfere; a .gfs must be generated.
    shutil.copy('data/wfsjointlayer.gml', 'tmp/wfsjointlayer.gml')
    gdal.Unlink('tmp/wfsjointlayer.gfs')
    ds = ogr.Open('tmp/wfsjointlayer.gml')
    ogr_gml_71_helper(ds)
    ds = None

    try:
        os.stat('tmp/wfsjointlayer.gfs')
    except OSError:
        pytest.fail()

    # With the .gfs generated just above.
    ds = ogr.Open('tmp/wfsjointlayer.gml')
    ogr_gml_71_helper(ds)
    ds = None
###############################################################################
# Test name and description
def test_ogr_gml_72():
    """Dataset NAME/DESCRIPTION: creation options win over SetMetadata;
    SetMetadata alone is honoured when no creation options are given."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # (creation options or None, metadata passed to SetMetadata)
    scenarios = [
        (['NAME=name', 'DESCRIPTION=description'],
         {'NAME': 'ignored', 'DESCRIPTION': 'ignored'}),
        (None,
         {'NAME': 'name', 'DESCRIPTION': 'description'}),
    ]
    for creation_options, metadata in scenarios:
        driver = ogr.GetDriverByName('GML')
        if creation_options is None:
            ds = driver.CreateDataSource('/vsimem/ogr_gml_72.gml')
        else:
            ds = driver.CreateDataSource('/vsimem/ogr_gml_72.gml',
                                         options=creation_options)
        ds.SetMetadata(metadata)
        ds = None

        ds = ogr.Open('/vsimem/ogr_gml_72.gml')
        assert ds.GetMetadata() == {'NAME': 'name', 'DESCRIPTION': 'description'}
        ds = None

        gdal.Unlink("/vsimem/ogr_gml_72.gml")
        gdal.Unlink("/vsimem/ogr_gml_72.xsd")
        gdal.Unlink("/vsimem/ogr_gml_72.gfs")
###############################################################################
# Read a CSW GetRecordsResponse document
def test_ogr_gml_73():
    """Read a CSW GetRecordsResponse document."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    def _drop_gfs():
        # Remove any generated .gfs so detection is exercised from scratch.
        try:
            os.remove('data/cswresults.gfs')
        except OSError:
            pass

    _drop_gfs()
    ds = ogr.Open('data/cswresults.xml')
    for layer_index in range(3):
        layer = ds.GetLayer(layer_index)
        got_wkt = layer.GetSpatialRef().ExportToWkt()
        assert '4326' in got_wkt, 'did not get expected SRS'

        feature = layer.GetNextFeature()
        got_wkt = feature.GetGeometryRef().ExportToWkt()
        assert got_wkt == 'POLYGON ((-180 -90,-180 90,180 90,180 -90,-180 -90))', \
            'did not get expected geometry'
    ds = None
    _drop_gfs()
###############################################################################
# Test FORCE_SRS_DETECTION open option
def test_ogr_gml_74():
    """FORCE_SRS_DETECTION open option: with an .xsd, without any side
    file, and with a generated .gfs."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    # With .xsd
    ds = gdal.OpenEx('data/expected_gml_gml32.gml',
                     open_options=['FORCE_SRS_DETECTION=YES'])
    layer = ds.GetLayer(0)
    assert layer.GetSpatialRef() is not None, 'did not get expected SRS'
    assert layer.GetFeatureCount() == 2, 'did not get expected feature count'

    shutil.copy('data/expected_gml_gml32.gml', 'tmp/ogr_gml_74.gml')
    if os.path.exists('tmp/ogr_gml_74.gfs'):
        os.unlink('tmp/ogr_gml_74.gfs')

    # Without .xsd nor .gfs
    ds = gdal.OpenEx('tmp/ogr_gml_74.gml',
                     open_options=['FORCE_SRS_DETECTION=YES'])
    layer = ds.GetLayer(0)
    assert layer.GetSpatialRef() is not None, 'did not get expected SRS'
    assert layer.GetFeatureCount() == 2, 'did not get expected feature count'

    # With .gfs
    ds = gdal.OpenEx('tmp/ogr_gml_74.gml',
                     open_options=['FORCE_SRS_DETECTION=YES'])
    layer = ds.GetLayer(0)
    assert layer.GetSpatialRef() is not None, 'did not get expected SRS'
    assert layer.GetFeatureCount() == 2, 'did not get expected feature count'
    ds = None

    os.unlink('tmp/ogr_gml_74.gml')
    os.unlink('tmp/ogr_gml_74.gfs')
###############################################################################
# Test we don't open a WMTS Capabilities doc
def test_ogr_gml_75():
    """A WMTS Capabilities document must NOT be opened by the GML driver."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.FileFromMemBuffer("/vsimem/ogr_gml_75.xml",
                           """<?xml version="1.0" encoding="UTF-8"?>
<Capabilities xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://somewhere"
version="1.0.0">
        <ows:OperationsMetadata>
    <ows:Operation name="GetCapabilities">
      <ows:DCP>
        <ows:HTTP>
          <ows:Get xlink:href="http://foo"/>
        </ows:HTTP>
      </ows:DCP>
    </ows:Operation>
    <ows:Operation name="GetTile">
      <ows:DCP>
        <ows:HTTP>
          <ows:Get xlink:href="http://foo"/>
        </ows:HTTP>
      </ows:DCP>
    </ows:Operation>
  </ows:OperationsMetadata>
</Capabilities>""")
    # Despite containing a gml: namespace declaration, this is not a GML
    # feature document, so Open() must reject it.
    ds = ogr.Open('/vsimem/ogr_gml_75.xml')
    assert ds is None
    gdal.Unlink('/vsimem/ogr_gml_75.xml')
###############################################################################
# Test we are robust to content of XML elements bigger than 2 GB
def test_ogr_gml_76():
    """Robustness to XML element content bigger than 2 GB (slow test)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    if not gdaltest.run_slow_tests():
        pytest.skip()

    with gdaltest.error_handler():
        # Only checks that we neither crash nor hang while reading.
        for sparse_path in ('/vsisparse/data/huge_attribute_gml_sparse.xml',
                            '/vsisparse/data/huge_geom_gml_sparse.xml'):
            ds = ogr.Open(sparse_path)
            if ds is not None:
                ds.GetLayer(0).GetNextFeature()
###############################################################################
# Test interpretation of http://www.opengis.net/def/crs/EPSG/0/ URLs (#6678)
def test_ogr_gml_77():
    """Interpretation of http://www.opengis.net/def/crs/EPSG/0/ srsName URLs
    (#6678): axis order is swapped to x/y by default, and SWAP_COORDINATES
    overrides that behaviour in both directions."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Point stored in lat/long order (49 2), as mandated by the EPSG:4326 URL.
    gdal.FileFromMemBuffer("/vsimem/ogr_gml_77.xml",
                           """<?xml version="1.0" encoding="utf-8" ?>
<ogr:FeatureCollection
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xmlns:ogr="http://ogr.maptools.org/"
     xmlns:gml="http://www.opengis.net/gml">
  <ogr:featureMember>
    <ogr:point gml:id="point.0">
      <ogr:geometryProperty><gml:Point srsName="http://www.opengis.net/def/crs/EPSG/0/4326"><gml:pos>49 2</gml:pos></gml:Point></ogr:geometryProperty>
      <ogr:id>1</ogr:id>
    </ogr:point>
  </ogr:featureMember>
</ogr:FeatureCollection>
""")
    # Default: coordinates swapped to x/y, AXIS nodes stripped from the SRS.
    ds = ogr.Open('/vsimem/ogr_gml_77.xml')
    lyr = ds.GetLayer(0)
    assert lyr.GetSpatialRef().ExportToWkt().find('AXIS') < 0
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
    ds = None

    # Explicitly requesting a swap gives the same result here.
    ds = gdal.OpenEx('/vsimem/ogr_gml_77.xml', open_options=['SWAP_COORDINATES=YES'])
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
    ds = None

    # Disabling the swap keeps the stored lat/long order.
    ds = gdal.OpenEx('/vsimem/ogr_gml_77.xml', open_options=['SWAP_COORDINATES=NO'])
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (49 2)'
    ds = None

    gdal.Unlink('/vsimem/ogr_gml_77.xml')
    gdal.Unlink('/vsimem/ogr_gml_77.gfs')
###############################################################################
# Test effect of SWAP_COORDINATES (#6678)
def test_ogr_gml_78():
    """Effect of SWAP_COORDINATES (#6678) with a plain "EPSG:4326" srsName:
    no swap by default, explicit YES/NO force either behaviour."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    # Point stored in x/y order (2 49): plain EPSG:xxxx codes imply x/y.
    gdal.FileFromMemBuffer("/vsimem/ogr_gml_78.xml",
                           """<?xml version="1.0" encoding="utf-8" ?>
<ogr:FeatureCollection
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xmlns:ogr="http://ogr.maptools.org/"
     xmlns:gml="http://www.opengis.net/gml">
  <ogr:featureMember>
    <ogr:point gml:id="point.0">
      <ogr:geometryProperty><gml:Point srsName="EPSG:4326"><gml:pos>2 49</gml:pos></gml:Point></ogr:geometryProperty>
      <ogr:id>1</ogr:id>
    </ogr:point>
  </ogr:featureMember>
</ogr:FeatureCollection>
""")
    # Default: no swap, AXIS nodes stripped from the SRS.
    ds = ogr.Open('/vsimem/ogr_gml_78.xml')
    lyr = ds.GetLayer(0)
    assert lyr.GetSpatialRef().ExportToWkt().find('AXIS') < 0
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
    ds = None

    # Forcing a swap reverses the stored order.
    ds = gdal.OpenEx('/vsimem/ogr_gml_78.xml', open_options=['SWAP_COORDINATES=YES'])
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (49 2)'
    ds = None

    # Explicitly disabling the swap matches the default here.
    ds = gdal.OpenEx('/vsimem/ogr_gml_78.xml', open_options=['SWAP_COORDINATES=NO'])
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
    ds = None

    gdal.Unlink('/vsimem/ogr_gml_78.xml')
    gdal.Unlink('/vsimem/ogr_gml_78.gfs')
###############################################################################
# Test SRSNAME_FORMAT
def test_ogr_gml_79():
    """SRSNAME_FORMAT dataset creation option controls both the srsName
    syntax and the axis order of the written coordinates."""
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)

    # (SRSNAME_FORMAT value, expected srsName string, expected coordinates)
    tests = [['SHORT', 'EPSG:4326', '2 49'],
             ['OGC_URN', 'urn:ogc:def:crs:EPSG::4326', '49 2'],
             ['OGC_URL', 'http://www.opengis.net/def/crs/EPSG/0/4326', '49 2']
             ]
    for (srsname_format, expected_srsname, expected_coords) in tests:
        ds = ogr.GetDriverByName('GML').CreateDataSource(
            '/vsimem/ogr_gml_79.xml',
            options=['FORMAT=GML3', 'SRSNAME_FORMAT=' + srsname_format])
        lyr = ds.CreateLayer('firstlayer', srs=sr)
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
        lyr.CreateFeature(feat)
        ds = None

        f = gdal.VSIFOpenL("/vsimem/ogr_gml_79.xml", "rb")
        # Fix: previously the content checks were only run when the file
        # could be opened, so a missing output made the test pass vacuously
        # (or fail with a NameError on 'data'). Fail loudly instead.
        assert f is not None, srsname_format
        data = gdal.VSIFReadL(1, 10000, f).decode('utf-8')
        gdal.VSIFCloseL(f)

        assert expected_srsname in data and expected_coords in data, \
            srsname_format

    gdal.Unlink('/vsimem/ogr_gml_79.xml')
    gdal.Unlink('/vsimem/ogr_gml_79.xsd')
###############################################################################
# Test null / unset
def test_ogr_gml_80():
    """Set / explicitly-null / unset field states survive a GML round-trip."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_80.xml')
    layer = ds.CreateLayer('test', geom_type=ogr.wkbNone)
    layer.CreateField(ogr.FieldDefn('int_field', ogr.OFTInteger))

    # Feature 1: value set; feature 2: explicitly null; feature 3: unset.
    feature = ogr.Feature(layer.GetLayerDefn())
    feature['int_field'] = 4
    layer.CreateFeature(feature)
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetFieldNull('int_field')
    layer.CreateFeature(feature)
    feature = ogr.Feature(layer.GetLayerDefn())
    layer.CreateFeature(feature)
    feature = None
    ds = None

    ds = ogr.Open('/vsimem/ogr_gml_80.xml')
    layer = ds.GetLayer(0)
    expectations = [
        lambda f: f['int_field'] == 4,
        lambda f: f['int_field'] is None,
        lambda f: not f.IsFieldSet('int_field'),
    ]
    for expectation in expectations:
        feature = layer.GetNextFeature()
        if not expectation(feature):
            feature.DumpReadable()
            pytest.fail()
    feature = None
    ds = None

    gdal.Unlink('/vsimem/ogr_gml_80.xml')
    gdal.Unlink('/vsimem/ogr_gml_80.xsd')
###############################################################################
# Test building a .gfs with a field with xsi:nil="true" (#7027)
def test_ogr_gml_81():
    """Building a .gfs works for a field carrying xsi:nil="true" (#7027)."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gdal.Unlink('data/test_xsi_nil_gfs.gfs')
    ds = ogr.Open('data/test_xsi_nil_gfs.gml')
    feature = ds.GetLayer(0).GetNextFeature()
    if feature.GetField('intval') != 1:
        feature.DumpReadable()
        pytest.fail()
    ds = None
    gdal.Unlink('data/test_xsi_nil_gfs.gfs')
###############################################################################
# Test GML_FEATURE_COLLECTION=YES
def test_ogr_gml_82():
    """GML_FEATURE_COLLECTION=YES writes a gml:FeatureCollection root and
    keeps the generated .xsd free of FeatureCollection/gmlsf declarations."""
    if not gdaltest.have_gml_reader:
        pytest.skip()

    gdal.VectorTranslate('/vsimem/ogr_gml_82.gml', 'data/poly.shp',
                         format='GML',
                         datasetCreationOptions=['FORMAT=GML3',
                                                 'GML_FEATURE_COLLECTION=YES'])

    ds = ogr.Open('/vsimem/ogr_gml_82.gml')
    lyr = ds.GetLayer(0)
    assert lyr.GetFeatureCount() == 10
    ds = None

    f = gdal.VSIFOpenL("/vsimem/ogr_gml_82.gml", "rb")
    # Fix: previously the content checks were wrapped in "if f is not None",
    # so a missing output file let the test pass vacuously. Fail loudly.
    assert f is not None
    data = gdal.VSIFReadL(1, 10000, f).decode('utf-8')
    gdal.VSIFCloseL(f)
    assert 'gml:FeatureCollection' in data

    f = gdal.VSIFOpenL("/vsimem/ogr_gml_82.xsd", "rb")
    assert f is not None
    data = gdal.VSIFReadL(1, 10000, f).decode('utf-8')
    gdal.VSIFCloseL(f)
    assert 'name = "FeatureCollection"' not in data
    assert 'gmlsf' not in data

    gdal.Unlink('/vsimem/ogr_gml_82.gml')
    gdal.Unlink('/vsimem/ogr_gml_82.xsd')
###############################################################################
def test_ogr_gml_gml2_write_geometry_error():
    """Writing a geometry collection containing a TIN in GML2 mode must
    report an error instead of crashing."""
    ds = ogr.GetDriverByName('GML').CreateDataSource('/vsimem/ogr_gml_83.gml')
    layer = ds.CreateLayer('test')
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(
        ogr.CreateGeometryFromWkt('GEOMETRYCOLLECTION(POINT(0 0), TIN EMPTY)'))
    with gdaltest.error_handler():
        layer.CreateFeature(feature)
    ds = None
    gdal.Unlink('/vsimem/ogr_gml_83.gml')
    gdal.Unlink('/vsimem/ogr_gml_83.xsd')
###############################################################################
# Cleanup
def test_ogr_gml_cleanup():
    """Final cleanup: reset GML config options and remove temporary files."""
    if not gdaltest.have_gml_reader:
        pytest.skip()
    gdal.SetConfigOption('GML_SKIP_RESOLVE_ELEMS', None)
    gdal.SetConfigOption('GML_SAVE_RESOLVED_TO', None)

    gdaltest.clean_tmp()

    # Report anything left behind in /vsimem/ (helps spot missing Unlinks).
    fl = gdal.ReadDir('/vsimem/')
    if fl is not None:
        print(fl)

    # NOTE(review): delegating to another test function is unusual for
    # pytest; kept as-is since it returns None anyway.
    return test_ogr_gml_clean_files()
def test_ogr_gml_clean_files():
    """Remove the side files generated by the GML tests.

    Replaces ~25 copy-pasted try/remove/except blocks with a single data
    table.  Files listed in the same group share one try block, preserving
    the original behaviour of abandoning the rest of a group once one
    removal fails (e.g. the .xsd is not touched if the .gml is missing).
    """
    def _remove_group(*paths):
        # Remove paths in order; stop the group silently on the first error.
        try:
            for path in paths:
                os.remove(path)
        except OSError:
            pass

    groups = [
        ('data/bom.gfs',),
        ('data/utf8.gfs',),
        ('data/ticket_2349_test_1.gfs',),
        ('data/citygml.gfs',),
        ('data/gnis_pop_100.gfs',),
        ('data/gnis_pop_110.gfs',),
        ('data/paris_typical_strike_demonstration.gfs',),
        ('data/global_geometry.gfs',),
        ('tmp/global_geometry.gfs',),
        ('tmp/global_geometry.xml',),
        ('data/curveProperty.gfs',),
        ('tmp/ogr_gml_26.gml', 'tmp/ogr_gml_26.xsd'),
        ('tmp/ogr_gml_27.gml', 'tmp/ogr_gml_27.xsd'),
        ('tmp/ogr_gml_28.gml', 'tmp/ogr_gml_28.gfs'),
        ('tmp/GmlTopo-sample.sqlite',),
        ('tmp/GmlTopo-sample.gfs',),
        ('tmp/GmlTopo-sample.resolved.gml',),
        ('tmp/GmlTopo-sample.xml',),
        ('tmp/sample_gml_face_hole_negative_no.sqlite',),
        ('tmp/sample_gml_face_hole_negative_no.gfs',),
        ('tmp/sample_gml_face_hole_negative_no.resolved.gml',),
        ('tmp/sample_gml_face_hole_negative_no.xml',),
        ('data/wfs_typefeature.gfs',),
        ('tmp/ogr_gml_51.gml', 'tmp/ogr_gml_51.xsd'),
        ('tmp/gmlattributes.gml', 'tmp/gmlattributes.gfs'),
    ]
    for group in groups:
        _remove_group(*group)

    # Remove any '*.resolved.gml' produced in data/ (strictly longer than
    # the bare suffix, matching the original length check).
    files = os.listdir('data')
    for filename in files:
        if len(filename) > 13 and filename[-13:] == '.resolved.gml':
            os.unlink('data/' + filename)
    gdal.Unlink('data/test_xsi_nil_gfs.gfs')
| 33.99427
| 522
| 0.59356
|
4a047a371c46e555832238041f0eb90f9214d29f
| 7,523
|
py
|
Python
|
quicklogic_fasm/qlfasm.py
|
QuickLogic-Corp/quicklogic-fasm
|
d525679a4b62d674f5941f366ac54a4629eeabd9
|
[
"Apache-2.0"
] | 1
|
2022-03-22T10:03:19.000Z
|
2022-03-22T10:03:19.000Z
|
quicklogic_fasm/qlfasm.py
|
QuickLogic-Corp/quicklogic-fasm
|
d525679a4b62d674f5941f366ac54a4629eeabd9
|
[
"Apache-2.0"
] | 8
|
2021-06-25T15:38:43.000Z
|
2022-01-26T12:33:27.000Z
|
quicklogic_fasm/qlfasm.py
|
QuickLogic-Corp/quicklogic-fasm
|
d525679a4b62d674f5941f366ac54a4629eeabd9
|
[
"Apache-2.0"
] | 1
|
2020-07-02T11:00:44.000Z
|
2020-07-02T11:00:44.000Z
|
#!/usr/bin/env python3
import argparse
import os
import errno
from pathlib import Path
from fasm_utils.database import Database
import pkg_resources
from quicklogic_fasm.qlassembler.pp3.ql725a import QL725AAssembler
from quicklogic_fasm.qlassembler.eos_s3.ql732b import QL732BAssembler
def load_quicklogic_database(db_root):
    '''Creates Database object for QuickLogic Fabric.

    Every "*.db" file found directly in ``db_root`` is registered as a
    table named after the file.

    Parameters
    ----------
    db_root: str
        A path to directory containing QuickLogic Database files

    Returns
    -------
    Database: Database object for QuickLogic
    '''
    database = Database(db_root)
    for entry in os.scandir(db_root):
        if not entry.is_file():
            continue
        if entry.name.endswith(".db"):
            database.add_table(os.path.basename(entry.name), entry.path)
    return database
def get_db_dir(dev_type):
    """Return the bundled fasm database directory for a device type.

    Parameters
    ----------
    dev_type: str
        One of "ql-pp3", "ql-eos-s3" or "ql-pp3e".

    Exits the process with EINVAL for an unknown device type.
    """
    # FIXME: "ql-pp3e" currently reuses the QL732B database; add proper
    # PP3E support.
    resource_by_dev_type = {
        "ql-pp3": 'ql725a',
        "ql-eos-s3": 'ql732b',
        "ql-pp3e": 'ql732b',
    }
    if dev_type not in resource_by_dev_type:
        # Typo fixed: was "Unsuported".
        print("Unsupported device type")
        exit(errno.EINVAL)
    return Path(pkg_resources.resource_filename(
        'quicklogic_fasm', resource_by_dev_type[dev_type]))
def main():
    """Command-line entry point.

    Assembles a FASM file into a bitstream, or (with --disassemble) decodes
    a bitstream back to FASM, for QuickLogic PP3 / EOS-S3 devices.
    Optionally dumps a CSV bitmap of the device configuration bits.
    Exits with an errno-style status code on bad input.
    """
    parser = argparse.ArgumentParser(
        description="Converts FASM file to the bitstream or the other way around"
    )
    parser.add_argument(
        "infile",
        type=Path,
        help="The input file (FASM, or bitstream when disassembling)"
    )
    parser.add_argument(
        "outfile",
        type=Path,
        help="The output file (bitstream, or FASM when disassembling)"
    )
    parser.add_argument(
        "--dev-type",
        type=str,
        required=True,
        help="Device type (supported: eos-s3, pp3)"
    )
    parser.add_argument(
        "--db-root",
        type=str,
        default=None,
        help="Path to the fasm database (defaults based on device type)"
    )
    parser.add_argument(
        "--default-bitstream",
        type=str,
        default=None,
        help="Path to an external default bitstream to overlay FASM on"
    )
    parser.add_argument(
        "--no-default-bitstream",
        action="store_true",
        help="Do not use any default bitstream (i.e. use all-zero blank)"
    )
    parser.add_argument(
        "-d", "--disassemble",
        action="store_true",
        help="Disasseble bitstream"
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Adds some verbose messages during bitstream production"
    )
    parser.add_argument(
        "--bitmap",
        type=str,
        default=None,
        help="Output CSV file with the device bitmap"
    )
    parser.add_argument(
        "--no-verify-checksum",
        action="store_false",
        dest="verify_checksum",
        help="Disable bitstream checksum verification on decoding"
    )

    args = parser.parse_args()

    # Resolve the fasm database directory; an explicit --db-root wins over
    # the per-device default.
    if args.db_root is not None:
        db_dir = args.db_root
    else:
        db_dir = get_db_dir(args.dev_type)

    # BUGFIX: Path.exists is a method; the previous "not args.infile.exists"
    # tested the truthiness of the bound method (always True), so a missing
    # input file was never detected.
    if not args.infile.exists():
        print("The input file does not exist")
        exit(errno.ENOENT)

    if not args.outfile.parent.is_dir():
        print("The path to file is not a valid directory")
        exit(errno.ENOTDIR)

    print("Using FASM database: {}".format(db_dir))
    db = load_quicklogic_database(db_dir)

    # Build the device-specific assembler.
    if args.dev_type == "ql-pp3":
        assembler = QL725AAssembler(db,
                                    spi_master=True,
                                    osc_freq=False,
                                    cfg_write_chcksum_post=False,
                                    cfg_read_chcksum_post=False,
                                    cfg_done_out_mask=False,
                                    add_header=True,
                                    add_checksum=True,
                                    verify_checksum=args.verify_checksum)
    elif args.dev_type == "ql-eos-s3":
        assembler = QL732BAssembler(db)
    elif args.dev_type == "ql-pp3e":
        assembler = QL732BAssembler(db)  # FIXME: add proper PP3E support
    else:
        # Typo fixed: was "Unsuported".
        print("Unsupported device type")
        exit(errno.EINVAL)

    if not args.disassemble:
        # Overlay the FASM on top of a default bitstream, unless disabled.
        if not args.no_default_bitstream:
            if args.default_bitstream is not None:
                default_bitstream = args.default_bitstream
                if not os.path.isfile(default_bitstream):
                    print("The default bitstream '{}' does not exist".format(
                        default_bitstream
                    ))
                    exit(errno.ENOENT)
            else:
                default_bitstream = os.path.join(
                    db_dir, "default_bitstream.bin")
                if not os.path.isfile(default_bitstream):
                    print("WARNING: No default bitstream in the database")
                    default_bitstream = None

            if default_bitstream is not None:
                assembler.read_bitstream(default_bitstream)

        assembler.parse_fasm_filename(str(args.infile))
        if args.dev_type == "ql-pp3":
            # Producing 3 bitstream configurations:
            # 1. SPI master mode enabled (original filename)
            # 2. SPI slave mode enabled (filename with _spi_slave)
            # 3. No header and checksum (filename with _no_header_checksum)
            # NOTE(review): the same outfile path is passed every time, so
            # the assembler presumably derives the suffixed filenames
            # itself -- confirm in QL725AAssembler.
            assembler.produce_bitstream(str(args.outfile), verbose=args.verbose)
            assembler.set_spi_master(False)
            assembler.produce_bitstream(str(args.outfile), verbose=args.verbose)
            assembler.set_header(False)
            assembler.set_checksum(False)
            assembler.produce_bitstream(str(args.outfile), verbose=args.verbose)
        else:
            assembler.produce_bitstream(str(args.outfile), verbose=args.verbose)
    else:
        assembler.read_bitstream(str(args.infile))
        assembler.disassemble(str(args.outfile), verbose=args.verbose)

    # Write bitmap
    #
    # Writes a CSV file with MAXWL rows and MAXBL columns. Each fields
    # represents one bit. A field may take one of the values:
    # - 0x00 the bit is unused,
    # - 0x01 the bit is defined in the database,
    # - 0x02 the bit is set in the bitstream but not in the database,
    # - 0x03 the bit is set both in the bitstream and in thhe database.
    #
    if args.bitmap:
        total_bits = assembler.MAXWL * assembler.MAXBL
        bitmap = bytearray(total_bits)

        # Mark bits present in the database
        for entry in assembler.db:
            for coord in entry.coords:
                ofs = coord.x * assembler.MAXBL + coord.y
                bitmap[ofs] |= 0x1

        # Mark bits set in the bitstream
        for wl in range(assembler.MAXWL):
            for bl in range(assembler.MAXBL):
                bit = assembler.configbits.get((wl, bl), 0)
                ofs = wl * assembler.MAXBL + bl
                if bit == 1:
                    bitmap[ofs] |= 0x2

        # Write to CSV
        with open(args.bitmap, "w") as fp:
            for wl in range(assembler.MAXWL):
                i0 = assembler.MAXBL * wl
                i1 = assembler.MAXBL * (wl + 1)
                line = ",".join([str(b) for b in bitmap[i0:i1]]) + "\n"
                fp.write(line)
# Script entry point: only run the converter when executed directly.
if __name__ == "__main__":
    main()
| 32.012766
| 114
| 0.589393
|
4a047a424df37419b1b0b550af76fe2cbcc5c7c1
| 4,756
|
py
|
Python
|
__init__.py
|
NeonGeckoCom/skill-about
|
8d29702b8e903d48fb809f31f0456c388d40cfbc
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
NeonGeckoCom/skill-about
|
8d29702b8e903d48fb809f31f0456c388d40cfbc
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
NeonGeckoCom/skill-about
|
8d29702b8e903d48fb809f31f0456c388d40cfbc
|
[
"BSD-3-Clause"
] | null | null | null |
# NEON AI (TM) SOFTWARE, Software Development Kit & Application Framework
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2022 Neongecko.com Inc.
# Contributors: Daniel McKnight, Guy Daniels, Elon Gasper, Richard Leeds,
# Regina Bloomstine, Casimiro Ferreira, Andrii Pernatii, Kirill Hrymailo
# BSD-3 License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from neon_utils.skills.neon_skill import NeonSkill
from adapt.intent import IntentBuilder
from os import listdir, path
from mycroft.skills import skill_api_method
class AboutSkill(NeonSkill):
    """Skill that reads the Neon license aloud and lists installed skills."""

    def __init__(self):
        super(AboutSkill, self).__init__(name="AboutSkill")
        # List of per-skill metadata dicts; populated by _update_skills_data.
        self.skill_info = None
        self._update_skills_data()

    def initialize(self):
        """Register the license-reading and skill-listing intents."""
        license_intent = IntentBuilder("license_intent").\
            optionally("Neon").optionally("Long").require("Tell").require("License").build()
        self.register_intent(license_intent, self.read_license)

        list_skills_intent = IntentBuilder("list_skills_intent").optionally("Neon").optionally("Tell").\
            require("Skills").build()
        self.register_intent(list_skills_intent, self.list_skills)

        # TODO: Reload skills list when skills are added/removed DM

    def read_license(self, message):
        """
        Reads back the NeonAI license from skill dialog.
        Speaks the long variant when the "Long" keyword was matched.
        :param message: Message associated with request
        """
        if self.neon_in_request(message):
            if message.data.get("Long"):
                self.speak_dialog("license_long")
            else:
                self.speak_dialog("license_short")

    def list_skills(self, message):
        """
        Lists all installed skills by name (alphabetically).
        :param message: Message associated with request
        """
        if self.neon_in_request(message):
            # Skip entries without a 'title' (malformed skill.json).
            skills_list = [s['title'] for s in self.skill_info if s.get('title')]
            skills_list.sort()
            skills_to_speak = ", ".join(skills_list)
            self.speak_dialog("skills_list", data={"list": skills_to_speak})

    @skill_api_method
    def skill_info_examples(self):
        """
        API Method to build a flat list of examples as listed in skill metadata.
        """
        examples = [d.get('examples') or list() for d in self.skill_info]
        flat_list = [item for sublist in examples for item in sublist]
        return flat_list

    def _update_skills_data(self):
        """
        Loads skill metadata for all installed skills into self.skill_info.

        Scans the parent skills directory for packages; reads skill.json
        where present, otherwise derives a title from the directory name.
        """
        skills = list()
        # NOTE(review): assumes this skill lives directly inside the skills
        # directory, so two dirname() calls reach it -- confirm deployment.
        skills_dir = path.dirname(path.dirname(__file__))
        for skill in listdir(skills_dir):
            if path.isdir(path.join(skills_dir, skill)) and path.isfile(path.join(skills_dir, skill, "__init__.py")):
                if path.isfile(path.join(skills_dir, skill, "skill.json")):
                    with open(path.join(skills_dir, skill, "skill.json")) as f:
                        skill_data = json.load(f)
                else:
                    # Fallback title: "my-skill.foo" -> "my skill"
                    skill_name = str(path.basename(skill).split('.')[0]).replace('-', ' ').lower()
                    skill_data = {"title": skill_name}
                skills.append(skill_data)
        self.skill_info = skills

    def stop(self):
        # Nothing to interrupt; speaking is handled by the framework.
        pass
def create_skill():
    """Factory used by the skill loader to instantiate this skill."""
    return AboutSkill()
| 44.037037
| 117
| 0.684609
|
4a047a57bcb9fcca8618531f3025a8f20b92ffc7
| 6,921
|
py
|
Python
|
cogs/DebugMenuCog.py
|
jakenjarvis/Lakshmi
|
de805f7488c1a6b3a4e0d3804be7ecd6c814b446
|
[
"Apache-2.0"
] | 1
|
2020-08-24T01:31:20.000Z
|
2020-08-24T01:31:20.000Z
|
cogs/DebugMenuCog.py
|
jakenjarvis/Lakshmi
|
de805f7488c1a6b3a4e0d3804be7ecd6c814b446
|
[
"Apache-2.0"
] | null | null | null |
cogs/DebugMenuCog.py
|
jakenjarvis/Lakshmi
|
de805f7488c1a6b3a4e0d3804be7ecd6c814b446
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Union
import random
import asyncio
import aiohttp
import discord
from discord.ext import commands, menus
import LakshmiErrors
from contents.character.Investigator import Investigator
from contents.character.generator.CharacterGenerator import CharacterGenerator
from contents.character.InvestigatorEmbedCreator import InvestigatorEmbedCreator
from contents.character.generator.ParameterComplementary import ParameterComplementary
class DebugMenuCog(commands.Cog, name='Debug開発系'):
    """Debug/development commands: reaction-menu demos and random
    investigator/name generation."""

    def __init__(self, bot):
        self.bot = bot
        #self.bot.storage

    @commands.group(hidden=True)
    async def debug(self, context: commands.Context):
        # Group entry point; a bare "debug" without a subcommand is an error.
        if context.invoked_subcommand is None:
            raise LakshmiErrors.SubcommandNotFoundException()

    @debug.command()
    async def menu(self, context: commands.Context):
        """Demo of a simple reaction menu."""
        menu = MyMenu()
        await menu.start(context)

    @debug.command()
    async def confirm(self, context: commands.Context):
        """Demo of a yes/no confirmation prompt."""
        confirm = await Confirm('Delete everything?').prompt(context)
        if confirm:
            await context.send('deleted...')

    @debug.command()
    async def mysource(self, context: commands.Context):
        """Demo of list pagination (numbers 1..99, 4 per page)."""
        pages = menus.MenuPages(source=MySource(range(1, 100)), clear_reactions_after=True)
        await pages.start(context)

    @debug.command()
    async def groupby(self, context: commands.Context):
        """Demo of grouped pagination over the module-level sample data."""
        pages = menus.MenuPages(source=GroupBySource(data, key=lambda t: t.key, per_page=12), clear_reactions_after=True)
        await pages.start(context)

    @debug.command()
    async def asynciter(self, context: commands.Context):
        """Demo of pagination backed by an async iterator."""
        pages = menus.MenuPages(source=AsyncIteratorSource(), clear_reactions_after=True)
        await pages.start(context)

    @debug.group()
    async def name(self, context: commands.Context):
        # Subgroup for random name generation; requires male/female subcommand.
        if context.invoked_subcommand is None:
            raise LakshmiErrors.SubcommandNotFoundException()

    @name.command(aliases=['m'])
    async def male(self, context: commands.Context):
        """Generate and post one random male name."""
        stock = []
        name = await self.generate_name(context, "male")
        stock.append(f'{name}')
        # NOTE(review): discord.py's Bot has no send(); presumably Lakshmi's
        # bot wrapper forwards this to the current channel -- confirm.
        await self.bot.send("\n".join(stock))

    @name.command(aliases=['f'])
    async def female(self, context: commands.Context):
        """Generate and post one random female name."""
        stock = []
        name = await self.generate_name(context, "female")
        stock.append(f'{name}')
        await self.bot.send("\n".join(stock))

    async def generate_name(self, context: commands.Context, gender: str):
        """Fetch a random person name ("kanji(kana)") from the name API.

        NOTE(review): on a non-200 response, data stays "" and the indexing
        below raises TypeError -- consider handling the failure explicitly.
        """
        data = ""
        request_url = self.bot.storage.environment.get_person_name_api_url()
        params = {"gender": f"{gender}"}
        async with aiohttp.ClientSession() as session:
            async with session.get(request_url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
        return f'{data["kanji_name"]}({data["kana_name"]})'

    @debug.group(aliases=['g'])
    async def generate(self, context: commands.Context):
        """Generate a random investigator, name it, and post its status embed."""
        parameter = ParameterComplementary()
        name = await self.generate_name(context, parameter.gender)

        generator = CharacterGenerator()
        generator.generate(parameter)
        generator.investigator.personal_data.name = name

        embed = InvestigatorEmbedCreator.create_generate_character_status(generator.investigator)
        await self.bot.send(embed=embed)
# --------------------------------------------------
#
# --------------------------------------------------
class MyMenu(menus.Menu):
    """Minimal reaction-menu demo: thumbs up/down edit the message,
    the stop button ends the menu."""

    async def send_initial_message(self, ctx, channel):
        # The message the reaction buttons get attached to.
        return await channel.send(f'Hello {ctx.author}')

    @menus.button('\N{THUMBS UP SIGN}')
    async def on_thumbs_up(self, payload):
        await self.message.edit(content=f'Thanks {self.ctx.author}!')

    @menus.button('\N{THUMBS DOWN SIGN}')
    async def on_thumbs_down(self, payload):
        await self.message.edit(content=f"That's not nice {self.ctx.author}...")

    @menus.button('\N{BLACK SQUARE FOR STOP}\ufe0f')
    async def on_stop(self, payload):
        self.stop()
# --------------------------------------------------
#
# --------------------------------------------------
class Confirm(menus.Menu):
    """Yes/no confirmation dialog driven by reaction buttons.

    ``prompt`` blocks until the user reacts (or the 30 s timeout hits)
    and returns True, False, or None on timeout.
    """
    def __init__(self, msg):
        super().__init__(timeout=30.0, delete_message_after=True)
        self.result = None
        self.msg = msg
    def _finish(self, outcome):
        # Record the user's answer and shut the menu down.
        self.result = outcome
        self.stop()
    async def send_initial_message(self, ctx, channel):
        return await channel.send(self.msg)
    @menus.button('\N{WHITE HEAVY CHECK MARK}')
    async def do_confirm(self, payload):
        self._finish(True)
    @menus.button('\N{CROSS MARK}')
    async def do_deny(self, payload):
        self._finish(False)
    async def prompt(self, ctx):
        await self.start(ctx, wait=True)
        return self.result
# --------------------------------------------------
#
# --------------------------------------------------
class MySource(menus.ListPageSource):
    """Paginated list source showing four numbered entries per page."""
    def __init__(self, data):
        super().__init__(data, per_page=4)
    async def format_page(self, menu, entries):
        first_index = menu.current_page * self.per_page
        numbered = (f'{idx}. {item}'
                    for idx, item in enumerate(entries, start=first_index))
        return '\n'.join(numbered)
# --------------------------------------------------
#
# --------------------------------------------------
class Test:
    """Simple key/value record used by the group-by pagination demo."""
    def __init__(self, key, value):
        self.value = value
        self.key = key
# 60 demo records: every (key, value) pair for three keys x values 0-19.
data = [
    Test(key=group, value=number)
    for group in ['test', 'other', 'okay']
    for number in range(20)
]
class GroupBySource(menus.GroupByPageSource):
    """Pagination source rendering one group of Test records per page."""
    async def format_page(self, menu, entry):
        rows = [f'{n}. <Test value={item.value}>'
                for n, item in enumerate(entry.items, start=1)]
        body = '\n'.join(rows)
        page_no = menu.current_page + 1
        return f'**{entry.key}**\n{body}\nPage {page_no}/{self.get_max_pages()}'
# --------------------------------------------------
#
# --------------------------------------------------
class Test2:
    """Record with a single value; repr mirrors the Test demo output."""
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return f'<Test value={self.value}>'
async def generate(number):
    """Asynchronously yield Test2(0) .. Test2(number - 1) in order."""
    index = 0
    while index < number:
        yield Test2(index)
        index += 1
class AsyncIteratorSource(menus.AsyncIteratorPageSource):
    """Pages over the async ``generate`` iterator, four entries at a time."""
    def __init__(self):
        super().__init__(generate(9), per_page=4)
    async def format_page(self, menu, entries):
        base = menu.current_page * self.per_page
        lines = [f'{pos}. {entry!r}'
                 for pos, entry in enumerate(entries, start=base)]
        return '\n'.join(lines)
# --------------------------------------------------
#
# --------------------------------------------------
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded.
    bot.add_cog(DebugMenuCog(bot))
| 33.597087
| 122
| 0.583731
|
4a047d042824776f74da10006c9f57769a1f634f
| 1,433
|
py
|
Python
|
model.py
|
LanJosh/QuestionAnswer
|
38066e3268ba1a95640ae87f4066bf7223c1ad4f
|
[
"MIT"
] | null | null | null |
model.py
|
LanJosh/QuestionAnswer
|
38066e3268ba1a95640ae87f4066bf7223c1ad4f
|
[
"MIT"
] | null | null | null |
model.py
|
LanJosh/QuestionAnswer
|
38066e3268ba1a95640ae87f4066bf7223c1ad4f
|
[
"MIT"
] | null | null | null |
"""
Model for extracting answer sentence from the context paragraph.
"""
from nltk.tokenize.moses import MosesDetokenizer
from tqdm import tqdm
import json
import numpy as np
def cosine_similarity(x, y):
    """
    Compute the cosine similarity between two vectors x and y
    """
    dot = x.dot(y)
    norm_x = np.sqrt(x.dot(x))
    norm_y = np.sqrt(y.dot(y))
    return dot / (norm_x * norm_y)
def sent_embed(x, word2vec):
    """
    Combine the word vectors of a sentence to get a single vector
    representation of the entire sentence
    """
    # Sum the 300-d vectors of every in-vocabulary token; unknown
    # tokens are silently skipped, matching the lookup guard.
    total = np.zeros((300,))
    for vec in (np.array(word2vec[w]) for w in x if w in word2vec):
        total += vec
    return total
def run():
    """Predict, for each question, the context sentence most similar to it.

    Reads data.json (keys 'word2vec', 'contexts', 'questions'; each
    context is a list of tokenised sentences, paired positionally with a
    list of tokenised questions) and returns the detokenised
    best-matching sentence per question.

    NOTE(review): single-sentence contexts are skipped entirely, so the
    returned list is not aligned 1:1 with all questions — confirm this
    is intended by the evaluation code.
    """
    detokenizer = MosesDetokenizer()
    with open('data.json') as f:
        data = json.load(f)
    word2vec = data['word2vec']
    contexts = data['contexts']
    questions = data['questions']
    predictions = []
    for c,qs in tqdm(zip(contexts, questions), total=len(contexts)):
        if len(c) == 1:
            continue
        # Get vector embedding of context
        ce = []
        for sent in c:
            ct = sent_embed(sent,word2vec)
            ce.append(ct)
        # Get vector embedding of sentence
        # Find the most similar sentence in the context:
        # the context sentence whose embedding has the highest cosine
        # similarity to the question embedding wins.
        for q in qs:
            qe = sent_embed(q,word2vec)
            sims = [cosine_similarity(qe, cs) for cs in ce]
            max_sim = max(sims)
            idx = sims.index(max_sim)
            predictions.append(detokenizer.detokenize(c[idx], return_str=True))
    return predictions
| 25.589286
| 73
| 0.667132
|
4a047fbe7794f5aa8bc06feb615cb1db3a1cea7f
| 1,409
|
py
|
Python
|
combine_all_cities_and_sightings_locations.py
|
KyleS22/Air-Quality-Impact-On-UFO-Sightings
|
c715c4bd2afc0b3c3c443a0285e3392a85a36a84
|
[
"MIT"
] | null | null | null |
combine_all_cities_and_sightings_locations.py
|
KyleS22/Air-Quality-Impact-On-UFO-Sightings
|
c715c4bd2afc0b3c3c443a0285e3392a85a36a84
|
[
"MIT"
] | null | null | null |
combine_all_cities_and_sightings_locations.py
|
KyleS22/Air-Quality-Impact-On-UFO-Sightings
|
c715c4bd2afc0b3c3c443a0285e3392a85a36a84
|
[
"MIT"
] | null | null | null |
import csv
from tqdm import tqdm
US_CITIES_FILE = "all_us_cities.csv"
SIGHTINGS_FILE = "scrubbed.csv"

# Keys already present in the cities file, as "<city>_<state>" lowercase.
# A set makes the membership test below O(1) instead of O(n) per row.
cached_city_locations = set()

print("Processing City Locations:")
with open(US_CITIES_FILE) as cities_csv:
    city_lines = [line for line in cities_csv][1:]  # drop the header row
    for city in tqdm(csv.reader(city_lines), total=len(city_lines)):
        city_name = city[1]
        state = city[3]
        cached_city_locations.add(city_name.lower() + "_" + state.lower())

# BUGFIX: this pass reads the sightings file; the old message repeated
# "Processing City Locations:".  Also open the output file once instead
# of re-opening it for every new city.
print("\nProcessing Sighting Locations:")
with open(SIGHTINGS_FILE) as sightings_csv, open(US_CITIES_FILE, 'a') as out:
    writer = csv.writer(out)
    sighting_lines = [line for line in sightings_csv][1:]  # drop header
    for city in tqdm(csv.reader(sighting_lines), total=len(sighting_lines)):
        city_name = city[1].split("(")[0]  # strip "(...)" qualifiers
        lat = float(city[9])
        longitude = float(city[10])
        state = city[2]
        country = city[3]
        if country != "us":
            continue
        new_key = city_name.lower() + "_" + state.lower()
        if new_key not in cached_city_locations:
            # Mirror the all_us_cities.csv column layout; population
            # is unknown for sighting-only cities, so write '0'.
            writer.writerow(["us", city_name, city_name, state, '0', lat, longitude])
            cached_city_locations.add(new_key)
| 27.096154
| 83
| 0.635202
|
4a047fd378d7ba5a8d538f12eb64c31a3869320a
| 3,514
|
py
|
Python
|
cnfencoder/lexer.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
cnfencoder/lexer.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
cnfencoder/lexer.py
|
jreig/cnf-encoder
|
9f3f098bc0c7b5aaf87ae4bf949222ae80950a5a
|
[
"MIT"
] | null | null | null |
import re
from enum import Enum
# CONSTANT CHARACTERS
# Single-character terminals recognised by the Lexer below.
EOF = '\0'  # sentinel appended to the source to mark end of input
VAR_INIT = 'X'  # variables are spelled X followed by digits
OP_NEG = '¬' # (U+00AC)
OP_AND = '∧' # (U+2227)
OP_OR = '∨' # (U+2228)
OP_XOR = '⊕' # (U+2295)
OP_IMP = '→' # (U+2192)
OP_EQU = '↔' # (U+2194)
P_OPEN = '('
P_CLOSE = ')'
class TokenType(Enum):
    """Token categories produced by ``Lexer.getToken``."""
    EOF = -1
    LITERAL = 1  # a variable, possibly negated (e.g. "¬X1")
    NEGATION = 2
    AND = 3
    OR = 4
    XOR = 5
    IMPLICATION = 6
    EQUIVALENCE = 7
    PARENTHESES_OPEN = 8
    PARENTHESES_CLOSE = 9
class Token:
    """A lexed token: its source text plus a TokenType tag."""
    def __init__(self, tokenText, tokenType):
        self.type = tokenType
        self.text = tokenText
class Lexer:
    """Hand-written scanner for propositional-logic formulas.

    Variables are spelled ``X`` followed by digits; operators are the
    Unicode logic symbols defined at module level.  ``getToken`` returns
    one Token per call, ending with a TokenType.EOF token.
    """
    def __init__(self, input):
        self.source = self.preprocess(input)
        self.curChar = ''
        self.curPos = -1
        self.nextChar()
    def preprocess(self, input):
        # Remove double negation.
        # BUGFIX: the old pattern r'¬\s¬' required exactly one whitespace
        # character between the two negation signs, so '¬¬' and '¬  ¬'
        # slipped through; '\s*' accepts any amount, including none.
        source = re.sub(r'¬\s*¬', '', input)
        # Add EOF sentinel so scanning never runs off the end.
        source = source + EOF
        return source
    def nextChar(self):
        # Advance to the next character; sticks at EOF past the end.
        self.curPos += 1
        if self.curPos >= len(self.source):
            self.curChar = EOF
        else:
            self.curChar = self.source[self.curPos]
    def peek(self, skipWhitespaces=False):
        # Look at the next character without consuming it; optionally
        # look past any run of whitespace (EOF sentinel stops the scan).
        if self.curPos + 1 >= len(self.source):
            return EOF
        if not skipWhitespaces:
            return self.source[self.curPos+1]
        peekPos = self.curPos + 1
        while self.source[peekPos] in [' ', '\t', '\r']:
            peekPos += 1
        return self.source[peekPos]
    def skipWhitespace(self):
        while self.curChar == ' ' or self.curChar == '\t' or self.curChar == '\r':
            self.nextChar()
    def getVarText(self, negated):
        # Consume the digits of a variable (curChar is on VAR_INIT);
        # prefix the negation sign when requested.
        startPos = self.curPos
        while self.peek().isdigit():
            self.nextChar()
        tokText = self.source[startPos: self.curPos + 1]
        if negated:
            tokText = OP_NEG + tokText
        return tokText
    def getToken(self):
        """Return the next Token; raises on an unrecognised character."""
        token = None
        self.skipWhitespace()
        # Parenheses
        if self.curChar == P_OPEN:
            token = Token(self.curChar, TokenType.PARENTHESES_OPEN)
        elif self.curChar == P_CLOSE:
            token = Token(self.curChar, TokenType.PARENTHESES_CLOSE)
        # Neg OP | Neg variable: a '¬' directly before a variable is
        # folded into a single negated LITERAL token.
        elif self.curChar == OP_NEG:
            if self.peek(skipWhitespaces=True) == VAR_INIT:
                self.nextChar()
                self.skipWhitespace()
                varText = self.getVarText(negated=True)
                token = Token(varText, TokenType.LITERAL)
            else:
                token = Token(self.curChar, TokenType.NEGATION)
        # Operators
        elif self.curChar == OP_AND:
            token = Token(self.curChar, TokenType.AND)
        elif self.curChar == OP_OR:
            token = Token(self.curChar, TokenType.OR)
        elif self.curChar == OP_XOR:
            token = Token(self.curChar, TokenType.XOR)
        elif self.curChar == OP_IMP:
            token = Token(self.curChar, TokenType.IMPLICATION)
        elif self.curChar == OP_EQU:
            token = Token(self.curChar, TokenType.EQUIVALENCE)
        # EOF
        elif self.curChar == EOF:
            token = Token('', TokenType.EOF)
        # Variables
        elif self.curChar == VAR_INIT:
            tokText = self.getVarText(negated=False)
            token = Token(tokText, TokenType.LITERAL)
        # Unknown token!
        else:
            raise Exception('Lexing error. Invalid character: ' + self.curChar)
        self.nextChar()
        return token
| 26.824427
| 82
| 0.553785
|
4a0481372e175bbcdc30f8f1f83440bd7678f638
| 90,085
|
py
|
Python
|
dep/reportlab/src/reportlab/graphics/charts/axes.py
|
csterryliu/Legal-Attest-Letter-Generator
|
4a7df7ea324dfee38a39daab10777a1c9d222c42
|
[
"MIT"
] | 52
|
2016-09-30T05:53:45.000Z
|
2021-12-26T12:07:48.000Z
|
dep/reportlab/src/reportlab/graphics/charts/axes.py
|
csterryliu/Legal-Attest-Letter-Generator
|
4a7df7ea324dfee38a39daab10777a1c9d222c42
|
[
"MIT"
] | 2
|
2016-09-30T06:05:01.000Z
|
2017-11-05T12:58:47.000Z
|
dep/reportlab/src/reportlab/graphics/charts/axes.py
|
csterryliu/Legal-Attest-Letter-Generator
|
4a7df7ea324dfee38a39daab10777a1c9d222c42
|
[
"MIT"
] | 15
|
2016-11-03T08:50:15.000Z
|
2022-01-14T07:04:35.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
__version__='3.3.0'
__doc__="""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below to/left of the X/Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a nu-
meric quantity. Value axis have a real number quantity associated
with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axis tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString, EitherOr, Validator, NoneOr, isInstanceOf, \
isNormalDate, isNoneOrCallable
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, PolyLine, Rect, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label, PMVLabel
from reportlab.graphics.charts.utils import nextRoundNumber
from reportlab.graphics.widgets.grids import ShadedRect
from reportlab.lib.colors import Color
from reportlab.lib.utils import isSeq
import copy
try:
reduce # Python 2.x
except NameError:
from functools import reduce
# Helpers.
def _findMinMaxValue(V, x, default, func, special=None):
    # Reduce a list of point sequences V to one value via func (min or
    # max) applied to element x of each point.  When the first element
    # looks like a sequence of sequences, extract element x (or apply
    # `special`) per point first.  None entries are filtered out;
    # `default` is returned when nothing usable remains.
    if isSeq(V[0][0]):
        if special:
            f=lambda T,x=x,special=special,func=func: special(T,x,func)
        else:
            f=lambda T,x=x: T[x]
        V=list(map(lambda e,f=f: list(map(f,e)),V))
    V = list(filter(len,[[x for x in x if x is not None] for x in V]))
    if len(V)==0: return default
    return func(list(map(func,V)))
def _findMin(V, x, default,special=None):
    '''find minimum over V[i][x]; returns default when V has no usable values'''
    return _findMinMaxValue(V,x,default,min,special=special)
def _findMax(V, x, default,special=None):
    '''find maximum over V[i][x]; returns default when V has no usable values'''
    return _findMinMaxValue(V,x,default,max,special=special)
def _allInt(values):
    '''true if all values are int'''
    # Returns 1/0 (not True/False) to match historical callers.
    for v in values:
        try:
            if int(v)!=v: return 0
        except Exception:
            # int() failed (non-numeric value) -> not all integers.
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return 0
    return 1
class AxisLineAnnotation:
    '''Create a grid like line using the given user value to draw the line
    kwds may contain
        startOffset if true v is offset from the default grid start position
        endOffset   if true v is offset from the default grid end position
        scaleValue  True/not given --> scale the value
                    otherwise use the absolute value
        lo          lowest coordinate to draw default 0
        hi          highest coordinate to draw at default = length
        drawAtLimit True draw line at appropriate limit if its coordinate exceeds the lo, hi range
                    False ignore if it's outside the range
        all Line keywords are acceptable
    '''
    def __init__(self,v,**kwds):
        self._v = v
        self._kwds = kwds
    def __call__(self,axis):
        """Build the annotation Line for *axis*; returns None when the
        position falls outside [lo,hi] and drawAtLimit is False."""
        kwds = self._kwds.copy()
        scaleValue = kwds.pop('scaleValue',True)
        endOffset = kwds.pop('endOffset',False)
        # BUGFIX: this previously popped 'endOffset' a second time (always
        # False after the pop above), so a startOffset request was
        # silently ignored.
        startOffset = kwds.pop('startOffset',False)
        if axis.isYAxis:
            offs = axis._x
            d0 = axis._y
        else:
            offs = axis._y
            d0 = axis._x
        s = kwds.pop('start',None)
        e = kwds.pop('end',None)
        if s is None or e is None:
            # Fall back to the joined axis's grid extent when start/end
            # are not given explicitly.
            dim = getattr(getattr(axis,'joinAxis',None),'getGridDims',None)
            if dim and hasattr(dim,'__call__'):
                dim = dim()
            if dim:
                if s is None: s = dim[0]
                if e is None: e = dim[1]
            else:
                if s is None: s = 0
                if e is None: e = 0
        hi = kwds.pop('hi',axis._length)+d0
        lo = kwds.pop('lo',0)+d0
        lo,hi=min(lo,hi),max(lo,hi)
        drawAtLimit = kwds.pop('drawAtLimit',False)
        oaglp = axis._get_line_pos
        if not scaleValue:
            # Temporarily disable scaling so v is used as an absolute
            # coordinate; the original hook is restored in finally.
            axis._get_line_pos = lambda x: x
        try:
            v = self._v
            if endOffset:
                v = v + hi
            elif startOffset:
                v = v + lo
            func = axis._getLineFunc(s-offs,e-offs,kwds.pop('parent',None))
            if not hasattr(axis,'_tickValues'):
                axis._pseudo_configure()
            d = axis._get_line_pos(v)
            if d<lo or d>hi:
                if not drawAtLimit: return None
                # Clamp to the nearer limit instead of skipping the line.
                if d<lo:
                    d = lo
                else:
                    d = hi
                axis._get_line_pos = lambda x: d
            L = func(v)
            # Remaining kwds are applied as Line attributes.
            for k,v in kwds.items():
                setattr(L,k,v)
        finally:
            axis._get_line_pos = oaglp
        return L
class AxisBackgroundAnnotation:
    '''Create a set of coloured bars on the background of a chart using axis ticks as the bar borders
    colors is a set of colors to use for the background bars. A colour of None is just a skip.
    Special effects if you pass a rect or Shaded rect instead.
    '''
    def __init__(self,colors,**kwds):
        self._colors = colors
        self._kwds = kwds
    def __call__(self,axis):
        # Returns a Group of background Rects, or None when there is
        # nothing to draw (no colors or no tick values).
        colors = self._colors
        if not colors: return
        kwds = self._kwds.copy()
        isYAxis = axis.isYAxis
        if isYAxis:
            offs = axis._x
            d0 = axis._y
        else:
            offs = axis._y
            d0 = axis._x
        s = kwds.pop('start',None)
        e = kwds.pop('end',None)
        if s is None or e is None:
            # Fall back to the joined axis's grid extent when start/end
            # are not given explicitly.
            dim = getattr(getattr(axis,'joinAxis',None),'getGridDims',None)
            if dim and hasattr(dim,'__call__'):
                dim = dim()
            if dim:
                if s is None: s = dim[0]
                if e is None: e = dim[1]
            else:
                if s is None: s = 0
                if e is None: e = 0
        if not hasattr(axis,'_tickValues'):
            axis._pseudo_configure()
        tv = getattr(axis,'_tickValues',None)
        if not tv: return
        G = Group()
        ncolors = len(colors)
        # Walk consecutive tick pairs; each pair bounds one background
        # bar, cycling through the colour list (None entries skip a bar).
        v0 = axis._get_line_pos(tv[0])
        for i in range(1,len(tv)):
            v1 = axis._get_line_pos(tv[i])
            c = colors[(i-1)%ncolors]
            if c:
                if isYAxis:
                    y = v0
                    x = s
                    height = v1-v0
                    width = e-s
                else:
                    x = v0
                    y = s
                    width = v1-v0
                    height = e-s
                if isinstance(c,Color):
                    r = Rect(x,y,width,height,fillColor=c,strokeColor=None)
                elif isinstance(c,Rect):
                    # Template Rect: copy its styling onto the bar geometry.
                    r = Rect(x,y,width,height)
                    for k in c.__dict__:
                        if k not in ('x','y','width','height'):
                            setattr(r,k,getattr(c,k))
                elif isinstance(c,ShadedRect):
                    # Template ShadedRect: same idea, gradient-filled bar.
                    r = ShadedRect(x=x,y=y,width=width,height=height)
                    for k in c.__dict__:
                        if k not in ('x','y','width','height'):
                            setattr(r,k,getattr(c,k))
                G.add(r)
            v0 = v1
        return G
class TickLU:
    '''lookup special cases for tick values

    Stores (position, value) pairs and matches a queried tick position
    against them within a small absolute tolerance (``accuracy``).
    '''
    def __init__(self,*T,**kwds):
        self.accuracy = kwds.pop('accuracy',1e-8)
        self.T = T
    def __contains__(self,t):
        accuracy = self.accuracy
        for x,v in self.T:
            if abs(x-t)<accuracy:
                return True
        return False
    def __getitem__(self,t):
        # Consistency fix: use the hoisted local (previously assigned
        # but then ignored in favour of a repeated attribute lookup).
        accuracy = self.accuracy
        for x,v in self.T:
            if abs(x-t)<accuracy:
                return v
        raise IndexError('cannot locate index %r' % t)
class _AxisG(Widget):
def _get_line_pos(self,v):
v = self.scale(v)
try:
v = v[0]
except:
pass
return v
def _cxLine(self,x,start,end):
x = self._get_line_pos(x)
return Line(x, self._y + start, x, self._y + end)
def _cyLine(self,y,start,end):
y = self._get_line_pos(y)
return Line(self._x + start, y, self._x + end, y)
def _cxLine3d(self,x,start,end,_3d_dx,_3d_dy):
x = self._get_line_pos(x)
y0 = self._y + start
y1 = self._y + end
y0, y1 = min(y0,y1),max(y0,y1)
x1 = x + _3d_dx
return PolyLine([x,y0,x1,y0+_3d_dy,x1,y1+_3d_dy],strokeLineJoin=1)
def _cyLine3d(self,y,start,end,_3d_dx,_3d_dy):
y = self._get_line_pos(y)
x0 = self._x + start
x1 = self._x + end
x0, x1 = min(x0,x1),max(x0,x1)
y1 = y + _3d_dy
return PolyLine([x0,y,x0+_3d_dx,y1,x1+_3d_dx,y1],strokeLineJoin=1)
def _getLineFunc(self, start, end, parent=None):
_3d_dx = getattr(parent,'_3d_dx',None)
if _3d_dx is not None:
_3d_dy = getattr(parent,'_3d_dy',None)
f = self.isYAxis and self._cyLine3d or self._cxLine3d
return lambda v, s=start, e=end, f=f,_3d_dx=_3d_dx,_3d_dy=_3d_dy: f(v,s,e,_3d_dx=_3d_dx,_3d_dy=_3d_dy)
else:
f = self.isYAxis and self._cyLine or self._cxLine
return lambda v, s=start, e=end, f=f: f(v,s,e)
def _makeLines(self,g,start,end,strokeColor,strokeWidth,strokeDashArray,strokeLineJoin,strokeLineCap,strokeMiterLimit,parent=None,exclude=[],specials={}):
func = self._getLineFunc(start,end,parent)
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
if exclude:
exf = self.isYAxis and (lambda l: l.y1 in exclude) or (lambda l: l.x1 in exclude)
else:
exf = None
for t in self._tickValues:
L = func(t)
if exf and exf(L): continue
L.strokeColor = strokeColor
L.strokeWidth = strokeWidth
L.strokeDashArray = strokeDashArray
L.strokeLineJoin = strokeLineJoin
L.strokeLineCap = strokeLineCap
L.strokeMiterLimit = strokeMiterLimit
if t in specials:
for a,v in specials[t].items():
setattr(L,a,v)
g.add(L)
def makeGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
c = self.gridStrokeColor
w = self.gridStrokeWidth or 0
if w and c and self.visibleGrid:
s = self.gridStart
e = self.gridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
self._makeLines(g,s-offs,e-offs,c,w,self.gridStrokeDashArray,self.gridStrokeLineJoin,self.gridStrokeLineCap,self.gridStrokeMiterLimit,parent=parent,exclude=exclude,specials=getattr(self,'_gridSpecials',{}))
self._makeSubGrid(g,dim,parent,exclude=[])
def _makeSubGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
if not (getattr(self,'visibleSubGrid',0) and self.subTickNum>0): return
c = self.subGridStrokeColor
w = self.subGridStrokeWidth or 0
if not(w and c): return
s = self.subGridStart
e = self.subGridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
otv = self._calcSubTicks()
try:
self._makeLines(g,s-offs,e-offs,c,w,self.subGridStrokeDashArray,self.subGridStrokeLineJoin,self.subGridStrokeLineCap,self.subGridStrokeMiterLimit,parent=parent,exclude=exclude)
finally:
self._tickValues = otv
def getGridDims(self,start=None,end=None):
if start is None: start = (self._x,self._y)[self.isYAxis]
if end is None: end = start+self._length
return start,end
def isYAxis(self):
if getattr(self,'_dataIndex',None)==1: return True
acn = self.__class__.__name__
return acn[0]=='Y' or acn[:4]=='AdjY'
isYAxis = property(isYAxis)
def isXAxis(self):
if getattr(self,'_dataIndex',None)==0: return True
acn = self.__class__.__name__
return acn[0]=='X' or acn[:11]=='NormalDateX'
isXAxis = property(isXAxis)
def addAnnotations(self,g,A=None):
if A is None: getattr(self,'annotations',[])
for x in A:
g.add(x(self))
def _splitAnnotations(self):
A = getattr(self,'annotations',[])[:]
D = {}
for v in ('early','beforeAxis','afterAxis','beforeTicks',
'afterTicks','beforeTickLabels',
'afterTickLabels','late'):
R = [].append
P = [].append
for a in A:
if getattr(a,v,0):
R(a)
else:
P(a)
D[v] = R.__self__
A[:] = P.__self__
D['late'] += A
return D
def draw(self):
g = Group()
A = self._splitAnnotations()
self.addAnnotations(g,A['early'])
if self.visible:
self.addAnnotations(g,A['beforeAxis'])
g.add(self.makeAxis())
self.addAnnotations(g,A['afterAxis'])
self.addAnnotations(g,A['beforeTicks'])
g.add(self.makeTicks())
self.addAnnotations(g,A['afterTicks'])
self.addAnnotations(g,A['beforeTickLabels'])
g.add(self.makeTickLabels())
self.addAnnotations(g,A['afterTickLabels'])
self.addAnnotations(g,A['late'])
return g
class CALabel(PMVLabel):
    # Category-axis label: a PMVLabel plus a fractional anchor position
    # within the category width (0=start edge, 1=end edge, 0.5=centre).
    _attrMap = AttrMap(BASE=PMVLabel,
        labelPosFrac = AttrMapValue(isNumber, desc='where in the category range [0,1] the labels should be anchored'),
        )
    def __init__(self,**kw):
        PMVLabel.__init__(self,**kw)
        self._setKeywords(
            labelPosFrac = 0.5,
            )
# Category axes.
class CategoryAxis(_AxisG):
"Abstract category axis, unusable in itself."
_nodoc = 1
_attrMap = AttrMap(
visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
labels = AttrMapValue(None, desc='Handle of the axis labels.'),
categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
style = AttrMapValue(OneOf('parallel','stacked','parallel_3d'),"How common category bars are plotted"),
labelAxisMode = AttrMapValue(OneOf('high','low','axis', 'axispmv'), desc="Like joinAxisMode, but for the axis labels"),
tickShift = AttrMapValue(isBoolean, desc='Tick shift typically'),
loPad = AttrMapValue(isNumber, desc='extra inner space before start of the axis'),
hiPad = AttrMapValue(isNumber, desc='extra inner space after end of the axis'),
annotations = AttrMapValue(None,desc='list of annotations'),
loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
innerTickDraw = AttrMapValue(isNoneOrCallable, desc="Callable to replace _drawInnerTicks"),
)
def __init__(self):
assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"
# private properties set by methods. The initial values
# here are to make demos easy; they would always be
# overridden in real life.
self._x = 50
self._y = 50
self._length = 100
self._catCount = 0
# public properties
self.visible = 1
self.visibleAxis = 1
self.visibleTicks = 1
self.visibleLabels = 1
self.visibleGrid = 0
self.drawGridLast = False
self.strokeWidth = 1
self.strokeColor = STATE_DEFAULTS['strokeColor']
self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStrokeLineJoin = self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.gridStrokeLineCap = self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.gridStrokeMiterLimit = self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.gridStrokeWidth = 0.25
self.gridStrokeColor = STATE_DEFAULTS['strokeColor']
self.gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStart = self.gridEnd = None
self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.labels = TypedPropertyCollection(CALabel)
# if None, they don't get labels. If provided,
# you need one name per data point and they are
# used for label text.
self.categoryNames = None
self.joinAxis = None
self.joinAxisPos = None
self.joinAxisMode = None
self.labelAxisMode = 'axis'
self.reverseDirection = 0
self.style = 'parallel'
#various private things which need to be initialized
self._labelTextFormat = None
self.tickShift = 0
self.loPad = 0
self.hiPad = 0
self.loLLen = 0
self.hiLLen = 0
def setPosition(self, x, y, length):
# ensure floating point
self._x = float(x)
self._y = float(y)
self._length = float(length)
def configure(self, multiSeries,barWidth=None):
self._catCount = max(list(map(len,multiSeries)))
self._barWidth = barWidth or ((self._length-self.loPad-self.hiPad)/float(self._catCount or 1))
self._calcTickmarkPositions()
if self.labelAxisMode == 'axispmv':
self._pmv = [sum([series[i] for series in multiSeries]) for i in xrange(self._catCount)]
def _calcTickmarkPositions(self):
n = self._catCount
if self.tickShift:
self._tickValues = [t+0.5 for t in range(n)]
else:
if self.reverseDirection:
self._tickValues = list(range(-1,n))
else:
self._tickValues = list(range(n+1))
def _scale(self,idx):
if self.reverseDirection: idx = self._catCount-idx-1
return idx
def _assertYAxis(axis):
    # Guard used by joinToAxis implementations: only Y-flavoured axes
    # may be joined here.
    assert axis.isYAxis, "Cannot connect to other axes (%s), but Y- ones." % axis.__class__.__name__
def _assertXAxis(axis):
    # Guard used by joinToAxis implementations: only X-flavoured axes
    # may be joined here.
    assert axis.isXAxis, "Cannot connect to other axes (%s), but X- ones." % axis.__class__.__name__
class _XTicks:
_tickTweaks = 0 #try 0.25-0.5
def _drawTicksInner(self,tU,tD,g):
itd = getattr(self,'innerTickDraw',None)
if itd:
itd(self,tU,tD,g)
elif tU or tD:
sW = self.strokeWidth
tW = self._tickTweaks
if tW:
if tU and not tD:
tD = tW*sW
elif tD and not tU:
tU = tW*sW
self._makeLines(g,tU,-tD,self.strokeColor,sW,self.strokeDashArray,self.strokeLineJoin,self.strokeLineCap,self.strokeMiterLimit)
def _drawTicks(self,tU,tD,g=None):
g = g or Group()
if self.visibleTicks:
self._drawTicksInner(tU,tD,g)
return g
def _calcSubTicks(self):
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
otv = self._tickValues
if not hasattr(self,'_subTickValues'):
acn = self.__class__.__name__
if acn[:11]=='NormalDateX':
iFuzz = 0
dCnv = int
else:
iFuzz = 1e-8
dCnv = lambda x:x
OTV = [tv for tv in otv if getattr(tv,'_doSubTicks',1)]
T = [].append
nst = int(self.subTickNum)
i = len(OTV)
if i<2:
self._subTickValues = []
else:
if i==2:
dst = OTV[1]-OTV[0]
elif i==3:
dst = max(OTV[1]-OTV[0],OTV[2]-OTV[1])
else:
i >>= 1
dst = OTV[i+1] - OTV[i]
fuzz = dst*iFuzz
vn = self._valueMin+fuzz
vx = self._valueMax-fuzz
if OTV[0]>vn: OTV.insert(0,OTV[0]-dst)
if OTV[-1]<vx: OTV.append(OTV[-1]+dst)
dst /= float(nst+1)
for i,x in enumerate(OTV[:-1]):
for j in range(nst):
t = x+dCnv((j+1)*dst)
if t<=vn or t>=vx: continue
T(t)
self._subTickValues = T.__self__
self._tickValues = self._subTickValues
return otv
def _drawSubTicks(self,tU,tD,g):
if getattr(self,'visibleSubTicks',0) and self.subTickNum>0:
otv = self._calcSubTicks()
try:
self._subTicking = 1
self._drawTicksInner(tU,tD,g)
finally:
del self._subTicking
self._tickValues = otv
def makeTicks(self):
yold=self._y
try:
self._y = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
g = self._drawTicks(self.tickUp,self.tickDown)
self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
return g
finally:
self._y = yold
def _labelAxisPos(self,mode=None):
axis = self.joinAxis
if axis:
mode = mode or self.labelAxisMode
if mode == 'low':
return axis._y
elif mode == 'high':
return axis._y + axis._length
return self._y
class _YTicks(_XTicks):
    # Y-axis tick mixin: same machinery as _XTicks but ticks extend
    # left/right and the label reference coordinate is x, not y.
    def _labelAxisPos(self,mode=None):
        # Resolve the x position labels/ticks hang off, honouring
        # labelAxisMode when this axis is joined to another.
        axis = self.joinAxis
        if axis:
            mode = mode or self.labelAxisMode
            if mode == 'low':
                return axis._x
            elif mode == 'high':
                return axis._x + axis._length
        return self._x
    def makeTicks(self):
        # Temporarily shift the axis x origin to the label position so
        # the shared drawing helpers place ticks on the correct side.
        xold=self._x
        try:
            self._x = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
            g = self._drawTicks(self.tickRight,self.tickLeft)
            self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
            return g
        finally:
            self._x = xold
class XCategoryAxis(_XTicks,CategoryAxis):
    "X/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        )

    # Category values are taken from index 0 (the x slot) of each data point.
    _dataIndex = 0

    def __init__(self):
        """Set up label anchoring and default tick geometry for an x/category axis."""
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'n' #north - top edge
        self.labels.dy = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickUp = 0  # how far into chart does tick go?
        self.tickDown = 5  # how far below axis does tick go?

    def demo(self):
        """Return a small self-contained Drawing showing this axis (for docs/tests)."""
        self.setPosition(30, 70, 140)
        self.configure([(10,20,30,40,50)])

        self.categoryNames = ['One','Two','Three','Four','Five']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'n'
        self.labels[4].boxAnchor = 'e'
        self.labels[4].angle = 90

        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        """Join with y-axis using some mode.

        mode: 'bottom'/'top' snap to the y-axis ends, 'value' places this
        axis at y-axis data value *pos*, 'points' places it at absolute
        coordinate *pos*.
        """
        _assertYAxis(yAxis)
        if mode == 'bottom':
            self._y = yAxis._y
        elif mode == 'top':
            self._y = yAxis._y + yAxis._length
        elif mode == 'value':
            self._y = yAxis.scale(pos)
        elif mode == 'points':
            self._y = pos

    def _joinToAxis(self):
        # Apply joinAxis/joinAxisMode lazily, just before drawing.
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            if jam in ('bottom', 'top'):
                self.joinToAxis(ja, mode=jam)
            elif jam in ('value', 'points'):
                self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)

    def scale(self, idx):
        """returns the x position and width in drawing units of the slice"""
        # NOTE(review): loPad is not set in this class; presumably provided by
        # CategoryAxis configuration — confirm against the base class.
        return (self._x + self.loPad + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Build the axis line itself (a Group); empty if visibleAxis is false."""
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g

        # loLLen/hiLLen extend the line a little beyond the tick span.
        axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTickLabels(self):
        """Build one Label per category (a Group); empty if labels are hidden."""
        g = Group()
        if not self.visibleLabels: return g

        categoryNames = self.categoryNames
        if categoryNames is not None:
            catCount = self._catCount
            n = len(categoryNames)
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            _y = self._labelAxisPos()
            _x = self._x

            # In 'axispmv' mode each label knows its category's plotted value.
            pmv = self._pmv if self.labelAxisMode=='axispmv' else None
            for i in range(catCount):
                if reverseDirection: ic = catCount-i-1
                else: ic = i
                if ic>=n: continue
                # Prefer an override keyed by negative index (i.e. counted from
                # the end) before falling back to the positive-index label.
                label=i-catCount
                if label in self.labels:
                    label = self.labels[label]
                else:
                    label = self.labels[i]
                if pmv:
                    _dy = label.dy
                    v = label._pmv = pmv[ic]
                    # Negative values: flip the label to the other side of the axis.
                    if v<0: _dy *= -2
                else:
                    _dy = 0
                lpf = label.labelPosFrac  # fractional position within the slice
                x = _x + (i+lpf) * barWidth
                label.setOrigin(x,_y+_dy)
                label.setText(categoryNames[ic] or '')
                g.add(label)
        return g
class YCategoryAxis(_YTicks,CategoryAxis):
    "Y/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        )

    # Category values are taken from index 1 (the y slot) of each data point.
    _dataIndex = 1

    def __init__(self):
        """Set up label anchoring and default tick geometry for a y/category axis."""
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'e' #east - right edge
        self.labels.dx = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickLeft = 5  # how far left of axis does tick go?
        self.tickRight = 0  # how far right of axis does tick go?

    def demo(self):
        """Return a small self-contained Drawing showing this axis (for docs/tests)."""
        self.setPosition(50, 10, 80)
        self.configure([(10,20,30)])

        self.categoryNames = ['One','Two','Three']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'e'
        self.labels[2].boxAnchor = 's'
        self.labels[2].angle = 90

        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, xAxis, mode='left', pos=None):
        """Join with x-axis using some mode.

        mode: 'left'/'right' snap to the x-axis ends, 'value' places this
        axis at x-axis data value *pos*, 'points' places it at absolute
        coordinate *pos*.
        """
        _assertXAxis(xAxis)
        if mode == 'left':
            self._x = xAxis._x * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
        elif mode == 'value':
            self._x = xAxis.scale(pos) * 1.0
        elif mode == 'points':
            self._x = pos * 1.0

    def _joinToAxis(self):
        # Apply joinAxis/joinAxisMode lazily, just before drawing.
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            if jam in ('left', 'right'):
                self.joinToAxis(ja, mode=jam)
            elif jam in ('value', 'points'):
                self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)

    def scale(self, idx):
        "Returns the y position and width in drawing units of the slice."
        return (self._y + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        """Build the axis line itself (a Group); empty if visibleAxis is false."""
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g

        # loLLen/hiLLen extend the line a little beyond the tick span.
        axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTickLabels(self):
        """Build one Label per category (a Group); empty if labels are hidden."""
        g = Group()
        if not self.visibleLabels: return g

        categoryNames = self.categoryNames
        if categoryNames is not None:
            catCount = self._catCount
            n = len(categoryNames)
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            labels = self.labels
            _x = self._labelAxisPos()
            _y = self._y

            # In 'axispmv' mode each label knows its category's plotted value.
            pmv = self._pmv if self.labelAxisMode=='axispmv' else None
            for i in range(catCount):
                if reverseDirection: ic = catCount-i-1
                else: ic = i
                if ic>=n: continue
                # Prefer an override keyed by negative index (i.e. counted from
                # the end) before falling back to the positive-index label.
                label=i-catCount
                if label in self.labels:
                    label = self.labels[label]
                else:
                    label = self.labels[i]
                lpf = label.labelPosFrac  # fractional position within the slice
                y = _y + (i+lpf) * barWidth
                if pmv:
                    _dx = label.dx
                    v = label._pmv = pmv[ic]
                    # Negative values: flip the label to the other side of the axis.
                    if v<0: _dx *= -2
                else:
                    _dx = 0
                label.setOrigin(_x+_dx, y)
                label.setText(categoryNames[ic] or '')
                g.add(label)
        return g
class TickLabeller:
    """Marker base class for callable tick-label formatters.

    Subclasses override __call__(axis, value) to produce the label text;
    axes check isinstance(formatter, TickLabeller) to decide whether the
    formatter should receive the axis as well as the tick value.
    """
    _MSG = 'Abstract class instance called'

    def __call__(self, axis, value):
        return self._MSG
# Value axes.
class ValueAxis(_AxisG):
    """Abstract value axis, unusable in itself.

    Concrete subclasses (XValueAxis, YValueAxis) set _dataIndex and the
    tick/join attributes.  The core job of this class is range selection
    (_setRange), tick placement (_calcTickmarkPositions) and value->point
    scaling (scale).
    """
    _attrMap = AttrMap(
        forceZero = AttrMapValue(EitherOr((isBoolean,OneOf('near'))), desc='Ensure zero in range if true.'),
        visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
        visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
        visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
        visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
        strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
        strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
        strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
        gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
        gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
        gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
        gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
        gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
        gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
        gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
        gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
        drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
        minimumTickSpacing = AttrMapValue(isNumber, desc='Minimum value for distance between ticks.'),
        maximumTicks = AttrMapValue(isNumber, desc='Maximum number of ticks.'),
        labels = AttrMapValue(None, desc='Handle of the axis labels.'),
        labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
        labelTextFormat = AttrMapValue(None, desc='Formatting string or function used for axis labels.'),
        labelTextPostFormat = AttrMapValue(None, desc='Extra Formatting string.'),
        labelTextScale = AttrMapValue(isNumberOrNone, desc='Scaling for label tick values.'),
        valueMin = AttrMapValue(isNumberOrNone, desc='Minimum value on axis.'),
        valueMax = AttrMapValue(isNumberOrNone, desc='Maximum value on axis.'),
        valueStep = AttrMapValue(isNumberOrNone, desc='Step size used between ticks.'),
        valueSteps = AttrMapValue(isListOfNumbersOrNone, desc='List of step sizes used between ticks.'),
        avoidBoundFrac = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Fraction of interval to allow above and below.'),
        avoidBoundSpace = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Space to allow above and below.'),
        abf_ignore_zero = AttrMapValue(EitherOr((NoneOr(isBoolean),SequenceOf(isBoolean,emptyOK=0,lo=2,hi=2))), desc='Set to True to make the avoidBoundFrac calculations treat zero as non-special'),
        rangeRound=AttrMapValue(OneOf('none','both','ceiling','floor'),'How to round the axis limits'),
        zrangePref = AttrMapValue(isNumberOrNone, desc='Zero range axis limit preference.'),
        style = AttrMapValue(OneOf('normal','stacked','parallel_3d'),"How values are plotted!"),
        skipEndL = AttrMapValue(OneOf('none','start','end','both'), desc='Skip high/low tick labels'),
        origShiftIPC = AttrMapValue(isNumberOrNone, desc='Lowest label shift interval ratio.'),
        origShiftMin = AttrMapValue(isNumberOrNone, desc='Minimum amount to shift.'),
        origShiftSpecialValue = AttrMapValue(isNumberOrNone, desc='special value for shift'),
        tickAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the ticks"),
        reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
        annotations = AttrMapValue(None,desc='list of annotations'),
        loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
        hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
        subTickNum = AttrMapValue(isNumber, desc='Number of axis sub ticks, if >0'),
        subTickLo = AttrMapValue(isNumber, desc='sub tick down or left'),
        subTickHi = AttrMapValue(isNumber, desc='sub tick up or right'),
        visibleSubTicks = AttrMapValue(isBoolean, desc='Display axis sub ticks, if true.'),
        visibleSubGrid = AttrMapValue(isBoolean, desc='Display axis sub grid, if true.'),
        subGridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
        subGridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
        subGridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
        subGridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
        subGridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
        subGridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
        subGridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
        subGridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
        keepTickLabelsInside = AttrMapValue(isBoolean, desc='Ensure tick labels do not project beyond bounds of axis if true'),
        skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
        requiredRange = AttrMapValue(isNumberOrNone, desc='Minimum required value range.'),
        innerTickDraw = AttrMapValue(isNoneOrCallable, desc="Callable to replace _drawInnerTicks"),
        )

    def __init__(self,**kw):
        """Initialise defaults; concrete subclasses call this with their kwargs."""
        assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
        self._setKeywords(**kw)
        self._setKeywords(
                        _configured = 0,
                        # private properties set by methods.  The initial values
                        # here are to make demos easy; they would always be
                        # overridden in real life.
                        _x = 50,
                        _y = 50,
                        _length = 100,

                        # public properties
                        visible = 1,
                        visibleAxis = 1,
                        visibleLabels = 1,
                        visibleTicks = 1,
                        visibleGrid = 0,
                        forceZero = 0,

                        strokeWidth = 1,
                        strokeColor = STATE_DEFAULTS['strokeColor'],
                        strokeDashArray = STATE_DEFAULTS['strokeDashArray'],
                        strokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
                        strokeLineCap = STATE_DEFAULTS['strokeLineCap'],
                        strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
                        gridStrokeWidth = 0.25,
                        gridStrokeColor = STATE_DEFAULTS['strokeColor'],
                        gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
                        gridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
                        gridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
                        gridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
                        gridStart = None,
                        gridEnd = None,
                        drawGridLast = False,

                        visibleSubGrid = 0,
                        visibleSubTicks = 0,
                        subTickNum = 0,
                        subTickLo = 0,
                        subTickHi = 0,
                        subGridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
                        subGridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
                        subGridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
                        subGridStrokeWidth = 0.25,
                        subGridStrokeColor = STATE_DEFAULTS['strokeColor'],
                        subGridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
                        subGridStart = None,
                        subGridEnd = None,

                        labels = TypedPropertyCollection(Label),
                        keepTickLabelsInside = 0,

                        # how close can the ticks be?
                        minimumTickSpacing = 10,
                        maximumTicks = 7,

                        # a format string like '%0.2f'
                        # or a function which takes the value as an argument and returns a string
                        _labelTextFormat = None,
                        labelAxisMode = 'axis',
                        labelTextFormat = None,
                        labelTextPostFormat = None,
                        labelTextScale = None,

                        # if set to None, these will be worked out for you.
                        # if you override any or all of them, your values
                        # will be used.
                        valueMin = None,
                        valueMax = None,
                        valueStep = None,
                        avoidBoundFrac = None,
                        avoidBoundSpace = None,
                        abf_ignore_zero = False,
                        rangeRound = 'none',
                        zrangePref = 0,
                        style = 'normal',
                        skipEndL='none',
                        origShiftIPC = None,
                        origShiftMin = None,
                        origShiftSpecialValue = None,
                        tickAxisMode = 'axis',
                        reverseDirection=0,
                        loLLen=0,
                        hiLLen=0,
                        requiredRange=0,
                        )
        self.labels.angle = 0

    def setPosition(self, x, y, length):
        """Record the axis origin and length (coerced to float)."""
        # ensure floating point
        self._x = float(x)
        self._y = float(y)
        self._length = float(length)

    def configure(self, dataSeries):
        """Let the axis configure its scale and range based on the data.

        Called after setPosition. Let it look at a list of lists of
        numbers determine the tick mark intervals.  If valueMin,
        valueMax and valueStep are configured then it
        will use them; if any of them are set to None it
        will look at the data and make some sensible decision.
        You may override this to build custom axes with
        irregular intervals.  It creates an internal
        variable self._values, which is a list of numbers
        to use in plotting.
        """
        self._setRange(dataSeries)
        self._configure_end()

    def _configure_end(self):
        # Finalise: ticks, scale factor, and mark the axis usable.
        self._calcTickmarkPositions()
        self._calcScaleFactor()
        self._configured = 1

    def _getValueStepAndTicks(self, valueMin, valueMax,cache={}):
        """Return (step, tickList, fuzz) for a candidate range, memoised.

        NOTE: the mutable-default *cache* is deliberate; callers pass a fresh
        dict per _setRange invocation so entries do not leak across calls.
        """
        try:
            K = (valueMin,valueMax)
            r = cache[K]
        except KeyError:  # was a bare except; only the cache lookup can fail here
            self._valueMin = valueMin
            self._valueMax = valueMax
            valueStep,T = self._calcStepAndTickPositions()
            r = cache[K] = valueStep, T, valueStep*1e-8
        return r

    def _preRangeAdjust(self,valueMin,valueMax):
        """Widen (valueMin, valueMax) so it spans at least self.requiredRange."""
        rr = self.requiredRange
        if rr>0:
            r = valueMax - valueMin
            if r<rr:
                # centre the required range on the data midpoint...
                m = 0.5*(valueMax+valueMin)
                rr *= 0.5
                y1 = min(m-rr,valueMin)
                y2 = max(m+rr,valueMax)
                # ...but do not cross "natural floors" (100 or 0) the data respected
                if valueMin>=100 and y1<100:
                    y2 = y2 + 100 - y1
                    y1 = 100
                elif valueMin>=0 and y1<0:
                    y2 = y2 - y1
                    y1 = 0
                valueMin = self._cValueMin = y1
                valueMax = self._cValueMax = y2
        return valueMin,valueMax

    def _setRange(self, dataSeries):
        """Set minimum and maximum axis values.

        The dataSeries argument is assumed to be a list of data
        vectors. Each vector is itself a list or tuple of numbers.

        Returns a min, max tuple.
        """
        oMin = valueMin = self.valueMin
        oMax = valueMax = self.valueMax
        if valueMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0)
        if valueMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0)
        if valueMin == valueMax:
            # Degenerate (flat) data: synthesise a usable non-zero range.
            if valueMax==0:
                if oMin is None and oMax is None:
                    zrp = getattr(self,'zrangePref',0)
                    if zrp>0:
                        valueMax = zrp
                        valueMin = 0
                    elif zrp<0:
                        valueMax = 0
                        valueMin = zrp
                    else:
                        valueMax = 0.01
                        valueMin = -0.01
                elif self.valueMin is None:
                    valueMin = -0.01
                else:
                    valueMax = 0.01
            else:
                if valueMax>0:
                    valueMax = 1.2*valueMax
                    valueMin = 0.0
                else:
                    valueMax = 0.0
                    valueMin = 1.2*valueMin

        if getattr(self,'_bubblePlot',None):
            # Bubble plots: pad the range so the largest bubble fits inside the axis.
            bubbleMax = float(_findMax(dataSeries,2,0))
            frac=.25
            bubbleV=frac*(valueMax-valueMin)
            self._bubbleV = bubbleV
            self._bubbleMax = bubbleMax
            self._bubbleRadius = frac*self._length
            def special(T,x,func,bubbleV=bubbleV,bubbleMax=bubbleMax):
                try:
                    v = T[2]
                except IndexError:
                    # BUGFIX: was 'bubbleMAx' (NameError) when a point had no size slot
                    v = bubbleMax*0.1
                bubbleV *= (v/bubbleMax)**0.5
                return func(T[x]+bubbleV,T[x]-bubbleV)
            if oMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0,special=special)
            if oMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0,special=special)

        valueMin, valueMax = self._preRangeAdjust(valueMin,valueMax)

        rangeRound = self.rangeRound
        cMin = valueMin
        cMax = valueMax
        forceZero = self.forceZero
        if forceZero:
            # 'near' only forces zero when zero is within 5x the data span.
            if forceZero=='near':
                forceZero = min(abs(valueMin),abs(valueMax)) <= 5*(valueMax-valueMin)
            if forceZero:
                if valueMax<0: valueMax=0
                elif valueMin>0: valueMin = 0

        abf = self.avoidBoundFrac
        do_rr = not getattr(self,'valueSteps',None)
        do_abf = abf and do_rr
        if not isSeq(abf):
            abf = abf, abf
        abfiz = getattr(self,'abf_ignore_zero', False)
        if not isSeq(abfiz):
            abfiz = abfiz, abfiz
        # BUGFIX: was "rangeRound is not 'none'" -- identity comparison with a
        # string literal is interning-dependent (SyntaxWarning on 3.8+).
        do_rr = rangeRound != 'none' and do_rr
        if do_rr:
            rrn = rangeRound in ['both','floor']
            rrx = rangeRound in ['both','ceiling']
        else:
            rrn = rrx = 0
        abS = self.avoidBoundSpace
        do_abs = abS
        if do_abs:
            if not isSeq(abS):
                abS = abS, abS
        aL = float(self._length)

        # Iteratively expand the range until rounding / bound-avoidance settles
        # (bounded at 10 passes to guarantee termination).
        go = do_rr or do_abf or do_abs
        cache = {}
        n_iter = 0
        while go and n_iter<=10:
            n_iter += 1
            go = 0
            if do_abf or do_abs:
                valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
                if do_abf:
                    i0 = valueStep*abf[0]
                    i1 = valueStep*abf[1]
                else:
                    i0 = i1 = 0
                if do_abs:
                    sf = (valueMax-valueMin)/aL
                    i0 = max(i0,abS[0]*sf)
                    i1 = max(i1,abS[1]*sf)
                if rrn: v = T[0]
                else: v = valueMin
                u = cMin-i0
                if (abfiz[0] or abs(v)>fuzz) and v>=u+fuzz:
                    valueMin = u
                    go = 1
                if rrx: v = T[-1]
                else: v = valueMax
                u = cMax+i1
                if (abfiz[1] or abs(v)>fuzz) and v<=u-fuzz:
                    valueMax = u
                    go = 1
            if do_rr:
                valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
                if rrn:
                    if valueMin<T[0]-fuzz:
                        valueMin = T[0]-valueStep
                        go = 1
                    else:
                        go = valueMin>=T[0]+fuzz
                        valueMin = T[0]
                if rrx:
                    if valueMax>T[-1]+fuzz:
                        valueMax = T[-1]+valueStep
                        go = 1
                    else:
                        go = valueMax<=T[-1]-fuzz
                        valueMax = T[-1]
        if n_iter and not go:
            # Converged: reuse the step we already computed instead of recalculating.
            self._computedValueStep = valueStep
        else:
            self._computedValueStep = None

        self._valueMin = valueMin
        self._valueMax = valueMax

        # Optionally shift the origin down so the lowest label clears the data.
        origShiftIPC = self.origShiftIPC
        origShiftMin = self.origShiftMin
        if origShiftMin is not None or origShiftIPC is not None:
            origShiftSpecialValue = self.origShiftSpecialValue
            self._calcValueStep()
            valueMax, valueMin = self._valueMax, self._valueMin
            if origShiftSpecialValue is None or abs(origShiftSpecialValue-valueMin)<1e-6:
                if origShiftIPC:
                    m = origShiftIPC*self._valueStep
                else:
                    m = 0
                if origShiftMin:
                    m = max(m,(valueMax-valueMin)*origShiftMin/self._length)
                self._valueMin -= m

        self._rangeAdjust()

    def _pseudo_configure(self):
        # Configure using pre-supplied valueMin/valueMax without examining data.
        self._valueMin = self.valueMin
        self._valueMax = self.valueMax
        self._configure_end()

    def _rangeAdjust(self):
        """Override this if you want to alter the calculated range.

        E.g. if want a minimum range of 30% or don't want 100%
        as the first point.
        """
        pass

    def _adjustAxisTicks(self):
        '''Override if you want to put slack at the ends of the axis
        eg if you don't want the last tick to be at the bottom etc
        '''
        pass

    def _calcScaleFactor(self):
        """Calculate the axis' scale factor.

        This should be called only *after* the axis' range is set.
        Returns a number.
        """
        self._scaleFactor = self._length / float(self._valueMax - self._valueMin)
        return self._scaleFactor

    def _calcStepAndTickPositions(self):
        """Return (step, [tick values]) honouring rangeRound at each end."""
        valueStep = getattr(self,'_computedValueStep',None)
        if valueStep:
            # consume the cached step from a converged _setRange pass
            del self._computedValueStep
            self._valueStep = valueStep
        else:
            self._calcValueStep()
            valueStep = self._valueStep
        valueMin = self._valueMin
        valueMax = self._valueMax
        fuzz = 1e-8*valueStep
        rangeRound = self.rangeRound
        i0 = int(float(valueMin)/valueStep)
        v = i0*valueStep
        if rangeRound in ('both','floor'):
            if v>valueMin+fuzz: i0 -= 1
        elif v<valueMin-fuzz: i0 += 1
        i1 = int(float(valueMax)/valueStep)
        v = i1*valueStep
        if rangeRound in ('both','ceiling'):
            if v<valueMax-fuzz: i1 += 1
        elif v>valueMax+fuzz: i1 -= 1
        return valueStep,[i*valueStep for i in range(i0,i1+1)]

    def _calcTickPositions(self):
        return self._calcStepAndTickPositions()[1]

    def _calcTickmarkPositions(self):
        """Calculate a list of tick positions on the axis.  Returns a list of numbers."""
        # explicit valueSteps wins over any computation
        self._tickValues = getattr(self,'valueSteps',None)
        if self._tickValues: return self._tickValues
        self._tickValues = self._calcTickPositions()
        self._adjustAxisTicks()
        return self._tickValues

    def _calcValueStep(self):
        '''Calculate _valueStep for the axis or get from valueStep.'''
        if self.valueStep is None:
            rawRange = self._valueMax - self._valueMin
            # interval limited both by maximumTicks and by minimumTickSpacing
            rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing))
            self._valueStep = nextRoundNumber(rawInterval)
        else:
            self._valueStep = self.valueStep

    def _allIntTicks(self):
        return _allInt(self._tickValues)

    def makeTickLabels(self):
        """Build the tick Labels (a Group), honouring labelTextFormat which may
        be a %-format string, a list of strings, a callable, or a TickLabeller."""
        g = Group()
        if not self.visibleLabels: return g

        f = self._labelTextFormat       # perhaps someone already set it
        if f is None:
            f = self.labelTextFormat or (self._allIntTicks() and '%.0f' or str)
        elif f is str and self._allIntTicks(): f = '%.0f'
        elif hasattr(f,'calcPlaces'):
            f.calcPlaces(self._tickValues)
        post = self.labelTextPostFormat
        scl = self.labelTextScale
        pos = [self._x, self._y]
        d = self._dataIndex
        pos[1-d] = self._labelAxisPos()
        labels = self.labels

        # skipEndL: optionally suppress labels coinciding with the axis ends.
        if self.skipEndL!='none':
            if self.isXAxis:
                sk = self._x
            else:
                sk = self._y
            if self.skipEndL=='start':
                sk = [sk]
            else:
                sk = [sk,sk+self._length]
                if self.skipEndL=='end':
                    del sk[0]
        else:
            sk = []

        nticks = len(self._tickValues)
        nticks1 = nticks - 1
        for i,tick in enumerate(self._tickValues):
            # negative-index overrides (counted from the end) take priority
            label = i-nticks
            if label in labels:
                label = labels[label]
            else:
                label = labels[i]

            if f and label.visible:
                v = self.scale(tick)
                if sk:
                    for skv in sk:
                        if abs(skv-v)<1e-6:
                            v = None
                            break
                if v is not None:
                    if scl is not None:
                        t = tick*scl
                    else:
                        t = tick
                    if isinstance(f, str): txt = f % t
                    elif isSeq(f):
                        #it's a list, use as many items as we get
                        if i < len(f):
                            txt = f[i]
                        else:
                            txt = ''
                    elif hasattr(f,'__call__'):
                        if isinstance(f,TickLabeller):
                            txt = f(self,t)
                        else:
                            txt = f(t)
                    else:
                        raise ValueError('Invalid labelTextFormat %s' % f)
                    if post: txt = post % txt
                    pos[d] = v
                    label.setOrigin(*pos)
                    label.setText(txt)

                    #special property to ensure a label doesn't project beyond the bounds of an x-axis
                    if self.keepTickLabelsInside:
                        if isinstance(self, XValueAxis):  #not done yet for y axes
                            a_x = self._x
                            if not i:  # first one
                                x0, y0, x1, y1 = label.getBounds()
                                if x0 < a_x:
                                    label = label.clone(dx=label.dx + a_x - x0)
                            if i==nticks1:  # final one
                                a_x1 = a_x +self._length
                                x0, y0, x1, y1 = label.getBounds()
                                if x1 > a_x1:
                                    label=label.clone(dx=label.dx-x1+a_x1)
                    g.add(label)

        return g

    def scale(self, value):
        """Converts a numeric value to a plotarea position.

        The chart first configures the axis, then asks it to map data
        values into drawing coordinates.  None is treated as 0.
        """
        assert self._configured, "Axis cannot scale numbers before it is configured"
        if value is None: value = 0
        #this could be made more efficient by moving the definition of org and sf into the configuration
        org = (self._x, self._y)[self._dataIndex]
        sf = self._scaleFactor
        if self.reverseDirection:
            sf = -sf
            org += self._length
        return org + sf*(value - self._valueMin)
class XValueAxis(_XTicks,ValueAxis):
    "X/value axis"

    _attrMap = AttrMap(BASE=ValueAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 0

    def __init__(self,**kw):
        """Set up label anchoring, tick geometry and join defaults for an x/value axis."""
        ValueAxis.__init__(self,**kw)

        self.labels.boxAnchor = 'n'
        self.labels.dx = 0
        self.labels.dy = -5

        self.tickUp = 0
        self.tickDown = 5

        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        """Return a small self-contained Drawing showing this axis (for docs/tests)."""
        self.setPosition(20, 50, 150)
        self.configure([(10,20,30,40,50)])

        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        """Join with y-axis using some mode.

        mode: 'bottom'/'top' snap to the y-axis ends, 'value' places this
        axis at y-axis data value *pos*, 'points' places it at absolute
        coordinate *pos*.  Results are coerced to float via * 1.0.
        """
        _assertYAxis(yAxis)
        if mode == 'bottom':
            self._y = yAxis._y * 1.0
        elif mode == 'top':
            self._y = (yAxis._y + yAxis._length) * 1.0
        elif mode == 'value':
            self._y = yAxis.scale(pos) * 1.0
        elif mode == 'points':
            self._y = pos * 1.0

    def _joinToAxis(self):
        # Apply joinAxis lazily; unlike the category axes, an unset
        # joinAxisMode defaults to 'bottom' here.
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode or 'bottom'
            if jam in ('bottom', 'top'):
                self.joinToAxis(ja, mode=jam)
            elif jam in ('value', 'points'):
                self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)

    def makeAxis(self):
        """Build the axis line itself (a Group); empty if visibleAxis is false."""
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g

        # loLLen/hiLLen extend the line a little beyond the tick span.
        axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g
#additional utilities to help specify calendar dates on which tick marks
#are to be plotted. After some thought, when the magic algorithm fails,
#we can let them specify a number of days-of-the-year to tick in any given
#year.
#################################################################################
#
# Preliminary support objects/functions for the axis used in time series charts
#
#################################################################################
# Lowercase three-letter month abbreviations, in calendar order.
_months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
# Maximum day number for each month (29 for February so recurring dates work).
_maxDays = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

def parseDayAndMonth(dmstr):
    """Parse a year-less date string like "31-Dec" into a (day, month) pair.

    The month is returned as a 1-based integer; "29-Feb" is accepted so the
    result can describe recurring dates.  Invalid input raises an error
    (ValueError for a malformed string or unknown month, AssertionError for
    an out-of-range day).
    """
    day_part, month_part = dmstr.split('-')
    dd = int(day_part)
    mm = _months.index(month_part.lower()) + 1
    assert dd <= _maxDays[mm-1]
    return (dd, mm)
class _isListOfDaysAndMonths(Validator):
    """Validator accepting sequences of year-less date strings like "31-Dec".

    Every element must parse via parseDayAndMonth (29-Feb allowed, so the
    values can describe recurring dates); anything that is not a sequence,
    or contains one unparseable element, fails validation.
    """
    def test(self,x):
        if not isSeq(x):
            return False
        ok = True
        for item in x:
            try:
                parseDayAndMonth(item)
            except:
                # keep scanning; one bad element already decides the outcome
                ok = False
        return ok

    def normalize(self,x):
        # stored exactly as presented, it's the most presentable way
        return x
# Shared singleton instance used in attribute maps below.
isListOfDaysAndMonths = _isListOfDaysAndMonths()

# Candidate month strides tried in order by NormalDateXValueAxis._xAxisTicker
# when hunting for a tick spacing that fits the axis length.
_NDINTM = 1,2,3,6,12,24,60,120,180,240,300,360,420,480,540,600,720,840,960,1080,1200,2400
class NormalDateXValueAxis(XValueAxis):
"""An X axis applying additional rules.
Depending on the data and some built-in rules, the axis
displays normalDate values as nicely formatted dates.
The client chart should have NormalDate X values.
"""
_attrMap = AttrMap(BASE = XValueAxis,
bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
forceDatesEachYear = AttrMapValue(isListOfDaysAndMonths, desc='List of dates in format "31-Dec",' +
'"1-Jan". If present they will always be used for tick marks in the current year, rather ' +
'than the dates chosen by the automatic algorithm. Hyphen compulsory, case of month optional.'),
xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
specifiedTickDates = AttrMapValue(NoneOr(SequenceOf(isNormalDate)), desc='Actual tick values to use; no calculations done'),
specialTickClear = AttrMapValue(isBoolean, desc='clear rather than delete close ticks when forced first/end dates'),
skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
)
_valueClass = normalDate.ND
def __init__(self,**kw):
XValueAxis.__init__(self,**kw)
# some global variables still used...
self.bottomAxisLabelSlack = 0.1
self.niceMonth = 1
self.forceEndDate = 0
self.forceFirstDate = 0
self.forceDatesEachYear = []
self.dailyFreq = 0
self.xLabelFormat = "{mm}/{yy}"
self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
self.specialTickClear = 0
self.valueSteps = self.specifiedTickDates = None
def _scalar2ND(self, x):
"Convert a scalar to a NormalDate value."
d = self._valueClass()
d.normalize(x)
return d
def _dateFormatter(self, v):
"Create a formatted label for some value."
if not isinstance(v,normalDate.NormalDate):
v = self._scalar2ND(v)
d, m = normalDate._dayOfWeekName, normalDate._monthName
try:
normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
return v.formatMS(self.xLabelFormat)
finally:
normalDate._dayOfWeekName, normalDate._monthName = d, m
def _xAxisTicker(self, xVals):
"""Complex stuff...
Needs explanation...
Yes please says Andy :-(. Modified on 19 June 2006 to attempt to allow
a mode where one can specify recurring days and months.
"""
axisLength = self._length
formatter = self._dateFormatter
if isinstance(formatter,TickLabeller):
def formatter(tick):
return self._dateFormatter(self,tick)
firstDate = xVals[0]
endDate = xVals[-1]
labels = self.labels
fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
RBL = _textBoxLimits(formatter(firstDate).split('\n'),fontName,
fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
xLabelW = RBL[1]-RBL[0]
xLabelH = RBL[3]-RBL[2]
w = max(xLabelW,labels.width,self.minimumTickSpacing)
W = w+w*self.bottomAxisLabelSlack
n = len(xVals)
ticks = []
labels = []
maximumTicks = self.maximumTicks
if self.specifiedTickDates:
VC = self._valueClass
ticks = [VC(x) for x in self.specifiedTickDates]
labels = [formatter(d) for d in ticks]
if self.forceFirstDate and firstDate==ticks[0] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate==ticks[-1] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
return ticks, labels
def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
ticks.insert(0,xVals[i])
labels.insert(0,formatter(xVals[i]))
#AR 20060619 - first we try the approach where the user has explicitly
#specified the days of year to be ticked. Other explicit routes may
#be added.
if self.forceDatesEachYear:
forcedPartialDates = list(map(parseDayAndMonth, self.forceDatesEachYear))
#generate the list of dates in the range.
#print 'dates range from %s to %s' % (firstDate, endDate)
firstYear = firstDate.year()
lastYear = endDate.year()
ticks = []
labels = []
yyyy = firstYear
#generate all forced dates between the year it starts and the year it
#ends, adding them if within range.
while yyyy <= lastYear:
for (dd, mm) in forcedPartialDates:
theDate = normalDate.ND((yyyy, mm, dd))
if theDate >= firstDate and theDate <= endDate:
ticks.append(theDate)
labels.append(formatter(theDate))
yyyy += 1
#first and last may still be forced in.
if self.forceFirstDate and firstDate!=ticks[0]:
ticks.insert(0, firstDate)
labels.insert(0,formatter(firstDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate!=ticks[-1]:
ticks.append(endDate)
labels.append(formatter(endDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
#print 'xVals found on forced dates =', ticks
return ticks, labels
#otherwise, we apply the 'magic algorithm...' which looks for nice spacing
#based on the size and separation of the labels.
for d in _NDINTM:
k = n/d
if k<=maximumTicks and k*W <= axisLength:
i = n-1
if self.niceMonth:
j = endDate.month() % (d<=12 and d or 12)
if j:
if self.forceEndDate:
addTick(i)
ticks[0]._doSubTicks=0
i -= j
#weird first date ie not at end of month
try:
wfd = firstDate.month() == xVals[1].month()
except:
wfd = 0
while i>=wfd:
addTick(i)
i -= d
if self.forceFirstDate and ticks[0]!=firstDate:
addTick(0)
ticks[0]._doSubTicks=0
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and self.niceMonth and j:
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
try:
if labels[0] and labels[0]==labels[1]:
del ticks[1], labels[1]
except IndexError:
pass
return ticks, labels
raise ValueError('Problem selecting NormalDate value axis tick positions')
def _convertXV(self,data):
'''Convert all XValues to a standard normalDate type'''
VC = self._valueClass
for D in data:
for i in range(len(D)):
x, y = D[i]
if not isinstance(x,VC):
D[i] = (VC(x),y)
    def _getStepsAndLabels(self,xVals):
        """Compute tick positions and label strings for the sorted *xVals*.

        When ``self.dailyFreq`` is set, the daily dates are first reduced
        to one representative per month before being handed to
        ``self._xAxisTicker``.  The representative collected is the FIRST
        date seen in each month (despite the ``xEOM`` name suggesting
        end-of-month), plus the final date overall.
        """
        if self.dailyFreq:
            xEOM = []           # one representative date per month
            pm = 0              # previous month number (0 = none seen yet)
            px = xVals[0]       # first date of the month currently scanned
            for x in xVals:
                m = x.month()
                if pm!=m:
                    # Month boundary: record the previous month's first date
                    # (skipped for pm == 0, i.e. before any month is seen).
                    if pm: xEOM.append(px)
                    pm = m
                    px = x
            px = xVals[-1]
            # NOTE(review): if every date falls within a single month, xEOM
            # is still empty here and xEOM[-1] raises IndexError -- confirm
            # callers never pass single-month data with dailyFreq set.
            if xEOM[-1]!=x: xEOM.append(px)
            steps, labels = self._xAxisTicker(xEOM)
        else:
            steps, labels = self._xAxisTicker(xVals)
        return steps, labels
def configure(self, data):
self._convertXV(data)
xVals = set()
for x in data:
for dv in x:
xVals.add(dv[0])
xVals = list(xVals)
xVals.sort()
steps,labels = self._getStepsAndLabels(xVals)
valueMin, valueMax = self.valueMin, self.valueMax
if valueMin is None: valueMin = xVals[0]
if valueMax is None: valueMax = xVals[-1]
self._valueMin, self._valueMax = valueMin, valueMax
self._tickValues = steps
self._labelTextFormat = labels
self._scaleFactor = self._length / float(valueMax - valueMin)
self._tickValues = steps
self._configured = 1
class YValueAxis(_YTicks,ValueAxis):
    "Y/value axis"
    _attrMap = AttrMap(BASE=ValueAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # This axis reads the y component (index 1) of each data point.
    _dataIndex = 1

    def __init__(self):
        ValueAxis.__init__(self)
        # Labels sit to the left of the axis line, anchored at their
        # east edge and nudged 5 points further left.
        self.labels.boxAnchor = 'e'
        self.labels.dx = -5
        self.labels.dy = 0
        # Ticks extend 5 points to the left, none to the right.
        self.tickLeft = 5
        self.tickRight = 0
        # No joined axis until the caller configures one.
        self.joinAxis = self.joinAxisMode = self.joinAxisPos = None

    def demo(self):
        """Return a small demo drawing containing this axis."""
        series = [(10, 20, 30, 42)]
        self.setPosition(100, 10, 80)
        self.configure(series)
        dwg = Drawing(200, 100)
        dwg.add(self)
        return dwg

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        _assertXAxis(xAxis)
        if mode == 'left':
            x = xAxis._x
        elif mode == 'right':
            x = xAxis._x + xAxis._length
        elif mode == 'value':
            x = xAxis.scale(pos)
        elif mode == 'points':
            x = pos
        else:
            # Unknown mode: leave our position untouched.
            return
        self._x = x * 1.0

    def _joinToAxis(self):
        """Apply the configured joinAxis/joinAxisMode, if any."""
        other = self.joinAxis
        if not other:
            return
        mode = self.joinAxisMode
        if mode in ('left', 'right'):
            self.joinToAxis(other, mode=mode)
        elif mode in ('value', 'points'):
            self.joinToAxis(other, mode=mode, pos=self.joinAxisPos)

    def makeAxis(self):
        """Build the axis line group (empty when the axis is hidden)."""
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis:
            return g
        y0 = self._y - self.loLLen
        y1 = self._y + self._length + self.hiLLen
        axis = Line(self._x, y0, self._x, y1)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g
class AdjYValueAxis(YValueAxis):
    """A Y-axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    may choose to adjust its range and origin.
    """
    _attrMap = AttrMap(BASE = YValueAxis,
        leftAxisPercent = AttrMapValue(isBoolean, desc='When true add percent sign to label values.'),
        leftAxisOrigShiftIPC = AttrMapValue(isNumber, desc='Lowest label shift interval ratio.'),
        leftAxisOrigShiftMin = AttrMapValue(isNumber, desc='Minimum amount to shift.'),
        leftAxisSkipLL0 = AttrMapValue(EitherOr((isBoolean,isListOfNumbers)), desc='Skip/Keep lowest tick label when true/false.\nOr skiplist'),
        labelVOffset = AttrMapValue(isNumber, desc='add this to the labels'),
        )

    def __init__(self,**kw):
        YValueAxis.__init__(self,**kw)
        # Minimum span the axis must cover; the range is widened around its
        # midpoint in _rangeAdjust when the data span is narrower.
        self.requiredRange = 30
        self.leftAxisPercent = 1
        self.leftAxisOrigShiftIPC = 0.15
        self.leftAxisOrigShiftMin = 12
        self.leftAxisSkipLL0 = self.labelVOffset = 0
        self.valueSteps = None

    def _rangeAdjust(self):
        "Adjusts the value range of the axis."
        from reportlab.graphics.charts.utils import find_good_grid, ticks
        y_min, y_max = self._valueMin, self._valueMax
        m = self.maximumTicks
        # Candidate tick counts, capped at maximumTicks.
        n = list(filter(lambda x,m=m: x<=m,[4,5,6,7,8,9]))
        if not n: n = [m]
        valueStep, requiredRange = self.valueStep, self.requiredRange
        if requiredRange and y_max - y_min < requiredRange:
            # Widen the range symmetrically around its midpoint until it
            # covers requiredRange ...
            y1, y2 = find_good_grid(y_min, y_max,n=n,grid=valueStep)[:2]
            if y2 - y1 < requiredRange:
                ym = (y1+y2)*0.5
                y1 = min(ym-requiredRange*0.5,y_min)
                y2 = max(ym+requiredRange*0.5,y_max)
                # ... but do not let the lower bound drop below 100 (when
                # all data is >= 100, presumably index-style data) or
                # below 0 (when all data is non-negative).
                if y_min>=100 and y1<100:
                    y2 = y2 + 100 - y1
                    y1 = 100
                elif y_min>=0 and y1<0:
                    y2 = y2 - y1
                    y1 = 0
            self._valueMin, self._valueMax = y1, y2
        T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        abf = self.avoidBoundFrac
        if abf:
            # Pad the range by a fraction of the tick interval so data does
            # not sit flush on the boundary ticks, then recompute the ticks.
            i1 = (T[1]-T[0])
            if not isSeq(abf):
                i0 = i1 = i1*abf
            else:
                i0 = i1*abf[0]
                i1 = i1*abf[1]
            _n = getattr(self,'_cValueMin',T[0])
            _x = getattr(self,'_cValueMax',T[-1])
            if _n - T[0] < i0: self._valueMin = self._valueMin - i0
            if T[-1]-_x < i1: self._valueMax = self._valueMax + i1
            T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        self._valueMin = T[0]
        self._valueMax = T[-1]
        self._tickValues = T
        if self.labelTextFormat is None:
            self._labelTextFormat = L
        else:
            self._labelTextFormat = self.labelTextFormat

        if abs(self._valueMin-100)<1e-6:
            # Axis starting exactly at 100: shift the origin down slightly
            # so the 100 line does not coincide with the x-axis.
            self._calcValueStep()
            vMax, vMin = self._valueMax, self._valueMin
            m = max(self.leftAxisOrigShiftIPC*self._valueStep,
                    (vMax-vMin)*self.leftAxisOrigShiftMin/self._length)
            self._valueMin = self._valueMin - m

        if self.leftAxisSkipLL0:
            if isSeq(self.leftAxisSkipLL0):
                # A sequence blanks the labels at the listed indices;
                # out-of-range indices are silently ignored.
                for x in self.leftAxisSkipLL0:
                    try:
                        L[x] = ''
                    except IndexError:
                        pass
            # The lowest label is always blanked.
            L[0] = ''
# Sample functions.
def sample0a():
    """Demo: a single x-category axis holding two buckets."""
    dwg = Drawing(400, 200)
    series = [(10, 20)]
    cat_axis = XCategoryAxis()
    cat_axis.setPosition(75, 75, 300)
    cat_axis.configure(series)
    cat_axis.categoryNames = ['Ying', 'Yang']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    return dwg
def sample0b():
    """Demo: a single x-category axis holding exactly one bucket."""
    dwg = Drawing(400, 200)
    series = [(10,)]
    cat_axis = XCategoryAxis()
    cat_axis.setPosition(75, 75, 300)
    cat_axis.configure(series)
    cat_axis.categoryNames = ['Ying']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    return dwg
def sample1():
    """Demo: an x-category axis and a y-value axis, not joined."""
    from reportlab.graphics.shapes import _baseGFontNameB
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    cat_axis = XCategoryAxis()
    cat_axis.setPosition(75, 75, 300)
    cat_axis.configure(series)
    cat_axis.categoryNames = ['Beer','Wine','Meat','Cannelloni']
    cat_axis.labels.boxAnchor = 'n'
    # Give the last label a distinctive offset, angle and bold font.
    last = cat_axis.labels[3]
    last.dy = -15
    last.angle = 30
    last.fontName = _baseGFontNameB
    val_axis = YValueAxis()
    val_axis.setPosition(50, 50, 125)
    val_axis.configure(series)
    dwg.add(cat_axis)
    dwg.add(val_axis)
    return dwg
def sample4a():
    """Demo: x joined to the y axis 100 points up, via 'points' mode."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.joinAxis = y_axis
    x_axis.joinAxisMode = 'points'
    x_axis.joinAxisPos = 100
    x_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample4b():
    """Demo: x joined to the y axis at data value 35, via 'value' mode."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.joinAxis = y_axis
    x_axis.joinAxisMode = 'value'
    x_axis.joinAxisPos = 35
    x_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample4c():
    """Demo: x joined to the bottom end of the y axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.joinAxis = y_axis
    x_axis.joinAxisMode = 'bottom'
    x_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample4c1():
    """Demo: joined x/y value axes with axis lines and ticks hidden."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    y_axis.visibleAxis = 0
    y_axis.visibleTicks = 0
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.joinAxis = y_axis
    x_axis.joinAxisMode = 'bottom'
    x_axis.configure(series)
    x_axis.visibleAxis = 0
    x_axis.visibleTicks = 0
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample4d():
    """Demo: x joined to the top end of the y axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.joinAxis = y_axis
    x_axis.joinAxisMode = 'top'
    x_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample5a():
    """Demo: y joined to the x axis 100 points along, via 'points' mode."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis.setPosition(50, 50, 300)
    x_axis.configure(series)
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.joinAxis = x_axis
    y_axis.joinAxisMode = 'points'
    y_axis.joinAxisPos = 100
    y_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample5b():
    """Demo: y joined to the x axis at data value 35, via 'value' mode."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis.setPosition(50, 50, 300)
    x_axis.configure(series)
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.joinAxis = x_axis
    y_axis.joinAxisMode = 'value'
    y_axis.joinAxisPos = 35
    y_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample5c():
    """Demo: y joined to the right end of the x axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis.setPosition(50, 50, 300)
    x_axis.configure(series)
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.joinAxis = x_axis
    y_axis.joinAxisMode = 'right'
    y_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample5d():
    """Demo: y joined to the left end of the x axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis.setPosition(50, 50, 300)
    x_axis.configure(series)
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.joinAxis = x_axis
    y_axis.joinAxisMode = 'left'
    y_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(y_axis)
    return dwg
def sample6a():
    """Demo: x-category axis joined to the top end of the y-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    cat_axis = XCategoryAxis()
    cat_axis._length = 300
    cat_axis.configure(series)
    cat_axis.joinAxis = y_axis
    cat_axis.joinAxisMode = 'top'
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    dwg.add(y_axis)
    return dwg
def sample6b():
    """Demo: x-category axis joined to the bottom end of the y-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    cat_axis = XCategoryAxis()
    cat_axis._length = 300
    cat_axis.configure(series)
    cat_axis.joinAxis = y_axis
    cat_axis.joinAxisMode = 'bottom'
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    dwg.add(y_axis)
    return dwg
def sample6c():
    """Demo: x-category axis joined 100 points up the y-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    cat_axis = XCategoryAxis()
    cat_axis._length = 300
    cat_axis.configure(series)
    cat_axis.joinAxis = y_axis
    cat_axis.joinAxisMode = 'points'
    cat_axis.joinAxisPos = 100
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    dwg.add(y_axis)
    return dwg
def sample6d():
    """Demo: x-category axis joined at data value 20 of the y-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    y_axis = YValueAxis()
    y_axis.setPosition(50, 50, 125)
    y_axis.configure(series)
    cat_axis = XCategoryAxis()
    cat_axis._length = 300
    cat_axis.configure(series)
    cat_axis.joinAxis = y_axis
    cat_axis.joinAxisMode = 'value'
    cat_axis.joinAxisPos = 20
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'n'
    dwg.add(cat_axis)
    dwg.add(y_axis)
    return dwg
def sample7a():
    """Demo: y-category axis joined to the right end of the x-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.configure(series)
    cat_axis = YCategoryAxis()
    cat_axis.setPosition(50, 50, 125)
    cat_axis.joinAxis = x_axis
    cat_axis.joinAxisMode = 'right'
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'e'
    cat_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(cat_axis)
    return dwg
def sample7b():
    """Demo: y-category axis joined to the left end of the x-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.configure(series)
    cat_axis = YCategoryAxis()
    cat_axis.setPosition(50, 50, 125)
    cat_axis.joinAxis = x_axis
    cat_axis.joinAxisMode = 'left'
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'e'
    cat_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(cat_axis)
    return dwg
def sample7c():
    """Demo: y-category axis joined at data value 30 of the x-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.configure(series)
    cat_axis = YCategoryAxis()
    cat_axis.setPosition(50, 50, 125)
    cat_axis.joinAxis = x_axis
    cat_axis.joinAxisMode = 'value'
    cat_axis.joinAxisPos = 30
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'e'
    cat_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(cat_axis)
    return dwg
def sample7d():
    """Demo: y-category axis joined 200 points along the x-value axis."""
    dwg = Drawing(400, 200)
    series = [(10, 20, 30, 42)]
    x_axis = XValueAxis()
    x_axis._length = 300
    x_axis.configure(series)
    cat_axis = YCategoryAxis()
    cat_axis.setPosition(50, 50, 125)
    cat_axis.joinAxis = x_axis
    cat_axis.joinAxisMode = 'points'
    cat_axis.joinAxisPos = 200
    cat_axis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    cat_axis.labels.boxAnchor = 'e'
    cat_axis.configure(series)
    dwg.add(x_axis)
    dwg.add(cat_axis)
    return dwg
| 38.497863
| 222
| 0.567153
|
4a0482ba3208a18ba273714efaf5f0e402f5be45
| 786
|
py
|
Python
|
libflip/Globals.py
|
fivelinesinteractive/libflip
|
476ec1ca95a25690bc011c73065fa44bae1fdfd7
|
[
"Apache-2.0"
] | null | null | null |
libflip/Globals.py
|
fivelinesinteractive/libflip
|
476ec1ca95a25690bc011c73065fa44bae1fdfd7
|
[
"Apache-2.0"
] | null | null | null |
libflip/Globals.py
|
fivelinesinteractive/libflip
|
476ec1ca95a25690bc011c73065fa44bae1fdfd7
|
[
"Apache-2.0"
] | null | null | null |
from .Assets import Assets
class Globals:
    """Shared world state for a libflip game instance."""

    instance = None     # singleton slot, assigned elsewhere
    MAX_SIZE = 35
    cache = {}

    def __init__(self, width, height, grid_size, collisions):
        """Initialise screen geometry, sprite registries and input tables.

        width/height are pixel dimensions, grid_size the cell size in
        pixels, collisions the pixel-collision flag stored verbatim.
        """
        self.WIDTH = width
        self.HEIGHT = height
        self.GRID_SIZE = grid_size
        # NOTE(review): true division -- grid dimensions may be floats when
        # width/height are not exact multiples of grid_size; confirm intent.
        self.GRID_WIDTH = width / grid_size
        self.GRID_HEIGHT = height / grid_size
        self.PIXEL_COLLISIONS = collisions
        self.sprites = []
        self.backgrounds = []   # sprites/scene things that are in the background
        self.cells = {}
        self.tags = {}
        self.animations = []
        # Per-event-type registries of key handlers.
        self.keys_registered = {'keydown': {}, 'keyup': {}}
        self.keys_pressed = []
        self.mouse_motion = []
        self.register_collisions = []
        self.assets = Assets()
| 29.111111
| 78
| 0.566158
|
4a04862aa91f58d6591324921defbe6f6be5c75a
| 4,570
|
py
|
Python
|
postman/query.py
|
chhell/django-postman
|
dbd48a7a94f4abd4748c174cf052b8da6f66800b
|
[
"BSD-3-Clause"
] | null | null | null |
postman/query.py
|
chhell/django-postman
|
dbd48a7a94f4abd4748c174cf052b8da6f66800b
|
[
"BSD-3-Clause"
] | null | null | null |
postman/query.py
|
chhell/django-postman
|
dbd48a7a94f4abd4748c174cf052b8da6f66800b
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from types import MethodType
from django import VERSION
from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.constants import INNER
from django.db.models.sql.query import Query
import six
class Proxy(object):
    """
    Instance proxy: forwards attribute reads/writes to a wrapped target
    object, rebinding any method so that it receives the proxy as ``self``.
    """
    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        attr = getattr(self._target, name)
        if not isinstance(attr, MethodType):
            return attr
        # Rebind the underlying function to the proxy instead of the target,
        # so overridden methods on subclasses of the proxy take effect.
        if six.PY3:
            return MethodType(attr.__func__, self)
        return MethodType(attr.__func__, self, self._target.__class__)

    def __setattr__(self, name, value):
        # Only '_target' itself lives on the proxy; everything else is
        # written through to the wrapped object.
        if name == '_target':
            object.__setattr__(self, name, value)
        else:
            setattr(self._target, name, value)

    # Added for Django 1.7: the proxied object may be used as a callable,
    # see db/models/sql/compiler.py/quote_name_unless_alias().
    def __call__(self, name):
        return self._target(name)
class CompilerProxy(Proxy, SQLCompiler):
    """
    A proxy to a compiler.

    Rewrites the SQL produced by the wrapped compiler so that the query's
    extra querysets (see PostmanQuery.pm_set_extra) are UNIONed together
    and INNER JOINed onto the base table, right after the FROM clause.
    """

    # @Override
    def as_sql(self, *args, **kwargs):
        """Return (sql, params) with the extra UNION subquery joined in."""
        sql, params = self._target.as_sql(*args, **kwargs)
        if not sql:  # is the case with a Paginator on an empty folder
            return sql, params
        # mimics compiler.py/SQLCompiler/get_from_clause() and as_sql()
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        alias = self.query.tables[0] if VERSION < (2, 0) else self.query.base_table
        # if VERSION >= (1, 8):
        from_clause = self.query.alias_map[alias]
        alias = from_clause.table_alias
        clause_sql, _ = self.compile(from_clause)  # clause_sql, clause_params
        clause = ' '.join(['FROM', clause_sql])
        # else: <purged>
        # Splice point: immediately after the FROM clause of the base table.
        index = sql.index(clause) + len(clause)
        extra_table, extra_params = self.union(self.query.pm_get_extra())
        opts = self.query.get_meta()
        qn2_pk_col = qn2(opts.pk.column)  # usually 'id' but not in case of model inheritance
        # Join the UNION subquery on the primary key column, aliased with
        # the query's pm_alias_prefix.
        new_sql = [
            sql[:index],
            ' {0} ({1}) {2} ON ({3}.{4} = {2}.{5})'.format(
                INNER, extra_table, self.query.pm_alias_prefix, qn(alias), qn2_pk_col, qn2_pk_col),
        ]
        if index < len(sql):
            new_sql.append(sql[index:])
        new_sql = ''.join(new_sql)
        # The subquery's parameters must be inserted between the parameters
        # belonging to the SQL before and after the splice point.
        heading_param_count = sql[:index].count('%s')
        return new_sql, params[:heading_param_count] + extra_params + params[heading_param_count:]

    def union(self, querysets):
        """
        Join several querysets by a UNION clause. Returns the SQL string and the list of parameters.
        """
        # union() is "New in Django 1.11." (docs site)
        # but buggy in 2.0, with a backport in 1.11.8 ; my ticket 29229, fixed in 1.11.12 & 2.0.4.
        # For simplicity, let's even ignore the usable 1.11.0-7 frame.
        # Ticket 29286 reintroduced a bug in 1.11.13 & 2.0.5, by considering oly the annotate() case and not the extra().
        # Ticket 29694 fixed the missing extra() case, but is only effective as of 2.1.1,
        # because extra() is destined to be deprecated.
        # So the final solution here was to replace all extra() by annotate() in this app.
        if VERSION < (1, 11, 12) or (2, 0) <= VERSION < (2, 0, 4):
            # Manual fallback for Django versions with a broken queryset.union().
            result_sql, result_params = [], []
            for qs in querysets:
                sql, params = qs.query.sql_with_params()
                result_sql.append(sql)
                result_params.extend(params)
            return ' UNION '.join(result_sql), tuple(result_params)
        else:
            qs = querysets[0].union(*querysets[1:])
            return qs.query.sql_with_params()
class PostmanQuery(Query):
    """
    A custom SQL query carrying an extra table that CompilerProxy INNER
    JOINs into the generated SQL at compile time.
    """
    pm_alias_prefix = 'PM'

    # @Override
    def __init__(self, *args, **kwargs):
        super(PostmanQuery, self).__init__(*args, **kwargs)
        self._pm_table = None

    # @Override
    def clone(self, *args, **kwargs):
        """Propagate the extra table onto cloned queries."""
        copy = super(PostmanQuery, self).clone(*args, **kwargs)
        copy._pm_table = self._pm_table
        return copy

    # @Override
    def get_compiler(self, *args, **kwargs):
        """Wrap the standard compiler in the SQL-rewriting proxy."""
        base = super(PostmanQuery, self).get_compiler(*args, **kwargs)
        return CompilerProxy(base)

    def pm_set_extra(self, table):
        self._pm_table = table

    def pm_get_extra(self):
        return self._pm_table
| 35.984252
| 121
| 0.608534
|
4a04875d68c746e0462f5b1eaa2b02718ab3abe6
| 9,949
|
py
|
Python
|
tests/impls/nn_index/test_flann.py
|
Purg/SMQTK-Indexing
|
24b5f875ec01a93f1c4842381a6de88041166604
|
[
"BSD-3-Clause"
] | null | null | null |
tests/impls/nn_index/test_flann.py
|
Purg/SMQTK-Indexing
|
24b5f875ec01a93f1c4842381a6de88041166604
|
[
"BSD-3-Clause"
] | null | null | null |
tests/impls/nn_index/test_flann.py
|
Purg/SMQTK-Indexing
|
24b5f875ec01a93f1c4842381a6de88041166604
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import unittest.mock as mock
import unittest
import numpy
import pytest
from smqtk_core.configuration import configuration_test_helper
from smqtk_descriptors.impls.descriptor_element.memory import DescriptorMemoryElement
from smqtk_indexing import NearestNeighborsIndex
from smqtk_indexing.impls.nn_index.flann import FlannNearestNeighborsIndex
# Don't bother running tests of the class is not usable
@pytest.mark.skipif(not FlannNearestNeighborsIndex.is_usable(),
reason="FlannNearestNeighborsIndex does not report as "
"usable.")
class TestFlannIndex (unittest.TestCase):
RAND_SEED = 42
def _make_inst(self, dist_method: str) -> FlannNearestNeighborsIndex:
"""
Make an instance of FlannNearestNeighborsIndex
"""
return FlannNearestNeighborsIndex(distance_method=dist_method,
random_seed=self.RAND_SEED)
def test_impl_findable(self) -> None:
# Already here because the implementation is reporting itself as
# usable.
self.assertIn(FlannNearestNeighborsIndex,
NearestNeighborsIndex.get_impls())
def test_configuration(self) -> None:
index_filepath = '/index_filepath'
para_filepath = '/param_fp'
descr_cache_fp = '/descrcachefp'
c = FlannNearestNeighborsIndex(
index_uri=index_filepath,
parameters_uri=para_filepath,
descriptor_cache_uri=descr_cache_fp,
distance_method='hik', random_seed=42,
)
for inst in configuration_test_helper(c): # type: FlannNearestNeighborsIndex
assert inst._index_uri == index_filepath
assert inst._index_param_uri == para_filepath
assert inst._descr_cache_uri == descr_cache_fp
assert inst._distance_method == 'hik'
assert inst._rand_seed == 42
def test_has_model_data_no_uris(self) -> None:
f = FlannNearestNeighborsIndex()
self.assertFalse(f._has_model_data())
def test_has_model_data_empty_elements(self) -> None:
f = FlannNearestNeighborsIndex('', '', '')
self.assertFalse(f._has_model_data())
def test_load_flann_model_empty_data_elements(self) -> None:
# Construct index with valid, but empty, data URIs instances
empty_data = 'base64://'
f = FlannNearestNeighborsIndex(empty_data, empty_data, empty_data)
# Load method should do nothing but set PID since given data was
# empty.
f._load_flann_model()
self.assertIsNone(f._descr_cache)
self.assertIsNone(f._flann)
self.assertIsNone(f._flann_build_params)
self.assertIsNotNone(f._pid)
@mock.patch("smqtk_indexing.impls.nn_index.flann"
".FlannNearestNeighborsIndex._load_flann_model")
def test_has_model_data_valid_uris(self, _m_flann_lfm: mock.MagicMock) -> None:
# Mocking flann data loading that occurs in constructor when given
# non-empty URI targets
f = FlannNearestNeighborsIndex(
'base64://bW9kZWxEYXRh', # 'modelData'
'base64://cGFyYW1EYXRh', # 'paramData'
'base64://ZGVzY3JEYXRh', # 'descrData'
)
self.assertTrue(f._has_model_data())
def test_build_index_one(self) -> None:
d = DescriptorMemoryElement('test', 0)
d.set_vector(numpy.zeros(8, float))
index = self._make_inst('euclidean')
index.build_index([d])
self.assertListEqual(
index._descr_cache,
[d]
)
self.assertIsNotNone(index._flann)
self.assertIsInstance(index._flann_build_params, dict)
def test_build_index_with_cache(self) -> None:
# Empty memory data elements for storage
empty_data = 'base64://'
f = FlannNearestNeighborsIndex(empty_data, empty_data, empty_data)
# Internal elements should initialize have zero-length byte values
assert f._index_elem is not None
assert f._index_param_elem is not None
assert f._descr_cache_elem is not None
self.assertEqual(len(f._index_elem.get_bytes()), 0)
self.assertEqual(len(f._index_param_elem.get_bytes()), 0)
self.assertEqual(len(f._descr_cache_elem.get_bytes()), 0)
# Make unit vectors, one for each feature dimension.
dim = 8
test_descriptors = []
for i in range(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
d = DescriptorMemoryElement('unit', i)
d.set_vector(v)
test_descriptors.append(d)
f.build_index(test_descriptors)
# Internal elements should not have non-zero byte values.
self.assertGreater(len(f._index_elem.get_bytes()), 0)
self.assertGreater(len(f._index_param_elem.get_bytes()), 0)
self.assertGreater(len(f._descr_cache_elem.get_bytes()), 0)
def test_update_index(self) -> None:
# Build index with one descriptor, then "update" with a second
# different descriptor checking that the new cache contains both.
d1 = DescriptorMemoryElement('test', 0)
d1.set_vector(numpy.zeros(8))
d2 = DescriptorMemoryElement('test', 1)
d2.set_vector(numpy.ones(8))
index = self._make_inst('euclidean')
index.build_index([d1])
self.assertEqual(index.count(), 1)
self.assertSetEqual(set(index._descr_cache), {d1})
index.update_index([d2])
self.assertEqual(index.count(), 2)
self.assertSetEqual(set(index._descr_cache), {d1, d2})
def test_nn_known_descriptors_euclidean_unit(self) -> None:
dim = 5
###
# Unit vectors -- Equal distance
#
index = self._make_inst('euclidean')
test_descriptors = []
for i in range(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
test_descriptors.append(
DescriptorMemoryElement('unit', i).set_vector([v])
)
index.build_index(test_descriptors)
# query descriptor -- zero vector
# -> all modeled descriptors should be equally distance (unit
# corners)
q = DescriptorMemoryElement('query', 0)
q.set_vector(numpy.zeros(dim, float))
r, dists = index.nn(q, dim)
# All dists should be 1.0, r order doesn't matter
for d in dists:
self.assertEqual(d, 1.)
def test_nn_known_descriptors_euclidean_ordered(self) -> None:
index = self._make_inst('euclidean')
# make vectors to return in a known euclidean distance order
i = 10
test_descriptors = [
DescriptorMemoryElement('ordered', j).set_vector(numpy.array([j, j*2], float))
for j in range(i)
]
random.shuffle(test_descriptors)
index.build_index(test_descriptors)
# Since descriptors were build in increasing distance from (0,0),
# returned descriptors for a query of [0,0] should be in index
# order.
q = DescriptorMemoryElement('query', 99)
q.set_vector(numpy.array([0, 0], float))
r, dists = index.nn(q, i)
for j, d, dist in zip(range(i), r, dists):
self.assertEqual(d.uuid(), j)
numpy.testing.assert_equal(d.vector(), [j, j*2])
def test_nn_known_descriptors_hik_unit(self) -> None:
dim = 5
###
# Unit vectors - Equal distance
#
index = self._make_inst('hik')
test_descriptors = []
for i in range(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
test_descriptors.append(
DescriptorMemoryElement('unit', i).set_vector(v)
)
index.build_index(test_descriptors)
# query with zero vector
# -> all modeled descriptors have no intersection, dists should be
# 1.0, or maximum distance by histogram intersection.
q = DescriptorMemoryElement('query', 0)
q.set_vector(numpy.zeros(dim, float))
r, dists = index.nn(q, dim)
# All dists should be 1.0, r order doesn't matter
for d in dists:
self.assertEqual(d, 1.)
# query with index element
q = test_descriptors[3]
r, dists = index.nn(q, 1)
self.assertEqual(r[0], q)
self.assertEqual(dists[0], 0.)
r, dists = index.nn(q, dim)
self.assertEqual(r[0], q)
self.assertEqual(dists[0], 0.)
def test_build_index_no_descriptors(self) -> None:
f = FlannNearestNeighborsIndex()
self.assertRaises(
ValueError,
f.build_index, []
)
def test_build_index(self) -> None:
# Empty memory data elements for storage
empty_data = 'base64://'
f = FlannNearestNeighborsIndex(empty_data, empty_data, empty_data)
# Internal elements should initialize have zero-length byte values
assert f._index_elem is not None
assert f._index_param_elem is not None
assert f._descr_cache_elem is not None
self.assertEqual(len(f._index_elem.get_bytes()), 0)
self.assertEqual(len(f._index_param_elem.get_bytes()), 0)
self.assertEqual(len(f._descr_cache_elem.get_bytes()), 0)
# Make unit vectors, one for each feature
dim = 8
test_descriptors = []
for i in range(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
d = DescriptorMemoryElement('unit', i)
d.set_vector(v)
test_descriptors.append(d)
f.build_index(test_descriptors)
# Internal elements should not have non-zero byte values.
self.assertGreater(len(f._index_elem.get_bytes()), 0)
self.assertGreater(len(f._index_param_elem.get_bytes()), 0)
self.assertGreater(len(f._descr_cache_elem.get_bytes()), 0)
| 38.265385
| 90
| 0.630315
|
4a0488e75dedeb3ec1939c6de893eb1649f55af3
| 6,621
|
py
|
Python
|
src/xdist/plugin.py
|
xifeng/pytest-xdist
|
c6f7a035e417287262bd897701a56c8f4bb41a0a
|
[
"MIT"
] | 1
|
2020-04-07T11:56:52.000Z
|
2020-04-07T11:56:52.000Z
|
src/xdist/plugin.py
|
xifeng/pytest-xdist
|
c6f7a035e417287262bd897701a56c8f4bb41a0a
|
[
"MIT"
] | 9
|
2020-08-11T15:19:55.000Z
|
2022-03-12T00:11:12.000Z
|
src/xdist/plugin.py
|
xifeng/pytest-xdist
|
c6f7a035e417287262bd897701a56c8f4bb41a0a
|
[
"MIT"
] | null | null | null |
import os
import py
import pytest
def auto_detect_cpus():
    """Return the number of CPUs usable by this process (always >= 1).

    Prefers os.sched_getaffinity (respects CPU affinity masks); falls back
    to os.cpu_count, then multiprocessing.cpu_count.
    """
    try:
        from os import sched_getaffinity
    except ImportError:
        # workaround https://bitbucket.org/pypy/pypy/issues/2375
        if os.environ.get("TRAVIS") == "true":
            return 2
        try:
            from os import cpu_count
        except ImportError:
            from multiprocessing import cpu_count
        counter = cpu_count
    else:
        def counter():
            return len(sched_getaffinity(0))
    try:
        count = counter()
    except NotImplementedError:
        return 1
    # cpu_count may return None; never report fewer than one CPU.
    return count or 1
class AutoInt(int):
    """An ``int`` subclass marking a worker count that was auto-detected
    (``-n auto``) rather than given explicitly; ``pytest_cmdline_main``
    checks for this type so ``--pdb`` can force the count to 0."""
def parse_numprocesses(s):
    """Parse the -n/--numprocesses option value.

    "auto" yields an AutoInt carrying the detected CPU count; any other
    non-None value is converted with int(); None passes through unchanged.
    """
    if s is None:
        return None
    if s == "auto":
        return AutoInt(auto_detect_cpus())
    return int(s)
def pytest_addoption(parser):
    """Register xdist's command-line options and ini settings with pytest."""
    group = parser.getgroup("xdist", "distributed and subprocess testing")
    # _addoption (vs addoption) registers short single-dash flags.
    group._addoption(
        "-n",
        "--numprocesses",
        dest="numprocesses",
        metavar="numprocesses",
        action="store",
        type=parse_numprocesses,
        help="shortcut for '--dist=load --tx=NUM*popen', "
        "you can use 'auto' here for auto detection CPUs number on "
        "host system and it will be 0 when used with --pdb",
    )
    group.addoption(
        "--maxprocesses",
        dest="maxprocesses",
        metavar="maxprocesses",
        action="store",
        type=int,
        help="limit the maximum number of workers to process the tests when using --numprocesses=auto",
    )
    group.addoption(
        "--max-worker-restart",
        "--max-slave-restart",
        action="store",
        default=None,
        dest="maxworkerrestart",
        help="maximum number of workers that can be restarted "
        "when crashed (set to zero to disable this feature)\n"
        "'--max-slave-restart' option is deprecated and will be removed in "
        "a future release",
    )
    group.addoption(
        "--dist",
        metavar="distmode",
        action="store",
        choices=["each", "load", "loadscope", "loadfile", "no"],
        dest="dist",
        default="no",
        help=(
            "set mode for distributing tests to exec environments.\n\n"
            "each: send each test to all available environments.\n\n"
            "load: load balance by sending any pending test to any"
            " available environment.\n\n"
            "loadscope: load balance by sending pending groups of tests in"
            " the same scope to any available environment.\n\n"
            "loadfile: load balance by sending test grouped by file"
            " to any available environment.\n\n"
            "(default) no: run tests inprocess, don't distribute."
        ),
    )
    group.addoption(
        "--tx",
        dest="tx",
        action="append",
        default=[],
        metavar="xspec",
        help=(
            "add a test execution environment. some examples: "
            "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 "
            "--tx ssh=user@codespeak.net//chdir=testcache"
        ),
    )
    group._addoption(
        "-d",
        action="store_true",
        dest="distload",
        default=False,
        help="load-balance tests. shortcut for '--dist=load'",
    )
    group.addoption(
        "--rsyncdir",
        action="append",
        default=[],
        metavar="DIR",
        help="add directory for rsyncing to remote tx nodes.",
    )
    group.addoption(
        "--rsyncignore",
        action="append",
        default=[],
        metavar="GLOB",
        help="add expression for ignores when rsyncing to remote tx nodes.",
    )
    group.addoption(
        "--boxed",
        action="store_true",
        help="backward compatibility alias for pytest-forked --forked",
    )
    # ini-file settings (pytest.ini / setup.cfg / tox.ini).
    parser.addini(
        "rsyncdirs",
        "list of (relative) paths to be rsynced for remote distributed testing.",
        type="pathlist",
    )
    parser.addini(
        "rsyncignore",
        "list of (relative) glob-style paths to be ignored for rsyncing.",
        type="pathlist",
    )
    parser.addini(
        "looponfailroots",
        type="pathlist",
        help="directories to check for changes",
        default=[py.path.local()],
    )
# -------------------------------------------------------------------------
# distributed testing hooks
# -------------------------------------------------------------------------
def pytest_addhooks(pluginmanager):
    """Register xdist's additional hook specifications with pytest.

    Imported lazily so the hook module is only loaded when pytest asks
    plugins to contribute hook specs.
    """
    from xdist import newhooks
    pluginmanager.add_hookspecs(newhooks)
# -------------------------------------------------------------------------
# distributed testing initialization
# -------------------------------------------------------------------------
@pytest.mark.trylast
def pytest_configure(config):
    """Set up distributed execution after all other plugins configured.

    Runs trylast so that option post-processing (e.g. in
    ``pytest_cmdline_main``) has already normalized ``--dist``. Registers
    the distributed session plugin and disables the terminal reporter's
    per-file path display, which is noisy with interleaved worker output.
    """
    if config.getoption("dist") != "no" and not config.getvalue("collectonly"):
        from xdist.dsession import DSession
        session = DSession(config)
        config.pluginmanager.register(session, "dsession")
        tr = config.pluginmanager.getplugin("terminalreporter")
        if tr:
            tr.showfspath = False
    if config.getoption("boxed"):
        # --boxed is a backward-compatibility alias for pytest-forked.
        config.option.forked = True
@pytest.mark.tryfirst
def pytest_cmdline_main(config):
    """Normalize the xdist command-line options before configuration.

    Resolves ``-n auto`` (an ``AutoInt``), expands ``-n N`` into N
    ``popen`` execution environments, applies ``--max-worker-restart``
    style caps, maps ``-d`` to ``--dist=load`` and rejects the
    ``--pdb``/distribution combination, which cannot work across
    processes.
    """
    usepdb = config.getoption("usepdb", False)  # a core option
    if isinstance(config.option.numprocesses, AutoInt):
        # 'auto' resolves to 0 under --pdb so tests run in-process.
        config.option.numprocesses = 0 if usepdb else int(config.option.numprocesses)
    if config.option.numprocesses:
        if config.option.dist == "no":
            config.option.dist = "load"
        numprocesses = config.option.numprocesses
        if config.option.maxprocesses:
            numprocesses = min(numprocesses, config.option.maxprocesses)
        # each worker is a local subprocess gateway
        config.option.tx = ["popen"] * numprocesses
    if config.option.distload:
        config.option.dist = "load"
    val = config.getvalue
    if not val("collectonly"):
        if val("dist") != "no":
            if usepdb:
                raise pytest.UsageError(
                    "--pdb is incompatible with distributing tests; try using -n0 or -nauto."
                )  # noqa: E501
# -------------------------------------------------------------------------
# fixtures
# -------------------------------------------------------------------------
@pytest.fixture(scope="session")
def worker_id(request):
    """Session-scoped fixture: the xdist worker id ('gw0', 'gw1', ...),
    or 'master' when tests run in-process on the controller node.
    """
    # Worker processes carry a ``workerinput`` dict on their config;
    # the controller process does not.
    config = request.config
    if hasattr(config, "workerinput"):
        return config.workerinput["workerid"]
    return "master"
| 30.511521
| 103
| 0.554599
|
4a0488f0bd563f055b3335621761cfecf7f28f23
| 4,185
|
py
|
Python
|
src/programy/storage/stores/nosql/mongo/store/sets.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
src/programy/storage/stores/nosql/mongo/store/sets.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
src/programy/storage/stores/nosql/mongo/store/sets.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.stores.nosql.mongo.store.mongostore import MongoStore
from programy.storage.entities.sets import SetsReadWriteStore
from programy.storage.stores.nosql.mongo.dao.set import Set
class MongoSetsStore(MongoStore, SetsReadWriteStore):
    """Mongo-backed storage for AIML sets.

    Each set is persisted as a single document ``{name, values}`` where
    ``values`` is a list of upper-cased member strings.
    """

    SETS = 'sets'
    NAME = 'name'
    VALUES = 'values'

    def __init__(self, storage_engine):
        MongoStore.__init__(self, storage_engine)
        SetsReadWriteStore.__init__(self)

    def collection_name(self):
        # Used by MongoStore to resolve the backing collection.
        return MongoSetsStore.SETS

    def empty_named(self, name):
        """Delete all documents for the named set."""
        YLogger.info(self, "Empting set [%s]", name)
        collection = self.collection()
        # Collection.remove() was deprecated in pymongo 3 and removed in
        # pymongo 4; delete_many() is the supported equivalent.
        collection.delete_many({MongoSetsStore.NAME: name})

    def add_to_set(self, name, value, replace_existing=False):
        """Add ``value`` (upper-cased) to the named set, creating the set
        document if it does not exist yet.

        ``replace_existing`` is accepted for interface compatibility with
        the base store; duplicates are never added here.

        :return: True when a document was modified or created.
        """
        collection = self.collection()
        aset = collection.find_one({MongoSetsStore.NAME: name})
        uvalue = value.upper()
        if aset is not None:
            if uvalue not in aset[MongoSetsStore.VALUES]:
                YLogger.info(self, "Adding value to set [%s] [%s]", name, uvalue)
                aset[MongoSetsStore.VALUES].append(uvalue)
                result = collection.replace_one({MongoSetsStore.NAME: name}, aset)
                return bool(result.modified_count > 0)
        else:
            YLogger.info(self, "Creating new set [%s], initial value [%s]", name, uvalue)
            aset = Set(name, [uvalue])
            return self.add_document(aset)

    def remove_from_set(self, name, value):
        """Remove ``value`` from the named set.

        Deletes the whole set document when the last value is removed.

        :return: True when a document was modified or deleted.
        """
        YLogger.info(self, "Remove value [%s] from set [%s]", value, name)
        collection = self.collection()
        aset = collection.find_one({MongoSetsStore.NAME: name})
        if aset is not None:
            if value.upper() in aset[MongoSetsStore.VALUES]:
                aset[MongoSetsStore.VALUES].remove(value.upper())
                if aset[MongoSetsStore.VALUES]:
                    result = collection.replace_one({MongoSetsStore.NAME: name}, aset)
                    return bool(result.modified_count > 0)
                else:
                    # no values left - drop the document entirely
                    result = collection.delete_one({MongoSetsStore.NAME: name})
                    return bool(result.deleted_count > 0)
        return False

    def load_all(self, collector):
        """Empty the collector and reload every stored set into it."""
        YLogger.info(self, "Loading all sets from Mongo")
        collection = self.collection()
        collector.empty()
        sets = collection.find({})
        for aset in sets:
            self.load(collector, aset[MongoSetsStore.NAME])

    def load(self, collector, name=None):
        """Load one named set into the collector, replacing any previous
        entry of the same name.

        :return: True when the set exists and was loaded.
        """
        YLogger.info(self, "Loading set [%s] from Mongo", name)
        collection = self.collection()
        aset = collection.find_one({MongoSetsStore.NAME: name})
        if aset is not None:
            the_set = {}
            for value in aset[MongoSetsStore.VALUES]:
                value = value.strip()
                self.add_set_values(the_set, value)
            collector.remove(name)
            collector.add_set(name, the_set, MongoStore.MONGO)
            return True
        return False
| 43.14433
| 120
| 0.668578
|
4a0489445f8442712f4c574ae5320faf87130e1f
| 5,300
|
py
|
Python
|
tests/test_measure.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
tests/test_measure.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
tests/test_measure.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of fionautil.
# http://github.com/fitnr/fionautil
# Licensed under the GPLv3 license:
# http://http://opensource.org/licenses/GPL-3.0
# Copyright (c) 2015, Neil Freeman <contact@fakeisthenewreal.org>
from unittest import TestCase as PythonTestCase
import unittest.main
from functools import partial
from math import pi
from os import path
from fionautil import measure
import fionautil.layer
shp = path.join(path.dirname(__file__), 'fixtures/testing.shp')
class TestMeasure(PythonTestCase):
    """Unit tests for :mod:`fionautil.measure`.

    Bare ``assert`` statements are stripped under ``python -O`` and give
    no diagnostics on failure, so all checks use unittest assert methods
    with equivalent strictness (``assertIs`` for identity checks,
    ``assertEqual`` for ``==`` comparisons).
    """

    def testDistance(self):
        # planar distance when longlat is False, great-circle otherwise
        self.assertEqual(measure.distance(1, 0, 0, 0, False), 1)
        self.assertEqual(measure.distance(0, 0, 360, 0, True), 0.0)
        self.assertEqual(measure.distance(0, 0, 6, 0, True), 667916.9447596414)

    def testAzimuth(self):
        self.assertEqual(measure.azimuth(0, 0, 0, 0, clockwise=True, longlat=False), 0)
        self.assertEqual(measure.azimuth(1, 0, 0, 1, clockwise=True, longlat=False), -45)
        self.assertEqual(measure.azimuth(0, 1, 1, 0, clockwise=True, longlat=False), 135)
        self.assertEqual(measure.azimuth(0, 0, 0, 1, clockwise=True, longlat=False), 0)
        self.assertEqual(measure.azimuth(0, 0, 1, 0, clockwise=False, longlat=False), -90)
        self.assertEqual(measure.azimuth(1, 0, 0, 0, clockwise=True, longlat=False), 270)
        self.assertEqual(measure.azimuth(0, 0, 0, 90), -0.0)
        self.assertEqual(measure.azimuth(0, 0, 90, 0), -90.0)
        self.assertEqual(measure.azimuth(0, 0, 90, 0, radians=True), pi / -2)

    def testSignedArea(self):
        feature = fionautil.layer.first(shp)
        self.assertEqual(measure.signed_area(feature['geometry']['coordinates'][0]), -4.428726877457176)
        coords = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
        self.assertEqual(measure.signed_area(coords), 1.0)
        self.assertEqual(measure.clockwise(coords), False)
        self.assertEqual(measure.counterclockwise(coords), True)
        coords.reverse()
        self.assertEqual(measure.signed_area(coords), -1.0)
        self.assertEqual(measure.clockwise(coords), True)
        self.assertEqual(measure.counterclockwise(coords), False)
        # z values must be ignored
        zcoords = [(0, 0, 1), (1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 0, 0)]
        self.assertEqual(measure.signed_area(zcoords), 1.0)

    def testAzimuthDistance(self):
        self.assertEqual(measure.azimuth_distance(0, 0, 90, 0), (90, 10018754.171394622))
        self.assertEqual(measure.azimuth_distance(1, 0, 0, 0, longlat=False), (-270, 1))

    def testDet(self):
        self.assertEqual(measure.det((1, 2), (3, 4)), -2)
        self.assertEqual(measure.det((3, 4), (1, 2)), 2)
        self.assertEqual(measure.det((100, 4), (1, -100)), -10004)

    def testIntersectingbounds(self):
        a = (0, 0, 10, 10)
        b = (0, 0, 9, 9)
        c = (10, 10, 20, 20)
        # touching boxes count as intersecting
        self.assertIs(measure.intersectingbounds(a, b), True)
        self.assertIs(measure.intersectingbounds(a, c), True)
        self.assertIs(measure.intersectingbounds(b, c), False)

    def testIntersection(self):
        a = ((0, 0), (10, 10))
        b = ((10, 0), (0, 10))
        c = ((0, 5), (10, 5))
        d = ((12, 100), (13, 102))
        e = (0, 0), (5, 5)
        f = ((0, 0), (10, 0))
        g = ((5, 0), (15, 0))
        h = ((0, 0), (0, 10))
        i = ((0, 5), (0, 15))
        j = ((11, 11), (12, 12))
        k = (4, 5), (10, 11)
        m = (10, 10), (0, 0)
        n = (0, 10), (10, 10)
        self.assertEqual(measure.intersect(a, b), (5, 5))
        self.assertEqual(measure.intersect(a, c), (5, 5))
        # collinear overlaps may return any shared endpoint
        self.assertIn(measure.intersect(a, e), list(a) + list(e))
        self.assertIn(measure.intersect(e, a), list(a) + list(e))
        self.assertIsNone(measure.intersect(a, d))
        self.assertIsNone(measure.intersect(b, d))
        self.assertIn(measure.intersect(f, g), list(f) + list(g))
        self.assertIn(measure.intersect(g, f), list(f) + list(g))
        self.assertIn(measure.intersect(h, i), list(h) + list(i))
        self.assertIn(measure.intersect(i, h), list(h) + list(i))
        self.assertIsNone(measure.intersect(a, j))
        self.assertIsNone(measure.intersect(k, m))
        self.assertEqual(measure.intersect(k, n), (9, 10))

    def testIntersectionDet(self):
        minx, miny, maxx, maxy = 0, 0, 10, 10
        edges = (
            ((minx, miny), (minx, maxy)),
            ((minx, maxy), (maxx, maxy)),
            ((maxx, maxy), (maxx, miny)),
            ((maxx, miny), (minx, miny))
        )
        dets = [measure.det(*edge) for edge in edges]
        self.assertEqual(dets, [0, -100, -100, 0])
        a = (4, 5), (10, 11)
        self.assertIsNone(measure.intersect(edges[0], a))
        self.assertEqual(measure.intersect(edges[1], a), (9, 10))
        # precomputed determinants must give the same answers
        inters = [measure.intersect(edge, a, detm=det) for edge, det in zip(edges, dets)]
        self.assertListEqual(inters, [None, (9, 10), None, None])

    def testBoundsIntersect(self):
        intersect = partial(measure.intersectingbounds, (0, 0, 1, 1))
        self.assertIs(intersect((0.5, 0.5, 1.5, 1.5)), True)
        self.assertIs(intersect((-1, -1, 0.5, 0.5)), True)
        self.assertIs(intersect((0, 0, 1, 1)), True)
        self.assertIs(intersect((0, -1, 0, 1)), True)
        self.assertIs(intersect((0.25, 1.25, 0.75, 1.75)), False)
        self.assertIs(intersect((0.25, 0.25, 0.75, 0.75)), True)
        self.assertIs(intersect((0.25, 0.25, 0.75, 4)), True)
if __name__ == '__main__':
unittest.main()
| 36.054422
| 104
| 0.58
|
4a048a4699f307a27e0541938a9354a1ad9f37c9
| 4,984
|
py
|
Python
|
sdk/lusid/models/upsert_reference_portfolio_constituents_response.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/upsert_reference_portfolio_constituents_response.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/upsert_reference_portfolio_constituents_response.py
|
mneedham/lusid-sdk-python
|
edabec16b357ba3fc48a53f3faacb4f94b18843e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2808
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class UpsertReferencePortfolioConstituentsResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    openapi_types = {
        'href': 'str',
        'version': 'Version',
        'links': 'list[Link]'
    }

    attribute_map = {
        'href': 'href',
        'version': 'version',
        'links': 'links'
    }

    required_map = {
        'href': 'optional',
        'version': 'optional',
        'links': 'optional'
    }

    def __init__(self, href=None, version=None, links=None):  # noqa: E501
        """
        UpsertReferencePortfolioConstituentsResponse - a model defined in OpenAPI

        :param href:
        :type href: str
        :param version:
        :type version: lusid.Version
        :param links:
        :type links: list[lusid.Link]
        """  # noqa: E501
        self._href = None
        self._version = None
        self._links = None
        self.discriminator = None

        self.href = href
        # version keeps its None default unless explicitly provided
        if version is not None:
            self.version = version
        self.links = links

    @property
    def href(self):
        """Gets the href of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501

        :return: The href of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        """Sets the href of this UpsertReferencePortfolioConstituentsResponse.

        :param href: The href of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :type: str
        """
        self._href = href

    @property
    def version(self):
        """Gets the version of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501

        :return: The version of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :rtype: Version
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this UpsertReferencePortfolioConstituentsResponse.

        :param version: The version of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :type: Version
        """
        self._version = version

    @property
    def links(self):
        """Gets the links of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501

        :return: The links of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this UpsertReferencePortfolioConstituentsResponse.

        :param links: The links of this UpsertReferencePortfolioConstituentsResponse.  # noqa: E501
        :type: list[Link]
        """
        self._links = links

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Plain dict.items() replaces the py2-compat six.iteritems shim;
        # behavior is identical on Python 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpsertReferencePortfolioConstituentsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.535912
| 103
| 0.585875
|
4a048a857e34e04641f73171e4a2e7c6f9873a3c
| 33,492
|
py
|
Python
|
src/werkzeug/wsgi.py
|
kam193/werkzeug
|
1129879c79f8415d4ea9a67d7c5ce2f2e1d28c78
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/wsgi.py
|
kam193/werkzeug
|
1129879c79f8415d4ea9a67d7c5ce2f2e1d28c78
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/wsgi.py
|
kam193/werkzeug
|
1129879c79f8415d4ea9a67d7c5ce2f2e1d28c78
|
[
"BSD-3-Clause"
] | 1
|
2020-12-29T18:02:18.000Z
|
2020-12-29T18:02:18.000Z
|
import io
import re
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _encode_idna
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
def responder(f):
    """Mark a function as a responder.

    The wrapped callable invokes ``f`` and then immediately calls the
    WSGI application ``f`` returned, passing through the trailing
    ``(environ, start_response)`` pair::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        # args[-2:] is always (environ, start_response), even when the
        # decorated callable takes extra leading arguments.
        return f(*args)(*args[-2:])

    return update_wrapper(wrapper, f)
def get_current_url(
    environ,
    root_only=False,
    strip_querystring=False,
    host_only=False,
    trusted_hosts=None,
):
    """Recreate the full URL of the current request as an IRI, or a part
    of it.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` to omit the query string.
    :param host_only: set to `True` if only the host URL is wanted.
    :param trusted_hosts: a list of trusted hosts; if given the host is
        validated and a :exc:`~werkzeug.exceptions.SecurityError` is
        raised for untrusted hosts (see :func:`host_is_trusted`).

    The result is an IRI and may contain non-ASCII characters; pass it
    through :func:`~werkzeug.urls.iri_to_uri` for an ASCII URI.
    """
    parts = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)]

    if host_only:
        return uri_to_iri(f"{''.join(parts)}/")

    # SCRIPT_NAME/PATH_INFO arrive latin1-decoded per PEP 3333; re-encode
    # before percent-quoting.
    parts.append(url_quote(environ.get("SCRIPT_NAME", "").encode("latin1")).rstrip("/"))
    parts.append("/")

    if not root_only:
        parts.append(url_quote(environ.get("PATH_INFO", "").encode("latin1").lstrip(b"/")))

        if not strip_querystring:
            query = get_query_string(environ)

            if query:
                parts.append(f"?{query}")

    return uri_to_iri("".join(parts))
def host_is_trusted(hostname, trusted_list):
    """Check ``hostname`` against a list of trusted hosts, normalizing
    ports and IDNA-encoding both sides.

    A trusted entry starting with a dot matches the bare host and all of
    its subdomains.

    :param hostname: the hostname to check.
    :param trusted_list: a hostname or list of hostnames to check against.
    """
    if not hostname:
        return False

    if isinstance(trusted_list, str):
        trusted_list = [trusted_list]

    def _idna(name):
        # strip any port before IDNA-encoding
        if ":" in name:
            name = name.rsplit(":", 1)[0]

        return _encode_idna(name)

    try:
        hostname = _idna(hostname)
    except UnicodeError:
        return False

    for ref in trusted_list:
        suffix_match = ref.startswith(".")

        if suffix_match:
            ref = ref[1:]

        try:
            ref = _idna(ref)
        except UnicodeError:
            # an unencodable trusted entry rejects the host outright
            return False

        if ref == hostname:
            return True

        if suffix_match and hostname.endswith(b"." + ref):
            return True

    return False
def get_host(environ, trusted_hosts=None):
    """Return the host for the given WSGI environment.

    Prefers the ``Host`` header; falls back to ``SERVER_NAME`` and
    ``SERVER_PORT``. The default port for the scheme (80/443) is
    stripped; any other port is kept.

    :param environ: The WSGI environment to get the host from.
    :param trusted_hosts: A list of trusted hosts; when given the host is
        validated with :func:`host_is_trusted`.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not trusted.
    """
    scheme = environ["wsgi.url_scheme"]

    if "HTTP_HOST" in environ:
        host = environ["HTTP_HOST"]

        # drop an explicit default port for the scheme
        if scheme == "http" and host.endswith(":80"):
            host = host[:-3]
        elif scheme == "https" and host.endswith(":443"):
            host = host[:-4]
    else:
        host = environ["SERVER_NAME"]

        if (scheme, environ["SERVER_PORT"]) not in (("https", "443"), ("http", "80")):
            host += f":{environ['SERVER_PORT']}"

    if trusted_hosts is not None:
        if not host_is_trusted(host, trusted_hosts):
            from .exceptions import SecurityError

            raise SecurityError(f'Host "{host}" is not trusted')

    return host
def get_content_length(environ):
    """Return the request content length from the WSGI environment as a
    non-negative integer, or ``None`` when it is absent, malformed, or
    chunked transfer encoding is in use.

    :param environ: the WSGI environ to fetch the content length from.
    """
    if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
        return None

    try:
        # negative values are clamped to 0
        return max(0, int(environ["CONTENT_LENGTH"]))
    except (KeyError, ValueError, TypeError):
        return None
def get_input_stream(environ, safe_fallback=True):
    """Return the request body stream from the WSGI environment, wrapped
    so it is safe to read without consulting the content length.

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: when the content length is unknown, return an
        empty stream instead of the raw (possibly infinite) one.
    """
    stream = environ["wsgi.input"]

    # Servers that set wsgi.input_terminated guarantee EOF behaviour, so
    # the raw stream can be handed out unchanged.
    if environ.get("wsgi.input_terminated"):
        return stream

    content_length = get_content_length(environ)

    # Unknown length: an unbounded read could hang or exhaust memory, so
    # default to an empty stream unless the caller explicitly opts out.
    if content_length is None:
        return io.BytesIO() if safe_fallback else stream

    # Known length: never read past it.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Return the ``QUERY_STRING`` from the WSGI environment, restricted
    to ASCII by percent-quoting anything outside the safe set.

    :param environ: WSGI environment to get the query string from.
    """
    raw = environ.get("QUERY_STRING", "").encode("latin1")

    # Some clients send non-ASCII query strings; quote those bytes while
    # leaving the usual query-string punctuation untouched.
    return url_quote(raw, safe=":&%=+$!*'(),")
def get_path_info(environ, charset="utf-8", errors="replace"):
    """Return ``PATH_INFO`` from the WSGI environment, decoded with
    ``charset`` (or left as bytes when ``charset`` is ``None``).

    :param environ: WSGI environment to get the path from.
    :param charset: The charset for the path info, or ``None`` to skip
        decoding.
    :param errors: The decoding error handling.
    """
    # PEP 3333 delivers the path latin1-decoded; round-trip through
    # bytes before decoding with the requested charset.
    raw = environ.get("PATH_INFO", "").encode("latin1")
    return _to_str(raw, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset="utf-8", errors="replace"):
    """Return ``SCRIPT_NAME`` from the WSGI environment, decoded with
    ``charset`` (or left as bytes when ``charset`` is ``None``).

    :param environ: WSGI environment to get the path from.
    :param charset: The charset for the path, or ``None`` to skip
        decoding.
    :param errors: The decoding error handling.
    """
    # Same latin1 round-trip as get_path_info, per PEP 3333.
    raw = environ.get("SCRIPT_NAME", "").encode("latin1")
    return _to_str(raw, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset="utf-8", errors="replace"):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
    If the `charset` is set to `None` bytes are returned.
    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:
    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'
    .. versionadded:: 0.5
    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.
    :param environ: the WSGI environment that is modified.
    :param charset: The ``encoding`` parameter passed to
        :func:`bytes.decode`.
    :param errors: The ``errors`` parameter passed to
        :func:`bytes.decode`.
    """
    path = environ.get("PATH_INFO")
    if not path:
        return None
    script_name = environ.get("SCRIPT_NAME", "")
    # shift multiple leading slashes over
    old_path = path
    path = path.lstrip("/")
    if path != old_path:
        # preserve the stripped slashes by appending them to SCRIPT_NAME
        script_name += "/" * (len(old_path) - len(path))
    if "/" not in path:
        # last segment: PATH_INFO is exhausted after this pop
        environ["PATH_INFO"] = ""
        environ["SCRIPT_NAME"] = script_name + path
        rv = path.encode("latin1")
    else:
        # split off the first segment, keep the rest (with leading slash)
        segment, path = path.split("/", 1)
        environ["PATH_INFO"] = f"/{path}"
        environ["SCRIPT_NAME"] = script_name + segment
        rv = segment.encode("latin1")
    # decode per the requested charset (or return bytes when charset is None)
    return _to_str(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset="utf-8", errors="replace"):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none. Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` bytes are returned.

    .. versionadded:: 0.5
    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)

    # str.split always returns at least one element, so the previous
    # ``if segments:`` check was dead and an empty PATH_INFO leaked ''
    # instead of the documented None. Test the first segment itself.
    if segments[0]:
        return _to_str(
            segments[0].encode("latin1"), charset, errors, allow_none_charset=True
        )

    return None
def extract_path_info(
    environ_or_baseurl,
    path_or_url,
    charset="utf-8",
    errors="werkzeug.url_quote",
    collapse_http_schemes=True,
):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a string. The URLs might also be IRIs.
    If the path info could not be determined, `None` is returned.
    Some examples:
    >>> extract_path_info('http://example.com/app', '/app/hello')
    '/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    '/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True
    Instead of providing a base URL you can also pass a WSGI environment.
    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    .. versionchanged:: 0.15
        The ``errors`` parameter defaults to leaving invalid bytes
        quoted instead of replacing them.
    .. versionadded:: 0.6
    """
    def _normalize_netloc(scheme, netloc):
        # strip userinfo, then drop the port when it is the scheme default
        parts = netloc.split("@", 1)[-1].split(":", 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == "http" and port == "80") or (
                scheme == "https" and port == "443"
            ):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += f":{port}"
        return netloc
    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # resolve relative paths against the base before comparing
    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]
    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in ("http", "https"):
                return None
    else:
        if not (base_scheme in ("http", "https") and base_scheme == cur_scheme):
            return None
    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None
    # are we below the application path?
    base_path = base_path.rstrip("/")
    if not cur_path.startswith(base_path):
        return None
    # the remainder after the application root is the path info
    return f"/{cur_path[len(base_path) :].lstrip('/')}"
class ClosingIterator:
    """Wrap an iterable so extra cleanup callbacks run on ``close()``.

    The WSGI spec requires middlewares and gateways to respect the
    ``close`` callback of the iterable returned by an application. This
    wrapper forwards iteration to the underlying iterable and, when
    closed, first calls the iterable's own ``close`` (if any) and then
    each additional callback::

        return ClosingIterator(app(environ, start_response),
                               [cleanup_session, cleanup_locals])

    A single callable may be passed instead of a list.
    """

    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = partial(next, iterator)

        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)

        # the iterable's own close (if present) always runs first
        iterable_close = getattr(iterable, "close", None)
        if iterable_close:
            cleanups.insert(0, iterable_close)

        self._callbacks = cleanups

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for cleanup in self._callbacks:
            cleanup()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object for use as a WSGI response iterable.

    Uses the WSGI server's own ``wsgi.file_wrapper`` when the server
    provides one, otherwise falls back to the generic
    :class:`FileWrapper`. When the server wrapper is used, pass it
    through unchanged (set ``direct_passthrough`` on the response) rather
    than iterating it inside the application. See :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get("wsgi.file_wrapper", FileWrapper)
    return wrapper(file, buffer_size)
class FileWrapper:
    """Iterate a file-like object in ``buffer_size``-byte chunks.

    Prefer :func:`wrap_file`, which uses the WSGI server's native file
    wrapper when available. When used with a response object, enable
    ``direct_passthrough`` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # delegate only when the underlying object supports it
        close = getattr(self.file, "close", None)
        if close is not None:
            close()

    def seekable(self):
        if hasattr(self.file, "seekable"):
            return self.file.seekable()
        # no seekable() probe available: assume seekable iff seek exists
        return hasattr(self.file, "seek")

    def seek(self, *args):
        seek = getattr(self.file, "seek", None)
        if seek is not None:
            seek(*args)

    def tell(self):
        if hasattr(self.file, "tell"):
            return self.file.tell()
        return None

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
class _RangeWrapper:
    # private for now, but should we make it public in the future ?
    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.
    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.
    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """
    def __init__(self, iterable, start_byte=0, byte_range=None):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        # end_byte stays None for an open-ended range.
        self.end_byte = None
        if byte_range is not None:
            self.end_byte = self.start_byte + self.byte_range
        # Total number of bytes consumed from the underlying iterable so far.
        self.read_length = 0
        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
        self.end_reached = False
    def __iter__(self):
        return self
    def _next_chunk(self):
        # Pull one chunk from the underlying iterator, keeping the running
        # byte count up to date.
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise
    def _first_iteration(self):
        # Position the stream at start_byte: seek directly when possible,
        # otherwise read and discard chunks until start_byte is passed and
        # trim the chunk that straddles it.
        chunk = None
        if self.seekable:
            self.iterable.seek(self.start_byte)
            self.read_length = self.iterable.tell()
            contextual_read_length = self.read_length
        else:
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                chunk = chunk[self.start_byte - self.read_length :]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length
    def _next(self):
        # Return the next chunk of the requested range, truncating the
        # final chunk at end_byte when a byte_range was given.
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            self.end_reached = True
            return chunk[: self.end_byte - contextual_read_length]
        return chunk
    def __next__(self):
        chunk = self._next()
        if chunk:
            return chunk
        # An empty chunk means the range is exhausted.
        self.end_reached = True
        raise StopIteration()
    def close(self):
        # Propagate close to the wrapped iterable if it supports it.
        if hasattr(self.iterable, "close"):
            self.iterable.close()
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, str)):
raise TypeError(
"Passed a string or byte object instead of true iterator or stream."
)
if not hasattr(stream, "read"):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False):
    """Safely iterates line-based over an input stream. If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification. The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.
    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.
    .. versionchanged:: 0.8
    This function now ensures that the limit was reached.
    .. versionadded:: 0.9
    added support for iterators as input stream.
    .. versionadded:: 0.11.10
    added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, "")
    if not first_item:
        return
    # s() adapts the literal markers below to the chunk type (str vs.
    # bytes) -- see _make_encode_wrapper.
    s = _make_encode_wrapper(first_item)
    empty = s("")
    cr = s("\r")
    lf = s("\n")
    crlf = s("\r\n")
    # Push the probed first chunk back onto the front of the iterator.
    _iter = chain((first_item,), _iter)
    def _iter_basic_lines():
        # Yield "lines" split with keepends semantics, buffering partial
        # lines across chunk boundaries.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, "")
            if not new_data:
                break
            new_buf = []
            buf_size = 0
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                buf_size += len(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
                elif cap_at_buffer and buf_size >= buffer_size:
                    # Emit buffer_size slices early instead of growing
                    # the pending line without bound.
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
            buffer = new_buf
        if buffer:
            yield _join(buffer)
    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(
    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks. If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.
    .. versionadded:: 0.8
    .. versionadded:: 0.9
    added support for iterators as input stream.
    .. versionadded:: 0.11.10
    added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, "")
    if not first_item:
        return
    # Push the probed first chunk back onto the front of the iterator.
    _iter = chain((first_item,), _iter)
    # Choose str or bytes machinery based on the first chunk's type; the
    # separator is captured by the regex group so splitting keeps it as
    # its own item in the split result.
    if isinstance(first_item, str):
        separator = _to_str(separator)
        _split = re.compile(f"({re.escape(separator)})").split
        _join = "".join
    else:
        separator = _to_bytes(separator)
        _split = re.compile(b"(" + re.escape(separator) + b")").split
        _join = b"".join
    buffer = []
    while 1:
        new_data = next(_iter, "")
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                # A separator terminates the pending chunk.
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)
                # Optionally emit buffer_size slices early.
                if cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
class LimitedStream(io.IOBase):
    """Wraps a stream so that it doesn't read more than n bytes. If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string. The return value of that function is forwarded
    to the reader function. So if it returns an empty string
    :meth:`read` will return an empty string as well.
    The limit however must never be higher than what the stream can
    output. Otherwise :meth:`readlines` will try to read past the
    limit.
    .. admonition:: Note on WSGI compliance
    calls to :meth:`readline` and :meth:`readlines` are not
    WSGI compliant because it passes a size argument to the
    readline methods. Unfortunately the WSGI PEP is not safely
    implementable without a size argument to :meth:`readline`
    because there is no EOF marker in the stream. As a result
    of that the use of :meth:`readline` is discouraged.
    For the same reason iterating over the :class:`LimitedStream`
    is not portable. It internally calls :meth:`readline`.
    We strongly suggest using :meth:`read` only or using the
    :func:`make_line_iter` which safely iterates line-based
    over a WSGI input stream.
    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """
    def __init__(self, stream, limit):
        # Only the bound read/readline methods are kept, not the stream
        # itself; _pos counts bytes consumed so far.
        self._read = stream.read
        self._readline = stream.readline
        self._pos = 0
        self.limit = limit
    def __iter__(self):
        return self
    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit
    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)
    def on_disconnect(self):
        """What should happen if a disconnect is detected? The return
        value of this function is returned from read functions in case
        the client went away. By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from .exceptions import ClientDisconnected
        raise ClientDisconnected()
    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream. This consumes all the data left until the
        limit is reached.
        :param chunk_size: the size for a chunk. It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk
    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.
        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (OSError, ValueError):
            return self.on_disconnect()
        # A short read below the limit means the client went away.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read
    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, OSError):
            return self.on_disconnect()
        # An empty line while a positive size was requested means EOF
        # before the declared limit, i.e. a disconnect.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line
    def readlines(self, size=None):
        """Reads a file into a list of strings. It calls :meth:`readline`
        until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # NOTE(review): last_pos is resynced to self._pos at the end
                # of each pass, so this adjustment appears to always subtract
                # zero — confirm against upstream before changing it.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result
    def tell(self):
        """Returns the position of the stream.
        .. versionadded:: 0.9
        """
        return self._pos
    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
    def readable(self):
        return True
| 34.245399
| 83
| 0.619282
|
4a048bcacdaad085a4137c32e7c493975cc7012a
| 5,548
|
py
|
Python
|
spacex_dash_app.py
|
staceybellerose/data_science_capstone
|
5439c9d4acebd21c3c0163ee864555b5c1b2b65e
|
[
"Unlicense"
] | null | null | null |
spacex_dash_app.py
|
staceybellerose/data_science_capstone
|
5439c9d4acebd21c3c0163ee864555b5c1b2b65e
|
[
"Unlicense"
] | null | null | null |
spacex_dash_app.py
|
staceybellerose/data_science_capstone
|
5439c9d4acebd21c3c0163ee864555b5c1b2b65e
|
[
"Unlicense"
] | null | null | null |
# NB: to run this app, the following commands need to be run first
# pip3 install pandas dash wget
# Import required libraries
import pandas as pd
import dash
import wget
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.express as px
# Read the airline data into pandas dataframe
# NOTE(review): wget.download runs at import time and re-fetches the CSV on
# every start — confirm that is intended for this deployment.
csv_file = wget.download('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/spacex_launch_dash.csv')
spacex_df = pd.read_csv(csv_file)
# Payload bounds initialize the range slider's default value below.
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Debug output left in place; remove once the slider bounds are verified.
print(max_payload, ' ', min_payload)
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',
                                        style={'textAlign': 'center', 'color': '#503D36',
                                               'font-size': 40}),
                                # TASK 1: Add a dropdown list to enable Launch Site selection
                                # The default select value is for ALL sites
                                dcc.Dropdown(id='site-dropdown',
                                    options=[
                                        {'label':'All Sites', 'value':'all'},
                                        {'label':'CCAFS LC-40', 'value':'CCAFS LC-40'},
                                        {'label':'CCAFS SLC-40', 'value':'CCAFS SLC-40'},
                                        {'label':'KSC LC-39A', 'value':'KSC LC-39A'},
                                        {'label':'VAFB SLC-4E', 'value':'VAFB SLC-4E'}
                                    ],
                                    value='all',
                                    placeholder='Select a Launch Site here',
                                    searchable=True
                                    ),
                                html.Br(),
                                # TASK 2: Add a pie chart to show the total successful launches count for all sites
                                # If a specific launch site was selected, show the Success vs. Failed counts for the site
                                html.Div(dcc.Graph(id='success-pie-chart')),
                                html.Br(),
                                html.P("Payload range (Kg):"),
                                # TASK 3: Add a slider to select payload range
                                dcc.RangeSlider(id='payload-slider',
                                    min=0,
                                    max=10000,
                                    step=1000,
                                    marks={
                                        0: '0 kg',
                                        1000: '1000 kg',
                                        2000: '2000 kg',
                                        3000: '3000 kg',
                                        4000: '4000 kg',
                                        5000: '5000 kg',
                                        6000: '6000 kg',
                                        7000: '7000 kg',
                                        8000: '8000 kg',
                                        9000: '9000 kg',
                                        10000: '10000 kg',
                                    },
                                    value=[min_payload, max_payload]
                                    ),
                                # TASK 4: Add a scatter chart to show the correlation between payload and launch success
                                html.Div(dcc.Graph(id='success-payload-scatter-chart')),
                                ])
# TASK 2:
# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output
@app.callback(
    Output(component_id='success-pie-chart', component_property='figure'),
    Input(component_id='site-dropdown', component_property='value')
)
def get_launch_data(launch_site):
    """Build the success pie chart for the dropdown selection.

    For 'all', slice sizes sum the `class` column per launch site (i.e.
    successes per site); for a single site, the slices are the row counts
    of failed (class 0) and successful (class 1) launches at that site.
    """
    if launch_site == 'all':
        figure = px.pie(spacex_df, values='class', names='Launch Site')
        figure.update_traces(textinfo='value')
        figure.update_layout(title='Successful Launches')
        return figure
    site_counts = (
        spacex_df[spacex_df['Launch Site'] == str(launch_site)]
        .groupby('class')
        .count()
        .reset_index()
    )
    figure = px.pie(site_counts, values='Launch Site', names='class')
    # Red for failures (class 0), green for successes (class 1).
    figure.update_traces(marker_colors=['#ef553b','#00cc96'])
    figure.update_layout(title='Successful Launches for %s' % launch_site)
    return figure
# TASK 4:
# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output
@app.callback(
    Output(component_id='success-payload-scatter-chart', component_property='figure'),
    [Input(component_id='site-dropdown', component_property='value'), Input(component_id="payload-slider", component_property="value")]
)
def build_scatter_plot(launch_site, payload_range):
    """Scatter of payload mass vs. launch outcome, filtered by slider range.

    ``payload_range`` is the [low, high] pair from the range slider; rows
    outside it are dropped before plotting.  When a single site is chosen,
    only that site's launches are shown.
    """
    low, high = payload_range
    in_range = spacex_df[spacex_df['Payload Mass (kg)'].between(low, high)]
    if launch_site != 'all':
        in_range = in_range[in_range['Launch Site'] == str(launch_site)]
    figure = px.scatter(
        in_range,
        x='Payload Mass (kg)',
        y='class',
        color='Booster Version Category',
    )
    figure.update_layout(title='Correlation between Payload Mass and Success')
    return figure
# Run the app
if __name__ == '__main__':
    # Start the Dash development server (blocking call).
    app.run_server()
| 50.899083
| 153
| 0.504867
|
4a048c87b5e42ddeb3dd2cc2f3d1fdfaeaa5f516
| 18,558
|
py
|
Python
|
ciso/_version.py
|
ioos/ciso
|
1972e291b3e05cef9340a03ed1ada719ec0b5fa8
|
[
"BSD-2-Clause"
] | 1
|
2019-07-19T18:49:46.000Z
|
2019-07-19T18:49:46.000Z
|
ciso/_version.py
|
ioos/ciso
|
1972e291b3e05cef9340a03ed1ada719ec0b5fa8
|
[
"BSD-2-Clause"
] | 6
|
2015-10-16T20:23:30.000Z
|
2019-02-07T13:47:55.000Z
|
ciso/_version.py
|
ioos/ciso
|
1972e291b3e05cef9340a03ed1ada719ec0b5fa8
|
[
"BSD-2-Clause"
] | 2
|
2016-09-26T20:57:51.000Z
|
2019-09-01T00:41:17.000Z
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information.

    Returns a dict with "refnames", "full" and "date" entries.  In a
    git-archive tarball the "$Format:...$" placeholders below have been
    substituted by git; in a plain checkout they remain unexpanded.
    """
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned dynamically by
    :func:`get_config`.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"  # rendering style consumed by render()
    cfg.tag_prefix = "v"  # release tags are expected to start with "v"
    cfg.parentdir_prefix = ""  # parent-directory fallback disabled
    cfg.versionfile_source = "ciso/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used internally to signal that one version-discovery strategy failed
    so get_versions() can fall through to the next one.
    """
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS.

    Registered functions end up in the module-level ``HANDLERS`` mapping,
    keyed first by VCS name and then by method name; the decorated
    function is returned unchanged.
    """
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault creates the per-VCS table on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(
    commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
    """Run the first launchable executable from *commands* with *args*.

    Each candidate in *commands* is tried in order; a missing executable
    (ENOENT) moves on to the next one.  Returns ``(stdout, returncode)``
    on a successful launch (stdout stripped and decoded, or ``None`` when
    the command exited non-zero) and ``(None, None)`` when nothing could
    be started at all.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # Loop finished without a break: no candidate could be launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Up to two parent levels
    above *root* are also searched for an appropriately named directory.
    Raises :exc:`NotThisMethod` when nothing matches.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # The part after the prefix is taken as the version string.
            return {
                "version": dirname[len(parentdir_prefix) :],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    # Variable name grepped for in the file -> key in the result dict.
    wanted = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for marker, key in wanted.items():
                if stripped.startswith(marker):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # An unreadable file simply yields whatever was collected so far
        # (usually an empty dict).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        # First space becomes "T", the space before the UTC offset is dropped.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Placeholder was never substituted: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    # refnames is a parenthesized, comma-separated ref list; strip the
    # parens and split it into individual refs.
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print("picking %s" % r)
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(
        GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
    )
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    # pieces accumulates facts about the checkout; render() consumes it.
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = (
                "unable to parse git-describe output: '%s'" % describe_out
            )
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(
            GITS, ["rev-list", "HEAD", "--count"], cwd=root
        )
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return "." if the closest tag already has a "+", else "+".

    PEP 440 local version segments are introduced by "+" and further
    separated by "."; this picks the correct next separator.
    """
    # The original used pieces.get("closest-tag", ""), which still returns
    # None when the key is present with a None value, making the "in" test
    # raise TypeError.  "or ''" covers both the missing and None cases.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Produces TAG[+DISTANCE.gHEX[.dirty]]; a tagged build that is then
    dirtied becomes TAG+0.gHEX.dirty.  With no tag at all the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if not tag:
        # No tag anywhere in history.
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if dirty:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or dirty:
        # Append the local version segment after the proper separator.
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if dirty:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] -- no -dirty marker.

    Without any tag the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree (it sorts backwards -- a dirty tree
    appears "older" than the clean one -- but you shouldn't be releasing
    -dirty builds anyway).  Without a tag the result is
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces) + "g%s" % pieces["short"]
        return rendered
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; ".dev0" marks a dirty tree.

    Without any tag the result is 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if not tag:
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty].

    Mirrors 'git describe --tags --dirty --always': the distance/hash part
    is omitted when the commit is exactly on the tag; with no tag at all
    only the short hex (no 'g' prefix) is used.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty].

    Mirrors 'git describe --tags --dirty --always --long': the
    distance/hash part is unconditional; with no tag at all only the
    short hex (no 'g' prefix) is used.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render *pieces* into the requested *style* and wrap the result.

    Returns a dict with "version", "full-revisionid", "dirty", "error"
    and "date" entries.  Raises :exc:`ValueError` for an unknown style.
    """
    if pieces["error"]:
        # A failed VCS query cannot be rendered in any style.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    if style == "pep440":
        version = render_pep440(pieces)
    elif style == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif style == "pep440-post":
        version = render_pep440_post(pieces)
    elif style == "pep440-old":
        version = render_pep440_old(pieces)
    elif style == "git-describe":
        version = render_git_describe(pieces)
    elif style == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": version,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git-archive keywords.
    try:
        return git_versions_from_keywords(
            get_keywords(), cfg.tag_prefix, verbose
        )
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }
    # Strategy 2: ask git directly via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse a version out of the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # All strategies failed: report an unknown version.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
| 32.730159
| 79
| 0.58072
|
4a048e785c1664602d0a2c073dfad6e9a8749d58
| 3,960
|
py
|
Python
|
Spy-Games/code.py
|
RiyaVachhani/ga-learner-dst-repo
|
8daabf510dca0c0056167292534532fbfb54bf6f
|
[
"MIT"
] | 1
|
2020-10-15T04:15:40.000Z
|
2020-10-15T04:15:40.000Z
|
Spy-Games/code.py
|
RiyaVachhani/ga-learner-dst-repo
|
8daabf510dca0c0056167292534532fbfb54bf6f
|
[
"MIT"
] | null | null | null |
Spy-Games/code.py
|
RiyaVachhani/ga-learner-dst-repo
|
8daabf510dca0c0056167292534532fbfb54bf6f
|
[
"MIT"
] | null | null | null |
# --------------
#Code starts here
#Function to read file
def read_file(path):
    """Return the first line (newline included) of the text file at *path*.

    Uses a ``with`` block so the file handle is closed even if reading
    raises, instead of the original open/readline/close sequence.
    """
    with open(path, 'r') as file:
        sentence = file.readline()
    return sentence
# Read the first message file.
# NOTE(review): file_path is not defined in this file -- it is presumably
# injected by the grading platform; verify before running standalone.
sample_message = read_file(file_path)
# (sample_message is kept for reference; it is not printed here)
#Function to fuse message
def fuse_msg(message_a, message_b):
    """Return the floor quotient message_b // message_a as a string.

    Both arguments are numeric strings.
    """
    divisor, dividend = int(message_a), int(message_b)
    return str(dividend // divisor)
# Read the two numeric message files and fuse them into secret part 1.
# NOTE(review): file_path_1 / file_path_2 are not defined in this file --
# presumably provided by the grading platform.
message_1 = read_file(file_path_1)
print(message_1)
#Calling the function to read file
message_2 = read_file(file_path_2)
print(message_2)
#Calling the function 'fuse_msg'
secret_msg_1 = fuse_msg(message_1,message_2)
#Printing the secret message
print(secret_msg_1)
#Function to substitute the message
def substitute_msg(message_c):
    """Map a colour code word to its secret identity string.

    The original if/elif chain left ``sub`` unbound for an unrecognised
    colour and crashed with UnboundLocalError; this version raises an
    explicit ValueError instead.
    """
    # NOTE(review): 'Marin Biologist' looks like a typo for 'Marine
    # Biologist', but it is runtime output, so it is preserved as-is.
    substitutions = {
        'Red': 'Army General',
        'Green': 'Data Scientist',
        'Blue': 'Marin Biologist',
    }
    try:
        return substitutions[message_c]
    except KeyError:
        raise ValueError("unknown message colour: %r" % message_c)
# Read the colour-code message file and substitute it into secret part 2.
# NOTE(review): file_path_3 is not defined in this file -- presumably
# provided by the grading platform.
message_3 = read_file(file_path_3)
print(message_3)
#Calling the function 'substitute_msg'
secret_msg_2 = substitute_msg(message_3)
#Printing the secret message
print(secret_msg_2)
#Function to compare message
def compare_msg(message_d, message_e):
    """Return the words of message_d that are absent from message_e.

    Word order from message_d is preserved; the result is one
    space-separated string.
    """
    forbidden = set(message_e.split())
    remaining = [word for word in message_d.split() if word not in forbidden]
    return " ".join(remaining)
# Read two sentence files and keep only the words unique to the first one.
# NOTE(review): file_path_4 / file_path_5 are not defined in this file --
# presumably provided by the grading platform.
message_4 = read_file(file_path_4)
print(message_4)
#Calling the function to read file
message_5 = read_file(file_path_5)
print(message_5)
#Calling the function 'compare messages'
secret_msg_3 = compare_msg(message_4,message_5)
#Printing the secret message
print(secret_msg_3)
#Function to filter message
def extract_msg(message_f):
#Splitting the message into a list
a_list = message_f.split()
#Creating the lambda function to identify even length words
even_word = lambda x : len(x)%2==0
#Splitting the message into a list
b_list = filter(even_word, a_list)
#Combining the words of a list back to a single string sentence
final_msg = " ".join(b_list)
#Returning the sentence
return final_msg
# Read the last message file and keep its even-length words.
# NOTE(review): file_path_6 and user_data_dir are not defined in this file
# -- presumably provided by the grading platform.
message_6 = read_file(file_path_6)
print(message_6)
#Calling the function 'filter_msg'
secret_msg_4 = extract_msg(message_6)
#Printing the secret message
print(secret_msg_4)
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
# Output path for the assembled secret message.
final_path= user_data_dir + '/secret_message.txt'
#Combine the secret message parts into a single complete secret message
secret_msg = " ".join(message_parts)
#Function to write inside a file
def write_file(secret_msg, path):
    """Append *secret_msg* to the file at *path* (created if missing).

    Bug fix: the original called ``f.close`` without parentheses, so the
    file was never explicitly closed; a ``with`` block guarantees it.
    """
    with open(path, 'a+') as f:
        f.write(secret_msg)
# Persist the assembled secret message and echo it.
write_file(secret_msg,final_path)
#Printing the entire secret message
print(secret_msg)
#Code ends here
| 25.384615
| 72
| 0.694949
|
4a048f5a2ba4c2c903869e6b962a1d9486d91c42
| 10,035
|
py
|
Python
|
relancer-exp/original_notebooks/mathijs_weather-data-in-new-york-city-2016/new-york-city-taxi-playground-with-xgboost.py
|
Chenguang-Zhu/relancer
|
bf1a175b77b7da4cff12fbc5de17dd55246d264d
|
[
"Apache-2.0"
] | 1
|
2022-03-05T22:27:49.000Z
|
2022-03-05T22:27:49.000Z
|
relancer-exp/original_notebooks/mathijs_weather-data-in-new-york-city-2016/new-york-city-taxi-playground-with-xgboost.py
|
Chenguang-Zhu/relancer
|
bf1a175b77b7da4cff12fbc5de17dd55246d264d
|
[
"Apache-2.0"
] | null | null | null |
relancer-exp/original_notebooks/mathijs_weather-data-in-new-york-city-2016/new-york-city-taxi-playground-with-xgboost.py
|
Chenguang-Zhu/relancer
|
bf1a175b77b7da4cff12fbc5de17dd55246d264d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../../../input/mathijs_weather-data-in-new-york-city-2016"))
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
# NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20
# (GridSearchCV moved to sklearn.model_selection); this import fails on
# modern scikit-learn and GridSearchCV is never used below.
from sklearn.grid_search import GridSearchCV
import xgboost
print()
# In[ ]:
# NOTE(review): this read_csv (and several below) points at the weather
# CSV, yet the columns used (passenger_count, vendor_id, trip_duration,
# pickup_datetime, ...) belong to the NYC taxi-trip dataset -- the input
# paths appear machine-rewritten; verify data sources before running.
df = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
# Shrink dtypes to cut the DataFrame's memory footprint.
df.passenger_count = df.passenger_count.astype(np.uint8)
df.vendor_id = df.vendor_id.astype(np.uint8)
df.trip_duration = df.trip_duration.astype(np.uint32)
for c in [c for c in df.columns if c.endswith('tude')]:
    df.loc[:,c] = df[c].astype(np.float32)
print(df.memory_usage().sum()/2**20)
# Parse timestamps and derive calendar features.
df.pickup_datetime=pd.to_datetime(df.pickup_datetime)
df.dropoff_datetime=pd.to_datetime(df.dropoff_datetime)
df['pu_hour'] = df.pickup_datetime.dt.hour
df['yday'] = df.pickup_datetime.dt.dayofyear
df['wday'] = df.pickup_datetime.dt.dayofweek
df['month'] = df.pickup_datetime.dt.month
# In[ ]:
# Correlation heat-map setup (the actual heat-map call was stripped).
sns.set_style('white')
sns.set_context("paper",font_scale=2)
corr = df.corr()
# NOTE(review): np.bool is deprecated (removed in NumPy 1.24); use bool.
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11,9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
print()
# In[ ]:
# Trip-duration histograms in hours (log-scaled counts), then zoomed in.
fig, ax = plt.subplots(ncols=1, nrows=1)
sns.distplot(df['trip_duration']/3600,ax=ax,bins=100,kde=False,hist_kws={'log':True})
# In[ ]:
fig, ax = plt.subplots(ncols=1, nrows=1)
ax.set_xlim(0,30)
sns.distplot(df['trip_duration']/3600,ax=ax,bins=1000,kde=False,hist_kws={'log':True})
def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in miles between two lon/lat points (degrees).

    Works elementwise on scalars or NumPy arrays (uses an Earth radius of
    6367 km, then converts to miles).
    """
    lon1, lat1, lon2, lat2 = (np.radians(v) for v in (lon1, lat1, lon2, lat2))
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    chord = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    arc = 2 * np.arcsin(np.sqrt(chord))
    return 6367 * arc * 0.621371
# In[ ]:
# Straight-line trip distance in miles via the haversine helper above.
df['distance'] = haversine(df.pickup_longitude, df.pickup_latitude, df.dropoff_longitude, df.dropoff_latitude)
wdf = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
# In[ ]:
wdf['date']=pd.to_datetime(wdf.date,format='%d-%m-%Y')
wdf['yday'] = wdf.date.dt.dayofyear
# In[ ]:
wdf.head()
# In[ ]:
# 'T' in the weather data denotes a trace amount; encode it as 0.05.
wdf['snowfall'] = wdf['snow fall'].replace(['T'],0.05).astype(np.float32)
wdf['precipitation'] = wdf['precipitation'].replace(['T'],0.05).astype(np.float32)
wdf['snowdepth'] = wdf['snow depth'].replace(['T'],0.05).astype(np.float32)
# In[ ]:
# Join daily weather onto each trip by day-of-year.
df = pd.merge(df,wdf,on='yday')
df.head()
# In[ ]:
df = df.drop(['date','maximum temperature','minimum temperature'],axis=1)
# In[ ]:
sns.set_style('white')
sns.set_context("paper",font_scale=2)
corr = df.corr()
# NOTE(review): np.bool is deprecated (removed in NumPy 1.24); use bool.
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(11,9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
print()
# In[ ]:
corr.head()
# In[ ]:
# Fastest-route features; per-step route columns are dropped.
# NOTE(review): both halves read the same (weather) file -- see the
# data-path note near the top of the script.
fastest1 = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
fastest2 = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
fastest = pd.concat([fastest1,fastest2],ignore_index=True)
fastest = fastest.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street','end_street'],axis=1)
fastest.head() #
# In[ ]:
df = pd.merge(df,fastest,on='id',how='outer')
df.head()
# In[ ]:
# Outlier removal: long "trips" that barely move, sub-minute trips,
# trips over 79200 s (22 h), and implied speeds above 60 mph.
mask = ((df.trip_duration > 60) & (df.distance < 0.05))
df = df[~mask]
mask = (df.trip_duration < 60)
df = df[~mask]
mask = df.trip_duration > 79200
df = df[~mask]
mask = df.distance/(df.trip_duration/3600) > 60
df = df[~mask]
df.trip_duration = df.trip_duration.astype(np.uint16)
df = df[df.passenger_count > 0]
# In[ ]:
# Median trip duration per (weekday, vendor) as a derived feature.
m = df.groupby(['wday','vendor_id'])[['trip_duration']].apply(np.median)
m.name = 'trip_duration_median'
df = df.join(m, on=['wday','vendor_id'])
# In[ ]:
sns.lmplot(y='trip_duration_median', x='wday',data=df, fit_reg=False, hue='vendor_id')
# In[ ]:
m = df.groupby(['pu_hour','vendor_id'])[['trip_duration']].apply(np.median)
m.name ='trip_duration_median_hour'
df = df.join(m, on=['pu_hour','vendor_id'])
# In[ ]:
sns.lmplot(y='trip_duration_median_hour', x='pu_hour',data=df, fit_reg=False, hue='vendor_id')
# In[ ]:
# JFK and LaGuardia airport coordinates (degrees).
jfk_lon = -73.778889
jfk_lat = 40.639722
lga_lon = -73.872611
lga_lat = 40.77725
# In[ ]:
# Row-wise airport distances (slow: .apply runs haversine per row).
df['jfk_pickup_dist'] = df.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
df['lga_pickup_dist'] = df.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
df['jfk_dropoff_dist'] = df.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
df['lga_dropoff_dist'] = df.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
# In[ ]:
fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True)
ax[0,0].set_xlim(0,50)
sns.distplot(df['jfk_pickup_dist'],ax=ax[0,0],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['jfk_dropoff_dist'],ax=ax[0,1],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['lga_pickup_dist'],ax=ax[1,0],bins=100,kde=False,hist_kws={'log':True})
sns.distplot(df['lga_dropoff_dist'],ax=ax[1,1],bins=100,kde=False,hist_kws={'log':True})
# In[ ]:
# Boolean airport-trip flags: either end within 2 miles of the airport.
df['jfk'] = ((df['jfk_pickup_dist'] < 2) | (df['jfk_dropoff_dist'] < 2))
df['lga'] = ((df['lga_pickup_dist'] < 2) | (df['lga_dropoff_dist'] < 2))
df = df.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)
df.head()
# In[ ]:
# Working-hours pickup flag (hours 9..17).
df['workday'] = ((df['pu_hour'] > 8) & (df['pu_hour'] < 18))
df.head()
# In[ ]:
fig, ax = plt.subplots(ncols=1, nrows=1,figsize=(12,10))
plt.ylim(40.6, 40.9)
plt.xlim(-74.1,-73.7)
ax.scatter(df['pickup_longitude'],df['pickup_latitude'], s=0.01, alpha=1)
# ## RMSLE: Evaluation Metric
# In[ ]:
def rmsle(evaluator, X, real):
    """Root Mean Squared Logarithmic Error of evaluator's predictions on X.

    Compatible with sklearn's ``scoring=`` callable signature.  Negative
    predictions are clamped to zero before scoring (log1p is undefined
    below -1) and their count is printed for diagnostics.

    Improvements over the original: no shadowing of the builtin ``sum``,
    the per-sample Python loop is vectorised with NumPy, and the caller's
    prediction array is no longer mutated in place.
    """
    predicted = np.asarray(evaluator.predict(X))
    print("Number predicted less than 0: {}".format(np.where(predicted < 0)[0].shape))
    predicted = np.clip(predicted, 0, None)
    log_diff = np.log(predicted + 1) - np.log(np.asarray(real) + 1)
    return float(np.sqrt(np.mean(log_diff ** 2)))
# ## Load test data
# In[ ]:
# Apply the identical feature pipeline to the test set.
tdf = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
tdf.pickup_datetime=pd.to_datetime(tdf.pickup_datetime)
#tdf.dropoff_datetime=pd.to_datetime(tdf.dropoff_datetime)
tdf['pu_hour'] = tdf.pickup_datetime.dt.hour
tdf['yday'] = tdf.pickup_datetime.dt.dayofyear
tdf['wday'] = tdf.pickup_datetime.dt.dayofweek
tdf['month'] = tdf.pickup_datetime.dt.month
tdf['distance'] = haversine(tdf.pickup_longitude, tdf.pickup_latitude, tdf.dropoff_longitude, tdf.dropoff_latitude)
fastest_test = pd.read_csv("../../../input/mathijs_weather-data-in-new-york-city-2016/weather_data_nyc_centralpark_2016(1).csv")
tdf = pd.merge(tdf,fastest_test,on='id',how='outer')
tdf = tdf.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street','end_street'],axis=1)
tdf = pd.merge(tdf,wdf,on='yday')
tdf = tdf.drop(['date','maximum temperature','minimum temperature'],axis=1)
tdf['jfk_pickup_dist'] = tdf.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
tdf['lga_pickup_dist'] = tdf.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)
tdf['jfk_dropoff_dist'] = tdf.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
tdf['lga_dropoff_dist'] = tdf.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)
tdf['jfk'] = ((tdf['jfk_pickup_dist'] < 2) | (tdf['jfk_dropoff_dist'] < 2))
tdf['lga'] = ((tdf['lga_pickup_dist'] < 2) | (tdf['lga_dropoff_dist'] < 2))
tdf = tdf.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)
tdf['workday'] = ((tdf['pu_hour'] > 8) & (tdf['pu_hour'] < 18))
# In[ ]:
tdf['snowfall'] = tdf['snow fall'].replace(['T'],0.05).astype(np.float32)
tdf['precipitation'] = tdf['precipitation'].replace(['T'],0.05).astype(np.float32)
tdf['snowdepth'] = tdf['snow depth'].replace(['T'],0.05).astype(np.float32)
# In[ ]:
tdf.head()
# In[ ]:
# Feature matrix / target for training; tfeatures mirrors it for the test set.
features = df[['vendor_id','passenger_count','pickup_latitude','pickup_longitude','dropoff_latitude', 'dropoff_longitude','pu_hour','wday','month','workday','precipitation','snowfall', 'snowdepth','total_distance','total_travel_time','jfk','lga']]
target = df['trip_duration']
# In[ ]:
tfeatures = tdf[['vendor_id','passenger_count','pickup_latitude','pickup_longitude', 'dropoff_latitude','dropoff_longitude','pu_hour','wday','month','workday', 'precipitation','snowfall','snowdepth','total_distance','total_travel_time','jfk','lga']]
# ## XGBoost
# In[ ]:
# Cross-validate, then fit on all training data.
reg = xgboost.XGBRegressor(n_estimators=100, learning_rate=0.01, gamma=0, subsample=0.75, colsample_bytree=1, max_depth=10)
cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)
print(cross_val_score(reg, features, np.ravel(target), cv=cv,scoring=rmsle))
reg.fit(features,target)
# In[ ]:
pred = reg.predict(tfeatures)
print(np.where(pred < 0)[0].shape)
# In[ ]:
# Clamp negative predictions, then write the submission file.
pred[pred < 0] = 0
tdf['trip_duration']=pred.astype(int)
out = tdf[['id','trip_duration']]
out['trip_duration'].isnull().values.any()
out.to_csv('pred_xgboost.csv',index=False)
| 27.121622
| 250
| 0.695167
|
4a048fe1c8c8bf217b7bd91aee66e13fb67aca2c
| 9,282
|
py
|
Python
|
google/cloud/debugger_v2/services/debugger2/transports/base.py
|
LaudateCorpus1/python-debugger-client
|
57c2e9396b2b56e7bed4fe49d68f2cb0a9495a22
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/debugger_v2/services/debugger2/transports/base.py
|
LaudateCorpus1/python-debugger-client
|
57c2e9396b2b56e7bed4fe49d68f2cb0a9495a22
|
[
"Apache-2.0"
] | 27
|
2021-06-25T12:57:10.000Z
|
2022-03-07T16:15:58.000Z
|
google/cloud/debugger_v2/services/debugger2/transports/base.py
|
LaudateCorpus1/python-debugger-client
|
57c2e9396b2b56e7bed4fe49d68f2cb0a9495a22
|
[
"Apache-2.0"
] | 3
|
2021-06-25T11:17:38.000Z
|
2022-01-29T08:09:28.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.debugger_v2.types import debugger
from google.protobuf import empty_pb2 # type: ignore
# Report this library's installed version in the user-agent header; fall
# back to a bare ClientInfo when distribution metadata is unavailable
# (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-debugger-client",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class Debugger2Transport(abc.ABC):
    """Abstract transport class for Debugger2."""
    # OAuth scopes requested when building default credentials.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud_debugger",
    )
    DEFAULT_HOST: str = "clouddebugger.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Each RPC gets its default timeout; the read-style RPCs also get an
        # exponential-backoff retry on DeadlineExceeded/ServiceUnavailable.
        self._wrapped_methods = {
            self.set_breakpoint: gapic_v1.method.wrap_method(
                self.set_breakpoint, default_timeout=600.0, client_info=client_info,
            ),
            self.get_breakpoint: gapic_v1.method.wrap_method(
                self.get_breakpoint,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.delete_breakpoint: gapic_v1.method.wrap_method(
                self.delete_breakpoint,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_breakpoints: gapic_v1.method.wrap_method(
                self.list_breakpoints,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_debuggees: gapic_v1.method.wrap_method(
                self.list_debuggees,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    # RPC stubs below are implemented by the concrete (e.g. gRPC) transports;
    # each property returns the callable for one Debugger2 RPC.
    @property
    def set_breakpoint(
        self,
    ) -> Callable[
        [debugger.SetBreakpointRequest],
        Union[
            debugger.SetBreakpointResponse, Awaitable[debugger.SetBreakpointResponse]
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_breakpoint(
        self,
    ) -> Callable[
        [debugger.GetBreakpointRequest],
        Union[
            debugger.GetBreakpointResponse, Awaitable[debugger.GetBreakpointResponse]
        ],
    ]:
        raise NotImplementedError()
    @property
    def delete_breakpoint(
        self,
    ) -> Callable[
        [debugger.DeleteBreakpointRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_breakpoints(
        self,
    ) -> Callable[
        [debugger.ListBreakpointsRequest],
        Union[
            debugger.ListBreakpointsResponse,
            Awaitable[debugger.ListBreakpointsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def list_debuggees(
        self,
    ) -> Callable[
        [debugger.ListDebuggeesRequest],
        Union[
            debugger.ListDebuggeesResponse, Awaitable[debugger.ListDebuggeesResponse]
        ],
    ]:
        raise NotImplementedError()
# Public API of this module.
__all__ = ("Debugger2Transport",)
| 36.116732
| 101
| 0.599871
|
4a0490040816dea6b93bf555ecc04b510896f2d9
| 1,721
|
py
|
Python
|
Grid2Demand/test_gd.py
|
EntaiWang99/Grid2Demand
|
362f1766ee0946b59a466801178b00b3887b1952
|
[
"MIT"
] | 3
|
2021-01-13T09:16:39.000Z
|
2021-03-23T02:55:41.000Z
|
Grid2Demand/test_gd.py
|
EntaiWang99/Grid2Demand
|
362f1766ee0946b59a466801178b00b3887b1952
|
[
"MIT"
] | null | null | null |
Grid2Demand/test_gd.py
|
EntaiWang99/Grid2Demand
|
362f1766ee0946b59a466801178b00b3887b1952
|
[
"MIT"
] | 2
|
2021-01-17T04:55:15.000Z
|
2021-03-31T12:12:04.000Z
|
# Optional preprocessing with osm2gmns to build the GMNS network from OSM:
# import osm2gmns as og
# net = og.getNetFromOSMFile('map.osm', network_type=('railway', 'auto', 'bike', 'walk'), POIs=True, default_lanes=True,default_speed=True)
# og.connectPOIWithNet(net)
# og.generateNodeActivityInfo(net)
# og.outputNetToCSV(net)
import grid2demand_0525a as gd
import os
# All input/output files are resolved relative to this study-area folder.
os.chdir('./Norfolk_VA')
# NOTE: the bare "Step N" strings below are no-op expression statements used
# purely as section markers.
"Step 1: Read Input Network Data"
gd.ReadNetworkFiles()
# gd.ReadExternalPOI()
# users can give external customized_poi.csv
"Step 2: Partition Grid into cells"
gd.PartitionGrid(number_of_x_blocks=None, number_of_y_blocks=None, cell_width=500, cell_height=500, external_poi=True)
# users can customize number of grid cells or cell's width and height
# Also partition grid for external poi
"Step 3: Simplify the network for path4gmns"
gd.SimplifyNet(link_type_set = {'primary', 'secondary'}, criteria=10)
# users can customize 1) the link types in the simplified network
# and 2) maximum number of poi nodes in each zone by area
# we need to use the simplified network to define trip generation for boundary nodes
gd.GeneratePOIConnector()
# connect useful POI nodes to the network
"Step 4: Get Trip Generation Rates of Each Land Use Type"
gd.GetPoiTripRate()
# users can customize poi_trip_rate.csv for each land use type
"Step 5: Define Production/Attraction Value of Each Node According to POI Type and Activity Purpose"
gd.GetNodeDemand(residential_generation=200, boundary_generation=5000)
# users can customize the values of trip generation for residential nodes and boundary nodes
"Step 6: Calculate Zone-to-zone Accessibility Matrix by Centroid-to-centroid Straight Distance"
gd.ProduceAccessMatrix()
"Step 7: Generate Time-dependent Demand and Agent by Purpose-mode"
gd.GenerateDemand()
| 40.023256
| 139
| 0.790819
|
4a04906d2e4c943891a8d911fd0655df16b83131
| 2,439
|
py
|
Python
|
atest/testresources/listeners/attributeverifyinglistener.py
|
kyle1986/robortframe
|
07dc8f0f1777cd1671bfe832cd42778a50e414f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
atest/testresources/listeners/attributeverifyinglistener.py
|
kyle1986/robortframe
|
07dc8f0f1777cd1671bfe832cd42778a50e414f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
atest/testresources/listeners/attributeverifyinglistener.py
|
kyle1986/robortframe
|
07dc8f0f1777cd1671bfe832cd42778a50e414f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import os
# Robot Framework listener API version implemented by this module.
ROBOT_LISTENER_API_VERSION = '2'
# Shared results file for all hooks; TEMPDIR is expected to be set by the
# surrounding test environment.
OUTFILE = open(os.path.join(os.getenv('TEMPDIR'), 'listener_attrs.txt'), 'w')
# Attribute-name fragments shared by the start_*/end_* hooks below.
START_ATTRS = 'doc starttime '
END_ATTRS = START_ATTRS + 'endtime elapsedtime status '
KW_ATTRS = 'args assign kwname libname type'
# Expected Python type per attribute; anything not listed defaults to a
# string.  NOTE: `long` (and `basestring` below) make this Python 2 only.
EXPECTED_TYPES = {'elapsedtime': (int, long), 'tags': list, 'args': list,
                  'assign': list, 'metadata': dict, 'tests': list,
                  'suites': list, 'totaltests': int}
def start_suite(name, attrs):
    # Suite-start events carry identification, source and test statistics.
    expected = START_ATTRS + 'id longname metadata source tests suites totaltests'
    _verify_attrs('START SUITE', attrs, expected)
def end_suite(name, attrs):
    # Suite-end adds end/elapsed/status fields plus statistics and message.
    expected = END_ATTRS + 'id longname metadata source tests suites totaltests statistics message'
    _verify_attrs('END SUITE', attrs, expected)
def start_test(name, attrs):
    # Test-start events carry id/name/tag/criticality/template info.
    expected = START_ATTRS + 'id longname tags critical template'
    _verify_attrs('START TEST', attrs, expected)
def end_test(name, attrs):
    # Test-end additionally carries the status message.
    expected = END_ATTRS + 'id longname tags critical message template'
    _verify_attrs('END TEST', attrs, expected)
def start_keyword(name, attrs):
    # Keyword events also validate the reported name against kwname/libname.
    expected = START_ATTRS + KW_ATTRS
    _verify_attrs('START KEYWORD', attrs, expected)
    _verify_name(name, **attrs)
def end_keyword(name, attrs):
    # Same checks as start_keyword, with the end-time attribute set added.
    expected = END_ATTRS + KW_ATTRS
    _verify_attrs('END KEYWORD', attrs, expected)
    _verify_name(name, **attrs)
def _verify_attrs(method_name, attrs, names):
    # Compare the received attribute dict against the space-separated list
    # of expected attribute names, writing one PASSED/FAILED line per
    # attribute to OUTFILE.  NOTE: `basestring` makes this Python 2 only.
    names = names.split()
    OUTFILE.write(method_name + '\n')
    if len(names) != len(attrs):
        # Attribute-count mismatch short-circuits the per-attribute checks.
        OUTFILE.write('FAILED: wrong number of attributes\n')
        OUTFILE.write('Expected: %s\nActual: %s\n' % (names, attrs.keys()))
        return
    for name in names:
        value = attrs[name]
        # Attributes without an explicit entry are expected to be strings.
        exp_type = EXPECTED_TYPES.get(name, basestring)
        if isinstance(value, exp_type):
            OUTFILE.write('PASSED | %s: %s\n' % (name, value))
        else:
            OUTFILE.write('FAILED | %s: %r, Expected: %s, Actual: %s\n'
                          % (name, value, exp_type, type(value)))
def _verify_name(name, kwname=None, libname=None, **ignored):
    # Library keywords report 'libname.kwname'; user keywords report just
    # 'kwname' together with an empty library name.
    if libname:
        if name != '%s.%s' % (libname, kwname):
            OUTFILE.write("FAILED | KW NAME: '%s' != '%s.%s'\n" % (name, libname, kwname))
        return
    if name != kwname:
        OUTFILE.write("FAILED | KW NAME: '%s' != '%s'\n" % (name, kwname))
    if libname != '':
        OUTFILE.write("FAILED | LIB NAME: '%s' != ''\n" % libname)
def close():
    # Flush and release the shared results file when the test run ends.
    OUTFILE.close()
| 36.954545
| 103
| 0.613366
|
4a04909b3850c8c0a25f63dcd6807b3cf7bbdc46
| 792
|
py
|
Python
|
main.py
|
lhericourt/trading
|
2564cd4f3a6724e876cd56a0e1250dc7fbab8f17
|
[
"MIT"
] | null | null | null |
main.py
|
lhericourt/trading
|
2564cd4f3a6724e876cd56a0e1250dc7fbab8f17
|
[
"MIT"
] | null | null | null |
main.py
|
lhericourt/trading
|
2564cd4f3a6724e876cd56a0e1250dc7fbab8f17
|
[
"MIT"
] | null | null | null |
from config.load import load_conf
from db.apply_migration import do_migration
from data.candle import get_candles, upload_to_db_candles, get_candles_all_symbols
from data.economic_calendar import upload_to_db_events, get_events_on_period
from utils.utils import convert_to_number
from config.log import setup_custom_logger
# Wire up logging and load the YAML configuration before any data access.
logger = setup_custom_logger(__name__)
load_conf(filepath='config/configuration.yaml')
#import logging
#log = logging.getLogger(__name__)
#do_migration()
# Fetch economic-calendar events for the given period.
# NOTE(review): the arguments look reversed -- the first date (2021-03-16)
# is later than the second (2020-03-16); confirm the expected
# (start, end) order of get_events_on_period.
res = get_events_on_period('2021-03-16', '2020-03-16')
#upload_to_db_events('2010-01-01', '2020-12-22')
#test = get_candles_all_symbols('2020-12-13', '2020-12-14')
#test = get_candles('EUR/USD', 'm5', '2021-02-09', '2021-02-11')
#print('toto')
#upload_to_db_candles('2020-08-06', '2020-12-22')
print('toto')
| 30.461538
| 82
| 0.785354
|
4a049155fabd22f5ba2f602a16bbe2942062e9be
| 1,687
|
py
|
Python
|
image_utli.py
|
USC-MCL/Func-Pool
|
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
|
[
"MIT"
] | 3
|
2020-01-24T19:03:44.000Z
|
2021-04-13T17:22:36.000Z
|
image_utli.py
|
USC-MCL/Func-Pool
|
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
|
[
"MIT"
] | null | null | null |
image_utli.py
|
USC-MCL/Func-Pool
|
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
|
[
"MIT"
] | 3
|
2020-01-24T19:03:45.000Z
|
2020-04-13T08:27:13.000Z
|
# 20201.03.18
import numpy as np
from skimage.measure import block_reduce
import cv2
def MeanPooling(X, win=2):
    """Average-pool an NHWC batch X over non-overlapping win x win windows."""
    return block_reduce(X, (1, win, win, 1), np.mean)
def MaxPooling(X, win=2):
    """Max-pool an NHWC batch X over non-overlapping win x win windows."""
    return block_reduce(X, (1, win, win, 1), np.max)
def mybilinear_interpolation(X, win):
    """Upsample an NHWC tensor X by integer factor *win* with bilinear weights.

    Neighbours that would fall outside the image are clamped to the nearest
    valid pixel, so borders are edge-replicated rather than extrapolated.
    Returns a float64 array of shape (N, H*win, W*win, C).
    """
    n_batch, height, width, n_chan = X.shape
    out = np.zeros((n_batch, height * win, width * win, n_chan))
    for row in range(height * win):
        for col in range(width * win):
            # Fractional offsets inside the source cell, in [0, 1).
            frac_r = (row % win) / float(win)
            frac_c = (col % win) / float(win)
            r0, c0 = row // win, col // win
            # Clamp the lower/right neighbour indices at the image border.
            r1 = r0 + 1 if r0 + 1 < height else r0
            c1 = c0 + 1 if c0 + 1 < width else c0
            top_left = X[:, r0, c0]
            bottom_left = X[:, r1, c0]
            top_right = X[:, r0, c1]
            bottom_right = X[:, r1, c1]
            out[:, row, col] = (
                top_left * (1 - frac_r) * (1 - frac_c)
                + bottom_left * (frac_r) * (1 - frac_c)
                + top_right * (1 - frac_r) * (frac_c)
                + bottom_right * (frac_r) * (frac_c)
            )
    return out
def interpolation(X, win):
    """Upsample an NHWC tensor X by factor *win* using cv2.resize per channel.

    cv2.resize takes its target size as (width, height).
    """
    batch, height, width, channels = X.shape
    out = np.zeros((batch, height * win, width * win, channels))
    for b in range(batch):
        for ch in range(channels):
            out[b, :, :, ch] = cv2.resize(X[b, :, :, ch], (width * win, height * win))
    return out
| 35.145833
| 82
| 0.391227
|
4a04920be1b5b12f4fc5ff0de763a702f1ffaae2
| 363
|
py
|
Python
|
HW3/Mykyta_Marchukov/HW_3_2.py
|
kolyasalubov/Lv-677.PythonCore
|
c9f9107c734a61e398154a90b8a3e249276c2704
|
[
"MIT"
] | null | null | null |
HW3/Mykyta_Marchukov/HW_3_2.py
|
kolyasalubov/Lv-677.PythonCore
|
c9f9107c734a61e398154a90b8a3e249276c2704
|
[
"MIT"
] | null | null | null |
HW3/Mykyta_Marchukov/HW_3_2.py
|
kolyasalubov/Lv-677.PythonCore
|
c9f9107c734a61e398154a90b8a3e249276c2704
|
[
"MIT"
] | 6
|
2022-02-22T22:30:49.000Z
|
2022-03-28T12:51:19.000Z
|
import math

# Read the number as a string so it can be handled digit-by-digit.
enter_number = input("Enter a four-digit number: ")

# Product of the digits.  math.prod over every character generalises the
# original hard-coded four-factor product to numbers of any length.
product_of_numbers = math.prod(int(digit) for digit in enter_number)
print(product_of_numbers)

# The number with its digits reversed.
print(enter_number[::-1])

# The digits sorted in ascending order.
sorted_number = "".join(sorted(enter_number))
print(sorted_number)
| 33
| 100
| 0.793388
|
4a0492b33996084654b13b4aa1b25b64f5163b03
| 2,780
|
py
|
Python
|
c8ydm/core/configuration.py
|
SoftwareAG/cumulocity-devicemanagement-agent
|
60f42ce83db6fb6b3455777deaf8082d71a7da09
|
[
"Apache-2.0"
] | 10
|
2021-08-06T09:29:24.000Z
|
2022-03-11T05:29:32.000Z
|
c8ydm/core/configuration.py
|
SoftwareAG/cumulocity-devicemanagement-agent
|
60f42ce83db6fb6b3455777deaf8082d71a7da09
|
[
"Apache-2.0"
] | 35
|
2021-08-03T12:49:17.000Z
|
2022-03-04T08:24:40.000Z
|
c8ydm/core/configuration.py
|
SoftwareAG/cumulocity-devicemanagement-agent
|
60f42ce83db6fb6b3455777deaf8082d71a7da09
|
[
"Apache-2.0"
] | 8
|
2021-09-01T09:19:14.000Z
|
2022-02-24T15:53:06.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging, time
from c8ydm.framework.modulebase import Listener, Initializer
from c8ydm.framework.smartrest import SmartRESTMessage
class ConfigurationManager(Listener, Initializer):
    """Applies device configuration pushed from Cumulocity (SmartREST 513)
    and reports the active configuration back to the platform (template 113).
    """

    logger = logging.getLogger(__name__)

    def __init__(self, serial, agent, configuration):
        self.serial = serial
        self.agent = agent
        self.configuration = configuration

    def group(self, seq, sep):
        """Split *seq* into sublists, starting a new sublist after every
        element containing *sep*; that element is kept, truncated at the
        first occurrence of *sep*. A trailing empty sublist is dropped.
        """
        buckets = [[]]
        for element in seq:
            cut = element.find(sep)
            if cut < 0:
                buckets[-1].append(element)
            else:
                buckets[-1].append(element[:cut])
                buckets.append([])
        if not buckets[-1]:
            buckets.pop()
        return buckets

    def handleOperation(self, message):
        """Handle an incoming c8y_Configuration operation (messageId 513)."""
        try:
            if 's/ds' in message.topic and message.messageId == '513':
                # When multiple operations arrive in one message, only the
                # first one is processed further.
                self.logger.info(f'Configuration Operation received: {message.values}')
                message.values = self.group(message.values, '\n513')[0]
                # Acknowledge as EXECUTING (501) before applying the change.
                self.agent.publishMessage(
                    SmartRESTMessage('s/us', '501', ['c8y_Configuration']))
                # The payload arrives quoted; strip the surrounding quotes.
                self.configuration.writeConfigString(message.values[1][1:-1])
                # Publish the new configuration (113), then SUCCESS (503).
                config_text = self.configuration.getConfigString()
                self.agent.publishMessage(
                    SmartRESTMessage('s/us', '113', [config_text]))
                self.agent.publishMessage(
                    SmartRESTMessage('s/us', '503', ['c8y_Configuration']))
        except Exception as e:
            self.logger.exception(e)
            # Report FAILED (502) with the error text.
            self.agent.publishMessage(
                SmartRESTMessage('s/us', '502', ['c8y_Configuration', str(e)]))

    def getSupportedOperations(self):
        """Operations this listener handles."""
        return ['c8y_Configuration']

    def getSupportedTemplates(self):
        """No custom SmartREST templates are required."""
        return []

    def getMessages(self):
        """Initializer hook: announce the current configuration (113)."""
        return [
            SmartRESTMessage('s/us', '113', [self.configuration.getConfigString()])
        ]
| 35.189873
| 99
| 0.648921
|
4a0492db24c70477862921adfa826f2c08b028d6
| 5,802
|
py
|
Python
|
tests/rules/test_metadata.py
|
victor-torres/arche
|
1bc31a1397e15860be7c3762be86c47f9e839704
|
[
"MIT"
] | null | null | null |
tests/rules/test_metadata.py
|
victor-torres/arche
|
1bc31a1397e15860be7c3762be86c47f9e839704
|
[
"MIT"
] | null | null | null |
tests/rules/test_metadata.py
|
victor-torres/arche
|
1bc31a1397e15860be7c3762be86c47f9e839704
|
[
"MIT"
] | null | null | null |
from arche import SH_URL
from arche.rules.metadata import (
check_errors,
check_outcome,
check_response_ratio,
compare_finish_time,
compare_response_ratio,
)
from arche.rules.result import Level
from conftest import create_result, Job
import pytest
# (error_count, expected_messages) cases for test_check_errors: the
# scrapystats error counter mapped to the rule messages it should produce.
error_input = [
    (
        {"log_count/ERROR": 10},
        {
            Level.ERROR: [
                (
                    "10 error(s)",
                    (
                        f"Errors for 112358/13/21 - {SH_URL}/112358/13/21/"
                        f"log?filterType=error&filterAndHigher"
                    ),
                )
            ]
        },
    ),
    # Missing or zero error count both report "No errors".
    ({}, {Level.INFO: [("No errors",)]}),
    ({"log_count/ERROR": 0}, {Level.INFO: [("No errors",)]}),
]


@pytest.mark.parametrize("error_count, expected_messages", error_input)
def test_check_errors(get_job, error_count, expected_messages):
    """check_errors reports job errors with a filtered log link, or 'No errors'."""
    job = get_job
    job.metadata = {"scrapystats": error_count}
    # The key feeds the log URL embedded in the expected message above.
    job.key = "112358/13/21"
    result = check_errors(job)
    assert result == create_result("Job Errors", expected_messages)
# (metadata, expected_messages) cases for test_check_outcome: every
# state/close_reason combination other than finished/finished is an error.
outcome_input = [
    (
        {"state": "finished", "close_reason": "cancelled"},
        {Level.ERROR: [("Job has 'finished' state, 'cancelled' close reason",)]},
    ),
    (
        {"state": "cancelled", "close_reason": "finished"},
        {Level.ERROR: [("Job has 'cancelled' state, 'finished' close reason",)]},
    ),
    (
        {"close_reason": "finished"},
        {Level.ERROR: [("Job has 'None' state, 'finished' close reason",)]},
    ),
    (
        {"close_reason": "no_reason"},
        {Level.ERROR: [("Job has 'None' state, 'no_reason' close reason",)]},
    ),
    (
        {"state": "cancelled"},
        {Level.ERROR: [("Job has 'cancelled' state, 'None' close reason",)]},
    ),
    (
        {"state": "finished"},
        {Level.ERROR: [("Job has 'finished' state, 'None' close reason",)]},
    ),
    ({}, {Level.ERROR: [("Job has 'None' state, 'None' close reason",)]}),
    # Only finished state + finished close reason is a clean outcome.
    ({"state": "finished", "close_reason": "finished"}, {Level.INFO: [("Finished",)]}),
]


@pytest.mark.parametrize("metadata, expected_messages", outcome_input)
def test_check_outcome(get_job, metadata, expected_messages):
    """check_outcome flags any state/close_reason pair that is not finished/finished."""
    job = get_job
    job.metadata = metadata
    result = check_outcome(job)
    assert result == create_result("Job Outcome", expected_messages)
# (stats, metadata, expected_messages): 2000 responses over 1000 items
# should yield an informational ratio of 2.0.
response_ratio_inputs = [
    (
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2000}},
        {Level.INFO: [("Number of responses / Number of scraped items - 2.0",)]},
    )
]


@pytest.mark.parametrize("stats, metadata, expected_messages", response_ratio_inputs)
def test_check_response_ratio(stats, metadata, expected_messages):
    """check_response_ratio reports responses-per-item from job stats/metadata."""
    result = check_response_ratio(Job(metadata=metadata, stats=stats))
    assert result == create_result("Responses Per Item Ratio", expected_messages)
# (source_metadata, target_metadata, expected_messages) for
# test_compare_finish_time. finished_time values are epoch milliseconds.
time_inputs = [
    # Identical finish times: under one day apart.
    (
        {
            "state": "finished",
            "close_reason": "finished",
            "finished_time": 1_534_828_902_196,
        },
        {
            "state": "finished",
            "close_reason": "finished",
            "finished_time": 1_534_828_902_196,
        },
        {Level.INFO: [("Less than 1 day difference",)]},
    ),
    # A still-running source job cannot be compared.
    (
        {"state": "running", "finished_time": 1_534_838_902_196},
        {
            "state": "finished",
            "close_reason": "finished",
            "finished_time": 1_534_838_902_196,
        },
        {Level.WARNING: [("Jobs are not finished",)]},
    ),
    # Finish times ~19 days apart trigger a warning.
    (
        {
            "state": "finished",
            "close_reason": "finished",
            "finished_time": 1_534_828_902_196,
        },
        {
            "state": "finished",
            "close_reason": "finished",
            "finished_time": 1_554_858_902_196,
        },
        {Level.WARNING: [("19 day(s) difference between 2 jobs",)]},
    ),
]


@pytest.mark.parametrize(
    "source_metadata, target_metadata, expected_messages", time_inputs
)
def test_compare_finish_time(
    get_jobs, source_metadata, target_metadata, expected_messages
):
    """compare_finish_time warns on unfinished jobs or large finish-time gaps."""
    source_job, target_job = get_jobs
    source_job.metadata = source_metadata
    target_job.metadata = target_metadata
    result = compare_finish_time(source_job, target_job)
    assert result == create_result("Finish Time", expected_messages)
# (source_stats, source_metadata, target_stats, target_metadata,
# expected_messages): ratios are responses/items; equal ratios pass silently,
# a ~13% gap warns, a 50% gap errors.
compare_response_ratio_inputs = [
    (
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2000}},
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2000}},
        {},
    ),
    (
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2000}},
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 4000}},
        {Level.ERROR: [("Difference is 50.0% - 2.0 and 4.0",)]},
    ),
    (
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2000}},
        {"totals": {"input_values": 1000}},
        {"scrapystats": {"downloader/response_count": 2300}},
        {Level.WARNING: [("Difference is 13.0% - 2.0 and 2.3",)]},
    ),
]


@pytest.mark.parametrize(
    "source_stats, source_metadata, target_stats, target_metadata, expected_messages",
    compare_response_ratio_inputs,
)
def test_compare_response_ratio(
    source_stats, source_metadata, target_stats, target_metadata, expected_messages
):
    """compare_response_ratio escalates as the two jobs' ratios diverge."""
    source_job = Job(stats=source_stats, metadata=source_metadata)
    target_job = Job(stats=target_stats, metadata=target_metadata)
    result = compare_response_ratio(source_job, target_job)
    assert result == create_result(
        "Compare Responses Per Item Ratio", expected_messages
    )
| 30.376963
| 87
| 0.596001
|
4a04930fe1f1226421a99b64b8253128132a9713
| 9,751
|
py
|
Python
|
Algorithm.Python/DataConsolidationAlgorithm.py
|
echoplaza/Lean
|
66f32cffe2ddb07532c8160299a7b1b6d67429ee
|
[
"Apache-2.0"
] | 1
|
2021-02-11T21:13:12.000Z
|
2021-02-11T21:13:12.000Z
|
Algorithm.Python/DataConsolidationAlgorithm.py
|
echoplaza/Lean
|
66f32cffe2ddb07532c8160299a7b1b6d67429ee
|
[
"Apache-2.0"
] | 1
|
2021-01-23T17:59:52.000Z
|
2021-01-23T17:59:52.000Z
|
Algorithm.Python/DataConsolidationAlgorithm.py
|
echoplaza/Lean
|
66f32cffe2ddb07532c8160299a7b1b6d67429ee
|
[
"Apache-2.0"
] | null | null | null |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Example algorithm giving an introduction into using IDataConsolidators.
### This is an advanced QC concept and requires a certain level of comfort using C# and its event system.
###
### What is an IDataConsolidator?
### IDataConsolidator is a plugin point that can be used to transform your data more easily.
### In this example we show one of the simplest consolidators, the TradeBarConsolidator.
### This type is capable of taking a timespan to indicate how long each bar should be, or an
### integer to indicate how many bars should be aggregated into one.
###
### When a new 'consolidated' piece of data is produced by the IDataConsolidator, an event is fired
### with the argument of the new data.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="consolidating data" />
class DataConsolidationAlgorithm(QCAlgorithm):
    '''Demonstrates IDataConsolidators: time-based, count-based, sequential,
    calendar-based and custom consolidation of subscribed market data.'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        self.SetStartDate(DateTime(2013, 10, 7, 9, 30, 0))  # Set Start Date
        self.SetEndDate(self.StartDate + timedelta(60))     # Set End Date
        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity("SPY")
        self.AddForex("EURUSD", Resolution.Hour)

        # define our 30 minute trade bar consolidator. we can
        # access the 30 minute bar from the DataConsolidated events
        thirtyMinuteConsolidator = TradeBarConsolidator(timedelta(minutes=30))

        # attach our event handler. the event handler is a function that will
        # be called each time we produce a new consolidated piece of data.
        thirtyMinuteConsolidator.DataConsolidated += self.ThirtyMinuteBarHandler

        # this call adds our 30 minute consolidator to
        # the manager to receive updates from the engine
        self.SubscriptionManager.AddConsolidator("SPY", thirtyMinuteConsolidator)

        # here we'll define a slightly more complex consolidator. what we're trying to produce is
        # a 3 day bar. Now we could just use a single TradeBarConsolidator like above and pass in
        # TimeSpan.FromDays(3), but in reality that's not what we want. For time spans of longer than
        # a day we'll get incorrect results around weekends and such. What we really want are tradeable
        # days. So we'll create a daily consolidator, and then wrap it with a 3 count consolidator.

        # first define a one day trade bar -- this produces a consolidated piece of data after a day has passed
        oneDayConsolidator = TradeBarConsolidator(timedelta(1))

        # next define our 3 count trade bar -- this produces a consolidated piece of data after it sees 3 pieces of data
        threeCountConsolidator = TradeBarConsolidator(3)

        # here we combine them to make a new, 3 day trade bar. The SequentialConsolidator allows composition of
        # consolidators. It takes the consolidated output of one consolidator (in this case, the oneDayConsolidator)
        # and pipes it through to the threeCountConsolidator. His output will be a 3 day bar.
        three_oneDayBar = SequentialConsolidator(oneDayConsolidator, threeCountConsolidator)

        # attach our handler
        three_oneDayBar.DataConsolidated += self.ThreeDayBarConsolidatedHandler

        # this call adds our 3 day to the manager to receive updates from the engine
        self.SubscriptionManager.AddConsolidator("SPY", three_oneDayBar)

        # Custom monthly consolidator
        customMonthlyConsolidator = TradeBarConsolidator(self.CustomMonthly)
        customMonthlyConsolidator.DataConsolidated += self.CustomMonthlyHandler
        self.SubscriptionManager.AddConsolidator("SPY", customMonthlyConsolidator)

        # API convenience method for easily receiving consolidated data
        self.Consolidate("SPY", timedelta(minutes=45), self.FortyFiveMinuteBarHandler)
        self.Consolidate("SPY", Resolution.Hour, self.HourBarHandler)
        self.Consolidate("EURUSD", Resolution.Daily, self.DailyEurUsdBarHandler)

        # API convenience method for easily receiving weekly-consolidated data
        self.Consolidate("SPY", Calendar.Weekly, self.CalendarTradeBarHandler)
        self.Consolidate("EURUSD", Calendar.Weekly, self.CalendarQuoteBarHandler)

        # API convenience method for easily receiving monthly-consolidated data
        self.Consolidate("SPY", Calendar.Monthly, self.CalendarTradeBarHandler)
        self.Consolidate("EURUSD", Calendar.Monthly, self.CalendarQuoteBarHandler)

        # API convenience method for easily receiving quarterly-consolidated data
        self.Consolidate("SPY", Calendar.Quarterly, self.CalendarTradeBarHandler)
        self.Consolidate("EURUSD", Calendar.Quarterly, self.CalendarQuoteBarHandler)

        # API convenience method for easily receiving yearly-consolidated data
        self.Consolidate("SPY", Calendar.Yearly, self.CalendarTradeBarHandler)
        self.Consolidate("EURUSD", Calendar.Yearly, self.CalendarQuoteBarHandler)

        # some securities may have trade and quote data available, so we can choose it based on TickType:
        #self.Consolidate("BTCUSD", Resolution.Hour, TickType.Trade, self.HourBarHandler) # to get TradeBar
        #self.Consolidate("BTCUSD", Resolution.Hour, TickType.Quote, self.HourBarHandler) # to get QuoteBar (default)

        # flags checked in OnEndOfAlgorithm to assert the handlers fired
        self.consolidatedHour = False
        self.consolidated45Minute = False
        self.__last = None

    def OnData(self, data):
        '''We need to declare this method'''
        pass

    def OnEndOfDay(self):
        # close up shop each day and reset our 'last' value so we start tomorrow fresh
        self.Liquidate("SPY")
        self.__last = None

    def ThirtyMinuteBarHandler(self, sender, consolidated):
        '''This is our event handler for our 30 minute trade bar defined above in Initialize(). So each time the
        consolidator produces a new 30 minute bar, this function will be called automatically. The 'sender' parameter
        will be the instance of the IDataConsolidator that invoked the event, but you'll almost never need that!'''
        if self.__last is not None and consolidated.Close > self.__last.Close:
            self.Log(f"{consolidated.Time} >> SPY >> LONG >> 100 >> {self.Portfolio['SPY'].Quantity}")
            self.Order("SPY", 100)
        elif self.__last is not None and consolidated.Close < self.__last.Close:
            self.Log(f"{consolidated.Time} >> SPY >> SHORT >> 100 >> {self.Portfolio['SPY'].Quantity}")
            self.Order("SPY", -100)
        self.__last = consolidated

    def ThreeDayBarConsolidatedHandler(self, sender, consolidated):
        ''' This is our event handler for our 3 day trade bar defined above in Initialize(). So each time the
        consolidator produces a new 3 day bar, this function will be called automatically. The 'sender' parameter
        will be the instance of the IDataConsolidator that invoked the event, but you'll almost never need that!'''
        self.Log(f"{consolidated.Time} >> Plotting!")
        # NOTE(review): series label "3HourBar" describes a 3-day bar; left
        # unchanged because renaming would alter existing chart output.
        self.Plot(consolidated.Symbol.Value, "3HourBar", consolidated.Close)

    def FortyFiveMinuteBarHandler(self, consolidated):
        ''' This is our event handler for our 45 minute consolidated defined using the Consolidate method'''
        self.consolidated45Minute = True
        self.Log(f"{consolidated.EndTime} >> FortyFiveMinuteBarHandler >> {consolidated.Close}")

    def HourBarHandler(self, consolidated):
        '''This is our event handler for our one hour consolidated defined using the Consolidate method'''
        self.consolidatedHour = True
        # Fixed copy-paste bug: this handler previously logged the label
        # "FortyFiveMinuteBarHandler".
        self.Log(f"{consolidated.EndTime} >> HourBarHandler >> {consolidated.Close}")

    def DailyEurUsdBarHandler(self, consolidated):
        '''This is our event handler for our daily consolidated defined using the Consolidate method'''
        self.Log(f"{consolidated.EndTime} EURUSD Daily consolidated.")

    def CalendarTradeBarHandler(self, tradeBar):
        '''Handler for all calendar-based (weekly/monthly/quarterly/yearly) trade bars.'''
        self.Log(f'{self.Time} :: {tradeBar.Time} {tradeBar.Close}')

    def CalendarQuoteBarHandler(self, quoteBar):
        '''Handler for all calendar-based (weekly/monthly/quarterly/yearly) quote bars.'''
        self.Log(f'{self.Time} :: {quoteBar.Time} {quoteBar.Close}')

    def CustomMonthly(self, dt):
        '''Custom Monthly Func: returns a CalendarInfo spanning dt's calendar month.'''
        start = dt.replace(day=1).date()
        # day 28 + 4 days always lands in the next month; walking back
        # (day - 1) days gives the first day of that next month.
        end = dt.replace(day=28) + timedelta(4)
        end = (end - timedelta(end.day-1)).date()
        return CalendarInfo(start, end - start)

    def CustomMonthlyHandler(self, sender, consolidated):
        '''This is our event handler Custom Monthly function'''
        self.Log(f"{consolidated.Time} >> CustomMonthlyHandler >> {consolidated.Close}")

    def OnEndOfAlgorithm(self):
        '''Regression check: both Consolidate-based handlers must have fired.'''
        if not self.consolidatedHour:
            raise Exception("Expected hourly consolidator to be fired.")
        if not self.consolidated45Minute:
            raise Exception("Expected 45-minute consolidator to be fired.")
| 53.872928
| 151
| 0.715209
|
4a04937e82da59896dba130a79db72687e757b9d
| 430
|
py
|
Python
|
plotly/validators/mesh3d/colorbar/_tickangle.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/mesh3d/colorbar/_tickangle.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/mesh3d/colorbar/_tickangle.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Auto-generated validator for the ``mesh3d.colorbar.tickangle`` property."""

    def __init__(
        self, plotly_name='tickangle', parent_name='mesh3d.colorbar', **kwargs
    ):
        # Delegates entirely to AngleValidator with this property's fixed
        # edit_type/role; extra generator-supplied options pass via **kwargs.
        super(TickangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='colorbars',
            role='style',
            **kwargs
        )
| 26.875
| 78
| 0.639535
|
4a04961a8a9161e89891e9c4af87c1e5628d6ea8
| 1,741
|
py
|
Python
|
flask/fundamentals/dojo_survey/flask_app/config/mysqlconnection.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
flask/fundamentals/dojo_survey/flask_app/config/mysqlconnection.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
flask/fundamentals/dojo_survey/flask_app/config/mysqlconnection.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
import pymysql.cursors
class MySQLConnection:
    """One-shot helper around a PyMySQL connection (DictCursor, autocommit).

    NOTE(review): query_db closes the connection in its ``finally`` block, so
    each instance supports exactly one query — presumably by design of this
    boilerplate; confirm before reusing an instance.
    """

    def __init__(self, db):
        # Local dev credentials; connect directly and keep the handle.
        self.connection = pymysql.connect(
            host='localhost',
            user='root',
            password='root',
            db=db,
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor,
            autocommit=True,
        )

    def query_db(self, query, data=None):
        """Run *query* with *data* interpolated via mogrify.

        Returns lastrowid for INSERTs, a list of dicts for SELECTs, None for
        other statements, and False when the query raises.
        """
        with self.connection.cursor() as cursor:
            try:
                query = cursor.mogrify(query, data)
                print("Running Query:", query)
                cursor.execute(query)
                lowered = query.lower()
                if lowered.find("insert") >= 0:
                    # INSERT queries will return the ID NUMBER of the row inserted
                    self.connection.commit()
                    return cursor.lastrowid
                if lowered.find("select") >= 0:
                    # SELECT queries will return the data from the database as a LIST OF DICTIONARIES
                    return cursor.fetchall()
                # UPDATE and DELETE queries will return nothing
                self.connection.commit()
            except Exception as e:
                # if the query fails the method will return FALSE
                print("Something went wrong", e)
                return False
            finally:
                self.connection.close()
# connectToMySQL receives the database we're using and uses it to create an instance of MySQLConnection
def connectToMySQL(db):
    """Return a fresh MySQLConnection bound to database *db*."""
    return MySQLConnection(db)
| 44.641026
| 103
| 0.506031
|
4a0496bd87f705c63373afc912e40d7c7277e27c
| 10,490
|
py
|
Python
|
kedro/framework/cli/jupyter.py
|
hfwittmann/kedro
|
b0d4fcd8f19b49a7916d78fd09daeb6209a7b6c6
|
[
"Apache-2.0"
] | 1
|
2021-11-25T12:33:13.000Z
|
2021-11-25T12:33:13.000Z
|
kedro/framework/cli/jupyter.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
kedro/framework/cli/jupyter.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
"""A collection of helper functions to integrate with Jupyter/IPython
and CLI commands for working with Kedro catalog.
"""
import json
import os
import re
import sys
from collections import Counter
from glob import iglob
from pathlib import Path
from typing import Any, Dict, Iterable, List
from warnings import warn
import click
from click import secho
from jupyter_client.kernelspec import NATIVE_KERNEL_NAME, KernelSpecManager
from traitlets import Unicode
from kedro.framework.cli.utils import (
KedroCliError,
_check_module_importable,
command_with_verbosity,
env_option,
forward_command,
ipython_message,
load_entry_points,
python_call,
)
from kedro.framework.project import validate_settings
from kedro.framework.startup import ProjectMetadata
JUPYTER_IP_HELP = "IP address of the Jupyter server."
JUPYTER_ALL_KERNELS_HELP = "Display all available Python kernels."
JUPYTER_IDLE_TIMEOUT_HELP = """When a notebook is closed, Jupyter server will
terminate its kernel after so many seconds of inactivity. This does not affect
any open notebooks."""
CONVERT_ALL_HELP = """Extract the nodes from all notebooks in the Kedro project directory,
including sub-folders."""
OVERWRITE_HELP = """If Python file already exists for the equivalent notebook,
overwrite its contents."""
def collect_line_magic():
    """Interface function for collecting line magic functions from plugin entry points."""
    # Delegates to the generic plugin loader; returns whatever callables are
    # registered under the "line_magic" entry-point group.
    return load_entry_points("line_magic")
class SingleKernelSpecManager(KernelSpecManager):
    """A custom KernelSpec manager to be used by Kedro projects.

    It limits the kernels to the default one only,
    to make it less confusing for users, and gives it a sensible name.
    """

    # Configurable display name applied to the native kernel.
    default_kernel_name = Unicode(
        "Kedro", config=True, help="Alternative name for the default kernel"
    )
    # Restrict the visible kernels to the native Python kernel only.
    # NOTE(review): newer jupyter_client releases renamed this trait —
    # confirm the pinned version still honours ``whitelist``.
    whitelist = [NATIVE_KERNEL_NAME]

    def get_kernel_spec(self, kernel_name):
        """
        This function will only be called by Jupyter to get a KernelSpec
        for the default kernel.
        We replace the name by something sensible here.
        """
        kernelspec = super().get_kernel_spec(kernel_name)
        # Only rebrand the native kernel; any other spec passes through.
        if kernel_name == NATIVE_KERNEL_NAME:
            kernelspec.display_name = self.default_kernel_name
        return kernelspec
def _update_ipython_dir(project_path: Path) -> None:
os.environ["IPYTHONDIR"] = str(project_path / ".ipython")
# pylint: disable=missing-function-docstring
# Root click group for the plugin; never invoked directly by users.
@click.group(name="Kedro")
def jupyter_cli():  # pragma: no cover
    pass


# "kedro jupyter" sub-group; the docstring below is the CLI help text and is
# therefore left exactly as-is.
@jupyter_cli.group()
def jupyter():
    """Open Jupyter Notebook / Lab with project specific variables loaded, or
    convert notebooks into Kedro code.
    """
@forward_command(jupyter, "notebook", forward_help=True)
# Consistency fix: reuse the module-level JUPYTER_IP_HELP constant (its value
# is the identical string this option previously inlined), matching jupyter_lab.
@click.option("--ip", "ip_address", type=str, default="127.0.0.1", help=JUPYTER_IP_HELP)
@click.option(
    "--all-kernels", is_flag=True, default=False, help=JUPYTER_ALL_KERNELS_HELP
)
@click.option("--idle-timeout", type=int, default=30, help=JUPYTER_IDLE_TIMEOUT_HELP)
@env_option
@click.pass_obj  # this will pass the metadata as first argument
def jupyter_notebook(
    metadata: ProjectMetadata,
    ip_address,
    all_kernels,
    env,
    idle_timeout,
    args,
    **kwargs,
):  # pylint: disable=unused-argument,too-many-arguments
    """Open Jupyter Notebook with project specific variables loaded."""
    _check_module_importable("jupyter_core")
    validate_settings()

    # Skip the startup banner when only help output was requested.
    if "-h" not in args and "--help" not in args:
        ipython_message(all_kernels)

    _update_ipython_dir(metadata.project_path)
    arguments = _build_jupyter_command(
        "notebook",
        ip_address=ip_address,
        all_kernels=all_kernels,
        args=args,
        idle_timeout=idle_timeout,
        project_name=metadata.project_name,
    )

    # Forward KEDRO_ENV (if set) into the Jupyter subprocess environment.
    python_call_kwargs = _build_jupyter_env(env)
    python_call("jupyter", arguments, **python_call_kwargs)
@forward_command(jupyter, "lab", forward_help=True)
@click.option("--ip", "ip_address", type=str, default="127.0.0.1", help=JUPYTER_IP_HELP)
@click.option(
    "--all-kernels", is_flag=True, default=False, help=JUPYTER_ALL_KERNELS_HELP
)
@click.option("--idle-timeout", type=int, default=30, help=JUPYTER_IDLE_TIMEOUT_HELP)
@env_option
@click.pass_obj  # this will pass the metadata as first argument
def jupyter_lab(
    metadata: ProjectMetadata,
    ip_address,
    all_kernels,
    env,
    idle_timeout,
    args,
    **kwargs,
):  # pylint: disable=unused-argument,too-many-arguments
    """Open Jupyter Lab with project specific variables loaded."""
    _check_module_importable("jupyter_core")
    validate_settings()

    # Skip the startup banner when only help output was requested.
    if "-h" not in args and "--help" not in args:
        ipython_message(all_kernels)

    _update_ipython_dir(metadata.project_path)
    arguments = _build_jupyter_command(
        "lab",
        ip_address=ip_address,
        all_kernels=all_kernels,
        args=args,
        idle_timeout=idle_timeout,
        project_name=metadata.project_name,
    )

    # Forward KEDRO_ENV (if set) into the Jupyter subprocess environment.
    python_call_kwargs = _build_jupyter_env(env)
    python_call("jupyter", arguments, **python_call_kwargs)
@command_with_verbosity(jupyter, "convert")
@click.option("--all", "-a", "all_flag", is_flag=True, help=CONVERT_ALL_HELP)
@click.option("-y", "overwrite_flag", is_flag=True, help=OVERWRITE_HELP)
@click.argument(
    "filepath",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    required=False,
    nargs=-1,
)
@env_option
@click.pass_obj  # this will pass the metadata as first argument
def convert_notebook(
    metadata: ProjectMetadata, all_flag, overwrite_flag, filepath, env, **kwargs
):  # pylint: disable=unused-argument, too-many-locals
    """Convert selected or all notebooks found in a Kedro project
    to Kedro code, by exporting code from the appropriately-tagged cells:
    Cells tagged as `node` will be copied over to a Python file matching
    the name of the notebook, under `<source_dir>/<package_name>/nodes`.
    *Note*: Make sure your notebooks have unique names!
    FILEPATH: Path(s) to exact notebook file(s) to be converted. Both
    relative and absolute paths are accepted.
    Should not be provided if --all flag is already present.
    """
    project_path = metadata.project_path
    source_path = metadata.source_dir
    package_name = metadata.package_name

    _update_ipython_dir(project_path)

    # Either explicit paths or --all must be given; otherwise bail out.
    if not filepath and not all_flag:
        secho(
            "Please specify a notebook filepath "
            "or add '--all' to convert all notebooks."
        )
        sys.exit(1)

    if all_flag:
        # pathlib glob does not ignore hidden directories,
        # whereas Python glob does, which is more useful in
        # ensuring checkpoints will not be included
        pattern = project_path / "**" / "*.ipynb"
        notebooks = sorted(Path(p) for p in iglob(str(pattern), recursive=True))
    else:
        notebooks = [Path(f) for f in filepath]

    # Output files are keyed by notebook stem, so stems must be unique.
    counter = Counter(n.stem for n in notebooks)
    non_unique_names = [name for name, counts in counter.items() if counts > 1]
    if non_unique_names:
        names = ", ".join(non_unique_names)
        raise KedroCliError(
            f"Found non-unique notebook names! Please rename the following: {names}"
        )

    output_dir = source_path / package_name / "nodes"
    if not output_dir.is_dir():
        output_dir.mkdir()
        (output_dir / "__init__.py").touch()

    for notebook in notebooks:
        secho(f"Converting notebook '{notebook}'...")
        output_path = output_dir / f"{notebook.stem}.py"

        # Existing outputs are only overwritten with -y or interactive consent.
        if output_path.is_file():
            overwrite = overwrite_flag or click.confirm(
                f"Output file {output_path} already exists. Overwrite?", default=False
            )
            if overwrite:
                _export_nodes(notebook, output_path)
        else:
            _export_nodes(notebook, output_path)

    # NOTE(review): click.secho's ``color`` parameter toggles colour output;
    # ``fg="green"`` was probably intended here — confirm before changing.
    secho("Done!", color="green")  # type: ignore
def _build_jupyter_command( # pylint: disable=too-many-arguments
base: str,
ip_address: str,
all_kernels: bool,
args: Iterable[str],
idle_timeout: int,
project_name: str = "Kedro",
) -> List[str]:
cmd = [
base,
"--ip",
ip_address,
f"--MappingKernelManager.cull_idle_timeout={idle_timeout}",
f"--MappingKernelManager.cull_interval={idle_timeout}",
]
if not all_kernels:
kernel_name = re.sub(r"[^\w]+", "", project_name).strip() or "Kedro"
cmd += [
"--NotebookApp.kernel_spec_manager_class="
"kedro.framework.cli.jupyter.SingleKernelSpecManager",
f"--KernelSpecManager.default_kernel_name='{kernel_name}'",
]
return cmd + list(args)
def _build_jupyter_env(kedro_env: str) -> Dict[str, Any]:
"""Build the environment dictionary that gets injected into the subprocess running
Jupyter. Since the subprocess has access only to the environment variables passed
in, we need to copy the current environment and add ``KEDRO_ENV``.
"""
if not kedro_env:
return {}
jupyter_env = os.environ.copy()
jupyter_env["KEDRO_ENV"] = kedro_env
return {"env": jupyter_env}
def _export_nodes(filepath: Path, output_path: Path) -> None:
    """Copy code from Jupyter cells into nodes in src/<package_name>/nodes/,
    under filename with same name as notebook.

    Args:
        filepath: Path to Jupyter notebook file
        output_path: Path where notebook cells' source code will be exported

    Raises:
        KedroCliError: When provided a filepath that cannot be read as a
            Jupyer notebook and loaded into json format.
    """
    try:
        content = json.loads(filepath.read_text())
    except json.JSONDecodeError as exc:
        raise KedroCliError(
            f"Provided filepath is not a Jupyter notebook: {filepath}"
        ) from exc

    # Only code cells explicitly tagged "node" are exported.
    node_cells = [
        cell
        for cell in content["cells"]
        if cell["cell_type"] == "code" and "node" in cell["metadata"].get("tags", {})
    ]
    if not node_cells:
        warn(f"Skipping notebook '{filepath}' - no nodes to export.")
        return

    # Truncate any previous output, then append each tagged cell in order.
    output_path.write_text("")
    for cell in node_cells:
        _append_source_code(cell, output_path)
def _append_source_code(cell: Dict[str, Any], path: Path) -> None:
source_code = "".join(cell["source"]).strip() + "\n"
with path.open(mode="a") as file_:
file_.write(source_code)
| 32.276923
| 90
| 0.687321
|
4a04974e87ad7ea9ff32ca7ab304310ef01f443f
| 3,140
|
py
|
Python
|
setup.py
|
idorax/tmt
|
f8ddd02838b2e7f761d53bf640225b456c896f64
|
[
"MIT"
] | null | null | null |
setup.py
|
idorax/tmt
|
f8ddd02838b2e7f761d53bf640225b456c896f64
|
[
"MIT"
] | null | null | null |
setup.py
|
idorax/tmt
|
f8ddd02838b2e7f761d53bf640225b456c896f64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8

# Packaging script for tmt (Test Management Tool). The version is the single
# source of truth in tmt.spec and is parsed from there at build time.

import re
import sys
from io import open

from setuptools import setup

# Parse version from the spec file
with open('tmt.spec', encoding='utf-8') as specfile:
    lines = "\n".join(line.rstrip() for line in specfile)
    version = re.search('Version: (.+)', lines).group(1).rstrip()

# acceptable version schema: major.minor[.patch][sub]
__version__ = version

__pkg__ = 'tmt'
__pkgdata__ = {
    'tmt': ['schemas/*.yaml']
    }
__pkgdir__ = {}
__pkgs__ = [
    'tmt',
    'tmt/plugins',
    'tmt/steps',
    'tmt/steps/discover',
    'tmt/steps/provision',
    'tmt/steps/prepare',
    'tmt/steps/execute',
    'tmt/steps/report',
    'tmt/steps/finish',
    ]
__provides__ = ['tmt']
__desc__ = 'Test Management Tool'
__scripts__ = ['bin/tmt']

# Prepare install requires and extra requires
install_requires = [
    'fmf>=1.1.0',
    'click',
    'requests',
    'ruamel.yaml',
    ]
# typing_extensions is needed with Python 3.7 and older, types imported
# from that package (Literal, Protocol, TypedDict, ...) become available
# from typing since Python 3.8.
# NOTE(review): these checks compare only the minor component, assuming
# a 3.x interpreter.
if sys.version_info.minor <= 7:
    install_requires.append('typing-extensions>=3.7.4.3')
# dataclasses is needed with Python 3.6
if sys.version_info.minor <= 6:
    install_requires.append('dataclasses')
extras_require = {
    'docs': [
        'sphinx>=3',
        'sphinx_rtd_theme'],
    'tests': [
        'pytest',
        'python-coveralls',
        'requre',
        'pre-commit',
        'mypy'
        ],
    'provision': ['testcloud>=0.7.0'],
    'convert': [
        'nitrate',
        'markdown',
        'python-bugzilla',
        'html2text'],
    'report-html': ['jinja2'],
    'report-junit': ['junit_xml'],
    }
# The 'all' extra is the flat union of every other extra.
extras_require['all'] = [
    dependency
    for extra in extras_require.values()
    for dependency in extra]

pip_src = 'https://pypi.python.org/packages/source'
__deplinks__ = []

# README is in the parent directory
readme = 'README.rst'
with open(readme, encoding='utf-8') as _file:
    readme = _file.read()

github = 'https://github.com/teemtee/tmt'
download_url = '{0}/archive/main.zip'.format(github)

default_setup = dict(
    url=github,
    license='MIT',
    author='Petr Splichal',
    author_email='psplicha@redhat.com',
    maintainer='Petr Splichal',
    maintainer_email='psplicha@redhat.com',
    download_url=download_url,
    long_description=readme,
    data_files=[],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Utilities',
        ],
    keywords=['metadata', 'testing'],
    dependency_links=__deplinks__,
    description=__desc__,
    install_requires=install_requires,
    extras_require=extras_require,
    name=__pkg__,
    package_data=__pkgdata__,
    package_dir=__pkgdir__,
    packages=__pkgs__,
    provides=__provides__,
    scripts=__scripts__,
    version=__version__,
    )

setup(**default_setup)
| 25.12
| 72
| 0.637898
|
4a049ac55276bd30c07a697ade4d1798d29c367f
| 12,044
|
py
|
Python
|
sdk/python/pulumi_azure_native/aad/v20170101/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/aad/v20170101/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/aad/v20170101/outputs.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DomainSecuritySettingsResponse',
'HealthAlertResponse',
'HealthMonitorResponse',
'LdapsSettingsResponse',
'NotificationSettingsResponse',
]
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate rather than
# editing by hand. All fields are optional and stored under camelCase wire
# names via pulumi.set/pulumi.get.
@pulumi.output_type
class DomainSecuritySettingsResponse(dict):
    """
    Domain Security Settings
    """
    def __init__(__self__, *,
                 ntlm_v1: Optional[str] = None,
                 sync_ntlm_passwords: Optional[str] = None,
                 tls_v1: Optional[str] = None):
        """
        Domain Security Settings
        :param str ntlm_v1: A flag to determine whether or not NtlmV1 is enabled or disabled.
        :param str sync_ntlm_passwords: A flag to determine whether or not SyncNtlmPasswords is enabled or disabled.
        :param str tls_v1: A flag to determine whether or not TlsV1 is enabled or disabled.
        """
        # Only store the values that were explicitly provided.
        if ntlm_v1 is not None:
            pulumi.set(__self__, "ntlm_v1", ntlm_v1)
        if sync_ntlm_passwords is not None:
            pulumi.set(__self__, "sync_ntlm_passwords", sync_ntlm_passwords)
        if tls_v1 is not None:
            pulumi.set(__self__, "tls_v1", tls_v1)

    @property
    @pulumi.getter(name="ntlmV1")
    def ntlm_v1(self) -> Optional[str]:
        """
        A flag to determine whether or not NtlmV1 is enabled or disabled.
        """
        return pulumi.get(self, "ntlm_v1")

    @property
    @pulumi.getter(name="syncNtlmPasswords")
    def sync_ntlm_passwords(self) -> Optional[str]:
        """
        A flag to determine whether or not SyncNtlmPasswords is enabled or disabled.
        """
        return pulumi.get(self, "sync_ntlm_passwords")

    @property
    @pulumi.getter(name="tlsV1")
    def tls_v1(self) -> Optional[str]:
        """
        A flag to determine whether or not TlsV1 is enabled or disabled.
        """
        return pulumi.get(self, "tls_v1")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate rather than
# editing by hand. All seven fields are required.
@pulumi.output_type
class HealthAlertResponse(dict):
    """
    Health Alert Description
    """
    def __init__(__self__, *,
                 id: str,
                 issue: str,
                 last_detected: str,
                 name: str,
                 raised: str,
                 resolution_uri: str,
                 severity: str):
        """
        Health Alert Description
        :param str id: Health Alert Id
        :param str issue: Health Alert Issue
        :param str last_detected: Health Alert Last Detected DateTime
        :param str name: Health Alert Name
        :param str raised: Health Alert Raised DateTime
        :param str resolution_uri: Health Alert TSG Link
        :param str severity: Health Alert Severity
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "issue", issue)
        pulumi.set(__self__, "last_detected", last_detected)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "raised", raised)
        pulumi.set(__self__, "resolution_uri", resolution_uri)
        pulumi.set(__self__, "severity", severity)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Health Alert Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def issue(self) -> str:
        """
        Health Alert Issue
        """
        return pulumi.get(self, "issue")

    @property
    @pulumi.getter(name="lastDetected")
    def last_detected(self) -> str:
        """
        Health Alert Last Detected DateTime
        """
        return pulumi.get(self, "last_detected")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Health Alert Name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def raised(self) -> str:
        """
        Health Alert Raised DateTime
        """
        return pulumi.get(self, "raised")

    @property
    @pulumi.getter(name="resolutionUri")
    def resolution_uri(self) -> str:
        """
        Health Alert TSG Link
        """
        return pulumi.get(self, "resolution_uri")

    @property
    @pulumi.getter
    def severity(self) -> str:
        """
        Health Alert Severity
        """
        return pulumi.get(self, "severity")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate rather than
# editing by hand. All three fields are required.
@pulumi.output_type
class HealthMonitorResponse(dict):
    """
    Health Monitor Description
    """
    def __init__(__self__, *,
                 details: str,
                 id: str,
                 name: str):
        """
        Health Monitor Description
        :param str details: Health Monitor Details
        :param str id: Health Monitor Id
        :param str name: Health Monitor Name
        """
        pulumi.set(__self__, "details", details)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def details(self) -> str:
        """
        Health Monitor Details
        """
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Health Monitor Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Health Monitor Name
        """
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate rather than
# editing by hand. The first four fields are required (server-computed
# outputs); the remaining four are optional inputs.
@pulumi.output_type
class LdapsSettingsResponse(dict):
    """
    Secure LDAP Settings
    """
    def __init__(__self__, *,
                 certificate_not_after: str,
                 certificate_thumbprint: str,
                 external_access_ip_address: str,
                 public_certificate: str,
                 external_access: Optional[str] = None,
                 ldaps: Optional[str] = None,
                 pfx_certificate: Optional[str] = None,
                 pfx_certificate_password: Optional[str] = None):
        """
        Secure LDAP Settings
        :param str certificate_not_after: NotAfter DateTime of configure ldaps certificate.
        :param str certificate_thumbprint: Thumbprint of configure ldaps certificate.
        :param str external_access_ip_address: External access ip address.
        :param str public_certificate: Public certificate used to configure secure ldap.
        :param str external_access: A flag to determine whether or not Secure LDAP access over the internet is enabled or disabled.
        :param str ldaps: A flag to determine whether or not Secure LDAP is enabled or disabled.
        :param str pfx_certificate: The certificate required to configure Secure LDAP. The parameter passed here should be a base64encoded representation of the certificate pfx file.
        :param str pfx_certificate_password: The password to decrypt the provided Secure LDAP certificate pfx file.
        """
        pulumi.set(__self__, "certificate_not_after", certificate_not_after)
        pulumi.set(__self__, "certificate_thumbprint", certificate_thumbprint)
        pulumi.set(__self__, "external_access_ip_address", external_access_ip_address)
        pulumi.set(__self__, "public_certificate", public_certificate)
        # Optional settings are stored only when explicitly provided.
        if external_access is not None:
            pulumi.set(__self__, "external_access", external_access)
        if ldaps is not None:
            pulumi.set(__self__, "ldaps", ldaps)
        if pfx_certificate is not None:
            pulumi.set(__self__, "pfx_certificate", pfx_certificate)
        if pfx_certificate_password is not None:
            pulumi.set(__self__, "pfx_certificate_password", pfx_certificate_password)

    @property
    @pulumi.getter(name="certificateNotAfter")
    def certificate_not_after(self) -> str:
        """
        NotAfter DateTime of configure ldaps certificate.
        """
        return pulumi.get(self, "certificate_not_after")

    @property
    @pulumi.getter(name="certificateThumbprint")
    def certificate_thumbprint(self) -> str:
        """
        Thumbprint of configure ldaps certificate.
        """
        return pulumi.get(self, "certificate_thumbprint")

    @property
    @pulumi.getter(name="externalAccessIpAddress")
    def external_access_ip_address(self) -> str:
        """
        External access ip address.
        """
        return pulumi.get(self, "external_access_ip_address")

    @property
    @pulumi.getter(name="publicCertificate")
    def public_certificate(self) -> str:
        """
        Public certificate used to configure secure ldap.
        """
        return pulumi.get(self, "public_certificate")

    @property
    @pulumi.getter(name="externalAccess")
    def external_access(self) -> Optional[str]:
        """
        A flag to determine whether or not Secure LDAP access over the internet is enabled or disabled.
        """
        return pulumi.get(self, "external_access")

    @property
    @pulumi.getter
    def ldaps(self) -> Optional[str]:
        """
        A flag to determine whether or not Secure LDAP is enabled or disabled.
        """
        return pulumi.get(self, "ldaps")

    @property
    @pulumi.getter(name="pfxCertificate")
    def pfx_certificate(self) -> Optional[str]:
        """
        The certificate required to configure Secure LDAP. The parameter passed here should be a base64encoded representation of the certificate pfx file.
        """
        return pulumi.get(self, "pfx_certificate")

    @property
    @pulumi.getter(name="pfxCertificatePassword")
    def pfx_certificate_password(self) -> Optional[str]:
        """
        The password to decrypt the provided Secure LDAP certificate pfx file.
        """
        return pulumi.get(self, "pfx_certificate_password")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate rather than
# editing by hand. All fields are optional.
@pulumi.output_type
class NotificationSettingsResponse(dict):
    """
    Settings for notification
    """
    def __init__(__self__, *,
                 additional_recipients: Optional[Sequence[str]] = None,
                 notify_dc_admins: Optional[str] = None,
                 notify_global_admins: Optional[str] = None):
        """
        Settings for notification
        :param Sequence[str] additional_recipients: The list of additional recipients
        :param str notify_dc_admins: Should domain controller admins be notified
        :param str notify_global_admins: Should global admins be notified
        """
        # Only store the values that were explicitly provided.
        if additional_recipients is not None:
            pulumi.set(__self__, "additional_recipients", additional_recipients)
        if notify_dc_admins is not None:
            pulumi.set(__self__, "notify_dc_admins", notify_dc_admins)
        if notify_global_admins is not None:
            pulumi.set(__self__, "notify_global_admins", notify_global_admins)

    @property
    @pulumi.getter(name="additionalRecipients")
    def additional_recipients(self) -> Optional[Sequence[str]]:
        """
        The list of additional recipients
        """
        return pulumi.get(self, "additional_recipients")

    @property
    @pulumi.getter(name="notifyDcAdmins")
    def notify_dc_admins(self) -> Optional[str]:
        """
        Should domain controller admins be notified
        """
        return pulumi.get(self, "notify_dc_admins")

    @property
    @pulumi.getter(name="notifyGlobalAdmins")
    def notify_global_admins(self) -> Optional[str]:
        """
        Should global admins be notified
        """
        return pulumi.get(self, "notify_global_admins")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 33.087912
| 182
| 0.63102
|
4a049b859132f4e20e0703f81cecf6c9ef0991ee
| 813
|
py
|
Python
|
py/selenium/__init__.py
|
bjuric/selenium
|
0f59eaf594be28cab598b40477cf04fa0cc3892f
|
[
"Apache-2.0"
] | 2
|
2020-07-10T15:14:32.000Z
|
2020-12-13T13:49:51.000Z
|
py/selenium/__init__.py
|
bjuric/selenium
|
0f59eaf594be28cab598b40477cf04fa0cc3892f
|
[
"Apache-2.0"
] | 7
|
2021-01-21T14:05:28.000Z
|
2021-08-12T17:21:44.000Z
|
py/selenium/__init__.py
|
bjuric/selenium
|
0f59eaf594be28cab598b40477cf04fa0cc3892f
|
[
"Apache-2.0"
] | 4
|
2020-09-18T18:58:26.000Z
|
2020-09-20T09:46:20.000Z
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__version__ = "4.0.0a6"
| 40.65
| 62
| 0.768758
|
4a049c18521bc3bb64618ff56e6301506d3aa645
| 2,704
|
py
|
Python
|
finetune.py
|
JaeZheng/unet
|
f0fac997a1ee7ce962e14d51f97f704229fc1a75
|
[
"MIT"
] | null | null | null |
finetune.py
|
JaeZheng/unet
|
f0fac997a1ee7ce962e14d51f97f704229fc1a75
|
[
"MIT"
] | null | null | null |
finetune.py
|
JaeZheng/unet
|
f0fac997a1ee7ce962e14d51f97f704229fc1a75
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : JaeZheng
# @Time : 2019/11/11 17:19
# @File : finetune.py
# Fine-tune a pre-trained U-Net ("Models Genesis" weights) on thyroid
# ultrasound segmentation data, then predict masks for the test set.
from model import *
from data import *
import os
import keras.backend.tensorflow_backend as KTF
from data import meanIOU
# Select which GPU is visible to TensorFlow.  (translated from Chinese)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Let TensorFlow grow GPU memory on demand.  (translated from Chinese)
# NOTE(review): `tf` is presumably brought in by the star imports above — verify.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
session = tf.Session(config=config)
KTF.set_session(session)
# prepare the 2D model
input_channels, input_rows, input_cols, input_deps = 1, 64, 64, 32
num_class, activate = 2, 'sigmoid'
weight_dir = 'Vnet-genesis_thyroid_us.h5'
models_genesis = unet(input_size=(128,128,1))
print("Load pre-trained Models Genesis weights from {}".format(weight_dir))
models_genesis.load_weights(weight_dir)
# Reuse the pre-trained network up to layer 'conv2d_23' and attach a fresh
# 1x1 convolution + activation as the new segmentation head.
x = models_genesis.get_layer('conv2d_23').output
print(models_genesis.input.shape)
print(x.shape)
final_convolution = Conv2D(1, 1)(x)
output = Activation(activate)(final_convolution)
model = Model(inputs=models_genesis.input, outputs=output)
# Small learning rate (1e-5) — typical for fine-tuning pre-trained weights.
adam_optimizer = Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(optimizer=adam_optimizer, loss = 'binary_crossentropy', metrics = ['accuracy', meanIOU])
# Light geometric augmentation for the training generator.
data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')
# data_gen_args = dict()
myGene = trainGenerator(1,'data/thyroid/train/','image','label',data_gen_args,save_to_dir = None, target_size=(400,496))
# myGene = my_train_data_loader(16,50,'data/thyroid/train','image','label',target_size=(128,128))
# model = unet(pretrained_weights='Vnet-genesis_thyroid_us.h5')
# Keep only the best checkpoint (by training loss).
model_checkpoint = ModelCheckpoint('finetune_unet_thyroid.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=500,epochs=500,callbacks=[model_checkpoint])
# Dump a text summary of the network next to the checkpoint file.
if os.path.exists("finetune_unet_thyroid.hdf5.txt"):
    os.remove("finetune_unet_thyroid.hdf5.txt")
with open("finetune_unet_thyroid.hdf5.txt",'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.], print_fn=lambda x: fh.write(x + '\n'))
# model = load_model('unet_thyroid.hdf5')
# testGene = testGenerator("data/thyroid/test",num_image=59, target_size=(400,496))
# results = model.predict_generator(testGene,59,verbose=1)
# saveResult("data/thyroid/test",results)
# Predict the 59 test images one by one and save the resulting masks.
testGene = my_test_data_loader(59, "data/thyroid/test")
cnt = 0
for img in testGene:
    result = predict_single_image(model, img, target_size=(128,128))
    saveSingleResult("data/thyroid/test", result, cnt)
    cnt += 1
# NOTE(review): stray trailing import — likely a leftover; verify it is needed.
import test
| 39.764706
| 120
| 0.725962
|
4a049d56d96044b592b7d96afd74bd089d52dc80
| 1,857
|
py
|
Python
|
bindings/python/utils.py
|
Hridoy-31/dlite
|
e7b754a5e21a13c7f8318c6a245bb8f1634b27e2
|
[
"MIT"
] | 10
|
2020-04-08T06:25:27.000Z
|
2022-03-15T06:54:53.000Z
|
bindings/python/utils.py
|
Hridoy-31/dlite
|
e7b754a5e21a13c7f8318c6a245bb8f1634b27e2
|
[
"MIT"
] | 117
|
2019-12-16T14:43:41.000Z
|
2022-03-21T19:46:58.000Z
|
bindings/python/utils.py
|
Hridoy-31/dlite
|
e7b754a5e21a13c7f8318c6a245bb8f1634b27e2
|
[
"MIT"
] | 5
|
2020-04-15T16:23:29.000Z
|
2021-12-07T08:40:54.000Z
|
import os
import dlite
thisdir = os.path.dirname(__file__)
def instance_from_dict(d):
    """Returns a new DLite instance created from dict `d`, which should
    be of the same form as returned by the Instance.asdict() method.

    If `d` describes metadata (its meta is meta-metadata), a new metadata
    instance is built from the listed dimensions and properties (or the
    already-registered instance with the same uri is returned).  Otherwise
    a data instance of the referenced metadata is created and populated
    with the property values in `d`.
    """
    meta = dlite.get_instance(d['meta'])
    if meta.is_metameta:
        # If this metadata is already registered, reuse it.
        try:
            with dlite.silent:
                inst = dlite.get_instance(d['uri'])
                if inst:
                    return inst
        except dlite.DLiteError:
            pass
        # Fix: use distinct loop variables instead of re-using `d`, which
        # shadowed the function argument inside the comprehensions.
        dimensions = [dlite.Dimension(dimdict['name'], dimdict.get('description'))
                      for dimdict in d['dimensions']]
        # Map dimension names to their positional indices.
        dimmap = {dimdict['name']: i
                  for i, dimdict in enumerate(d['dimensions'])}
        props = []
        for p in d['properties']:
            if 'dims' in p:
                # Translate dimension names to indices.
                dims = [dimmap[dimname] for dimname in p['dims']]
            else:
                dims = None
            props.append(dlite.Property(
                name=p['name'],
                type=p['type'],
                dims=dims,
                unit=p.get('unit'),
                iri=p.get('iri'),
                description=p.get('description')))
        inst = dlite.Instance(d['uri'], dimensions, props, d.get('iri'),
                              d.get('description'))
    else:
        dims = list(d['dimensions'].values())
        inst = dlite.Instance(meta.uri, dims, d.get('uuid', None))
        for p in meta['properties']:
            inst[p.name] = d['properties'][p.name]
    return inst
if __name__ == '__main__':
    # Small self-test / demo: round-trip a data instance (and its metadata)
    # through asdict() -> instance_from_dict().
    url = 'json://' + os.path.join(thisdir, 'tests', 'Person.json')
    Person = dlite.Instance(url)
    person = Person([2])
    person.name = 'Ada'
    person.age = 12.5
    person.skills = ['skiing', 'jumping']
    d1 = person.asdict()
    inst1 = instance_from_dict(d1)
    d2 = Person.asdict()
    inst2 = instance_from_dict(d2)
| 29.015625
| 74
| 0.520194
|
4a049ec1243efb067c0639399468c8c9beb34939
| 749
|
py
|
Python
|
Leetcode/0587. Erect the Fence/0587.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0587. Erect the Fence/0587.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0587. Erect the Fence/0587.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
class Solution:
  def outerTrees(self, trees: List[List[int]]) -> List[List[int]]:
    """Return every tree that lies on the fence (convex hull of `trees`,
    with collinear boundary points kept).

    Monotone-chain construction: one forward sweep and one backward sweep
    over the coordinate-sorted points; duplicates are removed at the end.
    """
    trees.sort(key=lambda t: (t[0], t[1]))

    def turn(a: List[int], b: List[int], c: List[int]) -> int:
      # Sign of the cross product for the turn a -> b -> c; positive means
      # the candidate point would make the chain non-convex here.
      return (b[1] - a[1]) * (c[0] - b[0]) - (b[0] - a[0]) * (c[1] - b[1])

    fence = []
    # First sweep: left to right.
    for point in trees:
      while len(fence) > 1 and turn(fence[-1], fence[-2], point) > 0:
        fence.pop()
      fence.append(tuple(point))
    # Drop the last point — the second sweep re-adds it.
    fence.pop()
    # Second sweep: right to left.
    for point in reversed(trees):
      while len(fence) > 1 and turn(fence[-1], fence[-2], point) > 0:
        fence.pop()
      fence.append(tuple(point))
    # Deduplicate points shared by both sweeps.
    return list(set(fence))
| 29.96
| 74
| 0.558077
|
4a049f3bb9ed561cfbd84b4fe73a89a1b9ace104
| 904
|
py
|
Python
|
kubernetes/test/test_v1_subject.py
|
sgwilliams-ebsco/python
|
35e6406536c96d4769ff7e2a02bf0fdcb902a509
|
[
"Apache-2.0"
] | 1
|
2021-06-10T23:44:11.000Z
|
2021-06-10T23:44:11.000Z
|
kubernetes/test/test_v1_subject.py
|
sgwilliams-ebsco/python
|
35e6406536c96d4769ff7e2a02bf0fdcb902a509
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_subject.py
|
sgwilliams-ebsco/python
|
35e6406536c96d4769ff7e2a02bf0fdcb902a509
|
[
"Apache-2.0"
] | 1
|
2018-11-06T16:33:43.000Z
|
2018-11-06T16:33:43.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_subject import V1Subject
class TestV1Subject(unittest.TestCase):
    """Unit test stubs for the V1Subject model."""

    def setUp(self):
        # No fixtures needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1Subject(self):
        """Test V1Subject (stub).

        FIXME: construct the object with mandatory attributes using
        example values, e.g.
        ``model = kubernetes.client.models.v1_subject.V1Subject()``.
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 20.088889
| 105
| 0.690265
|
4a04a1509951128cdc472e91f4c9818d7628bd53
| 1,153
|
py
|
Python
|
click_web/resources/index.py
|
JayBigGuy10/click-web
|
ca88e118a283c92bff214e6fa0502db629d7dd1e
|
[
"MIT"
] | null | null | null |
click_web/resources/index.py
|
JayBigGuy10/click-web
|
ca88e118a283c92bff214e6fa0502db629d7dd1e
|
[
"MIT"
] | null | null | null |
click_web/resources/index.py
|
JayBigGuy10/click-web
|
ca88e118a283c92bff214e6fa0502db629d7dd1e
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import click
import click_web
from flask import render_template
def index():
    """Render the command-tree start page for the configured click root command."""
    root = click_web.click_root_cmd
    with click.Context(root, info_name=root.name, parent=None) as ctx:
        tree = _click_to_tree(ctx, root)
        return render_template('show_tree.html.j2', ctx=ctx, tree=tree)
def _click_to_tree(ctx: click.Context, node: click.BaseCommand, ancestors=None):
    '''
    Convert a click root command to a tree of dicts and lists
    :param ctx: click context used to resolve sub-commands.
    :param node: the command (or command group) to convert.
    :param ancestors: chain of parent commands above `node`; defaults to
        an empty list for the root call.
    :return: a json like tree
    '''
    # Fix: avoid a mutable default argument ([]), which is shared between
    # calls; use the None-sentinel idiom instead.
    if ancestors is None:
        ancestors = []
    res_childs = []
    res = OrderedDict()
    res['is_group'] = isinstance(node, click.core.MultiCommand)
    if res['is_group']:
        # a group, recurse for every child
        for key in node.list_commands(ctx):
            child = node.get_command(ctx, key)
            # Copy the ancestor list so each branch gets its own chain.
            res_childs.append(_click_to_tree(ctx, child, ancestors[:] + [node, ]))
    res['name'] = node.name
    res['short_help'] = node.get_short_help_str()
    res['help'] = node.help
    path_parts = ancestors + [node]
    res['path'] = '/' + '/'.join(p.name for p in path_parts)
    if res_childs:
        res['childs'] = res_childs
    return res
| 31.162162
| 112
| 0.669558
|
4a04a18706169f0bf93e8059584077fdcde5d9d8
| 4,004
|
py
|
Python
|
python/sdk/client/models/logger.py
|
karzuo/merlin
|
bdbdac35071d81beb1b8b5b807697bf2eac69a40
|
[
"Apache-2.0"
] | 97
|
2020-10-15T08:03:56.000Z
|
2022-03-31T22:30:59.000Z
|
python/sdk/client/models/logger.py
|
ibnummuhammad/merlin
|
acf10a350bcacfdfe67f7020d535467b71ff1d89
|
[
"Apache-2.0"
] | 91
|
2020-10-26T03:15:27.000Z
|
2022-03-31T10:19:55.000Z
|
python/sdk/client/models/logger.py
|
ibnummuhammad/merlin
|
acf10a350bcacfdfe67f7020d535467b71ff1d89
|
[
"Apache-2.0"
] | 26
|
2020-10-21T03:53:36.000Z
|
2022-03-16T06:43:15.000Z
|
# coding: utf-8
"""
Merlin
API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501
OpenAPI spec version: 0.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from client.configuration import Configuration
class Logger(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'model': 'LoggerConfig',
        'transformer': 'LoggerConfig'
    }

    attribute_map = {
        'model': 'model',
        'transformer': 'transformer'
    }

    def __init__(self, model=None, transformer=None, _configuration=None):  # noqa: E501
        """Logger - a model defined in Swagger"""  # noqa: E501
        # Fall back to the default client configuration when none is given.
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._model = None
        self._transformer = None
        self.discriminator = None

        # Only assign optional fields that were explicitly provided.
        if model is not None:
            self.model = model
        if transformer is not None:
            self.transformer = transformer

    @property
    def model(self):
        """Gets the model of this Logger.  # noqa: E501


        :return: The model of this Logger.  # noqa: E501
        :rtype: LoggerConfig
        """
        return self._model

    @model.setter
    def model(self, model):
        """Sets the model of this Logger.


        :param model: The model of this Logger.  # noqa: E501
        :type: LoggerConfig
        """
        self._model = model

    @property
    def transformer(self):
        """Gets the transformer of this Logger.  # noqa: E501


        :return: The transformer of this Logger.  # noqa: E501
        :rtype: LoggerConfig
        """
        return self._transformer

    @transformer.setter
    def transformer(self, transformer):
        """Sets the transformer of this Logger.


        :param transformer: The transformer of this Logger.  # noqa: E501
        :type: LoggerConfig
        """
        self._transformer = transformer

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(Logger, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Logger):
            return False

        # Equality is defined by the serialized dict representation.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Logger):
            return True

        return self.to_dict() != other.to_dict()
| 26.693333
| 108
| 0.564685
|
4a04a21dbe916aa145e4559c7f784cce7cdf2967
| 2,144
|
py
|
Python
|
graph-tool/src/graph_tool/all.py
|
johankaito/fufuka
|
32a96ecf98ce305c2206c38443e58fdec88c788d
|
[
"Apache-2.0"
] | 1
|
2015-08-04T19:41:53.000Z
|
2015-08-04T19:41:53.000Z
|
graph-tool/src/graph_tool/all.py
|
johankaito/fufuka
|
32a96ecf98ce305c2206c38443e58fdec88c788d
|
[
"Apache-2.0"
] | null | null | null |
graph-tool/src/graph_tool/all.py
|
johankaito/fufuka
|
32a96ecf98ce305c2206c38443e58fdec88c788d
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2015 Tiago de Paula Peixoto <tiago@skewed.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility module which includes all the sub-modules in graph_tool
"""
from __future__ import division, absolute_import, print_function
import warnings
from graph_tool import *
import graph_tool
from graph_tool.correlations import *
import graph_tool.correlations
from graph_tool.centrality import *
import graph_tool.centrality
try:
from graph_tool.draw import *
import graph_tool.draw
except ImportError as e:
# Proceed despite errors with cairo, matplotlib, etc.
msg = "Error importing draw module, proceeding nevertheless: " + str(e)
warnings.warn(msg, RuntimeWarning)
pass
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.generation import *
import graph_tool.generation
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.clustering import *
import graph_tool.clustering
from graph_tool.community import *
import graph_tool.community
from graph_tool.run_action import *
import graph_tool.run_action
from graph_tool.topology import *
import graph_tool.topology
from graph_tool.flow import *
import graph_tool.flow
from graph_tool.spectral import *
import graph_tool.spectral
from graph_tool.search import *
import graph_tool.search
from graph_tool.util import *
import graph_tool.util
import graph_tool.collection
import graph_tool.collection as collection
| 32.484848
| 75
| 0.791978
|
4a04a26710f765af547c7faaee938399e308a14d
| 16,128
|
py
|
Python
|
tests/cli/test_run.py
|
daryllstrauss/prefect
|
2dd308ec39dc189a85defa216ef15ebec78855f5
|
[
"Apache-2.0"
] | 1
|
2020-08-01T15:44:32.000Z
|
2020-08-01T15:44:32.000Z
|
tests/cli/test_run.py
|
daryllstrauss/prefect
|
2dd308ec39dc189a85defa216ef15ebec78855f5
|
[
"Apache-2.0"
] | null | null | null |
tests/cli/test_run.py
|
daryllstrauss/prefect
|
2dd308ec39dc189a85defa216ef15ebec78855f5
|
[
"Apache-2.0"
] | 1
|
2020-05-04T13:22:11.000Z
|
2020-05-04T13:22:11.000Z
|
import json
import os
import re
import tempfile
from unittest.mock import MagicMock
import pytest
import requests
from click.testing import CliRunner
import prefect
from prefect.cli.run import run
from prefect.utilities.configuration import set_temporary_config
def test_run_init():
    """Invoking `run` with no subcommand exits cleanly and shows the group help."""
    cli = CliRunner()
    outcome = cli.invoke(run)
    assert outcome.exit_code == 0
    assert "Run Prefect flows." in outcome.output
def test_run_help():
    """`run --help` exits cleanly and shows the group help text."""
    cli = CliRunner()
    outcome = cli.invoke(run, ["--help"])
    assert outcome.exit_code == 0
    assert "Run Prefect flows." in outcome.output
def test_run_cloud(monkeypatch):
    """`run cloud` looks up the flow via GraphQL and creates a flow run."""
    # Fake the GraphQL endpoint: every POST returns a single matching flow.
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}],)))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    monkeypatch.setattr(
        "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
    )
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run, ["cloud", "--name", "flow", "--project", "project", "--version", "2"]
        )
        assert result.exit_code == 0
        assert "Flow Run" in result.output

        # The CLI must have issued exactly this flow-lookup query
        # (compared whitespace-insensitively via .split()).
        query = """
        query {
            flow(where: { _and: { name: { _eq: "flow" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } }, order_by: { name: asc, version: desc }, distinct_on: name) {
                id
            }
        }
        """

        assert post.called
        assert post.call_args[1]["json"]["query"].split() == query.split()
def test_run_server(monkeypatch):
    """`run server` queries by name/version only (no project filter)."""
    # Fake the GraphQL endpoint: every POST returns a single matching flow.
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}],)))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    monkeypatch.setattr(
        "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
    )
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://localhost:4200", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(run, ["server", "--name", "flow", "--version", "2"])
        assert result.exit_code == 0
        assert "Flow Run" in result.output

        # Unlike the cloud variant, the query carries no project condition.
        query = """
        query {
            flow(where: { _and: { name: { _eq: "flow" }, version: { _eq: 2 } } }, order_by: { name: asc, version: desc }, distinct_on: name) {
                id
            }
        }
        """

        assert post.called
        assert post.call_args[1]["json"]["query"].split() == query.split()
def test_run_cloud_watch(monkeypatch):
    """`run cloud --watch` streams the flow-run state transitions to stdout."""
    # Fake GraphQL responses: a matching flow plus a flow-run state history
    # (Running -> Success) for the watch polling loop.
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(
                return_value=dict(
                    data=dict(
                        flow=[{"id": "flow"}],
                        flow_run_by_pk=dict(
                            states=[
                                {"state": "Running", "timestamp": None},
                                {"state": "Success", "timestamp": None},
                            ]
                        ),
                    )
                )
            )
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    monkeypatch.setattr(
        "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
    )
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--watch",
            ],
        )
        # Both observed states must appear in the watched output.
        assert result.exit_code == 0
        assert "Running" in result.output
        assert "Success" in result.output
        assert post.called
def test_run_cloud_logs(monkeypatch):
    """`run cloud --logs` prints each log's timestamp, message and level."""
    # Fake GraphQL responses: a matching flow plus a finished flow run that
    # carries one log record for the log-streaming loop.
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(
                return_value=dict(
                    data=dict(
                        flow=[{"id": "flow"}],
                        flow_run=[
                            {
                                "logs": [
                                    {
                                        "timestamp": "test_timestamp",
                                        "message": "test_message",
                                        "level": "test_level",
                                    }
                                ],
                                "state": "Success",
                            }
                        ],
                    )
                )
            )
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    monkeypatch.setattr(
        "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
    )
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--logs",
            ],
        )
        # All three log fields from the fake record must be printed.
        assert result.exit_code == 0
        assert "test_timestamp" in result.output
        assert "test_message" in result.output
        assert "test_level" in result.output
        assert post.called
def test_run_cloud_fails(monkeypatch):
    """When the GraphQL lookup returns no flows, the CLI reports 'flow not found'."""
    # Fake GraphQL endpoint returning an empty flow list.
    post = MagicMock(
        return_value=MagicMock(json=MagicMock(return_value=dict(data=dict(flow=[]))))
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run, ["cloud", "--name", "flow", "--project", "project", "--version", "2"]
        )
        # The command still exits 0 but prints the not-found message.
        assert result.exit_code == 0
        assert "flow not found" in result.output
def test_run_cloud_no_param_file(monkeypatch):
    """A nonexistent ``--parameters-file`` path is rejected by click (exit code 2)."""
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--parameters-file",
                "no_file.json",
            ],
        )
        assert result.exit_code == 2
        # note: click changed the output format for errors between 7.0 & 7.1, this test should be agnostic to which click version is used.
        # ensure message ~= Invalid value for "--parameters-file" / "-pf": Path "no_file.json" does not exist
        assert re.search(
            r"Invalid value for [\"']--parameters-file", result.output, re.MULTILINE
        )
        assert re.search(
            r"Path [\"']no_file.json[\"'] does not exist", result.output, re.MULTILINE
        )
def test_run_cloud_param_file(monkeypatch):
    """JSON loaded from ``--parameters-file`` is passed to ``create_flow_run``."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    # Write a real temp file so click's path validation passes.
    with tempfile.TemporaryDirectory() as directory:
        file_path = os.path.join(directory, "file.json")
        with open(file_path, "w") as tmp:
            json.dump({"test": 42}, tmp)
        with set_temporary_config(
            {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
        ):
            runner = CliRunner()
            result = runner.invoke(
                run,
                [
                    "cloud",
                    "--name",
                    "flow",
                    "--project",
                    "project",
                    "--version",
                    "2",
                    "--parameters-file",
                    file_path,
                ],
            )
            assert result.exit_code == 0
            assert "Flow Run" in result.output
            assert create_flow_run_mock.called
            # The file's JSON content must arrive as the `parameters` kwarg.
            assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 42}
def test_run_cloud_param_string(monkeypatch):
    """Inline ``--parameters-string`` JSON is passed to ``create_flow_run``."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--parameters-string",
                '{"test": 42}',
            ],
        )
        assert result.exit_code == 0
        assert "Flow Run" in result.output
        assert create_flow_run_mock.called
        assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 42}
def test_run_cloud_run_name(monkeypatch):
    """``--run-name`` is forwarded to ``create_flow_run`` as the ``run_name`` kwarg."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--run-name",
                "NAME",
            ],
        )
        assert result.exit_code == 0
        assert "Flow Run" in result.output
        assert create_flow_run_mock.called
        assert create_flow_run_mock.call_args[1]["run_name"] == "NAME"
def test_run_cloud_param_string_overwrites(monkeypatch):
    """``--parameters-string`` overrides overlapping keys from ``--parameters-file``."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    # File says test=42; the inline string below says test=43 and must win.
    with tempfile.TemporaryDirectory() as directory:
        file_path = os.path.join(directory, "file.json")
        with open(file_path, "w") as tmp:
            json.dump({"test": 42}, tmp)
        with set_temporary_config(
            {"cloud.api": "http://api.prefect.io", "cloud.auth_token": "secret_token"}
        ):
            runner = CliRunner()
            result = runner.invoke(
                run,
                [
                    "cloud",
                    "--name",
                    "flow",
                    "--project",
                    "project",
                    "--version",
                    "2",
                    "--parameters-file",
                    file_path,
                    "--parameters-string",
                    '{"test": 43}',
                ],
            )
            assert result.exit_code == 0
            assert "Flow Run" in result.output
            assert create_flow_run_mock.called
            assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 43}
@pytest.mark.parametrize(
    "api,expected",
    [
        ("https://api.prefect.io", "https://cloud.prefect.io/tslug/flow-run/id"),
        ("https://api-foo.prefect.io", "https://foo.prefect.io/tslug/flow-run/id"),
    ],
)
def test_run_cloud_flow_run_id_link(monkeypatch, api, expected, cloud_api):
    """The CLI prints a UI link derived from the configured cloud API URL."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config({"cloud.api": api, "cloud.auth_token": "secret_token"}):
        runner = CliRunner()
        result = runner.invoke(
            run, ["cloud", "--name", "flow", "--project", "project", "--version", "2",],
        )
        assert result.exit_code == 0
        assert "Flow Run" in result.output
        assert expected in result.output
def test_run_cloud_flow_run_id_no_link(monkeypatch):
    """With ``--no-url`` the CLI prints the raw flow-run ID instead of a UI link."""
    post = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
        )
    )
    session = MagicMock()
    session.return_value.post = post
    monkeypatch.setattr("requests.Session", session)
    create_flow_run_mock = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
    monkeypatch.setattr(
        "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
    )
    with set_temporary_config(
        {"cloud.api": "https://api.foo", "cloud.auth_token": "secret_token"}
    ):
        runner = CliRunner()
        result = runner.invoke(
            run,
            [
                "cloud",
                "--name",
                "flow",
                "--project",
                "project",
                "--version",
                "2",
                "--no-url",
            ],
        )
        assert result.exit_code == 0
        assert "Flow Run ID" in result.output
| 31.873518
| 181
| 0.53559
|
4a04a2b6e95e0775896e084803c1e4f94c5f34da
| 159
|
py
|
Python
|
samwise/features/__init__.py
|
Cloudzero/samwise
|
793463e0774f9d1c702f4252377f19fa3408a558
|
[
"MIT"
] | 4
|
2019-09-16T02:45:16.000Z
|
2020-08-25T13:11:55.000Z
|
samwise/utils/__init__.py
|
Cloudzero/samwise
|
793463e0774f9d1c702f4252377f19fa3408a558
|
[
"MIT"
] | null | null | null |
samwise/utils/__init__.py
|
Cloudzero/samwise
|
793463e0774f9d1c702f4252377f19fa3408a558
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 CloudZero, Inc. All rights reserved.
# Licensed under the MIT License. See LICENSE file in the project root for full license information.
| 53
| 100
| 0.779874
|
4a04a3119da4b7b60a318a79be4da5dc2cb35281
| 1,365
|
py
|
Python
|
tests/beacon/types/test_block.py
|
shreyasnbhat/py-evm
|
cd31d83185e102a7cb2f11e2f67923b069ee9cef
|
[
"MIT"
] | 1
|
2018-12-09T11:56:53.000Z
|
2018-12-09T11:56:53.000Z
|
tests/beacon/types/test_block.py
|
shreyasnbhat/py-evm
|
cd31d83185e102a7cb2f11e2f67923b069ee9cef
|
[
"MIT"
] | null | null | null |
tests/beacon/types/test_block.py
|
shreyasnbhat/py-evm
|
cd31d83185e102a7cb2f11e2f67923b069ee9cef
|
[
"MIT"
] | 2
|
2018-12-09T15:58:11.000Z
|
2020-09-29T07:10:21.000Z
|
import pytest
import rlp
from eth.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth.beacon.types.attestation_records import (
AttestationRecord,
)
from eth.utils.blake import (
blake,
)
def test_defaults(sample_beacon_block_params):
    """Constructing a block from the sample params preserves the slot value."""
    beacon_block = BaseBeaconBlock(**sample_beacon_block_params)
    expected_slot = sample_beacon_block_params['slot']
    assert beacon_block.slot == expected_slot
def test_update_attestations(sample_attestation_record_params, sample_beacon_block_params):
    """Copying a block with one extra attestation reports num_attestations == 1."""
    original = BaseBeaconBlock(**sample_beacon_block_params)
    new_record = AttestationRecord(**sample_attestation_record_params)
    extended = list(original.attestations) + [new_record]
    updated = original.copy(attestations=extended)
    assert updated.num_attestations == 1
def test_hash(sample_beacon_block_params):
    """block.hash equals the blake hash of the block's RLP encoding."""
    beacon_block = BaseBeaconBlock(**sample_beacon_block_params)
    expected_digest = blake(rlp.encode(beacon_block))
    assert beacon_block.hash == expected_digest
@pytest.mark.parametrize(
    'ancestor_hashes, parent_hash',
    [
        ((), None),
        ((b'\x01' * 32,), b'\x01' * 32),
        ((b'\x01' * 32, b'\x02' * 32), b'\x01' * 32)
    ]
)
def test_parent_hash(sample_beacon_block_params, ancestor_hashes, parent_hash):
    """parent_hash is the first ancestor hash, or None when there are no ancestors."""
    base_block = BaseBeaconBlock(**sample_beacon_block_params)
    block_with_ancestors = base_block.copy(ancestor_hashes=ancestor_hashes)
    assert block_with_ancestors.parent_hash == parent_hash
| 27.857143
| 91
| 0.727473
|
4a04a316016a92170804519de7c0b4714d3ff1d3
| 67,336
|
py
|
Python
|
share/lib/python/neuron/crxd/rxd.py
|
myneuronews/neuron
|
ac9ca9940071524a534465b52bcab734290f8e27
|
[
"BSD-3-Clause"
] | null | null | null |
share/lib/python/neuron/crxd/rxd.py
|
myneuronews/neuron
|
ac9ca9940071524a534465b52bcab734290f8e27
|
[
"BSD-3-Clause"
] | null | null | null |
share/lib/python/neuron/crxd/rxd.py
|
myneuronews/neuron
|
ac9ca9940071524a534465b52bcab734290f8e27
|
[
"BSD-3-Clause"
] | null | null | null |
from neuron import h, nrn, nrn_dll_sym
from . import species, node, section1d, region
from .nodelist import NodeList
import weakref
import numpy
import scipy.sparse
import scipy.sparse.linalg
import ctypes
import atexit
from . import options
from .rxdException import RxDException
from . import initializer
import collections
import os
from distutils import sysconfig
import uuid
import sys
import itertools
from numpy.ctypeslib import ndpointer
import re
import platform
# aliases to avoid repeatedly doing multiple hash-table lookups
_numpy_array = numpy.array
_numpy_zeros = numpy.zeros
_scipy_sparse_linalg_bicgstab = scipy.sparse.linalg.bicgstab
_scipy_sparse_eye = scipy.sparse.eye
_scipy_sparse_linalg_spsolve = scipy.sparse.linalg.spsolve
_scipy_sparse_dok_matrix = scipy.sparse.dok_matrix
_scipy_sparse_linalg_factorized = scipy.sparse.linalg.factorized
_scipy_sparse_coo_matrix = scipy.sparse.coo_matrix
_species_get_all_species = species._get_all_species
_node_get_states = node._get_states
_section1d_transfer_to_legacy = section1d._transfer_to_legacy
_ctypes_c_int = ctypes.c_int
_weakref_ref = weakref.ref
_external_solver = None
_external_solver_initialized = False
_windows_dll_files = []
_windows_dll = []
make_time_ptr = nrn_dll_sym('make_time_ptr')
make_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]
make_time_ptr(h._ref_dt, h._ref_t)
_double_ptr = ctypes.POINTER(ctypes.c_double)
_int_ptr = ctypes.POINTER(_ctypes_c_int)
fptr_prototype = ctypes.CFUNCTYPE(None)
set_nonvint_block = nrn_dll_sym('set_nonvint_block')
set_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))
set_setup = nrn_dll_sym('set_setup')
set_setup.argtypes = [fptr_prototype]
set_initialize = nrn_dll_sym('set_initialize')
set_initialize.argtypes = [fptr_prototype]
scatter_concentrations = nrn_dll_sym('scatter_concentrations')
# Transfer extracellular concentrations to NEURON
_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)
rxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')
setup_solver = nrn_dll_sym('setup_solver')
setup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, ctypes.POINTER(ctypes.c_long), ctypes.c_int, ctypes.py_object, ctypes.py_object]
#states = None
_set_num_threads = nrn_dll_sym('set_num_threads')
_set_num_threads.argtypes = [ctypes.c_int]
_get_num_threads = nrn_dll_sym('get_num_threads')
_get_num_threads.restype = ctypes.c_int
clear_rates = nrn_dll_sym('clear_rates')
register_rate = nrn_dll_sym('register_rate')
register_rate.argtypes = [
ctypes.c_int, #num species
ctypes.c_int, #num regions
ctypes.c_int, #num seg
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids
ctypes.c_int, numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #num ecs species
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids
ctypes.c_int, #num multicompartment reactions
numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers
] #Reaction rate function
setup_currents = nrn_dll_sym('setup_currents')
setup_currents.argtypes = [
ctypes.c_int, #number of membrane currents
ctypes.c_int, #number induced currents
ctypes.c_int, #number of nodes with membrane currents
_int_ptr, #number of species involved in each membrane current
_int_ptr, #charges of the species involved in each membrane current
_int_ptr, #node indices
_int_ptr, #node indices
_double_ptr, #scaling (areas) of the fluxes
_int_ptr, #charges for each species in each reation
ctypes.POINTER(ctypes.py_object), #hoc pointers
_int_ptr, #maps for membrane fluxes
_int_ptr #maps for ecs fluxes
]
set_reaction_indices = nrn_dll_sym('set_reaction_indices')
set_reaction_indices.argtypes = [ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,
_int_ptr,_int_ptr,_double_ptr, ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,
_int_ptr]
ecs_register_reaction = nrn_dll_sym('ecs_register_reaction')
ecs_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]
set_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')
set_euler_matrix.argtypes = [
ctypes.c_int,
ctypes.c_int,
numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),
ctypes.c_int,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.intc, flags='contiguous'),
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
]
rxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')
rxd_setup_curr_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
ctypes.POINTER(ctypes.py_object),
]
rxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')
rxd_setup_conc_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
ctypes.POINTER(ctypes.py_object)
]
_c_headers = """#include <math.h>
/*Some functions supported by numpy that aren't included in math.h
* names and arguments match the wrappers used in rxdmath.py
*/
double factorial(const double);
double degrees(const double);
void radians(const double, double*);
double log1p(const double);
"""
def _list_to_cint_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_int * len(data))(*tuple(data))
def _list_to_cdouble_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_double * len(data))(*tuple(data))
def _list_to_clong_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_long * len(data))(*tuple(data))
def _list_to_pyobject_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.py_object * len(data))(*tuple(data))
def byeworld():
    """atexit hook: release the cached solver and unload Windows DLLs.

    Deleting ``_react_matrix_solver`` before interpreter teardown avoids a
    shutdown seg-fault seen with some NEURON/Python combinations, which the
    original authors attribute to out-of-order object destruction.
    """
    # needed to prevent a seg-fault error at shutdown in at least some
    # combinations of NEURON and Python, which I think is due to objects
    # getting deleted out-of-order
    global _react_matrix_solver
    try:
        del _react_matrix_solver
    except NameError:
        # # if it already didn't exist, that's fine
        pass
    _windows_remove_dlls()
atexit.register(byeworld)

# Faraday's constant (store to reduce number of lookups)
FARADAY = h.FARADAY

# converting from mM um^3 to molecules
# = 6.02214129e23 * 1000. / 1.e18 / 1000
# = avogadro * (L / m^3) * (m^3 / um^3) * (mM / M)
# value for avogadro's constant from NIST webpage, accessed 25 April 2012:
# http://physics.nist.gov/cgi-bin/cuu/Value?na
_conversion_factor = 602214.129

_cvode_object = h.CVode()

# change counters used to detect when matrices must be rebuilt
last_diam_change_cnt = None
last_structure_change_cnt = None

# cached matrices / current bookkeeping; rebuilt by _setup_matrices()
_linmodadd = None
_linmodadd_c = None
_diffusion_matrix = None
_curr_scales = None
_curr_ptrs = None
_curr_indices = None

# weakrefs to every registered reaction object
_all_reactions = []

_zero_volume_indices = []
_nonzero_volume_indices = []

nrn_tree_solve = nrn_dll_sym('nrn_tree_solve')
nrn_tree_solve.restype = None

_dptr = _double_ptr

# per-section discretization settings (default: 1D, dx=0.25, deterministic)
_dimensions = collections.defaultdict(lambda: 1)
_default_dx = 0.25
_default_method = 'deterministic'

#CRxD
_diffusion_d = None
_diffusion_a = None
_diffusion_b = None
_diffusion_p = None
_c_diagonal = None
_cur_node_indices = None
_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None
def set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):
    """Specify the numerical discretization and solver options.

    domain -- a section or Python iterable of sections; None means all
              sections AND makes the chosen dimension the new default.
    dimension -- 1 or 3; the only option currently implemented.
    dx, nsubseg, method -- accepted for forward compatibility but raise
              RxDException if supplied.
    """
    setting_default = False
    if domain is None:
        domain = h.allsec()
        setting_default = True
    elif isinstance(domain, nrn.Section):
        domain = [domain]

    # NOTE: These attributes are set on a per-nrn.Section basis; they cannot
    # assume Section1D objects exist because they might be specified before
    # those objects are created

    # domain is now always an iterable (or invalid)
    if method is not None:
        raise RxDException('using set_solve_type to specify method is not yet implemented')
    if dimension is not None:
        if dimension not in (1, 3):
            raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')
        # late-binding on purpose: factory captures the requested dimension
        factory = lambda: dimension
        if setting_default:
            _dimensions.default_factory = factory
        for sec in domain:
            _dimensions[sec] = dimension
    if dx is not None:
        raise RxDException('using set_solve_type to specify dx is not yet implemented')
    if nsubseg is not None:
        raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')
def _unregister_reaction(r):
    """Drop the first entry of ``_all_reactions`` whose weakref resolves to *r*."""
    global _all_reactions
    for position in range(len(_all_reactions)):
        if _all_reactions[position]() == r:
            del _all_reactions[position]
            break
def _register_reaction(r):
    """Track reaction *r* (as a weakref) and force solver re-initialization."""
    # TODO: should we search to make sure that (a weakref to) r hasn't already been added?
    global _all_reactions, _external_solver_initialized
    _all_reactions.append(_weakref_ref(r))
    _external_solver_initialized = False
def _after_advance():
    """Record the diameter-change counter after an advance.

    NOTE(review): ``_diam_change_count`` is defined elsewhere in this module
    (not visible in this chunk); presumably a NEURON structure-change counter.
    """
    global last_diam_change_cnt
    last_diam_change_cnt = _diam_change_count.value
def re_init():
    """reinitializes all rxd concentrations to match HOC values, updates matrices"""
    global _external_solver_initialized
    h.define_shape()

    if not species._has_3d:
        # TODO: if we do have 3D, make sure that we do the necessary parts of this

        # update current pointers: purge stale ones, then re-register per species
        section1d._purge_cptrs()
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None:
                s._register_cptrs()

        # update matrix equations
        _setup_matrices()
    for sr in list(_species_get_all_species().values()):
        s = sr()
        if s is not None: s.re_init()

    # TODO: is this safe?
    _cvode_object.re_init()

    _external_solver_initialized = False
def _invalidate_matrices():
    """Mark cached matrices stale so the next advance rebuilds them."""
    # TODO: make a separate variable for this?
    global _diffusion_matrix, _external_solver_initialized, last_structure_change_cnt
    _diffusion_matrix = None
    last_structure_change_cnt = None
    _external_solver_initialized = False

# offset of the rxd states inside CVode's full state vector (set in _ode_count)
_rxd_offset = None
def _atolscale(y):
    """Scale CVode's absolute-tolerance vector *y* by each species' atolscale.

    Only entries belonging to nonzero-volume rxd nodes (shifted by
    ``_rxd_offset``) are touched.
    """
    real_index_lookup = {item: index for index, item in enumerate(_nonzero_volume_indices)}
    for sr in list(_species_get_all_species().values()):
        s = sr()
        if s is not None:
            shifted_i = [real_index_lookup[i] + _rxd_offset for i in s.indices() if i in real_index_lookup]
            y[shifted_i] *= s._atolscale
def _ode_count(offset):
    """CVode callback: return the number of rxd ODEs and record their offset.

    Also rebuilds the matrices if the model structure changed since the last
    call.
    """
    global _rxd_offset, last_structure_change_cnt, _structure_change_count
    initializer._do_init()
    # our states start at `offset` in CVode's state vector
    _rxd_offset = offset - len(_nonzero_volume_indices)
    if _diffusion_matrix is None or last_structure_change_cnt != _structure_change_count.value: _setup_matrices()
    last_structure_change_cnt = _structure_change_count.value
    return len(_nonzero_volume_indices)
def _ode_reinit(y):
    """CVode callback: copy the current rxd states into CVode's state vector *y*."""
    y[_rxd_offset : _rxd_offset + len(_nonzero_volume_indices)] = _node_get_states()[_nonzero_volume_indices]
def _ode_fun(t, y, ydot):
    """CVode right-hand side: ydot = reactions - diffusion for the rxd states.

    *y*/*ydot* are slices of CVode's full state vector starting at
    ``_rxd_offset``; zero-volume (algebraic) nodes are reconstructed from the
    nonzero-volume states before evaluating.
    """
    initializer.assert_initialized()
    lo = _rxd_offset
    hi = lo + len(_nonzero_volume_indices)
    if lo == hi: return
    states = _node_get_states().copy()
    states[_nonzero_volume_indices] = y[lo : hi]

    # need to fill in the zero volume states with the correct concentration
    # this assumes that states at the zero volume indices is zero (although that
    # assumption could be easily removed)
    #matrix = _scipy_sparse_dok_matrix((len(_zero_volume_indices), len(states)))
    """
    for i, row in enumerate(_zero_volume_indices):
        d = _diffusion_matrix[row, row]
        if d:
            nzj = _diffusion_matrix[row].nonzero()[1]
            print 'nzj:', nzj
            for j in nzj:
                matrix[i, j] = -_diffusion_matrix[row, j] / d
    states[_zero_volume_indices] = matrix * states
    """
    # NOTE(review): _mat_for_zero_volume_nodes is built elsewhere in this module
    if len(_zero_volume_indices):
        states[_zero_volume_indices] = _mat_for_zero_volume_nodes * states
    """
    for i in _zero_volume_indices:
        v = _diffusion_matrix[i] * states
        d = _diffusion_matrix[i, i]
        if d:
            states[i] = -v / d
    """

    # TODO: make this so that the section1d parts use cptrs (can't do this directly for 3D because sum, but could maybe move that into the C)
    # the old way: _section1d_transfer_to_legacy()
#    for sr in _species_get_all_species().values():
#        s = sr()
#        if s is not None: s._transfer_to_legacy()

    if ydot is not None:
        # diffusion_matrix = - jacobian
        ydot[lo : hi] = (_rxd_reaction(states) - _diffusion_matrix * states)[_nonzero_volume_indices]

    states[_zero_volume_indices] = 0
def _ode_solve(dt, t, b, y):
    """CVode callback: solve (I - dt*J) x = b in place for the rxd slice of *b*.

    3D models use BiCGSTAB on the full Euler matrix; 1D models use the
    Hines tree solver followed by the factorized reaction solve.
    """
    initializer.assert_initialized()
    if _diffusion_matrix is None: _setup_matrices()
    lo = _rxd_offset
    hi = lo + len(_nonzero_volume_indices)
    n = len(_node_get_states())

    # TODO: this will need changed when can have both 1D and 3D
    if species._has_3d:
        if species._has_1d:
            raise Exception('development issue: cvode currently does not support hybrid simulations (fix by shifting for zero volume indices)')
        # NOTE: only working on the rxd part
        rxd_b = b[lo : hi]
        # TODO: make sure can handle both 1D and 3D
        m = eye_minus_dt_J(n, dt)
        # removed diagonal preconditioner since tests showed no improvement in convergence
        result, info = _scipy_sparse_linalg_bicgstab(m, dt * rxd_b)
        assert(info == 0)
        b[lo : hi] = _react_matrix_solver(result)
    else:
        # 1D only; use Hines solver
        full_b = numpy.zeros(n)
        full_b[_nonzero_volume_indices] = b[lo : hi]
        b[lo : hi] = _react_matrix_solver(_diffusion_matrix_solve(dt, full_b))[_nonzero_volume_indices]
        # the following version computes the reaction matrix each time
        #full_y = numpy.zeros(n)
        #full_y[_nonzero_volume_indices] = y[lo : hi]
        #b[lo : hi] = _reaction_matrix_solve(dt, full_y, _diffusion_matrix_solve(dt, full_b))[_nonzero_volume_indices]

        # this line doesn't include the reaction contributions to the Jacobian
        #b[lo : hi] = _diffusion_matrix_solve(dt, full_b)[_nonzero_volume_indices]
# currents induced by rxd membrane fluxes; filled in by the C solver
_rxd_induced_currents = None

def _setup_memb_currents():
    """Collect membrane-flux metadata from all reactions and hand it to C.

    Gathers, per reaction with a membrane flux: flux scales (areas), HOC
    current pointers, per-species charges, net charges, and the maps from
    currents to intracellular/extracellular state indices.
    """
    initializer._do_init()
    # setup membrane fluxes from our stuff
    # TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped
    #       because won't change very often
    # need this; think it's because of initialization of mod files
    if _curr_indices is None: return
    # sentinel index meaning "no species on this side of the membrane"
    SPECIES_ABSENT = -1
    # TODO: change so that this is only called when there are in fact currents
    rxd_memb_scales = []
    memb_cur_ptrs = []
    memb_cur_charges = []
    memb_net_charges = []
    memb_cur_mapped = []
    memb_cur_mapped_ecs = []
    for rptr in _all_reactions:
        r = rptr()
        if r and r._membrane_flux:
            scales = r._memb_scales
            rxd_memb_scales.extend(scales)
            memb_cur_ptrs += r._cur_ptrs
            memb_cur_mapped += r._cur_mapped
            memb_cur_mapped_ecs += r._cur_mapped_ecs
            memb_cur_charges += [r._cur_charges] * len(scales)
            memb_net_charges += [r._net_charges] * len(scales)
    # flatten the nested maps, replacing None by the sentinel for the C side
    ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]
    ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]
    if memb_cur_ptrs:
        cur_counts = [len(x) for x in memb_cur_mapped]
        num_currents = numpy.array(cur_counts).sum()
        setup_currents(len(memb_cur_ptrs),
            num_currents,
            len(_curr_indices),  # num_currents == len(_curr_indices) if no Extracellular
            _list_to_cint_array(cur_counts),
            _list_to_cint_array(memb_net_charges),
            _list_to_cint_array(_curr_indices),
            _list_to_cint_array(_cur_node_indices),
            _list_to_cdouble_array(rxd_memb_scales),
            _list_to_cint_array(list(itertools.chain.from_iterable(memb_cur_charges))),
            _list_to_pyobject_array(list(itertools.chain.from_iterable(memb_cur_ptrs))),
            _list_to_cint_array(ics_map),
            _list_to_cint_array(ecs_map))
def _currents(rhs):
    """Apply rxd membrane fluxes to the voltage right-hand side.

    NOTE(review): the unconditional ``return`` below makes everything after
    it unreachable — this routine is currently a no-op (the work presumably
    moved to the C solver). The dead body is kept verbatim for reference.
    """
    return
    if rxd_memb_flux:
        # TODO: remove the asserts when this is verified to work
        assert(len(rxd_memb_flux) == len(_cur_node_indices))
        assert(len(rxd_memb_flux) == len(memb_cur_ptrs))
        assert(len(rxd_memb_flux) == len(memb_cur_charges))
        assert(len(rxd_memb_flux) == len(memb_net_charges))
        for flux, cur_ptrs, cur_charges, net_charge, i, cur_maps in zip(rxd_memb_flux, memb_cur_ptrs, memb_cur_charges, memb_net_charges, _cur_node_indices, memb_cur_mapped):
            rhs[i] -= net_charge * flux
            #import sys
            #sys.exit()
            # TODO: remove this assert when more thoroughly tested
            assert(len(cur_ptrs) == len(cur_maps))
            for ptr, charge, cur_map_i in zip(cur_ptrs, cur_charges, cur_maps):
                # this has the opposite sign of the above because positive
                # currents lower the membrane potential
                cur = charge * flux
                ptr[0] += cur
                for c in cur_map_i:
                    _rxd_induced_currents[c] += cur
                #for sign, c in zip([-1, 1], cur_maps):
                #    if c is not None:
                #        _rxd_induced_currents[c] += sign * cur
# cached implicit-Euler system and preconditioner for the fixed-step solver
_last_m = None
_last_preconditioner = None
_fixed_step_count = 0

from scipy.sparse.linalg import spilu as _spilu
from scipy.sparse.linalg import LinearOperator as _LinearOperator
from scipy.sparse import csc_matrix
def eye_minus_dt_J(n, dt):
    """correctly computes I - dt J as needed for the lhs of an advance.

    The difficulty here is that the _euler_matrix also contains conservation
    equations. These are preserved unchanged (i.e. no +1).

    This reads two globals: _euler_matrix and _zero_volume_indices.

    n is the length of the state vector (including the conservation nodes).
    """
    m = _scipy_sparse_eye(n, n) - dt * _euler_matrix
    # correct to account for algebraic conservation nodes which don't get the +1
    for i in _zero_volume_indices:
        m[i, i] -= 1
    return m
def _fixed_step_solve(raw_dt):
    """Advance the rxd states one fixed time step of size *raw_dt*.

    1D models use the Hines tree solver; 3D models do an implicit Euler step
    solved with ILU-preconditioned BiCGSTAB, caching the system matrix and
    preconditioner across steps with the same dt.
    """
    initializer._do_init()
    global pinverse, _fixed_step_count
    global _last_m, _last_dt, _last_preconditioner
    if species._species_count == 0:
        return

    # allow for skipping certain fixed steps
    # warning: this risks numerical errors!
    fixed_step_factor = options.fixed_step_factor
    _fixed_step_count += 1
    if _fixed_step_count % fixed_step_factor: return
    dt = fixed_step_factor * raw_dt

    # TODO: this probably shouldn't be here
    if _diffusion_matrix is None and _euler_matrix is None: _setup_matrices()

    states = _node_get_states()[:]

    if _diffusion_matrix is None:
        return None
    b = _rxd_reaction(states) - _diffusion_matrix * states

    if not species._has_3d:
        # use Hines solver since 1D only
        states[:] += _diffusion_matrix_solve(dt, dt * b)

        # clear the zero-volume "nodes"
        states[_zero_volume_indices] = 0

        # TODO: refactor so this isn't in section1d... probably belongs in node
        _section1d_transfer_to_legacy()
        _last_preconditioner = None
    else:
        # TODO: this looks to be semi-implicit method because it doesn't take into account the reaction contribution to the Jacobian; do we care?
        # the actual advance via implicit euler
        n = len(states)
        if _last_dt != dt or _last_preconditioner is None:
            # dt changed (or first step): rebuild the system and its ILU preconditioner
            _last_m = eye_minus_dt_J(n, dt)
            _last_preconditioner = _LinearOperator((n, n), _spilu(csc_matrix(_last_m)).solve)
            _last_dt = dt
        # removed diagonal preconditioner since tests showed no improvement in convergence
        result, info = _scipy_sparse_linalg_bicgstab(_last_m, dt * b, M=_last_preconditioner)
        assert(info == 0)
        states[:] += result

        # clear the zero-volume "nodes"
        states[_zero_volume_indices] = 0

        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None: s._transfer_to_legacy()
def _rxd_reaction(states):
    """Return the reaction + membrane-current contribution to d(states)/dt."""
    # TODO: this probably shouldn't be here
    # TODO: this was included in the 3d, probably shouldn't be there either
    # TODO: if its None and there is 3D... should we do anything special?
    if _diffusion_matrix is None and not species._has_3d: _setup_matrices()

    b = _numpy_zeros(len(states))

    if _curr_ptr_vector is not None:
        _curr_ptr_vector.gather(_curr_ptr_storage_nrn)
        b[_curr_indices] = _curr_scales * (_curr_ptr_storage - _rxd_induced_currents)

    # NOTE(review): this unconditionally overwrites the assignment above —
    # the gathered-pointer path appears to be superseded; confirm intent.
    b[_curr_indices] = _curr_scales * [ptr[0] for ptr in _curr_ptrs]

    # TODO: store weak references to the r._evaluate in addition to r so no
    #       repeated lookups
    #for rptr in _all_reactions:
    #    r = rptr()
    #    if r:
    #        indices, mult, rate = r._evaluate(states)
    #        we split this in parts to allow for multiplicities and to allow stochastic to make the same changes in different places
    #        for i, m in zip(indices, mult):
    #            b[i] += m * rate

    node._apply_node_fluxes(b)
    return b
# NOTE(review): most of these names were already initialized above — this
# second block re-initializes the same solver caches (likely a merge artifact);
# harmless at import time since nothing has populated them yet.
_last_preconditioner_dt = 0
_last_dt = None
_last_m = None
_diffusion_d = None
_diffusion_a = None
_diffusion_b = None
_diffusion_p = None
_c_diagonal = None
_cur_node_indices = None

_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None
def _diffusion_matrix_solve(dt, rhs):
    """Solve the 1D diffusion system (C + dt*D) x = rhs via the Hines solver.

    On a dt change the tridiagonal-tree decomposition (a/b/d arrays plus the
    parent-index vector p) is rebuilt from ``_diffusion_matrix``; the arrays
    are then handed to the C ``nrn_tree_solve`` which overwrites a copy of
    *rhs* with the solution.
    """
    # only get here if already initialized
    global _last_dt
    global _diffusion_a_ptr, _diffusion_d, _diffusion_b_ptr, _diffusion_p_ptr, _c_diagonal

    if _diffusion_matrix is None: return numpy.array([])

    n = len(rhs)

    if _last_dt != dt:
        global _c_diagonal, _diffusion_a_base, _diffusion_b_base, _diffusion_d_base
        global _diffusion_a, _diffusion_b, _diffusion_p
        _last_dt = dt
        # clear _c_diagonal and _last_dt to trigger a recalculation
        if _c_diagonal is None:
            # extract the tree structure: for each column j, the upper
            # off-diagonal entry gives parent p[j] and coupling terms a/b
            _diffusion_d_base = _numpy_array(_diffusion_matrix.diagonal())
            _diffusion_a_base = _numpy_zeros(n)
            _diffusion_b_base = _numpy_zeros(n)
            # TODO: the int32 bit may be machine specific
            _diffusion_p = _numpy_array([-1] * n, dtype=numpy.intc)
            for j in range(n):
                col = _diffusion_matrix[:, j]
                col_nonzero = col.nonzero()
                for i in col_nonzero[0]:
                    if i < j:
                        _diffusion_p[j] = i
                        assert(_diffusion_a_base[j] == 0)
                        _diffusion_a_base[j] = col[i, 0]
                        _diffusion_b_base[j] = _diffusion_matrix[j, i]
            _c_diagonal = _linmodadd_c.diagonal()
        _diffusion_d = _c_diagonal + dt * _diffusion_d_base
        _diffusion_b = dt * _diffusion_b_base
        _diffusion_a = dt * _diffusion_a_base
        _diffusion_a_ptr = _diffusion_a.ctypes.data_as(_double_ptr)
        _diffusion_b_ptr = _diffusion_b.ctypes.data_as(_double_ptr)
        _diffusion_p_ptr = _diffusion_p.ctypes.data_as(_int_ptr)

    result = _numpy_array(rhs)
    d = _numpy_array(_diffusion_d)
    d_ptr = d.ctypes.data_as(_double_ptr)
    result_ptr = result.ctypes.data_as(_double_ptr)

    # solves in place: result holds the solution on return
    nrn_tree_solve(_diffusion_a_ptr, d_ptr, _diffusion_b_ptr, result_ptr,
                   _diffusion_p_ptr, _ctypes_c_int(n))
    return result
def _get_jac(dt, states):
    """Build (I - dt*Jreact) as a sparse COO matrix, or None if no reactions.

    Works as long as (I - dt(Jdiff + Jreact)) ~ (I - dt Jreact)(I - dt Jdiff),
    i.e. the reaction and diffusion solves can be applied sequentially.
    """
    # only get here if already initialized

    # now handle the reaction contribution to the Jacobian
    # this works as long as (I - dt(Jdiff + Jreact)) \approx (I - dtJreact)(I - dtJdiff)
    count = 0
    n = len(states)
    # start from the identity: rows/cols/data hold the diagonal ones
    rows = list(range(n))
    cols = list(range(n))
    data = [1] * n
    for rptr in _all_reactions:
        r = rptr()
        if r:
            # TODO: store weakrefs to r._jacobian_entries as well as r
            #       this will reduce lookup time
            r_rows, r_cols, r_data = r._jacobian_entries(states, multiply=-dt)
            # TODO: can we predict the length of rows etc in advance so we
            #       don't need to grow them?
            rows += r_rows
            cols += r_cols
            data += r_data
            count += 1
    if count > 0 and n > 0:
        return scipy.sparse.coo_matrix((data, (rows, cols)), shape=(n, n))
    return None
def _reaction_matrix_solve(dt, states, rhs):
    """Solve (I - dt*Jreact) x = rhs, rebuilding the Jacobian each call.

    Returns *rhs* unchanged when the reaction-Jacobian option is disabled or
    there are no reactions.
    """
    if not options.use_reaction_contribution_to_jacobian:
        return rhs
    jac = _get_jac(dt, states)
    if jac is not None:
        jac = jac.tocsr()
        """
        print 'states:', list(states)
        print 'jacobian (_solve):'
        m = jac.todense()
        for i in xrange(m.shape[0]):
            for j in xrange(m.shape[1]):
                print ('%15g' % m[i, j]),
            print
        """
        #result, info = scipy.sparse.linalg.bicgstab(jac, rhs)
        #assert(info == 0)
        result = _scipy_sparse_linalg_spsolve(jac, rhs)
    else:
        result = rhs
    return result
# Cached factorized solver for (I - dt * Jreact); (re)built by
# _reaction_matrix_setup and used via _ode_jacobian.
_react_matrix_solver = None
def _reaction_matrix_setup(dt, unexpanded_states):
    """Prefactor (I - dt * Jreact) and cache the solver in _react_matrix_solver.

    unexpanded_states covers only the nonzero-volume nodes; it is scattered
    into a full-length state vector before building the Jacobian.  When the
    reaction contribution is disabled or empty, the cached solver is the
    identity function.
    """
    global _react_matrix_solver
    if not options.use_reaction_contribution_to_jacobian:
        _react_matrix_solver = lambda x: x
        return
    # expand the reduced state vector to full length
    states = numpy.zeros(len(node._get_states()))
    states[_nonzero_volume_indices] = unexpanded_states
    jac = _get_jac(dt, states)
    if jac is not None:
        # CSC is the format required by scipy's factorized()
        jac = jac.tocsc()
        """
        print 'jacobian (_reaction_matrix_setup):'
        m = jac.todense()
        for i in xrange(m.shape[0]):
            for j in xrange(m.shape[1]):
                print ('%15g' % m[i, j]),
            print
        """
        #result, info = scipy.sparse.linalg.bicgstab(jac, rhs)
        #assert(info == 0)
        _react_matrix_solver = _scipy_sparse_linalg_factorized(jac)
    else:
        _react_matrix_solver = lambda x: x
def _setup():
    """Reset solver bookkeeping so matrices/solvers are rebuilt on next use."""
    initializer._do_init()
    # TODO: this is when I should resetup matrices (structure changed event)
    global _last_dt, _external_solver_initialized
    _last_dt = None
    _external_solver_initialized = False
    # Using C-code for reactions
    options.use_reaction_contribution_to_jacobian = False
def _find_librxdmath():
    """Locate the librxdmath shared library shipped with NEURON.

    Tries <neuronhome>/../../<machine>/lib/librxdmath with each known
    shared-library extension, then falls back to the Windows bin
    directory.  Returns the first existing path.

    Raises:
        RxDException: if the library cannot be found.
    """
    base_path = os.path.join(h.neuronhome(), "..", "..", platform.machine(), "lib", "librxdmath")
    dll = None
    for extension in ['', '.dll', '.so', '.dylib']:
        candidate = base_path + extension
        try:
            if os.path.exists(candidate):
                dll = candidate
                break
        except OSError:
            # os.path.exists can raise on malformed paths; treat as absent
            # (was a bare `except:` that swallowed everything)
            pass
    if dll is None and sys.platform.lower().startswith("win"):
        candidate = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')
        if os.path.exists(candidate):
            dll = candidate
    if dll is None:
        raise RxDException('unable to connect to the librxdmath library')
    return dll
def _c_compile(formula):
    """Compile the C source in `formula` into a shared library and return
    its `reaction` entry point as a ctypes function.

    The function has signature double reaction(double*, double*).  On
    Windows the DLL cannot be deleted while loaded, so it is queued in
    _windows_dll_files for later cleanup; elsewhere the .so is removed
    immediately after loading.
    NOTE(review): the compiler is invoked via os.system with a string
    built from paths — paths containing spaces or shell metacharacters
    will break or be interpreted by the shell; consider subprocess.run
    with an argument list.
    """
    # unique temp filename so concurrent compiles don't collide
    filename = 'rxddll' + str(uuid.uuid1())
    with open(filename + '.c', 'w') as f:
        f.write(formula)
    math_library = '-lm'
    fpic = '-fPIC'
    try:
        gcc = os.environ["CC"]
    except:
        #when running on windows try and used the gcc included with NEURON
        if sys.platform.lower().startswith("win"):
            math_library = ''
            fpic = ''
            gcc = os.path.join(h.neuronhome(),"mingw","mingw64","bin","x86_64-w64-mingw32-gcc.exe")
            if not os.path.isfile(gcc):
                raise RxDException("unable to locate a C compiler. Please `set CC=<path to C compiler>`")
        else:
            gcc = "gcc"
    #TODO: Check this works on non-Linux machines
    gcc_cmd = "%s -I%s -I%s " % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), "..", "..", "include", "nrn"))
    gcc_cmd += "-shared %s %s.c %s " % (fpic, filename, _find_librxdmath())
    gcc_cmd += "-o %s.so %s" % (filename, math_library)
    if sys.platform.lower().startswith("win"):
        # temporarily put the bundled mingw on PATH so the DLL's deps resolve
        my_path = os.getenv('PATH')
        os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),"mingw","mingw64","bin"))
        os.system(gcc_cmd)
        os.putenv('PATH', my_path)
    else:
        os.system(gcc_cmd)
    #TODO: Find a better way of letting the system locate librxdmath.so.0
    rxdmath_dll = ctypes.cdll[_find_librxdmath()]
    dll = ctypes.cdll['./%s.so' % filename]
    reaction = dll.reaction
    reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)]
    reaction.restype = ctypes.c_double
    os.remove(filename + '.c')
    if sys.platform.lower().startswith("win"):
        #cannot remove dll that are in use
        _windows_dll.append(weakref.ref(dll))
        _windows_dll_files.append(filename + ".so")
    else:
        os.remove(filename + '.so')
    return reaction
def _conductance(d):
pass
def _ode_jacobian(dt, t, ypred, fpred):
    """Rebuild the cached reaction-Jacobian solver for the rxd slice of ypred.

    Extracts the rxd states (nonzero-volume nodes) from the full CVode
    state vector and delegates to _reaction_matrix_setup.
    """
    #print '_ode_jacobian: dt = %g, last_dt = %r' % (dt, _last_dt)
    lo = _rxd_offset
    hi = lo + len(_nonzero_volume_indices)
    _reaction_matrix_setup(dt, ypred[lo : hi])
# Keep references to the original Python implementations before they are
# replaced by the _w_* wrappers below.
# NOTE(review): _currents, _ode_count, _ode_reinit, _ode_fun, _ode_solve,
# _fixed_step_solve are defined earlier in the file (outside this view).
_orig_setup = _setup
_orig_currents = _currents
_orig_ode_count = _ode_count
_orig_ode_reinit = _ode_reinit
_orig_ode_fun = _ode_fun
_orig_ode_solve = _ode_solve
_orig_fixed_step_solve = _fixed_step_solve
_orig_ode_jacobian = _ode_jacobian
# wrapper functions allow swapping in experimental alternatives
# (most are currently stubs returning None: the work is done in C)
def _w_ode_jacobian(dt, t, ypred, fpred): return None #_ode_jacobian(dt, t, ypred, fpred)
#def _w_conductance(d): return _conductance(d)
_w_conductance = None
def _w_setup(): return _setup()
def _w_currents(rhs): return None
def _w_ode_count(offset): return None #_ode_count(offset)
def _w_ode_reinit(y): return None
def _w_ode_fun(t, y, ydot): return None
def _w_ode_solve(dt, t, b, y): return None # _ode_solve(dt, t, b, y)
def _w_fixed_step_solve(raw_dt): return None # _section1d_transfer_to_legacy # _fixed_step_solve(raw_dt)
def _w_atolscale(y): return None #_atolscale(y)
# callback table in the order expected by the nonvint block supervisor
_callbacks = [_w_setup, None, _w_currents, _w_conductance, _w_fixed_step_solve,
        _w_ode_count, _w_ode_reinit, _w_ode_fun, _w_ode_solve, _w_ode_jacobian, _w_atolscale]
# caches for membrane-current pointer bookkeeping (filled by _update_node_data)
_curr_ptr_vector = None
_curr_ptr_storage = None
_curr_ptr_storage_nrn = None
pinverse = None
_cur_map = None
# cached HOC constructors / change counters from the NEURON interpreter
_h_ptrvector = h.PtrVector
_h_vector = h.Vector
_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)
_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)
def _donothing(): pass
def _update_node_data(force=False):
    """Refresh per-node caches when the NEURON morphology has changed.

    Runs only when NEURON's diameter or structure change counters differ
    from the last values seen, or when force=True.  Rebuilds species node
    data, region indices, reaction indices, and the membrane-current
    pointer caches (_curr_indices/_curr_scales/_curr_ptrs).
    """
    global last_diam_change_cnt, last_structure_change_cnt, _curr_indices, _curr_scales, _curr_ptrs, _cur_map
    global _curr_ptr_vector, _curr_ptr_storage, _curr_ptr_storage_nrn
    if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:
        _cur_map = {}
        last_diam_change_cnt = _diam_change_count.value
        last_structure_change_cnt = _structure_change_count.value
        #if not species._has_3d:
        # TODO: merge this with the 3d/hybrid case?
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None: s._update_node_data()
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None: s._update_region_indices()
        #end#if
        for rptr in _all_reactions:
            r = rptr()
            if r is not None: r._update_indices()
        _curr_indices = []
        _curr_scales = []
        _curr_ptrs = []
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None: s._setup_currents(_curr_indices, _curr_scales, _curr_ptrs, _cur_map)
        num = len(_curr_ptrs)
        if num:
            # wrap the current pointers in a PtrVector for bulk gather
            _curr_ptr_vector = _h_ptrvector(num)
            _curr_ptr_vector.ptr_update_callback(_donothing)
            for i, ptr in enumerate(_curr_ptrs):
                _curr_ptr_vector.pset(i, ptr)
            _curr_ptr_storage_nrn = _h_vector(num)
            _curr_ptr_storage = _curr_ptr_storage_nrn.as_numpy()
        else:
            _curr_ptr_vector = None
        _curr_scales = _numpy_array(_curr_scales)
def _send_euler_matrix_to_c(nrow, nnonzero, nonzero_i, nonzero_j, nonzero_values, zero_volume_indices):
    """Hand the sparse Euler matrix (COO triplets) and the precomputed
    diffusion base vectors to the C solver via set_euler_matrix.

    Also flushes 1-D section state to the legacy representation first.
    """
    section1d._transfer_to_legacy()
    set_euler_matrix(nrow, nnonzero, nonzero_i, nonzero_j, nonzero_values,
                     zero_volume_indices, len(zero_volume_indices),
                     _diffusion_a_base, _diffusion_b_base, _diffusion_d_base,
                     _diffusion_p, _c_diagonal)
def _matrix_to_rxd_sparse(m):
    """Convert a scipy.sparse DOK matrix to COO-style triplet arrays.

    Returns (n, nnz, i_array, j_array, values) with contiguous numpy
    arrays suitable for passing to the C solver.
    NOTE(review): the original docstring claimed m is "a numpy array",
    but the use of m.keys()/m.values() implies a dok_matrix.
    """
    nonzero_i, nonzero_j = list(zip(*list(m.keys())))
    nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)
    # matrix dimension; uses shape[1] — assumes a square matrix if meant
    # as "number of rows" (TODO confirm)
    n = m.shape[1]
    return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values
def _calculate_diffusion_bases():
    """Extract tree-solver vectors (a, b, d, parent indices) from the
    global _diffusion_matrix.

    For each column j, the first superdiagonal nonzero row i < j becomes
    the parent p[j] = i, with a[j] and b[j] the corresponding off-diagonal
    couplings; d is the diagonal.  Also caches the diagonal of the C
    matrix in _c_diagonal.
    """
    global _diffusion_a_base, _diffusion_b_base, _diffusion_d_base, _diffusion_p
    global _c_diagonal
    _diffusion_d_base = _numpy_array(_diffusion_matrix.diagonal())
    n = len(_diffusion_d_base)
    _diffusion_a_base = _numpy_zeros(n, dtype=numpy.double)
    _diffusion_b_base = _numpy_zeros(n, dtype=numpy.double)
    # TODO: the int32 bit may be machine specific
    _diffusion_p = _numpy_array([-1] * n, dtype=numpy.intc)
    for j in range(n):
        col = _diffusion_matrix[:, j]
        col_nonzero = col.nonzero()
        for i in col_nonzero[0]:
            if i < j:
                _diffusion_p[j] = i
                # each node should have at most one parent entry
                assert(_diffusion_a_base[j] == 0)
                _diffusion_a_base[j] = col[i, 0]
                _diffusion_b_base[j] = _diffusion_matrix[j, i]
                break
    _c_diagonal = _linmodadd_c.diagonal()
# sparse matrix for the explicit (Euler) diffusion operator; built by
# _setup_matrices
_euler_matrix = None
# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes
def _setup_matrices():
    """Construct the diffusion/Euler matrices and hand them to the C solver.

    Builds, depending on which of 1D/3D discretizations are present:
    the C matrix (for cy' + gy = b), the diffusion matrix G, the Euler
    matrix (= -G), hybrid 1D/3D coupling terms, and the zero-volume node
    bookkeeping.  Finally registers everything with the C solver
    (set_euler_matrix / setup_solver / current & concentration pointers).

    BUG FIX: in the hybrid 1D/3D search below, `parent_1d` was computed
    from `parent_seg` (the parent of the *3D* section) instead of
    `parent_1d_seg` (the parent of the candidate 1D section just looked
    up), so 1D children of a 3D section were never matched; it now uses
    `parent_1d_seg`.
    """
    global _linmodadd, _linmodadd_c, _diffusion_matrix, _linmodadd_b, _last_dt, _c_diagonal, _euler_matrix
    global _euler_matrix_i, _euler_matrix_j, _euler_matrix_nonzero, _euler_matrix_nrow, _euler_matrix_nnonzero
    global _cur_node_indices
    global _zero_volume_indices, _nonzero_volume_indices
    # TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.
    n = len(_node_get_states())
    if species._has_3d:
        _euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None: s._setup_matrices3d(_euler_matrix)
        _diffusion_matrix = -_euler_matrix
        _euler_matrix = _euler_matrix.tocsr()
        _update_node_data(True)
        # NOTE: if we also have 1D, this will be replaced with the correct values below
        _zero_volume_indices = []
        _nonzero_volume_indices = list(range(len(_node_get_states())))
    if species._has_1d:
        n = species._1d_submatrix_n()
        # TODO: initialization is slow. track down why
        _last_dt = None
        _c_diagonal = None
        for sr in list(_species_get_all_species().values()):
            s = sr()
            if s is not None:
                s._assign_parents()
        _update_node_data(True)
        # remove old linearmodeladdition
        _linmodadd = None
        _linmodadd_cur = None
        if n:
            # create sparse matrix for C in cy'+gy=b
            _linmodadd_c = _scipy_sparse_dok_matrix((n, n))
            # most entries are 1 except those corresponding to the 0 and 1 ends
            # create the matrix G
            if not species._has_3d:
                # if we have both, then put the 1D stuff into the matrix that already exists for 3D
                _diffusion_matrix = _scipy_sparse_dok_matrix((n, n))
            for sr in list(_species_get_all_species().values()):
                s = sr()
                if s is not None:
                    s._setup_diffusion_matrix(_diffusion_matrix)
                    s._setup_c_matrix(_linmodadd_c)
            #print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)
            # modify C for cases where no diffusive coupling of 0, 1 ends
            # TODO: is there a better way to handle no diffusion?
            for i in range(n):
                if not _diffusion_matrix[i, i]:
                    _linmodadd_c[i, i] = 1
            # and the vector b
            _linmodadd_b = _h_vector(n)
            # setup for induced membrane currents
            _cur_node_indices = []
            for rptr in _all_reactions:
                r = rptr()
                if r is not None:
                    r._setup_membrane_fluxes(_cur_node_indices, _cur_map)
            #_cvode_object.re_init()
            _linmodadd_c = _linmodadd_c.tocsr()
            if species._has_3d:
                _euler_matrix = -_diffusion_matrix
        volumes = node._get_data()[0]
        _zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)
        _nonzero_volume_indices = volumes.nonzero()[0]
        if species._has_1d and species._has_3d:
            # TODO: add connections to matrix; for now: find them
            hybrid_neighbors = collections.defaultdict(lambda: [])
            hybrid_diams = {}
            dxs = set()
            for sr in list(_species_get_all_species().values()):
                s = sr()
                if s is not None:
                    if s._nodes and s._secs:
                        # have both 1D and 3D, so find the neighbors
                        # for each of the 3D sections, find the parent sections
                        for r in s._regions:
                            dxs.add(r._dx)
                            for sec in r._secs3d:
                                parent_seg = sec.trueparentseg()
                                parent_sec = None if not parent_seg else parent_seg.sec
                                # are any of these a match with a 1d section?
                                if s._has_region_section(r, parent_sec):
                                    # this section has a 1d section that is a parent
                                    index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))
                                    hybrid_neighbors[index1d] += indices3d
                                    hybrid_diams[index1d] = parent_seg.diam
                                else:
                                    for sec1d in r._secs1d:
                                        parent_1d_seg = sec1d.trueparentseg()
                                        # BUG FIX: was `parent_seg` (the 3D section's
                                        # parent) — must use the 1D section's parent seg
                                        parent_1d = None if not parent_1d_seg else parent_1d_seg.sec
                                        if parent_1d == sec:
                                            # it is the parent of a 1d section
                                            index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, h.section_orientation(sec=sec1d))
                                            hybrid_neighbors[index1d] += indices3d
                                            hybrid_diams[index1d] = parent_1d_seg.diam
                                            break
                                        elif parent_1d == parent_sec:
                                            # it connects to the parent of a 1d section
                                            index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, h.section_orientation(sec=sec1d))
                                            hybrid_neighbors[index1d] += indices3d
                                            hybrid_diams[index1d] = parent_1d_seg.diam
                                            break
            if len(dxs) > 1:
                raise RxDException('currently require a unique value for dx')
            dx = dxs.pop()
            diffs = node._diffs
            n = len(_node_get_states())
            # TODO: validate that we're doing the right thing at boundaries
            for index1d in list(hybrid_neighbors.keys()):
                neighbors3d = set(hybrid_neighbors[index1d])
                # NOTE: splitting the connection area equally across all the connecting nodes
                area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)
                for i in neighbors3d:
                    d = diffs[i]
                    vol = node._volumes[i]
                    rate = d * area / (vol * dx / 2.)
                    # make the connections on the 3d side
                    _euler_matrix[i, i] -= rate
                    _euler_matrix[i, index1d] += rate
                    # make the connections on the 1d side (scale by vol because conserving mass not volume)
                    _euler_matrix[index1d, index1d] -= rate * vol
                    _euler_matrix[index1d, i] += rate * vol
                #print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))
                #print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))
    #CRxD: register the matrices with the C solver
    if _euler_matrix is not None and _euler_matrix.nnz > 0:
        _euler_matrix_nrow, _euler_matrix_nnonzero, _euler_matrix_i, _euler_matrix_j, _euler_matrix_nonzero = _matrix_to_rxd_sparse(_euler_matrix)
        _calculate_diffusion_bases()
        _update_node_data()
        _send_euler_matrix_to_c(_euler_matrix_nrow, _euler_matrix_nnonzero, _euler_matrix_i, _euler_matrix_j, _euler_matrix_nonzero, _zero_volume_indices)
    elif _diffusion_matrix is not None and _diffusion_matrix.nnz > 0:
        _euler_matrix_nrow, _euler_matrix_nnonzero, _euler_matrix_i, _euler_matrix_j, _euler_matrix_nonzero = _matrix_to_rxd_sparse(-_diffusion_matrix)
        _calculate_diffusion_bases()
        _update_node_data()
        _send_euler_matrix_to_c(_euler_matrix_nrow, _euler_matrix_nnonzero, _euler_matrix_i, _euler_matrix_j, _euler_matrix_nonzero, _zero_volume_indices)
    else:
        rxd_set_no_diffusion()
    setup_solver(_node_get_states(), len(_node_get_states()), _list_to_clong_array(_zero_volume_indices), len(_zero_volume_indices), h._ref_t, h._ref_dt)
    if _curr_indices is not None and len(_curr_indices) > 0:
        rxd_setup_curr_ptrs(len(_curr_indices), _list_to_cint_array(_curr_indices),
                            _curr_scales, _list_to_pyobject_array(_curr_ptrs))
    if section1d._all_cindices is not None and len(section1d._all_cindices) > 0:
        rxd_setup_conc_ptrs(len(section1d._all_cindices),
                            _list_to_cint_array(section1d._all_cindices),
                            _list_to_pyobject_array(section1d._all_cptrs))
    # we do this last because of performance issues with changing sparsity of csr matrices
    if _diffusion_matrix is not None:
        _diffusion_matrix = _diffusion_matrix.tocsr()
    if _euler_matrix is not None:
        _euler_matrix = _euler_matrix.tocsr()
    if species._has_1d:
        if species._has_3d:
            _diffusion_matrix = -_euler_matrix
        n = species._1d_submatrix_n()
        if n:
            # precompute the matrix used by CVode to reconstruct
            # zero-volume node values from their diffusive neighbors
            matrix = _diffusion_matrix[_zero_volume_indices].tocsr()
            indptr = matrix.indptr
            matrixdata = matrix.data
            count = len(_zero_volume_indices)
            for row, i in enumerate(_zero_volume_indices):
                d = _diffusion_matrix[i, i]
                if d:
                    matrixdata[indptr[row] : indptr[row + 1]] /= -d
                    matrix[row, i] = 0
                else:
                    matrixdata[indptr[row] : indptr[row + 1]] = 0
            global _mat_for_zero_volume_nodes
            _mat_for_zero_volume_nodes = matrix
            # TODO: _mat_for_zero_volume_nodes is used for CVode.
            #       Figure out if/how it has to be changed for hybrid 1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)
    # (Removed: a dead triple-quoted block of leftover euler_matrix assembly
    # code referencing undefined names pt1/pt2/indices/areal/arear.)
def _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):
    """Find the state indices coupling a 1D section end to 3D voxels.

    Returns (index_1d, indices3d): the single 1D node index at position
    x1d of sec1d (must be an end, 0 or 1) and the de-duplicated list of
    3D node indices of `species` on `region` at position x3d of sec3d.

    Raises:
        RxDException: if the 1D parent node cannot be located, or if x1d
            is neither end of sec1d.
    """
    # TODO: remove need for this assumption
    assert(x1d in (0, 1))
    disc_indices = region._indices_from_sec_x(sec3d, x3d)
    #print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)
    #print 'disc indices: %r' % disc_indices
    indices3d = []
    for node in species._nodes:
        if node._r == region:
            for i, j, k in disc_indices:
                if node._i == i and node._j == j and node._k == k:
                    indices3d.append(node._index)
                    #print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)
    # discard duplicates...
    # TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that
    indices3d = list(set(indices3d))
    #print '3d matrix indices: %r' % indices3d
    # TODO: remove the need for this assertion
    if x1d == h.section_orientation(sec=sec1d):
        # TODO: make this whole thing more efficient
        # the parent node is the nonzero index on the first row before the diagonal
        first_row = min([node._index for node in species.nodes(region)(sec1d)])
        for j in range(first_row):
            if _euler_matrix[first_row, j] != 0:
                index_1d = j
                break
        else:
            raise RxDException('should never get here; could not find parent')
    elif x1d == 1 - h.section_orientation(sec=sec1d):
        # the ending zero-volume node is the one after the last node
        # TODO: make this more efficient
        index_1d = max([node._index for node in species.nodes(region)(sec1d)]) + 1
    else:
        raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')
    #print '1d index is %d' % index_1d
    return index_1d, indices3d
def _compile_reactions():
    """Generate, compile, and register C kernels for all current reactions.

    Walks _all_reactions, groups them by region (intracellular,
    multi-compartment, extracellular), emits a C `reaction` function per
    group as a string, compiles it with _c_compile, and registers it with
    the C solver via register_rate / ecs_register_reaction.

    NOTE(review): in the `all_gids` collection loops below, `reg` is read
    *before* the `for reg in ecs_regions_inv:` loop assigns it — it relies
    on a value leaked from an earlier loop over react_regions; verify this
    is intended.
    """
    #clear all previous reactions (intracellular & extracellular) and the
    #supporting indexes
    #_windows_remove_dlls()
    clear_rates()
    regions_inv = dict() #regions -> reactions that occur there
    species_by_region = dict()
    all_species_involed = set()
    location_count = 0
    ecs_regions_inv = dict()
    ecs_species_by_region = dict()
    ecs_all_species_involed = set()
    ecs_mc_species_involved = set()
    from . import rate, multiCompartmentReaction
    #Find sets of sections that contain the same regions
    from .region import _c_region
    matched_regions = [] # the different combinations of regions that arise in different sections
    for nrnsec in list(section1d._rxd_sec_lookup.keys()):
        set_of_regions = set() # a set of the regions that occur in a given section
        for sec in section1d._rxd_sec_lookup[nrnsec]:
            if sec(): set_of_regions.add(sec()._region)
        if set_of_regions not in matched_regions:
            matched_regions.append(set_of_regions)
    region._c_region_lookup = dict()
    #create a c_region instance for each of the unique sets of regions
    c_region_list = []
    for sets in matched_regions:
        c_region_list.append(_c_region(sets))
    for rptr in _all_reactions:
        r = rptr()
        if not r:
            continue
        #Find all the species involved
        if isinstance(r,rate.Rate):
            if not r._species():
                continue
            sptrs = set(list(r._involved_species) + [r._species])
        else:
            sptrs = set(list(r._involved_species) + r._dests + r._sources)
        #Find all the regions involved
        if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
            react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]
            react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]
        #if regions are specified - use those
        elif None not in r._regions:
            react_regions = r._active_regions
        #Otherwise use all the regions where the species are
        else:
            react_regions = set()
            nsp = 0
            for sp in sptrs:
                s = sp()
                nsp += 1
                if isinstance(s,species.SpeciesOnRegion):
                    react_regions.add(s._region())
                elif isinstance(s,species.SpeciesOnExtracellular):
                    react_regions.add(s._extracellular()._region)
                elif isinstance(s,species._ExtracellularSpecies):
                    react_regions.add(s._region)
                elif None not in s._regions:
                    [react_regions.add(reg) for reg in s._regions + s._extracellular_regions]
            react_regions = list(react_regions)
        #Only regions where ALL the species are present -- unless it is a membrane
        #from collections import Counter
        #from . import geometry as geo
        #react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]
        #Any intracellular regions
        if not all([isinstance(x, region.Extracellular) for x in react_regions]):
            species_involved = []
            for sp in sptrs:
                s = sp()
                if not isinstance(s, species.SpeciesOnExtracellular):
                    all_species_involed.add(s)
                    species_involved.append(s)
            for reg in react_regions:
                if isinstance(reg, region.Extracellular):
                    continue
                if reg in regions_inv:
                    regions_inv[reg].append(rptr)
                else:
                    regions_inv[reg] = [rptr]
                if reg in species_by_region:
                    species_by_region[reg] = species_by_region[reg].union(species_involved)
                else:
                    species_by_region[reg] = set(species_involved)
                    for sec in reg._secs:
                        location_count += sec.nseg
        #Any extracellular regions
        if any([isinstance(x, region.Extracellular) for x in react_regions]):
            #MultiCompartment - so can have both extracellular and intracellular regions
            if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
                for sp in sptrs:
                    s = sp()
                    if isinstance(s,species._ExtracellularSpecies):
                        ecs_mc_species_involved.add(s)
                    elif isinstance(s,species.SpeciesOnExtracellular):
                        ecs_mc_species_involved.add(s._extracellular())
                for reg in react_regions:
                    if reg in list(ecs_species_by_region.keys()):
                        ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)
                    else:
                        ecs_species_by_region[reg] = set(ecs_mc_species_involved)
            #Otherwise - reaction can only have extracellular regions
            else:
                ecs_species_involved = []
                for sp in sptrs:
                    s = sp()
                    ecs_all_species_involed.add(s)
                    ecs_species_involved.append(s)
                if any([isinstance(x, region.Region) for x in react_regions]):
                    raise RxDException("Error: an %s cannot have both Extracellular and Intracellular regions. Use a MultiCompartmentReaction or specify the desired region with the 'region=' keyword argument", rptr().__class__)
                for reg in react_regions:
                    if not isinstance(reg, region.Extracellular):
                        continue
                    if reg in ecs_regions_inv:
                        ecs_regions_inv[reg].append(rptr)
                    else:
                        ecs_regions_inv[reg] = [rptr]
                    if reg in ecs_species_by_region:
                        ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)
                    else:
                        ecs_species_by_region[reg] = set(ecs_species_involved)
    #Create lists of indexes for intracellular reactions and rates
    nseg_by_region = [] # a list of the number of segments for each region
    # a table for location,species -> state index
    location_index = []
    for reg in regions_inv:
        rptr = weakref.ref(reg)
        for c_region in region._c_region_lookup[rptr]:
            for react in regions_inv[reg]:
                c_region.add_reaction(react,regions_inv[reg])
                c_region.add_species(species_by_region[reg])
                if reg in ecs_species_by_region:
                    c_region.add_ecs_species(ecs_species_by_region[reg])
    # now setup the reactions
    #if there are no reactions
    if location_count == 0 and len(ecs_regions_inv) == 0:
        setup_solver(_node_get_states(), len(_node_get_states()), _list_to_clong_array(_zero_volume_indices), len(_zero_volume_indices), h._ref_t, h._ref_dt)
        return None
    #Setup intracellular and multicompartment reactions
    if location_count > 0:
        from . import rate, multiCompartmentReaction
        for creg in c_region_list:
            creg._initalize()
            mc_mult_count = 0
            mc_mult_list = []
            # track which (species, region) slots already have a rhs entry
            # so the generated C uses '=' first, then '+='
            species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)
            ecs_species_ids_used = numpy.zeros((creg.num_ecs_species,creg.num_regions),bool)
            fxn_string = _c_headers
            fxn_string += 'void reaction(double** species, double** rhs, double* mult, double** species_ecs, double** rhs_ecs)\n{'
            # declare the "rate" variable if any reactions (non-rates)
            for rprt in list(creg._react_regions.keys()):
                if not isinstance(rprt(),rate.Rate):
                    fxn_string += '\n\tdouble rate;'
                    break
            for rptr in list(creg._react_regions.keys()):
                r = rptr()
                if isinstance(r,rate.Rate):
                    s = r._species()
                    species_id = creg._species_ids.get(s._id)
                    if isinstance(s,species.SpeciesOnRegion):
                        region_ids = [creg._region_ids.get(s._region()._id)]
                    else:
                        region_ids = creg._react_regions[rptr]
                    for region_id in region_ids:
                        rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
                        rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
                        operator = '+=' if species_ids_used[species_id][region_id] else '='
                        fxn_string += "\n\trhs[%d][%d] %s %s;" % (species_id, region_id, operator, rate_str)
                        species_ids_used[species_id][region_id] = True
                elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
                    #Lookup the region_id for the reaction
                    # NOTE(review): region_id ends up as that of the LAST
                    # non-extracellular source/dest — presumably all agree;
                    # TODO confirm
                    for sptr in r._sources + r._dests:
                        if isinstance(sptr(),species.SpeciesOnExtracellular):
                            continue
                        region_id = creg._region_ids.get(sptr()._region()._id)
                    rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
                    rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
                    rate_str = re.sub(r'species_ecs\[(\d+)\]',lambda m: "species_ecs[%i][%i]" % (int(m.groups()[0]), region_id), rate_str)
                    fxn_string += "\n\trate = %s;" % rate_str
                    for sptr in r._sources + r._dests:
                        s = sptr()
                        if isinstance(s,species.SpeciesOnExtracellular):
                            species_id = s._extracellular()._grid_id
                            operator = '+=' if ecs_species_ids_used[species_id][region_id] else '='
                            fxn_string += "\n\trhs_ecs[%d][%d] %s mult[%d] * rate;" % (species_id, region_id, operator, mc_mult_count)
                            ecs_species_ids_used[species_id][region_id] = True
                        else:
                            species_id = creg._species_ids.get(s._id)
                            region_id = creg._region_ids.get(sptr()._region()._id)
                            operator = '+=' if species_ids_used[species_id][region_id] else '='
                            fxn_string += "\n\trhs[%d][%d] %s mult[%d] * rate;" % (species_id, region_id, operator, mc_mult_count)
                            species_ids_used[species_id][region_id] = True
                        #TODO: Fix problem if the whole region isn't part of the same aggregate c_region
                        mc_mult_count += 1
                    mc_mult_list.extend(r._mult.flatten())
                else:
                    for region_id in creg._react_regions[rptr]:
                        rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)
                        rate_str = re.sub(r'species\[(\d+)\]\[\]',lambda m: "species[%i][%i]" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)
                        fxn_string += "\n\trate = %s;" % rate_str
                        summed_mults = collections.defaultdict(lambda: 0)
                        for (mult, sp) in zip(r._mult, r._sources + r._dests):
                            summed_mults[creg._species_ids.get(sp()._id)] += mult
                        for idx in sorted(summed_mults.keys()):
                            operator = '+=' if species_ids_used[idx][region_id] else '='
                            species_ids_used[idx][region_id] = True
                            fxn_string += "\n\trhs[%d][%d] %s (%g) * rate;" % (idx, region_id, operator, summed_mults[idx])
            fxn_string += "\n}\n"
            #print "num_species=%i\t num_regions=%i\t num_segments=%i\n" % (creg.num_species, creg.num_regions, creg.num_segments)
            #print creg.get_state_index()
            #print "state_index %s \t num_ecs_species=%i\t ecs_species_ids %s\n" % (creg.get_state_index().shape, creg.num_ecs_species, creg.get_ecs_species_ids().shape)
            #print "ecs_index %s\t mc_mult_count=%i \t mc_mult_list %s\n" % (creg.get_ecs_index().shape, mc_mult_count, numpy.array(mc_mult_list, dtype=ctypes.c_double).shape)
            #print mc_mult_list
            #print fxn_string
            register_rate(creg.num_species, creg.num_regions, creg.num_segments, creg.get_state_index(),
                          creg.num_ecs_species, creg.get_ecs_species_ids(), creg.get_ecs_index(),
                          mc_mult_count, numpy.array(mc_mult_list, dtype=ctypes.c_double),
                          _c_compile(fxn_string))
    setup_solver(_node_get_states(), len(_node_get_states()), _list_to_clong_array(_zero_volume_indices), len(_zero_volume_indices), h._ref_t, h._ref_dt)
    #Setup extracellular reactions
    if len(ecs_regions_inv) > 0:
        grid_ids = []
        all_gids = set()
        fxn_string = _c_headers
        #TODO: find the nrn include path in python
        #It is necessary for a couple of function in python that are not in math.h
        fxn_string += 'void reaction(double* species_ecs, double* rhs)\n{'
        # declare the "rate" variable if any reactions (non-rates)
        for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:
            if not isinstance(rptr(),rate.Rate):
                fxn_string += '\n\tdouble rate;'
                break
        #get a list of all grid_ids invovled
        for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:
            if isinstance(rptr(),rate.Rate):
                for sp in [rptr()._species] + rptr()._involved_species_ecs:
                    s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()
                    all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)
            else:
                for sp in rptr()._sources + rptr()._dests + rptr()._involved_species_ecs:
                    s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()
                    all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)
        all_gids = list(all_gids)
        for reg in ecs_regions_inv:
            for rptr in ecs_regions_inv[reg]:
                r = rptr()
                # renumber grid ids in the rate expression to positions in all_gids
                rate_str = re.sub(r'species_ecs\[(\d+)\]',lambda m: "species_ecs[%i]" % [pid for pid,gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs)
                if isinstance(r,rate.Rate):
                    s = r._species()
                    #Get underlying rxd._ExtracellularSpecies for the grid_id
                    if isinstance(s, species.Species):
                        s = s[reg]._extracellular()
                    elif isinstance(s, species.SpeciesOnExtracellular):
                        s = s._extracellular()
                    if s._grid_id in grid_ids:
                        operator = '+='
                    else:
                        operator = '='
                        grid_ids.append(s._grid_id)
                    pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
                    fxn_string += "\n\trhs[%d] %s %s;" % (pid, operator, rate_str)
                else:
                    idx=0
                    fxn_string += "\n\trate = %s;" % rate_str
                    for sp in r._sources + r._dests:
                        s = sp()
                        #Get underlying rxd._ExtracellularSpecies for the grid_id
                        if isinstance(s, species.Species):
                            s = s[reg]._extracellular()
                        elif isinstance(s, species.SpeciesOnExtracellular):
                            s = s._extracellular()
                        if s._grid_id in grid_ids:
                            operator = '+='
                        else:
                            operator = '='
                            grid_ids.append(s._grid_id)
                        pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
                        fxn_string += "\n\trhs[%d] %s (%s)*rate;" % (pid, operator, r._mult[idx])
                        idx += 1
        fxn_string += "\n}\n"
        ecs_register_reaction(0, len(all_gids), _list_to_cint_array(all_gids), _c_compile(fxn_string))
def _init():
    """FInitializeHandler callback: set up rxd state at simulation init.

    No-op when no species exist.  Otherwise runs deferred initialization,
    defines shapes, registers concentration pointers, reinitializes each
    species, and (re)builds matrices, reactions, and membrane currents.
    """
    if len(species._all_species) == 0:
        return None
    initializer._do_init()
    # TODO: check about the 0<x<1 problem alluded to in the documentation
    h.define_shape()
    if species._has_1d:
        section1d._purge_cptrs()
    for sr in list(_species_get_all_species().values()):
        s = sr()
        if s is not None:
            # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)
            s._register_cptrs()
            s._finitialize()
    _setup_matrices()
    _compile_reactions()
    _setup_memb_currents()
# one-shot guard / handle for the nonvint-block-supervisor registration
_has_nbs_registered = False
_nbs = None
def _do_nbs_register():
    """Register rxd initialization and scatter/gather hooks with NEURON.

    Idempotent: guarded by _has_nbs_registered.  Installs two
    FInitializeHandlers (for _init and ion registration) and an
    extra_scatter_gather callback on the CVode object.  The handler
    objects are kept in module globals so they are not garbage collected.
    """
    global _has_nbs_registered, _nbs, _fih, _fih2
    if not _has_nbs_registered:
        #from neuron import nonvint_block_supervisor as _nbs
        _has_nbs_registered = True
        #_nbs.register(_callbacks) not used by crxd
        #
        # register the initialization handler and the ion register handler
        #
        _fih = h.FInitializeHandler(3, _init)
        set_setup_matrices = nrn_dll_sym('set_setup_matrices')
        set_setup_matrices.argtypes = [fptr_prototype]
        do_setup_matrices_fptr = fptr_prototype(_setup_matrices)
        _fih2 = h.FInitializeHandler(3, initializer._do_ion_register)
        #
        # register scatter/gather mechanisms
        #
        _cvode_object.extra_scatter_gather(0, _after_advance)
# register the Python callbacks with the C solver; the fptr_prototype
# wrappers are kept in module globals so ctypes callbacks stay alive
do_setup_fptr = fptr_prototype(_setup)
do_initialize_fptr = fptr_prototype(_init)
set_setup(do_setup_fptr)
set_initialize(do_initialize_fptr)
def _windows_remove_dlls():
    """Free and delete the reaction DLLs queued on Windows.

    DLLs cannot be deleted while loaded, so _c_compile queues them; here
    each still-live handle is freed via kernel32.FreeLibrary before the
    file is removed, then both queues are cleared.
    """
    global _windows_dll_files, _windows_dll
    for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):
        dll = dll_ptr()
        if dll:
            handle = dll._handle
            # drop our reference before freeing the OS handle
            del dll
            ctypes.windll.kernel32.FreeLibrary(handle)
        os.remove(filepath)
    _windows_dll_files = []
    _windows_dll = []
def nthread(n=None):
    """Get or set the number of rxd worker threads.

    Parameters
    ----------
    n : int, optional
        If provided (not None), set the thread count before returning.

    Returns
    -------
    int
        The current number of threads.
    """
    # Bug fix: the original `if(n)` truthiness test silently ignored n=0
    # instead of forwarding it to the backend for validation.
    if n is not None:
        _set_num_threads(n)
    return _get_num_threads()
| 42.889172
| 242
| 0.627673
|
4a04a399d22b36fe1654190d4747e5a5342792c3
| 1,981
|
py
|
Python
|
test/test_algorithm.py
|
ehigham/broad-challenge
|
9854d9269f378999f28ee9fc202068e785f87faf
|
[
"MIT"
] | null | null | null |
test/test_algorithm.py
|
ehigham/broad-challenge
|
9854d9269f378999f28ee9fc202068e785f87faf
|
[
"MIT"
] | null | null | null |
test/test_algorithm.py
|
ehigham/broad-challenge
|
9854d9269f378999f28ee9fc202068e785f87faf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import networkx
from challenge.algorithm import dijkstras_shortest_path
class TestAlgorithmModule(unittest.TestCase):
    """Unit tests for challenge.algorithm.dijkstras_shortest_path."""
    def test_empty_graph(self):
        """Searching an empty graph raises ValueError (endpoints must be members of graph.nodes)."""
        graph = networkx.Graph()
        with self.assertRaises(ValueError):
            dijkstras_shortest_path(graph, 'A', 'C')
    def test_disjoint_graph(self):
        """Two nodes with no connecting edge yield an empty path."""
        graph = networkx.Graph()
        graph.add_nodes_from(['A', 'B'])
        path = dijkstras_shortest_path(graph, 'A', 'B')
        self.assertListEqual(path, [])
    def test_path_to_itself(self):
        """A path from a node to itself is empty."""
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C')])
        path = dijkstras_shortest_path(graph, 'A', 'A')
        self.assertListEqual(path, [])
    def test_simple_shortest_path(self):
        """A - B - C : the only route is the whole chain."""
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C')])
        path = dijkstras_shortest_path(graph, 'A', 'C')
        self.assertListEqual(path, ['A', 'B', 'C'])
    def test_shortcut_path(self):
        """
        A - B - C - D - E - F
        with shortcut B - G - E: the shortcut must be preferred.
        """
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E'), ('E', 'F')])
        graph.add_edges_from([('B', 'G'), ('G', 'E')])
        path = dijkstras_shortest_path(graph, 'A', 'F')
        self.assertListEqual(path, ['A', 'B', 'G', 'E', 'F'])
    def test_cyclic_graph_path(self):
        """
        A - B - C - D - E
        with cycle B - G - C: the cycle must not derail the search.
        """
        graph = networkx.Graph()
        graph.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'E')])
        graph.add_edges_from([('C', 'G'), ('G', 'B')])
        path = dijkstras_shortest_path(graph, 'A', 'E')
        self.assertListEqual(path, ['A', 'B', 'C', 'D', 'E'])
if __name__ == '__main__':
unittest.main()
| 31.444444
| 90
| 0.504796
|
4a04a3b4454b985cc0302044cc06b8934de9d1be
| 451
|
py
|
Python
|
Allswap_django/allswap/accounts/forms.py
|
yds05238/AllSwap_Backend
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | 2
|
2020-02-19T05:06:49.000Z
|
2020-02-20T17:34:41.000Z
|
Allswap_django/allswap/accounts/forms.py
|
yds05238/AllSwap_Backend
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | 28
|
2020-06-05T20:52:59.000Z
|
2022-03-12T00:15:17.000Z
|
Allswap_django/allswap/accounts/forms.py
|
yds05238/AllSwap
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
    """Sign-up form bound to the active user model with friendlier labels."""

    class Meta:
        model = get_user_model()
        fields = ('username', 'email', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Override the model-derived labels with user-facing wording.
        custom_labels = {
            'username': "Display Name",
            'email': "Email Address",
        }
        for field_name, label in custom_labels.items():
            self.fields[field_name].label = label
| 34.692308
| 61
| 0.67184
|
4a04a3c6b7f091eff7f7af30802a3a917c21272e
| 624
|
py
|
Python
|
reports/migrations/0006_auto_20170727_0217.py
|
ben174/bart-crime
|
5372f1af2e741ffccbaf5896f56e42b4f8dd7722
|
[
"MIT"
] | 25
|
2017-07-29T16:32:09.000Z
|
2020-11-18T06:57:49.000Z
|
reports/migrations/0006_auto_20170727_0217.py
|
ben174/bart-crime
|
5372f1af2e741ffccbaf5896f56e42b4f8dd7722
|
[
"MIT"
] | 21
|
2017-08-03T18:25:52.000Z
|
2021-09-08T22:59:57.000Z
|
reports/migrations/0006_auto_20170727_0217.py
|
ben174/bart-crime
|
5372f1af2e741ffccbaf5896f56e42b4f8dd7722
|
[
"MIT"
] | 3
|
2017-08-07T23:08:53.000Z
|
2017-11-20T22:56:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 02:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds `arrested` and `incident_date` to Incident."""
    dependencies = [
        ('reports', '0005_auto_20170726_2232'),
    ]
    operations = [
        migrations.AddField(
            model_name='incident',
            name='arrested',
            # defaults to False so existing rows get a value without a prompt
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='incident',
            name='incident_date',
            # nullable: the date is unknown for historical incidents
            field=models.DateField(blank=True, null=True),
        ),
    ]
| 24
| 58
| 0.597756
|
4a04a4285cee3d1757018a1ddea78bc945f2eced
| 1,340
|
py
|
Python
|
models/__init__.py
|
gditzler/extreme-verification-latency
|
eb3c783c3dcbdb8e9287bb59bb1e9d513ad6e762
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
gditzler/extreme-verification-latency
|
eb3c783c3dcbdb8e9287bb59bb1e9d513ad6e762
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
gditzler/extreme-verification-latency
|
eb3c783c3dcbdb8e9287bb59bb1e9d513ad6e762
|
[
"MIT"
] | 1
|
2022-02-25T20:37:09.000Z
|
2022-02-25T20:37:09.000Z
|
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .compose import ComposeV1
from .compose import ComposeV2
from .compose import FastCompose
from .mclassification import MClassification
from .leveliw import LevelIW
from .scargc import Scargc
from .apt import APT
| 37.222222
| 80
| 0.781343
|
4a04a5186d5524b54bb513e05a18341a3b6f9446
| 4,662
|
py
|
Python
|
pages/captcha.py
|
gsw945/myblog
|
6666762928a01b08700ce77a712fa5b3ad468d96
|
[
"MIT"
] | 17
|
2019-03-04T09:48:01.000Z
|
2022-03-28T01:55:18.000Z
|
pages/captcha.py
|
WK8612/myblog
|
8af7f079341f656678fde707fcec680f8d5269ac
|
[
"MIT"
] | 10
|
2019-06-09T05:54:38.000Z
|
2019-09-11T09:59:51.000Z
|
pages/captcha.py
|
WK8612/myblog
|
8af7f079341f656678fde707fcec680f8d5269ac
|
[
"MIT"
] | 6
|
2019-03-06T09:35:55.000Z
|
2019-08-28T14:04:27.000Z
|
import os
import base64
from io import BytesIO
import string
import random
import pickle
from matplotlib.font_manager import fontManager
from fontTools.ttLib import TTFont
from PIL import Image, ImageDraw, ImageColor, ImageFilter, ImageFont
from django.conf import settings
def char_in_font(unicode_char, ttf_font):
    '''
    Return True if *unicode_char* has a cmap entry in *ttf_font*.

    *ttf_font* may be a font file path (opened here via fontTools) or an
    already-open TTFont instance.
    from: https://stackoverflow.com/questions/43834362/python-unicode-rendering-how-to-know-if-a-unicode-character-is-missing-from-the/43857892#43857892
    '''
    if isinstance(ttf_font, str):
        # fontNumber=1 selects a face inside TTC collections; presumably
        # ignored for single-face TTFs -- TODO confirm.
        font = TTFont(
            file=ttf_font,
            allowVID=True,
            ignoreDecompileErrors=True,
            fontNumber=1
        )
    else:
        font = ttf_font
    # scan every Unicode cmap subtable for the code point
    cmap_tables = getattr(font['cmap'], 'tables', None)
    if cmap_tables is not None:
        for cmap in cmap_tables:
            if cmap.isUnicode():
                if ord(unicode_char) in cmap.cmap:
                    return True
    return False
def supported_fonts(char):
    '''
    Return the file paths of all matplotlib-known fonts that can render *char*.
    from: https://stackoverflow.com/questions/18821795/how-can-i-get-list-of-font-familyor-name-of-font-in-matplotlib/18821968#18821968
    '''
    return [
        font.fname for font in fontManager.ttflist
        # the registry may list stale paths, so verify existence first
        if os.path.exists(font.fname) and char_in_font(char, font.fname)
    ]
def random_hexdigits(len=1):
    """Return `len` distinct random hex-digit characters as a list.

    The parameter name shadows the builtin ``len``; it is kept unchanged
    for backward compatibility with existing callers.
    """
    pool = string.hexdigits
    return random.sample(pool, len)
def punctuation(len=1):
    """Return `len` random punctuation characters (noise glyphs) as a tuple."""
    chars = random.sample(string.punctuation, len)
    return tuple(chars)
def random_color(min=64, max=255):
    """Return a random (r, g, b) tuple with each channel in [min, max]."""
    channels = [random.randint(min, max) for _ in range(3)]
    return tuple(channels)
def fill_color(draw, image, interval):
    """Sprinkle random noise pixels over the image on an `interval` grid."""
    for x in range(0, image.width, interval):
        for y in range(0, image.height, interval):
            draw.point((x, y), fill=random_color())
def fill_dischar(draw, image, interval):
    '''
    Draw random punctuation "noise" glyphs across the image.

    One glyph is drawn every *interval* pixels horizontally, at a random
    height within the upper half of the image.
    '''
    for i in range(0, image.width, interval):
        dis = punctuation()
        # keep the glyph inside the upper half, away from the top edge
        j = random.randrange(3, image.height // 2 - 3)
        font = ImageFont.truetype(get_rangom_font(), 10)
        draw.text((i, j), dis[0], fill=random_color(64, 255), font=font)
def fill_char(draw, image, num, interval):
    '''
    Draw *num* random captcha characters and return the secret string.

    Characters are spread evenly across the width, each with a small
    random vertical jitter and a random font.
    '''
    secret = ''
    for i in range(num):
        cha = random_hexdigits()
        secret += str(cha[0])
        # clamp the jitter range so randrange never receives an empty range
        j_max = image.height // 4 - 1
        if j_max < 1:
            j_max = 1
        j = random.randrange(0, j_max)
        positon = (image.width * (i / num) + interval, j - 2)
        # print(positon)
        # print(image.size)
        font = ImageFont.truetype(get_rangom_font(), 22)
        draw.text(positon, cha[0], fill=random_color(32, 127), font=font)
    return secret
def generate_image(width=90, height=26, color=(192, 192, 192)):
    '''
    Build a captcha image and return (PIL.Image, secret_text).
    '''
    image = Image.new('RGB', (width, height), color)
    draw = ImageDraw.Draw(image)
    fill_color(draw, image, 5)  # pixel noise
    fill_dischar(draw, image, 10)  # punctuation noise
    secret = fill_char(draw, image, 4, 5)  # the actual 4-char code
    return image, secret
def image2base64(image):
buffered = BytesIO()
image.save(buffered, format='JPEG')
img_str = 'data:image/jpeg;base64,{0}'.format(
base64.b64encode(buffered.getvalue()).decode('ascii')
)
buffered.close()
return img_str
def get_rangom_font():
    '''
    Return a random font path capable of rendering CJK (probed with '垚').

    The (slow) font scan is cached on disk with pickle under
    <BASE_DIR>/cache/font-list.cache.  NOTE: the misspelled name
    ("rangom") is kept because callers in this module use it.
    '''
    cache_folder = os.path.join(settings.BASE_DIR, 'cache')
    if not os.path.exists(cache_folder):
        os.makedirs(cache_folder)
    font_list_cache = os.path.join(cache_folder, 'font-list.cache')
    font_list = [None]
    if os.path.exists(font_list_cache):
        with open(font_list_cache, 'rb') as fb:
            # NOTE(review): pickle.load is unsafe if this cache file could be
            # written by untrusted parties -- confirm deployment assumptions.
            font_list = pickle.load(fb)
    else:
        font_list = supported_fonts('垚')
        with open(font_list_cache, 'wb') as fb:
            pickle.dump(font_list, fb, pickle.HIGHEST_PROTOCOL)
    return random.choice(font_list)
def web_captcha():
    """Generate a captcha and return (base64 JPEG data URI, secret text).

    Bug fix: the PIL image is now closed even if encoding raises, instead
    of leaking the image buffer on error.
    """
    image, secret = generate_image()
    try:
        img_str = image2base64(image)
    finally:
        image.close()
    return img_str, secret
if __name__ == '__main__':
    # Ad-hoc benchmark: time how long the font scan for '垚' support takes.
    from datetime import datetime
    from pprint import pprint
    dt_begin = datetime.now()
    pprint(supported_fonts('垚'))
    dt_end = datetime.now()
    dt_diff = dt_end - dt_begin
    print('begin:', dt_begin.strftime('%Y-%m-%d %H:%M:%S.%f'))
    print(' end:', dt_end.strftime('%Y-%m-%d %H:%M:%S.%f'))
    print('datetime used: {0}.{1} seconds'.format(dt_diff.seconds, dt_diff.microseconds))
# reference links for the base64 encoding approach used above
'''
https://stackoverflow.com/questions/48229318/how-to-convert-image-pil-into-base64-without-saving?rq=1
https://stackoverflow.com/questions/31826335/how-to-convert-pil-image-image-object-to-base64-string
'''
| 30.272727
| 152
| 0.634921
|
4a04a539bdcec91ad7da1e82bde99b0076154653
| 2,844
|
py
|
Python
|
bot.py
|
Akashakashav/video-merge-bot
|
0e17b4e07067014fe216829c6223b13c32f3e87a
|
[
"MIT"
] | null | null | null |
bot.py
|
Akashakashav/video-merge-bot
|
0e17b4e07067014fe216829c6223b13c32f3e87a
|
[
"MIT"
] | null | null | null |
bot.py
|
Akashakashav/video-merge-bot
|
0e17b4e07067014fe216829c6223b13c32f3e87a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from logging import DEBUG
import subprocess
import os
import requests
import logging
import sys
from autologging import logged, traced
import telebot
from decouple import config
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
API_TOKEN = config('API_TOKEN')
bot = telebot.TeleBot(API_TOKEN)
users_files = {}
@bot.message_handler(commands=['start'])
def start_command(message):
    """Reply to /start with a greeting and a deploy-credit button."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    keyboard.add(
        telebot.types.InlineKeyboardButton(
            'Deployed By', url='telegram.me/kannaadiga'
        )
    )
    bot.send_message(
        message.chat.id,
        'Greetings! Video Merge Bot Here 🤗\n\n' +
        'To Get Help Press /help',
        reply_markup=keyboard
    )
@bot.message_handler(content_types=['video'])
def handle_video(message):
    """Queue the received video's file_id in this chat's pending list."""
    chat_id = message.chat.id
    # setdefault creates the per-chat list on first use in one step
    users_files.setdefault(chat_id, []).append(message.video.file_id)
@bot.message_handler(commands=['merge'])
def merge(message):
    """Merge the user's queued videos and send the result back.

    Downloads every queued file_id to a numbered temp file, builds an
    ffmpeg concat list, runs ffmpeg, replies with the merged video, then
    clears the queue and removes the temporary files.
    """
    chat_id = message.chat.id
    # Stops method if user hasn't sent any videos
    if chat_id not in users_files:
        bot.send_message(chat_id,
            'You Haven\'t Send Any Video For Merge 🥺\n\n'
            'Please Send Me Videos First and Press! /merge 🤗'
        )
        return None
    inputs = list()
    for i, file_id in enumerate(users_files[chat_id]):
        file_info = bot.get_file(file_id)
        response = requests.get(
            'https://api.telegram.org/file/bot{0}/{1}'.format(
                API_TOKEN, file_info.file_path
            )
        )
        inputs.append("file '{}'".format(i))
        with open(str(i), 'wb') as arq:
            arq.write(response.content)
    with open('inputs.txt', 'w') as arq:
        arq.write('\n'.join(inputs))
    # Bug fix: '-y' overwrites a leftover out.mp4; without it ffmpeg prompts
    # on stdin and the second /merge in the bot's lifetime hangs forever.
    subprocess.call(
        ['ffmpeg', '-y', '-f', 'concat', '-i', 'inputs.txt', '-c', 'copy',
         'out.mp4']
    )
    with open('out.mp4', 'rb') as video:
        bot.send_video(chat_id, video)
    # Clean up the downloaded chunks and the list file so they don't pile up.
    for i in range(len(users_files[chat_id])):
        try:
            os.remove(str(i))
        except OSError:
            pass
    try:
        os.remove('inputs.txt')
    except OSError:
        pass
    users_files[chat_id] = []
@bot.message_handler(commands=['help'])
def help_command(message):
    """Reply to /help with usage instructions and a contact button."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    keyboard.add(
        telebot.types.InlineKeyboardButton(
            'Message The Developer', url='telegram.me/kannaadiga'
        )
    )
    bot.send_message(
        message.chat.id,
        '1) Send Two Of Your MP4 Videos (Which You Want To Merge)\n\n' +
        '2) After That Press Me! /merge ☺️',
        reply_markup=keyboard
    )
logger.info("Yeah I'm running!")
bot.polling()
| 25.854545
| 79
| 0.636428
|
4a04a6d80c17cffb5230d23506e588267abfd900
| 603
|
py
|
Python
|
shape_bruteforce/errors.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | 2
|
2020-07-27T15:02:57.000Z
|
2022-03-12T02:41:02.000Z
|
shape_bruteforce/errors.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | null | null | null |
shape_bruteforce/errors.py
|
ahmedkhalf/Shape-Bruteforce
|
4a9c205c9777c07a1fa7ecf7f4b27549b2d7dc7a
|
[
"MIT"
] | null | null | null |
class ImageDepthError(Exception):
    """Raised when an image's depth (third axis size) is not what it should be."""
    def __init__(self, given_depth, expected_depths):
        # Build the message once and share it with the Exception base class.
        msg = f"Unexpected image depth: {given_depth}, expected {expected_depths}."
        self.message = msg
        super().__init__(msg)
class ImageDimensionError(Exception):
    """Raised when an image's number of dimensions is not what it should be."""
    def __init__(self, given_dim, expected_dims):
        # Build the message once and share it with the Exception base class.
        msg = f"Unexpected image dimension: {given_dim}, expected {expected_dims}."
        self.message = msg
        super().__init__(msg)
| 35.470588
| 92
| 0.703151
|
4a04a8192383ec4cea3f68227f2bb7aa0f6648b9
| 4,166
|
py
|
Python
|
examples/train_mrbrains_9_classes.py
|
eynaij/MedicalZooPytorch_RibFrac
|
720cd2a3b7e62a47ed35b9e41e15db92e802ffb8
|
[
"MIT"
] | 2
|
2020-09-20T15:59:37.000Z
|
2021-04-12T09:28:35.000Z
|
examples/train_mrbrains_9_classes.py
|
eynaij/MedicalZooPytorch_RibFrac
|
720cd2a3b7e62a47ed35b9e41e15db92e802ffb8
|
[
"MIT"
] | 1
|
2020-12-17T02:38:05.000Z
|
2020-12-17T02:38:05.000Z
|
examples/train_mrbrains_9_classes.py
|
eynaij/MedicalZooPytorch_RibFrac
|
720cd2a3b7e62a47ed35b9e41e15db92e802ffb8
|
[
"MIT"
] | null | null | null |
# Python libraries
import argparse
import os
import torch
import lib.medloaders as medical_loaders
import lib.medzoo as medzoo
import lib.train as train
# Lib files
import lib.utils as utils
from lib.losses3D.dice import DiceLoss
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
import torch.distributed as dist
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
seed = 1777777
torch.manual_seed(seed)
def main():
    """Entry point: parse args, build data loaders and model, and train.

    Assumes CUDA + NCCL when --distributed is set (the default) and that
    NVIDIA apex is installed for sync-BN / DDP.
    """
    args = get_arguments()
    if args.distributed:
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled." #1
    torch.backends.cudnn.benchmark = True
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)
    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(
        args, path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    # 11 raw label classes; scoring skips indices beyond args.classes
    criterion = DiceLoss(classes=11, skip_index_after=args.classes)
    if args.sync_bn:
        # Bug fix: the module only does `from apex import ...`, which never
        # binds the name `apex`, so `apex.parallel...` raised NameError here.
        import apex
        model = apex.parallel.convert_syncbn_model(model)
    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")
    if args.distributed:
        model = DDP(model, delay_allreduce=True)
    trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator,
                            valid_data_loader=val_generator, lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def get_arguments():
    """Define and parse the CLI arguments for MRBrains 9-class training.

    Also derives args.save (checkpoint directory) from the model name,
    a timestamp, and the dataset name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSz', type=int, default=8)
    parser.add_argument('--dataset_name', type=str, default="mrbrains9")
    parser.add_argument('--dim', nargs="+", type=int, default=(128, 128, 48))
    parser.add_argument('--classes', type=int, default=9)
    parser.add_argument('--nEpochs', type=int, default=200)
    parser.add_argument('--inChannels', type=int, default=3)
    parser.add_argument('--inModalities', type=int, default=3)
    parser.add_argument('--samples_train', type=int, default=10)
    parser.add_argument('--samples_val', type=int, default=10)
    parser.add_argument('--threshold', default=0.1, type=float)
    parser.add_argument('--augmentation', default='no', type=str,
                        help='Tensor normalization: options max, mean, global')
    parser.add_argument('--normalization', default='global_mean', type=str,
                        help='Tensor normalization: options max, mean, global')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--split', default=0.9, type=float, help='Select percentage of training data(default: 0.8)')
    parser.add_argument('--lr', default=1e-4, type=float,
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--cuda', action='store_true', default=False)
    parser.add_argument('--model', type=str, default='UNET3D',
                        choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET'))
    parser.add_argument('--opt', type=str, default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    parser.add_argument('--log_dir', type=str,
                        default='../runs/')
    parser.add_argument('--distributed', action='store_true', default=True,
                        help='whether use distributed parallel training')
    parser.add_argument('--sync_bn', action='store_true', default=True,
                        help='enabling apex sync BN')
    args = parser.parse_args()
    # checkpoint directory keyed by model, timestamp, and dataset name
    args.save = '/data/hejy/MedicalZooPytorch/saved_models/' + args.model + '_checkpoints/' + args.model + '_{}_{}_'.format(
        utils.datestr(), args.dataset_name)
    return args
if __name__ == '__main__':
main()
| 39.301887
| 124
| 0.655065
|
4a04a85fa50bf9b57f7690bd595861d7c1eee09b
| 2,934
|
py
|
Python
|
testsuite/test_all.py
|
licensio/pycodestyle
|
bbaa07a3806a91fd4d57e352cff92c0b56c2265c
|
[
"MIT"
] | null | null | null |
testsuite/test_all.py
|
licensio/pycodestyle
|
bbaa07a3806a91fd4d57e352cff92c0b56c2265c
|
[
"MIT"
] | null | null | null |
testsuite/test_all.py
|
licensio/pycodestyle
|
bbaa07a3806a91fd4d57e352cff92c0b56c2265c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import sys
import unittest
import pycodestyle
from testsuite.support import init_tests, selftest, ROOT_DIR
# Note: please only use a subset of unittest methods which were present
# in Python 2.5: assert(True|False|Equal|NotEqual|Raises)
class PycodestyleTestCase(unittest.TestCase):
    """Test the standard errors and warnings (E and W)."""
    def setUp(self):
        # Style guide restricted to E/W checks over the testsuite directory.
        self._style = pycodestyle.StyleGuide(
            paths=[os.path.join(ROOT_DIR, 'testsuite')],
            select='E,W', quiet=True)
    def test_doctest(self):
        """Run pycodestyle's embedded doctests."""
        import doctest
        fail_d, done_d = doctest.testmod(
            pycodestyle, verbose=False, report=False
        )
        self.assertTrue(done_d, msg='tests not found')
        self.assertFalse(fail_d, msg='%s failure(s)' % fail_d)
    def test_selftest(self):
        """Run the self-test suite bundled with the testsuite support module."""
        fail_s, done_s = selftest(self._style.options)
        self.assertTrue(done_s, msg='tests not found')
        self.assertFalse(fail_s, msg='%s failure(s)' % fail_s)
    def test_checkers_testsuite(self):
        """Check the testsuite files themselves produce no unexpected errors."""
        init_tests(self._style)
        report = self._style.check_files()
        self.assertFalse(report.total_errors,
                         msg='%s failure(s)' % report.total_errors)
    def test_own_dog_food(self):
        """pycodestyle's own sources must pass, modulo the known W504."""
        files = [pycodestyle.__file__.rstrip('oc'), __file__.rstrip('oc'),
                 os.path.join(ROOT_DIR, 'setup.py')]
        # init_report installs the reporter class; check_files returns the report
        report = self._style.init_report(pycodestyle.StandardReport)
        report = self._style.check_files(files)
        self.assertEqual(list(report.messages.keys()), ['W504'],
                         msg='Failures: %s' % report.messages)
    def test_list_reporter(self):
        """ListReport must collect structured entries with a message_code."""
        files = [pycodestyle.__file__.rstrip('oc'), __file__.rstrip('oc'),
                 os.path.join(ROOT_DIR, 'setup.py')]
        report = self._style.init_report(pycodestyle.ListReport)
        report = self._style.check_files(files)
        self.assertTrue(len(report.entries) > 0)
        self.assertIsInstance(report.entries[0], dict)
        self.assertEqual(report.entries[0]["message_code"], 'W504')
def suite():
    """Aggregate every test case of the pycodestyle test suite."""
    from testsuite import (
        test_api,
        test_blank_lines,
        test_parser,
        test_shell,
        test_util,
    )
    # keep the historical registration order
    cases = (
        PycodestyleTestCase,
        test_api.APITestCase,
        test_blank_lines.TestBlankLinesDefault,
        test_blank_lines.TestBlankLinesTwisted,
        test_parser.ParserTestCase,
        test_shell.ShellTestCase,
        test_util.UtilTestCase,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
def _main():
    """Run the aggregate suite with a verbose text runner and return the result."""
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite())
if __name__ == '__main__':
sys.exit(not _main())
| 34.517647
| 77
| 0.669052
|
4a04a88c16ae1b265204a1998033417770b0d1b5
| 2,361
|
py
|
Python
|
grades/migrations/0001_initial.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | null | null | null |
grades/migrations/0001_initial.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | null | null | null |
grades/migrations/0001_initial.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | 1
|
2021-05-18T19:01:14.000Z
|
2021-05-18T19:01:14.000Z
|
# Generated by Django 3.2.3 on 2021-05-17 20:40
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration for the grades app.

    Creates Exam (per course group and moed), ExamStatistics (1:1 with
    Exam) and ExamGradeRange (histogram buckets per exam).
    """
    initial = True
    dependencies = [
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Exam',
            fields=[
                ('exam_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('moed', models.PositiveSmallIntegerField()),
                ('students_count', models.PositiveSmallIntegerField()),
                ('failures_count', models.PositiveSmallIntegerField()),
                ('course_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exams', to='courses.coursegroup')),
            ],
            options={
                # one exam per (course group, moed) pair
                'ordering': ('course_group', 'moed'),
                'unique_together': {('course_group', 'moed')},
            },
        ),
        migrations.CreateModel(
            name='ExamStatistics',
            fields=[
                ('exam_statistics_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('mean', models.FloatField(blank=True, null=True)),
                ('median', models.FloatField(blank=True, null=True)),
                ('standard_deviation', models.FloatField(blank=True, null=True)),
                ('exam', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='statistics', to='grades.exam')),
            ],
        ),
        migrations.CreateModel(
            name='ExamGradeRange',
            fields=[
                ('exam_grade_range_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('lowest_grade', models.PositiveSmallIntegerField()),
                ('highest_grade', models.PositiveSmallIntegerField()),
                ('students_in_range', models.PositiveSmallIntegerField()),
                ('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='grades', to='grades.exam')),
            ],
            options={
                # buckets ordered by their lower bound, unique per exam
                'ordering': ('lowest_grade',),
                'unique_together': {('exam', 'lowest_grade')},
            },
        ),
    ]
| 42.160714
| 146
| 0.564591
|
4a04a89319297a5ff8a0c7dc838ae177de91495e
| 1,697
|
py
|
Python
|
qa327_test/integration/sell/test_sell_user_walk_through.py
|
atangent/chad-CI-python
|
52de5685aebce26b07ebce247e2d3e5bda7087f9
|
[
"MIT"
] | null | null | null |
qa327_test/integration/sell/test_sell_user_walk_through.py
|
atangent/chad-CI-python
|
52de5685aebce26b07ebce247e2d3e5bda7087f9
|
[
"MIT"
] | 15
|
2020-10-15T17:02:08.000Z
|
2020-12-14T03:16:25.000Z
|
qa327_test/integration/sell/test_sell_user_walk_through.py
|
atangent/chad-CI-python
|
52de5685aebce26b07ebce247e2d3e5bda7087f9
|
[
"MIT"
] | 2
|
2020-12-11T17:51:36.000Z
|
2021-10-05T23:07:21.000Z
|
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
# integration testing: the test case interacts with the
# browser, and test the whole system (frontend+backend).
@pytest.mark.usefixtures('server')
class SellTicket(BaseCase):
    """Browser-level integration test for the sell-ticket feature."""
    def register(self):
        """Register a fresh user account through the /register form."""
        self.open(base_url + '/register')
        self.type("#email", "test_integration@test.com")
        self.type("#name", "test0")
        self.type("#password", "Test0!qwertyuiop")
        self.type("#password2", "Test0!qwertyuiop")
        self.click('input[type="submit"]')
    def login(self):
        """Log in with the account created by register()."""
        self.open(base_url + '/login')
        self.type("#email", "test_integration@test.com")
        self.type("#password", "Test0!qwertyuiop")
        self.click('input[type="submit"]')
    def sell_ticket(self):
        """Submit the sell form: 3 'Avengers' tickets at 35, expiring 2022-12-20."""
        self.open(base_url + '/')
        self.type("#sell_name", "Avengers")
        self.type("#sell_quantity", "3")
        self.type("#sell_price", "35")
        self.type("#sell_exp_date", "20221220")
        self.click("#sell_submit")
    def test_sell_ticket(self):
        """End-to-end: register, log in, sell, and verify the listing appears."""
        self.register()
        self.login()
        self.sell_ticket()
        self.open(base_url + "/")
        self.assert_element("#ticket-Avengers")
        self.assert_text("Avengers", "#ticket-Avengers-name")
        self.assert_text("3", "#ticket-Avengers-quantity")
        self.assert_text("35", "#ticket-Avengers-price")
        self.assert_text("2022-12-20", "#ticket-Avengers-date")
| 33.94
| 72
| 0.622275
|
4a04a93f353fe77fff976c98870ba4a16bc001ce
| 6,028
|
py
|
Python
|
test/test_util.py
|
snskiff/python-snap7
|
19fc871de678a972b60f669db5b610202425378e
|
[
"MIT"
] | 6
|
2020-05-12T20:38:07.000Z
|
2020-11-17T17:39:02.000Z
|
test/test_util.py
|
RevolutionPi/python-snap7
|
0953beeb34f47c3fec490240d7ffe2edf4e8ea37
|
[
"MIT"
] | null | null | null |
test/test_util.py
|
RevolutionPi/python-snap7
|
0953beeb34f47c3fec490240d7ffe2edf4e8ea37
|
[
"MIT"
] | 2
|
2018-11-16T11:59:30.000Z
|
2020-11-17T17:39:06.000Z
|
import unittest
import re
from snap7 import util
test_spec = """
4 ID INT
6 NAME STRING[4]
12.0 testbool1 BOOL
12.1 testbool2 BOOL
12.2 testbool3 BOOL
12.3 testbool4 BOOL
12.4 testbool5 BOOL
12.5 testbool6 BOOL
12.6 testbool7 BOOL
12.7 testbool8 BOOL
13 testReal REAL
17 testDword DWORD
21 testint2 INT
"""
_bytearray = bytearray([
0, 0, # test int
4, 4, ord('t'), ord('e'), ord('s'), ord('t'), # test string
128*0 + 64*0 + 32*0 + 16*0 +
8*1 + 4*1 + 2*1 + 1*1, # test bools
68, 78, 211, 51, # test real
255, 255, 255, 255, # test dword
0, 0, # test int 2
])
class TestS7util(unittest.TestCase):
    """Tests for snap7.util DB_Row / DB parsing against the module fixtures."""
    def test_get_string(self):
        """
        Text extraction from string from bytearray
        """
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        self.assertEqual(row['NAME'], 'test')
    def test_write_string(self):
        """Writing STRING fields, including rejection of oversized values."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        row['NAME'] = 'abc'
        self.assertEqual(row['NAME'], 'abc')
        row['NAME'] = ''
        self.assertEqual(row['NAME'], '')
        try:
            row['NAME'] = 'waaaaytoobig'
        except ValueError:
            pass
        # value should still be empty
        self.assertEqual(row['NAME'], '')
    def test_get_int(self):
        """Reading INT fields from the fixture (both are zero)."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        x = row['ID']
        y = row['testint2']
        self.assertEqual(x, 0)
        self.assertEqual(y, 0)
    def test_set_int(self):
        """Writing an INT field round-trips."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        row['ID'] = 259
        self.assertEqual(row['ID'], 259)
    def test_get_int_values(self):
        """INT round-trip across signed 16-bit boundary values."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        for value in (
                -32768,
                -16385,
                -256,
                -128,
                -127,
                0,
                127,
                128,
                255,
                256,
                16384,
                32767):
            row['ID'] = value
            self.assertEqual(row['ID'], value)
    def test_get_bool(self):
        """Reading BOOL bits set/cleared in the fixture byte."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        self.assertEqual(row['testbool1'], 1)
        self.assertEqual(row['testbool8'], 0)
    def test_set_bool(self):
        """Writing BOOL bits round-trips."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        row['testbool8'] = 1
        row['testbool1'] = 0
        self.assertEqual(row['testbool8'], 1)
        self.assertEqual(row['testbool1'], 0)
    def test_db_creation(self):
        """A DB of 10 identical rows parses every row consistently."""
        test_array = bytearray(_bytearray * 10)
        test_db = util.DB(1, test_array, test_spec,
                          row_size=len(_bytearray),
                          size=10,
                          layout_offset=4,
                          db_offset=0)
        self.assertEqual(len(test_db.index), 10)
        for i, row in test_db:
            # print row
            self.assertEqual(row['testbool1'], 1)
            self.assertEqual(row['testbool2'], 1)
            self.assertEqual(row['testbool3'], 1)
            self.assertEqual(row['testbool4'], 1)
            self.assertEqual(row['testbool5'], 0)
            self.assertEqual(row['testbool6'], 0)
            self.assertEqual(row['testbool7'], 0)
            self.assertEqual(row['testbool8'], 0)
            self.assertEqual(row['NAME'], 'test')
    def test_get_real(self):
        """REAL read is approximately the fixture value 827.3."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        self.assertTrue(0.01 > (row['testReal'] - 827.3) > -0.1)
    def test_set_real(self):
        """REAL write round-trips within float precision."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        row['testReal'] = 1337.1337
        self.assertTrue(0.01 > (row['testReal'] - 1337.1337) > -0.01)
    def test_set_dword(self):
        """DWORD write round-trips."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        # The range of numbers is 0 to 4294967295.
        row['testDword'] = 9999999
        self.assertEqual(row['testDword'], 9999999)
    def test_get_dword(self):
        """DWORD read of the all-ones fixture bytes."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        self.assertEqual(row['testDword'], 4294967295)
    def test_export(self):
        """export() returns a dict keyed by field name."""
        test_array = bytearray(_bytearray)
        row = util.DB_Row(test_array, test_spec, layout_offset=4)
        data = row.export()
        self.assertIn('testDword', data)
        self.assertIn('testbool1', data)
        self.assertEqual(data['testbool5'], 0)
def print_row(data):
"""print a single db row in chr and str
"""
index_line = ""
pri_line1 = ""
chr_line2 = ""
asci = re.compile('[a-zA-Z0-9 ]')
for i, xi in enumerate(data):
# index
if not i % 5:
diff = len(pri_line1) - len(index_line)
i = str(i)
index_line += diff * ' '
index_line += i
# i = i + (ws - len(i)) * ' ' + ','
# byte array line
str_v = str(xi)
pri_line1 += str(xi) + ','
# char line
c = chr(xi)
c = c if asci.match(c) else ' '
# align white space
w = len(str_v)
c = c + (w - 1) * ' ' + ','
chr_line2 += c
print(index_line)
print(pri_line1)
print(chr_line2)
if __name__ == '__main__':
unittest.main()
| 30.14
| 69
| 0.530358
|
4a04aa56f7c58a6e82572bcb5f12161cc34ec132
| 2,110
|
py
|
Python
|
tools/py_analysis/analyze_cube.py
|
cristigr/macrobase
|
de032865b2ed03405b35c534ebace382536f53f4
|
[
"Apache-2.0"
] | 677
|
2016-01-04T04:05:50.000Z
|
2022-03-24T06:37:27.000Z
|
tools/py_analysis/analyze_cube.py
|
cristigr/macrobase
|
de032865b2ed03405b35c534ebace382536f53f4
|
[
"Apache-2.0"
] | 249
|
2015-12-29T03:41:31.000Z
|
2020-09-02T03:11:30.000Z
|
tools/py_analysis/analyze_cube.py
|
cristigr/macrobase
|
de032865b2ed03405b35c534ebace382536f53f4
|
[
"Apache-2.0"
] | 148
|
2015-12-29T03:25:48.000Z
|
2021-08-25T03:59:52.000Z
|
# NOTE: Python 2 script (uses `print` statements); requires a local Postgres
# with the mapmatch_history/sf_datasets tables and median/percentile UDFs.
import pandas as pd
import numpy as np
from sklearn import linear_model
from collections import defaultdict, Iterable
from itertools import chain, combinations
import operator
import psycopg2
conn = psycopg2.connect("dbname='postgres' user='pbailis' host='localhost'")
cur = conn.cursor()
cols = "hardware_manufacturer,hardware_model,hardware_carrier,android_fw_version,hardware_bootloader"
# CUBE over the hardware columns, scored so slow (low data_count_minutes)
# groups with enough support sort first.
sql = """
CREATE TEMP VIEW m AS SELECT data_count_minutes, %s FROM mapmatch_history H, sf_datasets D WHERE H.dataset_id = D.id AND data_count_minutes < 4 LIMIT 10000;
SELECT SUM(1.0/log(data_count_minutes))*pow(COUNT(*), -.5) as score, SUM(1.0/data_count_minutes) as sum, median(data_count_minutes) as median_time, percentile_cont(.01) WITHIN GROUP(ORDER BY data_count_minutes) as p99, percentile_cont(.25) WITHIN GROUP(ORDER BY data_count_minutes) as p75, COUNT(*) as cnt, %s FROM m GROUP BY CUBE(%s) ORDER BY score DESC;""" % (cols, cols, cols)
print sql
cur.execute(sql)
colnames = [desc[0] for desc in cur.description]
cur_score = None
cur_rows = []
df = None
# Rows arrive sorted by score; group rows sharing a score (to 8 decimals)
# and keep, per group, the CUBE row with the fewest NULL grouping columns
# (i.e. the most specific one).
for row in cur.fetchall():
    if cur_score is not None and round(row[0], 8) != round(cur_score, 8):
        if cur_score:
            max_idx = -1
            min_idx_count = 10000000
            for e_no in range(0, len(cur_rows)):
                entry = cur_rows[e_no]
                entry_count = 0
                for val in entry:
                    if val == None:
                        entry_count += 1
                if entry_count < min_idx_count:
                    min_idx_count = entry_count
                    max_idx = e_no
            fd = [f for f in cur_rows[max_idx]]
            fd[1] = float(fd[1])
            data = pd.DataFrame([fd], columns=colnames)
            if df is None:
                df = data
            else:
                df = pd.concat([df, data])
        cur_rows = []
        cur_score = row[0]
    cur_rows.append(row)
pd.set_option('display.max_rows', len(df))
pd.set_option('expand_frame_repr', False)
print df
| 32.96875
| 379
| 0.613744
|
4a04ab6c2ac32248ad1e4fea26dcea80b91ac1d3
| 841
|
py
|
Python
|
srim3d.py
|
Gustav-Lindberg/Srim3D.py
|
3686d44f25234b9b3fd6f87a7d00219d3ed3183e
|
[
"Apache-2.0"
] | null | null | null |
srim3d.py
|
Gustav-Lindberg/Srim3D.py
|
3686d44f25234b9b3fd6f87a7d00219d3ed3183e
|
[
"Apache-2.0"
] | null | null | null |
srim3d.py
|
Gustav-Lindberg/Srim3D.py
|
3686d44f25234b9b3fd6f87a7d00219d3ed3183e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# srim3d.py by Gustav Lindberg
# https://github.com/Gustav-Lindberg/Srim3D.py
import numpy as np
import re as RegExp
def extractData(file="Ioniz-3D.txt"):
data = []
with open(file) as file:
hasStarted = False
topRegex = RegExp.compile(r"^-+\s-+$")
spaceRegex = RegExp.compile(r"\s+")
for line in file:
if(topRegex.match(line)):
hasStarted = True
continue
elif(not hasStarted):
continue
numbers = list(map(lambda s: float(s), spaceRegex.split(line.strip())))
data.append(numbers[1:])
return np.array(data)
def ions(data):
return np.array(list(map(lambda row: sum(row), data)))
def transverseIons(data):
return np.array(list(map(lambda column: sum(column), data.T)))
| 29
| 83
| 0.589774
|
4a04ab83d111e7e756194eda214b8090dc34c34f
| 7,805
|
py
|
Python
|
pypy/bin/pyinteractive.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-06-02T23:02:09.000Z
|
2021-06-02T23:02:09.000Z
|
pypy/bin/pyinteractive.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-03-30T18:08:41.000Z
|
2021-03-30T18:08:41.000Z
|
pypy/bin/pyinteractive.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2022-03-30T11:42:37.000Z
|
2022-03-30T11:42:37.000Z
|
#!/usr/bin/env python
"""Main entry point into the PyPy interpreter. For a list of options, type
pyinteractive.py --help
"""
import os
import sys
import time
pypy_path = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.insert(0, os.path.abspath(pypy_path))
from pypy.tool import option
from pypy.interpreter import main, interactive, error, gateway
from rpython.config.config import OptionDescription, BoolOption, StrOption
from rpython.config.config import Config, to_optparse
from pypy.config import pypyoption
cmdline_optiondescr = OptionDescription("interactive", "the options of pyinteractive.py", [
BoolOption("verbose", "show verbose interpreter-level traceback",
default=os.getenv("PYPY_TB"), cmdline="-v"),
BoolOption("interactive", "inspect interactively after running script",
default=False, cmdline="-i"),
BoolOption("completer", "use readline commandline completer",
default=False, cmdline="-C"),
BoolOption("optimize",
"skip assert statements and remove docstrings when importing modules"
" (this is -OO in regular CPython)",
default=False, cmdline="-O"),
BoolOption("no_site_import", "do not 'import site' on initialization",
default=False, cmdline="-S"),
BoolOption("runmodule",
"library module to be run as a script (terminates option list)",
default=False, cmdline="-m"),
BoolOption("runcommand",
"program passed in as CMD (terminates option list)",
default=False, cmdline="-c"),
StrOption("warn",
"warning control (arg is action:message:category:module:lineno)",
default=None, cmdline="-W"),
])
pypy_init = gateway.applevel('''
def pypy_init(import_site):
if import_site:
import os, sys
_MACOSX = sys.platform == 'darwin'
if _MACOSX:
# __PYVENV_LAUNCHER__, used by CPython on macOS, should be ignored
# since it (possibly) results in a wrong sys.prefix and
# sys.exec_prefix (and consequently sys.path).
old_pyvenv_launcher = os.environ.pop('__PYVENV_LAUNCHER__', None)
try:
import site
except:
import sys
print("'import site' failed", file=sys.stderr)
if _MACOSX and old_pyvenv_launcher:
os.environ['__PYVENV_LAUNCHER__'] = old_pyvenv_launcher
''').interphook('pypy_init')
def set_compiler(option, opt, value, parser):
from rpython.translator.platform import set_platform
set_platform('host', value)
def main_(argv=None):
starttime = time.time()
config, parser = option.get_standard_options()
interactiveconfig = Config(cmdline_optiondescr)
to_optparse(interactiveconfig, parser=parser)
def set_family_of_options(option, opt, value, parser):
from pypy.config.pypyoption import set_pypy_opt_level
set_pypy_opt_level(config, value)
parser.add_option(
'--cc', type=str, action="callback",
callback=set_compiler,
help="Compiler to use for compiling generated C")
parser.add_option(
'--opt', type=str, action="callback",
callback=set_family_of_options,
help="Set the family of options based on -opt=0,1,2,jit...")
args = option.process_options(parser, argv[1:])
if interactiveconfig.verbose:
error.RECORD_INTERPLEVEL_TRACEBACK = True
# --allworkingmodules takes really long to start up, but can be forced on
config.objspace.suggest(allworkingmodules=False)
config.objspace.usemodules.struct = True
if config.objspace.allworkingmodules:
pypyoption.enable_allworkingmodules(config)
if config.objspace.usemodules._continuation:
config.translation.continuation = True
if config.objspace.usemodules.thread:
config.translation.thread = True
# create the object space
space = option.make_objspace(config)
space._starttime = starttime
space.setitem(space.sys.w_dict, space.wrap('executable'),
space.wrap(argv[0]))
if interactiveconfig.optimize:
#change the optimize flag's value and set __debug__ to False
space.appexec([], """():
import sys
flags = list(sys.flags)
flags[3] = 2
sys.flags = type(sys.flags)(flags)
__builtins__.__dict__['__debug__'] = False
""")
# call pypy_find_stdlib: the side-effect is that it sets sys.prefix and
# sys.exec_prefix
executable = argv[0]
space.appexec([space.wrap(executable)], """(executable):
import sys
sys.pypy_find_stdlib(executable)
""")
# set warning control options (if any)
warn_arg = interactiveconfig.warn
if warn_arg is not None:
space.appexec([space.wrap(warn_arg)], """(arg):
import sys
sys.warnoptions.append(arg)""")
w_path = space.sys.get('path')
path = os.getenv('PYTHONPATH')
if path:
path = path.split(os.pathsep)
else:
path = []
path.insert(0, '')
for i, dir in enumerate(path):
space.call_method(w_path, 'insert', space.wrap(i), space.wrap(dir))
# store the command-line arguments into sys.argv
go_interactive = interactiveconfig.interactive
banner = ''
exit_status = 0
command = None
if interactiveconfig.runcommand:
command = args[0]
args[0] = '-c'
if interactiveconfig.runmodule:
command = args.pop(0)
for arg in args:
space.call_method(space.sys.get('argv'), 'append',
space.newfilename(arg))
# load the source of the program given as command-line argument
if interactiveconfig.runcommand:
def doit():
main.run_string(command, space=space)
elif interactiveconfig.runmodule:
def doit():
main.run_module(command, args, space=space)
elif args:
scriptdir = os.path.dirname(os.path.abspath(args[0]))
space.call_method(space.sys.get('path'), 'insert',
space.wrap(0), space.wrap(scriptdir))
def doit():
main.run_file(args[0], space=space)
else:
def doit():
pass
space.call_method(space.sys.get('argv'), 'append', space.wrap(''))
go_interactive = 1
banner = None
try:
def do_start():
space.startup()
pypy_init(space, space.wrap(not interactiveconfig.no_site_import))
if main.run_toplevel(space, do_start,
verbose=interactiveconfig.verbose):
# compile and run it
if not main.run_toplevel(space, doit,
verbose=interactiveconfig.verbose):
exit_status = 1
# start the interactive console
if go_interactive or os.getenv('PYTHONINSPECT'):
try:
import readline
except:
pass
con = interactive.PyPyConsole(
space, verbose=interactiveconfig.verbose,
completer=interactiveconfig.completer)
if banner == '':
banner = '%s / %s'%(con.__class__.__name__,
repr(space))
con.interact(banner)
exit_status = 0
finally:
def doit():
space.finish()
main.run_toplevel(space, doit, verbose=interactiveconfig.verbose)
return exit_status
if __name__ == '__main__':
if hasattr(sys, 'setrecursionlimit'):
# for running "python -i pyinteractive.py -Si -- py.py -Si"
sys.setrecursionlimit(3000)
sys.exit(main_(sys.argv))
| 36.134259
| 91
| 0.618962
|
4a04ac25957752957a8a239342c8cf026dea97fc
| 7,382
|
py
|
Python
|
datastore/tests/unit/test_entity.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2017-05-18T06:58:48.000Z
|
2017-05-18T06:58:48.000Z
|
datastore/tests/unit/test_entity.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
datastore/tests/unit/test_entity.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2022-03-24T01:37:10.000Z
|
2022-03-24T01:37:10.000Z
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
_PROJECT = 'PROJECT'
_KIND = 'KIND'
_ID = 1234
class TestEntity(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.datastore.entity import Entity
return Entity
def _make_one(self, key=None, exclude_from_indexes=()):
klass = self._get_target_class()
return klass(key=key, exclude_from_indexes=exclude_from_indexes)
def test_ctor_defaults(self):
klass = self._get_target_class()
entity = klass()
self.assertIsNone(entity.key)
self.assertIsNone(entity.kind)
self.assertEqual(sorted(entity.exclude_from_indexes), [])
def test_ctor_explicit(self):
_EXCLUDE_FROM_INDEXES = ['foo', 'bar']
key = _Key()
entity = self._make_one(
key=key, exclude_from_indexes=_EXCLUDE_FROM_INDEXES)
self.assertEqual(sorted(entity.exclude_from_indexes),
sorted(_EXCLUDE_FROM_INDEXES))
def test_ctor_bad_exclude_from_indexes(self):
BAD_EXCLUDE_FROM_INDEXES = object()
key = _Key()
self.assertRaises(TypeError, self._make_one, key=key,
exclude_from_indexes=BAD_EXCLUDE_FROM_INDEXES)
def test___eq_____ne___w_non_entity(self):
from google.cloud.datastore.key import Key
key = Key(_KIND, _ID, project=_PROJECT)
entity = self._make_one(key=key)
self.assertFalse(entity == object())
self.assertTrue(entity != object())
def test___eq_____ne___w_different_keys(self):
from google.cloud.datastore.key import Key
_ID1 = 1234
_ID2 = 2345
key1 = Key(_KIND, _ID1, project=_PROJECT)
entity1 = self._make_one(key=key1)
key2 = Key(_KIND, _ID2, project=_PROJECT)
entity2 = self._make_one(key=key2)
self.assertFalse(entity1 == entity2)
self.assertTrue(entity1 != entity2)
def test___eq_____ne___w_same_keys(self):
from google.cloud.datastore.key import Key
name = 'foo'
value = 42
meaning = 9
key1 = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key1, exclude_from_indexes=(name,))
entity1[name] = value
entity1._meanings[name] = (meaning, value)
key2 = Key(_KIND, _ID, project=_PROJECT)
entity2 = self._make_one(key=key2, exclude_from_indexes=(name,))
entity2[name] = value
entity2._meanings[name] = (meaning, value)
self.assertTrue(entity1 == entity2)
self.assertFalse(entity1 != entity2)
def test___eq_____ne___w_same_keys_different_props(self):
from google.cloud.datastore.key import Key
key1 = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key1)
entity1['foo'] = 'Foo'
key2 = Key(_KIND, _ID, project=_PROJECT)
entity2 = self._make_one(key=key2)
entity1['bar'] = 'Bar'
self.assertFalse(entity1 == entity2)
self.assertTrue(entity1 != entity2)
def test___eq_____ne___w_same_keys_props_w_equiv_keys_as_value(self):
from google.cloud.datastore.key import Key
key1 = Key(_KIND, _ID, project=_PROJECT)
key2 = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key1)
entity1['some_key'] = key1
entity2 = self._make_one(key=key1)
entity2['some_key'] = key2
self.assertTrue(entity1 == entity2)
self.assertFalse(entity1 != entity2)
def test___eq_____ne___w_same_keys_props_w_diff_keys_as_value(self):
from google.cloud.datastore.key import Key
_ID1 = 1234
_ID2 = 2345
key1 = Key(_KIND, _ID1, project=_PROJECT)
key2 = Key(_KIND, _ID2, project=_PROJECT)
entity1 = self._make_one(key=key1)
entity1['some_key'] = key1
entity2 = self._make_one(key=key1)
entity2['some_key'] = key2
self.assertFalse(entity1 == entity2)
self.assertTrue(entity1 != entity2)
def test___eq_____ne___w_same_keys_props_w_equiv_entities_as_value(self):
from google.cloud.datastore.key import Key
key = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key)
sub1 = self._make_one()
sub1.update({'foo': 'Foo'})
entity1['some_entity'] = sub1
entity2 = self._make_one(key=key)
sub2 = self._make_one()
sub2.update({'foo': 'Foo'})
entity2['some_entity'] = sub2
self.assertTrue(entity1 == entity2)
self.assertFalse(entity1 != entity2)
def test___eq_____ne___w_same_keys_props_w_diff_entities_as_value(self):
from google.cloud.datastore.key import Key
key = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key)
sub1 = self._make_one()
sub1.update({'foo': 'Foo'})
entity1['some_entity'] = sub1
entity2 = self._make_one(key=key)
sub2 = self._make_one()
sub2.update({'foo': 'Bar'})
entity2['some_entity'] = sub2
self.assertFalse(entity1 == entity2)
self.assertTrue(entity1 != entity2)
def test__eq__same_value_different_exclude(self):
from google.cloud.datastore.key import Key
name = 'foo'
value = 42
key = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key, exclude_from_indexes=(name,))
entity1[name] = value
entity2 = self._make_one(key=key, exclude_from_indexes=())
entity2[name] = value
self.assertFalse(entity1 == entity2)
def test__eq__same_value_different_meanings(self):
from google.cloud.datastore.key import Key
name = 'foo'
value = 42
meaning = 9
key = Key(_KIND, _ID, project=_PROJECT)
entity1 = self._make_one(key=key, exclude_from_indexes=(name,))
entity1[name] = value
entity2 = self._make_one(key=key, exclude_from_indexes=(name,))
entity2[name] = value
entity2._meanings[name] = (meaning, value)
self.assertFalse(entity1 == entity2)
def test___repr___no_key_empty(self):
entity = self._make_one()
self.assertEqual(repr(entity), '<Entity {}>')
def test___repr___w_key_non_empty(self):
key = _Key()
flat_path = ('bar', 12, 'baz', 'himom')
key._flat_path = flat_path
entity = self._make_one(key=key)
entity_vals = {'foo': 'Foo'}
entity.update(entity_vals)
expected = '<Entity%s %s>' % (flat_path, entity_vals)
self.assertEqual(repr(entity), expected)
class _Key(object):
_MARKER = object()
_key = 'KEY'
_partial = False
_path = None
_id = None
_stored = None
def __init__(self, project=_PROJECT):
self.project = project
| 33.554545
| 77
| 0.646302
|
4a04ac642676cf41e3ad81c188002f2b230f1ee1
| 1,415
|
py
|
Python
|
scripts/run_umwm.py
|
sustain-lab/umwm-sustain
|
5988a5523ea00f99902341834c8115473e0bf4a3
|
[
"MIT"
] | 1
|
2018-10-09T22:18:39.000Z
|
2018-10-09T22:18:39.000Z
|
scripts/run_umwm.py
|
sustain-lab/umwm-sustain
|
5988a5523ea00f99902341834c8115473e0bf4a3
|
[
"MIT"
] | null | null | null |
scripts/run_umwm.py
|
sustain-lab/umwm-sustain
|
5988a5523ea00f99902341834c8115473e0bf4a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import f90nml
from netCDF4 import Dataset
import os
import shutil
import subprocess
# TODO umwm must be compiled and executable umwm-run present in path
umwm_path = '/home/milan/sustain/umwm-sustain/umwm-dev'
# path to store finished experiments
output_path = '/home/milan/sustain/umwm-sustain/experiments'
class UMWM():
def __init__(self, src_path, exp_path):
self.src_path = src_path
self.exp_path = exp_path
self.executable = src_path + '/build/bin/umwm-run'
self.namelist = exp_path + '/namelists/main.nml'
os.mkdir(exp_path)
for dir in ['input', 'output', 'namelists']:
os.mkdir(exp_path + '/' + dir)
shutil.copy(src_path + '/namelists/main.nml', self.namelist)
def build(self):
pass
def run(self):
cwd = os.getcwd()
os.chdir(self.exp_path)
subprocess.call(self.executable, shell=True)
os.chdir(cwd)
def set_parameters(self, **kwargs):
nml = f90nml.read(self.namelist)
for key in kwargs:
for sublist in nml.keys():
if key in nml[sublist].keys():
nml[sublist][key] = kwargs[key]
nml.write(self.namelist, force=True)
exp = 1
for wspd in range(5, 65, 5):
umwm = UMWM(umwm_path, output_path + '/run' + '%2.2i' % exp)
umwm.set_parameters(wspd0 = wspd)
umwm.run()
exp += 1
| 28.877551
| 68
| 0.623322
|
4a04ad7322ec9dbad3cb46e324e1ced429ab61be
| 424
|
py
|
Python
|
deepLFM/likelihoods/likelihood.py
|
tomcdonald/Deep-LFM
|
05d9d8f037c65761285201bf67925bf7f65bcbe3
|
[
"Apache-2.0"
] | 1
|
2021-11-05T17:50:09.000Z
|
2021-11-05T17:50:09.000Z
|
deepLFM/likelihoods/likelihood.py
|
tomcdonald/Deep-LFM
|
05d9d8f037c65761285201bf67925bf7f65bcbe3
|
[
"Apache-2.0"
] | null | null | null |
deepLFM/likelihoods/likelihood.py
|
tomcdonald/Deep-LFM
|
05d9d8f037c65761285201bf67925bf7f65bcbe3
|
[
"Apache-2.0"
] | 2
|
2021-06-28T07:52:34.000Z
|
2021-09-01T17:16:11.000Z
|
import abc
class Likelihood:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def log_cond_prob(self, output, latent):
"""
Likelihoods must compute log[p(y|F)], where y are outputs & F are latent values.
"""
raise NotImplementedError("Implemented by subclass.")
@abc.abstractmethod
def predict(self, latent):
raise NotImplementedError("Implemented by subclass.")
| 28.266667
| 88
| 0.667453
|
4a04adc78f4aed90c70c1fe6968e70853cbeaa41
| 2,120
|
py
|
Python
|
tests/default_storage_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T13:36:34.000Z
|
2022-03-11T13:36:34.000Z
|
tests/default_storage_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/default_storage_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.sdfg.infer_types import set_default_schedule_and_storage_types
def test_notbmap():
sdfg = dace.SDFG('default_storage_test_1')
sdfg.add_array('A', [20], dace.float64, dace.StorageType.GPU_Global)
sdfg.add_transient('tmp', [1], dace.float64)
state = sdfg.add_state()
r = state.add_read('A')
me, mx = state.add_map('kernel', dict(i='0:20'), dace.ScheduleType.GPU_Device)
tmp = state.add_access('tmp')
t = state.add_tasklet('add', {'a'}, {'b'}, 'b = a + 1')
w = state.add_write('A')
state.add_memlet_path(r, me, tmp, memlet=dace.Memlet.simple('A', 'i'))
state.add_memlet_path(tmp, t, dst_conn='a', memlet=dace.Memlet.simple('tmp', '0'))
state.add_memlet_path(t, mx, w, src_conn='b', memlet=dace.Memlet.simple('A', 'i'))
set_default_schedule_and_storage_types(sdfg, None)
assert sdfg.arrays['tmp'].storage == dace.StorageType.Register
def test_tbmap_sequential():
sdfg = dace.SDFG('default_storage_test_2')
sdfg.add_array('A', [20, 32], dace.float64, dace.StorageType.GPU_Global)
sdfg.add_transient('tmp', [1], dace.float64)
state = sdfg.add_state()
r = state.add_read('A')
ome, omx = state.add_map('kernel', dict(i='0:20'), dace.ScheduleType.GPU_Device)
sme, smx = state.add_map('seq', dict(j='0:1'), dace.ScheduleType.Sequential)
ime, imx = state.add_map('block', dict(ti='0:32'), dace.ScheduleType.GPU_ThreadBlock)
tmp = state.add_access('tmp')
t = state.add_tasklet('add', {'a'}, {'b'}, 'b = a + 1')
w = state.add_write('A')
state.add_memlet_path(r, ome, sme, tmp, memlet=dace.Memlet.simple('A', 'i+j, 0:32'))
state.add_memlet_path(tmp, ime, t, dst_conn='a', memlet=dace.Memlet.simple('tmp', '0, ti'))
state.add_memlet_path(t, imx, smx, omx, w, src_conn='b', memlet=dace.Memlet.simple('A', 'i+j, ti'))
set_default_schedule_and_storage_types(sdfg, None)
assert sdfg.arrays['tmp'].storage == dace.StorageType.GPU_Shared
if __name__ == '__main__':
test_notbmap()
test_tbmap_sequential()
| 42.4
| 103
| 0.675943
|
4a04ae33915b13b625b8a7e172f45f63131815d0
| 5,870
|
py
|
Python
|
userbot/__init__.py
|
herokutest12/PaperplaneExtended
|
200f96242f6ab2be05c94674c6b3811e8b83a339
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/__init__.py
|
herokutest12/PaperplaneExtended
|
200f96242f6ab2be05c94674c6b3811e8b83a339
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/__init__.py
|
herokutest12/PaperplaneExtended
|
200f96242f6ab2be05c94674c6b3811e8b83a339
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
import pylast
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info[0] < 3 or version_info[1] < 6:
LOGS.info("You MUST have a python version of at least 3.6."
"Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", None))
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = pylast.md5(LASTFM_PASSWORD_PLAIN)
if not LASTFM_USERNAME == "None":
lastfm = pylast.LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
GDRIVE_FOLDER_ID = os.environ.get("GDRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/yshalsager/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, "
"many critical features depend on it. KTHXBye.")
quit(1)
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ISAFK = False
AFKREASON = None
| 31.058201
| 101
| 0.70937
|
4a04ae8d4c3e0d0bee711f6288e11ebd43abc34a
| 1,260
|
py
|
Python
|
control-api/app/api/endpoints/settings.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | 1
|
2021-05-10T21:36:59.000Z
|
2021-05-10T21:36:59.000Z
|
control-api/app/api/endpoints/settings.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | null | null | null |
control-api/app/api/endpoints/settings.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | null | null | null |
import time
from typing import List
from crud import crud
from db.database import get_db
from fastapi import APIRouter, Depends
from fastapi.exceptions import HTTPException
from schemas.setting import Setting, SettingCreate, SettingUpdate
from sqlalchemy.orm import Session
router = APIRouter()
@router.post("/", response_model=Setting)
def create_setting(setting: SettingCreate, db: Session = Depends(get_db)):
db_setting = crud.get_setting_by_name(db, setting.name)
if db_setting:
raise HTTPException(status_code=400, detail=f"Setting already exist")
return crud.create_setting(db, setting=setting)
@router.get("/", response_model=List[Setting])
def get_settings(db: Session = Depends(get_db)):
return crud.get_settings(db)
@router.get("/{id}", response_model=Setting)
def get_setting(id: int, db: Session = Depends(get_db)):
return crud.get_setting(db, id)
@router.put("/{sensor_id}", response_model=Setting)
def update_sensor_enabled(sensor_id: int, new_setting: SettingUpdate, db: Session = Depends(get_db)):
return crud.update_setting(db, sensor_id, new_setting)
@router.delete("/{id}", response_model=Setting)
def delete_setting(id: int, db: Session = Depends(get_db)):
return crud.delete_setting(db, id)
| 31.5
| 101
| 0.763492
|
4a04af4b0ebf85fcaf8cb203f55c88b03f285dbc
| 173
|
py
|
Python
|
src/main/python/base/__init__.py
|
boom-roasted/ImageWAO
|
944505dab1a7c97b8eae2bf9fb30006d0f471f89
|
[
"MIT"
] | 1
|
2020-03-22T01:52:52.000Z
|
2020-03-22T01:52:52.000Z
|
src/main/python/base/__init__.py
|
leftaltkey/ImageWAO
|
944505dab1a7c97b8eae2bf9fb30006d0f471f89
|
[
"MIT"
] | 2
|
2021-06-08T21:12:47.000Z
|
2021-06-08T21:30:32.000Z
|
src/main/python/base/__init__.py
|
leftaltkey/ImageWAO
|
944505dab1a7c97b8eae2bf9fb30006d0f471f89
|
[
"MIT"
] | null | null | null |
from .threading import QWorker
from .configuration import config
from .context import context as ctx
from .version import Version
__all__ = [QWorker, config, ctx, Version]
| 24.714286
| 41
| 0.797688
|
4a04b0af944c43b3067ca7551441cc75dfba7c1a
| 643
|
py
|
Python
|
students/K33401/Do_Thien/Lr1/task2/client.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
|
57c0eef5e1f413c7f031ee001d59e5122f990f26
|
[
"MIT"
] | 7
|
2021-09-02T08:20:58.000Z
|
2022-01-12T11:48:07.000Z
|
students/K33401/Do_Thien/Lr1/task2/client.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
|
57c0eef5e1f413c7f031ee001d59e5122f990f26
|
[
"MIT"
] | 76
|
2021-09-17T23:01:50.000Z
|
2022-03-18T16:42:03.000Z
|
students/K33401/Do_Thien/Lr1/task2/client.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
|
57c0eef5e1f413c7f031ee001d59e5122f990f26
|
[
"MIT"
] | 60
|
2021-09-04T16:47:39.000Z
|
2022-03-21T04:41:27.000Z
|
import socket
import pickle
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.connect((socket.gethostname(), 14900))
serversocket.send(b"Hello server! \n")
data = serversocket.recv(1024)
print("Server: " + data.decode("utf-8"))
print("Enter the lenghts of the side of the trapezoid!")
print("(Form: Top_edge Bottom_edge High_line)")
trapezoid = list(map(int, input("Input: ").strip().split()))[:3]
data_trapezoid = pickle.dumps(trapezoid)
serversocket.send(data_trapezoid)
data_area = serversocket.recv(1024)
area = pickle.loads(data_area)
print("The area of the trapezoid: " + str(area))
serversocket.close()
| 29.227273
| 64
| 0.752722
|
4a04b1b2880305961115290298b282e23426687c
| 4,635
|
py
|
Python
|
oneview_redfish_toolkit/tests/blueprints/test_network_device_function.py
|
AgneshKumar/oneview-redfish-toolkit
|
f559ce795b43891a919dc431563c0d0c8000a2d9
|
[
"Apache-2.0"
] | 2
|
2018-12-07T17:08:37.000Z
|
2019-03-19T05:26:20.000Z
|
oneview_redfish_toolkit/tests/blueprints/test_network_device_function.py
|
shobhit-sinha/oneview-redfish-toolkit
|
ffc86ea0a9e5d192ab6a2fe21c1717957b55d1da
|
[
"Apache-2.0"
] | null | null | null |
oneview_redfish_toolkit/tests/blueprints/test_network_device_function.py
|
shobhit-sinha/oneview-redfish-toolkit
|
ffc86ea0a9e5d192ab6a2fe21c1717957b55d1da
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import json
# 3rd party libs
from flask_api import status
from hpOneView.exceptions import HPOneViewException
# Module libs
from oneview_redfish_toolkit.blueprints import network_device_function
from oneview_redfish_toolkit.tests.base_flask_test import BaseFlaskTest
class TestNetworkDeviceFunction(BaseFlaskTest):
    """Tests for NetworkDeviceFunction blueprint"""
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is
        # conventionally named `cls`; kept as-is to avoid touching code.
        super(TestNetworkDeviceFunction, self).setUpClass()
        self.app.register_blueprint(
            network_device_function.network_device_function)
    def test_get_network_device_function(self):
        """Tests a successful GET using the ServerHardware mockup"""
        # Loading server_hardware mockup value
        with open(
            'oneview_redfish_toolkit/mockups/oneview/ServerHardware.json'
        ) as f:
            server_hardware = json.load(f)
        # Loading NetworkDeviceFunction mockup result
        with open(
            'oneview_redfish_toolkit/mockups/redfish/'
            'NetworkDeviceFunction1_1_a.json'
        ) as f:
            network_device_function_mockup = json.load(f)
        # Create mock response
        self.oneview_client.server_hardware.get.return_value = server_hardware
        # Get NetworkDeviceFunction
        response = self.client.get(
            "/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/"
            "NetworkAdapters/3/NetworkDeviceFunctions/1_1_a"
        )
        # Gets json from response
        result = json.loads(response.data.decode("utf-8"))
        # Tests response
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual("application/json", response.mimetype)
        self.assertEqualMockup(network_device_function_mockup, result)
    def test_get_network_device_function_invalid_device_id(self):
        """Tests that a non-numeric NetworkAdapters id yields 404"""
        # Loading server_hardware mockup value
        with open(
            'oneview_redfish_toolkit/mockups/oneview/ServerHardware.json'
        ) as f:
            server_hardware = json.load(f)
        # Create mock response
        self.oneview_client.server_hardware.get.return_value = server_hardware
        # Get NetworkDeviceFunction
        response = self.client.get(
            "/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/"
            "NetworkAdapters/invalid_id/NetworkDeviceFunctions/1_1_a"
        )
        # Tests response
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
        self.assertEqual("application/json", response.mimetype)
    def test_get_network_device_function_sh_not_found(self):
        """Tests NetworkDeviceFunction server hardware not found"""
        # A RESOURCE_NOT_FOUND OneView error is mapped to HTTP 404.
        e = HPOneViewException({
            'errorCode': 'RESOURCE_NOT_FOUND',
            'message': 'server-hardware not found',
        })
        self.oneview_client.server_hardware.get.side_effect = e
        # Get NetworkDeviceFunction
        response = self.client.get(
            "/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/"
            "NetworkAdapters/3/NetworkDeviceFunctions/1_1_a"
        )
        self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
        self.assertEqual("application/json", response.mimetype)
    def test_get_network_device_function_sh_exception(self):
        """Tests NetworkDeviceFunction unknown exception"""
        # Any other OneView error code is mapped to HTTP 500.
        e = HPOneViewException({
            'errorCode': 'ANOTHER_ERROR',
            'message': 'server-hardware error',
        })
        self.oneview_client.server_hardware.get.side_effect = e
        # Get NetworkDeviceFunction
        response = self.client.get(
            "/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/"
            "NetworkAdapters/3/NetworkDeviceFunctions/1_1_a"
        )
        self.assertEqual(
            status.HTTP_500_INTERNAL_SERVER_ERROR,
            response.status_code
        )
        self.assertEqual("application/json", response.mimetype)
| 35.113636
| 78
| 0.692341
|
4a04b1de66bb2f720b2ca95ab51d9875a126b455
| 1,305
|
py
|
Python
|
backend/api/v1/groups/views.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | null | null | null |
backend/api/v1/groups/views.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | 7
|
2021-03-19T04:47:13.000Z
|
2022-01-13T02:02:46.000Z
|
backend/api/v1/groups/views.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from backend.api.v1.groups.serializers import (
GroupSerializer,
MemberSerializer,
GroupMessageSerializer,
)
from backend.groups.models import (
ChatGroup,
GroupMembership,
GroupMessage,
)
class GroupListCreateView(generics.ListCreateAPIView):
    """ Create a chat group or list all chat groups """
    queryset = ChatGroup.objects.all()
    serializer_class = GroupSerializer
class GroupMembershipListCreateView(generics.ListCreateAPIView):
    """ Create a group membership or list all group memberships """
    # Docstring fixed: it was copy-pasted from the group view.
    queryset = GroupMembership.objects.all()
    serializer_class = MemberSerializer
class GroupRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
    """ Get & Update & Delete a group, looked up by its slug """
    queryset = ChatGroup.objects.all()
    serializer_class = GroupSerializer
    # Match the URL kwarg and the model field both named "slug".
    lookup_url_kwarg = "slug"
    lookup_field = "slug"
class GroupMessageListCreateView(generics.ListCreateAPIView):
    """ Create a group message or list messages (filterable) """
    # Docstring fixed: it was copy-pasted from the group view.
    queryset = GroupMessage.objects.all()
    serializer_class = GroupMessageSerializer
    # NOTE(review): filter field is 'dialog' — confirm GroupMessage actually
    # has a 'dialog' relation (a 'group' field would seem more natural here).
    filterset_fields = ['dialog', 'sender']
class GroupMessageRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
    """ Get & Update & Delete a single group message """
    queryset = GroupMessage.objects.all()
    serializer_class = GroupMessageSerializer
| 28.369565
| 86
| 0.748659
|
4a04b21eb6b7daecff935256a735f939bc4ffdff
| 578
|
py
|
Python
|
setup.py
|
guitargeek/xgbo
|
7124c32a55285cec22a02aeb868773f76b7280f5
|
[
"MIT"
] | 1
|
2022-01-07T17:51:23.000Z
|
2022-01-07T17:51:23.000Z
|
setup.py
|
guitargeek/xgbo
|
7124c32a55285cec22a02aeb868773f76b7280f5
|
[
"MIT"
] | 4
|
2018-10-02T15:19:06.000Z
|
2019-05-08T10:54:42.000Z
|
setup.py
|
guitargeek/xgbo
|
7124c32a55285cec22a02aeb868773f76b7280f5
|
[
"MIT"
] | 5
|
2018-10-19T09:42:23.000Z
|
2022-01-07T16:11:34.000Z
|
from setuptools import setup, find_packages

# Package metadata for xgbo (xgboost + Bayesian hyperparameter optimization).
setup(
    name="xgbo",
    version="0.2",
    description="Xgboost with Bayesian optimization",
    # Typo fix: "calssifiers" -> "classifiers".
    long_description="Train xgboost classifiers and regressors with Bayesian optimized hyperparameters.",
    url="http://github.com/guitargeek/xgbo",
    author="Jonas Rembser",
    author_email="jonas.rembser@cern.ch",
    license="MIT",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # "scikit-learn" is the real PyPI distribution name; the legacy
    # "sklearn" alias is deprecated and no longer installable.
    install_requires=["bayesian-optimization", "numpy", "pandas", "tables", "xgboost", "scikit-learn", "matplotlib"],
)
| 34
| 112
| 0.714533
|
4a04b2dbe4dbba95214bcc75cb497a7cdad6ae3a
| 13,918
|
py
|
Python
|
unifold/data/parsers.py
|
nwod-edispu/Uni-Fold
|
0ebfaf234807523067759d4c300694bb58cfb991
|
[
"Apache-2.0"
] | null | null | null |
unifold/data/parsers.py
|
nwod-edispu/Uni-Fold
|
0ebfaf234807523067759d4c300694bb58cfb991
|
[
"Apache-2.0"
] | null | null | null |
unifold/data/parsers.py
|
nwod-edispu/Uni-Fold
|
0ebfaf234807523067759d4c300694bb58cfb991
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Beijing DP Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parsing various file formats."""
import collections
import dataclasses
import re
import string
from typing import Dict, Iterable, List, Optional, Sequence, Tuple
# deletion_matrix[i][j] is the number of residues deleted from aligned
# sequence i at residue position j (see parse_stockholm / parse_a3m).
DeletionMatrix = Sequence[Sequence[int]]
@dataclasses.dataclass(frozen=True)
class TemplateHit:
  """Class representing a template hit parsed from an .hhr file."""
  index: int  # Hit number from the 'No <n>' paragraph header.
  name: str  # Hit name line with the leading '>' stripped.
  aligned_cols: int  # Aligned_cols value from the summary line.
  sum_probs: float  # Sum_probs value from the summary line.
  query: str  # Aligned query sequence (may contain gaps).
  hit_sequence: str  # Aligned hit sequence (may contain gaps).
  indices_query: List[int]  # Per-column query residue indices; -1 for gaps.
  indices_hit: List[int]  # Per-column hit residue indices; -1 for gaps.
def parse_fasta(fasta_string: str) -> Tuple[Sequence[str], Sequence[str]]:
  """Splits FASTA text into parallel lists of sequences and descriptions.

  Args:
    fasta_string: The string contents of a FASTA file.

  Returns:
    A (sequences, descriptions) tuple. Descriptions come from the '>' header
    lines (without the '>'), in the same order as the sequences.
  """
  sequences = []
  descriptions = []
  current = -1
  for raw_line in fasta_string.splitlines():
    entry = raw_line.strip()
    if entry.startswith('>'):
      # New record: remember its description and start an empty sequence.
      current += 1
      descriptions.append(entry[1:])
      sequences.append('')
    elif entry:
      # Continuation of the current record; blank lines are ignored.
      sequences[current] += entry
  return sequences, descriptions
def parse_stockholm(
    stockholm_string: str
) -> Tuple[Sequence[str], DeletionMatrix, Sequence[str]]:
  """Extracts the MSA, deletion matrix and names from Stockholm text.

  Args:
    stockholm_string: The string contents of a stockholm file. The first
      sequence in the file should be the query sequence.

  Returns:
    A tuple of:
      * aligned sequences (query gap columns removed; duplicates kept),
      * the deletion matrix, where entry [i][j] counts residues deleted from
        aligned sequence i at query residue position j,
      * the matched target names, including any jackhmmer subsequence suffix.
  """
  chunks_by_name = collections.OrderedDict()
  for raw in stockholm_string.splitlines():
    raw = raw.strip()
    # Skip blank lines, markup ('#') and the end-of-alignment marker ('//').
    if not raw or raw.startswith(('#', '//')):
      continue
    seq_name, chunk = raw.split()
    if seq_name not in chunks_by_name:
      chunks_by_name[seq_name] = ''
    chunks_by_name[seq_name] += chunk
  msa = []
  deletion_matrix = []
  query = ''
  kept_columns = []
  for idx, full_sequence in enumerate(chunks_by_name.values()):
    if idx == 0:
      # The first sequence is the query; only its non-gap columns are kept.
      query = full_sequence
      kept_columns = [c for c, res in enumerate(query) if res != '-']
    msa.append(''.join(full_sequence[c] for c in kept_columns))
    # Count residues aligned to query gaps (deletions w.r.t. the query).
    deletions = []
    run = 0
    for seq_res, query_res in zip(full_sequence, query):
      if seq_res == '-' and query_res == '-':
        continue
      if query_res == '-':
        run += 1
      else:
        deletions.append(run)
        run = 0
    deletion_matrix.append(deletions)
  return msa, deletion_matrix, list(chunks_by_name.keys())
def parse_a3m(a3m_string: str) -> Tuple[Sequence[str], DeletionMatrix]:
  """Extracts the MSA and deletion matrix from a3m-format alignment text.

  Args:
    a3m_string: The string contents of a a3m file. The first sequence in the
      file should be the query sequence.

  Returns:
    A tuple of the aligned sequences (lowercase insertions stripped, possibly
    with duplicates) and the deletion matrix, where entry [i][j] counts
    residues deleted from aligned sequence i at residue position j.
  """
  raw_sequences, _ = parse_fasta(a3m_string)
  deletion_matrix = []
  for seq in raw_sequences:
    # Lowercase characters are insertions relative to the query; count how
    # many precede each aligned (uppercase or gap) position.
    counts = []
    run_length = 0
    for char in seq:
      if char.islower():
        run_length += 1
      else:
        counts.append(run_length)
        run_length = 0
    deletion_matrix.append(counts)
  # Drop the lowercase insertion characters to obtain the aligned MSA.
  strip_lowercase = str.maketrans('', '', string.ascii_lowercase)
  aligned = [seq.translate(strip_lowercase) for seq in raw_sequences]
  return aligned, deletion_matrix
def _convert_sto_seq_to_a3m(
query_non_gaps: Sequence[bool], sto_seq: str) -> Iterable[str]:
for is_query_res_non_gap, sequence_res in zip(query_non_gaps, sto_seq):
if is_query_res_non_gap:
yield sequence_res
elif sequence_res != '-':
yield sequence_res.lower()
def convert_stockholm_to_a3m(stockholm_format: str,
                             max_sequences: Optional[int] = None) -> str:
  """Converts MSA in Stockholm format to the A3M format.

  Args:
    stockholm_format: Contents of a Stockholm file; the first sequence is
      treated as the query.
    max_sequences: If given, at most this many sequences are converted.

  Returns:
    The alignment as A3M/FASTA text, terminated by a newline.
  """
  descriptions = {}
  sequences = {}
  reached_max_sequences = False
  # First pass: accumulate the aligned sequence chunks per sequence name.
  for line in stockholm_format.splitlines():
    reached_max_sequences = max_sequences and len(sequences) >= max_sequences
    if line.strip() and not line.startswith(('#', '//')):
      # Ignore blank lines, markup and end symbols - remainder are alignment
      # sequence parts.
      seqname, aligned_seq = line.split(maxsplit=1)
      if seqname not in sequences:
        if reached_max_sequences:
          continue
        sequences[seqname] = ''
      sequences[seqname] += aligned_seq
  # Second pass: pick up the '#=GS <name> DE <text>' description rows.
  # NOTE(review): reached_max_sequences keeps the value from the last
  # iteration of the loop above — confirm this is the intended semantics.
  for line in stockholm_format.splitlines():
    if line[:4] == '#=GS':
      # Description row - example format is:
      # #=GS UniRef90_Q9H5Z4/4-78 DE [subseq from] cDNA: FLJ22755 ...
      columns = line.split(maxsplit=3)
      seqname, feature = columns[1:3]
      value = columns[3] if len(columns) == 4 else ''
      if feature != 'DE':
        continue
      if reached_max_sequences and seqname not in sequences:
        continue
      descriptions[seqname] = value
      # Stop early once every kept sequence has a description.
      if len(descriptions) == len(sequences):
        break
  # Convert sto format to a3m line by line
  a3m_sequences = {}
  # query_sequence is assumed to be the first sequence
  query_sequence = next(iter(sequences.values()))
  query_non_gaps = [res != '-' for res in query_sequence]
  for seqname, sto_sequence in sequences.items():
    a3m_sequences[seqname] = ''.join(
        _convert_sto_seq_to_a3m(query_non_gaps, sto_sequence))
  fasta_chunks = (f">{k} {descriptions.get(k, '')}\n{a3m_sequences[k]}"
                  for k in a3m_sequences)
  return '\n'.join(fasta_chunks) + '\n'  # Include terminating newline.
def _get_hhr_line_regex_groups(
regex_pattern: str, line: str) -> Sequence[Optional[str]]:
match = re.match(regex_pattern, line)
if match is None:
raise RuntimeError(f'Could not parse query line {line}')
return match.groups()
def _update_hhr_residue_indices_list(
sequence: str, start_index: int, indices_list: List[int]):
"""Computes the relative indices for each residue with respect to the original sequence."""
counter = start_index
for symbol in sequence:
if symbol == '-':
indices_list.append(-1)
else:
indices_list.append(counter)
counter += 1
def _parse_hhr_hit(detailed_lines: Sequence[str]) -> TemplateHit:
  """Parses the detailed HMM HMM comparison section for a single Hit.

  This works on .hhr files generated from both HHBlits and HHSearch.

  Args:
    detailed_lines: A list of lines from a single comparison section between 2
      sequences (which each have their own HMM's)

  Returns:
    A TemplateHit with the information from that detailed comparison section

  Raises:
    RuntimeError: If a certain line cannot be processed
  """
  # Parse first 2 lines.
  number_of_hit = int(detailed_lines[0].split()[-1])
  name_hit = detailed_lines[1][1:]
  # Parse the summary line.
  pattern = (
      'Probab=(.*)[\t ]*E-value=(.*)[\t ]*Score=(.*)[\t ]*Aligned_cols=(.*)[\t'
      ' ]*Identities=(.*)%[\t ]*Similarity=(.*)[\t ]*Sum_probs=(.*)[\t '
      ']*Template_Neff=(.*)')
  match = re.match(pattern, detailed_lines[2])
  if match is None:
    raise RuntimeError(
        'Could not parse section: %s. Expected this: \n%s to contain summary.' %
        (detailed_lines, detailed_lines[2]))
  # NOTE(review): prob_true, e_value and neff are parsed but unused below.
  (prob_true, e_value, _, aligned_cols, _, _, sum_probs,
   neff) = [float(x) for x in match.groups()]
  # The next section reads the detailed comparisons. These are in a 'human
  # readable' format which has a fixed length. The strategy employed is to
  # assume that each block starts with the query sequence line, and to parse
  # that with a regexp in order to deduce the fixed length used for that block.
  query = ''
  hit_sequence = ''
  indices_query = []
  indices_hit = []
  length_block = None
  for line in detailed_lines[3:]:
    # Parse the query sequence line
    if (line.startswith('Q ') and not line.startswith('Q ss_dssp') and
        not line.startswith('Q ss_pred') and
        not line.startswith('Q Consensus')):
      # Thus the first 17 characters must be 'Q <query_name> ', and we can parse
      # everything after that.
      #              start    sequence       end       total_sequence_length
      patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*([0-9]*) \([0-9]*\)'
      groups = _get_hhr_line_regex_groups(patt, line[17:])
      # Get the length of the parsed block using the start and finish indices,
      # and ensure it is the same as the actual block length.
      start = int(groups[0]) - 1  # Make index zero based.
      delta_query = groups[1]
      end = int(groups[2])
      num_insertions = len([x for x in delta_query if x == '-'])
      length_block = end - start + num_insertions
      assert length_block == len(delta_query)
      # Update the query sequence and indices list.
      query += delta_query
      _update_hhr_residue_indices_list(delta_query, start, indices_query)
    elif line.startswith('T '):
      # Parse the hit sequence.
      if (not line.startswith('T ss_dssp') and
          not line.startswith('T ss_pred') and
          not line.startswith('T Consensus')):
        # Thus the first 17 characters must be 'T <hit_name> ', and we can
        # parse everything after that.
        #              start    sequence       end     total_sequence_length
        patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*[0-9]* \([0-9]*\)'
        groups = _get_hhr_line_regex_groups(patt, line[17:])
        start = int(groups[0]) - 1  # Make index zero based.
        delta_hit_sequence = groups[1]
        # The hit block must be the same length as the query block above it.
        assert length_block == len(delta_hit_sequence)
        # Update the hit sequence and indices list.
        hit_sequence += delta_hit_sequence
        _update_hhr_residue_indices_list(delta_hit_sequence, start, indices_hit)
  return TemplateHit(
      index=number_of_hit,
      name=name_hit,
      aligned_cols=int(aligned_cols),
      sum_probs=sum_probs,
      query=query,
      hit_sequence=hit_sequence,
      indices_query=indices_query,
      indices_hit=indices_hit,
  )
def parse_hhr(hhr_string: str) -> Sequence[TemplateHit]:
  """Parses the content of an entire HHR file."""
  lines = hhr_string.splitlines()
  # Each .hhr file starts with a results table, followed by one paragraph
  # per hit; every paragraph begins with a line 'No <hit number>'.
  starts = [idx for idx, text in enumerate(lines) if text.startswith('No ')]
  hits = []
  if starts:
    # Pair each paragraph start with the next one (or end-of-file).
    bounds = starts + [len(lines)]
    for begin, end in zip(bounds, bounds[1:]):
      hits.append(_parse_hhr_hit(lines[begin:end]))
  return hits
def parse_e_values_from_tblout(tblout: str) -> Dict[str, float]:
  """Parse target to e-value mapping parsed from Jackhmmer tblout string.

  Args:
    tblout: Contents of a Jackhmmer --tblout file.

  Returns:
    Mapping from target name to full-sequence E-value; always contains the
    sentinel entry 'query' -> 0.
  """
  e_values = {'query': 0}
  # Skip comment lines; also skip empty lines, which previously raised an
  # IndexError on `line[0]`.
  lines = [line for line in tblout.splitlines()
           if line and not line.startswith('#')]
  # As per http://eddylab.org/software/hmmer/Userguide.pdf fields are
  # space-delimited. Relevant fields are (1) target name: and
  # (5) E-value (full sequence) (numbering from 1).
  for line in lines:
    fields = line.split()
    e_value = fields[4]
    target_name = fields[0]
    e_values[target_name] = float(e_value)
  return e_values
| 38.131507
| 95
| 0.623868
|
4a04b3dcf9bca6d20a258060451eb3c4243eac4e
| 694
|
py
|
Python
|
cabot/init.py
|
jareddgotte/cabot
|
185ae27e5ca7b053031e3731f20bfb835b0d63ae
|
[
"MIT"
] | null | null | null |
cabot/init.py
|
jareddgotte/cabot
|
185ae27e5ca7b053031e3731f20bfb835b0d63ae
|
[
"MIT"
] | null | null | null |
cabot/init.py
|
jareddgotte/cabot
|
185ae27e5ca7b053031e3731f20bfb835b0d63ae
|
[
"MIT"
] | null | null | null |
#
# Init
#
# import global modules
import configparser
import importlib
def init():
    """Load config.ini and reset the global command registry."""
    global config, commands
    parser = configparser.ConfigParser()
    parser.read('config.ini')
    config = parser
    commands = {}
def addCommandAction(command, instance, function):
    """Register `function` (a method name looked up on `instance` at call
    time) as a handler for `command` in the global registry."""
    # setdefault replaces the explicit `in commands.keys()` membership test
    # and the duplicated append branches.
    commands.setdefault(command, []).append([instance, function])
def executeCommandAction(command, message):
    """Invoke the handler registered for `command` with `message`.

    Returns the handler's result, or None when no handler is registered.
    """
    if command in commands:  # idiomatic membership test (was `.keys()`)
        # NOTE(review): the return inside the loop means only the FIRST
        # registered handler ever runs — confirm that is intended.
        for instance, method_name in commands[command]:
            callableMethod = getattr(instance, method_name)
            return callableMethod(message)
| 23.133333
| 64
| 0.674352
|
4a04b4c906e7f15e430b9655232b42c7dce7116c
| 1,066
|
py
|
Python
|
qa327_test/conftest.py
|
winterNebs/HyperTextAssassins
|
77d44db692d03b95462694394048aec3aacc4108
|
[
"MIT"
] | 2
|
2021-01-03T01:42:23.000Z
|
2021-03-30T18:59:45.000Z
|
qa327_test/conftest.py
|
johnsonzhang434/SeetGeek
|
77d44db692d03b95462694394048aec3aacc4108
|
[
"MIT"
] | null | null | null |
qa327_test/conftest.py
|
johnsonzhang434/SeetGeek
|
77d44db692d03b95462694394048aec3aacc4108
|
[
"MIT"
] | 1
|
2021-01-02T19:39:29.000Z
|
2021-01-02T19:39:29.000Z
|
import pytest
import subprocess
import os
import signal
import time
import tempfile
from qa327.__main__ import FLASK_PORT
from qa327.__main__ import app
from qa327.models import db
import threading
from werkzeug.serving import make_server
# Root URL of the live test server started by the fixture in this module.
base_url = 'http://localhost:{}'.format(FLASK_PORT)
class ServerThread(threading.Thread):
    """Background thread that serves the qa327 Flask app for live tests."""
    def __init__(self):
        threading.Thread.__init__(self)
        # Build the WSGI server and push an app context for db access.
        self.srv = make_server('127.0.0.1', FLASK_PORT, app)
        self.ctx = app.app_context()
        self.ctx.push()
    def run(self):
        # Blocks here until shutdown() is called.
        self.srv.serve_forever()
        # NOTE(review): this database reset only runs AFTER serve_forever()
        # returns, i.e. at shutdown time — confirm it was not intended to
        # run before serving starts.
        with self.ctx:
            db.drop_all()
            db.create_all()
            db.session.commit()
    def shutdown(self):
        # Stop serve_forever() so run() can finish.
        self.srv.shutdown()
@pytest.fixture(scope="module", autouse=True)
def server():
    """Run the live test server for the duration of each test module."""
    # create a live server for testing
    # (removed unused `on_win = os.name == 'nt'` and the stale comment about
    # a temporary-file database — neither was used here)
    server = ServerThread()
    server.start()
    time.sleep(1)  # give the background server a moment to start accepting
    yield
    server.shutdown()
    time.sleep(1)  # let the port be released before the next module starts
| 22.680851
| 61
| 0.63227
|
4a04b4ec5e8e7495fa085dfdf9b3c9660dc5a8f7
| 211
|
py
|
Python
|
tests/__init__.py
|
TestowanieAutomatyczneUG/laboratorium-10-cati97
|
4dec599bdaa0de2ab0ef88f10e8c269dd6c4e40d
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
TestowanieAutomatyczneUG/laboratorium-10-cati97
|
4dec599bdaa0de2ab0ef88f10e8c269dd6c4e40d
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
TestowanieAutomatyczneUG/laboratorium-10-cati97
|
4dec599bdaa0de2ab0ef88f10e8c269dd6c4e40d
|
[
"MIT"
] | null | null | null |
# the inclusion of the tests module is not meant to offer best practices for
# testing in general, but rather to support the `find_packages` example in
# setup.py that excludes installing the "tests" package
| 52.75
| 77
| 0.777251
|
4a04b581430587731e8ae3e84bc9b600e3c2d2c8
| 84,267
|
py
|
Python
|
src/olympia/bandwagon/tests/test_views.py
|
g-k/addons-server
|
ca369146787849719096745d1782f583c68cd10f
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/bandwagon/tests/test_views.py
|
g-k/addons-server
|
ca369146787849719096745d1782f583c68cd10f
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/bandwagon/tests/test_views.py
|
g-k/addons-server
|
ca369146787849719096745d1782f583c68cd10f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import django.test
from django.conf import settings
from django.forms import ValidationError
from django.test.utils import override_settings
from django.utils.datastructures import MultiValueDict
import pytest
from mock import Mock, patch
from pyquery import PyQuery as pq
from rest_framework.fields import empty
from rest_framework.settings import api_settings
from olympia import amo, core
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.amo.tests import (
APITestClient, TestCase, addon_factory, collection_factory, reverse_ns,
user_factory)
from olympia.amo.tests.test_helpers import get_uploaded_file
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.amo.utils import urlparams
from olympia.bandwagon import forms
from olympia.bandwagon.models import (
Collection, CollectionAddon, CollectionVote, CollectionWatcher)
from olympia.users.models import UserProfile
# Give every test in this module database access.
pytestmark = pytest.mark.django_db
def test_addons_form():
    """An empty addon id with a comment should still validate."""
    data = MultiValueDict({'addon': [''],
                           'addon_comment': ['comment']})
    form = forms.AddonsForm(data)
    assert form.is_valid()
def test_collections_form_bad_slug():
    """Whitespace-only slug and name are both rejected."""
    form = forms.CollectionForm({'slug': ' ', 'listed': True, 'name': ' '})
    errors = form.errors
    assert 'slug' in errors
    assert 'name' in errors
def test_collections_form_long_description():
    """An over-long description is rejected."""
    form = forms.CollectionForm({'description': '&*' * 200})
    assert 'description' in form.errors
def test_collections_form_unicode_slug():
    """A unicode slug is allowed; the blank name is still rejected."""
    author = Mock()
    author.collections.filter.return_value.count.return_value = False
    form = forms.CollectionForm(
        {'slug': u'Ελλην', 'listed': True, 'name': ' '},
        initial={'author': author})
    assert 'name' in form.errors
    assert 'slug' not in form.errors
class TestViews(TestCase):
    """Legacy-URL redirects and collection page behaviour."""
    fixtures = ['users/test_backends', 'bandwagon/test_models',
                'base/addon_3615']
    def check_response(self, url, code, to=None):
        # Helper: follow `url` and assert it 404s or redirects to `to`.
        response = self.client.get(url, follow=True)
        if code == 404:
            assert response.status_code == 404
        elif code in (301, 302):
            self.assert3xx(response, to, status_code=code)
        else:  # pragma: no cover
            assert code in (301, 302, 404), code
    def test_legacy_redirects(self):
        collection = Collection.objects.get(nickname='wut')
        url = collection.get_url_path()
        tests = [
            ('/collection/wut?x=y', 301, url + '?x=y'),
            ('/collection/wut/', 301, url),
            ('/collection/f94d08c7-794d-3ce4-4634-99caa09f9ef4', 301, url),
            ('/collection/f94d08c7-794d-3ce4-4634-99caa09f9ef4/', 301, url),
            ('/collections/view/f94d08c7-794d-3ce4-4634-99caa09f9ef4', 301,
             url),
            ('/collections/view/wut/', 301, url),
            ('/collection/404', 404)]
        for test in tests:
            self.check_response(*test)
    def test_legacy_redirects_edit(self):
        self.client.login(email='jbalogh@mozilla.com')
        u = UserProfile.objects.get(email='jbalogh@mozilla.com')
        uuid = u.favorites_collection().uuid
        self.check_response('/collections/edit/%s' % uuid, 301,
                            u.favorites_collection().edit_url())
    def test_collection_directory_redirects(self):
        base = reverse('collections.list')
        tests = [
            ('/collections/editors_picks', 301,
             urlparams(base, sort='featured')),
            ('/collections/popular/', 301,
             urlparams(base, sort='popular')),
            # These don't work without a login.
            ('/collections/favorites/', 301, base),
        ]
        for test in tests:
            self.check_response(*test)
    def test_collection_directory_redirects_with_login(self):
        self.client.login(email='jbalogh@mozilla.com')
        self.check_response('/collections/favorites/', 301,
                            reverse('collections.following'))
    def test_unlisted_collection_login_redirect(self):
        user = UserProfile.objects.get(email='jbalogh@mozilla.com')
        urls = (
            '/en-US/firefox/collections/mine/',
            '/en-US/firefox/collections/mine/favorites/',
            user.favorites_collection().get_url_path(),
        )
        for url in urls:
            self.assertLoginRedirects(self.client.get(url), url)
    def test_unreviewed_addon(self):
        # An unreviewed add-on is still shown to its collection owner.
        u = UserProfile.objects.get(email='jbalogh@mozilla.com')
        addon = Addon.objects.all()[0]
        addon.status = amo.STATUS_NOMINATED
        c = u.favorites_collection()
        core.set_user(u)
        c.add_addon(addon)
        self.client.login(email='jbalogh@mozilla.com')
        response = self.client.get(c.get_url_path())
        assert list(response.context['addons'].object_list) == [addon]
    def test_mine(self):
        u = UserProfile.objects.get(email='jbalogh@mozilla.com')
        # Fixed duplicated assignment (`addon = addon = ...`).
        addon = Addon.objects.all()[0]
        c = u.favorites_collection()
        core.set_user(u)
        c.add_addon(addon)
        assert self.client.login(email='jbalogh@mozilla.com')
        # My Collections.
        response = self.client.get('/en-US/firefox/collections/mine/')
        assert response.context['author'] == (
            UserProfile.objects.get(email='jbalogh@mozilla.com'))
        # My Favorites.
        response = self.client.get(reverse('collections.detail',
                                           args=['mine', 'favorites']))
        assert response.status_code == 200
        assert list(response.context['addons'].object_list) == [addon]
    def test_not_mine(self):
        self.client.logout()
        r = self.client.get(reverse('collections.user', args=['jbalogh']))
        assert r.context['page'] == 'user'
        assert '#p-mine' not in pq(r.content)('style').text(), (
            "'Collections I've Made' sidebar link shouldn't be highlighted.")
    def test_description_no_link_no_markup(self):
        c = Collection.objects.get(slug='wut-slug')
        c.description = ('<a href="http://example.com">example.com</a> '
                         'http://example.com <b>foo</b> some text')
        c.save()
        assert self.client.login(email='jbalogh@mozilla.com')
        response = self.client.get('/en-US/firefox/collections/mine/')
        # All markup is escaped, all links are stripped.
        # (Expected string restored to its HTML-escaped form; the unescaped
        # literal contradicted this comment.)
        self.assertContains(response, '&lt;b&gt;foo&lt;/b&gt; some text')
    def test_delete_icon(self):
        user = UserProfile.objects.get(email='jbalogh@mozilla.com')
        collection = user.favorites_collection()
        edit_url = collection.edit_url()
        # User not logged in: redirect to login page.
        res = self.client.post(collection.delete_icon_url())
        assert res.status_code == 302
        assert res.url != edit_url
        self.client.login(email='jbalogh@mozilla.com')
        res = self.client.post(collection.delete_icon_url())
        assert res.status_code == 302
        assert res.url == edit_url
    def test_delete_icon_csrf_protected(self):
        """The delete icon view only accepts POSTs and is csrf protected."""
        user = UserProfile.objects.get(email='jbalogh@mozilla.com')
        collection = user.favorites_collection()
        client = django.test.Client(enforce_csrf_checks=True)
        client.login(email='jbalogh@mozilla.com')
        res = client.get(collection.delete_icon_url())
        assert res.status_code == 405  # Only POSTs are allowed.
        res = client.post(collection.delete_icon_url())
        assert res.status_code == 403  # The view is csrf protected.
    def test_no_xss_in_collection_page(self):
        coll = Collection.objects.get(slug='wut-slug')
        name = '"><script>alert(/XSS/);</script>'
        # Restored the escaped variant: it was identical to `name`, which
        # made the two asserts below contradict each other.
        name_escaped = '"&gt;&lt;script&gt;alert(/XSS/);&lt;/script&gt;'
        coll.name = name
        coll.save()
        resp = self.client.get(coll.get_url_path())
        assert name not in resp.content
        assert name_escaped in resp.content
class TestPrivacy(TestCase):
    """Visibility rules for unlisted (private) collections."""
    fixtures = ['users/test_backends']
    def setUp(self):
        super(TestPrivacy, self).setUp()
        # The favorites collection is created automatically.
        self.url = reverse('collections.detail', args=['jbalogh', 'favorites'])
        self.client.login(email='jbalogh@mozilla.com')
        assert self.client.get(self.url).status_code == 200
        self.client.logout()
        self.c = Collection.objects.get(slug='favorites',
                                        author__username='jbalogh')
    def test_owner(self):
        # The owner can always see their own collection.
        self.client.login(email='jbalogh@mozilla.com')
        r = self.client.get(self.url)
        assert r.status_code == 200
        # TODO(cvan): Uncomment when bug 719512 gets fixed.
        # assert pq(r.content)('.meta .view-stats').length == 1, (
        #     'Add-on authors should be able to view stats')
    def test_private(self):
        # Another logged-in user gets a 403 on an unlisted collection.
        self.client.logout()
        self.client.login(email='fligtar@gmail.com')
        assert self.client.get(self.url).status_code == 403
    def test_public(self):
        # Make it public, others can see it.
        self.assertLoginRedirects(self.client.get(self.url), self.url)
        self.c.listed = True
        self.c.save()
        r = self.client.get(self.url)
        assert r.status_code == 200
        assert pq(r.content)('.meta .view-stats').length == 0, (
            'Only add-on authors can view stats')
    def test_contributer(self):
        # Collections:Contribute only unlocks the special featured-themes
        # collection, not arbitrary unlisted ones.
        self.c.listed = False
        self.c.save()
        self.assertLoginRedirects(self.client.get(self.url), self.url)
        user = UserProfile.objects.get(email='fligtar@gmail.com')
        self.grant_permission(user, 'Collections:Contribute')
        self.client.login(email='fligtar@gmail.com')
        # should fail as self.c collection isn't special
        assert self.client.get(self.url).status_code == 403
        # But now with special collection will work
        with override_settings(COLLECTION_FEATURED_THEMES_ID=self.c.id):
            response = self.client.get(self.url)
            assert response.status_code == 200
class TestVotes(TestCase):
    """Collection up/down voting endpoints and vote bookkeeping."""
    fixtures = ['users/test_backends']
    def setUp(self):
        super(TestVotes, self).setUp()
        self.client.login(email='jbalogh@mozilla.com')
        args = ['fligtar', 'slug']
        Collection.objects.create(slug='slug', author_id=9945)
        self.c_url = reverse('collections.detail', args=args)
        self.up = reverse('collections.vote', args=args + ['up'])
        self.down = reverse('collections.vote', args=args + ['down'])
    def test_login_required(self):
        self.client.logout()
        self.assertLoginRedirects(self.client.post(self.up), to=self.up)
    def test_post_required(self):
        # GET on the vote URL redirects back to the collection page.
        r = self.client.get(self.up, follow=True)
        self.assert3xx(r, self.c_url)
    def check(self, upvotes=0, downvotes=0):
        # Assert both the denormalized counters and the CollectionVote rows.
        c = Collection.objects.get(slug='slug', author=9945)
        assert c.upvotes == upvotes
        assert c.downvotes == downvotes
        assert CollectionVote.objects.filter(
            user=4043307, vote=1).count() == upvotes
        assert CollectionVote.objects.filter(
            user=4043307, vote=-1).count() == downvotes
        assert CollectionVote.objects.filter(
            user=4043307).count() == upvotes + downvotes
    def test_upvote(self):
        self.client.post(self.up)
        self.check(upvotes=1)
    def test_downvote(self):
        self.client.post(self.down)
        self.check(downvotes=1)
    def test_down_then_up(self):
        # A new vote replaces the previous one.
        self.client.post(self.down)
        self.check(downvotes=1)
        self.client.post(self.up)
        self.check(upvotes=1)
    def test_up_then_up(self):
        # Repeating the same vote toggles it off.
        self.client.post(self.up)
        self.check(upvotes=1)
        self.client.post(self.up)
        self.check(upvotes=0)
    def test_normal_response(self):
        r = self.client.post(self.up, follow=True)
        self.assert3xx(r, self.c_url)
    def test_ajax_response(self):
        # AJAX posts get a 200 instead of a redirect.
        r = self.client.post_ajax(self.up, follow=True)
        assert not r.redirect_chain
        assert r.status_code == 200
class TestCRUD(TestCase):
    """Test the collection form."""
    fixtures = ('base/users', 'base/addon_3615', 'base/collections')

    def setUp(self):
        super(TestCRUD, self).setUp()
        self.add_url = reverse('collections.add')
        self.login_admin()
        # Oh god it's unicode.
        self.slug = u'\u05d0\u05d5\u05e1\u05e3'
        self.data = {
            'addon': 3615,
            'addon_comment': 'fff',
            'name': u'קווים תחתונים ומקפים בלבד',
            'slug': self.slug,
            'description': '',
            'listed': 'True'
        }
        self.grant_permission(
            UserProfile.objects.get(email='admin@mozilla.com'),
            'Admin:Curation')

    def login_admin(self):
        assert self.client.login(email='admin@mozilla.com')

    def login_regular(self):
        assert self.client.login(email='regular@mozilla.com')

    def create_collection(self, **kw):
        """POST self.data (updated with kw) to the add view; return response."""
        self.data.update(kw)
        response = self.client.post(self.add_url, self.data, follow=True)
        assert response.status_code == 200
        return response

    @patch('olympia.bandwagon.views.statsd.incr')
    def test_create_collection_statsd(self, mock_incr):
        # Creating a collection bumps the statsd counter.
        self.client.post(self.add_url, self.data, follow=True)
        mock_incr.assert_any_call('collections.created')

    def test_no_xss_in_edit_page(self):
        name = '"><script>alert(/XSS/);</script>'
        self.create_collection(name=name)
        collection = Collection.objects.get(slug=self.slug)
        assert collection.name == name
        url = reverse('collections.edit', args=['admin', collection.slug, ])
        r = self.client.get(url)
        # Fix: the expected string must be the HTML-escaped rendering of the
        # name; the previous version asserted the raw payload was present AND
        # absent, which is self-contradictory.
        self.assertContains(
            r,
            '&#34;&gt;&lt;script&gt;alert(/XSS/);&lt;/script&gt;'
        )
        assert name not in r.content

    def test_listing_xss(self):
        c = Collection.objects.get(id=80)
        assert self.client.login(email='clouserw@gmail.com')
        url = reverse('collections.watch', args=[c.author.username, c.slug])
        user = UserProfile.objects.get(id='10482')
        user.display_name = "<script>alert(1)</script>"
        user.save()
        r = self.client.post(url, follow=True)
        assert r.status_code == 200
        qs = CollectionWatcher.objects.filter(user__username='clouserw',
                                              collection=80)
        assert qs.count() == 1
        r = self.client.get('/en-US/firefox/collections/following/',
                            follow=True)
        # Fix: the display name must show up HTML-escaped, never as live
        # markup. The previous pair of asserts used the same raw string for
        # both checks and could never both pass.
        assert '&lt;script&gt;alert' in r.content
        assert '<script>alert' not in r.content

    def test_add_fail(self):
        """
        If we input addons but fail at filling out the form, don't show
        invisible addons.
        """
        data = {'addon': 3615,
                'addon_comment': 'fff',
                'description': '',
                'listed': 'True'}
        r = self.client.post(self.add_url, data, follow=True)
        assert pq(r.content)('.errorlist li')[0].text == (
            'This field is required.')
        self.assertContains(r, 'Delicious')

    def test_default_locale(self):
        # The collection's default locale comes from the request's locale.
        r = self.client.post('/he/firefox/collections/add',
                             self.data, follow=True)
        assert r.status_code == 200
        c = Collection.objects.get(slug=self.slug)
        assert c.default_locale == 'he'

    def test_fix_slug(self):
        # Spaces in the submitted slug are normalised to hyphens.
        self.data['slug'] = 'some slug'
        self.create_collection()
        Collection.objects.get(slug='some-slug')

    def test_showform(self):
        """Shows form if logged in."""
        r = self.client.get(self.add_url)
        assert r.status_code == 200

    def test_submit(self):
        """Test submission of addons."""
        # TODO(davedash): Test file uploads, test multiple addons.
        r = self.client.post(self.add_url, self.data, follow=True)
        assert r.request['PATH_INFO'].decode('utf-8') == (
            '/en-US/firefox/collections/admin/%s/' % self.slug)
        c = Collection.objects.get(slug=self.slug)
        assert unicode(c.name) == self.data['name']
        assert c.description == ''
        assert c.addons.all()[0].id == 3615

    def test_duplicate_slug(self):
        """Try the same thing twice.  AND FAIL"""
        self.client.post(self.add_url, self.data, follow=True)
        r = self.client.post(self.add_url, self.data, follow=True)
        assert r.context['form'].errors['slug'][0] == (
            'This url is already in use by another collection')

    def test_edit(self):
        self.create_collection()
        url = reverse('collections.edit', args=['admin', self.slug])
        r = self.client.get(url, follow=True)
        assert r.status_code == 200

    def test_edit_post(self):
        """Test edit of collection."""
        self.create_collection()
        url = reverse('collections.edit', args=['admin', self.slug])
        r = self.client.post(url, {'name': 'HALP', 'slug': 'halp',
                                   'listed': True}, follow=True)
        assert r.status_code == 200
        c = Collection.objects.get(slug='halp')
        assert unicode(c.name) == 'HALP'

    def test_edit_description(self):
        self.create_collection()
        url = reverse('collections.edit', args=['admin', self.slug])
        self.data['description'] = 'abc'
        edit_url = Collection.objects.get(slug=self.slug).edit_url()
        r = self.client.post(url, self.data)
        self.assert3xx(r, edit_url, 302)
        assert unicode(Collection.objects.get(slug=self.slug).description) == (
            'abc')

    def test_edit_no_description(self):
        # Clearing the description must be allowed.
        self.create_collection(description='abc')
        assert Collection.objects.get(slug=self.slug).description == 'abc'
        url = reverse('collections.edit', args=['admin', self.slug])
        self.data['description'] = ''
        edit_url = Collection.objects.get(slug=self.slug).edit_url()
        r = self.client.post(url, self.data)
        self.assert3xx(r, edit_url, 302)
        assert unicode(Collection.objects.get(slug=self.slug).description) == (
            '')

    def test_edit_spaces(self):
        """Let's put lots of spaces and see if they show up."""
        self.create_collection()
        url = reverse('collections.edit', args=['admin', self.slug])
        r = self.client.post(url,
                             {'name': '  H A L  P ', 'slug': '  halp  ',
                              'listed': True}, follow=True)
        assert r.status_code == 200
        c = Collection.objects.get(slug='halp')
        assert unicode(c.name) == 'H A L  P'

    def test_forbidden_edit(self):
        # A plain user cannot touch someone else's collection at all.
        self.create_collection()
        self.login_regular()
        url_args = ['admin', self.slug]
        url = reverse('collections.edit', args=url_args)
        r = self.client.get(url)
        assert r.status_code == 403
        r = self.client.post(url)
        assert r.status_code == 403
        url = reverse('collections.edit_addons', args=url_args)
        r = self.client.get(url)
        assert r.status_code == 403
        r = self.client.post(url)
        assert r.status_code == 403
        url = reverse('collections.edit_privacy', args=url_args)
        r = self.client.get(url)
        assert r.status_code == 403
        r = self.client.post(url)
        assert r.status_code == 403
        url = reverse('collections.delete', args=url_args)
        r = self.client.get(url)
        assert r.status_code == 403
        r = self.client.post(url)
        assert r.status_code == 403

    def test_acl_contributor(self):
        # Contributors on the featured-themes collection may edit add-ons,
        # but not the metadata, privacy, or deletion.
        collection = self.create_collection().context['collection']
        with override_settings(COLLECTION_FEATURED_THEMES_ID=collection.id):
            regular_user = UserProfile.objects.get(email='regular@mozilla.com')
            self.grant_permission(regular_user, 'Collections:Contribute')
            self.login_regular()
            url_args = ['admin', self.slug]
            url = reverse('collections.edit', args=url_args)
            r = self.client.get(url)
            assert r.status_code == 200
            assert r.context['form'] is None
            r = self.client.post(url)
            assert r.status_code == 403
            url = reverse('collections.edit_addons', args=url_args)
            r = self.client.get(url)
            # Passed acl check, but this view needs a POST.
            assert r.status_code == 405
            r = self.client.post(url, {'addon': 3615}, follow=True)
            assert r.status_code == 200
            url = reverse('collections.edit_privacy', args=url_args)
            r = self.client.get(url)
            assert r.status_code == 403
            r = self.client.post(url)
            assert r.status_code == 403
            url = reverse('collections.delete', args=url_args)
            r = self.client.get(url)
            assert r.status_code == 403
            r = self.client.post(url)
            assert r.status_code == 403

    def test_acl_admin_curation(self):
        # Test that even with 'Admin:Curation' you can't edit anyone's
        # collection through the legacy frontend.
        self.create_collection()
        user = UserProfile.objects.get(email='regular@mozilla.com')
        self.grant_permission(user, 'Admin:Curation')
        self.login_regular()
        url_args = ['admin', self.slug]
        url = reverse('collections.edit', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 403
        url = reverse('collections.edit_addons', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 403
        url = reverse('collections.edit_privacy', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 403
        url = reverse('collections.delete', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 403

    def test_acl_admin_curation_mozilla(self):
        # Test that with 'Admin:Curation' you can edit collections by the
        # user named "mozilla".
        self.create_collection()
        mozilla = UserProfile.objects.get(username='mozilla')
        Collection.objects.get(slug=self.slug).update(author=mozilla)
        user = UserProfile.objects.get(email='regular@mozilla.com')
        self.grant_permission(user, 'Admin:Curation')
        self.login_regular()
        url_args = ['mozilla', self.slug]
        url = reverse('collections.edit', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 200
        url = reverse('collections.edit_addons', args=url_args)
        response = self.client.get(url)
        # Passed acl check, but this view needs a POST.
        assert response.status_code == 405
        url = reverse('collections.edit_privacy', args=url_args)
        response = self.client.get(url)
        # Passed acl check, but this view needs a POST.
        assert response.status_code == 405
        url = reverse('collections.delete', args=url_args)
        response = self.client.get(url)
        assert response.status_code == 200

    def test_edit_favorites(self):
        # The implicit favorites collection can be renamed like any other.
        r = self.client.get(reverse('collections.list'))
        fav = r.context['request'].user.favorites_collection()
        r = self.client.post(fav.edit_url(), {'name': 'xxx', 'listed': True})
        assert r.status_code == 302
        c = Collection.objects.get(id=fav.id)
        assert unicode(c.name) == 'xxx'

    def test_edit_addons_get(self):
        self.create_collection()
        url = reverse('collections.edit_addons', args=['admin', self.slug])
        r = self.client.get(url, follow=True)
        assert r.status_code == 405

    def test_edit_addons_post(self):
        self.create_collection()
        url = reverse('collections.edit_addons',
                      args=['admin', self.slug])
        r = self.client.post(url, {'addon': 3615}, follow=True)
        addon = Collection.objects.filter(slug=self.slug)[0].addons.all()[0]
        assert addon.id == 3615
        doc = pq(r.content)('.success')
        assert doc('h2').text() == 'Collection updated!'
        assert doc('p').text() == 'View your collection to see the changes.'

    def test_delete(self):
        # Deletion requires explicit confirmation via 'sure'.
        self.create_collection()
        assert len(Collection.objects.filter(slug=self.slug)) == 1
        url = reverse('collections.delete',
                      args=['admin', self.slug])
        self.client.post(url, dict(sure=0))
        assert len(Collection.objects.filter(slug=self.slug)) == 1
        self.client.post(url, dict(sure='1'))
        assert len(Collection.objects.filter(slug=self.slug)) == 0

    def test_no_xss_in_delete_confirm_page(self):
        name = '"><script>alert(/XSS/);</script>'
        self.create_collection(name=name)
        collection = Collection.objects.get(slug=self.slug)
        assert collection.name == name
        url = reverse('collections.delete', args=['admin', collection.slug, ])
        r = self.client.get(url)
        # Fix: same entity-unescaping corruption as test_no_xss_in_edit_page;
        # assert on the escaped rendering, not the raw payload.
        self.assertContains(
            r,
            '&#34;&gt;&lt;script&gt;alert(/XSS/);&lt;/script&gt;'
        )
        assert name not in r.content

    def test_form_uneditable_slug(self):
        """
        Editing a mobile or favorite collection should have an uneditable slug.
        """
        u = UserProfile.objects.get(username='admin')
        Collection(author=u, slug='mobile', type=amo.COLLECTION_MOBILE).save()
        url = reverse('collections.edit', args=['admin', 'mobile'])
        r = self.client.get(url, follow=True)
        doc = pq(r.content)
        assert len(doc('#id_slug')) == 0

    def test_form_uneditable_slug_submit(self):
        """
        Ignore the slug request change, if some jackass thinks he can change
        it.
        """
        u = UserProfile.objects.get(username='admin')
        Collection(author=u, slug='mobile', type=amo.COLLECTION_MOBILE).save()
        url = reverse('collections.edit', args=['admin', 'mobile'])
        self.client.post(url, {'name': 'HALP', 'slug': 'halp', 'listed': True},
                         follow=True)
        assert not Collection.objects.filter(slug='halp', author=u)
        assert Collection.objects.filter(slug='mobile', author=u)

    def test_no_changing_owners(self):
        # Admin:Curation lets you edit mozilla's collections, but the
        # original author must remain unchanged.
        self.login_admin()
        self.create_collection()
        mozilla = UserProfile.objects.get(username='mozilla')
        collection = Collection.objects.get(slug=self.slug)
        collection.update(author=mozilla)
        self.login_regular()
        self.grant_permission(
            UserProfile.objects.get(email='regular@mozilla.com'),
            'Admin:Curation')
        response = self.client.post(
            collection.edit_url(),
            {'name': 'new name', 'slug': self.slug, 'listed': True},
            follow=True)
        assert response.status_code == 200
        collection.reload()
        assert unicode(collection.name) == 'new name'
class TestChangeAddon(TestCase):
    """Adding/removing add-ons via the collections.alter view."""
    fixtures = ['users/test_backends']

    def setUp(self):
        super(TestChangeAddon, self).setUp()
        self.client.login(email='jbalogh@mozilla.com')
        self.add = reverse('collections.alter',
                           args=['jbalogh', 'mobile', 'add'])
        self.remove = reverse('collections.alter',
                              args=['jbalogh', 'mobile', 'remove'])
        self.flig = Collection.objects.create(author_id=9945, slug='xxx')
        self.flig_add = reverse('collections.alter',
                                args=['fligtar', 'xxx', 'add'])
        self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)

    def check_redirect(self, request):
        """Assert we were bounced to the ajax list view for self.addon."""
        expected = '{0}?addon_id={1}'.format(
            reverse('collections.ajax_list'), self.addon.id)
        self.assert3xx(request, expected)

    def test_login_required(self):
        self.client.logout()
        self.assertLoginRedirects(self.client.post(self.add), to=self.add)

    def test_post_required(self):
        response = self.client.get(self.add)
        assert response.status_code == 405

    def test_ownership(self):
        # Altering someone else's collection is forbidden.
        response = self.client.post(self.flig_add)
        assert response.status_code == 403

    def test_contributer(self):
        # Contributors may add to the featured-themes collection even though
        # they don't own it.
        with override_settings(COLLECTION_FEATURED_THEMES_ID=self.flig.id):
            user = UserProfile.objects.get(id=4043307)
            self.grant_permission(user, 'Collections:Contribute')
            response = self.client.post_ajax(
                self.flig_add, {'addon_id': self.addon.id})
            self.check_redirect(response)

    def test_no_addon(self):
        response = self.client.post(self.add)
        assert response.status_code == 400

    def test_add_success(self):
        response = self.client.post_ajax(
            self.add, {'addon_id': self.addon.id})
        self.check_redirect(response)
        collection = Collection.objects.get(
            author__username='jbalogh', slug='mobile')
        assert self.addon in collection.addons.all()
        assert collection.addons.count() == 1

    def test_add_secretly(self):
        """
        When we add to a private collection, make sure we don't log anything.
        """
        self.client.post_ajax(self.add, {'addon_id': self.addon.id})
        # There should be no log objects for this add-on
        assert len(ActivityLog.objects.for_addons(self.addon)) == 0

    def test_add_existing(self):
        # Adding the same add-on twice must not create a duplicate entry.
        response = self.client.post_ajax(
            self.add, {'addon_id': self.addon.id})
        self.check_redirect(response)
        response = self.client.post_ajax(
            self.add, {'addon_id': self.addon.id})
        self.check_redirect(response)
        collection = Collection.objects.get(
            author__username='jbalogh', slug='mobile')
        assert self.addon in collection.addons.all()
        assert collection.addons.count() == 1

    def test_remove_secretly(self):
        """
        When we remove from a private collection, make sure we don't log
        anything.
        """
        self.client.post_ajax(self.add, {'addon_id': self.addon.id})
        self.client.post_ajax(self.remove, {'addon_id': self.addon.id})
        # There should be no log objects for this add-on
        assert len(ActivityLog.objects.for_addons(self.addon)) == 0

    def test_remove_success(self):
        response = self.client.post_ajax(
            self.add, {'addon_id': self.addon.id})
        self.check_redirect(response)
        response = self.client.post_ajax(
            self.remove, {'addon_id': self.addon.id})
        self.check_redirect(response)
        collection = Collection.objects.get(
            author__username='jbalogh', slug='mobile')
        assert collection.addons.count() == 0

    def test_remove_nonexistent(self):
        # Removing an add-on that was never added is a silent no-op.
        response = self.client.post_ajax(
            self.remove, {'addon_id': self.addon.id})
        self.check_redirect(response)
        collection = Collection.objects.get(
            author__username='jbalogh', slug='mobile')
        assert collection.addons.count() == 0

    def test_no_ajax_response(self):
        # A plain (non-ajax) POST redirects to the collection detail page.
        response = self.client.post(self.add, {'addon_id': self.addon.id},
                                    follow=True)
        self.assert3xx(response, reverse('collections.detail',
                                         args=['jbalogh', 'mobile']))
class AjaxTest(TestCase):
    """Ajax endpoints for listing/adding/removing collection add-ons."""
    fixtures = ('base/users', 'base/addon_3615',
                'base/addon_5299_gcal', 'base/collections')

    def setUp(self):
        super(AjaxTest, self).setUp()
        assert self.client.login(email='clouserw@gmail.com')
        self.user = UserProfile.objects.get(email='clouserw@gmail.com')
        self.other = UserProfile.objects.exclude(id=self.user.id)[0]

    def test_list_collections(self):
        # Collections containing the add-on come back marked as selected.
        response = self.client.get(
            reverse('collections.ajax_list') + '?addon_id=3615')
        doc = pq(response.content)
        assert doc('li.selected').attr('data-id') == '80'

    def test_add_collection(self):
        response = self.client.post_ajax(reverse('collections.ajax_add'),
                                         {'addon_id': 3615, 'id': 80},
                                         follow=True)
        doc = pq(response.content)
        assert doc('li.selected').attr('data-id') == '80'

    def test_bad_collection(self):
        # A non-numeric collection id is rejected.
        response = self.client.post(reverse('collections.ajax_add'),
                                    {'id': 'adfa'})
        assert response.status_code == 400

    def test_remove_collection(self):
        response = self.client.post(reverse('collections.ajax_remove'),
                                    {'addon_id': 1843, 'id': 80}, follow=True)
        assert len(pq(response.content)('li.selected')) == 0

    def test_new_collection(self):
        assert not Collection.objects.filter(slug='auniqueone')
        response = self.client.post(
            reverse('collections.ajax_new'),
            {'addon_id': 5299, 'name': 'foo', 'slug': 'auniqueone',
             'description': 'foobar', 'listed': True},
            follow=True)
        doc = pq(response.content)
        assert len(doc('li.selected')) == 1, (
            "The new collection is not selected.")
        assert Collection.objects.filter(slug='auniqueone')

    def test_add_other_collection(self):
        "403 when you try to add to a collection that isn't yours."
        c = Collection(author=self.other)
        c.save()
        response = self.client.post(reverse('collections.ajax_add'),
                                    {'addon_id': 3615, 'id': c.id},
                                    follow=True)
        assert response.status_code == 403

    def test_remove_other_collection(self):
        "403 when you try to add to a collection that isn't yours."
        c = Collection(author=self.other)
        c.save()
        response = self.client.post(reverse('collections.ajax_remove'),
                                    {'addon_id': 3615, 'id': c.id},
                                    follow=True)
        assert response.status_code == 403

    def test_ajax_list_no_addon_id(self):
        # addon_id is mandatory for the list endpoint.
        assert self.client.get(
            reverse('collections.ajax_list')).status_code == 400

    def test_ajax_list_bad_addon_id(self):
        url = reverse('collections.ajax_list') + '?addon_id=fff'
        assert self.client.get(url).status_code == 400
class TestWatching(TestCase):
    """Watching (following) a collection toggles a CollectionWatcher row."""
    fixtures = ['base/users', 'base/collection_57181']

    def setUp(self):
        super(TestWatching, self).setUp()
        self.collection = c = Collection.objects.get(id=57181)
        self.url = reverse('collections.watch',
                           args=[c.author.username, c.slug])
        assert self.client.login(email='clouserw@gmail.com')
        self.qs = CollectionWatcher.objects.filter(user__username='clouserw',
                                                   collection=57181)
        assert self.qs.count() == 0

    def test_watch(self):
        response = self.client.post(self.url, follow=True)
        assert response.status_code == 200
        assert self.qs.count() == 1

    def test_unwatch(self):
        # Posting twice toggles the watch back off.
        for _ in range(2):
            response = self.client.post(self.url, follow=True)
            assert response.status_code == 200
        assert self.qs.count() == 0

    def test_amouser_watching(self):
        # The watched collection id shows up on request.user.watching.
        response = self.client.post(self.url, follow=True)
        assert response.status_code == 200
        response = self.client.get('/en-US/firefox/')
        assert tuple(response.context['user'].watching) == (57181,)

    def test_ajax_response(self):
        # Ajax watch requests answer with a JSON status payload.
        response = self.client.post_ajax(self.url, follow=True)
        assert response.status_code == 200
        assert json.loads(response.content) == {'watching': True}
class TestCollectionForm(TestCase):
    """Validation tests for forms.CollectionForm: icon uploads, denied
    names, link-free descriptions and the spam honeypot field."""
    fixtures = ['base/collection_57181', 'users/test_backends']
    @patch('olympia.amo.models.ModelBase.update')
    def test_icon(self, update_mock):
        # A valid PNG icon passes validation and save() updates the model.
        collection = Collection.objects.get(pk=57181)
        # TODO(andym): altering this form is too complicated, can we simplify?
        form = forms.CollectionForm(
            {'listed': collection.listed,
             'slug': collection.slug,
             'name': collection.name},
            instance=collection,
            files={'icon': get_uploaded_file('transparent.png')},
            initial={'author': collection.author,
                     'application': collection.application})
        assert form.is_valid()
        form.save()
        assert update_mock.called
    def test_icon_invalid_though_content_type_is_correct(self):
        collection = Collection.objects.get(pk=57181)
        # This file is not an image at all, but we'll try to upload it with an
        # image mime type. It should not work.
        fake_image = get_uploaded_file('non-image.png')
        assert fake_image.content_type == 'image/png'
        form = forms.CollectionForm(
            {'listed': collection.listed,
             'slug': collection.slug,
             'name': collection.name},
            instance=collection,
            files={'icon': fake_image},
            initial={'author': collection.author,
                     'application': collection.application})
        assert not form.is_valid()
        assert form.errors == {'icon': [u'Icons must be either PNG or JPG.']}
    def test_icon_invalid_gif(self):
        # GIF icons are rejected regardless of content.
        collection = Collection.objects.get(pk=57181)
        form = forms.CollectionForm(
            {'listed': collection.listed,
             'slug': collection.slug,
             'name': collection.name},
            instance=collection,
            files={'icon': get_uploaded_file('animated.gif')},
            initial={'author': collection.author,
                     'application': collection.application})
        assert not form.is_valid()
        assert form.errors == {'icon': [u'Icons must be either PNG or JPG.']}
    def test_icon_invalid_animated(self):
        # An animated PNG is a real PNG but is still rejected for icons.
        collection = Collection.objects.get(pk=57181)
        form = forms.CollectionForm(
            {'listed': collection.listed,
             'slug': collection.slug,
             'name': collection.name},
            instance=collection,
            files={'icon': get_uploaded_file('animated.png')},
            initial={'author': collection.author,
                     'application': collection.application})
        assert not form.is_valid()
        assert form.errors == {'icon': [u'Icons cannot be animated.']}
    def test_denied_name(self):
        # A name on the denylist is rejected by clean_name().
        form = forms.CollectionForm()
        form.cleaned_data = {'name': 'IE6Fan'}
        with self.assertRaisesRegexp(ValidationError,
                                     'This name cannot be used.'):
            form.clean_name()
    def test_denied_name_contains(self):
        # Denylist matching also triggers on substrings, not just exact names.
        form = forms.CollectionForm()
        form.cleaned_data = {'name': 'IE6fanBoy'}
        with self.assertRaisesRegexp(ValidationError,
                                     'This name cannot be used.'):
            form.clean_name()
    def test_clean_description(self):
        # No links, no problems.
        form = forms.CollectionForm()
        form.cleaned_data = {'description': 'some description, no links!'}
        assert form.clean_description() == 'some description, no links!'
        # No links allowed: raise on text links.
        form.cleaned_data = {'description': 'http://example.com'}
        with self.assertRaisesRegexp(ValidationError, 'No links are allowed'):
            form.clean_description()
        # No links allowed: raise on URLs.
        form.cleaned_data = {
            'description': '<a href="http://example.com">example.com</a>'}
        with self.assertRaisesRegexp(ValidationError, 'No links are allowed'):
            form.clean_description()
    def test_honeypot_not_required(self):
        # Omitting the honeypot field entirely is fine for human submissions.
        author = UserProfile.objects.get(pk=9945)
        form = forms.CollectionForm(
            initial={'author': author},
            data={
                'name': 'test collection',
                'slug': 'test-collection',
                'listed': False,
            }
        )
        assert form.is_valid()
    def test_honeypot_fails_on_entry(self):
        # Any value in the hidden 'your_name' field marks the POST as spam.
        author = UserProfile.objects.get(pk=9945)
        form = forms.CollectionForm(
            initial={'author': author},
            data={
                'name': 'test collection',
                'slug': 'test-collection',
                'listed': False,
                'your_name': "I'm a super dumb bot",
            }
        )
        assert not form.is_valid()
        assert 'spam' in form.errors['__all__'][0]
    @patch('olympia.bandwagon.forms.statsd.incr')
    def test_honeypot_statsd_incr(self, mock_incr):
        # Honeypot hits are counted in statsd for monitoring.
        author = UserProfile.objects.get(pk=9945)
        form = forms.CollectionForm(
            initial={'author': author},
            data={
                'name': 'test collection',
                'slug': 'test-collection',
                'listed': False,
                'your_name': "I'm a super dumb bot",
            }
        )
        assert not form.is_valid()
        mock_incr.assert_any_call('collections.honeypotted')
class TestCollectionViewSetList(TestCase):
    """API list endpoint: returns only the user's own collections, most
    recently modified first."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.url = reverse_ns(
            'collection-list', kwargs={'user_pk': self.user.pk})
        super(TestCollectionViewSetList, self).setUp()

    def test_basic(self):
        collection_factory(author=self.user)
        collection_factory(author=self.user)
        collection_factory(author=self.user)
        collection_factory(author=user_factory())  # Not our collection.
        # Fix: this was a bare comparison expression (missing `assert`), so
        # the sanity check on the fixture count never actually ran.
        assert Collection.objects.all().count() == 4
        self.client.login_api(self.user)
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Only the three collections owned by self.user are listed.
        assert len(response.data['results']) == 3

    def test_no_auth(self):
        collection_factory(author=self.user)
        response = self.client.get(self.url)
        assert response.status_code == 401

    def test_different_user(self):
        random_user = user_factory()
        other_url = reverse_ns('collection-list',
                               kwargs={'user_pk': random_user.pk})
        collection_factory(author=random_user)
        self.client.login_api(self.user)
        response = self.client.get(other_url)
        assert response.status_code == 403

    def test_admin(self):
        # No collection/admin permission grants listing another user's
        # collections through this endpoint.
        random_user = user_factory()
        other_url = reverse_ns('collection-list',
                               kwargs={'user_pk': random_user.pk})
        collection_factory(author=random_user)
        self.grant_permission(self.user, 'Collections:Edit')
        self.client.login_api(self.user)
        response = self.client.get(other_url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Collections:Contribute')
        self.client.login_api(self.user)
        response = self.client.get(other_url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Admin:Curation')
        response = self.client.get(other_url)
        assert response.status_code == 403

    def test_404(self):
        # Invalid user.
        url = reverse_ns(
            'collection-list', kwargs={'user_pk': self.user.pk + 66})
        # Not logged in.
        response = self.client.get(url)
        assert response.status_code == 401
        # Logged in
        self.client.login_api(self.user)
        response = self.client.get(url)
        assert response.status_code == 404

    def test_sort(self):
        col_a = collection_factory(author=self.user)
        col_b = collection_factory(author=self.user)
        col_c = collection_factory(author=self.user)
        col_a.update(modified=self.days_ago(3))
        col_b.update(modified=self.days_ago(1))
        col_c.update(modified=self.days_ago(6))
        self.client.login_api(self.user)
        response = self.client.get(self.url)
        assert response.status_code == 200
        # should be b a c because 1, 3, 6 days ago.
        assert response.data['results'][0]['uuid'] == col_b.uuid
        assert response.data['results'][1]['uuid'] == col_a.uuid
        assert response.data['results'][2]['uuid'] == col_c.uuid

    def test_with_addons_is_ignored(self):
        # ?with_addons is only honoured by the detail endpoint.
        collection_factory(author=self.user)
        self.client.login_api(self.user)
        response = self.client.get(self.url + '?with_addons')
        assert response.status_code == 200, response.data
        assert 'addons' not in response.data['results'][0]
class TestCollectionViewSetDetail(TestCase):
    """Read-only detail endpoint of the collection API: visibility rules,
    slug-only lookup and the ?with_addons embedding."""
    client_class = APITestClient
    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.url = self._get_url(self.user, self.collection)
        super(TestCollectionViewSetDetail, self).setUp()
    def _get_url(self, user, collection):
        # Detail URLs are keyed on (user pk, collection slug).
        return reverse_ns(
            'collection-detail', kwargs={
                'user_pk': user.pk, 'slug': collection.slug})
    def test_basic(self):
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response.data['id'] == self.collection.id
    def test_no_id_lookup(self):
        # Lookup is strictly by slug: passing a numeric pk in the slug
        # position 404s even though a collection with that pk exists.
        collection = collection_factory(author=self.user, slug='999')
        id_url = reverse_ns(
            'collection-detail', kwargs={
                'user_pk': self.user.pk, 'slug': collection.id})
        response = self.client.get(id_url)
        assert response.status_code == 404
        slug_url = reverse_ns(
            'collection-detail', kwargs={
                'user_pk': self.user.pk, 'slug': collection.slug})
        response = self.client.get(slug_url)
        assert response.status_code == 200
        assert response.data['id'] == collection.id
    def test_not_listed(self):
        # Unlisted collections are hidden from anonymous and random users.
        self.collection.update(listed=False)
        # not logged in
        response = self.client.get(self.url)
        assert response.status_code == 401
        # logged in
        random_user = user_factory()
        self.client.login_api(random_user)
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_not_listed_self(self):
        # The owner can always see their own unlisted collection.
        self.collection.update(listed=False)
        self.client.login_api(self.user)
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response.data['id'] == self.collection.id
    def test_not_listed_admin(self):
        # Admin/collection permissions alone don't expose other users'
        # unlisted collections; access opens only once the author is the
        # special 'mozilla' user.
        random_user = user_factory()
        collection = collection_factory(author=random_user, listed=False)
        self.grant_permission(self.user, 'Collections:Edit')
        self.client.login_api(self.user)
        response = self.client.get(self._get_url(random_user, collection))
        assert response.status_code == 403
        self.grant_permission(self.user, 'Collections:Contribute')
        self.client.login_api(self.user)
        response = self.client.get(self._get_url(random_user, collection))
        assert response.status_code == 403
        self.grant_permission(self.user, 'Admin:Curation')
        response = self.client.get(self._get_url(random_user, collection))
        assert response.status_code == 403
        random_user.update(username='mozilla')
        response = self.client.get(self._get_url(random_user, collection))
        assert response.status_code == 200
        assert response.data['id'] == collection.pk
    def test_not_listed_contributor(self):
        # Contributors only get access while the collection is configured as
        # the featured-themes collection (inside the override block).
        self.collection.update(listed=False)
        random_user = user_factory()
        setting_key = 'COLLECTION_FEATURED_THEMES_ID'
        with override_settings(**{setting_key: self.collection.id}):
            self.client.login_api(random_user)
            # Not their collection so not allowed.
            response = self.client.get(self.url)
            assert response.status_code == 403
            self.grant_permission(random_user, 'Collections:Contribute')
            # Now they can access it.
            response = self.client.get(self.url)
            assert response.status_code == 200
            assert response.data['id'] == self.collection.id
        # Double check only the COLLECTION_FEATURED_THEMES_ID is allowed.
        response = self.client.get(self.url)
        assert response.status_code == 403
        # Even on a mozilla-owned collection.
        self.collection.author.update(username='mozilla')
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_404(self):
        # Invalid user.
        response = self.client.get(reverse_ns(
            'collection-detail', kwargs={
                'user_pk': self.user.pk + 66, 'slug': self.collection.slug}))
        assert response.status_code == 404
        # Invalid collection.
        response = self.client.get(reverse_ns(
            'collection-detail', kwargs={
                'user_pk': self.user.pk, 'slug': 'hello'}))
        assert response.status_code == 404
    def test_with_addons(self):
        # ?with_addons embeds the collection's add-ons in the response,
        # paginated according to DRF's PAGE_SIZE.
        addon = addon_factory()
        self.collection.add_addon(addon)
        response = self.client.get(self.url + '?with_addons')
        assert response.status_code == 200
        assert response.data['id'] == self.collection.id
        addon_data = response.data['addons'][0]['addon']
        assert addon_data['id'] == addon.id
        assert isinstance(addon_data['name'], dict)
        assert addon_data['name'] == {'en-US': unicode(addon.name)}
        # Now test the limit of addons returned
        self.collection.add_addon(addon_factory())
        self.collection.add_addon(addon_factory())
        self.collection.add_addon(addon_factory())
        response = self.client.get(self.url + '?with_addons')
        assert len(response.data['addons']) == 4
        patched_drf_setting = dict(settings.REST_FRAMEWORK)
        patched_drf_setting['PAGE_SIZE'] = 3
        with django.test.override_settings(REST_FRAMEWORK=patched_drf_setting):
            response = self.client.get(self.url + '?with_addons')
            assert len(response.data['addons']) == 3
    def test_with_addons_and_wrap_outgoing_links_and_lang(self):
        # With a lang and wrap_outgoing_links, translated fields are
        # {lang: value} dicts and URLs are wrapped through the outgoing
        # redirector; the l10n_flat_input_output gate flattens them back to
        # plain strings.
        addon = addon_factory(
            support_url='http://support.example.com',
            homepage='http://homepage.example.com')
        self.collection.add_addon(addon)
        response = self.client.get(
            self.url + '?with_addons&lang=en-US&wrap_outgoing_links')
        assert response.status_code == 200
        assert response.data['id'] == self.collection.id
        addon_data = response.data['addons'][0]['addon']
        assert addon_data['id'] == addon.id
        assert isinstance(addon_data['name']['en-US'], basestring)
        assert addon_data['name'] == {'en-US': unicode(addon.name)}
        assert isinstance(addon_data['homepage']['en-US'], basestring)
        assert addon_data['homepage'] == {
            'en-US': get_outgoing_url(unicode(addon.homepage))}
        assert isinstance(addon_data['support_url']['en-US'], basestring)
        assert addon_data['support_url'] == {
            'en-US': get_outgoing_url(unicode(addon.support_url))}
        overridden_api_gates = {
            api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)}
        with override_settings(DRF_API_GATES=overridden_api_gates):
            response = self.client.get(
                self.url + '?with_addons&lang=en-US&wrap_outgoing_links')
            assert response.status_code == 200
            assert response.data['id'] == self.collection.id
            addon_data = response.data['addons'][0]['addon']
            assert addon_data['id'] == addon.id
            assert isinstance(addon_data['name'], basestring)
            assert addon_data['name'] == unicode(addon.name)
            assert isinstance(addon_data['homepage'], basestring)
            assert addon_data['homepage'] == get_outgoing_url(
                unicode(addon.homepage))
            assert isinstance(addon_data['support_url'], basestring)
            assert addon_data['support_url'] == get_outgoing_url(
                unicode(addon.support_url))
class CollectionViewSetDataMixin(object):
    """Shared fixtures and validation tests for collection write endpoints.

    Subclasses implement send() (the HTTP verb under test) and get_url();
    this mixin supplies a known-good payload plus tests for auth and
    field-validation behavior common to create and patch.
    """
    client_class = APITestClient
    # A fully valid payload. Localized fields are {lang-code: value} dicts.
    data = {
        'name': {'fr': u'lé $túff', 'en-US': u'$tuff'},
        'description': {'fr': u'Un dis une dát', 'en-US': u'dis n dat'},
        'slug': u'stuff',
        'public': True,
        'default_locale': 'fr',
    }

    def setUp(self):
        self.url = self.get_url(self.user)
        super(CollectionViewSetDataMixin, self).setUp()

    def send(self, url=None, data=None):
        """Issue the request under test. Implemented by subclasses."""
        raise NotImplementedError

    def get_url(self, user):
        """Return the endpoint URL for `user`. Implemented by subclasses."""
        raise NotImplementedError

    @property
    def user(self):
        # Lazily create and cache the acting user so get_url() can use it
        # before setUp() has run.
        if not hasattr(self, '_user'):
            self._user = user_factory()
        return self._user

    def check_data(self, collection, data, json):
        """Assert both the response payload (`json` dict, shadows the module
        of the same name here) and the stored collection reflect `data`."""
        for prop, value in data.iteritems():
            assert json[prop] == value
        # Activate the collection's locale so translated fields resolve to
        # the 'fr' values we sent.
        with self.activate('fr'):
            collection = collection.reload()
        assert collection.name == data['name']['fr']
        assert collection.description == data['description']['fr']
        assert collection.slug == data['slug']
        assert collection.listed == data['public']
        assert collection.default_locale == data['default_locale']

    def test_no_auth(self):
        # Anonymous writes are rejected.
        response = self.send()
        assert response.status_code == 401

    def test_update_name_invalid(self):
        self.client.login_api(self.user)
        data = dict(self.data)
        # Sending a single value for localized field is now forbidden.
        data.update(name=u' ')
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'name': ['You must provide an object of {lang-code:value}.']}
        # Passing a dict of localised values
        data.update(name={'en-US': u' '})
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'name': ['Name cannot be empty.']}

    @override_settings(DRF_API_GATES={
        api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)})
    def test_update_name_invalid_flat_input(self):
        # Under the flat-l10n gate a bare string is accepted as input, so
        # both flat and dict forms hit the emptiness validation instead.
        self.client.login_api(self.user)
        data = dict(self.data)
        data.update(name=u' ')
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'name': ['Name cannot be empty.']}
        # Passing a dict of localised values
        data.update(name={'en-US': u' '})
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'name': ['Name cannot be empty.']}

    def test_biography_no_links(self):
        # Descriptions may not contain HTML links; without the flat gate a
        # bare string additionally fails the {lang-code:value} requirement.
        self.client.login_api(self.user)
        data = dict(self.data)
        data.update(description='<a href="https://google.com">google</a>')
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'description': [
                'You must provide an object of {lang-code:value}.']}
        data.update(description={
            'en-US': '<a href="https://google.com">google</a>'})
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'description': ['No links are allowed.']}

    @override_settings(DRF_API_GATES={
        api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)})
    def test_biography_no_links_flat_input(self):
        self.client.login_api(self.user)
        data = dict(self.data)
        data.update(description='<a href="https://google.com">google</a>')
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'description': ['No links are allowed.']}
        data.update(description={
            'en-US': '<a href="https://google.com">google</a>'})
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'description': ['No links are allowed.']}

    def test_slug_valid(self):
        # Slugs are restricted to url-safe characters.
        self.client.login_api(self.user)
        data = dict(self.data)
        data.update(slug=u'£^@')
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'slug': [u'The custom URL must consist of letters, numbers, '
                     u'underscores or hyphens.']}

    def test_slug_unique(self):
        # Slugs must be unique per author.
        collection_factory(author=self.user, slug='edam')
        self.client.login_api(self.user)
        data = dict(self.data)
        data.update(slug=u'edam')
        response = self.send(data=data)
        assert response.status_code == 400
        assert u'This custom URL is already in use' in (
            ','.join(json.loads(response.content)['non_field_errors']))
class TestCollectionViewSetCreate(CollectionViewSetDataMixin, TestCase):
    """POST (create) tests for the user collection endpoint."""

    def send(self, url=None, data=None):
        return self.client.post(url or self.url, data or self.data)

    def get_url(self, user):
        return reverse_ns('collection-list', kwargs={'user_pk': user.pk})

    def test_basic_create(self):
        self.client.login_api(self.user)
        response = self.send()
        assert response.status_code == 201, response.content
        collection = Collection.objects.get()
        self.check_data(collection, self.data, json.loads(response.content))
        # The new collection is owned by the requester and gets a uuid.
        assert collection.author.id == self.user.id
        assert collection.uuid

    def test_create_minimal(self):
        # Only name and slug are required.
        self.client.login_api(self.user)
        data = {
            'name': {'en-US': u'this'},
            'slug': u'minimal',
        }
        response = self.send(data=data)
        assert response.status_code == 201, response.content
        collection = Collection.objects.get()
        assert collection.name == data['name']['en-US']
        assert collection.slug == data['slug']
        # Double-check trying to create with a non-dict name now fails
        data = {
            'name': u'this',
            'slug': u'minimal',
        }
        response = self.send(data=data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'name': ['You must provide an object of {lang-code:value}.']}

    @override_settings(DRF_API_GATES={
        api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)})
    def test_create_minimal_flat_input(self):
        # Under the flat-l10n gate, a bare string name is accepted.
        self.client.login_api(self.user)
        data = {
            'name': u'this',
            'slug': u'minimal',
        }
        response = self.send(data=data)
        assert response.status_code == 201, response.content
        collection = Collection.objects.get()
        assert collection.name == data['name']
        assert collection.slug == data['slug']

    def test_create_cant_set_readonly(self):
        # Read-only and unknown fields in the payload are silently ignored.
        self.client.login_api(self.user)
        data = {
            'name': {'en-US': u'this'},
            'slug': u'minimal',
            'addon_count': 99,  # In the serializer but read-only.
            'subscribers': 999,  # Not in the serializer.
        }
        response = self.send(data=data)
        assert response.status_code == 201, response.content
        collection = Collection.objects.get()
        assert collection.addon_count != 99
        assert collection.subscribers != 999

    def test_different_account(self):
        # Can't create a collection under someone else's account.
        self.client.login_api(self.user)
        different_user = user_factory()
        url = self.get_url(different_user)
        response = self.send(url=url)
        assert response.status_code == 403

    def test_admin_create_fails(self):
        # No combination of curation permissions allows creating a
        # collection on behalf of another (non-mozilla) user.
        self.grant_permission(self.user, 'Collections:Edit')
        self.client.login_api(self.user)
        random_user = user_factory()
        url = self.get_url(random_user)
        response = self.send(url=url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Collections:Contribute')
        response = self.send(url=url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Admin:Curation')
        response = self.send(url=url)
        assert response.status_code == 403

    def test_create_numeric_slug(self):
        # Purely numeric slugs are allowed.
        self.client.login_api(self.user)
        data = {
            'name': {'en-US': u'this'},
            'slug': u'1',
        }
        response = self.send(data=data)
        assert response.status_code == 201, response.content
        collection = Collection.objects.get()
        assert collection.name == data['name']['en-US']
        assert collection.slug == data['slug']
class TestCollectionViewSetPatch(CollectionViewSetDataMixin, TestCase):
    """PATCH (update) tests for the user collection detail endpoint."""

    def setUp(self):
        # A pre-existing collection owned by self.user is the patch target.
        self.collection = collection_factory(author=self.user)
        super(TestCollectionViewSetPatch, self).setUp()

    def send(self, url=None, data=None):
        return self.client.patch(url or self.url, data or self.data)

    def get_url(self, user):
        return reverse_ns(
            'collection-detail', kwargs={
                'user_pk': user.pk, 'slug': self.collection.slug})

    def test_basic_patch(self):
        self.client.login_api(self.user)
        original = self.client.get(self.url).content
        response = self.send()
        assert response.status_code == 200
        assert response.content != original
        self.collection = self.collection.reload()
        self.check_data(self.collection, self.data,
                        json.loads(response.content))

    def test_different_account(self):
        # Can't patch a collection owned by somebody else.
        self.client.login_api(self.user)
        different_user = user_factory()
        self.collection.update(author=different_user)
        url = self.get_url(different_user)
        response = self.send(url=url)
        assert response.status_code == 403

    def test_admin_patch(self):
        # Curation permissions alone don't allow patching another user's
        # collection — the collection must belong to the 'mozilla' user.
        self.grant_permission(self.user, 'Collections:Edit')
        self.client.login_api(self.user)
        random_user = user_factory()
        self.collection.update(author=random_user)
        url = self.get_url(random_user)
        original = self.client.get(url).content
        response = self.send(url=url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Collections:Contribute')
        response = self.send(url=url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Admin:Curation')
        response = self.send(url=url)
        assert response.status_code == 403
        # Once the owner is 'mozilla', the curator can patch it.
        random_user.update(username='mozilla')
        response = self.send(url=url)
        assert response.status_code == 200
        assert response.content != original
        self.collection = self.collection.reload()
        self.check_data(self.collection, self.data,
                        json.loads(response.content))
        # Just double-check we didn't steal their collection
        assert self.collection.author.id == random_user.id

    def test_contributor_patch_fails(self):
        # Contributors can read the special featured-themes collection but
        # not modify its metadata.
        self.client.login_api(self.user)
        random_user = user_factory()
        self.collection.update(author=random_user)
        self.grant_permission(random_user, 'Collections:Contribute')
        url = self.get_url(random_user)
        setting_key = 'COLLECTION_FEATURED_THEMES_ID'
        with override_settings(**{setting_key: self.collection.id}):
            # Check setup is good and we can access the collection okay.
            get_response = self.client.get(url)
            assert get_response.status_code == 200
            # But can't patch it.
            response = self.send(url=url)
            assert response.status_code == 403
class TestCollectionViewSetDelete(TestCase):
    """DELETE tests for the user collection detail endpoint."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.url = self.get_url(self.user)
        super(TestCollectionViewSetDelete, self).setUp()

    def get_url(self, user):
        return reverse_ns(
            'collection-detail', kwargs={
                'user_pk': user.pk, 'slug': self.collection.slug})

    def test_delete(self):
        # Owners can delete their own collections.
        self.client.login_api(self.user)
        response = self.client.delete(self.url)
        assert response.status_code == 204
        assert not Collection.objects.filter(id=self.collection.id).exists()

    def test_no_auth(self):
        response = self.client.delete(self.url)
        assert response.status_code == 401

    def test_different_account_fails(self):
        self.client.login_api(self.user)
        different_user = user_factory()
        self.collection.update(author=different_user)
        url = self.get_url(different_user)
        response = self.client.delete(url)
        assert response.status_code == 403

    def test_admin_delete(self):
        # No curation permission combination allows deleting another user's
        # collection — not even for mozilla-owned collections.
        self.grant_permission(self.user, 'Collections:Edit')
        self.client.login_api(self.user)
        random_user = user_factory()
        self.collection.update(author=random_user)
        url = self.get_url(random_user)
        response = self.client.delete(url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Collections:Contribute')
        response = self.client.delete(url)
        assert response.status_code == 403
        self.grant_permission(self.user, 'Admin:Curation')
        response = self.client.delete(url)
        assert response.status_code == 403
        assert Collection.objects.filter(id=self.collection.id).exists()
        # Curators can't delete collections even owned by mozilla.
        random_user.update(username='mozilla')
        response = self.client.delete(url)
        assert response.status_code == 403
        assert Collection.objects.filter(id=self.collection.id).exists()

    def test_contributor_fails(self):
        # Contributors may read the special featured-themes collection but
        # not delete it.
        self.client.login_api(self.user)
        different_user = user_factory()
        self.collection.update(author=different_user)
        self.grant_permission(different_user, 'Collections:Contribute')
        url = self.get_url(different_user)
        setting_key = 'COLLECTION_FEATURED_THEMES_ID'
        with override_settings(**{setting_key: self.collection.id}):
            # Check setup is good and we can access the collection okay.
            get_response = self.client.get(url)
            assert get_response.status_code == 200
            # But can't delete it.
            response = self.client.delete(url)
            assert response.status_code == 403
class CollectionAddonViewSetMixin(object):
    """Common visibility/permission tests for collection-addon endpoints.

    Subclasses define self.url, self.user, self.collection in setUp() and
    implement check_response(); send() defaults to GET and is overridden
    for other verbs.
    """

    def check_response(self, response):
        """Assert a successful response. Implemented by subclasses."""
        raise NotImplementedError

    def send(self, url):
        # List and Detail do this. Override for other verbs.
        return self.client.get(url)

    def test_basic(self):
        self.check_response(self.send(self.url))

    def test_not_listed_not_logged_in(self):
        # Unlisted collections are hidden from anonymous requests.
        self.collection.update(listed=False)
        assert self.send(self.url).status_code == 401

    def test_not_listed_different_user(self):
        # ...and from any authenticated user other than the owner.
        self.collection.update(listed=False)
        stranger = user_factory()
        self.client.login_api(stranger)
        assert self.send(self.url).status_code == 403

    def test_not_listed_self(self):
        # The owner can still see their own unlisted collection.
        self.collection.update(listed=False)
        self.client.login_api(self.user)
        self.check_response(self.send(self.url))

    def test_not_listed_admin(self):
        self.collection.update(listed=False)
        curator = user_factory()
        self.grant_permission(curator, 'Collections:Edit')
        self.client.login_api(curator)
        assert self.send(self.url).status_code == 403
        # Piling on further curation permissions is still not enough...
        for extra_perm in ('Collections:Contribute', 'Admin:Curation'):
            self.grant_permission(curator, extra_perm)
            assert self.send(self.url).status_code == 403
        # ...until the collection belongs to the special mozilla user.
        self.collection.author.update(username='mozilla')
        self.check_response(self.send(self.url))

    def test_contributor(self):
        self.collection.update(listed=False)
        contributor = user_factory()
        self.grant_permission(contributor, 'Collections:Contribute')
        self.client.login_api(contributor)
        # should fail as self.collection isn't special
        assert self.send(self.url).status_code == 403
        # But now with special collection will work
        setting_key = 'COLLECTION_FEATURED_THEMES_ID'
        with override_settings(**{setting_key: self.collection.id}):
            self.check_response(self.send(self.url))
class TestCollectionAddonViewSetList(CollectionAddonViewSetMixin, TestCase):
    """GET (list) tests for add-ons inside a collection, including sorting
    and the filter= parameter for hidden/deleted add-ons."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        # Three public add-ons plus one disabled, one deleted and one
        # pending, to exercise default filtering.
        self.addon_a = addon_factory(name=u'anteater')
        self.addon_b = addon_factory(name=u'baboon')
        self.addon_c = addon_factory(name=u'cheetah')
        self.addon_disabled = addon_factory(name=u'antelope_disabled')
        self.addon_deleted = addon_factory(name=u'buffalo_deleted')
        self.addon_pending = addon_factory(name=u'pelican_pending')
        # Set a few more languages on our add-ons to test sorting
        # a bit better. https://github.com/mozilla/addons-server/issues/8354
        self.addon_a.name = {'de': u'Ameisenbär'}
        self.addon_a.save()
        self.addon_b.name = {'de': u'Pavian'}
        self.addon_b.save()
        self.addon_c.name = {'de': u'Gepard'}
        self.addon_c.save()
        self.collection.add_addon(self.addon_a)
        self.collection.add_addon(self.addon_disabled)
        self.collection.add_addon(self.addon_b)
        self.collection.add_addon(self.addon_deleted)
        self.collection.add_addon(self.addon_c)
        self.collection.add_addon(self.addon_pending)
        # Set up our filtered-out-by-default addons
        self.addon_disabled.update(disabled_by_user=True)
        self.addon_deleted.delete()
        self.addon_pending.current_version.all_files[0].update(
            status=amo.STATUS_AWAITING_REVIEW)
        self.url = reverse_ns(
            'collection-addon-list', kwargs={
                'user_pk': self.user.pk,
                'collection_slug': self.collection.slug})
        super(TestCollectionAddonViewSetList, self).setUp()

    def check_response(self, response):
        # Default listing only contains the three public add-ons.
        assert response.status_code == 200, self.url
        assert len(response.data['results']) == 3

    def test_404(self):
        # Invalid user.
        response = self.client.get(reverse_ns(
            'collection-addon-list', kwargs={
                'user_pk': self.user.pk + 66,
                'collection_slug': self.collection.slug}))
        assert response.status_code == 404
        # Invalid collection.
        response = self.client.get(reverse_ns(
            'collection-addon-list', kwargs={
                'user_pk': self.user.pk,
                'collection_slug': 'hello'}))
        assert response.status_code == 404

    def check_result_order(self, response, first, second, third):
        """Assert exactly three results in the given add-on order."""
        results = response.data['results']
        assert results[0]['addon']['id'] == first.id
        assert results[1]['addon']['id'] == second.id
        assert results[2]['addon']['id'] == third.id
        assert len(response.data['results']) == 3

    def test_sorting(self):
        self.addon_a.update(weekly_downloads=500)
        self.addon_b.update(weekly_downloads=1000)
        self.addon_c.update(weekly_downloads=100)
        self.client.login_api(self.user)
        # First default sort
        self.check_result_order(
            self.client.get(self.url),
            self.addon_b, self.addon_a, self.addon_c)
        # Popularity ascending
        self.check_result_order(
            self.client.get(self.url + '?sort=popularity'),
            self.addon_c, self.addon_a, self.addon_b)
        # Popularity descending (same as default)
        self.check_result_order(
            self.client.get(self.url + '?sort=-popularity'),
            self.addon_b, self.addon_a, self.addon_c)
        # Stagger the membership creation dates to test 'added' sorting.
        CollectionAddon.objects.get(
            collection=self.collection, addon=self.addon_a).update(
            created=self.days_ago(1))
        CollectionAddon.objects.get(
            collection=self.collection, addon=self.addon_b).update(
            created=self.days_ago(3))
        CollectionAddon.objects.get(
            collection=self.collection, addon=self.addon_c).update(
            created=self.days_ago(2))
        # Added ascending
        self.check_result_order(
            self.client.get(self.url + '?sort=added'),
            self.addon_b, self.addon_c, self.addon_a)
        # Added descending
        self.check_result_order(
            self.client.get(self.url + '?sort=-added'),
            self.addon_a, self.addon_c, self.addon_b)
        # Name ascending
        self.check_result_order(
            self.client.get(self.url + '?sort=name'),
            self.addon_a, self.addon_b, self.addon_c)
        # Name descending
        self.check_result_order(
            self.client.get(self.url + '?sort=-name'),
            self.addon_c, self.addon_b, self.addon_a)
        # Name ascending, German
        self.check_result_order(
            self.client.get(self.url + '?sort=name&lang=de'),
            self.addon_a, self.addon_c, self.addon_b)
        # Name descending, German
        self.check_result_order(
            self.client.get(self.url + '?sort=-name&lang=de'),
            self.addon_b, self.addon_c, self.addon_a)

    def test_only_one_sort_parameter_supported(self):
        response = self.client.get(self.url + '?sort=popularity,name')
        assert response.status_code == 400
        assert response.data == [
            'You can only specify one "sort" argument. Multiple orderings '
            'are not supported']

    def test_with_deleted_or_with_hidden(self):
        response = self.send(self.url)
        assert response.status_code == 200
        # Normal
        assert len(response.data['results']) == 3
        response = self.send(self.url + '?filter=all')
        assert response.status_code == 200
        # Now there should be 2 extra
        assert len(response.data['results']) == 5
        response = self.send(self.url + '?filter=all_with_deleted')
        assert response.status_code == 200
        # And one more still - with_deleted gets you with_hidden too.
        assert len(response.data['results']) == 6
        all_addons_ids = {
            self.addon_a.id, self.addon_b.id, self.addon_c.id,
            self.addon_disabled.id, self.addon_deleted.id,
            self.addon_pending.id}
        result_ids = {
            result['addon']['id'] for result in response.data['results']}
        assert all_addons_ids == result_ids
class TestCollectionAddonViewSetDetail(CollectionAddonViewSetMixin, TestCase):
    """GET (detail) tests for a single add-on inside a collection."""
    client_class = APITestClient

    def _detail_url(self, addon_ref):
        # Build the detail endpoint URL; `addon_ref` may be a pk or a slug.
        return reverse_ns(
            'collection-addon-detail', kwargs={
                'user_pk': self.user.pk,
                'collection_slug': self.collection.slug,
                'addon': addon_ref})

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.addon = addon_factory()
        self.collection.add_addon(self.addon)
        self.url = self._detail_url(self.addon.id)
        super(TestCollectionAddonViewSetDetail, self).setUp()

    def check_response(self, response):
        assert response.status_code == 200, self.url
        assert response.data['addon']['id'] == self.addon.id

    def test_with_slug(self):
        # The add-on can be addressed by slug as well as by pk.
        self.url = self._detail_url(self.addon.slug)
        self.test_basic()

    def test_deleted(self):
        # The detail endpoint still resolves add-ons deleted from the site.
        self.addon.delete()
        self.test_basic()
class TestCollectionAddonViewSetCreate(CollectionAddonViewSetMixin, TestCase):
    """POST tests for adding an add-on to a collection."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.url = reverse_ns(
            'collection-addon-list', kwargs={
                'user_pk': self.user.pk,
                'collection_slug': self.collection.slug})
        self.addon = addon_factory()
        super(TestCollectionAddonViewSetCreate, self).setUp()

    def check_response(self, response):
        # A successful create returns 201 and persists the membership row.
        assert response.status_code == 201, response.content
        assert CollectionAddon.objects.filter(
            collection=self.collection.id, addon=self.addon.id).exists()

    def send(self, url, data=None):
        data = data or {'addon': self.addon.pk}
        return self.client.post(url, data=data)

    def test_basic(self):
        assert not CollectionAddon.objects.filter(
            collection=self.collection.id).exists()
        self.client.login_api(self.user)
        response = self.send(self.url)
        self.check_response(response)

    def test_add_with_comments(self):
        # Notes are localized: must be a {lang-code: value} dict by default.
        self.client.login_api(self.user)
        response = self.send(self.url,
                             data={'addon': self.addon.pk,
                                   'notes': {'en-US': 'its good!'}})
        self.check_response(response)
        collection_addon = CollectionAddon.objects.get(
            collection=self.collection.id, addon=self.addon.id)
        assert collection_addon.addon == self.addon
        assert collection_addon.collection == self.collection
        assert collection_addon.comments == 'its good!'
        # Double-check trying to create with a non-dict name now fails
        response = self.send(self.url,
                             data={'addon': self.addon.pk,
                                   'notes': 'its good!'})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'notes': ['You must provide an object of {lang-code:value}.']}

    @override_settings(DRF_API_GATES={
        api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)})
    def test_add_with_comments_flat_input(self):
        # Under the flat-l10n gate, a bare string for notes is accepted.
        self.client.login_api(self.user)
        response = self.send(self.url,
                             data={'addon': self.addon.pk,
                                   'notes': 'its good!'})
        self.check_response(response)
        collection_addon = CollectionAddon.objects.get(
            collection=self.collection.id, addon=self.addon.id)
        assert collection_addon.addon == self.addon
        assert collection_addon.collection == self.collection
        assert collection_addon.comments == 'its good!'

    def test_fail_when_no_addon(self):
        self.client.login_api(self.user)
        response = self.send(self.url, data={'notes': {'en-US': ''}})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'addon': [u'This field is required.']}

    def test_fail_when_not_public_addon(self):
        # Non-public add-ons are rejected with the same error as unknown
        # ones, to avoid leaking their existence.
        self.client.login_api(self.user)
        self.addon.update(status=amo.STATUS_NULL)
        response = self.send(self.url)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'addon': ['Invalid pk or slug "%s" - object does not exist.' %
                      self.addon.pk]}

    def test_fail_when_invalid_addon(self):
        self.client.login_api(self.user)
        response = self.send(self.url, data={'addon': 3456})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'addon': ['Invalid pk or slug "%s" - object does not exist.' %
                      3456]}

    def test_with_slug(self):
        # Add-ons can be referenced by slug as well as by pk.
        self.client.login_api(self.user)
        response = self.send(self.url, data={'addon': self.addon.slug})
        self.check_response(response)

    def test_uniqueness_message(self):
        # Adding the same add-on twice yields a friendly uniqueness error.
        CollectionAddon.objects.create(
            collection=self.collection, addon=self.addon)
        self.client.login_api(self.user)
        response = self.send(self.url, data={'addon': self.addon.slug})
        assert response.status_code == 400
        assert response.data == {
            u'non_field_errors':
                [u'This add-on already belongs to the collection']
        }
class TestCollectionAddonViewSetPatch(CollectionAddonViewSetMixin, TestCase):
    """PATCH tests for the notes of an add-on inside a collection."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.addon = addon_factory()
        self.collection.add_addon(self.addon)
        self.url = reverse_ns(
            'collection-addon-detail', kwargs={
                'user_pk': self.user.pk,
                'collection_slug': self.collection.slug,
                'addon': self.addon.id})
        super(TestCollectionAddonViewSetPatch, self).setUp()

    def check_response(self, response, notes=empty):
        # `empty` sentinel distinguishes "use the default expected notes"
        # from an explicit expected value of None.
        notes = notes if notes != empty else u'it does things'
        assert response.status_code == 200, response.content
        collection_addon = CollectionAddon.objects.get(
            collection=self.collection.id)
        assert collection_addon.addon == self.addon
        assert collection_addon.collection == self.collection
        assert collection_addon.comments == notes

    def send(self, url, data=None):
        data = data or {'notes': {'en-US': 'it does things'}}
        return self.client.patch(url, data=data)

    def test_basic(self):
        self.client.login_api(self.user)
        response = self.send(self.url)
        self.check_response(response)

    def test_flat_input(self):
        self.client.login_api(self.user)
        data = {'notes': 'it does things'}
        # By default this should be rejected
        response = self.send(self.url, data)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'notes': ['You must provide an object of {lang-code:value}.']}
        # But with the correct api gate, we can use the old behavior
        overridden_api_gates = {
            api_settings.DEFAULT_VERSION: ('l10n_flat_input_output',)}
        with override_settings(DRF_API_GATES=overridden_api_gates):
            response = self.send(self.url, data)
            self.check_response(response)

    def test_cant_change_addon(self):
        # The addon field is immutable via PATCH: the request succeeds but
        # notes are cleared and the addon itself is untouched.
        self.client.login_api(self.user)
        new_addon = addon_factory()
        response = self.send(self.url,
                             data={'addon': new_addon.id})
        self.check_response(response, notes=None)

    def test_deleted(self):
        # Patching still works when the add-on was deleted from the site.
        self.addon.delete()
        self.test_basic()
class TestCollectionAddonViewSetDelete(CollectionAddonViewSetMixin, TestCase):
    """DELETE tests for removing an add-on from a collection."""
    client_class = APITestClient

    def setUp(self):
        self.user = user_factory()
        self.collection = collection_factory(author=self.user)
        self.addon = addon_factory()
        self.collection.add_addon(self.addon)
        url_kwargs = {
            'user_pk': self.user.pk,
            'collection_slug': self.collection.slug,
            'addon': self.addon.id,
        }
        self.url = reverse_ns('collection-addon-detail', kwargs=url_kwargs)
        super(TestCollectionAddonViewSetDelete, self).setUp()

    def check_response(self, response):
        # A successful delete removes the membership row entirely.
        assert response.status_code == 204
        assert not CollectionAddon.objects.filter(
            collection=self.collection.id, addon=self.addon).exists()

    def send(self, url):
        return self.client.delete(url)

    def test_basic(self):
        # Sanity-check the fixture before deleting.
        assert CollectionAddon.objects.filter(
            collection=self.collection.id, addon=self.addon).exists()
        self.client.login_api(self.user)
        self.check_response(self.send(self.url))

    def test_deleted(self):
        # Removal still works when the add-on was deleted from the site.
        self.addon.delete()
        self.test_basic()
| 38.619157
| 79
| 0.619875
|
4a04b5b89d0cbea50bcb1f2ae74d3561d9407694
| 3,928
|
py
|
Python
|
pandashells/lib/lomb_scargle_lib.py
|
preranavilas/pythonwithpandas
|
0a5e23ac8f81f6403253f39df39a9268688c45cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pandashells/lib/lomb_scargle_lib.py
|
preranavilas/pythonwithpandas
|
0a5e23ac8f81f6403253f39df39a9268688c45cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pandashells/lib/lomb_scargle_lib.py
|
preranavilas/pythonwithpandas
|
0a5e23ac8f81f6403253f39df39a9268688c45cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#! /usr/bin/env python
# standard library imports
import warnings
try:
import pandas as pd
import numpy as np
# will catch import errors in module_checker_lib so won't test this branch
except ImportError: # pragma: nocover
pass
def _next_power_two(x):
""" given a number, returns the next power of two
"""
x = int(x)
n = 1
while n < x:
n = n << 1
return n
def _compute_pad(t, interp_exponent=0):
    """
    Compute zero-padding for a sorted time series t.

    The padded series length is the next power of two multiplied by
    2 ** interp_exponent. Returns (t_pad, y_pad): timestamps continuing t
    at its average sampling interval, and matching zeros.
    """
    n_samples = len(t)
    t_start, t_stop = t[0], t[-1]
    # Average sampling interval of the input series.
    dt = (t_stop - t_start) / float(n_samples - 1)
    # Total length after padding: next power of two, times interp factor.
    total_length = _next_power_two(n_samples) << interp_exponent
    n_pad = total_length - n_samples
    t_pad = np.linspace(t_stop + dt, t_stop + dt + (n_pad - 1) * dt, n_pad)
    y_pad = np.zeros(len(t_pad))
    return t_pad, y_pad
def _compute_params(t):
"""
Takes a timeseries and computes the parameters needed for the fast
lomb scargle algorithm in gatspy
"""
t_min, t_max, n = t[0], t[-1], len(t)
dt = (t_max - t_min) / float(n - 1)
min_freq = 1. / (t_max - t_min)
d_freq = 1. / (2 * dt * len(t))
return min_freq, d_freq, len(t)
def lomb_scargle(df, time_col, val_col, interp_exponent=0, freq_order=False):
    """
    Compute the Lomb-Scargle spectrum of an irregularly sampled time series.

    :type df: pandas.DataFrame
    :param df: An input dataframe
    :type time_col: str
    :param time_col: The column of the dataframe holding the timestamps
    :type val_col: str
    :param val_col: The column of the dataframe holding the observations
    :type interp_exponent: int
    :param interp_exponent: Interpolate the spectrum by this power of two
    :type freq_order: bool
    :param freq_order: If set to True spectrum is returned in frequency order
                       instead of period order (default=False)
    :rtype: Pandas DataFrame
    :returns: A dataframe with columns: period, freq, power, amp
    """
    # do imports here to avoid loading plot libraries when this
    # module is loaded in __init__.py
    # which then doesn't allow for doing matplotlib.use() later
    import gatspy

    # only care about timestamped values
    df = df[[time_col, val_col]].dropna()

    # Standardize column names, sort by time and remove the mean from the
    # values (a non-zero mean would put a large spike at zero frequency).
    # sort_values() replaces the long-deprecated sort_index(by=...), so the
    # previous warning suppression is no longer needed.
    df = df.rename(columns={time_col: 't', val_col: 'y'}).sort_values(by='t')
    df['y'] = df['y'] - df.y.mean()

    # compute total energy in the time series (used to normalize the output)
    E_in = np.sum((df.y * df.y))

    # appropriately zero-pad the timeseries before taking spectrum
    pre_pad_length = len(df)
    t_pad, y_pad = _compute_pad(df.t.values, interp_exponent=interp_exponent)
    if len(t_pad) > 0:
        # pd.concat replaces DataFrame.append(), removed in pandas 2.0
        df = pd.concat(
            [df, pd.DataFrame({'t': t_pad, 'y': y_pad})], ignore_index=True)

    # fit the lomb scargle model to the time series
    model = gatspy.periodic.LombScargleFast()
    model.fit(df.t.values, df.y.values, 1)

    # compute params for getting results out of lomb scargle fit:
    # f0 = lowest frequency, d_freq = frequency spacing, N = grid size.
    # (Previously the spacing was bound to the name `df`, shadowing the
    # dataframe; renamed for clarity.)
    f0, d_freq, N = _compute_params(df.t.values)
    f = f0 + d_freq * np.arange(N)
    p = 1. / f

    # retrieve the lomb scargle fit and normalize for power / amplitude
    yf = model.score_frequency_grid(f0, d_freq, N)
    yf_power = 2 * yf * E_in * len(yf) / float(pre_pad_length) ** 2
    yf_amp = np.sqrt(yf_power)

    # generate the output dataframe
    df = pd.DataFrame(
        {'freq': f, 'period': p, 'power': yf_power, 'amp': yf_amp}
    )[['period', 'freq', 'power', 'amp']]

    # order by period if desired
    if not freq_order:
        df = df.sort_values(by='period')
    return df
| 31.677419
| 112
| 0.650204
|
4a04b7d6bef45671cef6e8b2f5cff8fc2f4c5614
| 6,617
|
py
|
Python
|
mobster/guide_mobster.py
|
Militeee/HMOBSTER
|
6e682f6857d147841d43f95a5e7af00fc83bcb5d
|
[
"Apache-2.0"
] | null | null | null |
mobster/guide_mobster.py
|
Militeee/HMOBSTER
|
6e682f6857d147841d43f95a5e7af00fc83bcb5d
|
[
"Apache-2.0"
] | null | null | null |
mobster/guide_mobster.py
|
Militeee/HMOBSTER
|
6e682f6857d147841d43f95a5e7af00fc83bcb5d
|
[
"Apache-2.0"
] | null | null | null |
import pyro as pyro
import numpy as np
from pyro.infer import MCMC, NUTS, Predictive, HMC, config_enumerate
from pyro.infer.autoguide import AutoDelta, init_to_sample
import pyro.distributions as dist
import pyro.poutine as poutine
import torch
from torch.distributions import constraints
import mobster.utils_mobster as mut
from mobster.likelihood_calculation import *
@config_enumerate
def guide(data, K=1, tail=1, truncated_pareto = True, purity=0.96, clonal_beta_var=1., number_of_trials_clonal_mean=100.,
          number_of_trials_clonal=900., number_of_trials_k=300., prior_lims_clonal=[1., 10000.],alpha_precision_concentration = 100, alpha_precision_rate=0.1,
          prior_lims_k=[1., 10000.], epsilon_ccf = 0.01):
    """Pyro variational guide for the MOBSTER subclonal-deconvolution model.

    Mirrors the model's sample statements with learnable ``pyro.param``
    sites, mostly wrapped in Delta distributions (MAP-style point
    estimates). ``data`` maps a karyotype key to a tensor whose first
    column is the variant read count (NV) and second the depth (DP).

    NOTE(review): ``prior_lims_clonal`` and ``prior_lims_k`` are mutable
    default arguments; they appear unused in this body, but confirm
    before changing the signature. Several keyword args
    (clonal_beta_var, number_of_trials_clonal*) are likewise unused here
    — presumably consumed by the paired model function; verify.
    """
    karyos = list(data.keys())
    # Here we calculate the theoretical number of clonal clusters
    theoretical_num_clones = [mut.theo_clonal_list[kr] for kr in karyos]
    # Calculate the theoretical clonal means, wihch can be analytically defined for simple karyotypes, and then multiply them by the ploidy
    theoretical_clonal_means = [mut.theo_clonal_means_list[kr] * purity for kr in karyos]
    # Histogram: how many karyotypes have 1 vs 2 theoretical clonal peaks.
    counts_clones = dict()
    for i in theoretical_num_clones:
        counts_clones[i] = counts_clones.get(i, 0) + 1
    # Positions (per karyotype) of the 2-peak and 1-peak cases.
    index_2 = [i for i, j in enumerate(theoretical_num_clones) if j == 2]
    index_1 = [i for i, j in enumerate(theoretical_num_clones) if j == 1]
    # Learnable mean of the Pareto tail shape prior.
    a_prior = pyro.param("tail_mean", torch.tensor(1.), constraint=constraints.positive)
    # Per-karyotype precision of the tail-shape noise, initialized at the Gamma prior mean.
    alpha_precision_par = pyro.param("alpha_noise",
                                     dist.Gamma(concentration=alpha_precision_concentration, rate=alpha_precision_rate).mean * torch.ones([len(karyos)]),
                                     constraint=constraints.positive)
    # Per-karyotype Beta overdispersion (number-of-trials) parameters.
    avg_number_of_trials_beta = pyro.param("avg_number_of_trials_beta", torch.ones(len(karyos)) * number_of_trials_k, constraint=constraints.positive)
    precision_number_of_trials_beta = pyro.param("prc_number_of_trials_beta", torch.ones(len(karyos)) * 20, constraint=constraints.positive)
    alpha_prior = pyro.sample('u', dist.Delta(a_prior))
    # Subclonal CCFs initialized evenly spaced below the purity-scaled maximum.
    ccf_priors = pyro.param("ccf_priors",
                            ((torch.min(torch.tensor(1) * purity) - 0.001) / (K + 1)) * torch.arange(1,K+1),
                            constraint=constraints.unit_interval
                            )
    if K != 0:
        with pyro.plate("subclones", K):
            subclonal_ccf = pyro.sample("sb_ccf", dist.Delta(ccf_priors).to_event(0))
    # Running indices into the 1-peak / 2-peak weight parameter tables.
    idx1 = 0
    idx2 = 0
    if tail == 1:
        weights_tail = pyro.param("param_tail_weights", 1 / torch.ones([len(karyos), 2]), constraint=constraints.simplex)
    if 2 in theoretical_num_clones:
        weights_param_2 = pyro.param("param_weights_2", (1 / (K + 2)) * torch.ones([len(index_2), K + 2]),
                                     constraint=constraints.simplex)
    if 1 in theoretical_num_clones:
        weights_param_1 = pyro.param("param_weights_1", (1 / (K + 1)) * torch.ones([len(index_1), K + 1]),
                                     constraint=constraints.simplex)
    for kr in pyro.plate("kr", len(karyos)):
        # NV = variant reads, DP = total depth, VAF = variant allele frequency.
        NV = data[karyos[kr]][:, 0]
        DP = data[karyos[kr]][:, 1]
        VAF = NV / DP
        if K != 0:
            with pyro.plate("subclones_{}".format(kr), K):
                # Adjust subclonal CCFs for karyotype-specific copy number and purity.
                adj_ccf = subclonal_ccf * mut.ccf_adjust[karyos[kr]] * purity
                k_means = pyro.sample('beta_subclone_mean_{}'.format(kr),
                                      dist.Uniform(adj_ccf - epsilon_ccf, adj_ccf + epsilon_ccf))
        prior_overdispersion = pyro.sample('prior_ovedisp_{}'.format(kr),
                                           dist.Delta(avg_number_of_trials_beta[kr]))
        prec_overdispersion = pyro.sample('prec_ovedisp_{}'.format(kr),
                                          dist.Delta(precision_number_of_trials_beta[kr]))
        if theoretical_num_clones[kr] == 2:
            pyro.sample('weights_{}'.format(kr), dist.Delta(weights_param_2[idx2]).to_event(1))
            # Mean parameter when the number of clonal picks is 2
            a_2_theo = torch.cat([theoretical_clonal_means[i] for i in index_2]).reshape([counts_clones[2], 2])
            a_2_theo = torch.transpose(a_2_theo,0,1)
            a21 = pyro.param('a_2',
                             a_2_theo.reshape([2, len(index_2)]),
                             constraint=constraints.unit_interval)
            with pyro.plate("clones_{}".format(kr), 2):
                pyro.sample('beta_clone_mean_{}'.format(kr), dist.Delta(a21[:, idx2]))
            with pyro.plate("clones_N_{}".format(kr), 2 + K):
                pyro.sample('beta_clone_n_samples_{}'.format(kr), dist.LogNormal(torch.log(prior_overdispersion), 1/prec_overdispersion))
            idx2 += 1
        else:
            pyro.sample('weights_{}'.format(kr), dist.Delta(weights_param_1[idx1]).to_event(1))
            # Mean parameter when the number of clonal picks is 1
            a_1_theo = torch.tensor([theoretical_clonal_means[i] for i in index_1]).reshape([1, counts_clones[1]])
            a11 = pyro.param('a_1',
                             a_1_theo.reshape([1 , len(index_1)]),
                             constraint=constraints.unit_interval)
            with pyro.plate("clones_{}".format(kr), 1):
                pyro.sample('beta_clone_mean_{}'.format(kr), dist.Delta(a11[:, idx1]))
            with pyro.plate("clones_N_{}".format(kr), 1 + K):
                pyro.sample('beta_clone_n_samples_{}'.format(kr), dist.LogNormal(torch.log(prior_overdispersion), 1/prec_overdispersion))
            idx1 += 1
        if tail == 1:
            # K = K + tail
            pyro.sample('weights_tail_{}'.format(kr), dist.Delta(weights_tail[kr]).to_event(1))
            alpha_precision = pyro.sample('alpha_precision_{}'.format(kr), dist.Delta(alpha_precision_par[kr]))
            alpha = pyro.sample("alpha_noise_{}".format(kr),
                                dist.LogNormal(torch.log(alpha_prior * mut.theo_allele_list[karyos[kr]]),
                                               1 / alpha_precision))
            # NOTE(review): this guard runs AFTER alpha_prior has already been
            # used in the LogNormal above — confirm the ordering is intended.
            if alpha_prior <= 0 or torch.isnan(alpha_prior):
                alpha_prior = torch.tensor(0.001)
            #alpha = alpha_prior * mut.theo_allele_list[karyos[kr]]
            if truncated_pareto:
                # Tail truncated above at the smallest theoretical clonal mean.
                pyro.sample("tail_T_{}".format(kr), BoundedPareto( (torch.min(VAF) - 1e-5), alpha,
                                                                  torch.amin(theoretical_clonal_means[kr])))
            else:
                pyro.sample("tail_{}".format(kr), BoundedPareto(torch.min(VAF) - 1e-5, alpha,1))
| 44.709459
| 158
| 0.615687
|
4a04b823c7d4718853b115126c0ade0071cb6b01
| 10,924
|
py
|
Python
|
fill_shelf.py
|
Ross91/MayaScripts
|
988848f8cfc152261a95c60a1760af686a249658
|
[
"BSD-3-Clause"
] | null | null | null |
fill_shelf.py
|
Ross91/MayaScripts
|
988848f8cfc152261a95c60a1760af686a249658
|
[
"BSD-3-Clause"
] | null | null | null |
fill_shelf.py
|
Ross91/MayaScripts
|
988848f8cfc152261a95c60a1760af686a249658
|
[
"BSD-3-Clause"
] | null | null | null |
""" Bookshelf Filler.
This hasn't been stress tested so stability is unknown. This was also created in python 3.7.
Some terminology:
Assets are your books, which will be duplicated and moved into place.
Source objects are where your books are place, you want one per shelf.
Assets should be at origin with zero transforms and have their pivot centered in the x and z axis. The y-axis should
align with the bottom of the mesh, so it can rotated properly.
For your source object use a cube, this tool will use the bounding box to place assets. I would advise changing the
display mode so you can see your assets when placed.
Do not freeze transformations on any source object since this information is used to position assets.
Assets will be placed along the sources local X-axis from negative to positive, the bottom will align with the bottom of
the source object.
Example Implementation:
books = ['book1', 'book2', 'book3']
sources = ['pcube1', 'pcube2']
pack(sources, books, rotate=(-5, 5), spacing=(0.1, 0.2))
"""
__author__ = "Ross Harrop"
__version__ = "0.0.1"
import random
from maya.api import OpenMaya as om
import maya.cmds as mc
def get_object_from_node(node_name):
    """Resolve a Maya node name to its OpenMaya MDagPath.

    Args:
        node_name(str): Object name

    Returns: MDagPath
    """
    selection = om.MSelectionList().add(node_name)
    depend_node = selection.getDependNode(0)
    return om.MDagPath.getAPathTo(depend_node)
def get_bounding_box(node):
    """Return the OpenMaya bounding box of *node*.

    Args:
        node(str): Object name.

    Returns: MBoundingBox
    """
    dag_fn = om.MFnDagNode(get_object_from_node(node))
    return dag_fn.boundingBox
def get_local_axis(obj, axis=0):
    """Return one of the object's local axes as a unit vector.

    x=0, y=1, z=2; any other value yields None (same as the original
    fall-through behavior).

    Args:
        obj(str): Maya Transform name.
        axis(int): -
    """
    matrix = mc.xform(obj, q=True, m=True, ws=True)
    if axis in (0, 1, 2):
        # Rows of the 4x4 world matrix: x at 0-2, y at 4-6, z at 8-10.
        base = axis * 4
        return om.MVector(matrix[base], matrix[base + 1], matrix[base + 2]).normalize()
class MeshBounding(object):
    """
    A normal maya bounding box is in world space.
    This is a local space version to calculate height, width and depth, regardless of orientation.

    NOTE(review): if the input is not a 6-faced cube, __init__ warns and
    returns early, leaving the instance WITHOUT its attributes; callers
    truth-testing the instance (e.g. ``if not area_bounding``) will not
    detect this because the object itself is still truthy — confirm.
    """
    def __init__(self, node_name):
        super(MeshBounding, self).__init__()
        # get faces.
        faces = mc.ls(mc.polyListComponentConversion(node_name, tf=True), fl=1)
        if len(faces) != 6:
            mc.warning("Please use a cube")
            return
        # get vertexes.
        vert_groups = [mc.ls(mc.polyListComponentConversion(f, ff=True, tv=True), fl=1) for f in faces]
        positions = []
        # get center point for each face (average of its 4 vertices).
        for verts in vert_groups:
            vert_pos = om.MVector(0, 0, 0)
            for vert in verts:
                t = mc.xform(vert, q=1, ws=1, t=1)
                vert_pos += om.MVector(t[0], t[1], t[2])
            average_pos = om.MVector(vert_pos.x / 4, vert_pos.y / 4, vert_pos.z / 4)
            positions.append(average_pos)
        # get height, width and depth.
        # NOTE(review): assumes Maya's default cube face ordering so that
        # opposite-face pairs land at these indices — verify on other cubes.
        self.depth = abs((positions[-1] - positions[-2]).length())
        self.height = abs((positions[0] - positions[2]).length())
        self.width = abs((positions[1] - positions[3]).length())
        # get translation
        area_pos = mc.xform(node_name, q=True, ws=True, t=True)
        self.translation = om.MVector(area_pos[0], area_pos[1], area_pos[2])
        # get x, y and z-axis unit vectors from world matrix.
        world_mat = mc.xform(node_name, q=True, m=True, ws=True)
        self.x = om.MVector(world_mat[0], world_mat[1], world_mat[2]).normalize()
        self.y = om.MVector(world_mat[4], world_mat[5], world_mat[6]).normalize()
        self.z = om.MVector(world_mat[8], world_mat[9], world_mat[10]).normalize()
        # Get the transformation matrix of the selected object.
        dag_path = get_object_from_node(node_name)
        transform = om.MFnTransform(dag_path)
        self.rotation = transform.rotation()
        m = transform.transformationMatrix()
        # Get the shape directly below the selected transform.
        dag_path.extendToShape()
        # get bounding box
        mesh = om.MFnMesh(dag_path)
        bounds = mesh.boundingBox
        center = bounds.center
        # NOTE: these locals shadow the built-ins min/max within this scope.
        min = bounds.min
        max = bounds.max
        # Transform the bounding box min/max by the objects transformation matrix.
        self.min = min * m
        self.max = max * m
def pack(sources, assets, rotate=(-5, 5), spacing=(0.1, 0.2)):
    """
    Duplicate and place assets within an area.
    This assumes your assets are at origin with zero transform.

    Books are laid out along the source's local X axis; each placed book
    may randomly "lean" (rotate around Z). All placed books per source
    are grouped, and all per-source groups are grouped once at the end.

    Args:
        sources([str]): Source objects to place assets in.
        assets([str]): Assets to be placed.
        rotate(int, int): min and max rotation variation.
        spacing(float, float): min and max spacing between assets.
    """
    groups = []
    for source in sources:
        # get source bounding box
        area_bounding = MeshBounding(source)
        # NOTE(review): a constructed instance is always truthy, so this
        # guard never skips a non-cube source — confirm intended.
        if not area_bounding:
            continue
        # have the books reached the end of the source object.
        threshold = False
        # has the book been rotated
        leaning = False
        items = []
        # start at origin
        position = om.MVector(0, 0, 0)
        # duplicate books and place them until threshold reached.
        while not threshold:
            # pick random book from list and duplicate it.
            asset = assets[random.randint(0, (len(assets) - 1))]
            duplicate = mc.duplicate(asset)[0]
            # get local x axis
            axis = get_local_axis(duplicate)
            # Get OpenMaya classes
            c_obj = get_object_from_node(duplicate)
            c_tran = om.MFnTransform(c_obj)
            # get an offset from the center of the book to its border.
            asset_bounding = get_bounding_box(duplicate)
            offset = axis * asset_bounding.width / 2
            # decide if its going to be rotated and check if the previous was rotated.
            # (a book is never leaned directly after a leaning one)
            if not bool(random.getrandbits(1)) or leaning:
                position += offset
                if not leaning:
                    # add random spacing
                    rnd = round(random.uniform(spacing[0], spacing[1]), 1)
                    position += axis * rnd
                # move book
                c_tran.setTranslation(position, om.MSpace.kTransform)
                position += offset
                # if its reached the edge of the source and its clipping, remove it.
                if position.x >= area_bounding.depth:
                    threshold = True
                    mc.delete(duplicate)
                    continue
                items.append(duplicate)
                leaning = False
                continue
            # get random rotation int
            rotate_value = random.randint(rotate[0], rotate[1])
            # if random rotation is zero, treat it like a normal book.
            if rotate_value == 0:
                position += offset
                rnd = round(random.uniform(spacing[0], spacing[1]), 1)
                position += axis * rnd
                c_tran.setTranslation(position, om.MSpace.kTransform)
                position += offset
                if position.x >= area_bounding.depth:
                    threshold = True
                    mc.delete(duplicate)
                    continue
                items.append(duplicate)
                leaning = False
                continue
            # check for positive or negative rotation.
            if rotate_value < 0:
                is_positive = False
            else:
                is_positive = True
            leaning = True
            # change rotation from degrees to radians
            angle = om.MAngle(rotate_value, 2)
            rotation = om.MEulerRotation(0, 0, angle.asRadians())
            c_tran.rotateBy(rotation, 2)
            # before moving book, adjust its position
            if is_positive:
                # leaning forward: advance by the rotated width first.
                new_pos = get_bounding_box(duplicate)
                adj = axis * new_pos.width
                position += offset * -1
                position += adj
                c_tran.setTranslation(position, om.MSpace.kTransform)
                position += offset
                rnd = round(random.uniform(spacing[0], spacing[1]), 1)
                position += axis * rnd
            else:
                # leaning backward: place first, then skip past the overhang.
                position += offset
                rnd = round(random.uniform(spacing[0], spacing[1]), 1)
                position += axis * rnd
                c_tran.setTranslation(position, om.MSpace.kTransform)
                new_pos = get_bounding_box(duplicate)
                adj = axis * abs(position.x - new_pos.max.x)
                position += adj
            if position.x >= area_bounding.depth:
                threshold = True
                mc.delete(duplicate)
                continue
            items.append(duplicate)
        # books have been offset from the origin and no where near the source.
        # translate and rotate them to the correct position, inside the source.
        area_trans = area_bounding.translation
        area_trans += area_bounding.x * (area_bounding.depth / 2) * -1
        area_trans += area_bounding.y * (area_bounding.height / 2) * -1
        rot = mc.xform(source, q=1, ws=1, ro=1)
        angle_x = om.MAngle(rot[0], 2)
        angle_y = om.MAngle(rot[1], 2)
        angle_z = om.MAngle(rot[2], 2)
        obj_rot = om.MEulerRotation(angle_x.asRadians(), angle_y.asRadians(), angle_z.asRadians())
        for i in items:
            if i == items[0]:
                asset_bounding = get_bounding_box(i)
                area_trans += area_bounding.x * (asset_bounding.width / 2)
            om_obj = get_object_from_node(i)
            obj_trans = om.MFnTransform(om_obj)
            x_pos = area_bounding.x * obj_trans.translation(om.MSpace.kTransform).x
            y_pos = area_bounding.y * obj_trans.translation(om.MSpace.kTransform).y
            # NOTE(review): reads `.y` here while scaling the Z axis —
            # looks like a typo for `.z`; confirm before fixing.
            z_pos = area_bounding.z * obj_trans.translation(om.MSpace.kTransform).y
            adjusted_trans = area_trans + x_pos
            adjusted_trans += y_pos
            adjusted_trans += z_pos
            obj_trans.rotateBy(obj_rot, 3)
            obj_trans.setTranslation(adjusted_trans, 4)
        grp = mc.group(items)
        groups.append(grp)
    mc.group(groups)
| 34.56962
| 121
| 0.572409
|
4a04b8391e8d58bfc63e869c94d700dc254a91d5
| 2,587
|
py
|
Python
|
Week 3/id_475/LeetCode_102_475.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-10-12T06:48:45.000Z
|
2019-10-12T06:48:45.000Z
|
Week 3/id_475/LeetCode_102_475.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-12-01T10:02:03.000Z
|
2019-12-01T10:02:03.000Z
|
Week 3/id_475/LeetCode_102_475.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | null | null | null |
# BFS
# 法一
class Solution:
    def levelOrder(self, root):
        """Breadth-first (level-order) traversal using a FIFO queue.

        Returns a flat list of node values in visit order; an empty
        list for an empty tree (consistent with the array-based
        variant below, which also returns []).
        """
        if root is None:  # fixed: was `== None`
            return []
        from collections import deque  # O(1) popleft vs list.pop(0)
        res = []
        queue = deque([root])
        while queue:
            node = queue.popleft()
            res.append(node.val)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:  # fixed: was `!= None`
                queue.append(node.right)
        return res
# 法二
class Solution:
    def levelOrder(self, root):
        """Level-order traversal, processing one whole level at a time.

        Returns a flat list of node values; [] for an empty tree.
        """
        if not root:
            return []
        values = []
        level = [root]
        while level:
            values.extend(node.val for node in level)
            # Children of the current level, in left-to-right order.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return values
# LeetCode102
# 第一遍:
# 法一(1):迭代法(数组)
# 思想:当前节点cur_level,循环遍历当前节点的子节点,放入next_level,对每一次遍历所有节点:把他的左右节点放入next_level,遍历结束后更新cur_level
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Iterative level-order traversal: one sub-list per tree level."""
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # Record this level's values, then advance to its children.
            levels.append([node.val for node in frontier])
            frontier = [kid
                        for node in frontier
                        for kid in (node.left, node.right)
                        if kid]
        return levels
# 法一(2):迭代法(队列)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Level-order traversal using a single queue.

        The count of nodes currently queued marks where one level ends
        and the next begins.
        """
        if not root:
            return []
        result = []
        pending = [root]
        while pending:
            width = len(pending)  # nodes belonging to the current level
            level_vals = []
            for _ in range(width):
                node = pending.pop(0)
                level_vals.append(node.val)
                for child in (node.left, node.right):
                    if child:
                        pending.append(child)
            result.append(level_vals)
        return result
| 27.231579
| 92
| 0.487824
|
4a04b94766261647dfc79e1b26c42c21be4320cb
| 8,687
|
py
|
Python
|
nipype/pipeline/engine/tests/test_nodes.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 7
|
2017-02-17T08:54:26.000Z
|
2022-03-10T20:57:23.000Z
|
nipype/pipeline/engine/tests/test_nodes.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 1
|
2016-04-25T15:07:09.000Z
|
2016-04-25T15:07:09.000Z
|
nipype/pipeline/engine/tests/test_nodes.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, unicode_literals
from builtins import str
import os
from copy import deepcopy
import pytest
from .... import config
from ....interfaces import utility as niu
from ... import engine as pe
from ..utils import merge_dict
from .test_base import EngineTestInterface
from .test_utils import UtilsTestInterface
'''
Test for order of iterables
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
wf1 = pe.Workflow(name='wf1')
node1 = pe.Node(interface=niu.IdentityInterface(fields=['a1','b1']), name='node1')
node1.iterables = ('a1', [1,2])
wf1.add_nodes([node1])
wf2 = pe.Workflow(name='wf2')
node2 = pe.Node(interface=niu.IdentityInterface(fields=['a2','b2']), name='node2')
wf2.add_nodes([node2])
wf1.connect(node1, 'a1', wf2, 'node2.a2')
node4 = pe.Node(interface=niu.IdentityInterface(fields=['a4','b4']), name='node4')
#node4.iterables = ('a4', [5,6])
wf2.connect(node2, 'b2', node4, 'b4')
wf3 = pe.Workflow(name='wf3')
node3 = pe.Node(interface=niu.IdentityInterface(fields=['a3','b3']), name='node3')
node3.iterables = ('b3', [3,4])
wf3.add_nodes([node3])
wf1.connect(wf3, 'node3.b3', wf2, 'node2.b2')
wf1.base_dir = os.path.join(os.getcwd(),'testit')
wf1.run(inseries=True, createdirsonly=True)
wf1.write_graph(graph2use='exec')
'''
'''
import nipype.pipeline.engine as pe
import nipype.interfaces.spm as spm
import os
from io import StringIO
from nipype.utils.config import config
config.readfp(StringIO("""
[execution]
remove_unnecessary_outputs = true
"""))
segment = pe.Node(interface=spm.Segment(), name="segment")
segment.inputs.data = os.path.abspath("data/T1.nii")
segment.inputs.gm_output_type = [True, True, True]
segment.inputs.wm_output_type = [True, True, True]
smooth_gm = pe.Node(interface=spm.Smooth(), name="smooth_gm")
workflow = pe.Workflow(name="workflow_cleanup_test")
workflow.base_dir = os.path.abspath('./workflow_cleanup_test')
workflow.connect([(segment, smooth_gm, [('native_gm_image','in_files')])])
workflow.run()
#adding new node that uses one of the previously deleted outputs of segment; this should force segment to rerun
smooth_wm = pe.Node(interface=spm.Smooth(), name="smooth_wm")
workflow.connect([(segment, smooth_wm, [('native_wm_image','in_files')])])
workflow.run()
workflow.run()
'''
# Node
def test_node_init():
    """Node construction contract: no interface raises TypeError;
    passing the interface *class* (not an instance) raises IOError."""
    with pytest.raises(TypeError):
        pe.Node()
    with pytest.raises(IOError):
        pe.Node(EngineTestInterface, name='test')
def test_node_get_output():
    """get_output serves cached results even after the in-memory result
    object has been cleared (exercising the reload-from-cache path)."""
    mod1 = pe.Node(interface=EngineTestInterface(), name='mod1')
    mod1.inputs.input1 = 1
    mod1.run()
    assert mod1.get_output('output1') == [1, 1]
    # Drop the in-memory result to force get_output to reload it.
    mod1._result = None
    assert mod1.get_output('output1') == [1, 1]
def test_mapnode_iterfield_check():
    """_check_iterfield rejects an unset iterfield and mismatched
    iterfield value shapes."""
    mod1 = pe.MapNode(EngineTestInterface(), iterfield=['input1'], name='mod1')
    # input1 was never assigned -> invalid.
    with pytest.raises(ValueError):
        mod1._check_iterfield()
    mod1 = pe.MapNode(
        EngineTestInterface(), iterfield=['input1', 'input2'], name='mod1')
    mod1.inputs.input1 = [1, 2]
    # input2 is a scalar where a same-length list is required -> invalid.
    mod1.inputs.input2 = 3
    with pytest.raises(ValueError):
        mod1._check_iterfield()
@pytest.mark.parametrize("x_inp, f_exp",
                         [(3, [6]), ([2, 3], [4, 6]), ((2, 3), [4, 6]),
                          (range(3), [0, 2, 4]), ("Str", ["StrStr"]),
                          (["Str1", "Str2"], ["Str1Str1", "Str2Str2"])])
def test_mapnode_iterfield_type(x_inp, f_exp):
    """MapNode iterfields accept scalars, lists, tuples, ranges and
    strings; scalars and whole strings map as a single element."""
    from nipype import MapNode, Function

    def double_func(x):
        # 2 * x doubles numbers and repeats strings, per the cases above.
        return 2 * x
    double = Function(["x"], ["f_x"], double_func)
    double_node = MapNode(double, name="double", iterfield=["x"])
    double_node.inputs.x = x_inp
    res = double_node.run()
    assert res.outputs.f_x == f_exp
def test_mapnode_nested(tmpdir):
    """nested=True maps over arbitrarily nested lists element-wise;
    nested=False passes sub-lists through and the +1 then fails."""
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1
    n1 = MapNode(
        Function(input_names=['in1'], output_names=['out'], function=func1),
        iterfield=['in1'],
        nested=True,
        name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    # Nesting structure is preserved, every leaf incremented.
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]
    n2 = MapNode(
        Function(input_names=['in1'], output_names=['out'], function=func1),
        iterfield=['in1'],
        nested=False,
        name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    # Without nesting, `[1, [2]] + 1` raises a TypeError inside the run.
    with pytest.raises(Exception) as excinfo:
        n2.run()
    assert "can only concatenate list" in str(excinfo.value)
def test_mapnode_expansion(tmpdir):
    """Every sub-node produced by MapNode expansion inherits the parent's
    execution settings (overwrite, plugin args, n_procs, mem_gb)."""
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(
        Function(function=func1),
        iterfield='in1',
        name='mapnode',
        n_procs=2,
        mem_gb=2)
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('_n_procs', '_mem_gb'):
            assert (getattr(node, attr) == getattr(mapnode, attr))
def test_node_hash(tmpdir):
    """With local_hash_check enabled, cached nodes are short-circuited
    locally and the distributed plugin's _submit_job is never reached."""
    from nipype.interfaces.utility import Function
    tmpdir.chdir()

    config.set_default_config()
    config.set('execution', 'stop_on_first_crash', True)
    config.set('execution', 'crashdump_dir', os.getcwd())

    def func1():
        return 1

    def func2(a):
        return a + 1

    n1 = pe.Node(
        Function(input_names=[], output_names=['a'], function=func1),
        name='n1')
    n2 = pe.Node(
        Function(input_names=['a'], output_names=['b'], function=func2),
        name='n2')
    w1 = pe.Workflow(name='test')

    def modify(x):
        return x + 1
    n1.inputs.a = 1
    w1.connect(n1, ('a', modify), n2, 'a')
    w1.base_dir = os.getcwd()
    # create dummy distributed plugin class
    from nipype.pipeline.plugins.base import DistributedPluginBase

    # create a custom exception
    class EngineTestException(Exception):
        pass

    class RaiseError(DistributedPluginBase):
        def _submit_job(self, node, updatehash=False):
            # NOTE(review): the format string expects two values, so
            # node.is_cached() presumably returns a 2-tuple — confirm.
            raise EngineTestException(
                'Submit called - cached=%s, updated=%s' % node.is_cached())

    # check if a proper exception is raised: nothing is cached yet, so
    # the plugin must be asked to submit and our dummy raises.
    with pytest.raises(EngineTestException) as excinfo:
        w1.run(plugin=RaiseError())
    assert str(excinfo.value).startswith('Submit called')

    # generate outputs
    w1.run(plugin='Linear')
    # ensure plugin is being called
    config.set('execution', 'local_hash_check', False)
    # rerun to ensure we have outputs
    w1.run(plugin='Linear')
    # set local check
    config.set('execution', 'local_hash_check', True)
    w1 = pe.Workflow(name='test')
    w1.connect(n1, ('a', modify), n2, 'a')
    w1.base_dir = os.getcwd()
    # Everything is cached now, so this must complete WITHOUT _submit_job
    # being called (no EngineTestException expected).
    w1.run(plugin=RaiseError())
def test_outputs_removal(tmpdir):
    """Outputs not listed in ``needed_outputs`` are deleted after a run
    when the ``remove_unnecessary_outputs`` execution option is set."""

    def test_function(arg1):
        import os
        file1 = os.path.join(os.getcwd(), 'file1.txt')
        file2 = os.path.join(os.getcwd(), 'file2.txt')
        with open(file1, 'wt') as fp:
            fp.write('%d' % arg1)
        with open(file2, 'wt') as fp:
            fp.write('%d' % arg1)
        return file1, file2

    n1 = pe.Node(
        niu.Function(
            input_names=['arg1'],
            output_names=['file1', 'file2'],
            function=test_function),
        base_dir=tmpdir.strpath,
        name='testoutputs')
    n1.inputs.arg1 = 1
    n1.config = {'execution': {'remove_unnecessary_outputs': True}}
    n1.config = merge_dict(deepcopy(config._sections), n1.config)
    n1.run()
    # With no needed_outputs declared, both outputs must survive.
    assert tmpdir.join(n1.name, 'file1.txt').check()
    # BUGFIX: this assertion previously re-checked file1.txt twice;
    # it must verify that file2.txt was also kept.
    assert tmpdir.join(n1.name, 'file2.txt').check()
    n1.needed_outputs = ['file2']
    n1.run()
    # After the rerun, only the needed output remains.
    assert not tmpdir.join(n1.name, 'file1.txt').check()
    assert tmpdir.join(n1.name, 'file2.txt').check()
def test_inputs_removal(tmpdir):
    """Copied input files are kept or discarded according to the
    ``keep_inputs`` execution option."""
    file1 = tmpdir.join('file1.txt')
    file1.write('dummy_file')
    n1 = pe.Node(
        UtilsTestInterface(), base_dir=tmpdir.strpath, name='testinputs')
    n1.inputs.in_file = file1.strpath
    n1.config = {'execution': {'keep_inputs': True}}
    n1.config = merge_dict(deepcopy(config._sections), n1.config)
    n1.run()
    assert tmpdir.join(n1.name, 'file1.txt').check()
    n1.inputs.in_file = file1.strpath
    n1.config = {'execution': {'keep_inputs': False}}
    n1.config = merge_dict(deepcopy(config._sections), n1.config)
    # Force a rerun so the new keep_inputs setting takes effect.
    n1.overwrite = True
    n1.run()
    assert not tmpdir.join(n1.name, 'file1.txt').check()
| 29.648464
| 111
| 0.642569
|
4a04b94dcbc8d5b95de330e9291ce86f5383b9ab
| 225
|
py
|
Python
|
FabricUI/pattern/__init__.py
|
shuaih7/FabricUI
|
6501e8e6370d1f90174002f5768b5ef63e8412bc
|
[
"Apache-2.0"
] | null | null | null |
FabricUI/pattern/__init__.py
|
shuaih7/FabricUI
|
6501e8e6370d1f90174002f5768b5ef63e8412bc
|
[
"Apache-2.0"
] | 2
|
2020-11-27T05:21:12.000Z
|
2020-11-27T05:24:04.000Z
|
FabricUI/pattern/__init__.py
|
shuaih7/QtUI
|
6501e8e6370d1f90174002f5768b5ef63e8412bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 03.02.2021
Updated on 03.12.2021
Author: haoshuai@handaotech.com
'''
from .pattern_filter import PatternFilter
from .pattern_filter_simple import PatternFilterSimple
| 18.75
| 54
| 0.755556
|
4a04ba0fb97db7ff4ed08d2668b43db6b3d9c6e2
| 2,223
|
py
|
Python
|
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/create_listener_tags_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/create_listener_tags_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/create_listener_tags_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateListenerTagsResponse(SdkResponse):
    """Response model for the create-listener-tags API call.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in definition.

    This response declares no payload attributes, so both registries
    are empty.
    """

    sensitive_list = []

    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):
        """CreateListenerTagsResponse - a model defined in huaweicloud sdk"""
        super(CreateListenerTagsResponse, self).__init__()
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name, _ in six.iteritems(self.openapi_types):
            value = getattr(self, name)
            if isinstance(value, list):
                # Recurse into model elements; pass plain values through.
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                # Mask attributes flagged as sensitive.
                result[name] = "****" if name in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateListenerTagsResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.109756
| 77
| 0.541161
|
4a04bab87a3b1f40e6574d4f022286f562fa286c
| 208
|
py
|
Python
|
algorithimic_tasks/ntm/datasets/__init__.py
|
zoharli/armin
|
9bf8e4533850a66bbef26390244f0d0ad30c067b
|
[
"MIT"
] | 3
|
2019-07-01T12:11:29.000Z
|
2020-05-25T22:37:50.000Z
|
algorithimic_tasks/ntm/datasets/__init__.py
|
zoharli/armin
|
9bf8e4533850a66bbef26390244f0d0ad30c067b
|
[
"MIT"
] | null | null | null |
algorithimic_tasks/ntm/datasets/__init__.py
|
zoharli/armin
|
9bf8e4533850a66bbef26390244f0d0ad30c067b
|
[
"MIT"
] | null | null | null |
from .copy import CopyDataset
from .add import AddDataset
from .repeatcopy import RepeatCopyDataset
from .associative import AssociativeDataset
from .ngram import NGram
from .prioritysort import PrioritySort
| 29.714286
| 43
| 0.855769
|
4a04bbdc38146400088330234724a3389e557dc0
| 2,818
|
py
|
Python
|
data_analyse/file_updater.py
|
github4code/YahooNews_Analyzer
|
2fb535b220103a3c18f74a1338568dc7c2d8ed9f
|
[
"MIT"
] | null | null | null |
data_analyse/file_updater.py
|
github4code/YahooNews_Analyzer
|
2fb535b220103a3c18f74a1338568dc7c2d8ed9f
|
[
"MIT"
] | null | null | null |
data_analyse/file_updater.py
|
github4code/YahooNews_Analyzer
|
2fb535b220103a3c18f74a1338568dc7c2d8ed9f
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
'''
This program is used to update dictionary-file, tfidf-model, lda-model
Program execution: python file_updater.py
Author: Jason Su
Last modification: 2016/12/17
'''
import sys, datetime, jieba, jieba.analyse, MySQLdb
from gensim import corpora, models, similarities
from mysql_config import DBConfig
# Python 2 idiom: re-import sys so setdefaultencoding is available again.
reload(sys)
sys.setdefaultencoding('utf8')
# Earliest news date to rebuild the models from, as [year, month, day].
date_list = [2016,9,22]
# Dates are encoded as YYYYMMDD integers for the SQL comparisons below.
date = date_list[0]*10000 + date_list[1]*100 + date_list[2]
today_date = (datetime.datetime.now().year)*10000 + (datetime.datetime.now().month)*100 + datetime.datetime.now().day
def get_next_date(date_in):
    """Return [year, month, day] of the day after *date_in* ([y, m, d])."""
    year, month, day = date_in
    tomorrow = datetime.datetime(year, month, day) + datetime.timedelta(days=1)
    return [tomorrow.year, tomorrow.month, tomorrow.day]
# Load stop words (one per line) and add whitespace tokens jieba may emit.
stop_words = set(open("ref/stop_word.txt", "r").read().splitlines())
stop_words.update('\n', '\t', ' ')
# Use the big (traditional-aware) dictionary plus a project user dictionary.
jieba.set_dictionary('ref/dict.txt.big')
jieba.load_userdict("ref/userdict.txt")
# NOTE(review): `if 1>0:` is always true — presumably a leftover manual
# toggle for the database stage; confirm before cleaning up.
if 1>0:
    try:
        db = DBConfig()
        db.dbConnect()
        # Count news articles on/after the configured start date.
        query = "SELECT COUNT(*) from News WHERE date>=%s" % date
        db.executeQuery(query)
        news_num = int(db.results[0][0])
        query = "SELECT number, title, content from News WHERE date>=%s" % date
        db.executeQuery(query)
        texts = []
        today_number = []
        for result in db.results:
            # Segment the article body; keep tokens that are neither
            # stop words nor pure digits.
            seglist = jieba.cut(result[2], cut_all=False)
            line = []
            for word in seglist:
                if word.encode('utf8') not in stop_words and word.isdigit() == False:
                    line.append(word)
            texts.append(line)
            today_number.append(result[0])
        '''query = "SELECT date from News WHERE number=%s" % db.results[news_num-1][0]
        db.executeQuery(query)
        query = "UPDATE Information SET last_dict_date=%s WHERE 1" % db.results[0][0]
        db.executeQuery(query)
        db.dbCommit()'''
    except MySQLdb.Error as e:
        print "Error %d: %s" % (e.args[0], e.args[1])
# Rebuild the dictionary from the segmented corpus, pruning very rare
# (<10 docs) and very common (>50% of docs) tokens, then persist it.
dictionary = corpora.Dictionary(texts)
#dictionary = corpora.Dictionary.load_from_text('news.dict')
#dictionary.add_documents(texts)
dictionary.filter_extremes(no_below=10, no_above=0.5, keep_n=50000)
dictionary.save_as_text('news.dict')
# Bag-of-words corpus, serialized to disk for later reuse.
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('news.mm', corpus)
#tfidf.load('foo.tfidf_model')
# Fit and persist the TF-IDF model over the fresh corpus.
tfidf = models.TfidfModel(corpus)
tfidf.save('foo.tfidf_model')
print tfidf
#lda = models.ldamodel.LdaModel(tfidf[corpus], id2word=dictionary, num_topics = TOPIC_NUM)
#lda.save('foo.lda_model')
#lda = models.ldamodel.LdaModel.load('foo.lda_model')
| 32.767442
| 117
| 0.625621
|
4a04bcfb42d43a5b4ed01ff503a831787970a468
| 2,590
|
py
|
Python
|
basicsr/models/edvr_model.py
|
RuijiaoSun/BasicSR
|
b60162e9a0f17c63b87fce36092d08ab81304ab3
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-08-07T02:15:31.000Z
|
2021-09-09T02:52:15.000Z
|
basicsr/models/edvr_model.py
|
kuijiang0802/BasicSR
|
5c757162b348a09d236e00c2cc04463c0a8bba45
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
basicsr/models/edvr_model.py
|
kuijiang0802/BasicSR
|
5c757162b348a09d236e00c2cc04463c0a8bba45
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import logging
from torch.nn.parallel import DistributedDataParallel
from basicsr.utils.registry import MODEL_REGISTRY
from .video_base_model import VideoBaseModel
logger = logging.getLogger('basicsr')
@MODEL_REGISTRY.register()
class EDVRModel(VideoBaseModel):
    """EDVR video restoration model.

    Paper: EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.  # noqa: E501
    """

    def __init__(self, opt):
        super(EDVRModel, self).__init__(opt)
        if self.is_train:
            # Number of initial iterations during which only the TSA fusion
            # module is trained (None/absent disables the warm-up phase).
            self.train_tsa_iter = opt['train'].get('tsa_iter')

    def setup_optimizers(self):
        """Build the generator optimizer, optionally scaling the DCN learning rate."""
        train_opt = self.opt['train']
        dcn_lr_mul = train_opt.get('dcn_lr_mul', 1)
        logger.info(f'Multiple the learning rate for dcn with {dcn_lr_mul}.')
        if dcn_lr_mul == 1:
            optim_params = self.net_g.parameters()
        else:
            # Partition parameters so the deformable-conv ('dcn') weights get
            # their own, scaled learning rate.
            named = list(self.net_g.named_parameters())
            dcn_params = [p for n, p in named if 'dcn' in n]
            normal_params = [p for n, p in named if 'dcn' not in n]
            base_lr = train_opt['optim_g']['lr']
            optim_params = [
                {  # normal params keep the configured lr
                    'params': normal_params,
                    'lr': base_lr
                },
                {  # dcn params use the scaled lr
                    'params': dcn_params,
                    'lr': base_lr * dcn_lr_mul
                },
            ]
        optim_type = train_opt['optim_g'].pop('type')
        self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g'])
        self.optimizers.append(self.optimizer_g)

    def optimize_parameters(self, current_iter):
        """One training step; freezes all but the TSA module for the first iterations."""
        if self.train_tsa_iter:
            if current_iter == 1:
                logger.info(f'Only train TSA module for {self.train_tsa_iter} iters.')
                for name, param in self.net_g.named_parameters():
                    if 'fusion' not in name:
                        param.requires_grad = False
            elif current_iter == self.train_tsa_iter:
                logger.warning('Train all the parameters.')
                for param in self.net_g.parameters():
                    param.requires_grad = True
                if isinstance(self.net_g, DistributedDataParallel):
                    logger.warning('Set net_g.find_unused_parameters = False.')
                    self.net_g.find_unused_parameters = False
        # Deliberately bypass VideoBaseModel's implementation and call its parent's.
        super(VideoBaseModel, self).optimize_parameters(current_iter)
| 38.656716
| 97
| 0.585714
|
4a04be4fe54b8aac7770f72132159038720b2a26
| 1,133
|
py
|
Python
|
generated-libraries/python/netapp/storage_disk/storage_disk_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/storage_disk/storage_disk_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/storage_disk/storage_disk_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class StorageDiskGetIterKeyTd(NetAppObject):
    """
    Key typedef for table disk_view.

    Generated NetApp ONTAP API binding: holds the two key fields used to
    iterate/page through storage-disk-get-iter results.
    """
    # Backing storage for the 'disk' key field (key_1); None until assigned.
    _key_1 = None
    @property
    def key_1(self):
        """
        Field disk
        """
        return self._key_1
    @key_1.setter
    def key_1(self, val):
        # validate() (inherited from NetAppObject) raises on malformed values.
        if val != None:
            self.validate('key_1', val)
        self._key_1 = val
    # Backing storage for the 'hidden_uid' key field (key_0); None until assigned.
    _key_0 = None
    @property
    def key_0(self):
        """
        Field hidden_uid
        """
        return self._key_0
    @key_0.setter
    def key_0(self, val):
        # validate() (inherited from NetAppObject) raises on malformed values.
        if val != None:
            self.validate('key_0', val)
        self._key_0 = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name for this typedef.
        return "storage-disk-get-iter-key-td"
    @staticmethod
    def get_desired_attrs():
        # Attribute names (API spelling, dashed) that callers may request.
        return [
            'key-1',
            'key-0',
        ]
    def describe_properties(self):
        # Python 2 codebase: 'basestring' covers both str and unicode.
        return {
            'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| 22.66
| 87
| 0.520741
|
4a04bf61a4d4f14e76217f130029fa20cd6a5666
| 33,059
|
py
|
Python
|
flask_restplus/api.py
|
Sheile/flask-restplus
|
a8f35823fe40b2c7385632a2ad6b35b26467402c
|
[
"BSD-3-Clause"
] | 1
|
2019-10-11T00:49:54.000Z
|
2019-10-11T00:49:54.000Z
|
flask_restplus/api.py
|
Sheile/flask-restplus
|
a8f35823fe40b2c7385632a2ad6b35b26467402c
|
[
"BSD-3-Clause"
] | null | null | null |
flask_restplus/api.py
|
Sheile/flask-restplus
|
a8f35823fe40b2c7385632a2ad6b35b26467402c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import difflib
import inspect
from itertools import chain
import logging
import operator
import re
import six
import sys
from collections import OrderedDict
from functools import wraps, partial
from types import MethodType
from flask import url_for, request, current_app
from flask import make_response as original_flask_make_response
from flask.helpers import _endpoint_from_view_func
from flask.signals import got_request_exception
from jsonschema import RefResolver
from werkzeug import cached_property
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException, MethodNotAllowed, NotFound, NotAcceptable, InternalServerError
from werkzeug.wrappers import BaseResponse
from . import apidoc
from .mask import ParseError, MaskError
from .namespace import Namespace
from .postman import PostmanCollectionV1
from .resource import Resource
from .swagger import Swagger
from .utils import default_id, camel_to_dash, unpack
from .representations import output_json
from ._http import HTTPStatus
RE_RULES = re.compile('(<.*>)')
# List headers that should never be handled by Flask-RESTPlus
HEADERS_BLACKLIST = ('Content-Length',)
DEFAULT_REPRESENTATIONS = [('application/json', output_json)]
log = logging.getLogger(__name__)
class Api(object):
'''
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
The endpoint parameter prefix all views and resources:
- The API root/documentation will be ``{endpoint}.root``
- A resource registered as 'resource' will be available as ``{endpoint}.resource``
:param flask.Flask|flask.Blueprint app: the Flask application object or a Blueprint
:param str version: The API version (used in Swagger documentation)
:param str title: The API title (used in Swagger documentation)
:param str description: The API description (used in Swagger documentation)
:param str terms_url: The API terms page URL (used in Swagger documentation)
:param str contact: A contact email for the API (used in Swagger documentation)
:param str license: The license associated to the API (used in Swagger documentation)
:param str license_url: The license page URL (used in Swagger documentation)
:param str endpoint: The API base endpoint (default to 'api).
:param str default: The default namespace base name (default to 'default')
:param str default_label: The default namespace label (used in Swagger documentation)
:param str default_mediatype: The default media type to return
:param bool validate: Whether or not the API should perform input payload validation.
:param bool ordered: Whether or not preserve order models and marshalling.
:param str doc: The documentation path. If set to a false value, documentation is disabled.
(Default to '/')
:param list decorators: Decorators to attach to every resource
:param bool catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:param dict authorizations: A Swagger Authorizations declaration as dictionary
:param bool serve_challenge_on_401: Serve basic authentication challenge with 401
responses (default 'False')
:param FormatChecker format_checker: A jsonschema.FormatChecker object that is hooked into
the Model validator. A default or a custom FormatChecker can be provided (e.g., with custom
checkers), otherwise the default action is to not enforce any format validation.
'''
def __init__(self, app=None, version='1.0', title=None, description=None,
terms_url=None, license=None, license_url=None,
contact=None, contact_url=None, contact_email=None,
authorizations=None, security=None, doc='/', default_id=default_id,
default='default', default_label='Default namespace', validate=None,
tags=None, prefix='', ordered=False,
default_mediatype='application/json', decorators=None,
catch_all_404s=False, serve_challenge_on_401=False, format_checker=None,
**kwargs):
self.version = version
self.title = title or 'API'
self.description = description
self.terms_url = terms_url
self.contact = contact
self.contact_email = contact_email
self.contact_url = contact_url
self.license = license
self.license_url = license_url
self.authorizations = authorizations
self.security = security
self.default_id = default_id
self.ordered = ordered
self._validate = validate
self._doc = doc
self._doc_view = None
self._default_error_handler = None
self.tags = tags or []
self.error_handlers = {
ParseError: mask_parse_error_handler,
MaskError: mask_error_handler,
}
self._schema = None
self.models = {}
self._refresolver = None
self.format_checker = format_checker
self.namespaces = []
self.default_namespace = self.namespace(default, default_label,
endpoint='{0}-declaration'.format(default),
validate=validate,
api=self,
path='/',
)
self.ns_paths = dict()
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app is not None:
self.app = app
self.init_app(app)
# super(Api, self).__init__(app, **kwargs)
def init_app(self, app, **kwargs):
'''
Allow to lazy register the API on a Flask application::
>>> app = Flask(__name__)
>>> api = Api()
>>> api.init_app(app)
:param flask.Flask app: the Flask application object
:param str title: The API title (used in Swagger documentation)
:param str description: The API description (used in Swagger documentation)
:param str terms_url: The API terms page URL (used in Swagger documentation)
:param str contact: A contact email for the API (used in Swagger documentation)
:param str license: The license associated to the API (used in Swagger documentation)
:param str license_url: The license page URL (used in Swagger documentation)
'''
self.app = app
self.title = kwargs.get('title', self.title)
self.description = kwargs.get('description', self.description)
self.terms_url = kwargs.get('terms_url', self.terms_url)
self.contact = kwargs.get('contact', self.contact)
self.contact_url = kwargs.get('contact_url', self.contact_url)
self.contact_email = kwargs.get('contact_email', self.contact_email)
self.license = kwargs.get('license', self.license)
self.license_url = kwargs.get('license_url', self.license_url)
self._add_specs = kwargs.get('add_specs', True)
# If app is a blueprint, defer the initialization
try:
app.record(self._deferred_blueprint_init)
# Flask.Blueprint has a 'record' attribute, Flask.Api does not
except AttributeError:
self._init_app(app)
else:
self.blueprint = app
def _init_app(self, app):
'''
Perform initialization actions with the given :class:`flask.Flask` object.
:param flask.Flask app: The flask application object
'''
self._register_specs(self.blueprint or app)
self._register_doc(self.blueprint or app)
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, namespace, urls, kwargs in self.resources:
self._register_view(app, resource, namespace, *urls, **kwargs)
self._register_apidoc(app)
self._validate = self._validate if self._validate is not None else app.config.get('RESTPLUS_VALIDATE', False)
app.config.setdefault('RESTPLUS_MASK_HEADER', 'X-Fields')
app.config.setdefault('RESTPLUS_MASK_SWAGGER', True)
def __getattr__(self, name):
try:
return getattr(self.default_namespace, name)
except AttributeError:
raise AttributeError('Api does not have {0} attribute'.format(name))
def _complete_url(self, url_part, registration_prefix):
'''
This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
'''
parts = (registration_prefix, self.prefix, url_part)
return ''.join(part for part in parts if part)
def _register_apidoc(self, app):
conf = app.extensions.setdefault('restplus', {})
if not conf.get('apidoc_registered', False):
app.register_blueprint(apidoc.apidoc)
conf['apidoc_registered'] = True
def _register_specs(self, app_or_blueprint):
if self._add_specs:
endpoint = str('specs')
self._register_view(
app_or_blueprint,
SwaggerView,
self.default_namespace,
'/swagger.json',
endpoint=endpoint,
resource_class_args=(self, )
)
self.endpoints.add(endpoint)
def _register_doc(self, app_or_blueprint):
if self._add_specs and self._doc:
# Register documentation before root if enabled
app_or_blueprint.add_url_rule(self._doc, 'doc', self.render_doc)
app_or_blueprint.add_url_rule(self.prefix or '/', 'root', self.render_root)
def register_resource(self, namespace, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None)
endpoint = str(endpoint or self.default_endpoint(resource, namespace))
kwargs['endpoint'] = endpoint
self.endpoints.add(endpoint)
if self.app is not None:
self._register_view(self.app, resource, namespace, *urls, **kwargs)
else:
self.resources.append((resource, namespace, urls, kwargs))
return endpoint
def _register_view(self, app, resource, namespace, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or camel_to_dash(resource.__name__)
resource_class_args = kwargs.pop('resource_class_args', ())
resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})
# NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
if endpoint in getattr(app, 'view_functions', {}):
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the
# collision by raising an exception
if previous_view_class != resource:
msg = 'This endpoint (%s) is already set to the class %s.'
raise ValueError(msg % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint, self, *resource_class_args,
**resource_class_kwargs))
# Apply Namespace and Api decorators to a resource
for decorator in chain(namespace.decorators, self.decorators):
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs)
continue
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
def output(self, resource):
'''
Wraps a resource (as a flask view function),
for cases where the resource does not directly return a response object
:param resource: The resource as a flask view function
'''
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, BaseResponse):
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
    def make_response(self, data, *args, **kwargs):
        '''
        Looks up the representation transformer for the requested media
        type, invoking the transformer to create a response object. This
        defaults to default_mediatype if no transformer is found for the
        requested mediatype. If default_mediatype is None, a 406 Not
        Acceptable response will be sent as per RFC 2616 section 14.1

        :param data: Python object containing response data to be transformed
        '''
        # A caller-supplied fallback (e.g. while already handling a 406)
        # takes precedence over the Api-wide default mediatype.
        default_mediatype = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
        mediatype = request.accept_mimetypes.best_match(
            self.representations,
            default=default_mediatype,
        )
        if mediatype is None:
            # Nothing acceptable and no default to fall back on.
            raise NotAcceptable()
        if mediatype in self.representations:
            resp = self.representations[mediatype](data, *args, **kwargs)
            resp.headers['Content-Type'] = mediatype
            return resp
        elif mediatype == 'text/plain':
            # Built-in last-resort representation: stringify the payload.
            resp = original_flask_make_response(str(data), *args, **kwargs)
            resp.headers['Content-Type'] = 'text/plain'
            return resp
        else:
            # default_mediatype named a representation that was never registered.
            raise InternalServerError()
def documentation(self, func):
'''A decorator to specify a view funtion for the documentation'''
self._doc_view = func
return func
def render_root(self):
self.abort(HTTPStatus.NOT_FOUND)
def render_doc(self):
'''Override this method to customize the documentation page'''
if self._doc_view:
return self._doc_view()
elif not self._doc:
self.abort(HTTPStatus.NOT_FOUND)
return apidoc.ui_for(self)
def default_endpoint(self, resource, namespace):
'''
Provide a default endpoint for a resource on a given namespace.
Endpoints are ensured not to collide.
Override this method specify a custom algoryhtm for default endpoint.
:param Resource resource: the resource for which we want an endpoint
:param Namespace namespace: the namespace holding the resource
:returns str: An endpoint name
'''
endpoint = camel_to_dash(resource.__name__)
if namespace is not self.default_namespace:
endpoint = '{ns.name}_{endpoint}'.format(ns=namespace, endpoint=endpoint)
if endpoint in self.endpoints:
suffix = 2
while True:
new_endpoint = '{base}_{suffix}'.format(base=endpoint, suffix=suffix)
if new_endpoint not in self.endpoints:
endpoint = new_endpoint
break
suffix += 1
return endpoint
    def get_ns_path(self, ns):
        '''Return the registration path override for *ns*, or ``None`` if unset.'''
        return self.ns_paths.get(ns)
def ns_urls(self, ns, urls):
path = self.get_ns_path(ns) or ns.path
return [path + url for url in urls]
def add_namespace(self, ns, path=None):
'''
This method registers resources from namespace for current instance of api.
You can use argument path for definition custom prefix url for namespace.
:param Namespace ns: the namespace
:param path: registration prefix of namespace
'''
if ns not in self.namespaces:
self.namespaces.append(ns)
if self not in ns.apis:
ns.apis.append(self)
# Associate ns with prefix-path
if path is not None:
self.ns_paths[ns] = path
# Register resources
for resource, urls, kwargs in ns.resources:
self.register_resource(ns, resource, *self.ns_urls(ns, urls), **kwargs)
# Register models
for name, definition in six.iteritems(ns.models):
self.models[name] = definition
def namespace(self, *args, **kwargs):
'''
A namespace factory.
:returns Namespace: a new namespace instance
'''
kwargs['ordered'] = kwargs.get('ordered', self.ordered)
ns = Namespace(*args, **kwargs)
self.add_namespace(ns)
return ns
def endpoint(self, name):
if self.blueprint:
return '{0}.{1}'.format(self.blueprint.name, name)
else:
return name
@property
def specs_url(self):
'''
The Swagger specifications absolute url (ie. `swagger.json`)
:rtype: str
'''
return url_for(self.endpoint('specs'), _external=True)
@property
def base_url(self):
'''
The API base absolute url
:rtype: str
'''
return url_for(self.endpoint('root'), _external=True)
@property
def base_path(self):
'''
The API path
:rtype: str
'''
return url_for(self.endpoint('root'), _external=False)
@cached_property
def __schema__(self):
'''
The Swagger specifications/schema for this API
:returns dict: the schema as a serializable dict
'''
if not self._schema:
try:
self._schema = Swagger(self).as_dict()
except Exception:
# Log the source exception for debugging purpose
# and return an error message
msg = 'Unable to render schema'
log.exception(msg) # This will provide a full traceback
return {'error': msg}
return self._schema
@property
def _own_and_child_error_handlers(self):
rv = {}
rv.update(self.error_handlers)
for ns in self.namespaces:
for exception, handler in six.iteritems(ns.error_handlers):
rv[exception] = handler
return rv
def errorhandler(self, exception):
'''A decorator to register an error handler for a given exception'''
if inspect.isclass(exception) and issubclass(exception, Exception):
# Register an error handler for a given exception
def wrapper(func):
self.error_handlers[exception] = func
return func
return wrapper
else:
# Register the default error handler
self._default_error_handler = exception
return exception
def owns_endpoint(self, endpoint):
'''
Tests if an endpoint name (not path) belongs to this Api.
Takes into account the Blueprint name part of the endpoint name.
:param str endpoint: The name of the endpoint being checked
:return: bool
'''
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
def _should_use_fr_error_handler(self):
'''
Determine if error should be handled with FR or default Flask
The goal is to return Flask error handlers for non-FR-related routes,
and FR errors (with the correct media type) for FR endpoints. This
method currently handles 404 and 405 errors.
:return: bool
'''
adapter = current_app.create_url_adapter(request)
try:
adapter.match()
except MethodNotAllowed as e:
# Check if the other HTTP methods at this url would hit the Api
valid_route_method = e.valid_methods[0]
rule, _ = adapter.match(method=valid_route_method, return_rule=True)
return self.owns_endpoint(rule.endpoint)
except NotFound:
return self.catch_all_404s
except Exception:
# Werkzeug throws other kinds of exceptions, such as Redirect
pass
def _has_fr_route(self):
'''Encapsulating the rules for whether the request was to a Flask endpoint'''
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
    def error_router(self, original_handler, e):
        '''
        This function decides whether the error occurred in a flask-restplus
        endpoint or not. If it happened in a flask-restplus endpoint, our
        handler will be dispatched. If it happened in an unrelated view, the
        app's original error handler will be dispatched.
        In the event that the error occurred in a flask-restplus endpoint but
        the local handler can't resolve the situation, the router will fall
        back onto the original_handler as last resort.

        :param function original_handler: the original Flask error handler for the app
        :param Exception e: the exception raised while handling the request
        '''
        if self._has_fr_route():
            try:
                return self.handle_error(e)
            except Exception:
                pass  # Fall through to original handler
        return original_handler(e)
    def handle_error(self, e):
        '''
        Error handler for the API transforms a raised exception into a Flask response,
        with the appropriate HTTP status code and body.

        Resolution order: the first registered (own or namespace) handler
        whose exception type matches, then ``HTTPException`` defaults, then
        the default error handler, then a plain 500.

        :param Exception e: the raised Exception object
        '''
        got_request_exception.send(current_app._get_current_object(), exception=e)
        include_message_in_response = current_app.config.get("ERROR_INCLUDE_MESSAGE", True)
        default_data = {}
        headers = Headers()
        # First matching registered handler (own or from a namespace) wins.
        for typecheck, handler in six.iteritems(self._own_and_child_error_handlers):
            if isinstance(e, typecheck):
                result = handler(e)
                default_data, code, headers = unpack(result, HTTPStatus.INTERNAL_SERVER_ERROR)
                break
        else:
            # No registered handler matched: derive status/body from the exception.
            if isinstance(e, HTTPException):
                code = HTTPStatus(e.code)
                if include_message_in_response:
                    default_data = {
                        'message': getattr(e, 'description', code.phrase)
                    }
                headers = e.get_response().headers
            elif self._default_error_handler:
                result = self._default_error_handler(e)
                default_data, code, headers = unpack(result, HTTPStatus.INTERNAL_SERVER_ERROR)
            else:
                code = HTTPStatus.INTERNAL_SERVER_ERROR
                if include_message_in_response:
                    default_data = {
                        'message': code.phrase,
                    }
        if include_message_in_response:
            # Guarantee a 'message' key, defaulting to the stringified exception.
            default_data['message'] = default_data.get('message', str(e))
        # An exception may carry its own payload via a 'data' attribute.
        data = getattr(e, 'data', default_data)
        fallback_mediatype = None
        if code >= HTTPStatus.INTERNAL_SERVER_ERROR:
            # Log server errors with the active traceback when available.
            exc_info = sys.exc_info()
            if exc_info[1] is None:
                exc_info = None
            current_app.log_exception(exc_info)
        elif code == HTTPStatus.NOT_FOUND and current_app.config.get("ERROR_404_HELP", True) \
                and include_message_in_response:
            data['message'] = self._help_on_404(data.get('message', None))
        elif code == HTTPStatus.NOT_ACCEPTABLE and self.default_mediatype is None:
            # if we are handling NotAcceptable (406), make sure that
            # make_response uses a representation we support as the
            # default mediatype (so that make_response doesn't throw
            # another NotAcceptable error).
            supported_mediatypes = list(self.representations.keys())
            fallback_mediatype = supported_mediatypes[0] if supported_mediatypes else "text/plain"
        # Remove blacklisted headers
        for header in HEADERS_BLACKLIST:
            headers.pop(header, None)
        resp = self.make_response(data, code, headers, fallback_mediatype=fallback_mediatype)
        if code == HTTPStatus.UNAUTHORIZED:
            resp = self.unauthorized(resp)
        return resp
def _help_on_404(self, message=None):
rules = dict([(RE_RULES.sub('', rule.rule), rule.rule)
for rule in current_app.url_map.iter_rules()])
close_matches = difflib.get_close_matches(request.path, rules.keys())
if close_matches:
# If we already have a message, add punctuation and continue it.
message = ''.join((
(message.rstrip('.') + '. ') if message else '',
'You have requested this URI [',
request.path,
'] but did you mean ',
' or '.join((rules[match] for match in close_matches)),
' ?',
))
return message
def as_postman(self, urlvars=False, swagger=False):
'''
Serialize the API as Postman collection (v1)
:param bool urlvars: whether to include or not placeholders for query strings
:param bool swagger: whether to include or not the swagger.json specifications
'''
return PostmanCollectionV1(self, swagger=swagger).as_dict(urlvars=urlvars)
@property
def payload(self):
'''Store the input payload in the current request context'''
return request.get_json()
@property
def refresolver(self):
if not self._refresolver:
self._refresolver = RefResolver.from_schema(self.__schema__)
return self._refresolver
@staticmethod
def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
'''
Method used to patch BlueprintSetupState.add_url_rule for setup
state instance corresponding to this Api instance. Exists primarily
to enable _complete_url's function.
:param blueprint_setup: The BlueprintSetupState instance (self)
:param rule: A string or callable that takes a string and returns a
string(_complete_url) that is the url rule for the endpoint
being registered
:param endpoint: See BlueprintSetupState.add_url_rule
:param view_func: See BlueprintSetupState.add_url_rule
:param **options: See BlueprintSetupState.add_url_rule
'''
if callable(rule):
rule = rule(blueprint_setup.url_prefix)
elif blueprint_setup.url_prefix:
rule = blueprint_setup.url_prefix + rule
options.setdefault('subdomain', blueprint_setup.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = blueprint_setup.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
def _deferred_blueprint_init(self, setup_state):
'''
Synchronize prefix between blueprint/api and registration options, then
perform initialization with setup_state.app :class:`flask.Flask` object.
When a :class:`flask_restplus.Api` object is initialized with a blueprint,
this method is recorded on the blueprint to be run when the blueprint is later
registered to a :class:`flask.Flask` object. This method also monkeypatches
BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.
:param setup_state: The setup state object passed to deferred functions
during blueprint registration
:type setup_state: flask.blueprints.BlueprintSetupState
'''
self.blueprint_setup = setup_state
if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
setup_state._original_add_url_rule = setup_state.add_url_rule
setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
setup_state)
if not setup_state.first_registration:
raise ValueError('flask-restplus blueprints can only be registered once.')
self._init_app(setup_state.app)
def mediatypes_method(self):
'''Return a method that returns a list of mediatypes'''
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
def mediatypes(self):
'''Returns a list of requested mediatypes sent in the Accept header'''
return [h for h, q in sorted(request.accept_mimetypes,
key=operator.itemgetter(1), reverse=True)]
def representation(self, mediatype):
'''
Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
'''
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
def unauthorized(self, response):
'''Given a response, change it to ask for credentials'''
if self.serve_challenge_on_401:
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restplus")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
def url_for(self, resource, **values):
'''
Generates a URL to the given resource.
Works like :func:`flask.url_for`.
'''
endpoint = resource.endpoint
if self.blueprint:
endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
return url_for(endpoint, **values)
class SwaggerView(Resource):
    '''Render the Swagger specifications as JSON'''

    def get(self):
        schema = self.api.__schema__
        # Schema rendering failures are reported as {'error': ...} with a 500.
        status = HTTPStatus.INTERNAL_SERVER_ERROR if 'error' in schema else HTTPStatus.OK
        return schema, status

    def mediatypes(self):
        return ['application/json']
def mask_parse_error_handler(error):
    '''When a mask can't be parsed'''
    payload = {'message': 'Mask parse error: {0}'.format(error)}
    return payload, HTTPStatus.BAD_REQUEST
def mask_error_handler(error):
    '''When any error occurs on mask'''
    payload = {'message': 'Mask error: {0}'.format(error)}
    return payload, HTTPStatus.BAD_REQUEST
| 40.168894
| 117
| 0.64037
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.