hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72734b87340580ae862b0b1ab93d1933cead503 | 608 | py | Python | 100_days_of_code/Beginner/day_13/art.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | 100_days_of_code/Beginner/day_13/art.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | 100_days_of_code/Beginner/day_13/art.py | Tiago-S-Ribeiro/Python-Pro-Bootcamp | 20a82443fe2e6ee9040ecd9a03853e6c6346592c | [
"MIT"
] | null | null | null | logo = '''
______ __ __ _ __ __
/ ____/__ __ ___ _____ _____ / /_ / /_ ___ / | / /__ __ ____ ___ / /_ ___ _____
/ / __ / / / // _ \ / ___// ___/ / __// __ \ / _ \ / |/ // / / // __ `__ \ / __ \ / _ \ / ___/
/ /_/ // /_/ // __/(__ )(__ ) / /_ / / / // __/ / /| // /_/ // / / / / // /_/ // __// /
\____/ \__,_/ \___//____//____/ \__//_/ /_/ \___/ /_/ |_/ \__,_//_/ /_/ /_//_.___/ \___//_/
''' | 86.857143 | 193 | 0.238487 | logo = '''
______ __ __ _ __ __
/ ____/__ __ ___ _____ _____ / /_ / /_ ___ / | / /__ __ ____ ___ / /_ ___ _____
/ / __ / / / // _ \ / ___// ___/ / __// __ \ / _ \ / |/ // / / // __ `__ \ / __ \ / _ \ / ___/
/ /_/ // /_/ // __/(__ )(__ ) / /_ / / / // __/ / /| // /_/ // / / / / // /_/ // __// /
\____/ \__,_/ \___//____//____/ \__//_/ /_/ \___/ /_/ |_/ \__,_//_/ /_/ /_//_.___/ \___//_/
''' | true | true |
f72734c285abc83d4428383f1e1fdcf37a42b826 | 12,086 | py | Python | mergify_engine/web/root.py | v1v/mergify-engine | 21f63be9987740e1466459f966b186392a235051 | [
"Apache-2.0"
] | null | null | null | mergify_engine/web/root.py | v1v/mergify-engine | 21f63be9987740e1466459f966b186392a235051 | [
"Apache-2.0"
] | 261 | 2020-10-15T15:56:15.000Z | 2022-03-31T07:08:30.000Z | mergify_engine/web/root.py | v1v/mergify-engine | 21f63be9987740e1466459f966b186392a235051 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2019–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import typing
import aredis
import daiquiri
from datadog import statsd
import fastapi
import httpx
from starlette import requests
from starlette import responses
import voluptuous
from mergify_engine import config
from mergify_engine import github_events
from mergify_engine import github_types
from mergify_engine import json
from mergify_engine import subscription
from mergify_engine import utils
from mergify_engine.clients import github
from mergify_engine.clients import http
from mergify_engine.queue import merge_train
from mergify_engine.web import auth
from mergify_engine.web import badges
from mergify_engine.web import config_validator
from mergify_engine.web import redis
from mergify_engine.web import simulator
LOG = daiquiri.getLogger(__name__)
app = fastapi.FastAPI()
app.mount("/simulator", simulator.app)
app.mount("/validate", config_validator.app)
app.mount("/badges", badges.app)
# Set the maximum timeout to 5 seconds: GitHub is not going to wait for
# more than 10 seconds for us to accept an event, so if we're unable to
# forward an event in 5 seconds, just drop it.
EVENT_FORWARD_TIMEOUT = 5
@app.on_event("startup")
async def startup() -> None:
    """Open the app's Redis connections when the ASGI server starts."""
    await redis.startup()
@app.on_event("shutdown")
async def shutdown() -> None:
    """Close the app's Redis connections when the ASGI server stops."""
    await redis.shutdown()
@app.exception_handler(aredis.exceptions.ConnectionError)
async def redis_errors(
    request: requests.Request, exc: aredis.exceptions.ConnectionError
) -> responses.JSONResponse:
    """Turn a lost Redis connection into a 503 instead of a 500.

    Also bumps a statsd counter so connection flaps are visible in metrics.
    """
    statsd.increment("redis.client.connection.errors")
    LOG.warning("FastAPI lost Redis connection", exc_info=exc)
    return responses.JSONResponse(status_code=503)
@app.get("/installation") # noqa: FS003
async def installation() -> responses.Response:
    """Acknowledge a GitHub App installation.

    The interactive installer has been disabled; this endpoint only
    confirms the installation succeeded.  (Fixes the grammar of the
    user-facing message: "succeed"/"have been" -> "succeeded"/"has been".)
    """
    return responses.Response(
        "Your Mergify installation succeeded; the installer has been disabled.",
        status_code=200,
    )
@app.post(
    "/refresh/{owner}/{repo_name}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_repo(
    owner: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
    redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
        redis.get_redis_stream
    ),
) -> responses.Response:
    """Queue a refresh event for a whole repository.

    Looks the repository up on GitHub first so a bad owner/repo pair
    returns 404 instead of silently queuing a useless event.

    Returns:
        202 "Refresh queued" on success, 404 if the repository is unknown.
    """
    async with github.aget_client(owner_name=owner) as client:
        try:
            repository = await client.item(f"/repos/{owner}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(
                status_code=404, content="repository not found"
            )
    await github_events.send_refresh(redis_cache, redis_stream, repository)
    return responses.Response("Refresh queued", status_code=202)
RefreshActionSchema = voluptuous.Schema(voluptuous.Any("user", "admin", "internal"))
@app.post(
    "/refresh/{owner}/{repo_name}/pull/{pull_request_number}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_pull(
    owner: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    pull_request_number: github_types.GitHubPullRequestNumber,
    action: github_types.GitHubEventRefreshActionType = "user",
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
    redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
        redis.get_redis_stream
    ),
) -> responses.Response:
    """Queue a refresh event for a single pull request.

    `action` must be one of "user", "admin" or "internal"; anything else
    is rejected by the voluptuous schema below.

    Returns:
        202 "Refresh queued" on success, 404 if the repository is unknown.
    """
    # Validate the caller-supplied action against the allowed values.
    action = RefreshActionSchema(action)
    async with github.aget_client(owner_name=owner) as client:
        try:
            repository = await client.item(f"/repos/{owner}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(
                status_code=404, content="repository not found"
            )
    await github_events.send_refresh(
        redis_cache,
        redis_stream,
        repository,
        pull_request_number=pull_request_number,
        action=action,
    )
    return responses.Response("Refresh queued", status_code=202)
@app.post(
    "/refresh/{owner}/{repo_name}/branch/{branch}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_branch(
    owner: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    branch: str,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
    redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
        redis.get_redis_stream
    ),
) -> responses.Response:
    """Queue a refresh event for every pull request targeting a branch.

    Returns:
        202 "Refresh queued" on success, 404 if the repository is unknown.
    """
    async with github.aget_client(owner_name=owner) as client:
        try:
            repository = await client.item(f"/repos/{owner}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(
                status_code=404, content="repository not found"
            )
    await github_events.send_refresh(
        redis_cache,
        redis_stream,
        repository,
        # The engine expects a fully-qualified git ref, not a bare name.
        ref=github_types.GitHubRefType(f"refs/heads/{branch}"),
    )
    return responses.Response("Refresh queued", status_code=202)
@app.put(
    "/subscription-cache/{owner_id}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def subscription_cache_update(
    owner_id: github_types.GitHubAccountIdType,
    request: requests.Request,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
) -> responses.Response:
    """Replace the cached subscription data for an account.

    The request body is the subscription dict; an empty/null body
    yields a 400 instead of wiping the cache entry.
    """
    sub = await request.json()
    if sub is None:
        return responses.Response("Empty content", status_code=400)
    await subscription.Subscription.from_dict(
        redis_cache, int(owner_id), sub
    ).save_subscription_to_cache()
    return responses.Response("Cache updated", status_code=200)
@app.delete(
    "/subscription-cache/{owner_id}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def subscription_cache_delete(
    owner_id: github_types.GitHubAccountIdType,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
) -> responses.Response:
    """Drop the cached subscription for an account (forces a refetch)."""
    await subscription.Subscription.delete(redis_cache, owner_id)
    return responses.Response("Cache cleaned", status_code=200)
@app.post("/marketplace", dependencies=[fastapi.Depends(auth.signature)])
async def marketplace_handler(
    request: requests.Request,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
) -> responses.Response:
    """Handle GitHub Marketplace webhook events.

    Invalidates the cached subscription of the purchasing account and,
    if configured, forwards the raw event to another endpoint.
    Always answers 202 — forwarding failures are logged, not surfaced.
    """
    event_type = request.headers.get("X-GitHub-Event")
    event_id = request.headers.get("X-GitHub-Delivery")
    data = await request.json()
    LOG.info(
        "Marketplace event",
        event_type=event_type,
        event_id=event_id,
        sender=data["sender"]["login"],
        gh_owner=data["marketplace_purchase"]["account"]["login"],
    )
    # Drop the cached subscription so the account's (possibly changed)
    # plan is refetched on the next engine run.
    await subscription.Subscription.delete(
        redis_cache, data["marketplace_purchase"]["account"]["id"]
    )
    if config.WEBHOOK_MARKETPLACE_FORWARD_URL:
        raw = await request.body()
        try:
            async with http.AsyncClient(timeout=EVENT_FORWARD_TIMEOUT) as client:
                await client.post(
                    config.WEBHOOK_MARKETPLACE_FORWARD_URL,
                    content=raw.decode(),
                    # Replay the original GitHub headers so the receiver
                    # can verify the signature itself.
                    headers={
                        "X-GitHub-Event": event_type,
                        "X-GitHub-Delivery": event_id,
                        "X-Hub-Signature": request.headers.get("X-Hub-Signature"),
                        "User-Agent": request.headers.get("User-Agent"),
                        "Content-Type": request.headers.get("Content-Type"),
                    },
                )
        except httpx.TimeoutException:
            # Forwarding is best-effort: log the drop and still ACK GitHub.
            LOG.warning(
                "Fail to forward Marketplace event",
                event_type=event_type,
                event_id=event_id,
                sender=data["sender"]["login"],
                gh_owner=data["marketplace_purchase"]["account"]["login"],
            )
    return responses.Response("Event queued", status_code=202)
@app.get(
    "/queues/{owner_id}", # noqa: FS003
    dependencies=[fastapi.Depends(auth.signature)],
)
async def queues(
    owner_id: github_types.GitHubAccountIdType,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
) -> responses.Response:
    """Return the merge queues of every repository of an account.

    The JSON response maps repository id -> branch -> ordered list of
    queued pull request numbers, merging both the legacy "merge-queue"
    sorted sets and the newer "merge-train" serialized trains.

    Fix: removed a redundant second `queue.split("~")` in the
    merge-train branch — the key had already been unpacked above.
    """
    queues: typing.Dict[
        str, typing.Dict[str, typing.List[int]]
    ] = collections.defaultdict(dict)
    async for queue in redis_cache.scan_iter(
        match=f"merge-*~{owner_id}~*", count=10000
    ):
        # Keys look like "<type>~<owner_id>~<repo_id>~<branch>".
        queue_type, _, repo_id, branch = queue.split("~")
        if queue_type == "merge-queue":
            queues[repo_id][branch] = [
                int(pull) async for pull, _ in redis_cache.zscan_iter(queue)
            ]
        elif queue_type == "merge-train":
            train_raw = await redis_cache.get(queue)
            train = typing.cast(merge_train.Train.Serialized, json.loads(train_raw))
            # Cars are the pulls currently under test, followed by the
            # pulls waiting behind them.
            queues[repo_id][branch] = [
                int(c["user_pull_request_number"]) for c in train["cars"]
            ] + [int(wp[0]) for wp in train["waiting_pulls"]]
    return responses.JSONResponse(status_code=200, content=queues)
@app.post("/event", dependencies=[fastapi.Depends(auth.signature)])
async def event_handler(
    request: requests.Request,
    redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
        redis.get_redis_cache
    ),
    redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
        redis.get_redis_stream
    ),
) -> responses.Response:
    """Main GitHub webhook endpoint.

    Dispatches the event into the engine's Redis stream, then optionally
    forwards selected event types to an external URL.  Ignored events
    answer 200, queued events 202; forwarding failures are only logged.
    """
    event_type = request.headers.get("X-GitHub-Event")
    event_id = request.headers.get("X-GitHub-Delivery")
    data = await request.json()
    try:
        await github_events.filter_and_dispatch(
            redis_cache, redis_stream, event_type, event_id, data
        )
    except github_events.IgnoredEvent as ie:
        status_code = 200
        reason = f"Event ignored: {ie.reason}"
    else:
        status_code = 202
        reason = "Event queued"
    # Forward only whitelisted event types, and only when configured.
    if (
        config.WEBHOOK_APP_FORWARD_URL
        and config.WEBHOOK_FORWARD_EVENT_TYPES is not None
        and event_type in config.WEBHOOK_FORWARD_EVENT_TYPES
    ):
        raw = await request.body()
        try:
            async with http.AsyncClient(timeout=EVENT_FORWARD_TIMEOUT) as client:
                await client.post(
                    config.WEBHOOK_APP_FORWARD_URL,
                    content=raw.decode(),
                    # Replay GitHub's headers so the receiver can verify
                    # the payload signature itself.
                    headers={
                        "X-GitHub-Event": event_type,
                        "X-GitHub-Delivery": event_id,
                        "X-Hub-Signature": request.headers.get("X-Hub-Signature"),
                        "User-Agent": request.headers.get("User-Agent"),
                        "Content-Type": request.headers.get("Content-Type"),
                    },
                )
        except httpx.TimeoutException:
            # Best-effort forwarding: log the drop, keep the original status.
            LOG.warning(
                "Fail to forward GitHub event",
                event_type=event_type,
                event_id=event_id,
                sender=data["sender"]["login"],
            )
    return responses.Response(reason, status_code=status_code)
@app.get("/")
async def index(): # pragma: no cover
    """Redirect the bare root URL to the Mergify website."""
    return responses.RedirectResponse(url="https://mergify.io/")
| 34.141243 | 84 | 0.663247 |
import collections
import typing
import aredis
import daiquiri
from datadog import statsd
import fastapi
import httpx
from starlette import requests
from starlette import responses
import voluptuous
from mergify_engine import config
from mergify_engine import github_events
from mergify_engine import github_types
from mergify_engine import json
from mergify_engine import subscription
from mergify_engine import utils
from mergify_engine.clients import github
from mergify_engine.clients import http
from mergify_engine.queue import merge_train
from mergify_engine.web import auth
from mergify_engine.web import badges
from mergify_engine.web import config_validator
from mergify_engine.web import redis
from mergify_engine.web import simulator
LOG = daiquiri.getLogger(__name__)
app = fastapi.FastAPI()
app.mount("/simulator", simulator.app)
app.mount("/validate", config_validator.app)
app.mount("/badges", badges.app)
# forward an event in 5 seconds, just drop it.
EVENT_FORWARD_TIMEOUT = 5
@app.on_event("startup")
async def startup() -> None:
await redis.startup()
@app.on_event("shutdown")
async def shutdown() -> None:
await redis.shutdown()
@app.exception_handler(aredis.exceptions.ConnectionError)
async def redis_errors(
request: requests.Request, exc: aredis.exceptions.ConnectionError
) -> responses.JSONResponse:
statsd.increment("redis.client.connection.errors")
LOG.warning("FastAPI lost Redis connection", exc_info=exc)
return responses.JSONResponse(status_code=503)
@app.get("/installation") # noqa: FS003
async def installation() -> responses.Response:
return responses.Response(
"Your mergify installation succeed, the installer have been disabled.",
status_code=200,
)
@app.post(
"/refresh/{owner}/{repo_name}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_repo(
owner: github_types.GitHubLogin,
repo_name: github_types.GitHubRepositoryName,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
redis.get_redis_stream
),
) -> responses.Response:
async with github.aget_client(owner_name=owner) as client:
try:
repository = await client.item(f"/repos/{owner}/{repo_name}")
except http.HTTPNotFound:
return responses.JSONResponse(
status_code=404, content="repository not found"
)
await github_events.send_refresh(redis_cache, redis_stream, repository)
return responses.Response("Refresh queued", status_code=202)
RefreshActionSchema = voluptuous.Schema(voluptuous.Any("user", "admin", "internal"))
@app.post(
"/refresh/{owner}/{repo_name}/pull/{pull_request_number}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_pull(
owner: github_types.GitHubLogin,
repo_name: github_types.GitHubRepositoryName,
pull_request_number: github_types.GitHubPullRequestNumber,
action: github_types.GitHubEventRefreshActionType = "user",
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
redis.get_redis_stream
),
) -> responses.Response:
action = RefreshActionSchema(action)
async with github.aget_client(owner_name=owner) as client:
try:
repository = await client.item(f"/repos/{owner}/{repo_name}")
except http.HTTPNotFound:
return responses.JSONResponse(
status_code=404, content="repository not found"
)
await github_events.send_refresh(
redis_cache,
redis_stream,
repository,
pull_request_number=pull_request_number,
action=action,
)
return responses.Response("Refresh queued", status_code=202)
@app.post(
"/refresh/{owner}/{repo_name}/branch/{branch}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def refresh_branch(
owner: github_types.GitHubLogin,
repo_name: github_types.GitHubRepositoryName,
branch: str,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
redis.get_redis_stream
),
) -> responses.Response:
async with github.aget_client(owner_name=owner) as client:
try:
repository = await client.item(f"/repos/{owner}/{repo_name}")
except http.HTTPNotFound:
return responses.JSONResponse(
status_code=404, content="repository not found"
)
await github_events.send_refresh(
redis_cache,
redis_stream,
repository,
ref=github_types.GitHubRefType(f"refs/heads/{branch}"),
)
return responses.Response("Refresh queued", status_code=202)
@app.put(
"/subscription-cache/{owner_id}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def subscription_cache_update(
owner_id: github_types.GitHubAccountIdType,
request: requests.Request,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
) -> responses.Response:
sub = await request.json()
if sub is None:
return responses.Response("Empty content", status_code=400)
await subscription.Subscription.from_dict(
redis_cache, int(owner_id), sub
).save_subscription_to_cache()
return responses.Response("Cache updated", status_code=200)
@app.delete(
"/subscription-cache/{owner_id}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def subscription_cache_delete(
owner_id: github_types.GitHubAccountIdType,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
) -> responses.Response:
await subscription.Subscription.delete(redis_cache, owner_id)
return responses.Response("Cache cleaned", status_code=200)
@app.post("/marketplace", dependencies=[fastapi.Depends(auth.signature)])
async def marketplace_handler(
request: requests.Request,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
) -> responses.Response:
event_type = request.headers.get("X-GitHub-Event")
event_id = request.headers.get("X-GitHub-Delivery")
data = await request.json()
LOG.info(
"Marketplace event",
event_type=event_type,
event_id=event_id,
sender=data["sender"]["login"],
gh_owner=data["marketplace_purchase"]["account"]["login"],
)
await subscription.Subscription.delete(
redis_cache, data["marketplace_purchase"]["account"]["id"]
)
if config.WEBHOOK_MARKETPLACE_FORWARD_URL:
raw = await request.body()
try:
async with http.AsyncClient(timeout=EVENT_FORWARD_TIMEOUT) as client:
await client.post(
config.WEBHOOK_MARKETPLACE_FORWARD_URL,
content=raw.decode(),
headers={
"X-GitHub-Event": event_type,
"X-GitHub-Delivery": event_id,
"X-Hub-Signature": request.headers.get("X-Hub-Signature"),
"User-Agent": request.headers.get("User-Agent"),
"Content-Type": request.headers.get("Content-Type"),
},
)
except httpx.TimeoutException:
LOG.warning(
"Fail to forward Marketplace event",
event_type=event_type,
event_id=event_id,
sender=data["sender"]["login"],
gh_owner=data["marketplace_purchase"]["account"]["login"],
)
return responses.Response("Event queued", status_code=202)
@app.get(
"/queues/{owner_id}", # noqa: FS003
dependencies=[fastapi.Depends(auth.signature)],
)
async def queues(
owner_id: github_types.GitHubAccountIdType,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
) -> responses.Response:
queues: typing.Dict[
str, typing.Dict[str, typing.List[int]]
] = collections.defaultdict(dict)
async for queue in redis_cache.scan_iter(
match=f"merge-*~{owner_id}~*", count=10000
):
queue_type, _, repo_id, branch = queue.split("~")
if queue_type == "merge-queue":
queues[repo_id][branch] = [
int(pull) async for pull, _ in redis_cache.zscan_iter(queue)
]
elif queue_type == "merge-train":
train_raw = await redis_cache.get(queue)
train = typing.cast(merge_train.Train.Serialized, json.loads(train_raw))
_, _, repo_id, branch = queue.split("~")
queues[repo_id][branch] = [
int(c["user_pull_request_number"]) for c in train["cars"]
] + [int(wp[0]) for wp in train["waiting_pulls"]]
return responses.JSONResponse(status_code=200, content=queues)
@app.post("/event", dependencies=[fastapi.Depends(auth.signature)])
async def event_handler(
request: requests.Request,
redis_cache: utils.RedisCache = fastapi.Depends( # noqa: B008
redis.get_redis_cache
),
redis_stream: utils.RedisStream = fastapi.Depends( # noqa: B008
redis.get_redis_stream
),
) -> responses.Response:
event_type = request.headers.get("X-GitHub-Event")
event_id = request.headers.get("X-GitHub-Delivery")
data = await request.json()
try:
await github_events.filter_and_dispatch(
redis_cache, redis_stream, event_type, event_id, data
)
except github_events.IgnoredEvent as ie:
status_code = 200
reason = f"Event ignored: {ie.reason}"
else:
status_code = 202
reason = "Event queued"
if (
config.WEBHOOK_APP_FORWARD_URL
and config.WEBHOOK_FORWARD_EVENT_TYPES is not None
and event_type in config.WEBHOOK_FORWARD_EVENT_TYPES
):
raw = await request.body()
try:
async with http.AsyncClient(timeout=EVENT_FORWARD_TIMEOUT) as client:
await client.post(
config.WEBHOOK_APP_FORWARD_URL,
content=raw.decode(),
headers={
"X-GitHub-Event": event_type,
"X-GitHub-Delivery": event_id,
"X-Hub-Signature": request.headers.get("X-Hub-Signature"),
"User-Agent": request.headers.get("User-Agent"),
"Content-Type": request.headers.get("Content-Type"),
},
)
except httpx.TimeoutException:
LOG.warning(
"Fail to forward GitHub event",
event_type=event_type,
event_id=event_id,
sender=data["sender"]["login"],
)
return responses.Response(reason, status_code=status_code)
@app.get("/")
async def index(): # pragma: no cover
return responses.RedirectResponse(url="https://mergify.io/")
| true | true |
f72734f8171a2b98bdc2a9bd97576b05bb2e2d82 | 2,808 | py | Python | lib/googlecloudsdk/dns/dnstools/managed_zone/list.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/dns/dnstools/managed_zone/list.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/dns/dnstools/managed_zone/list.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | 2 | 2020-07-25T05:03:06.000Z | 2020-11-04T04:55:57.000Z | # Copyright 2013 Google Inc. All Rights Reserved.
"""'dns managed-zone list' command."""
from apiclient import errors
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.dns.lib import util
class List(base.Command):
  """List Cloud DNS managed zones."""
  # 0 means "no limit": fetch every zone the project has.
  DEFAULT_MAX_RESULTS = 0
  # Maximum page size accepted by the Cloud DNS API list call.
  DEFAULT_PAGE_SIZE = 1000
  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.
    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    parser.add_argument(
        '--max_results', required=False, help='If greater than zero, limit the '
        'number of changes returned to <max_results>. '
        'Default: %d' % List.DEFAULT_MAX_RESULTS)
  def Run(self, args):
    """Run 'dns managed-zone list'.
    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.
    Returns:
      A list of dict objects representing the zone resource obtained by the
      list operation if the list was successful.
    Raises:
      exceptions.HttpException: if the API call fails.
      exceptions.ToolException: on any other apiclient error.
    """
    dns = self.context['dns']
    project = properties.VALUES.core.project.Get(required=True)
    max_results = List.DEFAULT_MAX_RESULTS
    if args.max_results is not None:
      max_results = int(args.max_results)
    # Never ask the API for more than one page's worth at a time.
    if max_results > 0:
      page_size = min(max_results, List.DEFAULT_PAGE_SIZE)
    else:
      page_size = List.DEFAULT_PAGE_SIZE
    request = dns.managedZones().list(project=project, maxResults=page_size)
    try:
      result_list = []
      result = request.execute()
      result_list.extend(result['managedZones'])
      # Follow nextPageToken until we either run out of pages or have
      # collected max_results zones (when a positive limit was given).
      while ((max_results <= 0 or len(result_list) < max_results) and
             'nextPageToken' in result and result['nextPageToken'] is not None):
        if max_results > 0:
          # Shrink the last page so we never over-fetch past the limit.
          page_size = min(
              max_results - len(result_list), List.DEFAULT_PAGE_SIZE)
        request = dns.managedZones().list(project=project,
                                          maxResults=page_size,
                                          pageToken=result['nextPageToken'])
        result = request.execute()
        result_list.extend(result['managedZones'])
      return result_list
    except errors.HttpError as error:
      raise exceptions.HttpException(util.GetError(error, verbose=True))
    except errors.Error as error:
      raise exceptions.ToolException(error)
  def Display(self, unused_args, result):
    """Display prints information about what just happened to stdout.
    Args:
      unused_args: The same as the args in Run.
      result: The results of the Run() method.
    """
    util.PrettyPrint(result)
from apiclient import errors
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.dns.lib import util
class List(base.Command):
DEFAULT_MAX_RESULTS = 0
DEFAULT_PAGE_SIZE = 1000
@staticmethod
def Args(parser):
parser.add_argument(
'--max_results', required=False, help='If greater than zero, limit the '
'number of changes returned to <max_results>. '
'Default: %d' % List.DEFAULT_MAX_RESULTS)
def Run(self, args):
dns = self.context['dns']
project = properties.VALUES.core.project.Get(required=True)
max_results = List.DEFAULT_MAX_RESULTS
if args.max_results is not None:
max_results = int(args.max_results)
if max_results > 0:
page_size = min(max_results, List.DEFAULT_PAGE_SIZE)
else:
page_size = List.DEFAULT_PAGE_SIZE
request = dns.managedZones().list(project=project, maxResults=page_size)
try:
result_list = []
result = request.execute()
result_list.extend(result['managedZones'])
while ((max_results <= 0 or len(result_list) < max_results) and
'nextPageToken' in result and result['nextPageToken'] is not None):
if max_results > 0:
page_size = min(
max_results - len(result_list), List.DEFAULT_PAGE_SIZE)
request = dns.managedZones().list(project=project,
maxResults=page_size,
pageToken=result['nextPageToken'])
result = request.execute()
result_list.extend(result['managedZones'])
return result_list
except errors.HttpError as error:
raise exceptions.HttpException(util.GetError(error, verbose=True))
except errors.Error as error:
raise exceptions.ToolException(error)
def Display(self, unused_args, result):
util.PrettyPrint(result)
| true | true |
f727357192760291f5ec1451349246fa4f2c9de9 | 1,168 | py | Python | flask_blog/users/utils.py | dungnv2602/flask_blog_showcase | 9508518f30923363b20045640219db5722a7416e | [
"MIT"
] | null | null | null | flask_blog/users/utils.py | dungnv2602/flask_blog_showcase | 9508518f30923363b20045640219db5722a7416e | [
"MIT"
] | 2 | 2021-06-08T19:37:11.000Z | 2022-03-11T23:40:45.000Z | flask_blog/users/utils.py | dungnv2602/flask_blog_showcase | 9508518f30923363b20045640219db5722a7416e | [
"MIT"
] | null | null | null | import os
import secrets
from PIL import Image
from flask_blog import mail
from flask_mail import Message
from flask import current_app, url_for
def save_picture(form_picture):
    """Store an uploaded profile picture as a 125x125 thumbnail.

    The upload is renamed to a random hex token (keeping its original
    extension) and written under static/profile_pics.  Returns the new
    filename so it can be saved on the user record.
    """
    _, extension = os.path.splitext(form_picture.filename)
    filename = secrets.token_hex(8) + extension
    destination = os.path.join(
        current_app.root_path, 'static/profile_pics', filename)
    image = Image.open(form_picture)
    image.thumbnail((125, 125))
    image.save(destination)
    return filename
def send_reset_email(user):
    """Email *user* a password-reset link containing a signed token.

    Fix: the original generated the token twice and constructed two
    Message objects — the first (with an explicit sender) was discarded
    and never sent.  A single token/message is built now, preserving the
    message that was actually sent (default sender from app config).
    """
    token = user.get_reset_token()
    msg = Message('Password Reset Request', recipients=[user.email])
    # _external=True makes url_for emit an absolute URL, required in email.
    msg.body = f'''To reset your password, visit the following link:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
    mail.send(msg)
| 30.736842 | 95 | 0.69863 | import os
import secrets
from PIL import Image
from flask_blog import mail
from flask_mail import Message
from flask import current_app, url_for
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(
current_app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='noreply@demo.com',
recipients=[user.email])
token = user.get_reset_token()
msg = Message('Password Reset Request', recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
| true | true |
f72735920d716a47d9d08cd21dfcce0ddb872b79 | 18,672 | py | Python | conans/test/unittests/client/build/cpp_std_flags_test.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | null | null | null | conans/test/unittests/client/build/cpp_std_flags_test.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | null | null | null | conans/test/unittests/client/build/cpp_std_flags_test.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | null | null | null | import unittest
from conans.client.build.cppstd_flags import cppstd_default
from conans.test.utils.mocks import MockSettings
from conans.tools import cppstd_flag
def _make_cppstd_flag(compiler, compiler_version, cppstd=None, compiler_base=None):
    """Build mocked settings and return the compiler flag for *cppstd*."""
    values = {
        "compiler": compiler,
        "compiler.version": compiler_version,
        "compiler.cppstd": cppstd,
    }
    settings = MockSettings(values)
    if compiler_base:
        settings.values["compiler.base"] = compiler_base
    return cppstd_flag(settings)
def _make_cppstd_default(compiler, compiler_version, compiler_base=None):
    """Build mocked settings and return the default cppstd for the compiler."""
    values = {
        "compiler": compiler,
        "compiler.version": compiler_version,
    }
    settings = MockSettings(values)
    if compiler_base:
        settings.values["compiler.base"] = compiler_base
    return cppstd_default(settings)
class CompilerFlagsTest(unittest.TestCase):
def test_gcc_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "11"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "11"), "-std=c++0x")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.6", "11"), '-std=c++0x')
self.assertEqual(_make_cppstd_flag("gcc", "4.6", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.7", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "14"), '-std=c++1y')
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "5", "gnu14"), '-std=gnu++14')
self.assertEqual(_make_cppstd_flag("gcc", "5", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "17"), '-std=c++1z')
self.assertEqual(_make_cppstd_flag("gcc", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "7", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("gcc", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "8", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("gcc", "8", "20"), '-std=c++2a')
def test_gcc_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("gcc", "4"), "gnu98")
self.assertEqual(_make_cppstd_default("gcc", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("gcc", "6"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "6.1"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "7.3"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "8.1"), "gnu14")
def test_clang_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("clang", "2.0", "98"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "gnu98"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "11"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.1", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "11"), "-std=c++0x")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.0", "11"), '-std=c++0x')
self.assertEqual(_make_cppstd_flag("clang", "3.0", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.1", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.4", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.4", "14"), '-std=c++1y')
self.assertEqual(_make_cppstd_flag("clang", "3.4", "17"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "3.5", "17"), '-std=c++1z')
self.assertEqual(_make_cppstd_flag("clang", "5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "5", "gnu14"), '-std=gnu++14')
self.assertEqual(_make_cppstd_flag("clang", "5", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "6", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "6", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "6", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "6", "20"), '-std=c++2a')
self.assertEqual(_make_cppstd_flag("clang", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "7", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "7", "20"), '-std=c++2a')
self.assertEqual(_make_cppstd_flag("clang", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "8", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "8", "20"), '-std=c++2a')
def test_clang_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("clang", "2"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "2.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.0"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.4"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.5"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "5.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "6"), "gnu14")
self.assertEqual(_make_cppstd_default("clang", "7"), "gnu14")
def test_apple_clang_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "98"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "gnu98"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "11"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "14"), "-std=c++1y")
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "20"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "10.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "10.0", "20"), "-std=c++2a")
self.assertEqual(_make_cppstd_flag("apple-clang", "11.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "11.0", "20"), "-std=c++2a")
self.assertEqual(_make_cppstd_flag("apple-clang", "12.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "12.0", "20"), "-std=c++2a")
def test_apple_clang_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("apple-clang", "2"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "3"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "4"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "6"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "7"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "8"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "9"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "10"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "11"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "12"), "gnu98")
def test_visual_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "14"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "17"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "14"), '/std:c++14')
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "17"), '/std:c++latest')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "14"), '/std:c++14')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "17"), '/std:c++17')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "20"), '/std:c++latest')
def test_visual_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("Visual Studio", "11"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "12"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "13"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "14"), "14")
self.assertEqual(_make_cppstd_default("Visual Studio", "15"), "14")
def test_intel_visual_cppstd_defaults(self):
self.assertEquals(_make_cppstd_default("intel", "19", "Visual Studio"), None)
def test_intel_gcc_cppstd_defaults(self):
self.assertEquals(_make_cppstd_default("intel", "19", "gcc"), 'gnu98')
def test_intel_visual_cppstd_flag(self):
self.assertEquals(_make_cppstd_flag("intel", "19.1", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "19.1", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "17", "Visual Studio"), '/Qstd=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "20", "Visual Studio"), '/Qstd=c++20')
self.assertEquals(_make_cppstd_flag("intel", "19", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "19", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19", "17", "Visual Studio"), '/Qstd=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "17", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "17", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "15", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "11", "Visual Studio"), '/Qstd=c++0x')
self.assertEquals(_make_cppstd_flag("intel", "12", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "11", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "20", "Visual Studio"), None)
def test_intel_gcc_cppstd_flag(self):
self.assertEquals(_make_cppstd_flag("intel", "19.1", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "17", "gcc"), '-std=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "20", "gcc"), '-std=c++20')
self.assertEquals(_make_cppstd_flag("intel", "19", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "19", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19", "17", "gcc"), '-std=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "17", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "17", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "17", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "15", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "15", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "12", "11", "gcc"), '-std=c++0x')
self.assertEquals(_make_cppstd_flag("intel", "12", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "11", "11", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "20", "gcc"), None)
| 61.827815 | 99 | 0.626767 | import unittest
from conans.client.build.cppstd_flags import cppstd_default
from conans.test.utils.mocks import MockSettings
from conans.tools import cppstd_flag
def _make_cppstd_flag(compiler, compiler_version, cppstd=None, compiler_base=None):
settings = MockSettings({"compiler": compiler,
"compiler.version": compiler_version,
"compiler.cppstd": cppstd})
if compiler_base:
settings.values["compiler.base"] = compiler_base
return cppstd_flag(settings)
def _make_cppstd_default(compiler, compiler_version, compiler_base=None):
settings = MockSettings({"compiler": compiler,
"compiler.version": compiler_version})
if compiler_base:
settings.values["compiler.base"] = compiler_base
return cppstd_default(settings)
class CompilerFlagsTest(unittest.TestCase):
def test_gcc_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "11"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.2", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "11"), "-std=c++0x")
self.assertEqual(_make_cppstd_flag("gcc", "4.3", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.6", "11"), '-std=c++0x')
self.assertEqual(_make_cppstd_flag("gcc", "4.6", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.7", "14"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "14"), '-std=c++1y')
self.assertEqual(_make_cppstd_flag("gcc", "4.8", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "4.9", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "5", "gnu14"), '-std=gnu++14')
self.assertEqual(_make_cppstd_flag("gcc", "5", "17"), None)
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "5.1", "17"), '-std=c++1z')
self.assertEqual(_make_cppstd_flag("gcc", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "7", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("gcc", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("gcc", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("gcc", "8", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("gcc", "8", "20"), '-std=c++2a')
def test_gcc_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("gcc", "4"), "gnu98")
self.assertEqual(_make_cppstd_default("gcc", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("gcc", "6"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "6.1"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "7.3"), "gnu14")
self.assertEqual(_make_cppstd_default("gcc", "8.1"), "gnu14")
def test_clang_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("clang", "2.0", "98"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "gnu98"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "11"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.0", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "2.1", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "11"), "-std=c++0x")
self.assertEqual(_make_cppstd_flag("clang", "2.1", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.0", "11"), '-std=c++0x')
self.assertEqual(_make_cppstd_flag("clang", "3.0", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.1", "14"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.4", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.4", "14"), '-std=c++1y')
self.assertEqual(_make_cppstd_flag("clang", "3.4", "17"), None)
self.assertEqual(_make_cppstd_flag("clang", "3.5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "3.5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "3.5", "17"), '-std=c++1z')
self.assertEqual(_make_cppstd_flag("clang", "5", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "5", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "5", "gnu14"), '-std=gnu++14')
self.assertEqual(_make_cppstd_flag("clang", "5", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "5.1", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "6", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "6", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "6", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "6", "20"), '-std=c++2a')
self.assertEqual(_make_cppstd_flag("clang", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "7", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "7", "20"), '-std=c++2a')
self.assertEqual(_make_cppstd_flag("clang", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("clang", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("clang", "8", "17"), '-std=c++17')
self.assertEqual(_make_cppstd_flag("clang", "8", "20"), '-std=c++2a')
def test_clang_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("clang", "2"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "2.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.0"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.4"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "3.5"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "5.1"), "gnu98")
self.assertEqual(_make_cppstd_default("clang", "6"), "gnu14")
self.assertEqual(_make_cppstd_default("clang", "7"), "gnu14")
def test_apple_clang_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "98"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "gnu98"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "11"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "3.9", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "4.0", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.0", "14"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "98"), "-std=c++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "gnu98"), "-std=gnu++98")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "11"), "-std=c++11")
self.assertEqual(_make_cppstd_flag("apple-clang", "5.1", "14"), "-std=c++1y")
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "6.1", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "7", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "8", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "11"), '-std=c++11')
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "14"), '-std=c++14')
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "9.1", "20"), None)
self.assertEqual(_make_cppstd_flag("apple-clang", "10.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "10.0", "20"), "-std=c++2a")
self.assertEqual(_make_cppstd_flag("apple-clang", "11.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "11.0", "20"), "-std=c++2a")
self.assertEqual(_make_cppstd_flag("apple-clang", "12.0", "17"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("apple-clang", "12.0", "20"), "-std=c++2a")
def test_apple_clang_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("apple-clang", "2"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "3"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "4"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "5"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "6"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "7"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "8"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "9"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "10"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "11"), "gnu98")
self.assertEqual(_make_cppstd_default("apple-clang", "12"), "gnu98")
def test_visual_cppstd_flags(self):
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "14"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "12", "17"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "14"), '/std:c++14')
self.assertEqual(_make_cppstd_flag("Visual Studio", "14", "17"), '/std:c++latest')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "11"), None)
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "14"), '/std:c++14')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "17"), '/std:c++17')
self.assertEqual(_make_cppstd_flag("Visual Studio", "17", "20"), '/std:c++latest')
def test_visual_cppstd_defaults(self):
self.assertEqual(_make_cppstd_default("Visual Studio", "11"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "12"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "13"), None)
self.assertEqual(_make_cppstd_default("Visual Studio", "14"), "14")
self.assertEqual(_make_cppstd_default("Visual Studio", "15"), "14")
def test_intel_visual_cppstd_defaults(self):
self.assertEquals(_make_cppstd_default("intel", "19", "Visual Studio"), None)
def test_intel_gcc_cppstd_defaults(self):
self.assertEquals(_make_cppstd_default("intel", "19", "gcc"), 'gnu98')
def test_intel_visual_cppstd_flag(self):
self.assertEquals(_make_cppstd_flag("intel", "19.1", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "19.1", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "17", "Visual Studio"), '/Qstd=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "20", "Visual Studio"), '/Qstd=c++20')
self.assertEquals(_make_cppstd_flag("intel", "19", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "19", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19", "17", "Visual Studio"), '/Qstd=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "17", "14", "Visual Studio"), '/Qstd=c++14')
self.assertEquals(_make_cppstd_flag("intel", "17", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "11", "Visual Studio"), '/Qstd=c++11')
self.assertEquals(_make_cppstd_flag("intel", "15", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "11", "Visual Studio"), '/Qstd=c++0x')
self.assertEquals(_make_cppstd_flag("intel", "12", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "20", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "gnu98", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "11", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "14", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "17", "Visual Studio"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "20", "Visual Studio"), None)
def test_intel_gcc_cppstd_flag(self):
self.assertEquals(_make_cppstd_flag("intel", "19.1", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "17", "gcc"), '-std=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19.1", "20", "gcc"), '-std=c++20')
self.assertEquals(_make_cppstd_flag("intel", "19", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "19", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "19", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "19", "17", "gcc"), '-std=c++17')
self.assertEquals(_make_cppstd_flag("intel", "19", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "17", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "17", "14", "gcc"), '-std=c++14')
self.assertEquals(_make_cppstd_flag("intel", "17", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "17", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "15", "11", "gcc"), '-std=c++11')
self.assertEquals(_make_cppstd_flag("intel", "15", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "15", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "12", "11", "gcc"), '-std=c++0x')
self.assertEquals(_make_cppstd_flag("intel", "12", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "12", "20", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "gnu98", "gcc"), '-std=gnu++98')
self.assertEquals(_make_cppstd_flag("intel", "11", "11", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "14", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "17", "gcc"), None)
self.assertEquals(_make_cppstd_flag("intel", "11", "20", "gcc"), None)
| true | true |
f72735cd87fa6d88c743eda60cd6425d8578a594 | 1,671 | py | Python | oscar/lib/python2.7/site-packages/phonenumbers/data/region_UG.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | 4 | 2018-10-19T04:36:20.000Z | 2020-02-13T16:14:09.000Z | oscar/lib/python2.7/site-packages/phonenumbers/data/region_UG.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/phonenumbers/data/region_UG.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | null | null | null | """Auto-generated file, do not edit by hand. UG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone-number metadata for Uganda (region code 'UG', country calling code
# +256). NOTE(review): the module header says this file is auto-generated —
# any manual change here will be lost on the next regeneration; fix the
# upstream metadata source instead.
PHONE_METADATA_UG = PhoneMetadata(id='UG', country_code=256, international_prefix='00[057]',
    general_desc=PhoneNumberDesc(national_number_pattern='\\d{9}', possible_length=(9,), possible_length_local_only=(5, 6, 7)),
    fixed_line=PhoneNumberDesc(national_number_pattern='20(?:[0147]\\d{3}|2(?:40|[5-9]\\d)\\d|3(?:0[0-4]|[2367]\\d)\\d|5[0-4]\\d{2}|6(?:00[0-2]|30[0-4]|[5-9]\\d{2})|8[0-2]\\d{2})\\d{3}|[34]\\d{8}', example_number='312345678', possible_length=(9,), possible_length_local_only=(5, 6, 7)),
    mobile=PhoneNumberDesc(national_number_pattern='7(?:0[0-7]\\d|[1578]\\d{2}|2(?:[03]\\d|60)|30\\d|4[0-4]\\d|9(?:[0-6]\\d|74))\\d{5}', example_number='712345678', possible_length=(9,)),
    toll_free=PhoneNumberDesc(national_number_pattern='800[123]\\d{5}', example_number='800123456', possible_length=(9,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='90[123]\\d{6}', example_number='901123456', possible_length=(9,)),
    national_prefix='0',  # trunk prefix dialed before national numbers
    national_prefix_for_parsing='0',
    number_format=[NumberFormat(pattern='(\\d{3})(\\d{6})', format='\\1 \\2', leading_digits_pattern=['20[0-8]|4(?:6[45]|[7-9])|[7-9]', '20(?:[013-8]|2[5-9])|4(?:6[45]|[7-9])|[7-9]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(\\d{2})(\\d{7})', format='\\1 \\2', leading_digits_pattern=['3|4(?:[1-5]|6[0-36-9])'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(2024)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['202', '2024'], national_prefix_formatting_rule='0\\1')])
| 111.4 | 286 | 0.666068 | from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_UG = PhoneMetadata(id='UG', country_code=256, international_prefix='00[057]',
general_desc=PhoneNumberDesc(national_number_pattern='\\d{9}', possible_length=(9,), possible_length_local_only=(5, 6, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='20(?:[0147]\\d{3}|2(?:40|[5-9]\\d)\\d|3(?:0[0-4]|[2367]\\d)\\d|5[0-4]\\d{2}|6(?:00[0-2]|30[0-4]|[5-9]\\d{2})|8[0-2]\\d{2})\\d{3}|[34]\\d{8}', example_number='312345678', possible_length=(9,), possible_length_local_only=(5, 6, 7)),
mobile=PhoneNumberDesc(national_number_pattern='7(?:0[0-7]\\d|[1578]\\d{2}|2(?:[03]\\d|60)|30\\d|4[0-4]\\d|9(?:[0-6]\\d|74))\\d{5}', example_number='712345678', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='800[123]\\d{5}', example_number='800123456', possible_length=(9,)),
premium_rate=PhoneNumberDesc(national_number_pattern='90[123]\\d{6}', example_number='901123456', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{6})', format='\\1 \\2', leading_digits_pattern=['20[0-8]|4(?:6[45]|[7-9])|[7-9]', '20(?:[013-8]|2[5-9])|4(?:6[45]|[7-9])|[7-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{7})', format='\\1 \\2', leading_digits_pattern=['3|4(?:[1-5]|6[0-36-9])'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(2024)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['202', '2024'], national_prefix_formatting_rule='0\\1')])
| true | true |
f72736523caea2797e8de3a4ae833839547bc926 | 2,559 | py | Python | patitasbackend/emailer/views.py | nahuelmol/patitas | 75815aa3b388a538f32395d93b1fa25d7fb6de1a | [
"MIT"
] | 1 | 2021-05-23T16:08:41.000Z | 2021-05-23T16:08:41.000Z | patitasbackend/emailer/views.py | nahuelmol/patitas | 75815aa3b388a538f32395d93b1fa25d7fb6de1a | [
"MIT"
] | null | null | null | patitasbackend/emailer/views.py | nahuelmol/patitas | 75815aa3b388a538f32395d93b1fa25d7fb6de1a | [
"MIT"
] | null | null | null | from django.shortcuts import render
import pickle
import os.path
import mimetypes
import base64
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
# OAuth scope requested for the Gmail API. 'https://mail.google.com/' grants
# full mailbox access. NOTE(review): if this app only ever sends mail, the
# narrower 'https://www.googleapis.com/auth/gmail.send' scope would suffice —
# confirm before changing, since changing scopes invalidates cached tokens.
SCOPES = ['https://mail.google.com/']
def get_service():
    """Builds an authenticated Gmail API service object.

    Loads cached OAuth credentials from ``<cwd>/emailer/token.pickle``. When
    they are missing, invalid, or expired, the credentials are refreshed (or
    the installed-app OAuth flow is run against
    ``<cwd>/emailer/credentials.json``) and written back to the pickle cache.

    Returns:
        A googleapiclient resource object for the Gmail v1 API.
    """
    creds = None
    # Build the cache paths portably. The previous code concatenated literal
    # '\\' separators, which only formed valid paths on Windows; on POSIX it
    # produced a single filename containing backslashes.
    pickle_path = os.path.join(os.getcwd(), 'emailer', 'token.pickle')
    creds_path = os.path.join(os.getcwd(), 'emailer', 'credentials.json')
    if os.path.exists(pickle_path):
        with open(pickle_path, 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(creds_path, SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new/refreshed credentials for subsequent runs.
        with open(pickle_path, 'wb') as token:
            pickle.dump(creds, token)
    service = build('gmail', 'v1', credentials=creds)
    return service
def send_message(service, user_id, message):
    """Sends an email through the Gmail API.

    Args:
        service: Authenticated Gmail API resource (see get_service()).
        user_id: Mailbox to send as; the special value 'me' means the
            authenticated user.
        message: Request body, e.g. {'raw': <base64url-encoded message>} as
            produced by create_message_with_attachment().

    Returns:
        The API response dict (includes the server-assigned message 'id') on
        success, or None if the send failed.
    """
    try:
        sent = service.users().messages().send(
            userId=user_id, body=message).execute()
        print('Message id: {}'.format(sent.get('id')))
        return sent
    except Exception as e:
        # The original print('...:{}', e) passed the exception as a second
        # print argument and never filled the placeholder; format explicitly.
        print('An error occurred in send_message: {}'.format(e))
        return None
def create_message_with_attachment(sender, to, subject, body):
    """Builds the Gmail API request body for an account-verification email.

    The message is a MIME multipart container with a single HTML part asking
    the recipient to verify their patitas account via a hard-coded localhost
    link.

    Args:
        sender: Value for the 'from' header.
        to: Value for the 'to' header.
        subject: Value for the 'subject' header.
        body: Currently unused; kept for call-site compatibility.
            NOTE(review): despite the function name, no attachment is added
            either — confirm whether `body`/attachments should be wired in.

    Returns:
        A dict {'raw': <base64url-encoded message>} suitable for the Gmail
        API users().messages().send endpoint.
    """
    message = MIMEMultipart()
    # Fixed: the verify link was opened with <a ...> but closed with a
    # mismatched </button> tag (invalid HTML); it is now closed with </a>.
    msg_content_html = """
    <!DOCTYPE html>
    <html>
    <head>
        <title>My Title</title>
        <link rel="stylesheet" type="text/css" href="https://bootswatch.com/5/superhero/bootstrap.min.css">
        <style type="text/css">
            span.bold {font-weight: bold;}
            table.noborder {border: 0px; padding: 8px;}
            th {text-align: left;}
        </style>
    </head>
    <body>
    <div class="container">
        <p>
            Click on the button below to verify your patitas account
        </p>
        <a href="http://localhost:8000/user/verify_user_by_email" type="button" class="btn btn-success">Verify</a>
    </div>
    </body>
    </html>
    """
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    html_part = MIMEText(msg_content_html, 'html')
    message.attach(html_part)
    # The Gmail API expects the full RFC 2822 message, base64url-encoded.
    raw_msg = base64.urlsafe_b64encode(message.as_string().encode('utf-8'))
    return {'raw': raw_msg.decode('utf-8')}
import pickle
import os.path
import mimetypes
import base64
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
SCOPES = ['https://mail.google.com/']
def get_service():
creds = None
PICKLE_PATH = os.getcwd() + '\\emailer\\token.pickle'
CREDS_PATH = os.getcwd() + '\\emailer\\credentials.json'
if os.path.exists(PICKLE_PATH):
with open(PICKLE_PATH, 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CREDS_PATH, SCOPES)
creds = flow.run_local_server(port=0)
with open(PICKLE_PATH, 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def send_message(service,user_id,message):
try:
message = service.users().messages().send(userId=user_id,body=message).execute()
print('Message id')
return message
except Exception as e:
print('an error occured in views line 45:{}',e)
return None
def create_message_with_attachment(sender,to,subject,body):
message = MIMEMultipart()
msg_content_html = """
<!DOCTYPE html>
<html>
<head>
<title>My Title</title>
<link rel="stylesheet" type="text/css" href="https://bootswatch.com/5/superhero/bootstrap.min.css">
<style type="text/css">
span.bold {font-weight: bold;}
table.noborder {border: 0px; padding: 8px;}
th {text-align: left;}
</style>
</head>
<body>
<div class="container">
<p>
Click on the button below to verify your patitas account
</p>
<a href="http://localhost:8000/user/verify_user_by_email" type="button" class="btn btn-success">Verify</button>
</div>
</body>
</html>
"""
message['to'] = to
message['from'] = sender
message['subject'] = subject
html_part = MIMEText(msg_content_html, 'html')
message.attach(html_part)
raw_msg = base64.urlsafe_b64encode(message.as_string().encode('utf-8'))
return {'raw':raw_msg.decode('utf-8')} | true | true |
f727369391ddd48ef41823994e2d66ef082c42f9 | 33,146 | py | Python | lingvo/jax/eval.py | ruomingp/lingvo | ba59e8c46471be77d5d3c48177f0f0dd8d5d44e9 | [
"Apache-2.0"
] | null | null | null | lingvo/jax/eval.py | ruomingp/lingvo | ba59e8c46471be77d5d3c48177f0f0dd8d5d44e9 | [
"Apache-2.0"
] | null | null | null | lingvo/jax/eval.py | ruomingp/lingvo | ba59e8c46471be77d5d3c48177f0f0dd8d5d44e9 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation loop for lingvo Jax model."""
import contextlib
import functools
import hashlib
import os
import time
from typing import List, Optional, Sequence
from absl import logging
import jax
from jax.experimental import maps
from jax.experimental import mesh_utils
from lingvo.jax import base_input
from lingvo.jax import base_layer
from lingvo.jax import base_metrics
from lingvo.jax import base_model_params
from lingvo.jax import base_task
from lingvo.jax import checkpoint_pb2
from lingvo.jax import model_utils
from lingvo.jax import py_utils
from lingvo.jax import pytypes
from lingvo.jax import summary_utils
from lingvo.jax import train_states
from lingvo.jax import trainer_lib
import tensorflow.compat.v2 as tf
from lingvo.jax import checkpoints
from lingvo.jax import io_utils
# Short aliases for commonly used lingvo/jax types, to keep the signatures in
# this file readable.
BaseModelParamsT = base_model_params.BaseModelParamsT
CheckpointType = checkpoint_pb2.CheckpointType
InstantiableParams = py_utils.InstantiableParams
NestedMap = py_utils.NestedMap
JTensor = pytypes.JTensor
NestedJTensor = pytypes.NestedJTensor
TrainState = train_states.TrainState
SummaryWriter = tf.summary.SummaryWriter
def maybe_ema(model_states):
  """Returns the EMA weights from `model_states` when the optimizer has them.

  Scans the first entry of `opt_states` for an element carrying an `ema`
  field. When one is found, a new TrainState is returned whose `mdl_vars` are
  the EMA-averaged variables and whose optimizer states are empty; otherwise
  `model_states` is returned unchanged.
  """
  if not model_states.opt_states:
    return model_states
  for opt_state in model_states.opt_states[0]:
    if 'ema' in opt_state:
      return TrainState(
          step=model_states.step, mdl_vars=opt_state.ema, opt_states={})
  return model_states
def evaluate(
    model_name: str,
    job_log_dir: Optional[str],
    multi_host_checkpointing: Optional[bool],
    maybe_use_persistence_checkpointing: bool,
) -> None:
  """Evaluates a registered model over all of its (non-training) datasets.

  Dispatches to the SPMD (pjit) evaluation loop when the model declares a
  device mesh, and to the pmap data-parallel loop otherwise.

  Args:
    model_name: The name of the model from the registry to evaluate.
    job_log_dir: The directory for the job logs.
    multi_host_checkpointing: Whether to use multi-host checkpointing.
    maybe_use_persistence_checkpointing: If set, it will try to use
      persistence-based checkpointing if suitable.
  """
  experiment = model_utils.get_model(model_name)()
  task_p = experiment.task()
  eval_input_p = [d for d in experiment.datasets() if not d.is_training]
  # Shard the eval input feeds across all participating hosts.
  for inp in eval_input_p:
    inp.num_infeed_hosts = jax.process_count()
    inp.infeed_host_index = jax.process_index()
  if task_p.model.device_mesh is None:
    evaluate_pmap_model(task_p, eval_input_p, job_log_dir)
  else:
    checkpoint_type = checkpoints.retrieve_checkpoint_type(
        multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)
    evaluate_spmd_model(task_p, eval_input_p, job_log_dir, checkpoint_type)
def evaluate_pmap_model(
    task_p: InstantiableParams,
    eval_input_p: Sequence[InstantiableParams],
    job_log_dir: Optional[str],
) -> None:
  """Runs the evaluation loop on the entire test dataset for PMAP model.

  Polls `job_log_dir/checkpoints` for new checkpoints (sleeping a minute
  between polls), runs the eval loop over every eval split for each one, and
  exits once the checkpoint covering the final training step was evaluated.

  Args:
    task_p: Params for the task encapsulating the data parallel model.
    eval_input_p: List of params for the eval data input pipelines.
    job_log_dir: Directory for the job logs.
  """
  logging.info('Using pmap for data parallelism.')
  jax_task = task_p.Instantiate()
  eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]
  # TODO(shafey): Retrieve the seeds from the model definition instead.
  prng_key = jax.random.PRNGKey(1234)
  prng_key, init_key = jax.random.split(prng_key)
  checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')
  # Restoring flax checkpoints still requires the backward (training)
  # variables to be present in TrainState.
  # TODO(pax): add is_eval=True to initialize_model_state
  model_states = trainer_lib.initialize_model_state(jax_task, init_key)
  # Pmap does not use GDA, and so global_mesh and mesh_axes are None.
  model_states = checkpoints.restore_checkpoint(model_states, checkpoint_dir)
  replicated_model_states = trainer_lib.replicate_model_state(model_states)
  logging.info('replicated_model_states: %s',
               jax.tree_map(lambda x: x.shape, replicated_model_states))
  # From now on, different replicas should use different random seeds.
  # Here, each process will have its unique prng_key.
  # prng_key will be further split so that each core on a host will get
  # different prng_key.
  prng_key = jax.random.fold_in(prng_key, jax.process_index())
  logging.info('root prng_key: %s', prng_key)

  def eval_step(mdl_states, prng_key, inputs):
    # Convert the train state into its eval-step form before the fprop.
    mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)
    return trainer_lib.eval_step_single_learner(
        jax_task,
        mdl_states,
        prng_key,
        inputs,
        data_parallel_axis_name='batch',
        fprop_dtype=jax_task.model.fprop_dtype)

  num_devices = jax.local_device_count()
  prng_key, eval_key = jax.random.split(prng_key)
  # One eval seed per local device; reused for every checkpoint evaluated.
  eval_prng_seed = jax.random.split(eval_key, num=num_devices)
  logging.info('eval prng_seed: %s', eval_prng_seed)
  p_eval_step = jax.pmap(eval_step, axis_name='batch')
  logging.info('Evaluation loop starting...')
  summary_base_dir = os.path.join(job_log_dir, 'summaries')
  summary_eval_dirs = [
      os.path.join(summary_base_dir, f'eval_test_{split}')
      for split, _ in enumerate(eval_input_p)
  ]
  # -1 means "iterate until the input pipeline is exhausted".
  num_steps = [
      -1 if p.reset_for_eval else p.eval_loop_num_batches for p in eval_input_p
  ]
  last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
  with contextlib.ExitStack() as exit_stack:
    eval_summary_writers = [
        exit_stack.enter_context(summary_utils.get_summary_writer(d))
        for d in summary_eval_dirs
    ]
    while True:
      step_i = int(jax.device_get(replicated_model_states.step)[0])
      # Evaluate the EMA weights when the optimizer tracks them.
      # NOTE(review): this rebinding shadows the inner `eval_step` function;
      # harmless because p_eval_step already captured it, but confusing.
      eval_step = functools.partial(p_eval_step,
                                    maybe_ema(replicated_model_states),
                                    eval_prng_seed)
      # Run the eval loop.
      model_utils.run_eval_loop_over_test_splits(
          num_steps,
          eval_step,
          eval_summary_writers,
          step_i,
          eval_input_pipelines,
          reshard_inputs=True)
      # If the last check point evaluated matches max train steps, exit.
      if last_checkpoint is not None:
        last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(
            last_checkpoint)
        exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
        if exceeded_ckpt >= task_p.train.num_train_steps:
          break
      # Release replicated_model_states (rebuilt below from the new ckpt).
      del replicated_model_states
      new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
      while new_checkpoint == last_checkpoint:
        # Sleep for a minute.
        time.sleep(60)
        new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
      # There must be a new checkpoint here.
      logging.info('Found new checkpoint: %s', new_checkpoint)
      model_states = checkpoints.restore_checkpoint(model_states,
                                                    checkpoint_dir)
      replicated_model_states = trainer_lib.replicate_model_state(model_states)
      last_checkpoint = new_checkpoint
def evaluate_spmd_model(
    task_p: InstantiableParams,
    eval_input_p: Sequence[InstantiableParams],
    job_log_dir: Optional[str],
    checkpoint_type: CheckpointType,
) -> None:
  """Runs the evaluation loop on the entire test dataset for SPMD model.

  Like evaluate_pmap_model, but partitions the model over a device mesh and
  handles the multi-host-flax and GDA checkpoint layouts.

  Args:
    task_p: Params of the task encapsulating an SPMD model.
    eval_input_p: List of Params for the eval data pipelines.
    job_log_dir: Directory for the job logs.
    checkpoint_type: Type of model checkpointing method to use.
  """
  logging.info('Using SPMD sharding for model parallelism.')
  eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]
  # TODO(bf-jax): Retrieve the seeds from the model definition instead.
  prng_key = jax.random.PRNGKey(1234)
  prng_key, init_key = jax.random.split(prng_key)
  checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')
  # Note that GDA checkpoint requires all processes to participate in
  # checkpointing but it does not require a separate checkpoint_dir per process.
  if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
    # Multi-host flax keeps one checkpoint sub-directory per process.
    checkpoint_task_dir = os.path.join(checkpoint_dir,
                                       f'{jax.process_index():03d}')
  else:
    checkpoint_task_dir = checkpoint_dir
  multi_host_checkpointing = bool(checkpoint_type in {
      CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA
  })

  def get_shape_dtype(x):
    # Abstract shape/dtype spec; used to trace/partition the step function
    # without materializing real input data.
    y = jax.ShapeDtypeStruct(x.shape, x.dtype)
    return y

  # Do not use eval_input_pipelines[0] directly.
  sample_model_inputs = eval_input_p[0].Instantiate().get_next()
  inputs_shape = tf.nest.map_structure(get_shape_dtype, sample_model_inputs)

  jax_task = task_p.Instantiate()
  model_p = task_p.model
  mesh_shape = model_p.device_mesh.shape
  device_mesh = mesh_utils.create_device_mesh(mesh_shape)
  logging.info('device_mesh: %s', device_mesh)
  global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)
  use_gda_checkpoint = jax.config.jax_parallel_functions_output_gda
  with global_mesh:
    jax_task.model.instantiate_variable_configs()
    # Restoring flax checkpoints still requires the backward (training)
    # variables to be present in TrainState.
    # TODO(pax): set is_eval=True for all ckpt types.
    if use_gda_checkpoint:
      # GDA path: restore directly into opt-state-free partitioned specs.
      partitioned_specs = jax_task.create_train_state_partition_specs(
          jax_task.model.vars, discard_opt_states=True)
      partitioned_train_state = checkpoints.restore_checkpoint(
          None,
          checkpoint_task_dir,
          global_mesh=global_mesh,
          checkpoint_type=checkpoint_type,
          state_specs=partitioned_specs)
      eval_step, inputs_partition_specs = (
          trainer_lib.get_partitioned_spmd_model_step_fn(
              jax_task,
              init_key,
              partitioned_specs,
              inputs_shape,
              is_eval=True))
    else:
      (partitioned_train_state, partitioned_specs, inputs_partition_specs, _,
       eval_step, _) = trainer_lib.partition_spmd_model(task_p, init_key,
                                                        inputs_shape)
      partitioned_train_state = checkpoints.restore_checkpoint(
          partitioned_train_state,
          checkpoint_task_dir,
          global_mesh=global_mesh,
          checkpoint_type=checkpoint_type,
          state_specs=partitioned_specs)
    logging.info('partitioned_train_state: %s',
                 jax.tree_map(lambda x: x.shape, partitioned_train_state))
    if multi_host_checkpointing:
      py_utils.sync_global_devices(f'checkpointer:restored:{checkpoint_dir}')
    # We do not fold in jax.process_index in contrast to the pmap version and
    # use a single global key instead to rely on pjit to split for different
    # replicas.
    logging.info('root prng_key: %s', prng_key)
    prng_key, eval_key = jax.random.split(prng_key)
    logging.info('eval prng_key: %s', eval_key)
    logging.info('Evaluation loop starting...')
    summary_base_dir = os.path.join(job_log_dir, 'summaries')
    summary_eval_dirs = [
        os.path.join(summary_base_dir, f'eval_{split}')
        for split, _ in enumerate(eval_input_p)
    ]
    # -1 means "iterate until the input pipeline is exhausted".
    num_steps = [-1 if p.reset_for_eval else 1 for p in eval_input_p]
    last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
    with contextlib.ExitStack() as exit_stack:
      eval_summary_writers = [
          exit_stack.enter_context(summary_utils.get_summary_writer(d))
          for d in summary_eval_dirs
      ]
      while True:
        step_i = int(jax.device_get(partitioned_train_state.step))
        eval_step_fn = functools.partial(
            eval_step,
            trainer_lib.train_state_for_eval_step(partitioned_train_state),
            eval_key)
        # Run the eval loop.
        model_utils.run_eval_loop_over_test_splits(
            num_steps,
            eval_step_fn,
            eval_summary_writers,
            step_i,
            eval_input_pipelines,
            inputs_partition_specs,
            inputs_shape,
            global_mesh,
            reshard_inputs=False)
        # If the last check point evaluated matches max train steps, exit.
        if last_checkpoint is not None:
          last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(
              last_checkpoint)
          exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
          if exceeded_ckpt >= task_p.train.num_train_steps:
            break
        new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
        while new_checkpoint == last_checkpoint:
          # Sleep for a minute.
          time.sleep(60)
          new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
        # There must be a new checkpoint here.
        logging.info('Found new checkpoint: %s', new_checkpoint)
        partitioned_train_state = checkpoints.restore_checkpoint(
            None if use_gda_checkpoint else partitioned_train_state,
            checkpoint_task_dir,
            global_mesh=global_mesh,
            checkpoint_type=checkpoint_type,
            state_specs=partitioned_specs)
        if multi_host_checkpointing:
          py_utils.sync_global_devices(
              f'checkpointer:restored:{checkpoint_dir}')
        last_checkpoint = new_checkpoint
def decode(
    model_name: str,
    job_log_dir: Optional[str],
    multi_host_checkpointing: Optional[bool],
    maybe_use_persistence_checkpointing: bool,
    restore_checkpoint_dir: Optional[str],
    restore_checkpoint_step: Optional[int],
    continuous_decode: bool,
) -> None:
  """Runs decoding for a registered model on its decoder datasets.

  Dispatches to the pmap decoder when no device mesh is configured, and to
  the (single-shot) SPMD decoder otherwise.

  Args:
    model_name: The name of the model from the registry to evaluate.
    job_log_dir: The directory for the job logs.
    multi_host_checkpointing: Whether to use multi-host checkpointing.
    maybe_use_persistence_checkpointing: If set, it will try to use
      persistence-based checkpointing if suitable.
    restore_checkpoint_dir: The directory from which to restore checkpoint.
    restore_checkpoint_step: If set, the checkpoint step to restore. If unset,
      try to restore from the latest checkpoint if any.
    continuous_decode: whether to continuously decode on the latest ckpt.

  Raises:
    NotImplementedError: if continuous decoding is requested for an SPMD
      (device-mesh) model.
  """
  logging.info('running decode_once on model %s restored from %s', model_name,
               restore_checkpoint_dir)
  experiment = model_utils.get_model(model_name)()
  task_p = experiment.task()
  decoder_inputs = experiment.decoder_datasets()
  if not decoder_inputs:
    # Nothing to decode for this model.
    return
  # Shard the decoder input feeds across all participating hosts.
  for inp in decoder_inputs:
    inp.num_infeed_hosts = jax.process_count()
    inp.infeed_host_index = jax.process_index()
  if task_p.model.device_mesh is None:
    decode_pmap_model(task_p, decoder_inputs, job_log_dir,
                      restore_checkpoint_dir, restore_checkpoint_step,
                      continuous_decode)
  else:
    if continuous_decode:
      raise NotImplementedError('http://b/214589358: not supported')
    checkpoint_type = checkpoints.retrieve_checkpoint_type(
        multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)
    decode_once_spmd_model(task_p, decoder_inputs, job_log_dir, checkpoint_type,
                           restore_checkpoint_dir, restore_checkpoint_step)
def _get_dir_names(input_p: Sequence[InstantiableParams]) -> Sequence[str]:
  """Returns one unique output-directory name per input dataset.

  Each input's own `name` is preferred; unnamed inputs get
  `decode_test_<idx>`. A name already taken is first disambiguated with the
  input's index, and, should that still collide, with a short md5-derived
  suffix.
  """
  seen = set()
  names = []
  for idx, params in enumerate(input_p):
    candidate = params.name or f'decode_test_{idx}'
    if params.name and params.name in seen:
      candidate = f'{params.name}_{idx}'
    if candidate in seen:
      # Extremely unlikely, but keep the result collision-free regardless.
      suffix = hashlib.md5(candidate.encode()).hexdigest()[-5:]
      candidate = f'{candidate}_{suffix}'
    assert candidate not in seen
    seen.add(candidate)
    names.append(candidate)
  return names
def _get_step(step: base_layer.JTensorOrPartitionSpec) -> int:
  """Converts the global-step array into a plain Python int.

  A rank-0 value is fetched directly; for a rank-1 (per-replica) value the
  first replica's entry is fetched. Any higher rank is rejected.

  Raises:
    ValueError: if `step` has more than one dimension.
  """
  rank = step.ndim
  if rank == 0:
    return jax.device_get(step)
  if rank == 1:
    return jax.device_get(step[0])
  raise ValueError(
      f'Expecting a replicated 1D global step (got ndim=`{step.ndim}`).')
def _get_filename(step: base_layer.JTensorOrPartitionSpec) -> str:
  """Builds the per-host decoder output filename for the given step."""
  # Each process writes its own shard, hence the process index in the name.
  return f'decoder_out_{_get_step(step)}_shard_{jax.process_index()}'
def decode_pmap_model(
    task_p: InstantiableParams,
    input_p: Sequence[InstantiableParams],
    job_log_dir: Optional[str],
    restore_checkpoint_dir: Optional[str],
    restore_checkpoint_step: Optional[int],
    continuous_decode: bool,
) -> None:
  """Runs the decoding on the entire decoder datasets for a PMAP model.

  In continuous mode, polls `restore_checkpoint_dir` for new checkpoints
  (sleeping a minute between polls) and decodes each one, stopping after the
  checkpoint covering the final training step has been processed.

  Args:
    task_p: Params of the task encapsulating the data parallel model.
    input_p: List of input params to be decoded.
    job_log_dir: Directory for the job logs.
    restore_checkpoint_dir: The directory from which to restore checkpoint. If
      None, uses job_log_dir.
    restore_checkpoint_step: If set, the checkpoint step to restore. If unset,
      try to restore from the latest checkpoint if any.
    continuous_decode: whether to continuously decode on the latest ckpt.

  Raises:
    ValueError: if continuous_decode is combined with an explicit
      restore_checkpoint_step.
  """
  if continuous_decode and restore_checkpoint_step is not None:
    raise ValueError('Continuous decoding mode requires restore_checkpoint_step'
                     '=None, actual restore_checkpoint_step='
                     f'{restore_checkpoint_step}')
  restore_checkpoint_dir = restore_checkpoint_dir or os.path.join(
      job_log_dir, 'checkpoints')
  # TODO(shafey): Retrieve the seeds from the model definition instead.
  prng_key = jax.random.PRNGKey(1234)
  prng_key, init_key = jax.random.split(prng_key)
  # From now on, different replicas should use different random seeds.
  # Here, each process will have its unique prng_key.
  # prng_key will be further split so that each core on a host will get
  # different prng_key.
  prng_key = jax.random.fold_in(prng_key, jax.process_index())
  logging.info('root prng_key: %s', prng_key)
  prng_key, eval_key = jax.random.split(prng_key)
  # One decoding seed per local device.
  prng_seed = jax.random.split(eval_key, num=jax.local_device_count())
  logging.info('decoder prng_seed: %s', prng_seed)
  inputs = [p.Instantiate() for p in input_p]
  summary_base_dir = os.path.join(job_log_dir, 'summaries')
  dirnames = _get_dir_names(input_p)
  summary_decode_dirs = [
      os.path.join(summary_base_dir, f'decode_test_{dirnames[split]}')
      for split, _ in enumerate(input_p)
  ]
  with contextlib.ExitStack() as exit_stack:
    summary_writers = [
        exit_stack.enter_context(summary_utils.get_summary_writer(d))
        for d in summary_decode_dirs
    ]
    jax_task = task_p.Instantiate()
    # Restoring flax checkpoints still requires the backward (training)
    # variables to be present in TrainState.
    # TODO(pax): add is_eval=True to initialize_model_state
    model_states = trainer_lib.initialize_model_state(jax_task, init_key)
    model_states = checkpoints.restore_checkpoint(
        model_states, restore_checkpoint_dir, step=restore_checkpoint_step)
    replicated_model_states = trainer_lib.replicate_model_state(model_states)
    logging.info('replicated_model_states: %s',
                 jax.tree_map(lambda x: x.shape, replicated_model_states))
    last_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
    while True:
      _decode_once_pmap_model(jax_task, task_p, inputs, input_p, prng_seed,
                              job_log_dir, replicated_model_states,
                              summary_writers)
      if not continuous_decode:
        break
      if last_checkpoint is not None:
        # NOTE(review): assumes checkpoint basenames end in '_<step>'; the
        # eval loops use checkpoints.get_step_from_checkpoint_asset for the
        # same purpose — confirm the two stay in sync.
        last_ckpt_step = int(last_checkpoint.split('_')[-1])
        exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
        if exceeded_ckpt >= task_p.train.num_train_steps:
          break
      # Release replicated_model_states (rebuilt below from the new ckpt).
      del replicated_model_states
      new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
      while new_checkpoint == last_checkpoint:
        # Sleep for a minute between polls for a newer checkpoint.
        time.sleep(60)
        new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
      logging.info('Found new checkpoint: %s', new_checkpoint)
      model_states = checkpoints.restore_checkpoint(model_states,
                                                    restore_checkpoint_dir)
      replicated_model_states = trainer_lib.replicate_model_state(model_states)
      last_checkpoint = new_checkpoint
def _decode_once_pmap_model(
    jax_task: base_task.SingleTask,
    task_p: InstantiableParams,
    inputs: List[base_input.BaseInput],
    input_p: Sequence[InstantiableParams],
    prng_seed: JTensor,
    job_log_dir: Optional[str],
    replicated_model_states: train_states.TrainState,
    summary_writers: List[SummaryWriter],
) -> None:
  """Runs the decoding on the entire decoder datasets for a PMAP model.

  For each input split: decodes batches until the split is exhausted (or its
  configured batch count is reached), post-processes the outputs via
  model.process_decode_out, summarizes the collected metrics, and writes the
  processed decodes to `job_log_dir/decoder_out/<split_dir>/`.

  Args:
    jax_task: instantiated model from task_p.
    task_p: Params for the task encapsulating a data parallel model.
    inputs: instantiated inputs.
    input_p: List of input params to be decoded.
    prng_seed: The prng seed used for decoding.
    job_log_dir: Directory for the job logs.
    replicated_model_states: A TrainState object.
    summary_writers: The summary writer objects to log summaries.
  """
  model = jax_task.model
  model_p = task_p.model
  metrics_p = task_p.metrics
  if not metrics_p:
    # Fall back to mean aggregation when the task defines no metrics params.
    metrics_p = base_metrics.MeanMetrics.Params()
  decode_metrics = metrics_p.Instantiate()
  process_decode_metrics = metrics_p.Instantiate()
  step_i = _get_step(replicated_model_states.step)
  pmap_axis_name = 'batch'

  # NOTE(review): the `inputs` parameter below shadows the outer `inputs`
  # list; inside decode_step it is a single per-device batch.
  def decode_step(mdl_states, prng_key, inputs):
    mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)
    metrics, out = trainer_lib.decode_step(model, mdl_states, prng_key, inputs,
                                           model_p.fprop_dtype)
    metrics = decode_metrics.aggregate(metrics)
    return metrics, out

  # As an example, suppose the output leaf from trainer_lib.decoder_step()
  # for each core has shape: [per_core_batch_size, decoding_length].
  # In the all_gather we set tiled=True, so the output chunks are all
  # concatenated into the existing batch axis, so we get shape
  # [num_cores x per_core_batch_size, decoding_length].
  # In the pmap call we set out_axes=None to not have to manually unreplicate,
  # so the output of pmap_decode_step() will have the same shape.
  #
  # Example code snippet showing this:
  #   # shape (8, 3, 2)
  #   x = jnp.tile(jnp.arange(8)[:, None, None],[1, 3, 2])
  #   # shape (24, 2)
  #   z = jax.pmap(
  #       lambda y: jax.lax.all_gather(y+1, axis_name='i', tiled=True),
  #       axis_name='i', out_axes=None)(x)
  #
  # We only aggregate metrics, not `out`, hence the tuple for out_axes.
  pmap_decode_step = jax.pmap(
      decode_step, axis_name=pmap_axis_name, out_axes=(None, 0))
  # Decode from the EMA weights when the optimizer tracks them.
  decode_step_func = functools.partial(pmap_decode_step,
                                       maybe_ema(replicated_model_states),
                                       prng_seed)
  # -1 means "decode until the input pipeline is exhausted".
  num_steps = [
      -1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p
  ]
  decodes = [list() for _ in input_p]
  for split, num_split_steps in enumerate(num_steps):
    logging.info('Start decoding on input %s', input_p[split].name)
    step_num = 0
    while num_split_steps < 0 or step_num < num_split_steps:
      step_num += 1
      try:
        batch = inputs[split].get_next()
      except (tf.errors.OutOfRangeError, StopIteration):
        # Split exhausted: reset it for any later pass and move on.
        inputs[split].reset()
        break
      batch = tf.nest.map_structure(py_utils.reshard, batch)
      batch_metrics, out = decode_step_func(batch)
      # We store the metrics directly since they were already aggregated
      # inside decode_step.
      decode_metrics.store(batch_metrics)
      logging.info('Finished decoding input batch %d', step_num)
      out = tf.nest.map_structure(py_utils.unshard, out)
      process_metrics, processed = model.process_decode_out(inputs[split], out)
      decodes[split].extend(processed)
      logging.info('Finished processing decoded input batch %d', step_num)
      # Accumulate the post-processing metrics for this batch.
      process_decode_metrics.update(process_metrics)
    with summary_writers[split].as_default():
      decode_metrics.summarize(step_i, 'decode_metrics')
      process_decode_metrics.summarize(step_i, 'process_decode_metrics')
  basedir = os.path.join(job_log_dir, 'decoder_out')
  dirnames = _get_dir_names(input_p)
  filename = _get_filename(replicated_model_states.step)
  for s in dirnames:
    dir_path = os.path.join(basedir, s)
    if not tf.io.gfile.exists(dir_path):
      tf.io.gfile.makedirs(dir_path)
  filenames = [os.path.join(basedir, s, filename) for s in dirnames]
  for split, output_file in enumerate(filenames):
    logging.info('Writing decoder output to %s with %d entries', output_file,
                 len(decodes[split]))
    io_utils.WriteKeyValuePairs(output_file, decodes[split])
def decode_once_spmd_model(
    task_p: InstantiableParams,
    input_p: Sequence[InstantiableParams],
    job_log_dir: Optional[str],
    checkpoint_type: CheckpointType,
    restore_checkpoint_dir: str,
    restore_checkpoint_step: Optional[int],
) -> None:
  """Runs the decoding once on the entire decoder datasets for SPMD model.

  Args:
    task_p: Params for the task that encapsulates an SPMD model.
    input_p: List of input params to be decoded.
    job_log_dir: Directory for the job logs.
    checkpoint_type: Type of model checkpointing method to use.
    restore_checkpoint_dir: The directory from which to restore checkpoint.
    restore_checkpoint_step: If set, the checkpoint step to restore. If unset,
      try to restore from the latest checkpoint if any.
  """
  # TODO(bf-jax): Retrieve the seeds from the model definition instead.
  prng_key = jax.random.PRNGKey(1234)
  prng_key, init_key = jax.random.split(prng_key)
  if restore_checkpoint_dir:
    restore_checkpoint_parent_dir = restore_checkpoint_dir
    if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
      # TODO(zhouwk): add sanity check on number of subdirs and number of
      # processes and fail early if unequal.
      restore_checkpoint_dir = os.path.join(restore_checkpoint_dir,
                                            f'{jax.process_index():03d}')
  multi_host_checkpointing = bool(checkpoint_type in {
      CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA
  })
  # Use one sample batch to derive the global input shapes/dtypes for pjit.
  sample_inputs = input_p[0].Instantiate().get_next()
  inputs_shape = tf.nest.map_structure(py_utils.get_global_input_shape_dtype,
                                       sample_inputs)
  model_p = task_p.model
  # TODO(b/198356509): This is a hack for now as we need to change some
  # annotations for mode='decode'. A future cl will move this logic
  # to a more generic model_p.update_sharding_params_v1(mode='decode').
  model_p.lm = model_p.lm.cls.set_sharding_params_v1(
      model_p.lm,
      replica_axis=model_p.lm.mesh_axis_names[0],
      data_axis=model_p.lm.mesh_axis_names[1],
      mdl_axis=model_p.lm.mesh_axis_names[2],
      device_ids_mesh=model_p.lm.device_mesh,
      mesh_axis_names=model_p.lm.mesh_axis_names,
      mode='decode')
  mesh_shape = model_p.device_mesh.shape
  device_mesh = mesh_utils.create_device_mesh(mesh_shape)
  logging.info('device_mesh: %s', device_mesh)
  jax_task = task_p.Instantiate()
  global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)
  with global_mesh:
    if restore_checkpoint_dir:
      model = jax_task.model
      model.instantiate_variable_configs()
      # Get the metadata from variables instead of actually instantiating them.
      partitioned_specs = jax_task.create_train_state_partition_specs(
          model.vars, discard_opt_states=True)
      # Instantiate the TrainState directly from the checkpoint.
      partitioned_train_state = checkpoints.restore_checkpoint(
          None,
          restore_checkpoint_dir,
          global_mesh=global_mesh,
          checkpoint_type=checkpoint_type,
          state_specs=partitioned_specs,
          step=restore_checkpoint_step)
      if multi_host_checkpointing:
        py_utils.sync_global_devices(
            f'checkpointer:restored:{restore_checkpoint_parent_dir}')
      decode_step_fn, inputs_partition_spec = (
          trainer_lib.get_partitioned_spmd_model_decode_fn(
              jax_task, init_key, partitioned_specs, inputs_shape))
    else:
      # When restore is not specified, randomly initiate the train_state.
      (partitioned_train_state, inputs_partition_spec, partitioned_specs,
       decode_step_fn) = trainer_lib.partition_spmd_model_decode(
           task_p, init_key, inputs_shape)
    logging.info('partitioned_train_state: %s',
                 jax.tree_map(lambda x: x.shape, partitioned_train_state))
    # We do not fold in jax.process_index in contrast to the pmap version and
    # use a single global key instead to rely on pjit to split for different
    # replicas.
    logging.info('root prng_key: %s', prng_key)
    prng_key, decode_key = jax.random.split(prng_key)
    logging.info('eval prng_key: %s', decode_key)
    spmd_decode_step_fn = functools.partial(
        decode_step_fn,
        trainer_lib.train_state_for_eval_step(partitioned_train_state),
        decode_key)
    num_steps = [
        -1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p
    ]
    inputs = [p.Instantiate() for p in input_p]
    decodes = [list() for _ in input_p]
    process_id = jax.process_index()
    for split, num_split_steps in enumerate(num_steps):
      logging.info('Start decoding on input %s', input_p[split].name)
      step_num = 0
      # A negative num_split_steps means decode until the input is exhausted.
      while num_split_steps < 0 or step_num < num_split_steps:
        step_num += 1
        try:
          batch = inputs[split].get_next()
        except (tf.errors.OutOfRangeError, StopIteration):
          break
        if jax.config.jax_parallel_functions_output_gda:
          batch = py_utils.create_gda(batch, inputs_shape, global_mesh,
                                      inputs_partition_spec)
        _, out = spmd_decode_step_fn(batch)
        # Output is fully replicated now, so it's ok to unreplicate it by
        # retrieving from device 0 only.
        out = py_utils.maybe_unreplicate_gda(out)
        global_batch_size = next(iter(out.values())).shape[0]
        logging.info('Finished decoding input batch %d with %d examples',
                     step_num, global_batch_size)
        # Manually shard the output per each jax process.
        # We require that all fields in the output is batch major.
        if global_batch_size % jax.process_count() != 0:
          raise ValueError(f'Global batch size {global_batch_size} must divide '
                           f'jax process count {jax.process_count()}')
        for k, v in out.items():
          if v.shape[0] != global_batch_size:
            raise ValueError('We require that all fields in the decode output '
                             'to have batch size as the first dim, got shape='
                             f'{v.shape} with key={k}, expect batch size = '
                             f'{global_batch_size}')
        per_process_batch_size = global_batch_size // jax.process_count()
        def shard(x, per_process_batch_size=per_process_batch_size):
          # per_process_batch_size is bound as a default argument to avoid the
          # late-binding closure pitfall across loop iterations.
          return x[(process_id *
                    per_process_batch_size):((process_id + 1) *
                                             per_process_batch_size)]
        out = jax.tree_map(shard, out)
        _, processed = jax_task.model.process_decode_out(inputs[split], out)
        decodes[split].extend(processed)
        logging.info('Finished processing decoded input batch %d', step_num)
  basedir = os.path.join(job_log_dir, 'decoder_out')
  dirnames = _get_dir_names(input_p)
  filename = _get_filename(
      py_utils.maybe_unreplicate_gda(partitioned_train_state.step))
  for s in dirnames:
    dir_path = os.path.join(basedir, s)
    if not tf.io.gfile.exists(dir_path):
      tf.io.gfile.makedirs(dir_path)
  filenames = [os.path.join(basedir, s, filename) for s in dirnames]
  for split, output_file in enumerate(filenames):
    logging.info('Writing decoder output to %s with %d entries', output_file,
                 len(decodes[split]))
    io_utils.WriteKeyValuePairs(output_file, decodes[split])
| 42.010139 | 80 | 0.710221 |
import contextlib
import functools
import hashlib
import os
import time
from typing import List, Optional, Sequence
from absl import logging
import jax
from jax.experimental import maps
from jax.experimental import mesh_utils
from lingvo.jax import base_input
from lingvo.jax import base_layer
from lingvo.jax import base_metrics
from lingvo.jax import base_model_params
from lingvo.jax import base_task
from lingvo.jax import checkpoint_pb2
from lingvo.jax import model_utils
from lingvo.jax import py_utils
from lingvo.jax import pytypes
from lingvo.jax import summary_utils
from lingvo.jax import train_states
from lingvo.jax import trainer_lib
import tensorflow.compat.v2 as tf
from lingvo.jax import checkpoints
from lingvo.jax import io_utils
BaseModelParamsT = base_model_params.BaseModelParamsT
CheckpointType = checkpoint_pb2.CheckpointType
InstantiableParams = py_utils.InstantiableParams
NestedMap = py_utils.NestedMap
JTensor = pytypes.JTensor
NestedJTensor = pytypes.NestedJTensor
TrainState = train_states.TrainState
SummaryWriter = tf.summary.SummaryWriter
def maybe_ema(model_states):
  """Returns a TrainState backed by EMA variables when the optimizer has them.

  Scans the first optimizer state for an `ema` entry; when found, builds a
  TrainState whose model variables are the EMA copies (optimizer states are
  dropped). Otherwise the input state is returned unchanged.
  """
  opt_states = model_states.opt_states
  if not opt_states:
    return model_states
  for state in opt_states[0]:
    if 'ema' in state:
      return TrainState(
          step=model_states.step, mdl_vars=state.ema, opt_states={})
  return model_states
def evaluate(
    model_name: str,
    job_log_dir: Optional[str],
    multi_host_checkpointing: Optional[bool],
    maybe_use_persistence_checkpointing: bool,
) -> None:
  """Runs the evaluation loop for a registered model on its eval datasets.

  Args:
    model_name: Name of a registered model, resolved via model_utils.
    job_log_dir: Directory for the job logs (checkpoints, summaries).
    multi_host_checkpointing: Whether to use multi-host checkpointing.
    maybe_use_persistence_checkpointing: Whether persistence-based
      checkpointing may be used (only consulted on the SPMD path).
  """
  model_config = model_utils.get_model(model_name)()
  task_p = model_config.task()
  model_p = task_p.model
  # Only datasets not marked as training are evaluated.
  eval_input_p = [v for v in model_config.datasets() if not v.is_training]
  for inp in eval_input_p:
    # Shard the eval inputs across all JAX processes.
    inp.num_infeed_hosts = jax.process_count()
    inp.infeed_host_index = jax.process_index()
  # A device mesh implies an SPMD-partitioned model; otherwise fall back to
  # pure data parallelism via pmap.
  if model_p.device_mesh is not None:
    checkpoint_type = checkpoints.retrieve_checkpoint_type(
        multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)
    evaluate_spmd_model(task_p, eval_input_p, job_log_dir, checkpoint_type)
  else:
    evaluate_pmap_model(task_p, eval_input_p, job_log_dir)
def evaluate_pmap_model(
task_p: InstantiableParams,
eval_input_p: Sequence[InstantiableParams],
job_log_dir: Optional[str],
) -> None:
logging.info('Using pmap for data parallelism.')
jax_task = task_p.Instantiate()
eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]
prng_key = jax.random.PRNGKey(1234)
prng_key, init_key = jax.random.split(prng_key)
checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')
model_states = trainer_lib.initialize_model_state(jax_task, init_key)
model_states = checkpoints.restore_checkpoint(model_states, checkpoint_dir)
replicated_model_states = trainer_lib.replicate_model_state(model_states)
logging.info('replicated_model_states: %s',
jax.tree_map(lambda x: x.shape, replicated_model_states))
prng_key = jax.random.fold_in(prng_key, jax.process_index())
logging.info('root prng_key: %s', prng_key)
def eval_step(mdl_states, prng_key, inputs):
mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)
return trainer_lib.eval_step_single_learner(
jax_task,
mdl_states,
prng_key,
inputs,
data_parallel_axis_name='batch',
fprop_dtype=jax_task.model.fprop_dtype)
num_devices = jax.local_device_count()
prng_key, eval_key = jax.random.split(prng_key)
eval_prng_seed = jax.random.split(eval_key, num=num_devices)
logging.info('eval prng_seed: %s', eval_prng_seed)
p_eval_step = jax.pmap(eval_step, axis_name='batch')
logging.info('Evaluation loop starting...')
summary_base_dir = os.path.join(job_log_dir, 'summaries')
summary_eval_dirs = [
os.path.join(summary_base_dir, f'eval_test_{split}')
for split, _ in enumerate(eval_input_p)
]
num_steps = [
-1 if p.reset_for_eval else p.eval_loop_num_batches for p in eval_input_p
]
last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
with contextlib.ExitStack() as exit_stack:
eval_summary_writers = [
exit_stack.enter_context(summary_utils.get_summary_writer(d))
for d in summary_eval_dirs
]
while True:
step_i = int(jax.device_get(replicated_model_states.step)[0])
eval_step = functools.partial(p_eval_step,
maybe_ema(replicated_model_states),
eval_prng_seed)
model_utils.run_eval_loop_over_test_splits(
num_steps,
eval_step,
eval_summary_writers,
step_i,
eval_input_pipelines,
reshard_inputs=True)
if last_checkpoint is not None:
last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(
last_checkpoint)
exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
if exceeded_ckpt >= task_p.train.num_train_steps:
break
del replicated_model_states
new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
while new_checkpoint == last_checkpoint:
time.sleep(60)
new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
logging.info('Found new checkpoint: %s', new_checkpoint)
model_states = checkpoints.restore_checkpoint(model_states,
checkpoint_dir)
replicated_model_states = trainer_lib.replicate_model_state(model_states)
last_checkpoint = new_checkpoint
def evaluate_spmd_model(
task_p: InstantiableParams,
eval_input_p: Sequence[InstantiableParams],
job_log_dir: Optional[str],
checkpoint_type: CheckpointType,
) -> None:
logging.info('Using SPMD sharding for model parallelism.')
eval_input_pipelines = [input_p.Instantiate() for input_p in eval_input_p]
prng_key = jax.random.PRNGKey(1234)
prng_key, init_key = jax.random.split(prng_key)
checkpoint_dir = os.path.join(job_log_dir, 'checkpoints')
if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
checkpoint_task_dir = os.path.join(checkpoint_dir,
f'{jax.process_index():03d}')
else:
checkpoint_task_dir = checkpoint_dir
multi_host_checkpointing = bool(checkpoint_type in {
CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA
})
def get_shape_dtype(x):
y = jax.ShapeDtypeStruct(x.shape, x.dtype)
return y
sample_model_inputs = eval_input_p[0].Instantiate().get_next()
inputs_shape = tf.nest.map_structure(get_shape_dtype, sample_model_inputs)
jax_task = task_p.Instantiate()
model_p = task_p.model
mesh_shape = model_p.device_mesh.shape
device_mesh = mesh_utils.create_device_mesh(mesh_shape)
logging.info('device_mesh: %s', device_mesh)
global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)
use_gda_checkpoint = jax.config.jax_parallel_functions_output_gda
with global_mesh:
jax_task.model.instantiate_variable_configs()
if use_gda_checkpoint:
partitioned_specs = jax_task.create_train_state_partition_specs(
jax_task.model.vars, discard_opt_states=True)
partitioned_train_state = checkpoints.restore_checkpoint(
None,
checkpoint_task_dir,
global_mesh=global_mesh,
checkpoint_type=checkpoint_type,
state_specs=partitioned_specs)
eval_step, inputs_partition_specs = (
trainer_lib.get_partitioned_spmd_model_step_fn(
jax_task,
init_key,
partitioned_specs,
inputs_shape,
is_eval=True))
else:
(partitioned_train_state, partitioned_specs, inputs_partition_specs, _,
eval_step, _) = trainer_lib.partition_spmd_model(task_p, init_key,
inputs_shape)
partitioned_train_state = checkpoints.restore_checkpoint(
partitioned_train_state,
checkpoint_task_dir,
global_mesh=global_mesh,
checkpoint_type=checkpoint_type,
state_specs=partitioned_specs)
logging.info('partitioned_train_state: %s',
jax.tree_map(lambda x: x.shape, partitioned_train_state))
if multi_host_checkpointing:
py_utils.sync_global_devices(f'checkpointer:restored:{checkpoint_dir}')
logging.info('root prng_key: %s', prng_key)
prng_key, eval_key = jax.random.split(prng_key)
logging.info('eval prng_key: %s', eval_key)
logging.info('Evaluation loop starting...')
summary_base_dir = os.path.join(job_log_dir, 'summaries')
summary_eval_dirs = [
os.path.join(summary_base_dir, f'eval_{split}')
for split, _ in enumerate(eval_input_p)
]
num_steps = [-1 if p.reset_for_eval else 1 for p in eval_input_p]
last_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
with contextlib.ExitStack() as exit_stack:
eval_summary_writers = [
exit_stack.enter_context(summary_utils.get_summary_writer(d))
for d in summary_eval_dirs
]
while True:
step_i = int(jax.device_get(partitioned_train_state.step))
eval_step_fn = functools.partial(
eval_step,
trainer_lib.train_state_for_eval_step(partitioned_train_state),
eval_key)
model_utils.run_eval_loop_over_test_splits(
num_steps,
eval_step_fn,
eval_summary_writers,
step_i,
eval_input_pipelines,
inputs_partition_specs,
inputs_shape,
global_mesh,
reshard_inputs=False)
if last_checkpoint is not None:
last_ckpt_step = checkpoints.get_step_from_checkpoint_asset(
last_checkpoint)
exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
if exceeded_ckpt >= task_p.train.num_train_steps:
break
new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
while new_checkpoint == last_checkpoint:
time.sleep(60)
new_checkpoint = checkpoints.latest_checkpoint(checkpoint_dir)
logging.info('Found new checkpoint: %s', new_checkpoint)
partitioned_train_state = checkpoints.restore_checkpoint(
None if use_gda_checkpoint else partitioned_train_state,
checkpoint_task_dir,
global_mesh=global_mesh,
checkpoint_type=checkpoint_type,
state_specs=partitioned_specs)
if multi_host_checkpointing:
py_utils.sync_global_devices(
f'checkpointer:restored:{checkpoint_dir}')
last_checkpoint = new_checkpoint
def decode(
    model_name: str,
    job_log_dir: Optional[str],
    multi_host_checkpointing: Optional[bool],
    maybe_use_persistence_checkpointing: bool,
    restore_checkpoint_dir: Optional[str],
    restore_checkpoint_step: Optional[int],
    continuous_decode: bool,
) -> None:
  """Runs decoding on the decoder datasets of a registered model.

  Args:
    model_name: Name of a registered model, resolved via model_utils.
    job_log_dir: Directory for the job logs.
    multi_host_checkpointing: Whether to use multi-host checkpointing.
    maybe_use_persistence_checkpointing: Whether persistence-based
      checkpointing may be used (only consulted on the SPMD path).
    restore_checkpoint_dir: The directory from which to restore checkpoint.
    restore_checkpoint_step: If set, the checkpoint step to restore.
    continuous_decode: If True, keep decoding as new checkpoints appear
      (supported on the pmap path only).
  """
  logging.info('running decode_once on model %s restored from %s', model_name,
               restore_checkpoint_dir)
  model_config = model_utils.get_model(model_name)()
  task_p = model_config.task()
  model_p = task_p.model
  decoder_inputs = model_config.decoder_datasets()
  if not decoder_inputs:
    # Nothing to decode for this model.
    return
  for inp in decoder_inputs:
    # Shard the decoder inputs across all JAX processes.
    inp.num_infeed_hosts = jax.process_count()
    inp.infeed_host_index = jax.process_index()
  if model_p.device_mesh is not None:
    # SPMD path: continuous decoding is not implemented yet.
    if continuous_decode:
      raise NotImplementedError('http://b/214589358: not supported')
    checkpoint_type = checkpoints.retrieve_checkpoint_type(
        multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)
    decode_once_spmd_model(task_p, decoder_inputs, job_log_dir, checkpoint_type,
                           restore_checkpoint_dir, restore_checkpoint_step)
  else:
    decode_pmap_model(task_p, decoder_inputs, job_log_dir,
                      restore_checkpoint_dir, restore_checkpoint_step,
                      continuous_decode)
def _get_dir_names(input_p: Sequence[InstantiableParams]) -> Sequence[str]:
uniq_names = set()
ret = []
for idx, p in enumerate(input_p):
name = p.name or f'decode_test_{idx}'
if p.name and p.name in uniq_names:
name = f'{p.name}_{idx}'
if name in uniq_names:
suffix = hashlib.md5(name.encode()).hexdigest()[-5:]
name = f'{name}_{suffix}'
assert name not in uniq_names
uniq_names.add(name)
ret.append(name)
return ret
def _get_step(step: base_layer.JTensorOrPartitionSpec) -> int:
if step.ndim == 0:
return jax.device_get(step)
if step.ndim == 1:
return jax.device_get(step[0])
raise ValueError(
f'Expecting a replicated 1D global step (got ndim=`{step.ndim}`).')
def _get_filename(step: base_layer.JTensorOrPartitionSpec) -> str:
  """Builds this host's decoder-output filename for the given step."""
  return f'decoder_out_{_get_step(step)}_shard_{jax.process_index()}'
def decode_pmap_model(
task_p: InstantiableParams,
input_p: Sequence[InstantiableParams],
job_log_dir: Optional[str],
restore_checkpoint_dir: Optional[str],
restore_checkpoint_step: Optional[int],
continuous_decode: bool,
) -> None:
if continuous_decode and restore_checkpoint_step is not None:
raise ValueError('Continuous decoding mode requires restore_checkpoint_step'
'=None, actual restore_checkpoint_step='
f'{restore_checkpoint_step}')
restore_checkpoint_dir = restore_checkpoint_dir or os.path.join(
job_log_dir, 'checkpoints')
prng_key = jax.random.PRNGKey(1234)
prng_key, init_key = jax.random.split(prng_key)
prng_key = jax.random.fold_in(prng_key, jax.process_index())
logging.info('root prng_key: %s', prng_key)
prng_key, eval_key = jax.random.split(prng_key)
prng_seed = jax.random.split(eval_key, num=jax.local_device_count())
logging.info('decoder prng_seed: %s', prng_seed)
inputs = [p.Instantiate() for p in input_p]
summary_base_dir = os.path.join(job_log_dir, 'summaries')
dirnames = _get_dir_names(input_p)
summary_decode_dirs = [
os.path.join(summary_base_dir, f'decode_test_{dirnames[split]}')
for split, _ in enumerate(input_p)
]
with contextlib.ExitStack() as exit_stack:
summary_writers = [
exit_stack.enter_context(summary_utils.get_summary_writer(d))
for d in summary_decode_dirs
]
jax_task = task_p.Instantiate()
model_states = trainer_lib.initialize_model_state(jax_task, init_key)
model_states = checkpoints.restore_checkpoint(
model_states, restore_checkpoint_dir, step=restore_checkpoint_step)
replicated_model_states = trainer_lib.replicate_model_state(model_states)
logging.info('replicated_model_states: %s',
jax.tree_map(lambda x: x.shape, replicated_model_states))
last_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
while True:
_decode_once_pmap_model(jax_task, task_p, inputs, input_p, prng_seed,
job_log_dir, replicated_model_states,
summary_writers)
if not continuous_decode:
break
if last_checkpoint is not None:
last_ckpt_step = int(last_checkpoint.split('_')[-1])
exceeded_ckpt = last_ckpt_step + task_p.train.save_interval_steps
if exceeded_ckpt >= task_p.train.num_train_steps:
break
del replicated_model_states
new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
while new_checkpoint == last_checkpoint:
time.sleep(60)
new_checkpoint = checkpoints.latest_checkpoint(restore_checkpoint_dir)
logging.info('Found new checkpoint: %s', new_checkpoint)
model_states = checkpoints.restore_checkpoint(model_states,
restore_checkpoint_dir)
replicated_model_states = trainer_lib.replicate_model_state(model_states)
last_checkpoint = new_checkpoint
def _decode_once_pmap_model(
    jax_task: base_task.SingleTask,
    task_p: InstantiableParams,
    inputs: List[base_input.BaseInput],
    input_p: Sequence[InstantiableParams],
    prng_seed: JTensor,
    job_log_dir: Optional[str],
    replicated_model_states: train_states.TrainState,
    summary_writers: List[SummaryWriter],
) -> None:
  """Runs the decoding once on the entire decoder datasets for a pmap model.

  Args:
    jax_task: The task owning the model to decode with.
    task_p: Params for the task encapsulating a data-parallel model.
    inputs: Instantiated decoder input pipelines, parallel to `input_p`.
    input_p: List of input params to be decoded.
    prng_seed: The prng seed used for decoding (one key per local device).
    job_log_dir: Directory for the job logs; decoder output goes under it.
    replicated_model_states: Model states replicated across local devices.
    summary_writers: One summary writer per decoder input.
  """
  model = jax_task.model
  model_p = task_p.model
  metrics_p = task_p.metrics
  if not metrics_p:
    metrics_p = base_metrics.MeanMetrics.Params()
  decode_metrics = metrics_p.Instantiate()
  process_decode_metrics = metrics_p.Instantiate()
  step_i = _get_step(replicated_model_states.step)
  pmap_axis_name = 'batch'
  def decode_step(mdl_states, prng_key, inputs):
    mdl_states = trainer_lib.train_state_for_eval_step(mdl_states)
    metrics, out = trainer_lib.decode_step(model, mdl_states, prng_key, inputs,
                                           model_p.fprop_dtype)
    # Aggregate across replicas inside the pmapped function, so the metrics
    # come back already reduced (hence out_axes=None for them below).
    metrics = decode_metrics.aggregate(metrics)
    return metrics, out
  # We only aggregate metrics, not `out`, hence the tuple for out_axes.
  # BUGFIX: this assignment was previously bound to `ode_step`, leaving
  # `pmap_decode_step` (used just below) undefined at runtime.
  pmap_decode_step = jax.pmap(
      decode_step, axis_name=pmap_axis_name, out_axes=(None, 0))
  decode_step_func = functools.partial(pmap_decode_step,
                                       maybe_ema(replicated_model_states),
                                       prng_seed)
  num_steps = [
      -1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p
  ]
  decodes = [list() for _ in input_p]
  for split, num_split_steps in enumerate(num_steps):
    logging.info('Start decoding on input %s', input_p[split].name)
    step_num = 0
    # A negative num_split_steps means decode until the input is exhausted.
    while num_split_steps < 0 or step_num < num_split_steps:
      step_num += 1
      try:
        batch = inputs[split].get_next()
      except (tf.errors.OutOfRangeError, StopIteration):
        inputs[split].reset()
        break
      batch = tf.nest.map_structure(py_utils.reshard, batch)
      batch_metrics, out = decode_step_func(batch)
      # Store the metrics directly: they were already aggregated inside
      # decode_step.
      decode_metrics.store(batch_metrics)
      logging.info('Finished decoding input batch %d', step_num)
      out = tf.nest.map_structure(py_utils.unshard, out)
      process_metrics, processed = model.process_decode_out(inputs[split], out)
      decodes[split].extend(processed)
      logging.info('Finished processing decoded input batch %d', step_num)
      process_decode_metrics.update(process_metrics)
    with summary_writers[split].as_default():
      decode_metrics.summarize(step_i, 'decode_metrics')
      process_decode_metrics.summarize(step_i, 'process_decode_metrics')
  basedir = os.path.join(job_log_dir, 'decoder_out')
  dirnames = _get_dir_names(input_p)
  filename = _get_filename(replicated_model_states.step)
  for s in dirnames:
    dir_path = os.path.join(basedir, s)
    if not tf.io.gfile.exists(dir_path):
      tf.io.gfile.makedirs(dir_path)
  filenames = [os.path.join(basedir, s, filename) for s in dirnames]
  for split, output_file in enumerate(filenames):
    logging.info('Writing decoder output to %s with %d entries', output_file,
                 len(decodes[split]))
    io_utils.WriteKeyValuePairs(output_file, decodes[split])
def decode_once_spmd_model(
task_p: InstantiableParams,
input_p: Sequence[InstantiableParams],
job_log_dir: Optional[str],
checkpoint_type: CheckpointType,
restore_checkpoint_dir: str,
restore_checkpoint_step: Optional[int],
) -> None:
prng_key = jax.random.PRNGKey(1234)
prng_key, init_key = jax.random.split(prng_key)
if restore_checkpoint_dir:
restore_checkpoint_parent_dir = restore_checkpoint_dir
if checkpoint_type == CheckpointType.CHECKPOINT_MULTI_HOST_FLAX:
restore_checkpoint_dir = os.path.join(restore_checkpoint_dir,
f'{jax.process_index():03d}')
multi_host_checkpointing = bool(checkpoint_type in {
CheckpointType.CHECKPOINT_MULTI_HOST_FLAX, CheckpointType.CHECKPOINT_GDA
})
sample_inputs = input_p[0].Instantiate().get_next()
inputs_shape = tf.nest.map_structure(py_utils.get_global_input_shape_dtype,
sample_inputs)
model_p = task_p.model
model_p.lm = model_p.lm.cls.set_sharding_params_v1(
model_p.lm,
replica_axis=model_p.lm.mesh_axis_names[0],
data_axis=model_p.lm.mesh_axis_names[1],
mdl_axis=model_p.lm.mesh_axis_names[2],
device_ids_mesh=model_p.lm.device_mesh,
mesh_axis_names=model_p.lm.mesh_axis_names,
mode='decode')
mesh_shape = model_p.device_mesh.shape
device_mesh = mesh_utils.create_device_mesh(mesh_shape)
logging.info('device_mesh: %s', device_mesh)
jax_task = task_p.Instantiate()
global_mesh = maps.Mesh(device_mesh, model_p.mesh_axis_names)
with global_mesh:
if restore_checkpoint_dir:
model = jax_task.model
model.instantiate_variable_configs()
partitioned_specs = jax_task.create_train_state_partition_specs(
model.vars, discard_opt_states=True)
partitioned_train_state = checkpoints.restore_checkpoint(
None,
restore_checkpoint_dir,
global_mesh=global_mesh,
checkpoint_type=checkpoint_type,
state_specs=partitioned_specs,
step=restore_checkpoint_step)
if multi_host_checkpointing:
py_utils.sync_global_devices(
f'checkpointer:restored:{restore_checkpoint_parent_dir}')
decode_step_fn, inputs_partition_spec = (
trainer_lib.get_partitioned_spmd_model_decode_fn(
jax_task, init_key, partitioned_specs, inputs_shape))
else:
(partitioned_train_state, inputs_partition_spec, partitioned_specs,
decode_step_fn) = trainer_lib.partition_spmd_model_decode(
task_p, init_key, inputs_shape)
logging.info('partitioned_train_state: %s',
jax.tree_map(lambda x: x.shape, partitioned_train_state))
logging.info('root prng_key: %s', prng_key)
prng_key, decode_key = jax.random.split(prng_key)
logging.info('eval prng_key: %s', decode_key)
spmd_decode_step_fn = functools.partial(
decode_step_fn,
trainer_lib.train_state_for_eval_step(partitioned_train_state),
decode_key)
num_steps = [
-1 if p.reset_for_eval else p.eval_loop_num_batches for p in input_p
]
inputs = [p.Instantiate() for p in input_p]
decodes = [list() for _ in input_p]
process_id = jax.process_index()
for split, num_split_steps in enumerate(num_steps):
logging.info('Start decoding on input %s', input_p[split].name)
step_num = 0
while num_split_steps < 0 or step_num < num_split_steps:
step_num += 1
try:
batch = inputs[split].get_next()
except (tf.errors.OutOfRangeError, StopIteration):
break
if jax.config.jax_parallel_functions_output_gda:
batch = py_utils.create_gda(batch, inputs_shape, global_mesh,
inputs_partition_spec)
_, out = spmd_decode_step_fn(batch)
# retrieving from device 0 only.
out = py_utils.maybe_unreplicate_gda(out)
global_batch_size = next(iter(out.values())).shape[0]
logging.info('Finished decoding input batch %d with %d examples',
step_num, global_batch_size)
# Manually shard the output per each jax process.
# We require that all fields in the output is batch major.
if global_batch_size % jax.process_count() != 0:
raise ValueError(f'Global batch size {global_batch_size} must divide '
f'jax process count {jax.process_count()}')
for k, v in out.items():
if v.shape[0] != global_batch_size:
raise ValueError('We require that all fields in the decode output '
'to have batch size as the first dim, got shape='
f'{v.shape} with key={k}, expect batch size = '
f'{global_batch_size}')
per_process_batch_size = global_batch_size // jax.process_count()
def shard(x, per_process_batch_size=per_process_batch_size):
return x[(process_id *
per_process_batch_size):((process_id + 1) *
per_process_batch_size)]
out = jax.tree_map(shard, out)
_, processed = jax_task.model.process_decode_out(inputs[split], out)
decodes[split].extend(processed)
logging.info('Finished processing decoded input batch %d', step_num)
basedir = os.path.join(job_log_dir, 'decoder_out')
dirnames = _get_dir_names(input_p)
filename = _get_filename(
py_utils.maybe_unreplicate_gda(partitioned_train_state.step))
for s in dirnames:
dir_path = os.path.join(basedir, s)
if not tf.io.gfile.exists(dir_path):
tf.io.gfile.makedirs(dir_path)
filenames = [os.path.join(basedir, s, filename) for s in dirnames]
for split, output_file in enumerate(filenames):
logging.info('Writing decoder output to %s with %d entries', output_file,
len(decodes[split]))
io_utils.WriteKeyValuePairs(output_file, decodes[split])
| true | true |
f72737182c2d3650ddf80bd690d0f8357d6c3fc4 | 6,681 | py | Python | tests/api/v2/managers/test_config_api_manager.py | malached/caldera | b622b0b8d0a04bcd0328040cbf53a01b93505afc | [
"Apache-2.0"
] | 1 | 2021-10-06T09:25:18.000Z | 2021-10-06T09:25:18.000Z | tests/api/v2/managers/test_config_api_manager.py | malached/caldera | b622b0b8d0a04bcd0328040cbf53a01b93505afc | [
"Apache-2.0"
] | 1 | 2019-04-25T07:12:14.000Z | 2019-04-25T07:12:14.000Z | tests/api/v2/managers/test_config_api_manager.py | malached/caldera | b622b0b8d0a04bcd0328040cbf53a01b93505afc | [
"Apache-2.0"
] | null | null | null | import pytest
from app.api.v2 import errors
from app.api.v2.managers import config_api_manager
from app.api.v2.managers.config_api_manager import ConfigApiManager, ConfigNotFound, ConfigUpdateNotAllowed
from app.utility.base_world import BaseWorld
class StubDataService:
def __init__(self,):
self.abilities = []
async def locate(self, key):
assert key == 'abilities'
return self.abilities
@pytest.fixture
def base_world():
main_conf = {
'app.contact.dns.domain': 'mycaldera.caldera',
'app.contact.dns.socket': '0.0.0.0:8853',
'app.contact.html': '/weather',
'app.contact.http': 'http://0.0.0.0:8888',
'app.contact.tcp': '0.0.0.0:7010',
'app.contact.tunnel.ssh.socket': '0.0.0.0:8022',
'app.contact.udp': '0.0.0.0:7013',
'app.contact.websocket': '0.0.0.0:7012',
'exfil_dir': '/tmp/caldera',
'plugins': [
'stockpile',
'atomic'
],
'reports_dir': '/tmp',
'host': '0.0.0.0',
'auth.login.handler.module': 'default',
'users': {
'red': {
'red': 'password-foo'
},
'blue': {
'blue': 'password-bar'
}
}
}
agents_conf = {
'sleep_min': '30',
'sleep_max': '60',
'untrusted_timer': '90',
'watchdog': '0',
'implant_name': 'splunkd',
'deadman_abilities': [
'this-is-a-fake-ability'
],
'bootstrap_abilities': [
'this-is-another-fake-ability'
]
}
BaseWorld.clear_config()
BaseWorld.apply_config('main', main_conf)
BaseWorld.apply_config('agents', agents_conf)
yield BaseWorld
BaseWorld.clear_config()
def test_filter_keys():
    """filter_keys drops the requested top-level keys and keeps the rest."""
    sample = {
        'foo': 1,
        'bar': 2,
        'baz': {
            'key3': 3,
            'key4': 4
        }
    }
    result = config_api_manager.filter_keys(sample, keys_to_remove=['baz', 'bar'])
    # Only 'foo' survives; nested keys under removed entries go with them.
    assert result == {'foo': 1}
def test_get_filtered_config_remove_sensitive_keys(base_world, data_svc):
    """Sensitive keys ('users', 'host') are stripped from the returned config."""
    test_conf = {
        'users': 'this should be filtered',
        'host': 'this should be filtered',
        'foo': '1',
        'bar': '2',
        'baz': '3'
    }
    base_world.apply_config('test', test_conf)
    manager = ConfigApiManager(data_svc, None)
    filtered = manager.get_filtered_config('test')
    # Only the non-sensitive properties should survive filtering.
    expected = {
        'foo': '1',
        'bar': '2',
        'baz': '3'
    }
    assert filtered == expected
def test_get_filtered_config_all_sensitive_keys_filtered(base_world, data_svc):
    """A config made up solely of sensitive keys filters down to nothing."""
    sensitive_conf = {key: 'foo' for key in config_api_manager.SENSITIVE_CONFIG_PROPS}
    base_world.apply_config('test', sensitive_conf)
    # Sanity-check that the raw config made it into BaseWorld unfiltered.
    assert base_world.get_config(name='test') == sensitive_conf
    manager = ConfigApiManager(data_svc, None)
    filtered = manager.get_filtered_config('test')
    assert filtered == {}
def test_get_filtered_config_throws_exception_on_not_found(base_world, data_svc):
    """Requesting an unknown config name raises ConfigNotFound."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(ConfigNotFound):
        manager.get_filtered_config('THIS DOES NOT EXIST')
def test_update_main_config(base_world, data_svc):
    """Updating a non-sensitive main-config property is reflected in reads."""
    manager = ConfigApiManager(data_svc, None)
    manager.update_main_config(prop='foo.bar', value=100)
    assert manager.get_filtered_config('main')['foo.bar'] == 100
def test_update_main_config_throws_exception_on_sensitive_field(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(ConfigUpdateNotAllowed):
manager.update_main_config(prop='host', value='this is not allowed')
async def test_update_global_agent_config(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
await manager.update_global_agent_config(sleep_min=5, sleep_max=10)
agent_config = manager.get_filtered_config('agents')
assert agent_config['sleep_min'] == 5
assert agent_config['sleep_max'] == 10
async def test_update_global_agent_config_allows_partial_updates(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
agent_config = manager.get_filtered_config('agents')
await manager.update_global_agent_config() # no arguments passed in--should no-op
assert manager.get_filtered_config('agents') == agent_config
async def test_update_global_agent_config_updates_list_properties(base_world, ability):
    """Deadman/bootstrap ability lists are replaced wholesale by the updater."""
    stub_data_svc = StubDataService()
    # Abilities returned by the stub's locate('abilities') call.
    stub_data_svc.abilities = [
        ability('ability-1'),
        ability('ability-2'),
        ability('ability-3')
    ]
    manager = ConfigApiManager(data_svc=stub_data_svc, file_svc=None)
    await manager.update_global_agent_config(
        deadman_abilities=['ability-1', 'ability-2'],
        bootstrap_abilities=['ability-3']
    )
    agent_config = manager.get_filtered_config('agents')
    assert agent_config['deadman_abilities'] == ['ability-1', 'ability-2']
    assert agent_config['bootstrap_abilities'] == ['ability-3']
async def test_update_global_agent_config_throws_validation_error_bad_sleep_min(base_world, data_svc):
    """A negative sleep_min fails validation."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_global_agent_config(sleep_min=-1)
async def test_update_global_agent_config_throws_validation_error_bad_sleep_max(base_world, data_svc):
    """A negative sleep_max fails validation."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_global_agent_config(sleep_max=-1)
async def test_update_global_agent_config_throws_validation_error_bad_watchdog(base_world, data_svc):
    """A negative watchdog timer fails validation."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_global_agent_config(watchdog=-1)
async def test_update_global_agent_config_throws_validation_error_bad_untrusted_timer(base_world, data_svc):
    """A negative untrusted_timer fails validation."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_global_agent_config(untrusted_timer=-1)
async def test_update_global_agent_config_throws_validation_error_bad_implant_name(base_world, data_svc):
    """An empty implant_name fails validation."""
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_global_agent_config(implant_name='')
async def test_update_main_config_throws_validation_error_empty_prop(base_world, data_svc):
    """An empty property name fails validation."""
    # NOTE(review): update_main_config is awaited here but called synchronously
    # in test_update_main_config above -- confirm which calling style is right.
    manager = ConfigApiManager(data_svc, None)
    with pytest.raises(errors.DataValidationError):
        await manager.update_main_config(prop='', value=1234)
| 31.514151 | 108 | 0.68822 | import pytest
from app.api.v2 import errors
from app.api.v2.managers import config_api_manager
from app.api.v2.managers.config_api_manager import ConfigApiManager, ConfigNotFound, ConfigUpdateNotAllowed
from app.utility.base_world import BaseWorld
class StubDataService:
    """Minimal stand-in for the app's data service used by these tests."""
    def __init__(self,):
        # Populated directly by tests with stub ability objects.
        self.abilities = []
    async def locate(self, key):
        # These tests only ever locate 'abilities'; anything else is a test bug.
        assert key == 'abilities'
        return self.abilities
@pytest.fixture
def base_world():
    """Apply standard 'main' and 'agents' configs to BaseWorld, then clean up."""
    # Representative server config; includes sensitive keys ('host', 'users')
    # that get_filtered_config() is expected to strip out.
    main_conf = {
        'app.contact.dns.domain': 'mycaldera.caldera',
        'app.contact.dns.socket': '0.0.0.0:8853',
        'app.contact.html': '/weather',
        'app.contact.http': 'http://0.0.0.0:8888',
        'app.contact.tcp': '0.0.0.0:7010',
        'app.contact.tunnel.ssh.socket': '0.0.0.0:8022',
        'app.contact.udp': '0.0.0.0:7013',
        'app.contact.websocket': '0.0.0.0:7012',
        'exfil_dir': '/tmp/caldera',
        'plugins': [
            'stockpile',
            'atomic'
        ],
        'reports_dir': '/tmp',
        'host': '0.0.0.0',
        'auth.login.handler.module': 'default',
        'users': {
            'red': {
                'red': 'password-foo'
            },
            'blue': {
                'blue': 'password-bar'
            }
        }
    }
    # Baseline global agent settings exercised by the update tests above.
    agents_conf = {
        'sleep_min': '30',
        'sleep_max': '60',
        'untrusted_timer': '90',
        'watchdog': '0',
        'implant_name': 'splunkd',
        'deadman_abilities': [
            'this-is-a-fake-ability'
        ],
        'bootstrap_abilities': [
            'this-is-another-fake-ability'
        ]
    }
    BaseWorld.clear_config()
    BaseWorld.apply_config('main', main_conf)
    BaseWorld.apply_config('agents', agents_conf)
    yield BaseWorld
    # Teardown: leave no config behind for other tests.
    BaseWorld.clear_config()
def test_filter_keys():
    """filter_keys drops the named top-level keys (nested dicts go wholesale)."""
    mapping = {
        'foo': 1,
        'bar': 2,
        'baz': {
            'key3': 3,
            'key4': 4
        }
    }
    filtered = config_api_manager.filter_keys(mapping, keys_to_remove=['baz', 'bar'])
    expected = {'foo': 1}
    assert filtered == expected
def test_get_filtered_config_remove_sensitive_keys(base_world, data_svc):
test_conf = {
'users': 'this should be filtered',
'host': 'this should be filtered',
'foo': '1',
'bar': '2',
'baz': '3'
}
base_world.apply_config('test', test_conf)
manager = ConfigApiManager(data_svc, None)
filtered = manager.get_filtered_config('test')
expected = {
'foo': '1',
'bar': '2',
'baz': '3'
}
assert filtered == expected
def test_get_filtered_config_all_sensitive_keys_filtered(base_world, data_svc):
sensitive_conf = {key: 'foo' for key in config_api_manager.SENSITIVE_CONFIG_PROPS}
base_world.apply_config('test', sensitive_conf)
assert base_world.get_config(name='test') == sensitive_conf
manager = ConfigApiManager(data_svc, None)
filtered = manager.get_filtered_config('test')
assert filtered == {}
def test_get_filtered_config_throws_exception_on_not_found(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(ConfigNotFound):
manager.get_filtered_config('THIS DOES NOT EXIST')
def test_update_main_config(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
manager.update_main_config(prop='foo.bar', value=100)
assert manager.get_filtered_config('main')['foo.bar'] == 100
def test_update_main_config_throws_exception_on_sensitive_field(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(ConfigUpdateNotAllowed):
manager.update_main_config(prop='host', value='this is not allowed')
async def test_update_global_agent_config(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
await manager.update_global_agent_config(sleep_min=5, sleep_max=10)
agent_config = manager.get_filtered_config('agents')
assert agent_config['sleep_min'] == 5
assert agent_config['sleep_max'] == 10
async def test_update_global_agent_config_allows_partial_updates(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
agent_config = manager.get_filtered_config('agents')
await manager.update_global_agent_config()
assert manager.get_filtered_config('agents') == agent_config
async def test_update_global_agent_config_updates_list_properties(base_world, ability):
stub_data_svc = StubDataService()
stub_data_svc.abilities = [
ability('ability-1'),
ability('ability-2'),
ability('ability-3')
]
manager = ConfigApiManager(data_svc=stub_data_svc, file_svc=None)
await manager.update_global_agent_config(
deadman_abilities=['ability-1', 'ability-2'],
bootstrap_abilities=['ability-3']
)
agent_config = manager.get_filtered_config('agents')
assert agent_config['deadman_abilities'] == ['ability-1', 'ability-2']
assert agent_config['bootstrap_abilities'] == ['ability-3']
async def test_update_global_agent_config_throws_validation_error_bad_sleep_min(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(sleep_min=-1)
async def test_update_global_agent_config_throws_validation_error_bad_sleep_max(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(sleep_max=-1)
async def test_update_global_agent_config_throws_validation_error_bad_watchdog(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(watchdog=-1)
async def test_update_global_agent_config_throws_validation_error_bad_untrusted_timer(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(untrusted_timer=-1)
async def test_update_global_agent_config_throws_validation_error_bad_implant_name(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(implant_name='')
async def test_update_main_config_throws_validation_error_empty_prop(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_main_config(prop='', value=1234)
| true | true |
f727385f21f2d811533cd0c665f73487b0a69b03 | 4,078 | py | Python | salt/runners/git_pillar.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | salt/runners/git_pillar.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2017-07-10T21:44:39.000Z | 2017-07-10T21:44:39.000Z | salt/runners/git_pillar.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
Runner module to directly manage the git external pillar
"""
import logging
import salt.pillar.git_pillar
import salt.utils.gitfs
from salt.exceptions import SaltRunnerError
log = logging.getLogger(__name__)
def update(branch=None, repo=None):
    """
    .. versionadded:: 2014.1.0
    .. versionchanged:: 2015.8.4
        This runner function now supports the :ref:`git_pillar
        configuration schema <git-pillar-configuration>` introduced in
        2015.8.0. Additionally, the branch and repo can now be omitted to
        update all git_pillar remotes. The return data has also changed to
        a dictionary. The values will be ``True`` only if new commits were
        fetched, and ``False`` if there were errors or no new commits were
        fetched.
    .. versionchanged:: 2018.3.0
        The return for a given git_pillar remote will now be ``None`` when no
        changes were fetched. ``False`` now is reserved only for instances in
        which there were errors.
    .. versionchanged:: 3001
        The repo parameter also matches against the repo name.
    Fetch one or all configured git_pillar remotes.
    .. note::
        This will *not* fast-forward the git_pillar cachedir on the master. All
        it does is perform a ``git fetch``. If this runner is executed with
        ``-l debug``, you may see a log message that says that the repo is
        up-to-date. Keep in mind that Salt automatically fetches git_pillar
        repos roughly every 60 seconds (or whatever
        :conf_master:`loop_interval` is set to). So, it is possible that the
        repo was fetched automatically in the time between when changes were
        pushed to the repo, and when this runner was executed. When in doubt,
        simply refresh pillar data using :py:func:`saltutil.refresh_pillar
        <salt.modules.saltutil.refresh_pillar>` and then use
        :py:func:`pillar.item <salt.modules.pillar.item>` to check if the
        pillar data has changed as expected.
    CLI Example:
    .. code-block:: bash
        # Update specific branch and repo
        salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git'
        # Update specific repo, by name
        salt-run git_pillar.update repo=myrepo
        # Update all repos
        salt-run git_pillar.update
        # Run with debug logging
        salt-run git_pillar.update -l debug
    """
    ret = {}
    for ext_pillar in __opts__.get("ext_pillar", []):
        pillar_type = next(iter(ext_pillar))
        if pillar_type != "git":
            # Non-git ext_pillar entries are ignored by this runner.
            continue
        pillar_conf = ext_pillar[pillar_type]
        pillar = salt.utils.gitfs.GitPillar(
            __opts__,
            pillar_conf,
            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
            global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
        )
        for remote in pillar.remotes:
            # Skip this remote if it doesn't match the search criteria
            if branch is not None:
                if branch != remote.branch:
                    continue
            if repo is not None:
                if repo != remote.url and repo != getattr(remote, "name", None):
                    continue
            try:
                result = remote.fetch()
            except Exception as exc:  # pylint: disable=broad-except
                log.error(
                    "Exception '%s' caught while fetching git_pillar " "remote '%s'",
                    exc,
                    remote.id,
                    exc_info_on_loglevel=logging.DEBUG,
                )
                result = False
            finally:
                # Release the update lock whether or not the fetch succeeded.
                remote.clear_lock()
            ret[remote.id] = result
    if not ret:
        if branch is not None or repo is not None:
            raise SaltRunnerError(
                "Specified git branch/repo not found in ext_pillar config"
            )
        else:
            raise SaltRunnerError("No git_pillar remotes are configured")
    return ret
| 37.759259 | 85 | 0.620157 |
import logging
import salt.pillar.git_pillar
import salt.utils.gitfs
from salt.exceptions import SaltRunnerError
log = logging.getLogger(__name__)
def update(branch=None, repo=None):
    """Fetch one or all configured git_pillar remotes.

    Performs a ``git fetch`` for every matching remote (the cachedir is not
    fast-forwarded) and returns a dict mapping remote id to the fetch result
    (``False`` on error).  Raises SaltRunnerError when nothing matched.

    branch -- only fetch remotes checked out on this branch (all if None)
    repo   -- only fetch remotes whose URL or name matches (all if None)
    """
    ret = {}
    for ext_pillar in __opts__.get("ext_pillar", []):
        pillar_type = next(iter(ext_pillar))
        if pillar_type != "git":
            # Only git ext_pillar entries are handled by this runner.
            continue
        pillar_conf = ext_pillar[pillar_type]
        pillar = salt.utils.gitfs.GitPillar(
            __opts__,
            pillar_conf,
            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
            global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
        )
        for remote in pillar.remotes:
            # Skip remotes that do not match the branch/repo filters.
            if branch is not None:
                if branch != remote.branch:
                    continue
            if repo is not None:
                if repo != remote.url and repo != getattr(remote, "name", None):
                    continue
            try:
                result = remote.fetch()
            except Exception as exc:  # pylint: disable=broad-except
                log.error(
                    "Exception '%s' caught while fetching git_pillar " "remote '%s'",
                    exc,
                    remote.id,
                    exc_info_on_loglevel=logging.DEBUG,
                )
                result = False
            finally:
                # Always release the update lock, fetched or not.
                remote.clear_lock()
            ret[remote.id] = result
    if not ret:
        if branch is not None or repo is not None:
            raise SaltRunnerError(
                "Specified git branch/repo not found in ext_pillar config"
            )
        else:
            raise SaltRunnerError("No git_pillar remotes are configured")
    return ret
| true | true |
f72738ce22a36dacc0601114240ea55abf166fad | 832 | py | Python | FreeTAKServer/controllers/SpecificCoTControllers/SendFederatedCoT.py | Tapawingo/FreeTakServer | 30259fa0fb5a69bbf6606f06d9cd40a63d2aa4fd | [
"MIT"
] | 27 | 2020-05-01T01:45:59.000Z | 2020-07-03T00:17:13.000Z | FreeTAKServer/controllers/SpecificCoTControllers/SendFederatedCoT.py | Tapawingo/FreeTakServer | 30259fa0fb5a69bbf6606f06d9cd40a63d2aa4fd | [
"MIT"
] | 34 | 2020-04-26T11:25:52.000Z | 2020-07-03T21:06:34.000Z | FreeTAKServer/controllers/SpecificCoTControllers/SendFederatedCoT.py | Tapawingo/FreeTakServer | 30259fa0fb5a69bbf6606f06d9cd40a63d2aa4fd | [
"MIT"
] | 15 | 2020-05-01T01:46:07.000Z | 2020-07-03T12:14:04.000Z | from FreeTAKServer.model.SpecificCoT.SendFederatedCoT import SendFederatedCoT
from .SendCoTAbstractController import SendCoTAbstractController
from FreeTAKServer.controllers.configuration.LoggingConstants import LoggingConstants
from FreeTAKServer.controllers.CreateLoggerController import CreateLoggerController
loggingConstants = LoggingConstants()
logger = CreateLoggerController("SendDisconnectController").getLogger()
class SendFederatedCoT(SendCoTAbstractController):
    """Controller that builds and fills a federated CoT model object."""

    def __init__(self, RawCoT):
        """Create the federated CoT event model and populate it from RawCoT.

        The object is intentionally not persisted to the DB (addToDB=False).
        """
        try:
            # BUG FIX: this controller class shadows the imported model class of
            # the same name, so the original `SendFederatedCoT()` call recursed
            # into this controller (and always failed, since RawCoT is
            # required), silently logging an error on every call.  Re-import
            # the model under an alias so the model object is actually created.
            from FreeTAKServer.model.SpecificCoT.SendFederatedCoT import (
                SendFederatedCoT as SendFederatedCoTModel,
            )
            tempObject = super().Event.FederatedCoT()
            # Renamed from `object`, which shadowed the builtin.
            modelObject = SendFederatedCoTModel()
            self.fill_object(modelObject, tempObject, RawCoT, addToDB=False)
        except Exception as e:
            logger.error("there has been an exception in the creation of the send federated cot object " + str(e))
from .SendCoTAbstractController import SendCoTAbstractController
from FreeTAKServer.controllers.configuration.LoggingConstants import LoggingConstants
from FreeTAKServer.controllers.CreateLoggerController import CreateLoggerController
loggingConstants = LoggingConstants()
logger = CreateLoggerController("SendDisconnectController").getLogger()
class SendFederatedCoT(SendCoTAbstractController):
    """Controller that builds and fills a federated CoT model object."""
    def __init__(self, RawCoT):
        try:
            tempObject = super().Event.FederatedCoT()
            # NOTE(review): this instantiates the controller class itself (which
            # shadows the imported model class of the same name) and omits the
            # required RawCoT argument, so it always raises TypeError and falls
            # into the except below.  The imported model class was almost
            # certainly intended here.  Also shadows the builtin `object`.
            object = SendFederatedCoT()
            self.fill_object(object, tempObject, RawCoT, addToDB=False)
        except Exception as e:
            logger.error("there has been an exception in the creation of the send federated cot object " + str(e))
f7273915b911e2c9ab5a33795229deb37059132d | 22,981 | py | Python | Tools/scripts/freeze_modules.py | erickpeirson/cpython | d441437ee71ae174c008c23308b749b91020ba77 | [
"0BSD"
] | null | null | null | Tools/scripts/freeze_modules.py | erickpeirson/cpython | d441437ee71ae174c008c23308b749b91020ba77 | [
"0BSD"
] | null | null | null | Tools/scripts/freeze_modules.py | erickpeirson/cpython | d441437ee71ae174c008c23308b749b91020ba77 | [
"0BSD"
] | null | null | null | """Freeze modules and regen related files (e.g. Python/frozen.c).
See the notes at the top of Python/frozen.c for more info.
"""
from collections import namedtuple
import hashlib
import os
import ntpath
import posixpath
import platform
import subprocess
import sys
import textwrap
import time
from update_file import updating_file_with_tmpfile, update_file_with_tmpfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
ROOT_DIR = os.path.abspath(ROOT_DIR)
STDLIB_DIR = os.path.join(ROOT_DIR, 'Lib')
# If MODULES_DIR is changed then the .gitattributes and .gitignore files
# need to be updated.
MODULES_DIR = os.path.join(ROOT_DIR, 'Python', 'frozen_modules')
if sys.platform != "win32":
TOOL = os.path.join(ROOT_DIR, 'Programs', '_freeze_module')
if not os.path.isfile(TOOL):
# When building out of the source tree, get the tool from the current
# directory
TOOL = os.path.join('Programs', '_freeze_module')
TOOL = os.path.abspath(TOOL)
if not os.path.isfile(TOOL):
sys.exit("ERROR: missing _freeze_module")
else:
def find_tool():
archs = ['amd64', 'win32']
if platform.machine() == "ARM64":
archs.append('arm64')
for arch in archs:
for exe in ['_freeze_module.exe', '_freeze_module_d.exe']:
tool = os.path.join(ROOT_DIR, 'PCbuild', arch, exe)
if os.path.isfile(tool):
return tool
sys.exit("ERROR: missing _freeze_module.exe; you need to run PCbuild/build.bat")
TOOL = find_tool()
del find_tool
MANIFEST = os.path.join(MODULES_DIR, 'MANIFEST')
FROZEN_FILE = os.path.join(ROOT_DIR, 'Python', 'frozen.c')
MAKEFILE = os.path.join(ROOT_DIR, 'Makefile.pre.in')
PCBUILD_PROJECT = os.path.join(ROOT_DIR, 'PCbuild', '_freeze_module.vcxproj')
PCBUILD_FILTERS = os.path.join(ROOT_DIR, 'PCbuild', '_freeze_module.vcxproj.filters')
TEST_CTYPES = os.path.join(STDLIB_DIR, 'ctypes', 'test', 'test_values.py')
OS_PATH = 'ntpath' if os.name == 'nt' else 'posixpath'
# These are modules that get frozen.
FROZEN = [
# See parse_frozen_spec() for the format.
# In cases where the frozenid is duplicated, the first one is re-used.
('import system', [
# These frozen modules are necessary for bootstrapping
# the import system.
'importlib._bootstrap : _frozen_importlib',
'importlib._bootstrap_external : _frozen_importlib_external',
# This module is important because some Python builds rely
# on a builtin zip file instead of a filesystem.
'zipimport',
]),
('stdlib - startup, without site (python -S)', [
'abc',
'codecs',
# For now we do not freeze the encodings, due # to the noise all
# those extra modules add to the text printed during the build.
# (See https://github.com/python/cpython/pull/28398#pullrequestreview-756856469.)
#'<encodings.*>',
'io',
]),
('stdlib - startup, with site', [
'_collections_abc',
'_sitebuiltins',
'genericpath',
'ntpath',
'posixpath',
# We must explicitly mark os.path as a frozen module
# even though it will never be imported.
f'{OS_PATH} : os.path',
'os',
'site',
'stat',
]),
('Test module', [
'__hello__',
'__hello__ : <__phello__>',
'__hello__ : __phello__.spam',
]),
]
ESSENTIAL = {
'importlib._bootstrap',
'importlib._bootstrap_external',
'zipimport',
}
#######################################
# platform-specific helpers
if os.path is posixpath:
    # Native POSIX host: a POSIX-style display path is just os.path.relpath.
    relpath_for_posix_display = os.path.relpath
    def relpath_for_windows_display(path, base):
        # Re-join the native components with ntpath so the result uses
        # Windows separators regardless of the host platform.
        return ntpath.relpath(
            ntpath.join(*path.split(os.path.sep)),
            ntpath.join(*base.split(os.path.sep)),
        )
else:
    # Native Windows host: the mirror image of the branch above.
    relpath_for_windows_display = ntpath.relpath
    def relpath_for_posix_display(path, base):
        return posixpath.relpath(
            posixpath.join(*path.split(os.path.sep)),
            posixpath.join(*base.split(os.path.sep)),
        )
#######################################
# specs
def parse_frozen_specs(sectionalspecs=FROZEN, destdir=None):
    """Yield a FrozenModule for every spec in the (section, specs) pairs.

    Sources are de-duplicated by frozen ID: the first spec mentioning an ID
    creates its FrozenSource; later specs (aliases) re-use it.
    """
    seen = {}
    for section, specs in sectionalspecs:
        parsed = _parse_specs(specs, section, seen)
        for frozenid, pyfile, modname, ispkg, section in parsed:
            try:
                source = seen[frozenid]
            except KeyError:
                source = FrozenSource.from_id(frozenid, pyfile, destdir)
                seen[frozenid] = source
            else:
                # An alias of an already-seen module must not re-specify a file.
                assert not pyfile
            yield FrozenModule(modname, ispkg, section, source)
def _parse_specs(specs, section, seen):
    """Yield one info tuple per module for each spec in *specs*.

    A spec may expand into extra submodule entries (see _parse_spec);
    those are yielded immediately after their parent's entry.
    """
    for spec in specs:
        info, subs = _parse_spec(spec, seen, section)
        yield info
        # Idiom fix: delegate to the sub-generator with `yield from` instead
        # of a manual loop that re-bound `info` over the parent's tuple.
        yield from subs or ()
def _parse_spec(spec, knownids=None, section=None):
    """Yield an info tuple for each module corresponding to the given spec.

    The info consists of: (frozenid, pyfile, modname, ispkg, section).

    Supported formats:

      frozenid
      frozenid : modname
      frozenid : modname = pyfile

    "frozenid" and "modname" must be valid module names (dot-separated
    identifiers).  If "modname" is not provided then "frozenid" is used.
    If "pyfile" is not provided then the filename of the module
    corresponding to "frozenid" is used.

    Angle brackets around a frozenid (e.g. '<encodings>") indicate
    it is a package.  This also means it must be an actual module
    (i.e. "pyfile" cannot have been provided).  Such values can have
    patterns to expand submodules:

      <encodings.*> - also freeze all direct submodules
      <encodings.**.*> - also freeze the full submodule tree

    As with "frozenid", angle brackets around "modname" indicate
    it is a package.  However, in this case "pyfile" should not
    have been provided and patterns in "modname" are not supported.
    Also, if "modname" has brackets then "frozenid" should not,
    and "pyfile" should have been provided.
    """
    frozenid, _, remainder = spec.partition(':')
    modname, _, pyfile = remainder.partition('=')
    frozenid = frozenid.strip()
    modname = modname.strip()
    pyfile = pyfile.strip()
    submodules = None
    if modname.startswith('<') and modname.endswith('>'):
        # Case: the alias is marked as a package ("frozenid : <modname>").
        assert check_modname(frozenid), spec
        modname = modname[1:-1]
        assert check_modname(modname), spec
        if frozenid in knownids:
            pass
        elif pyfile:
            assert not os.path.isdir(pyfile), spec
        else:
            pyfile = _resolve_module(frozenid, ispkg=False)
        ispkg = True
    elif pyfile:
        # Case: an explicit source file was given; must be a brand-new ID.
        assert check_modname(frozenid), spec
        assert not knownids or frozenid not in knownids, spec
        assert check_modname(modname), spec
        assert not os.path.isdir(pyfile), spec
        ispkg = False
    elif knownids and frozenid in knownids:
        # Case: an alias for an already-seen module.
        assert check_modname(frozenid), spec
        assert check_modname(modname), spec
        ispkg = False
    else:
        # Case: a plain module (or "<pkg>" pattern) resolved on disk.
        assert not modname or check_modname(modname), spec
        resolved = iter(resolve_modules(frozenid))
        frozenid, pyfile, ispkg = next(resolved)
        if not modname:
            modname = frozenid
        if ispkg:
            pkgid = frozenid
            pkgname = modname
            pkgfiles = {pyfile: pkgid}
            def iter_subs():
                # Lazily re-map each submodule's ID onto the alias name and
                # de-duplicate package __init__ files via pkgfiles.
                for frozenid, pyfile, ispkg in resolved:
                    assert not knownids or frozenid not in knownids, (frozenid, spec)
                    if pkgname:
                        modname = frozenid.replace(pkgid, pkgname, 1)
                    else:
                        modname = frozenid
                    if pyfile:
                        if pyfile in pkgfiles:
                            frozenid = pkgfiles[pyfile]
                            pyfile = None
                        elif ispkg:
                            pkgfiles[pyfile] = frozenid
                    yield frozenid, pyfile, modname, ispkg, section
            submodules = iter_subs()
    info = (frozenid, pyfile or None, modname, ispkg, section)
    return info, submodules
#######################################
# frozen source files
class FrozenSource(namedtuple('FrozenSource', 'id pyfile frozenfile')):
    """A frozen-module source: frozen ID, .py source file, generated .h file."""
    @classmethod
    def from_id(cls, frozenid, pyfile=None, destdir=MODULES_DIR):
        """Build a FrozenSource, defaulting the .py file to the stdlib location."""
        if not pyfile:
            pyfile = os.path.join(STDLIB_DIR, *frozenid.split('.')) + '.py'
            #assert os.path.exists(pyfile), (frozenid, pyfile)
        frozenfile = resolve_frozen_file(frozenid, destdir)
        return cls(frozenid, pyfile, frozenfile)
    @property
    def frozenid(self):
        # Readable alias for the namedtuple's `id` field.
        return self.id
    @property
    def modname(self):
        # Only sources living in the stdlib tree have an importable name here.
        if self.pyfile.startswith(STDLIB_DIR):
            return self.id
        return None
    @property
    def symbol(self):
        # This matches what we do in Programs/_freeze_module.c:
        name = self.frozenid.replace('.', '_')
        return '_Py_M__' + name
def resolve_frozen_file(frozenid, destdir=MODULES_DIR):
    """Return the filename corresponding to the given frozen ID.

    For stdlib modules the ID will always be the full name
    of the source module.  *frozenid* may also be any object
    exposing a ``frozenid`` attribute (e.g. a FrozenSource).
    """
    if not isinstance(frozenid, str):
        try:
            frozenid = frozenid.frozenid
        except AttributeError:
            raise ValueError(f'unsupported frozenid {frozenid!r}')
    # We use a consistent naming convention for all frozen modules:
    # the header is named directly after the frozen ID.
    basename = f'{frozenid}.h'
    return os.path.join(destdir, basename) if destdir else basename
#######################################
# frozen modules
class FrozenModule(namedtuple('FrozenModule', 'name ispkg section source')):
    """A frozen module entry: dotted name, package flag, section, and source."""
    def __getattr__(self, name):
        # Delegate unknown attributes (frozenid, pyfile, ...) to the source.
        return getattr(self.source, name)
    @property
    def modname(self):
        return self.name
    def summarize(self):
        """Return a dict of display fields for the MANIFEST file."""
        source = self.source.modname
        if source:
            # Stdlib sources are shown as "<modname>" rather than a file path.
            source = f'<{source}>'
        else:
            source = relpath_for_posix_display(self.pyfile, ROOT_DIR)
        return {
            'module': self.name,
            'ispkg': self.ispkg,
            'source': source,
            'frozen': os.path.basename(self.frozenfile),
            'checksum': _get_checksum(self.frozenfile),
        }
def _iter_sources(modules):
seen = set()
for mod in modules:
if mod.source not in seen:
yield mod.source
seen.add(mod.source)
#######################################
# generic helpers
def _get_checksum(filename):
with open(filename) as infile:
text = infile.read()
m = hashlib.sha256()
m.update(text.encode('utf8'))
return m.hexdigest()
def resolve_modules(modname, pyfile=None):
    """Yield (modname, pyfile, ispkg) for the named module and any submodules.

    A name in angle brackets ("<pkg>") denotes a package; it may end in a
    match pattern (".*" or ".**.*") to also expand direct or all submodules.
    """
    if modname.startswith('<') and modname.endswith('>'):
        if pyfile:
            assert os.path.isdir(pyfile) or os.path.basename(pyfile) == '__init__.py', pyfile
        ispkg = True
        modname = modname[1:-1]
        rawname = modname
        # For now, we only expect match patterns at the end of the name.
        _modname, sep, match = modname.rpartition('.')
        if sep:
            if _modname.endswith('.**'):
                modname = _modname[:-3]
                match = f'**.{match}'
            elif match and not match.isidentifier():
                modname = _modname
            # Otherwise it's a plain name so we leave it alone.
            else:
                match = None
        else:
            match = None
    else:
        ispkg = False
        rawname = modname
        match = None
    if not check_modname(modname):
        raise ValueError(f'not a valid module name ({rawname})')
    if not pyfile:
        pyfile = _resolve_module(modname, ispkg=ispkg)
    elif os.path.isdir(pyfile):
        # A directory was given; resolve the module file inside it.
        pyfile = _resolve_module(modname, pyfile, ispkg)
    yield modname, pyfile, ispkg
    if match:
        # Expand submodules relative to the package's directory.
        pkgdir = os.path.dirname(pyfile)
        yield from iter_submodules(modname, pkgdir, match)
def check_modname(modname):
    """Return True if every dot-separated component is a valid identifier."""
    return all(map(str.isidentifier, modname.split('.')))
def iter_submodules(pkgname, pkgdir=None, match='*'):
    """Yield (modname, pyfile, ispkg) for *pkgname*'s submodules matching *match*."""
    if not pkgdir:
        pkgdir = os.path.join(STDLIB_DIR, *pkgname.split('.'))
    if not match:
        # No pattern means "everything, recursively".
        match = '**.*'
    match_modname = _resolve_modname_matcher(match, pkgdir)
    def _iter_submodules(pkgname, pkgdir):
        # Sorted for deterministic output across filesystems.
        for entry in sorted(os.scandir(pkgdir), key=lambda e: e.name):
            matched, recursive = match_modname(entry.name)
            if not matched:
                continue
            modname = f'{pkgname}.{entry.name}'
            if modname.endswith('.py'):
                yield modname[:-3], entry.path, False
            elif entry.is_dir():
                pyfile = os.path.join(entry.path, '__init__.py')
                # We ignore namespace packages.
                if os.path.exists(pyfile):
                    yield modname, pyfile, True
                    if recursive:
                        yield from _iter_submodules(modname, entry.path)
    return _iter_submodules(pkgname, pkgdir)
def _resolve_modname_matcher(match, rootdir=None):
if isinstance(match, str):
if match.startswith('**.'):
recursive = True
pat = match[3:]
assert match
else:
recursive = False
pat = match
if pat == '*':
def match_modname(modname):
return True, recursive
else:
raise NotImplementedError(match)
elif callable(match):
match_modname = match(rootdir)
else:
raise ValueError(f'unsupported matcher {match!r}')
return match_modname
def _resolve_module(modname, pathentry=STDLIB_DIR, ispkg=False):
    """Map a dotted module name to its .py file under *pathentry*."""
    assert pathentry, pathentry
    base = os.path.normpath(pathentry)
    assert os.path.isabs(base)
    parts = modname.split('.')
    if ispkg:
        # A package's code lives in its __init__.py.
        return os.path.join(base, *parts, '__init__.py')
    return os.path.join(base, *parts) + '.py'
#######################################
# regenerating dependent files
def find_marker(lines, marker, file):
    """Return the index of the first line containing *marker* (error if absent)."""
    pos = next((i for i, line in enumerate(lines) if marker in line), None)
    if pos is None:
        raise Exception(f"Can't find {marker!r} in file {file}")
    return pos
def replace_block(lines, start_marker, end_marker, replacements, file):
    """Replace the lines strictly between the two marker lines.

    The marker lines themselves are preserved; every replacement line is
    normalized to end with exactly one newline.
    """
    start_pos = find_marker(lines, start_marker, file)
    end_pos = find_marker(lines, end_marker, file)
    if end_pos <= start_pos:
        raise Exception(f"End marker {end_marker!r} "
                        f"occurs before start marker {start_marker!r} "
                        f"in file {file}")
    head = lines[:start_pos + 1]
    tail = lines[end_pos:]
    normalized = [line.rstrip() + '\n' for line in replacements]
    return head + normalized + tail
def regen_manifest(modules):
    """Rewrite the MANIFEST file listing every frozen module."""
    header = 'module ispkg source frozen checksum'.split()
    # Start column widths at 5 so the dashed underline is never too short.
    widths = [5] * len(header)
    rows = []
    for mod in modules:
        info = mod.summarize()
        row = []
        for i, col in enumerate(header):
            value = info[col]
            if col == 'checksum':
                # Only the checksum prefix is shown; enough to spot changes.
                value = value[:12]
            elif col == 'ispkg':
                value = 'YES' if value else 'no'
            widths[i] = max(widths[i], len(value))
            row.append(value or '-')
        rows.append(row)
    modlines = [
        '# The list of frozen modules with key information.',
        '# Note that the "check_generated_files" CI job will identify',
        '# when source files were changed but regen-frozen wasn\'t run.',
        '# This file is auto-generated by Tools/scripts/freeze_modules.py.',
        ' '.join(c.center(w) for c, w in zip(header, widths)).rstrip(),
        ' '.join('-' * w for w in widths),
    ]
    for row in rows:
        # Pad each cell to its column width (ispkg is centered, rest left).
        for i, w in enumerate(widths):
            if header[i] == 'ispkg':
                row[i] = row[i].center(w)
            else:
                row[i] = row[i].ljust(w)
        modlines.append(' '.join(row).rstrip())
    print(f'# Updating {os.path.relpath(MANIFEST)}')
    with open(MANIFEST, 'w') as outfile:
        lines = (l + '\n' for l in modlines)
        outfile.writelines(lines)
def regen_frozen(modules):
    """Regenerate the #include list and _frozen table in Python/frozen.c."""
    headerlines = []
    parentdir = os.path.dirname(FROZEN_FILE)
    for src in _iter_sources(modules):
        # Adding a comment to separate sections here doesn't add much,
        # so we don't.
        header = relpath_for_posix_display(src.frozenfile, parentdir)
        headerlines.append(f'#include "{header}"')
    deflines = []
    indent = '    '
    lastsection = None
    for mod in modules:
        if mod.section != lastsection:
            # New section: blank separator (after the first) plus a C comment.
            if lastsection is not None:
                deflines.append('')
            deflines.append(f'/* {mod.section} */')
        lastsection = mod.section
        symbol = mod.symbol
        # A negative size (leading '-') marks the entry as a package.
        pkg = '-' if mod.ispkg else ''
        line = ('{"%s", %s, %s(int)sizeof(%s)},'
                ) % (mod.name, symbol, pkg, symbol)
        # TODO: Consider not folding lines
        if len(line) < 80:
            deflines.append(line)
        else:
            line1, _, line2 = line.rpartition(' ')
            deflines.append(line1)
            deflines.append(indent + line2)
    if not deflines[0]:
        del deflines[0]
    for i, line in enumerate(deflines):
        if line:
            deflines[i] = indent + line
    print(f'# Updating {os.path.relpath(FROZEN_FILE)}')
    with updating_file_with_tmpfile(FROZEN_FILE) as (infile, outfile):
        lines = infile.readlines()
        # TODO: Use more obvious markers, e.g.
        # $START GENERATED FOOBAR$ / $END GENERATED FOOBAR$
        lines = replace_block(
            lines,
            "/* Includes for frozen modules: */",
            "/* End includes */",
            headerlines,
            FROZEN_FILE,
        )
        lines = replace_block(
            lines,
            "static const struct _frozen _PyImport_FrozenModules[] =",
            "/* sentinel */",
            deflines,
            FROZEN_FILE,
        )
        outfile.writelines(lines)
def regen_makefile(modules):
    """Regenerate the frozen-module file lists and build rules in Makefile.pre.in."""
    pyfiles = []
    frozenfiles = []
    rules = ['']
    for src in _iter_sources(modules):
        header = relpath_for_posix_display(src.frozenfile, ROOT_DIR)
        frozenfiles.append(f'\t\t{header} \\')
        pyfile = relpath_for_posix_display(src.pyfile, ROOT_DIR)
        pyfiles.append(f'\t\t{pyfile} \\')
        # One rule per frozen module: the .h depends on the tool and the .py.
        freeze = (f'Programs/_freeze_module {src.frozenid} '
                  f'$(srcdir)/{pyfile} $(srcdir)/{header}')
        rules.extend([
            f'{header}: Programs/_freeze_module {pyfile}',
            f'\t{freeze}',
            '',
        ])
    # Drop the trailing line-continuation from the last entry of each list.
    pyfiles[-1] = pyfiles[-1].rstrip(" \\")
    frozenfiles[-1] = frozenfiles[-1].rstrip(" \\")
    print(f'# Updating {os.path.relpath(MAKEFILE)}')
    with updating_file_with_tmpfile(MAKEFILE) as (infile, outfile):
        lines = infile.readlines()
        lines = replace_block(
            lines,
            "FROZEN_FILES_IN =",
            "# End FROZEN_FILES_IN",
            pyfiles,
            MAKEFILE,
        )
        lines = replace_block(
            lines,
            "FROZEN_FILES_OUT =",
            "# End FROZEN_FILES_OUT",
            frozenfiles,
            MAKEFILE,
        )
        lines = replace_block(
            lines,
            "# BEGIN: freezing modules",
            "# END: freezing modules",
            rules,
            MAKEFILE,
        )
        outfile.writelines(lines)
def regen_pcbuild(modules):
    """Rewrite the frozen-module sections of the PCbuild project files.

    Regenerates the <None> item groups in _freeze_module.vcxproj and its
    .filters companion from the resolved frozen-module sources.
    """
    projlines = []
    filterlines = []
    for src in _iter_sources(modules):
        pyfile = relpath_for_windows_display(src.pyfile, ROOT_DIR)
        header = relpath_for_windows_display(src.frozenfile, ROOT_DIR)
        # Intermediate generated header written into $(IntDir) by the tool.
        intfile = ntpath.splitext(ntpath.basename(header))[0] + '.g.h'
        projlines.append(f'  <None Include="..\\{pyfile}">')
        projlines.append(f'    <ModName>{src.frozenid}</ModName>')
        projlines.append(f'    <IntFile>$(IntDir){intfile}</IntFile>')
        projlines.append(f'    <OutFile>$(PySourcePath){header}</OutFile>')
        # Fixed: literal with no placeholders should not carry an f-prefix.
        projlines.append('  </None>')
        filterlines.append(f'  <None Include="..\\{pyfile}">')
        filterlines.append('    <Filter>Python Files</Filter>')
        filterlines.append('  </None>')
    print(f'# Updating {os.path.relpath(PCBUILD_PROJECT)}')
    with updating_file_with_tmpfile(PCBUILD_PROJECT) as (infile, outfile):
        lines = infile.readlines()
        lines = replace_block(
            lines,
            '<!-- BEGIN frozen modules -->',
            '<!-- END frozen modules -->',
            projlines,
            PCBUILD_PROJECT,
        )
        outfile.writelines(lines)
    print(f'# Updating {os.path.relpath(PCBUILD_FILTERS)}')
    with updating_file_with_tmpfile(PCBUILD_FILTERS) as (infile, outfile):
        lines = infile.readlines()
        lines = replace_block(
            lines,
            '<!-- BEGIN frozen modules -->',
            '<!-- END frozen modules -->',
            filterlines,
            PCBUILD_FILTERS,
        )
        outfile.writelines(lines)
#######################################
# freezing modules
def freeze_module(modname, pyfile=None, destdir=MODULES_DIR):
    """Generate the frozen module .h file for the given module."""
    # A single timestamp suffix groups all temp files produced by this run.
    suffix = f'.{int(time.time())}'
    for resolved_name, resolved_pyfile, _ispkg in resolve_modules(modname, pyfile):
        target = resolve_frozen_file(resolved_name, destdir)
        _freeze_module(resolved_name, resolved_pyfile, target, suffix)
def _freeze_module(frozenid, pyfile, frozenfile, tmpsuffix):
    """Run the _freeze_module tool and atomically update *frozenfile*.

    The tool writes to a temporary file named with the caller-provided
    *tmpsuffix* (a leading-dot timestamp from both call sites), so a failed
    run never leaves a half-written header behind.
    """
    # Bug fix: tmpsuffix was accepted but ignored (a fresh timestamp was
    # recomputed here), and a stray debug print(tmpfile) was left in.
    tmpfile = f'{frozenfile}{tmpsuffix}'
    argv = [TOOL, frozenid, pyfile, tmpfile]
    print('#', ' '.join(os.path.relpath(a) for a in argv), flush=True)
    try:
        subprocess.run(argv, check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        if not os.path.exists(TOOL):
            sys.exit(f'ERROR: missing {TOOL}; you need to run "make regen-frozen"')
        raise  # re-raise
    update_file_with_tmpfile(frozenfile, tmpfile, create=True)
#######################################
# the script
def main():
    """Freeze every configured module and regenerate dependent files."""
    # Expand the raw specs up front, preserving order.
    frozen = list(parse_frozen_specs(destdir=MODULES_DIR))

    # First regenerate the build-system inputs.
    regen_makefile(frozen)
    regen_pcbuild(frozen)

    # Then actually freeze each target module.
    suffix = f'.{int(time.time())}'
    for source in _iter_sources(frozen):
        _freeze_module(source.frozenid, source.pyfile, source.frozenfile, suffix)

    # Finally regenerate the files that depend on the frozen headers.
    regen_frozen(frozen)
    regen_manifest(frozen)
if __name__ == '__main__':
    argv = sys.argv[1:]
    if argv:
        # Bug fix: this was a plain string, so "{argv}" was printed
        # literally instead of the offending arguments.
        sys.exit(f'ERROR: got unexpected args {argv}')
    main()
| 32.924069 | 93 | 0.591576 |
from collections import namedtuple
import hashlib
import os
import ntpath
import posixpath
import platform
import subprocess
import sys
import textwrap
import time
from update_file import updating_file_with_tmpfile, update_file_with_tmpfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
ROOT_DIR = os.path.abspath(ROOT_DIR)
STDLIB_DIR = os.path.join(ROOT_DIR, 'Lib')
MODULES_DIR = os.path.join(ROOT_DIR, 'Python', 'frozen_modules')
if sys.platform != "win32":
TOOL = os.path.join(ROOT_DIR, 'Programs', '_freeze_module')
if not os.path.isfile(TOOL):
TOOL = os.path.join('Programs', '_freeze_module')
TOOL = os.path.abspath(TOOL)
if not os.path.isfile(TOOL):
sys.exit("ERROR: missing _freeze_module")
else:
def find_tool():
archs = ['amd64', 'win32']
if platform.machine() == "ARM64":
archs.append('arm64')
for arch in archs:
for exe in ['_freeze_module.exe', '_freeze_module_d.exe']:
tool = os.path.join(ROOT_DIR, 'PCbuild', arch, exe)
if os.path.isfile(tool):
return tool
sys.exit("ERROR: missing _freeze_module.exe; you need to run PCbuild/build.bat")
TOOL = find_tool()
del find_tool
MANIFEST = os.path.join(MODULES_DIR, 'MANIFEST')
FROZEN_FILE = os.path.join(ROOT_DIR, 'Python', 'frozen.c')
MAKEFILE = os.path.join(ROOT_DIR, 'Makefile.pre.in')
PCBUILD_PROJECT = os.path.join(ROOT_DIR, 'PCbuild', '_freeze_module.vcxproj')
PCBUILD_FILTERS = os.path.join(ROOT_DIR, 'PCbuild', '_freeze_module.vcxproj.filters')
TEST_CTYPES = os.path.join(STDLIB_DIR, 'ctypes', 'test', 'test_values.py')
OS_PATH = 'ntpath' if os.name == 'nt' else 'posixpath'
FROZEN = [
('import system', [
'importlib._bootstrap : _frozen_importlib',
'importlib._bootstrap_external : _frozen_importlib_external',
'zipimport',
]),
('stdlib - startup, without site (python -S)', [
'abc',
'codecs',
]),
('stdlib - startup, with site', [
'_collections_abc',
'_sitebuiltins',
'genericpath',
'ntpath',
'posixpath',
f'{OS_PATH} : os.path',
'os',
'site',
'stat',
]),
('Test module', [
'__hello__',
'__hello__ : <__phello__>',
'__hello__ : __phello__.spam',
]),
]
ESSENTIAL = {
'importlib._bootstrap',
'importlib._bootstrap_external',
'zipimport',
}
ip()
pyfile = pyfile.strip()
submodules = None
if modname.startswith('<') and modname.endswith('>'):
assert check_modname(frozenid), spec
modname = modname[1:-1]
assert check_modname(modname), spec
if frozenid in knownids:
pass
elif pyfile:
assert not os.path.isdir(pyfile), spec
else:
pyfile = _resolve_module(frozenid, ispkg=False)
ispkg = True
elif pyfile:
assert check_modname(frozenid), spec
assert not knownids or frozenid not in knownids, spec
assert check_modname(modname), spec
assert not os.path.isdir(pyfile), spec
ispkg = False
elif knownids and frozenid in knownids:
assert check_modname(frozenid), spec
assert check_modname(modname), spec
ispkg = False
else:
assert not modname or check_modname(modname), spec
resolved = iter(resolve_modules(frozenid))
frozenid, pyfile, ispkg = next(resolved)
if not modname:
modname = frozenid
if ispkg:
pkgid = frozenid
pkgname = modname
pkgfiles = {pyfile: pkgid}
def iter_subs():
for frozenid, pyfile, ispkg in resolved:
assert not knownids or frozenid not in knownids, (frozenid, spec)
if pkgname:
modname = frozenid.replace(pkgid, pkgname, 1)
else:
modname = frozenid
if pyfile:
if pyfile in pkgfiles:
frozenid = pkgfiles[pyfile]
pyfile = None
elif ispkg:
pkgfiles[pyfile] = frozenid
yield frozenid, pyfile, modname, ispkg, section
submodules = iter_subs()
info = (frozenid, pyfile or None, modname, ispkg, section)
return info, submodules
r=MODULES_DIR):
if not isinstance(frozenid, str):
try:
frozenid = frozenid.frozenid
except AttributeError:
raise ValueError(f'unsupported frozenid {frozenid!r}')
frozenfile = f'{frozenid}.h'
if not destdir:
return frozenfile
return os.path.join(destdir, frozenfile)
.source not in seen:
yield mod.source
seen.add(mod.source)
modname = _modname
else:
match = None
else:
ispkg = False
rawname = modname
match = None
if not check_modname(modname):
raise ValueError(f'not a valid module name ({rawname})')
if not pyfile:
pyfile = _resolve_module(modname, ispkg=ispkg)
elif os.path.isdir(pyfile):
pyfile = _resolve_module(modname, pyfile, ispkg)
yield modname, pyfile, ispkg
if match:
pkgdir = os.path.dirname(pyfile)
yield from iter_submodules(modname, pkgdir, match)
def check_modname(modname):
    """Return True if every dot-separated component is a valid identifier."""
    for part in modname.split('.'):
        if not part.isidentifier():
            return False
    return True
def iter_submodules(pkgname, pkgdir=None, match='*'):
if not pkgdir:
pkgdir = os.path.join(STDLIB_DIR, *pkgname.split('.'))
if not match:
match = '**.*'
match_modname = _resolve_modname_matcher(match, pkgdir)
def _iter_submodules(pkgname, pkgdir):
for entry in sorted(os.scandir(pkgdir), key=lambda e: e.name):
matched, recursive = match_modname(entry.name)
if not matched:
continue
modname = f'{pkgname}.{entry.name}'
if modname.endswith('.py'):
yield modname[:-3], entry.path, False
elif entry.is_dir():
pyfile = os.path.join(entry.path, '__init__.py')
# We ignore namespace packages.
if os.path.exists(pyfile):
yield modname, pyfile, True
if recursive:
yield from _iter_submodules(modname, entry.path)
return _iter_submodules(pkgname, pkgdir)
def _resolve_modname_matcher(match, rootdir=None):
if isinstance(match, str):
if match.startswith('**.'):
recursive = True
pat = match[3:]
assert match
else:
recursive = False
pat = match
if pat == '*':
def match_modname(modname):
return True, recursive
else:
raise NotImplementedError(match)
elif callable(match):
match_modname = match(rootdir)
else:
raise ValueError(f'unsupported matcher {match!r}')
return match_modname
def _resolve_module(modname, pathentry=STDLIB_DIR, ispkg=False):
assert pathentry, pathentry
pathentry = os.path.normpath(pathentry)
assert os.path.isabs(pathentry)
if ispkg:
return os.path.join(pathentry, *modname.split('.'), '__init__.py')
return os.path.join(pathentry, *modname.split('.')) + '.py'
#######################################
# regenerating dependent files
def find_marker(lines, marker, file):
    """Return the index of the first line containing *marker*."""
    pos = next((i for i, text in enumerate(lines) if marker in text), None)
    if pos is None:
        raise Exception(f"Can't find {marker!r} in file {file}")
    return pos
def replace_block(lines, start_marker, end_marker, replacements, file):
    """Splice *replacements* between the start and end marker lines.

    The marker lines themselves are kept; each replacement line is
    normalized to end with exactly one newline.
    """
    start_pos = find_marker(lines, start_marker, file)
    end_pos = find_marker(lines, end_marker, file)
    if end_pos <= start_pos:
        raise Exception(f"End marker {end_marker!r} "
                        f"occurs before start marker {start_marker!r} "
                        f"in file {file}")
    normalized = []
    for line in replacements:
        normalized.append(line.rstrip() + '\n')
    return [*lines[:start_pos + 1], *normalized, *lines[end_pos:]]
def regen_manifest(modules):
header = 'module ispkg source frozen checksum'.split()
widths = [5] * len(header)
rows = []
for mod in modules:
info = mod.summarize()
row = []
for i, col in enumerate(header):
value = info[col]
if col == 'checksum':
value = value[:12]
elif col == 'ispkg':
value = 'YES' if value else 'no'
widths[i] = max(widths[i], len(value))
row.append(value or '-')
rows.append(row)
modlines = [
'# The list of frozen modules with key information.',
'# Note that the "check_generated_files" CI job will identify',
'# when source files were changed but regen-frozen wasn\'t run.',
'
' '.join(c.center(w) for c, w in zip(header, widths)).rstrip(),
' '.join('-' * w for w in widths),
]
for row in rows:
for i, w in enumerate(widths):
if header[i] == 'ispkg':
row[i] = row[i].center(w)
else:
row[i] = row[i].ljust(w)
modlines.append(' '.join(row).rstrip())
print(f'
with open(MANIFEST, 'w') as outfile:
lines = (l + '\n' for l in modlines)
outfile.writelines(lines)
def regen_frozen(modules):
headerlines = []
parentdir = os.path.dirname(FROZEN_FILE)
for src in _iter_sources(modules):
# Adding a comment to separate sections here doesn't add much,
header = relpath_for_posix_display(src.frozenfile, parentdir)
headerlines.append(f'
deflines = []
indent = ' '
lastsection = None
for mod in modules:
if mod.section != lastsection:
if lastsection is not None:
deflines.append('')
deflines.append(f'/* {mod.section} */')
lastsection = mod.section
symbol = mod.symbol
pkg = '-' if mod.ispkg else ''
line = ('{"%s", %s, %s(int)sizeof(%s)},'
) % (mod.name, symbol, pkg, symbol)
# TODO: Consider not folding lines
if len(line) < 80:
deflines.append(line)
else:
line1, _, line2 = line.rpartition(' ')
deflines.append(line1)
deflines.append(indent + line2)
if not deflines[0]:
del deflines[0]
for i, line in enumerate(deflines):
if line:
deflines[i] = indent + line
print(f'
with updating_file_with_tmpfile(FROZEN_FILE) as (infile, outfile):
lines = infile.readlines()
# TODO: Use more obvious markers, e.g.
# $START GENERATED FOOBAR$ / $END GENERATED FOOBAR$
lines = replace_block(
lines,
"/* Includes for frozen modules: */",
"/* End includes */",
headerlines,
FROZEN_FILE,
)
lines = replace_block(
lines,
"static const struct _frozen _PyImport_FrozenModules[] =",
"/* sentinel */",
deflines,
FROZEN_FILE,
)
outfile.writelines(lines)
def regen_makefile(modules):
pyfiles = []
frozenfiles = []
rules = ['']
for src in _iter_sources(modules):
header = relpath_for_posix_display(src.frozenfile, ROOT_DIR)
frozenfiles.append(f'\t\t{header} \\')
pyfile = relpath_for_posix_display(src.pyfile, ROOT_DIR)
pyfiles.append(f'\t\t{pyfile} \\')
freeze = (f'Programs/_freeze_module {src.frozenid} '
f'$(srcdir)/{pyfile} $(srcdir)/{header}')
rules.extend([
f'{header}: Programs/_freeze_module {pyfile}',
f'\t{freeze}',
'',
])
pyfiles[-1] = pyfiles[-1].rstrip(" \\")
frozenfiles[-1] = frozenfiles[-1].rstrip(" \\")
print(f'
with updating_file_with_tmpfile(MAKEFILE) as (infile, outfile):
lines = infile.readlines()
lines = replace_block(
lines,
"FROZEN_FILES_IN =",
"# End FROZEN_FILES_IN",
pyfiles,
MAKEFILE,
)
lines = replace_block(
lines,
"FROZEN_FILES_OUT =",
"# End FROZEN_FILES_OUT",
frozenfiles,
MAKEFILE,
)
lines = replace_block(
lines,
"# BEGIN: freezing modules",
"# END: freezing modules",
rules,
MAKEFILE,
)
outfile.writelines(lines)
def regen_pcbuild(modules):
projlines = []
filterlines = []
for src in _iter_sources(modules):
pyfile = relpath_for_windows_display(src.pyfile, ROOT_DIR)
header = relpath_for_windows_display(src.frozenfile, ROOT_DIR)
intfile = ntpath.splitext(ntpath.basename(header))[0] + '.g.h'
projlines.append(f' <None Include="..\\{pyfile}">')
projlines.append(f' <ModName>{src.frozenid}</ModName>')
projlines.append(f' <IntFile>$(IntDir){intfile}</IntFile>')
projlines.append(f' <OutFile>$(PySourcePath){header}</OutFile>')
projlines.append(f' </None>')
filterlines.append(f' <None Include="..\\{pyfile}">')
filterlines.append(' <Filter>Python Files</Filter>')
filterlines.append(' </None>')
print(f'
with updating_file_with_tmpfile(PCBUILD_PROJECT) as (infile, outfile):
lines = infile.readlines()
lines = replace_block(
lines,
'<!-- BEGIN frozen modules -->',
'<!-- END frozen modules -->',
projlines,
PCBUILD_PROJECT,
)
outfile.writelines(lines)
print(f'
with updating_file_with_tmpfile(PCBUILD_FILTERS) as (infile, outfile):
lines = infile.readlines()
lines = replace_block(
lines,
'<!-- BEGIN frozen modules -->',
'<!-- END frozen modules -->',
filterlines,
PCBUILD_FILTERS,
)
outfile.writelines(lines)
#######################################
# freezing modules
def freeze_module(modname, pyfile=None, destdir=MODULES_DIR):
tmpsuffix = f'.{int(time.time())}'
for modname, pyfile, ispkg in resolve_modules(modname, pyfile):
frozenfile = resolve_frozen_file(modname, destdir)
_freeze_module(modname, pyfile, frozenfile, tmpsuffix)
def _freeze_module(frozenid, pyfile, frozenfile, tmpsuffix):
tmpfile = f'{frozenfile}.{int(time.time())}'
print(tmpfile)
argv = [TOOL, frozenid, pyfile, tmpfile]
print('
try:
subprocess.run(argv, check=True)
except (FileNotFoundError, subprocess.CalledProcessError):
if not os.path.exists(TOOL):
sys.exit(f'ERROR: missing {TOOL}; you need to run "make regen-frozen"')
raise # re-raise
update_file_with_tmpfile(frozenfile, tmpfile, create=True)
#######################################
# the script
def main():
# Expand the raw specs, preserving order.
modules = list(parse_frozen_specs(destdir=MODULES_DIR))
# Regen build-related files.
regen_makefile(modules)
regen_pcbuild(modules)
# Freeze the target modules.
tmpsuffix = f'.{int(time.time())}'
for src in _iter_sources(modules):
_freeze_module(src.frozenid, src.pyfile, src.frozenfile, tmpsuffix)
# Regen files dependent of frozen file details.
regen_frozen(modules)
regen_manifest(modules)
if __name__ == '__main__':
argv = sys.argv[1:]
if argv:
sys.exit('ERROR: got unexpected args {argv}')
main()
| true | true |
f727392d301c086ac8d765cc7989c6df28dc7616 | 962 | py | Python | lockss_configuration/__init__.py | lockss/lockss-configuration-python | d645b42e0fdae5ccfcaf80b26c09218c1e1c9c94 | [
"BSD-3-Clause"
] | null | null | null | lockss_configuration/__init__.py | lockss/lockss-configuration-python | d645b42e0fdae5ccfcaf80b26c09218c1e1c9c94 | [
"BSD-3-Clause"
] | null | null | null | lockss_configuration/__init__.py | lockss/lockss-configuration-python | d645b42e0fdae5ccfcaf80b26c09218c1e1c9c94 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# flake8: noqa
"""
LOCKSS Configuration Service REST API
API of the LOCKSS Configuration REST Service # noqa: E501
OpenAPI spec version: 1.0.0
Contact: lockss-support@lockss.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from lockss_configuration.api.aus_api import AusApi
from lockss_configuration.api.config_api import ConfigApi
from lockss_configuration.api.status_api import StatusApi
# import ApiClient
from lockss_configuration.api_client import ApiClient
from lockss_configuration.configuration import Configuration
# import models into sdk package
from lockss_configuration.lockss-configuration-python.api_status import ApiStatus
from lockss_configuration.lockss-configuration-python.config_exchange import ConfigExchange
from lockss_configuration.lockss-configuration-python.config_mod_spec import ConfigModSpec
| 32.066667 | 91 | 0.828482 |
"""
LOCKSS Configuration Service REST API
API of the LOCKSS Configuration REST Service # noqa: E501
OpenAPI spec version: 1.0.0
Contact: lockss-support@lockss.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
from lockss_configuration.api.aus_api import AusApi
from lockss_configuration.api.config_api import ConfigApi
from lockss_configuration.api.status_api import StatusApi
from lockss_configuration.api_client import ApiClient
from lockss_configuration.configuration import Configuration
from lockss_configuration.lockss-configuration-python.api_status import ApiStatus
from lockss_configuration.lockss-configuration-python.config_exchange import ConfigExchange
from lockss_configuration.lockss-configuration-python.config_mod_spec import ConfigModSpec
| false | true |
f72739fb303514524a1f36a098a48adc38f45626 | 4,753 | py | Python | explain.py | pakesson/scaml | c69d422d6839d75a81426c81fd8d570fa421744b | [
"MIT"
] | 1 | 2020-12-03T01:34:47.000Z | 2020-12-03T01:34:47.000Z | explain.py | pakesson/scaml | c69d422d6839d75a81426c81fd8d570fa421744b | [
"MIT"
] | null | null | null | explain.py | pakesson/scaml | c69d422d6839d75a81426c81fd8d570fa421744b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import math
import numpy as np
from tensorflow.keras.models import load_model
from aes import aes_sbox, aes_sbox_inv
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def get_label(plaintext, key, index):
    """AES S-box output of plaintext[index] XOR key[index] (profiling label)."""
    sbox_in = plaintext[index] ^ key[index]
    return aes_sbox[sbox_in]
num_classes = 256
attack_byte = 0
start_trace_to_attack = 100
number_of_traces_to_attack = 25
number_of_traces_to_explain = 5
occlusion_size = 1
def apply_occlusion(sample, x, occlusion_size=1, occlusion_value=0):
    """Return a copy of *sample* with rows [x, x+occlusion_size) overwritten
    by *occlusion_value*; the input array is left untouched."""
    occluded = np.array(sample)  # np.array copies by default
    occluded[x:x + occlusion_size, :] = occlusion_value
    return occluded
def get_occlusion_sensitivity(samples, model, class_index, occlusion_size=1):
    """Mean occlusion-sensitivity map over *samples*.

    For each sample, every window of *occlusion_size* points is masked with
    the sample mean and the model's confidence in that sample's target class
    (class_index[idx]) is recorded.  Confidences are averaged over samples
    and inverted (1 - confidence), then expanded back to one value per point.

    NOTE(review): assumes samples is an ndarray shaped
    (num_samples, num_points, channels) -- confirm against callers.
    """
    print("Generating occlusion sensitivity maps...")
    # Cleanup: the old code pre-allocated sensitivity_map only to overwrite
    # it wholesale later, and recomputed this ceil() twice.
    n_windows = math.ceil(samples[0].shape[0] / occlusion_size)
    confidence_map = np.zeros(n_windows)
    for idx, sample in enumerate(samples):
        print(f"  Sample {idx}")
        # Occlude with the sample mean rather than zero to stay in-range.
        occlusion_value = np.mean(sample)
        occlusions = [
            apply_occlusion(sample, x, occlusion_size, occlusion_value)
            for x in range(0, sample.shape[0], occlusion_size)
        ]
        predictions = model.predict(np.array(occlusions), batch_size=32)
        target_class_predictions = [
            prediction[class_index[idx]] for prediction in predictions
        ]
        # zip truncates safely if the model ever returns fewer predictions.
        for x, confidence in zip(range(n_windows), target_class_predictions):
            confidence_map[x] += confidence
    # Mean confidence value; low confidence => high sensitivity.
    confidence_map = confidence_map / samples.shape[0]
    sensitivity_map = 1 - confidence_map
    # Scale back up to one value per trace point.
    result = np.zeros(samples[0].shape[0])
    for x in range(result.shape[0]):
        result[x] = sensitivity_map[x // occlusion_size]
    return result
def explain(data, model, class_index, occlusion_size=1):
    """Normalize *data* to 3-D and return one occlusion sensitivity map.

    class_index holds the per-trace target class passed straight through to
    get_occlusion_sensitivity.
    """
    # Make sure the data shape is (num_traces, num_points_per_trace, x)
    if len(data.shape) == 2:
        data = data.reshape((1, data.shape[0], data.shape[1]))
        # NOTE(review): this assumes class_index is already 2-D; for a single
        # 2-D trace it would typically be scalar/1-D, so this branch looks
        # fragile -- confirm before relying on the 2-D path (the __main__
        # caller always passes 3-D data, so the branch is normally unused).
        class_index = class_index.reshape((1, class_index.shape[0], class_index.shape[1]))
    elif len(data.shape) != 3:
        raise ValueError("unsupported data shape")
    # Generate one map for all samples
    return get_occlusion_sensitivity(data, model, class_index, occlusion_size)
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print("Usage:")
        print(f"  {sys.argv[0]} <model filename> <trace filename> <sensitivity map filename>")
        exit()
    model_filename = sys.argv[1]
    trace_filename = sys.argv[2]
    sensitivity_map_filename = sys.argv[3]
    model = load_model(model_filename)
    print("Input shape: " + str(model.input_shape))
    traces = np.load(trace_filename)
    print(traces.files)
    trace_array = traces['trace_array']
    textin_array = traces['textin_array']
    known_keys = traces['known_keys']
    # Add a trailing axis of size 1 so each trace matches the model input.
    trace_array = trace_array.reshape((trace_array.shape[0], trace_array.shape[1], 1))
    # Run an initial prediction before we try to explain anything
    result = model.predict(trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_attack, :, :])
    # Rank key-byte guesses by summing log10 class probabilities across
    # traces; the 1e-22 term guards against log10(0).
    log10_sum_prediction = np.zeros(num_classes)
    for k in range(number_of_traces_to_attack):
        plaintext = textin_array[start_trace_to_attack+k, attack_byte]
        prediction = result[k]
        for l in range(num_classes):
            key_byte_index = (aes_sbox_inv[l] ^ plaintext)
            log10_sum_prediction[key_byte_index] += np.log10(prediction[l] + 1e-22)
    print("Best key byte guess: " + str(np.argmax(log10_sum_prediction)))
    print("known_keys[0]: " + str(known_keys[0]))
    # Run explainer
    data = trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, :, :]
    key_index = np.argmax(log10_sum_prediction)
    # Target class per trace: the S-box output under the recovered key byte.
    class_index = aes_sbox[textin_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, attack_byte] ^ key_index]
    sensitivity_map = explain(data, model, class_index, occlusion_size)
    # Save results
    np.savez_compressed(sensitivity_map_filename, sensitivity_map=sensitivity_map)
    # Visualize the results
    fig = plt.figure()
    plt.title(f"Occlusion sensitivity for key byte {attack_byte} in trace {start_trace_to_attack}")
    ax = fig.gca()
    x = np.linspace(0, sensitivity_map.shape[0]-1, sensitivity_map.shape[0])
    # Color each occlusion-sized segment of the first trace by its min-max
    # normalized sensitivity.
    for i in range(0, sensitivity_map.shape[0]-1, occlusion_size):
        color = (sensitivity_map[i]-min(sensitivity_map))/np.ptp(sensitivity_map)
        ax.plot(x[i:i+occlusion_size+1], data[0, i:i+occlusion_size+1, 0], color=plt.cm.plasma(color))
    plt.show()
| 36.007576 | 138 | 0.708605 |
import sys
import math
import numpy as np
from tensorflow.keras.models import load_model
from aes import aes_sbox, aes_sbox_inv
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def get_label(plaintext, key, index):
return aes_sbox[plaintext[index] ^ key[index]]
num_classes = 256
attack_byte = 0
start_trace_to_attack = 100
number_of_traces_to_attack = 25
number_of_traces_to_explain = 5
occlusion_size = 1
def apply_occlusion(sample, x, occlusion_size=1, occlusion_value=0):
occluded_sample = np.array(sample, copy=True)
occluded_sample[x:x+occlusion_size, :] = occlusion_value
return occluded_sample
def get_occlusion_sensitivity(samples, model, class_index, occlusion_size=1):
print("Generating occlusion sensitivity maps...")
confidence_map = np.zeros(math.ceil(samples[0].shape[0] / occlusion_size))
sensitivity_map = np.zeros(math.ceil(samples[0].shape[0] / occlusion_size))
for idx, sample in enumerate(samples):
print(f" Sample {idx}")
occlusion_value = np.mean(sample)
occlusions = [
apply_occlusion(sample, x, occlusion_size, occlusion_value)
for x in range(0, sample.shape[0], occlusion_size)
]
predictions = model.predict(np.array(occlusions), batch_size=32)
target_class_predictions = [
prediction[class_index[idx]] for prediction in predictions
]
for x, confidence in zip(range(sensitivity_map.shape[0]), target_class_predictions):
confidence_map[x] += confidence
confidence_map = confidence_map / samples.shape[0]
sensitivity_map = 1 - confidence_map
result = np.zeros(samples[0].shape[0])
for x in range(result.shape[0]):
result[x] = sensitivity_map[x // occlusion_size]
return result
def explain(data, model, class_index, occlusion_size=1):
if len(data.shape) == 2:
data = data.reshape((1, data.shape[0], data.shape[1]))
class_index = class_index.reshape((1, class_index.shape[0], class_index.shape[1]))
elif len(data.shape) != 3:
raise ValueError("unsupported data shape")
return get_occlusion_sensitivity(data, model, class_index, occlusion_size)
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Usage:")
print(f" {sys.argv[0]} <model filename> <trace filename> <sensitivity map filename>")
exit()
model_filename = sys.argv[1]
trace_filename = sys.argv[2]
sensitivity_map_filename = sys.argv[3]
model = load_model(model_filename)
print("Input shape: " + str(model.input_shape))
traces = np.load(trace_filename)
print(traces.files)
trace_array = traces['trace_array']
textin_array = traces['textin_array']
known_keys = traces['known_keys']
trace_array = trace_array.reshape((trace_array.shape[0], trace_array.shape[1], 1))
result = model.predict(trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_attack, :, :])
log10_sum_prediction = np.zeros(num_classes)
for k in range(number_of_traces_to_attack):
plaintext = textin_array[start_trace_to_attack+k, attack_byte]
prediction = result[k]
for l in range(num_classes):
key_byte_index = (aes_sbox_inv[l] ^ plaintext)
log10_sum_prediction[key_byte_index] += np.log10(prediction[l] + 1e-22)
print("Best key byte guess: " + str(np.argmax(log10_sum_prediction)))
print("known_keys[0]: " + str(known_keys[0]))
data = trace_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, :, :]
key_index = np.argmax(log10_sum_prediction)
class_index = aes_sbox[textin_array[start_trace_to_attack:start_trace_to_attack+number_of_traces_to_explain, attack_byte] ^ key_index]
sensitivity_map = explain(data, model, class_index, occlusion_size)
np.savez_compressed(sensitivity_map_filename, sensitivity_map=sensitivity_map)
fig = plt.figure()
plt.title(f"Occlusion sensitivity for key byte {attack_byte} in trace {start_trace_to_attack}")
ax = fig.gca()
x = np.linspace(0, sensitivity_map.shape[0]-1, sensitivity_map.shape[0])
for i in range(0, sensitivity_map.shape[0]-1, occlusion_size):
color = (sensitivity_map[i]-min(sensitivity_map))/np.ptp(sensitivity_map)
ax.plot(x[i:i+occlusion_size+1], data[0, i:i+occlusion_size+1, 0], color=plt.cm.plasma(color))
plt.show()
| true | true |
f7273a7b5d7bdf69557a4052a69a66f7ebffb3ac | 588 | py | Python | stats/attendance.py | diegomrsantos/Python-Baseball | 4543df7a4d74e82106a3e8481553149c447d8ab6 | [
"MIT"
] | null | null | null | stats/attendance.py | diegomrsantos/Python-Baseball | 4543df7a4d74e82106a3e8481553149c447d8ab6 | [
"MIT"
] | null | null | null | stats/attendance.py | diegomrsantos/Python-Baseball | 4543df7a4d74e82106a3e8481553149c447d8ab6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from data import games
# Keep only the 'info' records whose multi2 field marks an attendance value.
info_filter = games['type'] == 'info'
attendance_filter = games['multi2'] == 'attendance'
attendance = games.loc[info_filter & attendance_filter, ['year', 'multi3']]
attendance.columns = ['year', 'attendance']
# Ensure the attendance column is numeric before aggregating/plotting.
attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])
attendance.plot(x='year', y='attendance', figsize=(15, 7), kind='bar')
plt.xlabel('Year')
plt.ylabel('Attendance')
# Dashed reference line at the mean attendance across all years.
plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')
plt.show()
| 32.666667 | 91 | 0.710884 | import pandas as pd
import matplotlib.pyplot as plt
from data import games
info_filter = games['type'] == 'info'
attendance_filter = games['multi2'] == 'attendance'
attendance = games.loc[info_filter & attendance_filter, ['year', 'multi3']]
attendance.columns = ['year', 'attendance']
attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])
attendance.plot(x='year', y='attendance', figsize=(15, 7), kind='bar')
plt.xlabel('Year')
plt.ylabel('Attendance')
plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')
plt.show()
| true | true |
f7273aa823161ba436cb65218833b108a92b9ccc | 651 | py | Python | tests/test_pickling_in_main/main.py | smheidrich/pickle-spree | 73d7a6fd1265f28fc3b91db593309cf5d2ae9195 | [
"MIT"
] | null | null | null | tests/test_pickling_in_main/main.py | smheidrich/pickle-spree | 73d7a6fd1265f28fc3b91db593309cf5d2ae9195 | [
"MIT"
] | 2 | 2022-01-23T18:51:13.000Z | 2022-01-23T18:54:36.000Z | tests/test_pickling_in_main/main.py | smheidrich/pickle-spree | 73d7a6fd1265f28fc3b91db593309cf5d2ae9195 | [
"MIT"
] | null | null | null | from collections import ChainMap
import os
from pathlib import Path
from pickle_spree import PopenFactory
import subprocess
import sys
class CallableDefinedInMain:
    """A trivial callable whose class lives in the ``__main__`` module.

    NOTE(review): per the enclosing test's name (test_pickling_in_main),
    this presumably exercises pickling of __main__-level objects -- confirm.
    """
    def __call__(self):
        return 1
# Module-level instance; NOTE(review): this name shadows the builtin
# `callable`.
callable = CallableDefinedInMain()
new_popen = PopenFactory(callable=callable)
# Monkey-patch subprocess globally so anything creating a Popen (including
# subprocess.run below) goes through the pickle-spree factory product.
subprocess.Popen = new_popen
# Prepend this file's directory to PYTHONPATH so the child process can
# import modules that live next to this script.
pythonpaths = os.environ.get("PYTHONPATH", "").split(":")
pythonpath = ":".join([str(Path(__file__).parent.absolute())]+pythonpaths)
if __name__ == "__main__":
    # Write a tiny child script, then run it through the patched Popen.
    Path("child_script.py").write_text("print('foo')")
    subprocess.run([sys.executable, "child_script.py"],
        env=ChainMap({"PYTHONPATH": pythonpath}, os.environ), check=True)
| 25.038462 | 74 | 0.752688 | from collections import ChainMap
import os
from pathlib import Path
from pickle_spree import PopenFactory
import subprocess
import sys
class CallableDefinedInMain:
def __call__(self):
return 1
callable = CallableDefinedInMain()
new_popen = PopenFactory(callable=callable)
subprocess.Popen = new_popen
pythonpaths = os.environ.get("PYTHONPATH", "").split(":")
pythonpath = ":".join([str(Path(__file__).parent.absolute())]+pythonpaths)
if __name__ == "__main__":
Path("child_script.py").write_text("print('foo')")
subprocess.run([sys.executable, "child_script.py"],
env=ChainMap({"PYTHONPATH": pythonpath}, os.environ), check=True)
| true | true |
f7273b4fd11cad1a9484b1fbcd350c2e7b6f9e26 | 377 | py | Python | Exercicio6TapeEquilibrium/ResolucaoPropria/start.py | GRParasky/codility-exercises | 1a7144492d78fd712ec8d23d94502e3f5ed642a3 | [
"MIT"
] | null | null | null | Exercicio6TapeEquilibrium/ResolucaoPropria/start.py | GRParasky/codility-exercises | 1a7144492d78fd712ec8d23d94502e3f5ed642a3 | [
"MIT"
] | null | null | null | Exercicio6TapeEquilibrium/ResolucaoPropria/start.py | GRParasky/codility-exercises | 1a7144492d78fd712ec8d23d94502e3f5ed642a3 | [
"MIT"
def solution(A):
    """Tape Equilibrium: minimal |sum(A[:p]) - sum(A[p:])| over 0 < p < len(A).

    Performance fix: the original called sum() on both halves inside the
    loop (O(n^2)); this version keeps a running prefix sum (O(n)).  As
    before, fewer than two elements leaves the list empty and min() raises
    ValueError.
    """
    total = sum(A)
    left = 0
    differences = []
    for p in range(1, len(A)):
        left += A[p - 1]
        # |left - (total - left)| == |2*left - total|
        differences.append(abs(2 * left - total))
    return min(differences)
print(solution([3, 1, 2, 4, 3]))
| 18.85 | 42 | 0.572944 | def solution(A):
list_range = len(A)
difference_list = []
for p in range(1, list_range):
post_sum = sum(A[p:])
behind_sum = sum(A[:p])
difference = behind_sum - post_sum
if difference < 0:
difference *= -1
difference_list.append(difference)
return min(difference_list)
print(solution([3, 1, 2, 4, 3]))
| true | true |
f7273bbbd5fff48873e854018e8ac2d206d735b8 | 12,117 | py | Python | .github/scripts/check-header.py | githubliweichao/FreeRTOS | 208b260f982d7a0c8b9aaff6bc446f8c7e45d2e2 | [
"MIT"
] | 1 | 2020-12-20T03:45:04.000Z | 2020-12-20T03:45:04.000Z | .github/scripts/check-header.py | githubliweichao/FreeRTOS | 208b260f982d7a0c8b9aaff6bc446f8c7e45d2e2 | [
"MIT"
] | null | null | null | .github/scripts/check-header.py | githubliweichao/FreeRTOS | 208b260f982d7a0c8b9aaff6bc446f8c7e45d2e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os, sys, re
from argparse import ArgumentParser
from difflib import unified_diff
from json import load
def dprint(msg):
    """Emit *msg* to stdout with a [DEBUG] prefix."""
    print(f'[DEBUG]: {str(msg)}')
class HeaderChecker:
    """Validates that source files begin with an expected license header.

    Files can be exempted from the check in three ways: by exact filename,
    by file extension, or by a regex pattern matched against the path.
    """

    def __init__(self, header, padding=1000, ignored_files=None, ignored_ext=None, ignored_patterns=None):
        """
        :param header: Expected header as a list of lines (each ending in a newline).
        :param padding: Extra characters read beyond the header's length so that
                        leading whitespace/BOM noise doesn't truncate the compare.
        :param ignored_files: Filenames to skip (exact match).
        :param ignored_ext: Extensions (e.g. '.md') to skip.
        :param ignored_patterns: Pre-compiled regex objects to skip.
        """
        self.padding = padding
        self.header = header
        # None sentinels avoid the shared-mutable-default-argument pitfall;
        # copying keeps us independent of later caller-side mutation.
        self.ignorePatternList = list(ignored_patterns) if ignored_patterns is not None else []
        self.ignoreFileList = list(ignored_files) if ignored_files is not None else []
        self.ignoreExtList = list(ignored_ext) if ignored_ext is not None else []

    def checkJSONList(self, path_json):
        '''
        Check every file listed in a JSON array file and return how many fail.

        This is particularly useful when ingesting output from other programs,
        like git actions.
        '''
        assert os.path.exists(path_json), 'No such file: ' + path_json

        # Get list of files to check from JSON file
        with open(path_json) as file_json:
            file_checklist = load(file_json)
        assert isinstance(file_checklist, list), 'Expected list for singular JSON List entry'

        # Accrue how many files fail the check
        n_failed = 0
        for path_file in file_checklist:
            assert isinstance(path_file, str), 'Unexpected JSON format for ' + path_json
            n_failed += not self.isValidFile(path_file)

        return n_failed

    def isValidFile(self, path):
        """Return True if *path* is ignored, a directory, or starts with the
        expected header; otherwise print a unified diff and return False."""
        assert os.path.exists(path), 'No such file: ' + path

        # Skip any ignored files
        if self.isIgnoredFile(path):
            return True

        # Skip if entry is a directory.
        if os.path.isdir(path):
            print('Skipping valid file check on directory path: %s' % path)
            return True

        # Don't need entire file. Read a chunk large enough to contain the header.
        with open(path, encoding='utf-8', errors='ignore') as file:
            chunk = file.read(len(''.join(self.header)) + self.padding)
            lines = [('%s\n' % l) for l in chunk.strip().splitlines()][:len(self.header)]
            if self.header == lines:
                return True
            else:
                print('File Delta: %s' % path)
                print(*unified_diff(lines[:len(self.header)], self.header))
                return False

    def ignoreExtension(self, *args):
        """Add one or more file extensions (e.g. '.md') to the ignore list."""
        for ext in args:
            self.ignoreExtList.append(ext)

    def ignoreFile(self, *args):
        """Add one or more exact filenames to the ignore list."""
        for f in args:
            self.ignoreFileList.append(f)

    def ignorePattern(self, *args):
        """Compile one or more regex pattern strings and add them to the ignore list."""
        for p in args:
            self.ignorePatternList.append(re.compile(p))

    def isIgnoredFile(self, path):
        '''
        There are multiple ways a file can be ignored. This is a catch all.
        '''
        assert os.path.exists(path), 'No such file: ' + path

        # Try simpler checks first
        filename = os.path.split(path)[-1]
        extension = os.path.splitext(filename)[-1]
        if extension in self.ignoreExtList or filename in self.ignoreFileList:
            return True

        # Then iterate against regex patterns. In future consider Trie
        for pattern in self.ignorePatternList:
            if pattern.match(path):
                return True

        return False
def configArgParser():
    """Build and return the command-line parser for the header checker."""
    description = ('FreeRTOS file header checker. We expect a consistent header across all '
                   'first party files. The header includes current version number, copyright, '
                   'and FreeRTOS license.')
    parser = ArgumentParser(description=description)

    # Positional: one or more paths (or JSON list files, with -j).
    parser.add_argument('files_checked', nargs='+', metavar='FILE_LIST',
                        help='Space separated list of files to check.')

    # Optional flags, both off by default.
    parser.add_argument('-k', '--kernel', action='store_true', default=False,
                        help='Compare with kernel file header. It has different versioning.')
    parser.add_argument('-j', '--json', action='store_true', default=False,
                        help='Treat arguments json files that store a list of files to check.')

    return parser
#--------------------------------------------------------------------------------------------------
# CONFIG
#--------------------------------------------------------------------------------------------------
FREERTOS_IGNORED_EXTENSIONS = [
'.1',
'.ASM',
'.C',
'.DSW',
'.G_C',
'.H',
'.Hbp',
'.IDE',
'.LIB',
'.Opt',
'.PC',
'.PRM',
'.TXT',
'.URL',
'.UVL',
'.Uv2',
'.a',
'.ac',
'.am',
'.atsln',
'.atstart',
'.atsuo',
'.bash',
'.bat',
'.bbl',
'.bit',
'.board',
'.bsb',
'.bsdl',
'.bts',
'.ccxml',
'.cdkproj',
'.cdkws',
'.cfg',
'.cgp',
'.cmake',
'.cmd',
'.config',
'.cpp',
'.cproj',
'.crun',
'.css',
'.csv',
'.custom_argvars',
'.cxx',
'.cydwr',
'.cyprj',
'.cysch',
'.dat',
'.datas',
'.db',
'.dbgdt',
'.dep',
'.dni',
'.dnx',
'.doc',
'.dox',
'.doxygen',
'.ds',
'.dsk',
'.dtd',
'.dts',
'.elf',
'.env_conf',
'.ewd',
'.ewp',
'.ewt',
'.eww',
'.exe',
'.filters',
'.flash',
'.fmt',
'.ftl',
'.gdb',
'.gif',
'.gise',
'.gld',
'.gpdsc',
'.gui',
'.h_from_toolchain',
'.hdf',
'.hdp',
'.hex',
'.hist',
'.history',
'.hsf',
'.htm',
'.html',
'.hwc',
'.hwl',
'.hwp',
'.hws',
'.hzp',
'.hzs',
'.i',
'.icf',
'.ide',
'.idx',
'.in',
'.inc',
'.include',
'.index',
'.inf',
'.ini',
'.init',
'.ipcf',
'.ise',
'.jlink',
'.json',
'.la',
'.launch',
'.lcf',
'.lds',
'.lib',
'.lk1',
'.lkr',
'.lm',
'.lo',
'.lock',
'.lsl',
'.lst',
'.m4',
'.mac',
'.make',
'.map',
'.mbt',
'.mcp',
'.mcpar',
'.mcs',
'.mcw',
'.md',
'.mdm',
'.mem',
'.mhs',
'.mk',
'.mk1',
'.mmi',
'.mrt',
'.mss',
'.mtpj',
'.nav',
'.ntrc_log',
'.opa',
'.opb',
'.opc',
'.opl',
'.opt',
'.opv',
'.out',
'.pack',
'.par',
'.patch',
'.pbd',
'.pdsc',
'.pe',
'.pem',
'.pgs',
'.pl',
'.plg',
'.png',
'.prc',
'.pref',
'.prefs',
'.prj',
'.properties',
'.ps1',
'.ptf',
'.r79',
'.rapp',
'.rc',
'.reggroups',
'.reglist',
'.resc',
'.resources',
'.rom',
'.rprj',
'.s79',
'.s82',
'.s90',
'.sc',
'.scf',
'.scfg',
'.script',
'.sct',
'.scvd',
'.session',
'.sfr',
'.sh',
'.shtml',
'.sig',
'.sln',
'.spec',
'.stf',
'.stg',
'.suo',
'.sup',
'.svg',
'.tags',
'.tcl',
'.tdt',
'.template',
'.tgt',
'.tps',
'.tra',
'.tree',
'.tws',
'.txt',
'.ucf',
'.url',
'.user',
'.ut',
'.uvmpw',
'.uvopt',
'.uvoptx',
'.uvproj',
'.uvprojx',
'.vcproj',
'.vcxproj',
'.version',
'.webserver',
'.wpj',
'.wsdt',
'.wsp',
'.wspos',
'.wsx',
'.x',
'.xbcd',
'.xcl',
'.xise',
'.xml',
'.xmp',
'.xmsgs',
'.xsl',
'.yml',
'.md',
'.zip'
]
# Path regexes exempt from the header check (third-party and build files).
# Note: a duplicated mbedtls_config entry was removed.
FREERTOS_IGNORED_PATTERNS = [
    r'.*\.git.*',
    r'.*mbedtls_config\.h.*',
    r'.*CMSIS.*',
    r'.*/makefile',
    r'.*/Makefile',
    r'.*/trcConfig\.h.*',
    r'.*/trcConfig\.c.*',
    r'.*/trcSnapshotConfig\.h.*',
]
FREERTOS_HEADER = [
'/*\n',
' * FreeRTOS V202012.00\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
KERNEL_IGNORED_EXTENSIONS = [
'.yml',
'.css',
'.idx',
'.md',
'.url',
'.sty',
'.0-rc2',
'.s82',
'.js',
'.out',
'.pack',
'.2',
'.1-kernel-only',
'.0-kernel-only',
'.0-rc1',
'.readme',
'.tex',
'.png',
'.bat',
'.sh'
]
KERNEL_IGNORED_PATTERNS = [r'.*\.git.*']
KERNEL_HEADER = [
'/*\n',
' * FreeRTOS Kernel V10.4.2\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
def main():
    """Run the header check over every input path and return the failure count."""
    args = configArgParser().parse_args()

    # Select the header and ignore configuration for the requested flavor.
    if args.kernel:
        checker = HeaderChecker(KERNEL_HEADER)
        checker.ignoreExtension(*KERNEL_IGNORED_EXTENSIONS)
        checker.ignorePattern(*KERNEL_IGNORED_PATTERNS)
    else:
        checker = HeaderChecker(FREERTOS_HEADER)
        checker.ignoreExtension(*FREERTOS_IGNORED_EXTENSIONS)
        checker.ignorePattern(*FREERTOS_IGNORED_PATTERNS)

    # Never flag this script itself.
    checker.ignoreFile(os.path.split(__file__)[-1])

    # Check all input files; the return value doubles as the exit code.
    print()
    total_failures = 0
    for path in args.files_checked:
        failed = checker.checkJSONList(path) if args.json else int(not checker.isValidFile(path))
        total_failures += failed
    return total_failures


if __name__ == '__main__':
    exit(main())
| 25.035124 | 116 | 0.505241 |
import os, sys, re
from argparse import ArgumentParser
from difflib import unified_diff
from json import load
def dprint(msg):
print('[DEBUG]: %s' % str(msg))
class HeaderChecker:
def __init__(self, header, padding=1000, ignored_files=[], ignored_ext=[], ignored_patterns=[]):
self.padding = padding
self.header = header
self.ignorePatternList = ignored_patterns.copy()
self.ignoreFileList = ignored_files.copy()
self.ignoreExtList = ignored_ext.copy()
def checkJSONList(self, path_json):
assert os.path.exists(path_json), 'No such file: ' + path_json
with open(path_json) as file_json:
file_checklist = load(file_json)
assert isinstance(file_checklist, list), 'Expected list for singular JSON List entry'
n_failed = 0
for path_file in file_checklist:
assert isinstance(path_file, str), 'Unexpected JSON format for ' + path_json
n_failed += not self.isValidFile(path_file)
return n_failed
def isValidFile(self, path):
assert os.path.exists(path), 'No such file: ' + path
if self.isIgnoredFile(path):
return True
if os.path.isdir(path):
print('Skipping valid file check on directory path: %s' % path)
return True
with open(path, encoding='utf-8', errors='ignore') as file:
chunk = file.read(len(''.join(self.header)) + self.padding)
lines = [('%s\n' % l) for l in chunk.strip().splitlines()][:len(self.header)]
if self.header == lines:
return True
else:
print('File Delta: %s' % path)
print(*unified_diff(lines[:len(self.header)], self.header))
return False
def ignoreExtension(self, *args):
for ext in args:
self.ignoreExtList.append(ext)
def ignoreFile(self, *args):
for f in args:
self.ignoreFileList.append(f)
def ignorePattern(self, *args):
for p in args:
self.ignorePatternList.append(re.compile(p))
def isIgnoredFile(self, path):
assert os.path.exists(path), 'No such file: ' + path
# Try simpler checks first
filename = os.path.split(path)[-1]
extension = os.path.splitext(filename)[-1]
if extension in self.ignoreExtList or filename in self.ignoreFileList:
return True
# Then iterate against regex patterns. In future consider Trie
for pattern in self.ignorePatternList:
if pattern.match(path):
return True
return False
def configArgParser():
parser = ArgumentParser(description='FreeRTOS file header checker. We expect a consistent header across all '
'first party files. The header includes current version number, copyright, '
'and FreeRTOS license.')
parser.add_argument('files_checked',
nargs = '+',
metavar = 'FILE_LIST',
help = 'Space separated list of files to check.')
parser.add_argument('-k', '--kernel',
default = False,
action = 'store_true',
help = 'Compare with kernel file header. It has different versioning.')
parser.add_argument('-j', '--json',
default = False,
action = 'store_true',
help = 'Treat arguments json files that store a list of files to check.')
return parser
#--------------------------------------------------------------------------------------------------
# CONFIG
#--------------------------------------------------------------------------------------------------
FREERTOS_IGNORED_EXTENSIONS = [
'.1',
'.ASM',
'.C',
'.DSW',
'.G_C',
'.H',
'.Hbp',
'.IDE',
'.LIB',
'.Opt',
'.PC',
'.PRM',
'.TXT',
'.URL',
'.UVL',
'.Uv2',
'.a',
'.ac',
'.am',
'.atsln',
'.atstart',
'.atsuo',
'.bash',
'.bat',
'.bbl',
'.bit',
'.board',
'.bsb',
'.bsdl',
'.bts',
'.ccxml',
'.cdkproj',
'.cdkws',
'.cfg',
'.cgp',
'.cmake',
'.cmd',
'.config',
'.cpp',
'.cproj',
'.crun',
'.css',
'.csv',
'.custom_argvars',
'.cxx',
'.cydwr',
'.cyprj',
'.cysch',
'.dat',
'.datas',
'.db',
'.dbgdt',
'.dep',
'.dni',
'.dnx',
'.doc',
'.dox',
'.doxygen',
'.ds',
'.dsk',
'.dtd',
'.dts',
'.elf',
'.env_conf',
'.ewd',
'.ewp',
'.ewt',
'.eww',
'.exe',
'.filters',
'.flash',
'.fmt',
'.ftl',
'.gdb',
'.gif',
'.gise',
'.gld',
'.gpdsc',
'.gui',
'.h_from_toolchain',
'.hdf',
'.hdp',
'.hex',
'.hist',
'.history',
'.hsf',
'.htm',
'.html',
'.hwc',
'.hwl',
'.hwp',
'.hws',
'.hzp',
'.hzs',
'.i',
'.icf',
'.ide',
'.idx',
'.in',
'.inc',
'.include',
'.index',
'.inf',
'.ini',
'.init',
'.ipcf',
'.ise',
'.jlink',
'.json',
'.la',
'.launch',
'.lcf',
'.lds',
'.lib',
'.lk1',
'.lkr',
'.lm',
'.lo',
'.lock',
'.lsl',
'.lst',
'.m4',
'.mac',
'.make',
'.map',
'.mbt',
'.mcp',
'.mcpar',
'.mcs',
'.mcw',
'.md',
'.mdm',
'.mem',
'.mhs',
'.mk',
'.mk1',
'.mmi',
'.mrt',
'.mss',
'.mtpj',
'.nav',
'.ntrc_log',
'.opa',
'.opb',
'.opc',
'.opl',
'.opt',
'.opv',
'.out',
'.pack',
'.par',
'.patch',
'.pbd',
'.pdsc',
'.pe',
'.pem',
'.pgs',
'.pl',
'.plg',
'.png',
'.prc',
'.pref',
'.prefs',
'.prj',
'.properties',
'.ps1',
'.ptf',
'.r79',
'.rapp',
'.rc',
'.reggroups',
'.reglist',
'.resc',
'.resources',
'.rom',
'.rprj',
'.s79',
'.s82',
'.s90',
'.sc',
'.scf',
'.scfg',
'.script',
'.sct',
'.scvd',
'.session',
'.sfr',
'.sh',
'.shtml',
'.sig',
'.sln',
'.spec',
'.stf',
'.stg',
'.suo',
'.sup',
'.svg',
'.tags',
'.tcl',
'.tdt',
'.template',
'.tgt',
'.tps',
'.tra',
'.tree',
'.tws',
'.txt',
'.ucf',
'.url',
'.user',
'.ut',
'.uvmpw',
'.uvopt',
'.uvoptx',
'.uvproj',
'.uvprojx',
'.vcproj',
'.vcxproj',
'.version',
'.webserver',
'.wpj',
'.wsdt',
'.wsp',
'.wspos',
'.wsx',
'.x',
'.xbcd',
'.xcl',
'.xise',
'.xml',
'.xmp',
'.xmsgs',
'.xsl',
'.yml',
'.md',
'.zip'
]
FREERTOS_IGNORED_PATTERNS = [
r'.*\.git.*',
r'.*mbedtls_config\.h.*',
r'.*mbedtls_config\.h.*',
r'.*CMSIS.*',
r'.*/makefile',
r'.*/Makefile',
r'.*/trcConfig\.h.*',
r'.*/trcConfig\.c.*',
r'.*/trcSnapshotConfig\.h.*',
]
FREERTOS_HEADER = [
'/*\n',
' * FreeRTOS V202012.00\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
KERNEL_IGNORED_EXTENSIONS = [
'.yml',
'.css',
'.idx',
'.md',
'.url',
'.sty',
'.0-rc2',
'.s82',
'.js',
'.out',
'.pack',
'.2',
'.1-kernel-only',
'.0-kernel-only',
'.0-rc1',
'.readme',
'.tex',
'.png',
'.bat',
'.sh'
]
KERNEL_IGNORED_PATTERNS = [r'.*\.git.*']
KERNEL_HEADER = [
'/*\n',
' * FreeRTOS Kernel V10.4.2\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
def main():
parser = configArgParser()
args = parser.parse_args()
# Configure checks
if args.kernel:
checker = HeaderChecker(KERNEL_HEADER)
checker.ignoreExtension(*KERNEL_IGNORED_EXTENSIONS)
checker.ignorePattern(*KERNEL_IGNORED_PATTERNS)
else:
checker = HeaderChecker(FREERTOS_HEADER)
checker.ignoreExtension(*FREERTOS_IGNORED_EXTENSIONS)
checker.ignorePattern(*FREERTOS_IGNORED_PATTERNS)
checker.ignoreFile(os.path.split(__file__)[-1])
# Check all input files
print()
n_failed = 0
for path in args.files_checked:
if args.json:
n_failed += checker.checkJSONList(path)
else:
n_failed += not checker.isValidFile(path)
return n_failed
if __name__ == '__main__':
exit(main())
| true | true |
f7273bc9f69a30526a9b3eca4ec533b0eff5edfe | 11,534 | py | Python | evaluate/evaluate_FDR.py | rperi/trustworthy-asv-fairness | 15df69a8f3f8ad5262002c9e3d12aa12ea8f1c6f | [
"MIT"
] | 1 | 2022-03-30T07:50:10.000Z | 2022-03-30T07:50:10.000Z | evaluate/evaluate_FDR.py | rperi/trustworthy-asv-fairness | 15df69a8f3f8ad5262002c9e3d12aa12ea8f1c6f | [
"MIT"
] | null | null | null | evaluate/evaluate_FDR.py | rperi/trustworthy-asv-fairness | 15df69a8f3f8ad5262002c9e3d12aa12ea8f1c6f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import os
import pdb
from scipy.spatial.distance import cosine
from sklearn.metrics import roc_curve, confusion_matrix
import sys
from tqdm import tqdm
from sklearn.metrics import auc
import argparse
# Operating points (overall false-positive rates) at which FDR is evaluated.
fprs = [0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5]
# Gender-matched trial groups evaluated separately.
groups = ['male_male','female_female']
# Weights trading off the FPR gap vs. the FNR gap in the FDR metric.
omegas = [0.0, 0.25, 0.5, 0.75, 1.0]
# utterance id -> embedding vector; populated in main().
emb_map = {}
# utterance id -> x-vector; populated in main() when --eval_xvector is set.
xvec_map = {}
def compute_scores(df_, eer_threshold_overall=0, agnostic_FLAG=False, emb_FLAG=True):
    """Score every trial in *df_* by cosine similarity and compute EER stats.

    Reads embeddings from the module-level maps (emb_map when emb_FLAG,
    xvec_map otherwise), which must already be populated.

    :param df_: trial DataFrame with 'audio_1', 'audio_2', 'label' columns.
    :param eer_threshold_overall: decision threshold used for group-wise
        FPR/FNR when agnostic_FLAG is False.
    :param agnostic_FLAG: when True, also return this trial set's own EER
        threshold instead of group FPR/FNR.
    :param emb_FLAG: choose embedding map (True) or x-vector map (False).
    :returns: (sim, labels, eer, fpr, fnr) when agnostic_FLAG is False,
        otherwise (sim, labels, eer, eer_threshold).
    """
    if emb_FLAG:
        emb_mapping = emb_map
    else:
        emb_mapping = xvec_map
    similarity_scores= []
    labels = []
    for idx, row in tqdm(enumerate(df_.iterrows())):
        enrol = row[1]['audio_1']
        test = row[1]['audio_2']
        label = row[1]['label']
        # Print (rather than fail on) utterances missing an embedding;
        # the KeyError below will still surface the problem.
        if not enrol in emb_mapping.keys():
            print(enrol)
        if not test in emb_mapping.keys():
            print(test)
        # scipy's cosine() is a distance; 1 - distance = cosine similarity.
        sim = 1 - cosine(emb_mapping[enrol],emb_mapping[test])
        similarity_scores.append(sim)
        labels.append(label)
    fpr, tpr, threshold = roc_curve(labels, similarity_scores)
    fnr = 1 - tpr
    # EER is taken at the threshold where FPR and FNR are closest; the EER
    # value itself is the mean of the two rates at that operating point.
    eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
    eer1 = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
    eer2 = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
    eer = np.mean((eer1,eer2))
    sim = np.array(similarity_scores)
    labels = np.array(labels)
    if not agnostic_FLAG:
        # Group-specific call: evaluate at the overall (shared) threshold.
        fpr, fnr = compute_fpr_fnr(sim, labels, eer_threshold_overall)
        return sim, labels, eer, fpr, fnr
    else:
        return sim, labels, eer, eer_threshold
def compute_fpr_fnr(sim, labels_e1, thresh):
    """Compute false-positive and false-negative rates at a threshold.

    Scores strictly greater than *thresh* are accepted (predicted 1).
    Counts are computed directly with numpy instead of
    sklearn.confusion_matrix: it is lighter-weight for four counts and,
    unlike `confusion_matrix(...).ravel()`, does not raise when the
    labels happen to contain only one class (it yields nan for the
    undefined rate instead).

    :param sim: array of similarity scores.
    :param labels_e1: binary ground-truth labels (0 = impostor, 1 = target).
    :param thresh: decision threshold.
    :returns: (fpr, fnr) tuple.
    """
    preds = np.zeros(labels_e1.shape[0])
    preds[sim > thresh] = 1
    positives = labels_e1 == 1
    negatives = labels_e1 == 0
    tp = np.sum(positives & (preds == 1))
    fn = np.sum(positives & (preds == 0))
    fp = np.sum(negatives & (preds == 1))
    tn = np.sum(negatives & (preds == 0))
    fpr = fp / (fp + tn)
    fnr = fn / (fn + tp)
    return fpr, fnr
def compute_fdr(fprs, fnrs, omega=0.5):
    """Fairness Discrepancy Rate: 1 minus the weighted gap between the two
    groups' error rates.

    :param fprs: pair (fpr_group0, fpr_group1).
    :param fnrs: pair (fnr_group0, fnr_group1).
    :param omega: weight on the FPR gap; (1 - omega) weights the FNR gap.
    """
    fpr_gap = np.absolute(fprs[0] - fprs[1])
    fnr_gap = np.absolute(fnrs[0] - fnrs[1])
    weighted_gap = omega * fpr_gap + (1 - omega) * fnr_gap
    return 1 - weighted_gap
def compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_g0, sim_g1, labels_g0, labels_g1,
                  score_dir, emb_FLAG=True, omega=0.5):
    """Compute the area under the FDR-vs-FPR curve for one omega.

    For each target overall FPR in the module-level `fprs` list, finds the
    matching threshold on the overall ROC curve, evaluates each group's
    FPR/FNR at that shared threshold, computes the FDR, then integrates
    FDR over FPR with sklearn's auc(). Also writes the per-operating-point
    values to a CSV in *score_dir* and prints a summary.

    :param fpr_ov, tpr_ov, threshold_ov: overall ROC curve arrays.
    :param sim_g0, sim_g1: similarity scores for group 0 / group 1.
    :param labels_g0, labels_g1: ground-truth labels for group 0 / group 1.
    :param score_dir: output directory for the per-FPR CSV.
    :param emb_FLAG: only changes the printed label (Embeddings vs xvectors).
    :param omega: FDR weight on the FPR gap.
    :returns: (auFDR, auFDR_10) — area over all FPRs and over the first
        10 operating points (FPR <= 10%).
    """
    # FDRs at the thresholds corresponding to each target overall FPR
    fdrs = []
    fnrs = []
    for fpr in tqdm(fprs):
        # Threshold (and overall FNR) at the ROC point closest to this FPR.
        thresh = threshold_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]
        fnr = 1 - tpr_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]
        fpr_g0, fnr_g0 = compute_fpr_fnr(sim_g0, labels_g0, thresh)
        fpr_g1, fnr_g1 = compute_fpr_fnr(sim_g1, labels_g1, thresh)
        fdr = compute_fdr((fpr_g0, fpr_g1), (fnr_g0, fnr_g1), float(omega))
        # Stored as percentages rounded to 2 decimals.
        fdrs.append(np.round(fdr*100,2))
        fnrs.append(np.round(fnr*100,2))
    auFDR = auc([x*100 for x in fprs], fdrs)
    auFDR_10 = auc([x*100 for x in fprs[0:10]], fdrs[0:10])
    df = pd.DataFrame(zip(fprs,fdrs, fnrs), columns=['fpr','fdr', 'fnr'])
    if emb_FLAG:
        print("Alpha = {} auFDR auFDR_10".format(omega))
        print("Embeddings: {} {}\n".format(auFDR, auFDR_10))
        df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)
    else:
        print("Alpha = {} auFDR auFDR_10".format(omega))
        print("xvectors: {} {}\n".format(auFDR, auFDR_10))
        df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)
    return auFDR, auFDR_10
def main(args):
    """End-to-end gender-fairness evaluation of ASV scores.

    Builds gender-specific trial lists, scores trials with the extracted
    embeddings (and optionally the baseline x-vectors), reports EERs, and
    writes area-under-FDR-curve summaries for each omega.

    Fixes vs. the previous revision: the x-vector auFDR loop now captures
    compute_auFDR's return value (it previously appended stale `au`/`au10`
    values from the embedding loop, via a mis-named `aus10` list), and a
    leftover pdb.set_trace() at the end was removed.
    """
    xvec_FLAG = args.eval_xvector

    # Creating necessary trials for gender-specific evaluations
    trial_dir = args.trials_root
    trials = os.path.join(trial_dir, 'Test-Combined.csv')
    df = pd.read_csv(trials)
    df['label'] = pd.to_numeric(df['label'])
    df_m = df.loc[df["gender_1"]=='male']
    df_f = df.loc[df["gender_1"]=='female']
    df_m_m = df_m.loc[df_m["gender_2"]=='male']
    df_f_f = df_f.loc[df_f["gender_2"]=='female']
    # Persist the per-gender trial lists once so later runs can reuse them.
    if not os.path.exists(os.path.join(trial_dir,'Test-male-all.csv')):
        df_m.to_csv(os.path.join(trial_dir,'Test-male-all.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-female-all.csv')):
        df_f.to_csv(os.path.join(trial_dir,'Test-female-all.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-male-male.csv')):
        df_m_m.to_csv(os.path.join(trial_dir,'Test-male-male.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-female-female.csv')):
        df_f_f.to_csv(os.path.join(trial_dir,'Test-female-female.csv'), index=None)

    # Create directories to save ASV scores
    scores_dir_base = args.scores_root
    scores_dir_xvec = os.path.join(scores_dir_base,'baseline')
    scores_dir = os.path.join(scores_dir_base,'{}'.format(args.mode))
    os.makedirs(scores_dir_xvec, exist_ok=True)
    os.makedirs(scores_dir, exist_ok=True)

    # Load extracted embeddings (and x-vectors) into the module-level maps.
    test_utts = np.load(os.path.join(args.data_root,'test_utts.npy'))
    pred_dir = args.pred_root
    e1 = np.load(os.path.join(pred_dir,'emb1.npy'))
    for idx, utt in enumerate(test_utts):
        emb_map[utt] = e1[idx,:]
    if xvec_FLAG:
        xvec = np.load(os.path.join(args.data_root,'test_data.npy'))
        for idx, utt in enumerate(test_utts):
            xvec_map[utt] = xvec[idx,:]

    # Gender-agnostic scoring (reuse cached score files when they exist)
    print("Computing Gender-agnostic scores")
    if os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy')) and os.path.exists(os.path.join(scores_dir, 'sim_e1_overall.npy')) and os.path.exists(os.path.join(scores_dir_xvec, 'labels_overall.npy')):
        sim_e1_ov = np.load(os.path.join(scores_dir, 'sim_e1_overall.npy'))
        labels_ov = np.load(os.path.join(scores_dir_xvec, 'labels_overall.npy'))
        fpr, tpr, threshold = roc_curve(labels_ov, sim_e1_ov)
        fnr = 1 - tpr
        eer_threshold_e1_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
        # NOTE(review): this branch takes the FPR at the EER point only,
        # whereas compute_scores averages FPR and FNR; kept as-is.
        eer_e1_ov = fpr[np.nanargmin(np.absolute((fnr - fpr))) ]
        if xvec_FLAG:
            sim_xvec_ov = np.load(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy'))
            fpr, tpr, threshold = roc_curve(labels_ov, sim_xvec_ov)
            fnr = 1 - tpr
            eer_threshold_xvec_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
            eer_xvec_ov = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
        print("Done scoring Gender-agnostic trials")
    else:
        sim_e1_ov, labels_ov, eer_e1_ov, eer_threshold_e1_ov = compute_scores(df, agnostic_FLAG=True)
        np.save(os.path.join(scores_dir, 'sim_e1_overall'), sim_e1_ov)
        np.save(os.path.join(scores_dir_xvec, 'labels_overall'), labels_ov)
        if xvec_FLAG:
            sim_xvec_ov, labels_xvec_ov, eer_xvec_ov, eer_threshold_xvec_ov = compute_scores(df, agnostic_FLAG=True, emb_FLAG=False)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_overall'), sim_xvec_ov)
        print("Done scoring Gender-agnostic trials")

    # Gender-specific scoring for the embeddings
    print("Computing Gender-specific scores")
    if (not os.path.exists(os.path.join(scores_dir, 'sim_e1_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir, 'sim_e1_female_female.npy'))):
        # Group rates are evaluated at the overall (shared) EER threshold.
        sim_e1_m, labels_e1_m, eer_e1_m, fpr_e1_m, fnr_e1_m = compute_scores(df_m_m, eer_threshold_e1_ov)
        sim_e1_f, labels_e1_f, eer_e1_f, fpr_e1_f, fnr_e1_f = compute_scores(df_f_f, eer_threshold_e1_ov)
        np.save(os.path.join(scores_dir, 'sim_e1_male_male'), sim_e1_m)
        np.save(os.path.join(scores_dir, 'sim_e1_female_female'), sim_e1_f)
        np.save(os.path.join(scores_dir_xvec, 'labels_male_male'), labels_e1_m)
        np.save(os.path.join(scores_dir_xvec, 'labels_female_female'), labels_e1_f)
        print("EER_all EER_Male EER_Female")
        print("Embeddings: {} {} {}\n".format(np.round(eer_e1_ov*100,2), np.round(eer_e1_m*100,2), np.round(eer_e1_f*100,2)))
        sim_e1_g0 = sim_e1_m
        sim_e1_g1 = sim_e1_f
        labels_g0 = labels_e1_m
        labels_g1 = labels_e1_f
        print("Done scoring Gender-specific trials")
    else:
        # Cached path: load the per-group scores/labels saved by an earlier run.
        sim_e1 = []
        labels = []
        for group in groups:
            sim_e1.append(np.load(os.path.join(scores_dir, 'sim_e1_{}.npy'.format(group))))
            labels.append(np.load(os.path.join(scores_dir_xvec, 'labels_{}.npy'.format(group))))
        sim_e1_g0 = sim_e1[0]
        sim_e1_g1 = sim_e1[1]
        labels_g0 = labels[0]
        labels_g1 = labels[1]
        print("Done scoring Gender-specific trials")

    # Gender-specific scoring for the baseline x-vectors
    if xvec_FLAG:
        if (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_female_female.npy'))):
            print("Computing Gender-specific scores for x-vectors")
            sim_xvec_m, labels_xvec_m, eer_xvec_m, fpr_xvec_m, fnr_xvec_m = compute_scores(df_m_m, eer_threshold_xvec_ov, emb_FLAG=False)
            sim_xvec_f, labels_xvec_f, eer_xvec_f, fpr_xvec_f, fnr_xvec_f = compute_scores(df_f_f, eer_threshold_xvec_ov, emb_FLAG=False)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_male_male'), sim_xvec_m)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_female_female'), sim_xvec_f)
            sim_xvec_g0 = sim_xvec_m
            sim_xvec_g1 = sim_xvec_f
            print("x-vector: {} {} {}\n".format(np.round(eer_xvec_ov*100,2), np.round(eer_xvec_m*100,2),np.round(eer_xvec_f*100,2)))
            print("Done scoring Gender-specific trials for x-vectors")
        else:
            sim_xvec = []
            for group in groups:
                sim_xvec.append(np.load(os.path.join(scores_dir_xvec, 'sim_xvec_{}.npy'.format(group))))
            sim_xvec_g0 = sim_xvec[0]
            sim_xvec_g1 = sim_xvec[1]
            print("Done scoring Gender-specific trials for x-vectors")

    # Compute area under FDR-FPR curve for the embeddings
    fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_e1_ov)
    aus, au10s = [], []
    for omega in omegas:
        au, au10 = compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_e1_g0, sim_e1_g1, labels_g0, labels_g1, scores_dir, emb_FLAG=True, omega=omega)
        aus.append(au)
        au10s.append(au10)
    df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])
    df.to_csv(os.path.join(scores_dir, 'au_fdrs.csv'), index=None)

    if xvec_FLAG:
        # Same curve for the baseline x-vectors.
        fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_xvec_ov)
        # BUGFIX: was `aus, aus10 = [],[]`, leaving `au10s` holding stale
        # embedding results.
        aus, au10s = [], []
        for omega in omegas:
            # BUGFIX: capture the return value; previously it was discarded
            # and the stale embedding `au`/`au10` were appended instead.
            au, au10 = compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_xvec_g0, sim_xvec_g1, labels_g0, labels_g1, scores_dir_xvec, emb_FLAG=False, omega=omega)
            aus.append(au)
            au10s.append(au10)
        df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])
        # NOTE: filename differs from the embedding branch ('aufdrs.csv' vs
        # 'au_fdrs.csv'); kept as-is so downstream consumers don't break.
        df.to_csv(os.path.join(scores_dir_xvec, 'aufdrs.csv'), index=None)
if __name__=='__main__':
    # Command-line entry point: parse paths/flags and run the evaluation.
    parser = argparse.ArgumentParser()
    # Sub-directory name under scores_root where embedding scores are written.
    parser.add_argument('--mode', type=str, required=True)
    parser.add_argument('--trials_root', type=str, required=True,
                        help="Directory containing Test-Combined.csv")
    parser.add_argument('--data_root', type=str, required=True,
                        help="Directory containing test_utts.npy")
    parser.add_argument('--pred_root', type=str, required=True,
                        help="Directory containing Extracted embeddings")
    parser.add_argument('--scores_root', type=str, required=True,
                        help="Directory to save ASV scores")
    # When set, also score the baseline x-vectors for comparison.
    parser.add_argument('--eval_xvector', default=False, action='store_true')
    args = parser.parse_args()
    main(args)
| 46.321285 | 216 | 0.658401 | import numpy as np
import pandas as pd
import os
import pdb
from scipy.spatial.distance import cosine
from sklearn.metrics import roc_curve, confusion_matrix
import sys
from tqdm import tqdm
from sklearn.metrics import auc
import argparse
fprs = [0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5]
groups = ['male_male','female_female']
omegas = [0.0, 0.25, 0.5, 0.75, 1.0]
emb_map = {}
xvec_map = {}
def compute_scores(df_, eer_threshold_overall=0, agnostic_FLAG=False, emb_FLAG=True):
if emb_FLAG:
emb_mapping = emb_map
else:
emb_mapping = xvec_map
similarity_scores= []
labels = []
for idx, row in tqdm(enumerate(df_.iterrows())):
enrol = row[1]['audio_1']
test = row[1]['audio_2']
label = row[1]['label']
if not enrol in emb_mapping.keys():
print(enrol)
if not test in emb_mapping.keys():
print(test)
sim = 1 - cosine(emb_mapping[enrol],emb_mapping[test])
similarity_scores.append(sim)
labels.append(label)
fpr, tpr, threshold = roc_curve(labels, similarity_scores)
fnr = 1 - tpr
eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
eer1 = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
eer2 = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
eer = np.mean((eer1,eer2))
sim = np.array(similarity_scores)
labels = np.array(labels)
if not agnostic_FLAG:
fpr, fnr = compute_fpr_fnr(sim, labels, eer_threshold_overall)
return sim, labels, eer, fpr, fnr
else:
return sim, labels, eer, eer_threshold
def compute_fpr_fnr(sim,labels_e1, thresh):
preds = np.zeros(labels_e1.shape[0])
preds[sim > thresh] = 1
tn, fp, fn, tp = confusion_matrix(labels_e1, preds).ravel()
fpr = fp/(fp+tn)
fnr = fn/(fn+tp)
return fpr, fnr
def compute_fdr(fprs, fnrs, omega=0.5):
A = np.absolute(fprs[0]-fprs[1])
B = np.absolute(fnrs[0]-fnrs[1])
return 1 - (omega*A + (1-omega)*B)
def compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_g0, sim_g1, labels_g0, labels_g1,
score_dir, emb_FLAG=True, omega=0.5):
fdrs = []
fnrs = []
for fpr in tqdm(fprs):
thresh = threshold_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]
fnr = 1 - tpr_ov[np.nanargmin(np.absolute((fpr_ov-fpr)))]
fpr_g0, fnr_g0 = compute_fpr_fnr(sim_g0, labels_g0, thresh)
fpr_g1, fnr_g1 = compute_fpr_fnr(sim_g1, labels_g1, thresh)
fdr = compute_fdr((fpr_g0, fpr_g1), (fnr_g0, fnr_g1), float(omega))
fdrs.append(np.round(fdr*100,2))
fnrs.append(np.round(fnr*100,2))
auFDR = auc([x*100 for x in fprs], fdrs)
auFDR_10 = auc([x*100 for x in fprs[0:10]], fdrs[0:10])
df = pd.DataFrame(zip(fprs,fdrs, fnrs), columns=['fpr','fdr', 'fnr'])
if emb_FLAG:
print("Alpha = {} auFDR auFDR_10".format(omega))
print("Embeddings: {} {}\n".format(auFDR, auFDR_10))
df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)
else:
print("Alpha = {} auFDR auFDR_10".format(omega))
print("xvectors: {} {}\n".format(auFDR, auFDR_10))
df.to_csv(os.path.join(score_dir, 'fdr_at_fpr_gender_omega_{}.csv'.format(omega)), index=None)
return auFDR, auFDR_10
def main(args):
    """Score ASV trials overall and per gender, then report EER and auFDR fairness.

    Loads the combined trial list, materializes gender-specific trial subsets,
    computes (or restores cached) similarity scores for the learned embeddings
    and optionally the x-vector baseline, prints EERs, and writes per-omega
    auFDR fairness curves under ``args.scores_root``.

    NOTE(review): this function reads/writes module-level state — ``emb_map``
    and ``xvec_map`` (utterance id -> vector), ``groups`` (expected to be
    ['male_male', 'female_female']) and ``omegas`` (FDR weight grid). Confirm
    those globals are defined before calling.
    """
    xvec_FLAG = args.eval_xvector
    trial_dir = args.trials_root
    trials = os.path.join(trial_dir, 'Test-Combined.csv')
    df = pd.read_csv(trials)
    df['label'] = pd.to_numeric(df['label'])
    # Same-gender trial subsets (enrollment and test gender must match).
    df_m = df.loc[df["gender_1"]=='male']
    df_f = df.loc[df["gender_1"]=='female']
    df_m_m = df_m.loc[df_m["gender_2"]=='male']
    df_f_f = df_f.loc[df_f["gender_2"]=='female']
    # Cache the per-gender trial lists next to the combined list.
    if not os.path.exists(os.path.join(trial_dir,'Test-male-all.csv')):
        df_m.to_csv(os.path.join(trial_dir,'Test-male-all.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-female-all.csv')):
        df_f.to_csv(os.path.join(trial_dir,'Test-female-all.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-male-male.csv')):
        df_m_m.to_csv(os.path.join(trial_dir,'Test-male-male.csv'), index=None)
    if not os.path.exists(os.path.join(trial_dir,'Test-female-female.csv')):
        df_f_f.to_csv(os.path.join(trial_dir,'Test-female-female.csv'), index=None)
    scores_dir_base = args.scores_root
    scores_dir_xvec = os.path.join(scores_dir_base,'baseline')
    scores_dir = os.path.join(scores_dir_base,'{}'.format(args.mode))
    os.makedirs(scores_dir_xvec, exist_ok=True)
    os.makedirs(scores_dir, exist_ok=True)
    # Map utterance ids to their embedding (and optionally x-vector) rows.
    test_utts = np.load(os.path.join(args.data_root,'test_utts.npy'))
    pred_dir = args.pred_root
    e1 = np.load(os.path.join(pred_dir,'emb1.npy'))
    for idx, utt in enumerate(test_utts):
        emb_map[utt] = e1[idx,:]
    if xvec_FLAG:
        xvec = np.load(os.path.join(args.data_root,'test_data.npy'))
        for idx, utt in enumerate(test_utts):
            xvec_map[utt] = xvec[idx,:]
    print("Computing Gender-agnostic scores")
    if os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy')) and os.path.exists(os.path.join(scores_dir, 'sim_e1_overall.npy')) and os.path.exists(os.path.join(scores_dir_xvec, 'labels_overall.npy')):
        # Cached path: reload scores and recover the EER operating point.
        sim_e1_ov = np.load(os.path.join(scores_dir, 'sim_e1_overall.npy'))
        labels_ov = np.load(os.path.join(scores_dir_xvec, 'labels_overall.npy'))
        fpr, tpr, threshold = roc_curve(labels_ov, sim_e1_ov)
        fnr = 1 - tpr
        eer_threshold_e1_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
        eer_e1_ov = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
        if xvec_FLAG:
            sim_xvec_ov = np.load(os.path.join(scores_dir_xvec, 'sim_xvec_overall.npy'))
            fpr, tpr, threshold = roc_curve(labels_ov, sim_xvec_ov)
            fnr = 1 - tpr
            eer_threshold_xvec_ov = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
            eer_xvec_ov = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
        print("Done scoring Gender-agnostic trials")
    else:
        sim_e1_ov, labels_ov, eer_e1_ov, eer_threshold_e1_ov = compute_scores(df, agnostic_FLAG=True)
        np.save(os.path.join(scores_dir, 'sim_e1_overall'), sim_e1_ov)
        np.save(os.path.join(scores_dir_xvec, 'labels_overall'), labels_ov)
        if xvec_FLAG:
            sim_xvec_ov, labels_xvec_ov, eer_xvec_ov, eer_threshold_xvec_ov = compute_scores(df, agnostic_FLAG=True, emb_FLAG=False)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_overall'), sim_xvec_ov)
        print("Done scoring Gender-agnostic trials")
    print("Computing Gender-specific scores")
    if (not os.path.exists(os.path.join(scores_dir, 'sim_e1_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir, 'sim_e1_female_female.npy'))):
        sim_e1_m, labels_e1_m, eer_e1_m, fpr_e1_m, fnr_e1_m = compute_scores(df_m_m, eer_threshold_e1_ov)
        sim_e1_f, labels_e1_f, eer_e1_f, fpr_e1_f, fnr_e1_f = compute_scores(df_f_f, eer_threshold_e1_ov)
        np.save(os.path.join(scores_dir, 'sim_e1_male_male'), sim_e1_m)
        np.save(os.path.join(scores_dir, 'sim_e1_female_female'), sim_e1_f)
        np.save(os.path.join(scores_dir_xvec, 'labels_male_male'), labels_e1_m)
        np.save(os.path.join(scores_dir_xvec, 'labels_female_female'), labels_e1_f)
        print("EER_all EER_Male EER_Female")
        print("Embeddings: {} {} {}\n".format(np.round(eer_e1_ov*100,2), np.round(eer_e1_m*100,2), np.round(eer_e1_f*100,2)))
        sim_e1_g0 = sim_e1_m
        sim_e1_g1 = sim_e1_f
        labels_g0 = labels_e1_m
        labels_g1 = labels_e1_f
        print("Done scoring Gender-specific trials")
    else:
        # Cached path: group order follows the module-level ``groups`` list.
        sim_e1 = []
        labels = []
        for group in groups:
            sim_e1.append(np.load(os.path.join(scores_dir, 'sim_e1_{}.npy'.format(group))))
            labels.append(np.load(os.path.join(scores_dir_xvec, 'labels_{}.npy'.format(group))))
        sim_e1_g0 = sim_e1[0]
        sim_e1_g1 = sim_e1[1]
        labels_g0 = labels[0]
        labels_g1 = labels[1]
        print("Done scoring Gender-specific trials")
    if xvec_FLAG:
        if (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_male_male.npy'))) or (not os.path.exists(os.path.join(scores_dir_xvec, 'sim_xvec_female_female.npy'))):
            print("Computing Gender-specific scores for x-vectors")
            sim_xvec_m, labels_xvec_m, eer_xvec_m, fpr_xvec_m, fnr_xvec_m = compute_scores(df_m_m, eer_threshold_xvec_ov, emb_FLAG=False)
            sim_xvec_f, labels_xvec_f, eer_xvec_f, fpr_xvec_f, fnr_xvec_f = compute_scores(df_f_f, eer_threshold_xvec_ov, emb_FLAG=False)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_male_male'), sim_xvec_m)
            np.save(os.path.join(scores_dir_xvec, 'sim_xvec_female_female'), sim_xvec_f)
            sim_xvec_g0 = sim_xvec_m
            sim_xvec_g1 = sim_xvec_f
            print("x-vector: {} {} {}\n".format(np.round(eer_xvec_ov*100,2), np.round(eer_xvec_m*100,2),np.round(eer_xvec_f*100,2)))
            print("Done scoring Gender-specific trials for x-vectors")
        else:
            sim_xvec = []
            for group in groups:
                sim_xvec.append(np.load(os.path.join(scores_dir_xvec, 'sim_xvec_{}.npy'.format(group))))
            sim_xvec_g0 = sim_xvec[0]
            sim_xvec_g1 = sim_xvec[1]
            print("Done scoring Gender-specific trials for x-vectors")
    # Fairness curves for the embeddings.
    fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_e1_ov)
    aus, au10s = [], []
    for omega in omegas:
        au, au10 = compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_e1_g0, sim_e1_g1, labels_g0, labels_g1, scores_dir, emb_FLAG=True, omega=omega)
        aus.append(au)
        au10s.append(au10)
    df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])
    df.to_csv(os.path.join(scores_dir, 'au_fdrs.csv'), index=None)
    if xvec_FLAG:
        # Fairness curves for the x-vector baseline.
        fpr_ov, tpr_ov, threshold_ov = roc_curve(labels_ov, sim_xvec_ov)
        # BUGFIX: both accumulators are reset here (was ``aus, aus10 = [],[]``,
        # which left ``au10s`` still holding the embedding results), and the
        # return value of compute_auFDR is captured (it was discarded, so the
        # stale ``au``/``au10`` from the embeddings loop were appended).
        aus, au10s = [], []
        for omega in omegas:
            au, au10 = compute_auFDR(fpr_ov, tpr_ov, threshold_ov, sim_xvec_g0, sim_xvec_g1, labels_g0, labels_g1, scores_dir_xvec, emb_FLAG=False, omega=omega)
            aus.append(au)
            au10s.append(au10)
        df = pd.DataFrame(zip(omegas,aus, au10s), columns=['omega','au', 'au10'])
        df.to_csv(os.path.join(scores_dir_xvec, 'aufdrs.csv'), index=None)
    # BUGFIX: removed the leftover ``pdb.set_trace()`` debugging hook.
if __name__=='__main__':
    # Command-line entry point: every path argument is required;
    # --eval_xvector additionally scores the x-vector baseline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, required=True)
    parser.add_argument('--trials_root', type=str, required=True,
                        help="Directory containing Test-Combined.csv")
    parser.add_argument('--data_root', type=str, required=True,
                        help="Directory containing test_utts.npy")
    parser.add_argument('--pred_root', type=str, required=True,
                        help="Directory containing Extracted embeddings")
    parser.add_argument('--scores_root', type=str, required=True,
                        help="Directory to save ASV scores")
    # Off by default; store_true flips it on when the flag is passed.
    parser.add_argument('--eval_xvector', default=False, action='store_true')
    args = parser.parse_args()
    main(args)
| true | true |
f7273c1bf115cb1687982e0d1e6f9de4ff2abedf | 11,677 | py | Python | com/precisely/apis/model/individual_value_variable.py | PreciselyData/PreciselyAPIsSDK-Python | 28ffff0c96d81d3a53a5599c987d54d7b632b508 | [
"Apache-2.0"
] | null | null | null | com/precisely/apis/model/individual_value_variable.py | PreciselyData/PreciselyAPIsSDK-Python | 28ffff0c96d81d3a53a5599c987d54d7b632b508 | [
"Apache-2.0"
] | null | null | null | com/precisely/apis/model/individual_value_variable.py | PreciselyData/PreciselyAPIsSDK-Python | 28ffff0c96d81d3a53a5599c987d54d7b632b508 | [
"Apache-2.0"
] | null | null | null | """
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from com.precisely.apis.exceptions import ApiAttributeError
class IndividualValueVariable(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum constraints are declared for this schema.
    allowed_values = {
    }

    # No value validations (lengths, ranges, regex) are declared either.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The schema is not nullable: None is rejected as a value for this model.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'name': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
            'year': (str,),  # noqa: E501
            'value': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire
    # (identical here, but the generator always emits the map).
    attribute_map = {
        'name': 'name',  # noqa: E501
        'description': 'description',  # noqa: E501
        'year': 'year',  # noqa: E501
        'value': 'value',  # noqa: E501
    }

    read_only_vars = {
    }

    # This schema uses no allOf/oneOf/anyOf composition.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """IndividualValueVariable - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            name (str): [optional]  # noqa: E501
            description (str): [optional]  # noqa: E501
            year (str): [optional]  # noqa: E501
            value (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """IndividualValueVariable - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            name (str): [optional]  # noqa: E501
            description (str): [optional]  # noqa: E501
            year (str): [optional]  # noqa: E501
            value (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # NOTE: generated code — the attribute is assigned before this
            # read-only check fires; with read_only_vars empty for this model
            # the branch can never trigger here.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re
import sys
from com.precisely.apis.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from com.precisely.apis.exceptions import ApiAttributeError
class IndividualValueVariable(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
return {
'name': (str,),
'description': (str,),
'year': (str,),
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name',
'description': 'description',
'year': 'year',
'value': 'value',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
f7273c9914ccfd701d1ff364fe87e5615b331733 | 1,279 | py | Python | ownblock/ownblock/apps/notices/migrations/0001_initial.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | 3 | 2015-06-12T04:42:02.000Z | 2018-10-29T17:09:10.000Z | ownblock/ownblock/apps/notices/migrations/0001_initial.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | ownblock/ownblock/apps/notices/migrations/0001_initial.py | danjac/ownblock | ac662fb7efb2f04567e2f85638c1250286452611 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Initial (auto-generated) migration creating the ``Notice`` model."""

    # The buildings app and the (swappable) user model must be migrated first,
    # since Notice holds foreign keys to both.
    dependencies = [
        ('buildings', '__first__'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Notice',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                # created/modified timestamps are maintained automatically by
                # the model_utils auto fields.
                ('created', model_utils.fields.AutoCreatedField(editable=False, verbose_name='created', default=django.utils.timezone.now)),
                ('modified', model_utils.fields.AutoLastModifiedField(editable=False, verbose_name='modified', default=django.utils.timezone.now)),
                ('title', models.CharField(max_length=100)),
                ('details', models.TextField(blank=True)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('building', models.ForeignKey(to='buildings.Building')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| 36.542857 | 147 | 0.620797 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('buildings', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Notice',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, verbose_name='created', default=django.utils.timezone.now)),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, verbose_name='modified', default=django.utils.timezone.now)),
('title', models.CharField(max_length=100)),
('details', models.TextField(blank=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('building', models.ForeignKey(to='buildings.Building')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| true | true |
f7273d2cdf1526800b8a09c10a36d9eb9438cb1e | 333 | py | Python | Exercicios/Desafio033.py | victorhugof94/Python | 8b42955634f3ae44bded350ac88396a02b1f6970 | [
"MIT"
] | null | null | null | Exercicios/Desafio033.py | victorhugof94/Python | 8b42955634f3ae44bded350ac88396a02b1f6970 | [
"MIT"
] | null | null | null | Exercicios/Desafio033.py | victorhugof94/Python | 8b42955634f3ae44bded350ac88396a02b1f6970 | [
"MIT"
] | null | null | null | n1 = int(input('primeiro numero:'))
n2 = int(input('segundo numero:'))
n3 = int(input('terceiro numero:'))
menor = n1
if n2<n1 and n2<n3:
menor=n2
if n3<n1 and n3<n2:
menor=n3
maior = n1
if n2>n1 and n2>n3:
maior=n2
if n3>n1 and n3>n2:
maior=n3
print ('menor = {}'.format(menor))
print ('maior = {}'.format(maior)) | 18.5 | 35 | 0.621622 | n1 = int(input('primeiro numero:'))
n2 = int(input('segundo numero:'))
n3 = int(input('terceiro numero:'))
menor = n1
if n2<n1 and n2<n3:
menor=n2
if n3<n1 and n3<n2:
menor=n3
maior = n1
if n2>n1 and n2>n3:
maior=n2
if n3>n1 and n3>n2:
maior=n3
print ('menor = {}'.format(menor))
print ('maior = {}'.format(maior)) | true | true |
f7273d39afe2ba8bd90bcbf4e85702e9d6bb3817 | 43,399 | py | Python | podpac/core/coordinates/test/test_uniform_coordinates1d.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 46 | 2018-04-06T19:54:32.000Z | 2022-02-08T02:00:02.000Z | podpac/core/coordinates/test/test_uniform_coordinates1d.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 474 | 2018-04-05T22:21:09.000Z | 2022-02-24T14:21:16.000Z | podpac/core/coordinates/test/test_uniform_coordinates1d.py | creare-com/podpac | 7feb5c957513c146ce73ba1c36c630284f513a6e | [
"Apache-2.0"
] | 4 | 2019-04-11T17:49:53.000Z | 2020-11-29T22:36:53.000Z | from datetime import datetime
import json
import pytest
import traitlets as tl
import numpy as np
from numpy.testing import assert_equal
import podpac
from podpac.core.coordinates.utils import make_coord_array
from podpac.core.coordinates.coordinates1d import Coordinates1d
from podpac.core.coordinates.array_coordinates1d import ArrayCoordinates1d
from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d
from podpac.core.coordinates.coordinates import Coordinates
class TestUniformCoordinatesCreation(object):
def test_numerical(self):
# ascending
c = UniformCoordinates1d(0, 50, 10)
a = np.array([0, 10, 20, 30, 40, 50], dtype=float)
assert c.start == 0
assert c.stop == 50
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 6
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d(50, 0, -10)
a = np.array([50, 40, 30, 20, 10, 0], dtype=float)
assert c.start == 50
assert c.stop == 0
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 6
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_numerical_inexact(self):
# ascending
c = UniformCoordinates1d(0, 49, 10)
a = np.array([0, 10, 20, 30, 40], dtype=float)
assert c.start == 0
assert c.stop == 49
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 40])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 5
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d(50, 1, -10)
a = np.array([50, 40, 30, 20, 10], dtype=float)
assert c.start == 50
assert c.stop == 1
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [10, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.dtype == float
assert c.size == a.size
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime(self):
# ascending
c = UniformCoordinates1d("2018-01-01", "2018-01-04", "1,D")
a = np.array(["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-04")
assert c.step == np.timedelta64(1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d("2018-01-04", "2018-01-01", "-1,D")
a = np.array(["2018-01-04", "2018-01-03", "2018-01-02", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-04")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_inexact(self):
# ascending
c = UniformCoordinates1d("2018-01-01", "2018-01-06", "2,D")
a = np.array(["2018-01-01", "2018-01-03", "2018-01-05"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-06")
assert c.step == np.timedelta64(2, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d("2018-01-06", "2018-01-01", "-2,D")
a = np.array(["2018-01-06", "2018-01-04", "2018-01-02"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-06")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-2, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_month_step(self):
# ascending
c = UniformCoordinates1d("2018-01-01", "2018-04-01", "1,M")
a = np.array(["2018-01-01", "2018-02-01", "2018-03-01", "2018-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-04-01")
assert c.step == np.timedelta64(1, "M")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d("2018-04-01", "2018-01-01", "-1,M")
a = np.array(["2018-04-01", "2018-03-01", "2018-02-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-04-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "M")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_year_step(self):
# ascending, exact
c = UniformCoordinates1d("2018-01-01", "2021-01-01", "1,Y")
a = np.array(["2018-01-01", "2019-01-01", "2020-01-01", "2021-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2021-01-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending, exact
c = UniformCoordinates1d("2021-01-01", "2018-01-01", "-1,Y")
a = np.array(["2021-01-01", "2020-01-01", "2019-01-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
# ascending, inexact (two cases)
c = UniformCoordinates1d("2018-01-01", "2021-04-01", "1,Y")
a = np.array(["2018-01-01", "2019-01-01", "2020-01-01", "2021-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2021-04-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2018-04-01", "2021-01-01", "1,Y")
a = np.array(["2018-04-01", "2019-04-01", "2020-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-04-01")
assert c.stop == np.datetime64("2021-01-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending, inexact (two cases)
c = UniformCoordinates1d("2021-01-01", "2018-04-01", "-1,Y")
a = np.array(["2021-01-01", "2020-01-01", "2019-01-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-01-01")
assert c.stop == np.datetime64("2018-04-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
c = UniformCoordinates1d("2021-04-01", "2018-01-01", "-1,Y")
a = np.array(["2021-04-01", "2020-04-01", "2019-04-01", "2018-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-04-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_numerical_size(self):
# ascending
c = UniformCoordinates1d(0, 10, size=20)
assert c.start == 0
assert c.stop == 10
assert c.step == 10 / 19.0
assert_equal(c.coordinates, np.linspace(0, 10, 20))
assert_equal(c.bounds, [0, 10])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 20
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
# descending
c = UniformCoordinates1d(10, 0, size=20)
assert c.start == 10
assert c.stop == 0
assert c.step == -10 / 19.0
assert_equal(c.coordinates, np.linspace(10, 0, 20))
assert_equal(c.bounds, [0, 10])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 20
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_size(self):
# ascending
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=10)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-10")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 10
assert c.dtype == np.datetime64
assert c.is_descending == False
# descending
c = UniformCoordinates1d("2018-01-10", "2018-01-01", size=10)
assert c.start == np.datetime64("2018-01-10")
assert c.stop == np.datetime64("2018-01-01")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 10
assert c.dtype == np.datetime64
assert c.is_descending == True
# increase resolution
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=21)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-10")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 21
assert c.dtype == np.datetime64
assert c.is_descending == False
def test_datetime_size_invalid(self):
with pytest.raises(ValueError, match="Cannot divide timedelta"):
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=20)
def test_numerical_size_floating_point_error(self):
c = UniformCoordinates1d(50.619, 50.62795, size=30)
assert c.size == 30
def test_numerical_singleton(self):
# positive step
c = UniformCoordinates1d(1, 1, 10)
a = np.array([1], dtype=float)
assert c.start == 1
assert c.stop == 1
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [1, 1])
assert c.size == 1
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
# negative step
c = UniformCoordinates1d(1, 1, -10)
a = np.array([1], dtype=float)
assert c.start == 1
assert c.stop == 1
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [1, 1])
assert c.size == 1
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
def test_datetime_singleton(self):
# positive step
c = UniformCoordinates1d("2018-01-01", "2018-01-01", "1,D")
a = np.array(["2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
# negative step
c = UniformCoordinates1d("2018-01-01", "2018-01-01", "-1,D")
a = np.array(["2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
def test_from_tuple(self):
# numerical, step
c = UniformCoordinates1d.from_tuple((0, 10, 0.5))
assert c.start == 0.0
assert c.stop == 10.0
assert c.step == 0.5
# numerical, size
c = UniformCoordinates1d.from_tuple((0, 10, 20))
assert c.start == 0.0
assert c.stop == 10.0
assert c.size == 20
# datetime, step
c = UniformCoordinates1d.from_tuple(("2018-01-01", "2018-01-04", "1,D"))
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-04")
assert c.step == np.timedelta64(1, "D")
# invalid
with pytest.raises(ValueError, match="UniformCoordinates1d.from_tuple expects a tuple"):
UniformCoordinates1d.from_tuple((0, 10))
with pytest.raises(ValueError, match="UniformCoordinates1d.from_tuple expects a tuple"):
UniformCoordinates1d.from_tuple(np.array([0, 10, 0.5]))
def test_copy(self):
c = UniformCoordinates1d(0, 10, 50, name="lat")
c2 = c.copy()
assert c is not c2
assert c == c2
def test_invalid_init(self):
with pytest.raises(ValueError):
UniformCoordinates1d(0, 0, 0)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, 0)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, -10)
with pytest.raises(ValueError):
UniformCoordinates1d(50, 0, 10)
with pytest.raises(TypeError):
UniformCoordinates1d(0, "2018-01-01", 10)
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", 50, 10)
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", "2018-01-02", 10)
with pytest.raises(TypeError):
UniformCoordinates1d(0.0, "2018-01-01", "1,D")
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", 50, "1,D")
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50, "1,D")
with pytest.raises(ValueError):
UniformCoordinates1d("a", 50, 10)
with pytest.raises(ValueError):
UniformCoordinates1d(0, "b", 10)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, "a")
with pytest.raises(TypeError):
UniformCoordinates1d()
with pytest.raises(TypeError):
UniformCoordinates1d(0)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50, 10, size=6)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 10, size=20.0)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 10, size="string")
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-10", "2018-01-01", size="1,D")
class TestUniformCoordinatesEq(object):
    """Equality semantics for UniformCoordinates1d."""

    def test_equal(self):
        """Coordinates are equal iff start, stop, and step all match."""
        base = UniformCoordinates1d(0, 50, 10)
        assert base == UniformCoordinates1d(0, 50, 10)
        assert base == UniformCoordinates1d(0, 50, 10)
        for other in [
            UniformCoordinates1d(5, 50, 10),  # different start
            UniformCoordinates1d(0, 60, 10),  # different stop
            UniformCoordinates1d(0, 50, 5),  # different step
            UniformCoordinates1d(50, 0, -10),  # reversed direction
        ]:
            assert base != other

    def test_equal_array_coordinates(self):
        """Uniform coordinates equal array coordinates with the same values."""
        uniform = UniformCoordinates1d(0, 50, 10)
        assert uniform == ArrayCoordinates1d([0, 10, 20, 30, 40, 50])
        assert uniform != ArrayCoordinates1d([10, 20, 30, 40, 50, 60])
class TestUniformCoordinatesSerialization(object):
    """Round-tripping UniformCoordinates1d through its definition dict."""

    def test_definition(self):
        """definition is a JSON-serializable dict that round-trips via from_definition."""
        # numerical coordinates include the dimension name
        c = UniformCoordinates1d(0, 50, 10, name="lat")
        d = c.definition
        assert isinstance(d, dict)
        assert set(d.keys()) == {"start", "stop", "step", "name"}
        json.dumps(d, cls=podpac.core.utils.JSONEncoder)  # test serializable
        assert UniformCoordinates1d.from_definition(d) == c  # test from_definition
        # datetime coordinates
        c = UniformCoordinates1d("2018-01-01", "2018-01-03", "1,D")
        d = c.definition
        assert isinstance(d, dict)
        assert set(d.keys()) == {"start", "stop", "step"}
        json.dumps(d, cls=podpac.core.utils.JSONEncoder)  # test serializable
        assert UniformCoordinates1d.from_definition(d) == c  # test from_definition

    def test_invalid_definition(self):
        """Definitions missing required keys are rejected."""
        with pytest.raises(ValueError, match='UniformCoordinates1d definition requires "start"'):
            UniformCoordinates1d.from_definition({"stop": 50})
        with pytest.raises(ValueError, match='UniformCoordinates1d definition requires "stop"'):
            UniformCoordinates1d.from_definition({"start": 0})

    def test_from_definition_size(self):
        """from_definition also accepts a "size" key in place of "step"."""
        # numerical
        c = UniformCoordinates1d.from_definition({"start": 0, "stop": 50, "size": 6})
        assert_equal(c.coordinates, [0, 10, 20, 30, 40, 50])
        # datetime, size
        c = UniformCoordinates1d.from_definition({"start": "2018-01-01", "stop": "2018-01-03", "size": 3})
        assert_equal(c.coordinates, np.array(["2018-01-01", "2018-01-02", "2018-01-03"]).astype(np.datetime64))
class TestUniformCoordinatesIndexing(object):
    """Indexing (int, slice, index/boolean arrays) and membership tests."""

    def _check_uniform(self, c, index, start, stop, step):
        # slicing should stay uniform and preserve name/properties
        c2 = c[index]
        assert isinstance(c2, UniformCoordinates1d)
        assert c2.name == c.name
        assert c2.properties == c.properties
        assert c2.start == start
        assert c2.stop == stop
        assert c2.step == step

    def _check_array(self, c, index, expected, cls=ArrayCoordinates1d):
        # non-uniform results should be cls and preserve name/properties
        c2 = c[index]
        assert isinstance(c2, cls)
        assert c2.name == c.name
        assert c2.properties == c.properties
        assert_equal(c2.coordinates, expected)

    def test_len(self):
        assert len(UniformCoordinates1d(0, 50, 10)) == 6

    def test_index(self):
        c = UniformCoordinates1d(0, 50, 10, name="lat")
        # integer indexing
        self._check_array(c, 2, [20], cls=Coordinates1d)
        self._check_array(c, -2, [40], cls=Coordinates1d)
        # slicing keeps the coordinates uniform
        self._check_uniform(c, slice(None, 2), 0, 10, 10)
        self._check_uniform(c, slice(2, None), 20, 50, 10)
        self._check_uniform(c, slice(None, None, 2), 0, 50, 20)
        self._check_uniform(c, slice(1, -1), 10, 40, 10)
        self._check_uniform(c, slice(-3, 5), 30, 40, 10)
        self._check_uniform(c, slice(None, None, -1), 50, 0, -10)
        # index arrays (any order, including empty)
        self._check_array(c, [0, 1, 3], [0, 10, 30])
        self._check_array(c, [3, 1, 0], [30, 10, 0])
        self._check_array(c, [0, 3, 1], [0, 30, 10])
        self._check_array(c, [], [])
        self._check_array(c, slice(0, 0), [])  # empty slice degrades to array
        self._check_array(c, [], [])
        # boolean array
        self._check_array(c, [True, True, True, False, True, False], [0, 10, 20, 40])
        # invalid indices
        with pytest.raises(IndexError):
            c[0.3]
        with pytest.raises(IndexError):
            c[10]

    def test_index_descending(self):
        c = UniformCoordinates1d(50, 0, -10, name="lat")
        # integer indexing
        self._check_array(c, 2, [30], cls=Coordinates1d)
        self._check_array(c, -2, [10], cls=Coordinates1d)
        # slicing keeps the coordinates uniform
        self._check_uniform(c, slice(None, 2), 50, 40, -10)
        self._check_uniform(c, slice(2, None), 30, 0, -10)
        self._check_uniform(c, slice(None, None, 2), 50, 0, -20)
        self._check_uniform(c, slice(1, -1), 40, 10, -10)
        self._check_uniform(c, slice(-3, 5), 20, 10, -10)
        self._check_uniform(c, slice(None, None, -1), 0, 50, 10)
        # index arrays
        self._check_array(c, [0, 1, 3], [50, 40, 20])
        self._check_array(c, [3, 1, 0], [20, 40, 50])
        self._check_array(c, [0, 3, 1], [50, 20, 40])
        # boolean array
        self._check_array(c, [True, True, True, False, True, False], [50, 40, 30, 10])

    def test_in(self):
        # numerical, ascending and descending
        for c in [UniformCoordinates1d(0, 50, 10, name="lat"), UniformCoordinates1d(50, 0, -10, name="lat")]:
            for value in [0, 10, 50]:
                assert value in c
            for value in [-10, 60, 5, np.datetime64("2018"), "a"]:
                assert value not in c
        # datetime with a 2-day step
        c = UniformCoordinates1d("2020-01-01", "2020-01-09", "2,D", name="time")
        for value in ["2020-01-01", "2020-01-03", "2020-01-09"]:
            assert np.datetime64(value) in c
        for value in [np.datetime64("2020-01-11"), np.datetime64("2020-01-02"), 10, "a"]:
            assert value not in c
class TestArrayCoordinatesAreaBounds(object):
    """get_area_bounds with point, uniform, segment, and polygon boundaries."""

    def test_get_area_bounds_numerical(self):
        c = UniformCoordinates1d(0, 50, 10)
        # point coordinates: area bounds equal the coordinate bounds
        assert_equal(c.get_area_bounds(None), [0.0, 50.0])
        # uniform boundary: expand by the half-width on both sides
        assert_equal(c.get_area_bounds(0.5), [-0.5, 50.5])
        # segment boundary
        assert_equal(c.get_area_bounds([-0.2, 0.7]), [-0.2, 50.7])
        # polygon boundary (i.e. there would be corresponding offsets for another dimension)
        assert_equal(c.get_area_bounds([-0.2, -0.5, 0.7, 0.5]), [-0.5, 50.7])

    def test_get_area_bounds_datetime(self):
        c = UniformCoordinates1d("2018-01-01", "2018-01-04", "1,D")
        # point coordinates
        assert_equal(c.get_area_bounds(None), make_coord_array(["2018-01-01", "2018-01-04"]))
        # uniform boundaries at day, month, and year resolution
        assert_equal(c.get_area_bounds("1,D"), make_coord_array(["2017-12-31", "2018-01-05"]))
        assert_equal(c.get_area_bounds("1,M"), make_coord_array(["2017-12-01", "2018-02-04"]))
        assert_equal(c.get_area_bounds("1,Y"), make_coord_array(["2017-01-01", "2019-01-04"]))
        # segment boundary
        assert_equal(c.get_area_bounds(["0,h", "12,h"]), make_coord_array(["2018-01-01 00:00", "2018-01-04 12:00"]))
class TestUniformCoordinatesSelection(object):
    """select() on uniform coordinates: shortcuts, inner/outer selection, empty results."""

    def _check_uniform(self, c, bounds, start, stop, step, **kwargs):
        # check the selection both without and with return_index
        s = c.select(bounds, **kwargs)
        assert s.start == start
        assert s.stop == stop
        assert s.step == step
        s, I = c.select(bounds, return_index=True, **kwargs)
        assert s.start == start
        assert s.stop == stop
        assert s.step == step
        assert_equal(c[I], s)

    def _check_empty(self, c, bounds, **kwargs):
        # an empty selection degrades to an empty ArrayCoordinates1d
        s = c.select(bounds, **kwargs)
        assert isinstance(s, ArrayCoordinates1d)
        assert_equal(s.coordinates, [])
        s, I = c.select(bounds, return_index=True, **kwargs)
        assert isinstance(s, ArrayCoordinates1d)
        assert_equal(s.coordinates, [])
        assert_equal(c.coordinates[I], [])

    def test_select_all_shortcut(self):
        c = UniformCoordinates1d(20.0, 70.0, 10.0)
        self._check_uniform(c, [0, 100], 20.0, 70.0, 10.0)

    def test_select_none_shortcut(self):
        c = UniformCoordinates1d(20.0, 70.0, 10.0)
        self._check_empty(c, [100, 200])  # entirely above
        self._check_empty(c, [0, 5])  # entirely below

    def test_select_ascending(self):
        c = UniformCoordinates1d(20.0, 70.0, 10.0)
        self._check_uniform(c, [35.0, 55.0], 40.0, 50.0, 10.0)  # inner
        self._check_uniform(c, [30.0, 60.0], 30.0, 60.0, 10.0)  # inner with aligned bounds
        self._check_uniform(c, [45, 100], 50.0, 70.0, 10.0)  # above
        self._check_uniform(c, [5, 55], 20.0, 50.0, 10.0)  # below
        self._check_empty(c, [52, 55])  # between coordinates
        self._check_empty(c, [70, 30])  # backwards bounds

    def test_select_descending(self):
        c = UniformCoordinates1d(70.0, 20.0, -10.0)
        self._check_uniform(c, [35.0, 55.0], 50.0, 40.0, -10.0)  # inner
        self._check_uniform(c, [30.0, 60.0], 60.0, 30.0, -10.0)  # inner with aligned bounds
        self._check_uniform(c, [45, 100], 70.0, 50.0, -10.0)  # above
        self._check_uniform(c, [5, 55], 50.0, 20.0, -10.0)  # below
        self._check_empty(c, [52, 55])  # between coordinates
        self._check_empty(c, [70, 30])  # backwards bounds

    def test_select_outer(self):
        c = UniformCoordinates1d(20.0, 70.0, 10.0)
        self._check_uniform(c, [35.0, 55.0], 30.0, 60.0, 10.0, outer=True)  # inner
        self._check_uniform(c, [30.0, 60.0], 30.0, 60.0, 10.0, outer=True)  # inner with aligned bounds
        self._check_uniform(c, [45, 100], 40.0, 70.0, 10.0, outer=True)  # above
        self._check_uniform(c, [5, 55], 20.0, 60.0, 10.0, outer=True)  # below
        self._check_uniform(c, [52, 55], 50.0, 60.0, 10.0, outer=True)  # between coordinates
        self._check_empty(c, [70, 30], outer=True)  # backwards bounds

    def test_select_time_variable_precision(self):
        c = UniformCoordinates1d("2012-05-19", "2012-05-20", "1,D", name="time")
        c2 = UniformCoordinates1d("2012-05-20T12:00:00", "2012-05-21T12:00:00", "1,D", name="time")
        assert c.select(c2.bounds, outer=True).size == 1
        assert c.select(c2.bounds, outer=False).size == 0
        assert c2.select(c.bounds).size == 1
class TestUniformCoordinatesMethods(object):
    """unique, simplify, flatten, reshape, and issubset behavior."""

    def test_unique(self):
        c = UniformCoordinates1d(1, 5, step=1)
        result = c.unique()
        assert result == c and result is not c
        result, I = c.unique(return_index=True)
        assert result == c and result is not c
        assert result == c[I]

    def test_simplify(self):
        # already-uniform coordinates simplify to an equal (but distinct) object
        for c in [
            UniformCoordinates1d(1, 5, step=1),
            UniformCoordinates1d(4, 0, step=-2),  # reversed, step -2
            UniformCoordinates1d("2020-01-01", "2020-01-05", step="1,D"),  # time
            UniformCoordinates1d("2020-01-01T12:00", "2020-01-01T08:00", step="-3,h"),  # time, reversed
        ]:
            result = c.simplify()
            assert result == c and result is not c

    def test_flatten(self):
        c = UniformCoordinates1d(1, 5, step=1)
        result = c.flatten()
        assert result == c and result is not c

    def test_reshape(self):
        c = UniformCoordinates1d(1, 6, step=1, name="lat")
        assert c.reshape((2, 3)) == ArrayCoordinates1d(c.coordinates.reshape((2, 3)), name="lat")

    def test_issubset(self):
        c1 = UniformCoordinates1d(2, 1, step=-1)
        c2 = UniformCoordinates1d(1, 3, step=1)
        c3 = UniformCoordinates1d(0, 2, step=1)
        c4 = UniformCoordinates1d(1, 4, step=0.5)
        c5 = UniformCoordinates1d(1.5, 2.5, step=0.5)
        c6 = UniformCoordinates1d(1.4, 2.4, step=0.5)
        c7 = UniformCoordinates1d(1.4, 2.4, step=10)
        # every coordinates object is a subset of itself
        assert c1.issubset(c1)
        # true subsets
        for small, large in [(c1, c2), (c1, c3), (c1, c4), (c5, c4), (c7, c6)]:
            assert small.issubset(large)
        # not subsets
        for small, large in [(c2, c1), (c2, c3), (c3, c1), (c3, c2), (c4, c1), (c6, c4)]:
            assert not small.issubset(large)

    def test_issubset_datetime(self):
        c1 = UniformCoordinates1d("2020-01-01", "2020-01-03", "1,D")
        c2 = UniformCoordinates1d("2020-01-01", "2020-01-03", "2,D")
        c3 = UniformCoordinates1d("2020-01-01", "2020-01-05", "1,D")
        c4 = UniformCoordinates1d("2020-01-05", "2020-01-01", "-2,D")
        # self
        assert c1.issubset(c1)
        # same resolution
        assert c1.issubset(c3)
        assert c2.issubset(c1)
        assert c2.issubset(c4)
        assert not c1.issubset(c2)
        assert not c1.issubset(c4)
        assert not c3.issubset(c1)
        # different resolution
        c5 = UniformCoordinates1d("2020-01-01T00:00", "2020-01-03T00:00", "1,D")
        c6 = UniformCoordinates1d("2020-01-01T00:00", "2020-01-03T00:00", "6,h")
        assert c1.issubset(c5)
        assert c5.issubset(c1)
        assert c1.issubset(c6)
        assert not c6.issubset(c1)

    def test_issubset_dtype(self):
        # numerical and datetime coordinates are never subsets of each other
        numerical = UniformCoordinates1d(0, 10, step=1)
        times = UniformCoordinates1d("2018", "2020", step="1,Y")
        assert not numerical.issubset(times)
        assert not times.issubset(numerical)

    def test_issubset_array_coordinates(self):
        u = UniformCoordinates1d(start=1, stop=3, step=1)
        # order in the array does not matter
        assert u.issubset(ArrayCoordinates1d([1, 3, 2]))
        assert u.issubset(ArrayCoordinates1d([1, 2, 3]))
        assert not u.issubset(ArrayCoordinates1d([1, 3, 4]))
        assert not u.issubset(ArrayCoordinates1d([]))

    def test_issubset_coordinates(self):
        u = UniformCoordinates1d(1, 3, 1, name="lat")
        # only the matching dimension of the Coordinates object is considered
        assert u.issubset(Coordinates([[1, 2, 3], [10, 20, 30]], dims=["lat", "lon"]))
        assert not u.issubset(Coordinates([[1, 2, 4], [10, 20, 30]], dims=["lat", "lon"]))
        assert not u.issubset(Coordinates([[10, 20, 30]], dims=["alt"]))
| 34.999194 | 111 | 0.57446 | from datetime import datetime
import json
import pytest
import traitlets as tl
import numpy as np
from numpy.testing import assert_equal
import podpac
from podpac.core.coordinates.utils import make_coord_array
from podpac.core.coordinates.coordinates1d import Coordinates1d
from podpac.core.coordinates.array_coordinates1d import ArrayCoordinates1d
from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d
from podpac.core.coordinates.coordinates import Coordinates
class TestUniformCoordinatesCreation(object):
def test_numerical(self):
c = UniformCoordinates1d(0, 50, 10)
a = np.array([0, 10, 20, 30, 40, 50], dtype=float)
assert c.start == 0
assert c.stop == 50
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 6
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d(50, 0, -10)
a = np.array([50, 40, 30, 20, 10, 0], dtype=float)
assert c.start == 50
assert c.stop == 0
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 6
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_numerical_inexact(self):
c = UniformCoordinates1d(0, 49, 10)
a = np.array([0, 10, 20, 30, 40], dtype=float)
assert c.start == 0
assert c.stop == 49
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [0, 40])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 5
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d(50, 1, -10)
a = np.array([50, 40, 30, 20, 10], dtype=float)
assert c.start == 50
assert c.stop == 1
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [10, 50])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.dtype == float
assert c.size == a.size
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime(self):
c = UniformCoordinates1d("2018-01-01", "2018-01-04", "1,D")
a = np.array(["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-04")
assert c.step == np.timedelta64(1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2018-01-04", "2018-01-01", "-1,D")
a = np.array(["2018-01-04", "2018-01-03", "2018-01-02", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-04")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_inexact(self):
c = UniformCoordinates1d("2018-01-01", "2018-01-06", "2,D")
a = np.array(["2018-01-01", "2018-01-03", "2018-01-05"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-06")
assert c.step == np.timedelta64(2, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2018-01-06", "2018-01-01", "-2,D")
a = np.array(["2018-01-06", "2018-01-04", "2018-01-02"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-06")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-2, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_month_step(self):
c = UniformCoordinates1d("2018-01-01", "2018-04-01", "1,M")
a = np.array(["2018-01-01", "2018-02-01", "2018-03-01", "2018-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-04-01")
assert c.step == np.timedelta64(1, "M")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2018-04-01", "2018-01-01", "-1,M")
a = np.array(["2018-04-01", "2018-03-01", "2018-02-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-04-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "M")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_year_step(self):
c = UniformCoordinates1d("2018-01-01", "2021-01-01", "1,Y")
a = np.array(["2018-01-01", "2019-01-01", "2020-01-01", "2021-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2021-01-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2021-01-01", "2018-01-01", "-1,Y")
a = np.array(["2021-01-01", "2020-01-01", "2019-01-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
c = UniformCoordinates1d("2018-01-01", "2021-04-01", "1,Y")
a = np.array(["2018-01-01", "2019-01-01", "2020-01-01", "2021-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2021-04-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2018-04-01", "2021-01-01", "1,Y")
a = np.array(["2018-04-01", "2019-04-01", "2020-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-04-01")
assert c.stop == np.datetime64("2021-01-01")
assert c.step == np.timedelta64(1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d("2021-01-01", "2018-04-01", "-1,Y")
a = np.array(["2021-01-01", "2020-01-01", "2019-01-01", "2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-01-01")
assert c.stop == np.datetime64("2018-04-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
c = UniformCoordinates1d("2021-04-01", "2018-01-01", "-1,Y")
a = np.array(["2021-04-01", "2020-04-01", "2019-04-01", "2018-04-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2021-04-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "Y")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_numerical_size(self):
c = UniformCoordinates1d(0, 10, size=20)
assert c.start == 0
assert c.stop == 10
assert c.step == 10 / 19.0
assert_equal(c.coordinates, np.linspace(0, 10, 20))
assert_equal(c.bounds, [0, 10])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 20
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
c = UniformCoordinates1d(10, 0, size=20)
assert c.start == 10
assert c.stop == 0
assert c.step == -10 / 19.0
assert_equal(c.coordinates, np.linspace(10, 0, 20))
assert_equal(c.bounds, [0, 10])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 20
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
def test_datetime_size(self):
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=10)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-10")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 10
assert c.dtype == np.datetime64
assert c.is_descending == False
c = UniformCoordinates1d("2018-01-10", "2018-01-01", size=10)
assert c.start == np.datetime64("2018-01-10")
assert c.stop == np.datetime64("2018-01-01")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 10
assert c.dtype == np.datetime64
assert c.is_descending == True
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=21)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-10")
assert_equal(c.bounds, [np.datetime64("2018-01-01"), np.datetime64("2018-01-10")])
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 21
assert c.dtype == np.datetime64
assert c.is_descending == False
def test_datetime_size_invalid(self):
with pytest.raises(ValueError, match="Cannot divide timedelta"):
c = UniformCoordinates1d("2018-01-01", "2018-01-10", size=20)
def test_numerical_size_floating_point_error(self):
c = UniformCoordinates1d(50.619, 50.62795, size=30)
assert c.size == 30
def test_numerical_singleton(self):
c = UniformCoordinates1d(1, 1, 10)
a = np.array([1], dtype=float)
assert c.start == 1
assert c.stop == 1
assert c.step == 10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [1, 1])
assert c.size == 1
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
c = UniformCoordinates1d(1, 1, -10)
a = np.array([1], dtype=float)
assert c.start == 1
assert c.stop == 1
assert c.step == -10
assert_equal(c.coordinates, a)
assert_equal(c.bounds, [1, 1])
assert c.size == 1
assert c.dtype == float
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
def test_datetime_singleton(self):
c = UniformCoordinates1d("2018-01-01", "2018-01-01", "1,D")
a = np.array(["2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[0, -1]])
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
c = UniformCoordinates1d("2018-01-01", "2018-01-01", "-1,D")
a = np.array(["2018-01-01"]).astype(np.datetime64)
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-01")
assert c.step == np.timedelta64(-1, "D")
assert_equal(c.coordinates, a)
assert_equal(c.bounds, a[[-1, 0]])
assert c.size == a.size
assert c.dtype == np.datetime64
assert c.is_monotonic == True
assert c.is_descending == None
assert c.is_uniform == True
def test_from_tuple(self):
c = UniformCoordinates1d.from_tuple((0, 10, 0.5))
assert c.start == 0.0
assert c.stop == 10.0
assert c.step == 0.5
c = UniformCoordinates1d.from_tuple((0, 10, 20))
assert c.start == 0.0
assert c.stop == 10.0
assert c.size == 20
c = UniformCoordinates1d.from_tuple(("2018-01-01", "2018-01-04", "1,D"))
assert c.start == np.datetime64("2018-01-01")
assert c.stop == np.datetime64("2018-01-04")
assert c.step == np.timedelta64(1, "D")
with pytest.raises(ValueError, match="UniformCoordinates1d.from_tuple expects a tuple"):
UniformCoordinates1d.from_tuple((0, 10))
with pytest.raises(ValueError, match="UniformCoordinates1d.from_tuple expects a tuple"):
UniformCoordinates1d.from_tuple(np.array([0, 10, 0.5]))
def test_copy(self):
c = UniformCoordinates1d(0, 10, 50, name="lat")
c2 = c.copy()
assert c is not c2
assert c == c2
def test_invalid_init(self):
with pytest.raises(ValueError):
UniformCoordinates1d(0, 0, 0)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, 0)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, -10)
with pytest.raises(ValueError):
UniformCoordinates1d(50, 0, 10)
with pytest.raises(TypeError):
UniformCoordinates1d(0, "2018-01-01", 10)
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", 50, 10)
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", "2018-01-02", 10)
with pytest.raises(TypeError):
UniformCoordinates1d(0.0, "2018-01-01", "1,D")
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-01", 50, "1,D")
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50, "1,D")
with pytest.raises(ValueError):
UniformCoordinates1d("a", 50, 10)
with pytest.raises(ValueError):
UniformCoordinates1d(0, "b", 10)
with pytest.raises(ValueError):
UniformCoordinates1d(0, 50, "a")
with pytest.raises(TypeError):
UniformCoordinates1d()
with pytest.raises(TypeError):
UniformCoordinates1d(0)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 50, 10, size=6)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 10, size=20.0)
with pytest.raises(TypeError):
UniformCoordinates1d(0, 10, size="string")
with pytest.raises(TypeError):
UniformCoordinates1d("2018-01-10", "2018-01-01", size="1,D")
class TestUniformCoordinatesEq(object):
    def test_equal(self):
        """Coordinates are equal iff start, stop, and step all match."""
        base = UniformCoordinates1d(0, 50, 10)
        equal = [
            UniformCoordinates1d(0, 50, 10),
            UniformCoordinates1d(0, 50, 10),
        ]
        unequal = [
            UniformCoordinates1d(5, 50, 10),   # different start
            UniformCoordinates1d(0, 60, 10),   # different stop
            UniformCoordinates1d(0, 50, 5),    # different step
            UniformCoordinates1d(50, 0, -10),  # reversed direction
        ]
        for other in equal:
            assert base == other
        for other in unequal:
            assert base != other

    def test_equal_array_coordinates(self):
        """A uniform coordinate equals an array coordinate with the same values."""
        uniform = UniformCoordinates1d(0, 50, 10)
        assert uniform == ArrayCoordinates1d([0, 10, 20, 30, 40, 50])
        assert uniform != ArrayCoordinates1d([10, 20, 30, 40, 50, 60])
class TestUniformCoordinatesSerialization(object):
    def test_definition(self):
        """definition is a JSON-serializable dict that round-trips to an equal object."""
        cases = [
            (UniformCoordinates1d(0, 50, 10, name="lat"), {"start", "stop", "step", "name"}),
            (UniformCoordinates1d("2018-01-01", "2018-01-03", "1,D"), {"start", "stop", "step"}),
        ]
        for c, expected_keys in cases:
            d = c.definition
            assert isinstance(d, dict)
            assert set(d.keys()) == expected_keys
            json.dumps(d, cls=podpac.core.utils.JSONEncoder)  # must not raise
            assert UniformCoordinates1d.from_definition(d) == c

    def test_invalid_definition(self):
        """Definitions missing required keys are rejected."""
        with pytest.raises(ValueError, match='UniformCoordinates1d definition requires "start"'):
            UniformCoordinates1d.from_definition({"stop": 50})
        with pytest.raises(ValueError, match='UniformCoordinates1d definition requires "stop"'):
            UniformCoordinates1d.from_definition({"start": 0})

    def test_from_definition_size(self):
        """A definition may specify a size instead of a step."""
        c = UniformCoordinates1d.from_definition({"start": 0, "stop": 50, "size": 6})
        assert_equal(c.coordinates, [0, 10, 20, 30, 40, 50])

        c = UniformCoordinates1d.from_definition({"start": "2018-01-01", "stop": "2018-01-03", "size": 3})
        assert_equal(c.coordinates, np.array(["2018-01-01", "2018-01-02", "2018-01-03"]).astype(np.datetime64))
class TestUniformCoordinatesIndexing(object):
    def test_len(self):
        """len() matches the number of coordinates (0..50 by 10 -> 6 values)."""
        assert len(UniformCoordinates1d(0, 50, 10)) == 6

    def test_index(self):
        """Indexing an ascending uniform coordinate preserves name and properties."""
        c = UniformCoordinates1d(0, 50, 10, name="lat")

        # integer indexing -> single-coordinate Coordinates1d
        for index, expected in [(2, [20]), (-2, [40])]:
            c2 = c[index]
            assert isinstance(c2, Coordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert_equal(c2.coordinates, expected)

        # slicing preserves uniformity
        for index, (start, stop, step) in [
            (slice(None, 2), (0, 10, 10)),
            (slice(2, None), (20, 50, 10)),
            (slice(None, None, 2), (0, 50, 20)),
            (slice(1, -1), (10, 40, 10)),
            (slice(-3, 5), (30, 40, 10)),
            (slice(None, None, -1), (50, 0, -10)),  # reversal flips the step sign
        ]:
            c2 = c[index]
            assert isinstance(c2, UniformCoordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert c2.start == start
            assert c2.stop == stop
            assert c2.step == step

        # fancy/boolean/empty indexing falls back to ArrayCoordinates1d
        for index, expected in [
            ([0, 1, 3], [0, 10, 30]),
            ([3, 1, 0], [30, 10, 0]),
            ([0, 3, 1], [0, 30, 10]),
            ([], []),
            (slice(0, 0), []),
            ([], []),
            ([True, True, True, False, True, False], [0, 10, 20, 40]),
        ]:
            c2 = c[index]
            assert isinstance(c2, ArrayCoordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert_equal(c2.coordinates, expected)

        # invalid indices
        with pytest.raises(IndexError):
            c[0.3]
        with pytest.raises(IndexError):
            c[10]

    def test_index_descending(self):
        """Indexing a descending uniform coordinate preserves name and properties."""
        c = UniformCoordinates1d(50, 0, -10, name="lat")

        # integer indexing -> single-coordinate Coordinates1d
        for index, expected in [(2, [30]), (-2, [10])]:
            c2 = c[index]
            assert isinstance(c2, Coordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert_equal(c2.coordinates, expected)

        # slicing preserves uniformity
        for index, (start, stop, step) in [
            (slice(None, 2), (50, 40, -10)),
            (slice(2, None), (30, 0, -10)),
            (slice(None, None, 2), (50, 0, -20)),
            (slice(1, -1), (40, 10, -10)),
            (slice(-3, 5), (20, 10, -10)),
            (slice(None, None, -1), (0, 50, 10)),  # reversal makes it ascending
        ]:
            c2 = c[index]
            assert isinstance(c2, UniformCoordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert c2.start == start
            assert c2.stop == stop
            assert c2.step == step

        # fancy/boolean indexing falls back to ArrayCoordinates1d
        for index, expected in [
            ([0, 1, 3], [50, 40, 20]),
            ([3, 1, 0], [20, 40, 50]),
            ([0, 3, 1], [50, 20, 40]),
            ([True, True, True, False, True, False], [50, 40, 30, 10]),
        ]:
            c2 = c[index]
            assert isinstance(c2, ArrayCoordinates1d)
            assert c2.name == c.name
            assert c2.properties == c.properties
            assert_equal(c2.coordinates, expected)

    def test_in(self):
        """Membership tests exact grid values only; wrong types are never members."""
        for c in [
            UniformCoordinates1d(0, 50, 10, name="lat"),
            UniformCoordinates1d(50, 0, -10, name="lat"),
        ]:
            for value in [0, 10, 50]:
                assert value in c
            for value in [-10, 60, 5, np.datetime64("2018"), "a"]:
                assert value not in c

        c = UniformCoordinates1d("2020-01-01", "2020-01-09", "2,D", name="time")
        for date in ["2020-01-01", "2020-01-03", "2020-01-09"]:
            assert np.datetime64(date) in c
        for value in [np.datetime64("2020-01-11"), np.datetime64("2020-01-02"), 10, "a"]:
            assert value not in c
class TestArrayCoordinatesAreaBounds(object):
    # NOTE(review): despite the class name, these tests exercise UniformCoordinates1d.
    def test_get_area_bounds_numerical(self):
        """Boundary may be None, a scalar, a [lo, hi] pair, or a polygon-style list."""
        c = UniformCoordinates1d(0, 50, 10)
        cases = [
            (None, [0.0, 50.0]),
            (0.5, [-0.5, 50.5]),
            ([-0.2, 0.7], [-0.2, 50.7]),
            ([-0.2, -0.5, 0.7, 0.5], [-0.5, 50.7]),
        ]
        for boundary, expected in cases:
            assert_equal(c.get_area_bounds(boundary), expected)

    def test_get_area_bounds_datetime(self):
        """Datetime boundaries expand the bounds by the given timedelta string."""
        c = UniformCoordinates1d("2018-01-01", "2018-01-04", "1,D")
        cases = [
            (None, ["2018-01-01", "2018-01-04"]),
            ("1,D", ["2017-12-31", "2018-01-05"]),
            ("1,M", ["2017-12-01", "2018-02-04"]),
            ("1,Y", ["2017-01-01", "2019-01-04"]),
            (["0,h", "12,h"], ["2018-01-01 00:00", "2018-01-04 12:00"]),
        ]
        for boundary, expected in cases:
            assert_equal(c.get_area_bounds(boundary), make_coord_array(expected))
class TestUniformCoordinatesSelection(object):
    def test_select_all_shortcut(self):
        """Bounds containing all coordinates select everything unchanged."""
        c = UniformCoordinates1d(20.0, 70.0, 10.0)

        s = c.select([0, 100])
        assert (s.start, s.stop, s.step) == (20.0, 70.0, 10.0)

        s, I = c.select([0, 100], return_index=True)
        assert (s.start, s.stop, s.step) == (20.0, 70.0, 10.0)
        assert_equal(c[I], s)

    def test_select_none_shortcut(self):
        """Bounds entirely outside the coordinates select nothing."""
        c = UniformCoordinates1d(20.0, 70.0, 10.0)
        for bounds in [[100, 200], [0, 5]]:  # above and below
            s = c.select(bounds)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])

            s, I = c.select(bounds, return_index=True)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])
            assert c[I] == s

    def test_select_ascending(self):
        """Default (inner) selection keeps only coordinates inside the bounds."""
        c = UniformCoordinates1d(20.0, 70.0, 10.0)

        nonempty = [
            ([35.0, 55.0], (40.0, 50.0, 10.0)),   # inner selection
            ([30.0, 60.0], (30.0, 60.0, 10.0)),   # exact bounds
            ([45, 100], (50.0, 70.0, 10.0)),      # open-ended above
            ([5, 55], (20.0, 50.0, 10.0)),        # open-ended below
        ]
        for bounds, (start, stop, step) in nonempty:
            s = c.select(bounds)
            assert (s.start, s.stop, s.step) == (start, stop, step)

            s, I = c.select(bounds, return_index=True)
            assert (s.start, s.stop, s.step) == (start, stop, step)
            assert c[I] == s

        # bounds between grid points, or backwards, select nothing
        for bounds in [[52, 55], [70, 30]]:
            s = c.select(bounds)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])

            s, I = c.select(bounds, return_index=True)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])
            assert_equal(c.coordinates[I], [])

    def test_select_descending(self):
        """Selection preserves descending order."""
        c = UniformCoordinates1d(70.0, 20.0, -10.0)

        nonempty = [
            ([35.0, 55.0], (50.0, 40.0, -10.0)),
            ([30.0, 60.0], (60.0, 30.0, -10.0)),
            ([45, 100], (70.0, 50.0, -10.0)),
            ([5, 55], (50.0, 20.0, -10.0)),
        ]
        for bounds, (start, stop, step) in nonempty:
            s = c.select(bounds)
            assert (s.start, s.stop, s.step) == (start, stop, step)

            s, I = c.select(bounds, return_index=True)
            assert (s.start, s.stop, s.step) == (start, stop, step)
            assert c[I] == s

        # bounds between grid points, or backwards, select nothing
        for bounds in [[52, 55], [70, 30]]:
            s = c.select(bounds)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])

            s, I = c.select(bounds, return_index=True)
            assert isinstance(s, ArrayCoordinates1d)
            assert_equal(s.coordinates, [])
            assert_equal(c.coordinates[I], [])

    def test_select_outer(self):
        """outer=True also keeps the nearest coordinate on each side of the bounds."""
        c = UniformCoordinates1d(20.0, 70.0, 10.0)

        nonempty = [
            ([35.0, 55.0], (30.0, 60.0, 10.0)),
            ([30.0, 60.0], (30.0, 60.0, 10.0)),
            ([45, 100], (40.0, 70.0, 10.0)),
            ([5, 55], (20.0, 60.0, 10.0)),
            ([52, 55], (50.0, 60.0, 10.0)),  # between grid points: outer still selects
        ]
        for bounds, (start, stop, step) in nonempty:
            s = c.select(bounds, outer=True)
            assert (s.start, s.stop, s.step) == (start, stop, step)

            s, I = c.select(bounds, outer=True, return_index=True)
            assert (s.start, s.stop, s.step) == (start, stop, step)
            assert c[I] == s

        # backwards bounds still select nothing
        s = c.select([70, 30], outer=True)
        assert isinstance(s, ArrayCoordinates1d)
        assert_equal(s.coordinates, [])

        s, I = c.select([70, 30], outer=True, return_index=True)
        assert isinstance(s, ArrayCoordinates1d)
        assert_equal(s.coordinates, [])
        assert_equal(c.coordinates[I], [])

    def test_select_time_variable_precision(self):
        """Selection works across mixed day/second datetime precisions."""
        daily = UniformCoordinates1d("2012-05-19", "2012-05-20", "1,D", name="time")
        precise = UniformCoordinates1d("2012-05-20T12:00:00", "2012-05-21T12:00:00", "1,D", name="time")
        assert daily.select(precise.bounds, outer=True).size == 1
        assert daily.select(precise.bounds, outer=False).size == 0
        assert precise.select(daily.bounds).size == 1
class TestUniformCoordinatesMethods(object):
    def test_unique(self):
        """Uniform coordinates are already unique; unique() returns an equal copy."""
        c = UniformCoordinates1d(1, 5, step=1)

        u = c.unique()
        assert u == c and u is not c

        u, I = c.unique(return_index=True)
        assert u == c and u is not c
        assert u == c[I]

    def test_simplify(self):
        """simplify() of uniform coordinates is an equal copy."""
        originals = [
            UniformCoordinates1d(1, 5, step=1),
            UniformCoordinates1d(4, 0, step=-2),
            UniformCoordinates1d("2020-01-01", "2020-01-05", step="1,D"),
            UniformCoordinates1d("2020-01-01T12:00", "2020-01-01T08:00", step="-3,h"),
        ]
        for c in originals:
            simplified = c.simplify()
            assert simplified == c and simplified is not c

    def test_flatten(self):
        """flatten() of 1d coordinates is an equal copy."""
        c = UniformCoordinates1d(1, 5, step=1)
        flat = c.flatten()
        assert flat == c and flat is not c

    def test_reshape(self):
        """reshape() produces ArrayCoordinates1d with the reshaped values."""
        c = UniformCoordinates1d(1, 6, step=1, name="lat")
        reshaped = c.reshape((2, 3))
        assert reshaped == ArrayCoordinates1d(c.coordinates.reshape((2, 3)), name="lat")

    def test_issubset(self):
        """issubset holds when every coordinate lies on the other's grid."""
        c1 = UniformCoordinates1d(2, 1, step=-1)
        c2 = UniformCoordinates1d(1, 3, step=1)
        c3 = UniformCoordinates1d(0, 2, step=1)
        c4 = UniformCoordinates1d(1, 4, step=0.5)
        c5 = UniformCoordinates1d(1.5, 2.5, step=0.5)
        c6 = UniformCoordinates1d(1.4, 2.4, step=0.5)
        c7 = UniformCoordinates1d(1.4, 2.4, step=10)

        for small, big in [(c1, c1), (c1, c2), (c1, c3), (c1, c4), (c5, c4), (c7, c6)]:
            assert small.issubset(big)
        for small, big in [(c2, c1), (c2, c3), (c3, c1), (c3, c2), (c4, c1), (c6, c4)]:
            assert not small.issubset(big)

    def test_issubset_datetime(self):
        c1 = UniformCoordinates1d("2020-01-01", "2020-01-03", "1,D")
        c2 = UniformCoordinates1d("2020-01-01", "2020-01-03", "2,D")
        c3 = UniformCoordinates1d("2020-01-01", "2020-01-05", "1,D")
        c4 = UniformCoordinates1d("2020-01-05", "2020-01-01", "-2,D")

        for small, big in [(c1, c1), (c1, c3), (c2, c1), (c2, c4)]:
            assert small.issubset(big)
        for small, big in [(c1, c2), (c1, c4), (c3, c1)]:
            assert not small.issubset(big)

        # mixed datetime precision (days vs hours)
        c5 = UniformCoordinates1d("2020-01-01T00:00", "2020-01-03T00:00", "1,D")
        c6 = UniformCoordinates1d("2020-01-01T00:00", "2020-01-03T00:00", "6,h")
        assert c1.issubset(c5)
        assert c5.issubset(c1)
        assert c1.issubset(c6)
        assert not c6.issubset(c1)

    def test_issubset_dtype(self):
        """Numerical and datetime coordinates are never subsets of each other."""
        numerical = UniformCoordinates1d(0, 10, step=1)
        temporal = UniformCoordinates1d("2018", "2020", step="1,Y")
        assert not numerical.issubset(temporal)
        assert not temporal.issubset(numerical)

    def test_issubset_array_coordinates(self):
        """Subset check against ArrayCoordinates1d ignores ordering."""
        u = UniformCoordinates1d(start=1, stop=3, step=1)
        cases = [
            (ArrayCoordinates1d([1, 3, 2]), True),   # same values, different order
            (ArrayCoordinates1d([1, 2, 3]), True),
            (ArrayCoordinates1d([1, 3, 4]), False),  # 2 is missing
            (ArrayCoordinates1d([]), False),
        ]
        for other, expected in cases:
            if expected:
                assert u.issubset(other)
            else:
                assert not u.issubset(other)

    def test_issubset_coordinates(self):
        """Subset check against a Coordinates object uses the same-named dimension."""
        u = UniformCoordinates1d(1, 3, 1, name="lat")
        assert u.issubset(Coordinates([[1, 2, 3], [10, 20, 30]], dims=["lat", "lon"]))
        assert not u.issubset(Coordinates([[1, 2, 4], [10, 20, 30]], dims=["lat", "lon"]))  # lat missing 3
        assert not u.issubset(Coordinates([[10, 20, 30]], dims=["alt"]))  # no lat dimension
| true | true |
f7273dd94996a596fd89b435c509524733c41b42 | 4,323 | py | Python | lyft_CNN.py | govindap/lyft_motion_prediction | 15412444fec69ce4a0082d8de730cb882833eab0 | [
"Apache-2.0"
] | null | null | null | lyft_CNN.py | govindap/lyft_motion_prediction | 15412444fec69ce4a0082d8de730cb882833eab0 | [
"Apache-2.0"
] | null | null | null | lyft_CNN.py | govindap/lyft_motion_prediction | 15412444fec69ce4a0082d8de730cb882833eab0 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision.models.resnet import resnet50, resnet34
from torch import Tensor
from typing import Dict
from l5kit.configs import load_config_data
from l5kit.data import LocalDataManager, ChunkedDataset
from l5kit.dataset import AgentDataset, EgoDataset
from l5kit.rasterization import build_rasterizer
from l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset
from l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS
from l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace
from l5kit.geometry import transform_points
from l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory
from pathlib import Path
import pandas as pd
import os
import random
import time
import gc, psutil
# Configuration for the Lyft motion-prediction pipeline.  The layout mirrors
# the YAML config format consumed by l5kit's load_config_data; here it is
# defined inline as a dict instead.
cfg = {
    'format_version': 4,
    # Consumed by LyftCNNModel: backbone choice and history/future frame counts.
    'model_params': {
        'model_architecture': "resnet34",  # resolved to a torchvision constructor in LyftCNNModel
        'history_num_frames': 10,  # model input uses (history_num_frames + 1) * 2 extra channels
        'history_step_size': 1,
        'history_delta_time': 0.1,
        'future_num_frames': 50,  # number of predicted (x, y) points per trajectory
        'future_step_size': 1,
        'future_delta_time': 0.1,
        'model_name': "model_resnet34",
        'lr': 1e-3,
        'train': True,
        'predict': True
    },
    # Rasterization settings — presumably consumed by build_rasterizer (TODO confirm).
    'raster_params': {
        'raster_size': [224, 224],
        'pixel_size': [0.5, 0.5],
        'ego_center': [0.25, 0.5],
        'map_type': 'py_semantic',
        'satellite_map_key': 'aerial_map/aerial_map.png',
        'semantic_map_key': 'semantic_map/semantic_map.pb',
        'dataset_meta_key': 'meta.json',
        'filter_agents_threshold': 0.5
    },
    # DataLoader settings for the training split.
    'train_data_loader': {
        'key': 'scenes/train.zarr',
        'batch_size': 16,
        'shuffle': True,
        'num_workers': 0
    },
    # DataLoader settings for the test split (no shuffling for reproducible output).
    'test_data_loader': {
        'key': 'scenes/test.zarr',
        'batch_size': 16,
        'shuffle': False,
        'num_workers': 0,
    },
    # Training-loop settings (step counts and logging/checkpoint cadence).
    'train_params': {
        'steps': 120,
        'update_steps': 50,
        'checkpoint_steps': 100,
        'precision': True
    }
}
class LyftCNNModel(nn.Module):
    """Multi-modal trajectory predictor built on a torchvision ResNet backbone.

    The backbone's first convolution is widened to accept the rasterized input:
    3 map channels plus 2 channels per (history + current) frame — presumably
    x/y agent rasters per the l5kit convention (TODO confirm).  The head emits
    ``num_modes`` candidate trajectories of ``future_num_frames`` (x, y) points
    each, together with a softmax confidence per mode.
    """

    # Explicit name -> constructor map.  The original used eval() on the config
    # string, which would execute arbitrary text from the configuration file.
    _BACKBONES = {"resnet34": resnet34, "resnet50": resnet50}

    def __init__(self, cfg: Dict, num_modes: int = 3):
        super().__init__()

        architecture = cfg["model_params"]["model_architecture"]
        try:
            backbone_factory = self._BACKBONES[architecture]
        except KeyError:
            raise ValueError(
                "Unsupported model_architecture %r; expected one of %s"
                % (architecture, sorted(self._BACKBONES))
            )
        self.backbone = backbone_factory(pretrained=True, progress=True)

        # 2 channels per frame, for history frames plus the current frame,
        # on top of the 3 map channels.
        num_history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2
        num_in_channels = 3 + num_history_channels

        # Swap the stock 3-channel input conv for one matching our input depth,
        # keeping the original kernel/stride/padding.
        self.backbone.conv1 = nn.Conv2d(
            num_in_channels,
            self.backbone.conv1.out_channels,
            kernel_size=self.backbone.conv1.kernel_size,
            stride=self.backbone.conv1.stride,
            padding=self.backbone.conv1.padding,
            bias=False,
        )

        # Feature width after global average pooling.  Read from the backbone's
        # (bypassed) fc layer instead of hard-coding 512/2048 so any ResNet
        # variant added to _BACKBONES keeps working unchanged.
        backbone_out_features = self.backbone.fc.in_features

        # X, Y coords for the future positions (output shape: batch_sizex50x2)
        self.future_len = cfg["model_params"]["future_num_frames"]
        num_targets = 2 * self.future_len

        # You can add more layers here.
        self.head = nn.Sequential(
            # nn.Dropout(0.2),
            nn.Linear(in_features=backbone_out_features, out_features=4096),
        )

        self.num_preds = num_targets * num_modes
        self.num_modes = num_modes
        self.logit = nn.Linear(4096, out_features=self.num_preds + num_modes)

    def forward(self, x):
        """Return ``(pred, confidences)``.

        pred:        (batch_size, num_modes, future_len, 2) trajectory points.
        confidences: (batch_size, num_modes); each row sums to 1 via softmax.
        """
        # ResNet stem + stages, run manually so the stock fc layer is bypassed.
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)

        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)

        x = self.backbone.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.head(x)
        x = self.logit(x)

        # Split the flat output into trajectory predictions and mode confidences.
        # pred (batch_size)x(modes)x(time)x(2D coords)
        # confidences (batch_size)x(modes)
        bs, _ = x.shape
        pred, confidences = torch.split(x, self.num_preds, dim=1)
        pred = pred.view(bs, self.num_modes, self.future_len, 2)
        assert confidences.shape == (bs, self.num_modes)
        confidences = torch.softmax(confidences, dim=1)
        return pred, confidences
| 31.100719 | 101 | 0.635901 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision.models.resnet import resnet50, resnet34
from torch import Tensor
from typing import Dict
from l5kit.configs import load_config_data
from l5kit.data import LocalDataManager, ChunkedDataset
from l5kit.dataset import AgentDataset, EgoDataset
from l5kit.rasterization import build_rasterizer
from l5kit.evaluation import write_pred_csv, compute_metrics_csv, read_gt_csv, create_chopped_dataset
from l5kit.evaluation.chop_dataset import MIN_FUTURE_STEPS
from l5kit.evaluation.metrics import neg_multi_log_likelihood, time_displace
from l5kit.geometry import transform_points
from l5kit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory
from pathlib import Path
import pandas as pd
import os
import random
import time
import gc, psutil
cfg = {
'format_version': 4,
'model_params': {
'model_architecture': "resnet34",
'history_num_frames': 10,
'history_step_size': 1,
'history_delta_time': 0.1,
'future_num_frames': 50,
'future_step_size': 1,
'future_delta_time': 0.1,
'model_name': "model_resnet34",
'lr': 1e-3,
'train': True,
'predict': True
},
'raster_params': {
'raster_size': [224, 224],
'pixel_size': [0.5, 0.5],
'ego_center': [0.25, 0.5],
'map_type': 'py_semantic',
'satellite_map_key': 'aerial_map/aerial_map.png',
'semantic_map_key': 'semantic_map/semantic_map.pb',
'dataset_meta_key': 'meta.json',
'filter_agents_threshold': 0.5
},
'train_data_loader': {
'key': 'scenes/train.zarr',
'batch_size': 16,
'shuffle': True,
'num_workers': 0
},
'test_data_loader': {
'key': 'scenes/test.zarr',
'batch_size': 16,
'shuffle': False,
'num_workers': 0,
},
'train_params': {
'steps': 120,
'update_steps': 50,
'checkpoint_steps': 100,
'precision': True
}
}
class LyftCNNModel(nn.Module):
    """ResNet-backed multi-modal trajectory predictor (Lyft motion prediction).

    Consumes a rasterized bird's-eye-view image of the scene (RGB map plus
    agent/ego history channels) and predicts ``num_modes`` candidate future
    trajectories together with a softmax confidence per mode.
    """

    def __init__(self, cfg: Dict, num_modes=3):
        super().__init__()
        architecture = cfg["model_params"]["model_architecture"]
        # Resolve the backbone constructor from an explicit whitelist instead
        # of eval() on a config string: eval is unsafe and a typo only
        # surfaces at runtime as a NameError.  Unknown names raise KeyError.
        backbones = {"resnet34": resnet34, "resnet50": resnet50}
        backbone = backbones[architecture](pretrained=True, progress=True)
        self.backbone = backbone
        # Input raster: 3 RGB channels + (history + current frame) x
        # (agents, ego) channels.
        num_history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2
        num_in_channels = 3 + num_history_channels
        # Swap the stock 3-channel stem for one matching our channel count,
        # keeping the original kernel/stride/padding.
        self.backbone.conv1 = nn.Conv2d(
            num_in_channels,
            self.backbone.conv1.out_channels,
            kernel_size=self.backbone.conv1.kernel_size,
            stride=self.backbone.conv1.stride,
            padding=self.backbone.conv1.padding,
            bias=False,
        )
        # Bottleneck resnets (resnet50) end with 2048 features; basic-block
        # variants (resnet34) end with 512.
        if architecture == "resnet50":
            backbone_out_features = 2048
        else:
            backbone_out_features = 512
        # X, Y coords for the future positions (output shape: batch x future_len x 2 per mode).
        self.future_len = cfg["model_params"]["future_num_frames"]
        num_targets = 2 * self.future_len
        self.head = nn.Sequential(
            nn.Linear(in_features=backbone_out_features, out_features=4096),
        )
        self.num_preds = num_targets * num_modes
        self.num_modes = num_modes
        # One flat vector per sample: all mode trajectories + one logit per mode.
        self.logit = nn.Linear(4096, out_features=self.num_preds + num_modes)

    def forward(self, x):
        """Return (pred, confidences); see class docstring for shapes."""
        # Walk the resnet stages manually so the replaced stem and the custom
        # head are used instead of the stock classifier.
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)
        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)
        x = self.backbone.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.head(x)
        x = self.logit(x)
        # pred (batch_size)x(modes)x(time)x(2D coords)
        # confidences (batch_size)x(modes)
        bs, _ = x.shape
        pred, confidences = torch.split(x, self.num_preds, dim=1)
        pred = pred.view(bs, self.num_modes, self.future_len, 2)
        assert confidences.shape == (bs, self.num_modes)
        confidences = torch.softmax(confidences, dim=1)
        return pred, confidences
| true | true |
f7273e61673d7705a50b7baced2d412d3ccc1539 | 167 | py | Python | backend/course_application/apps.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | backend/course_application/apps.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | backend/course_application/apps.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class CourseApplicationConfig(AppConfig):
    """Django app configuration for the course_application app."""

    # Use 64-bit auto-incrementing primary keys for this app's models.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'course_application'
| 23.857143 | 56 | 0.790419 | from django.apps import AppConfig
class CourseApplicationConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'course_application'
| true | true |
f7273e63342dcead5dd52298df0ffc0e7b2d6ce6 | 41 | py | Python | addons/mail_client_extension/controllers/__init__.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mail_client_extension/controllers/__init__.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mail_client_extension/controllers/__init__.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*
from . import main | 20.5 | 22 | 0.585366 |
from . import main | true | true |
f7273ea22419d001ae46764df21f035e585f8246 | 1,368 | py | Python | gitflow/util.py | chassing/gitflow | 722ec6a68165bac80443eecf97bd00b8f7818b50 | [
"BSD-3-Clause"
] | 7 | 2015-05-09T20:31:36.000Z | 2021-05-17T02:14:30.000Z | gitflow/util.py | chassing/gitflow | 722ec6a68165bac80443eecf97bd00b8f7818b50 | [
"BSD-3-Clause"
] | 5 | 2016-05-31T22:15:08.000Z | 2021-02-16T08:44:28.000Z | gitflow/util.py | chassing/gitflow | 722ec6a68165bac80443eecf97bd00b8f7818b50 | [
"BSD-3-Clause"
] | 10 | 2016-05-31T21:41:25.000Z | 2021-04-11T13:33:48.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
#
# Shamelessly ripped from
# http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
#
def itersubclasses(cls, _seen=None):
    """
    itersubclasses(cls)

    Generator over all subclasses of a given class, in depth first order.
    Each subclass is yielded exactly once, even when it is reachable through
    several bases (multiple inheritance).  Only new-style classes are
    supported; passing anything else raises TypeError.

    Iterating ``itersubclasses(object)`` walks every (new-style) class
    currently defined in the interpreter.
    """
    if not isinstance(cls, type):
        raise TypeError('itersubclasses must be called with '
                        'new-style classes, not %.100r' % cls)
    seen = _seen if _seen is not None else set()
    try:
        children = cls.__subclasses__()
    except TypeError:  # only `type` itself needs the explicit argument
        children = cls.__subclasses__(cls)
    for child in children:
        if child in seen:
            continue
        seen.add(child)
        yield child
        for descendant in itersubclasses(child, seen):
            yield descendant
| 27.36 | 82 | 0.57383 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def itersubclasses(cls, _seen=None):
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError:
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
| true | true |
f7273ee822a983a7b06727171c767b66a9c5a749 | 1,054 | py | Python | tests/functional/test_hooks/test_six.py | yoda-vid/pyinstaller | 419f349dad721a253b19d9c596e251818132d6ba | [
"Apache-2.0"
] | 2 | 2017-02-08T22:22:09.000Z | 2020-10-08T12:28:36.000Z | tests/functional/test_hooks/test_six.py | 416426/pyinstaller | 0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645 | [
"Apache-2.0"
] | 3 | 2020-04-06T15:48:37.000Z | 2021-03-23T10:22:21.000Z | tests/functional/test_hooks/test_six.py | 416426/pyinstaller | 0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645 | [
"Apache-2.0"
] | 4 | 2018-06-04T20:40:37.000Z | 2020-10-13T22:38:40.000Z | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.tests import importorskip
@importorskip('six.moves')
def test_six_moves(pyi_builder):
    """Freeze and run a program that imports through the six.moves alias layer."""
    pyi_builder.test_source(
        """
        from six.moves import UserList
        UserList
        """)
# Run the same test a second time to trigger errors like
#   Target module "six.moves.urllib" already imported as "AliasNode(…)"
# caused by PyiModuleGraph being cached in an insufficient way.
@importorskip('six.moves')
def test_six_moves_2nd_run(pyi_builder):
    """Re-run test_six_moves to exercise stale PyiModuleGraph cache state."""
    return test_six_moves(pyi_builder)
| 34 | 78 | 0.627135 |
from PyInstaller.utils.tests import importorskip
@importorskip('six.moves')
def test_six_moves(pyi_builder):
pyi_builder.test_source(
"""
from six.moves import UserList
UserList
""")
@importorskip('six.moves')
def test_six_moves_2nd_run(pyi_builder):
return test_six_moves(pyi_builder)
| true | true |
f7273f7676d92413f9cd5cae85de640904f6032a | 8,163 | py | Python | accelbyte_py_sdk/api/cloudsave/operations/concurrent_record/put_game_record_concurrent_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/cloudsave/operations/concurrent_record/put_game_record_concurrent_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/cloudsave/operations/concurrent_record/put_game_record_concurrent_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:01:31.256010+08:00
# from: Justice Cloudsave Service (3.38.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelsConcurrentRecordRequest
from ...models import ResponseError
class PutGameRecordConcurrentHandlerV1(Operation):
    """Create or replace game record (putGameRecordConcurrentHandlerV1)

    Properties:
        url: /cloudsave/v1/namespaces/{namespace}/concurrent/records/{key}

        method: PUT

        tags: ConcurrentRecord

        consumes: ["application/json"]

        produces: ["application/json"]

        security: bearer

        body: (body) REQUIRED ModelsConcurrentRecordRequest in body

        namespace: (namespace) REQUIRED str in path

        key: (key) REQUIRED str in path

    Responses:
        204: No Content - (Record saved)

        400: Bad Request - ResponseError (Bad Request)

        412: Precondition Failed - ResponseError (Precondition Failed)

        500: Internal Server Error - ResponseError (Internal Server Error)
    """

    # region fields

    _url: str = "/cloudsave/v1/namespaces/{namespace}/concurrent/records/{key}"
    _method: str = "PUT"
    _consumes: List[str] = ["application/json"]
    _produces: List[str] = ["application/json"]
    _security: Optional[str] = "bearer"
    # No query parameter carries a resource location for this endpoint.
    _location_query: Optional[str] = None

    body: ModelsConcurrentRecordRequest  # REQUIRED in [body]
    namespace: str  # REQUIRED in [path]
    key: str  # REQUIRED in [path]

    # endregion fields

    # region properties

    @property
    def url(self) -> str:
        return self._url

    @property
    def method(self) -> str:
        return self._method

    @property
    def consumes(self) -> List[str]:
        return self._consumes

    @property
    def produces(self) -> List[str]:
        return self._produces

    @property
    def security(self) -> Optional[str]:
        return self._security

    @property
    def location_query(self) -> Optional[str]:
        return self._location_query

    # endregion properties

    # region get methods

    def get_full_url(self, base_url: Union[None, str] = None) -> str:
        """Return the absolute request URL with path placeholders substituted."""
        result = base_url if base_url is not None else ""

        # path params
        url = self.url
        for k, v in self.get_path_params().items():
            url = url.replace(f"{{{k}}}", v)
        result += url

        return result

    # noinspection PyMethodMayBeStatic
    def get_all_required_fields(self) -> List[str]:
        """List the field names that must be set before sending the request."""
        return [
            "body",
            "namespace",
            "key",
        ]

    # endregion get methods

    # region get_x_params methods

    def get_all_params(self) -> dict:
        """Collect every parameter group (body, path) for this request."""
        return {
            "body": self.get_body_params(),
            "path": self.get_path_params(),
        }

    def get_body_params(self) -> Any:
        return self.body.to_dict()

    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        if hasattr(self, "key"):
            result["key"] = self.key
        return result

    # endregion get_x_params methods

    # region is/has methods

    def is_valid(self) -> bool:
        """Return True when every required field (body, namespace, key) is set."""
        if not hasattr(self, "body") or self.body is None:
            return False
        if not hasattr(self, "namespace") or self.namespace is None:
            return False
        if not hasattr(self, "key") or self.key is None:
            return False
        return True

    # endregion is/has methods

    # region with_x methods

    def with_body(self, value: ModelsConcurrentRecordRequest) -> PutGameRecordConcurrentHandlerV1:
        self.body = value
        return self

    def with_namespace(self, value: str) -> PutGameRecordConcurrentHandlerV1:
        self.namespace = value
        return self

    def with_key(self, value: str) -> PutGameRecordConcurrentHandlerV1:
        self.key = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize the operation to a plain dict; include empty defaults on request."""
        result = {}
        if hasattr(self, "body") and self.body:
            result["body"] = self.body.to_dict(include_empty=include_empty)
        elif include_empty:
            result["body"] = ModelsConcurrentRecordRequest()
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = str()
        if hasattr(self, "key") and self.key:
            result["key"] = str(self.key)
        elif include_empty:
            result["key"] = str()
        return result

    # endregion to methods

    # region response methods

    # noinspection PyMethodMayBeStatic
    def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, ResponseError]]:
        """Parse the given response.

        204: No Content - (Record saved)

        400: Bad Request - ResponseError (Bad Request)

        412: Precondition Failed - ResponseError (Precondition Failed)

        500: Internal Server Error - ResponseError (Internal Server Error)
        """
        if code == 204:
            return HttpResponse.create(code, "No Content"), None
        if code == 400:
            return None, ResponseError.create_from_dict(content)
        if code == 412:
            return None, ResponseError.create_from_dict(content)
        if code == 500:
            return None, ResponseError.create_from_dict(content)
        # Fall back for status codes the spec does not document.
        was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
        if was_handled:
            return None, undocumented_response
        return None, HttpResponse.create_unhandled_error()

    # endregion response methods

    # region static methods

    @classmethod
    def create(
        cls,
        body: ModelsConcurrentRecordRequest,
        namespace: str,
        key: str,
    ) -> PutGameRecordConcurrentHandlerV1:
        """Build a fully-populated operation from its required fields."""
        instance = cls()
        instance.body = body
        instance.namespace = namespace
        instance.key = key
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> PutGameRecordConcurrentHandlerV1:
        """Rehydrate an operation from a dict produced by to_dict()."""
        instance = cls()
        if "body" in dict_ and dict_["body"] is not None:
            instance.body = ModelsConcurrentRecordRequest.create_from_dict(dict_["body"], include_empty=include_empty)
        elif include_empty:
            instance.body = ModelsConcurrentRecordRequest()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = str()
        if "key" in dict_ and dict_["key"] is not None:
            instance.key = str(dict_["key"])
        elif include_empty:
            instance.key = str()
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        """Map attribute names to their wire (JSON) field names."""
        return {
            "body": "body",
            "namespace": "namespace",
            "key": "key",
        }

    # endregion static methods
| 30.233333 | 137 | 0.61987 |
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelsConcurrentRecordRequest
from ...models import ResponseError
class PutGameRecordConcurrentHandlerV1(Operation):
_url: str = "/cloudsave/v1/namespaces/{namespace}/concurrent/records/{key}"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: ModelsConcurrentRecordRequest
namespace: str
key: str
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
"key",
]
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "key"):
result["key"] = self.key
return result
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
if not hasattr(self, "key") or self.key is None:
return False
return True
def with_body(self, value: ModelsConcurrentRecordRequest) -> PutGameRecordConcurrentHandlerV1:
self.body = value
return self
def with_namespace(self, value: str) -> PutGameRecordConcurrentHandlerV1:
self.namespace = value
return self
def with_key(self, value: str) -> PutGameRecordConcurrentHandlerV1:
self.key = value
return self
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsConcurrentRecordRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
if hasattr(self, "key") and self.key:
result["key"] = str(self.key)
elif include_empty:
result["key"] = str()
return result
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, ResponseError]]:
if code == 204:
return HttpResponse.create(code, "No Content"), None
if code == 400:
return None, ResponseError.create_from_dict(content)
if code == 412:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
@classmethod
def create(
cls,
body: ModelsConcurrentRecordRequest,
namespace: str,
key: str,
) -> PutGameRecordConcurrentHandlerV1:
instance = cls()
instance.body = body
instance.namespace = namespace
instance.key = key
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> PutGameRecordConcurrentHandlerV1:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsConcurrentRecordRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelsConcurrentRecordRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
if "key" in dict_ and dict_["key"] is not None:
instance.key = str(dict_["key"])
elif include_empty:
instance.key = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"key": "key",
}
| true | true |
f72740045bcd8582e8aa4a06002d0eadbb562489 | 2,739 | py | Python | main.py | sem-onyalo/gan-textures | ed75ce150d98920bf0d1f0a00ff42d3992c0e32e | [
"MIT"
] | null | null | null | main.py | sem-onyalo/gan-textures | ed75ce150d98920bf0d1f0a00ff42d3992c0e32e | [
"MIT"
] | null | null | null | main.py | sem-onyalo/gan-textures | ed75ce150d98920bf0d1f0a00ff42d3992c0e32e | [
"MIT"
] | null | null | null | import argparse
import logging
from model import DCGAN_1024
def init_logger():
    """Configure root logging: timestamped, level- and name-tagged records at INFO."""
    log_format = "[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s"
    date_format = "%Y/%m/%d %H:%M:%S"
    logging.basicConfig(format=log_format, datefmt=date_format, level=logging.INFO)
if __name__ == '__main__':
    # CLI flags mirror the DCGAN_1024 constructor arguments, in the same order.
    parser = argparse.ArgumentParser()
    # Reproducibility / hardware.
    parser.add_argument("--seed", type=int, default=27)
    parser.add_argument("--ngpu", type=int, default=1)
    # Dataset locations and loading.
    parser.add_argument("--data_root", type=str, default="data")
    parser.add_argument("--data_source_dir", type=str, default="01-cur")
    parser.add_argument("--data_target_dir", type=str, default="02-trn")
    parser.add_argument("--dataloader_workers", type=int, default=2)
    # Optimisation hyper-parameters (Adam).
    parser.add_argument("--epochs", type=int, default=50)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--learning_rate", type=float, default=0.0002)
    parser.add_argument("--adam_beta_1", type=float, default=0.5)
    parser.add_argument("--adam_beta_2", type=float, default=0.999)
    # Image and generator/discriminator architecture settings.
    parser.add_argument("--image_size", type=int, default=1080)
    parser.add_argument("--image_channels", type=int, default=3)
    parser.add_argument("--g_latent_vector_size", type=int, default=100)
    parser.add_argument("--g_feature_map_filters", type=int, default=64)
    parser.add_argument("--g_conv_kernel_size", type=int, default=4)
    parser.add_argument("--g_conv_stride", type=int, default=2)
    parser.add_argument("--d_feature_map_filters", type=int, default=64)
    parser.add_argument("--d_conv_kernel_size", type=int, default=4)
    parser.add_argument("--d_conv_stride", type=int, default=2)
    parser.add_argument("--d_activation_negative_slope", type=float, default=0.2)
    # Evaluation cadence.
    parser.add_argument("--eval_sample_count", type=int, default=64)
    parser.add_argument("--eval_epoch_frequency", type=int, default=10)
    parser.add_argument('--train', action='store_true', help='Train the model')
    args = parser.parse_args()

    init_logger()

    # The model is always constructed; training only runs when --train is passed.
    gan = DCGAN_1024(
        args.seed,
        args.ngpu,
        args.data_root,
        args.data_source_dir,
        args.data_target_dir,
        args.dataloader_workers,
        args.epochs,
        args.batch_size,
        args.learning_rate,
        args.adam_beta_1,
        args.adam_beta_2,
        args.image_size,
        args.image_channels,
        args.g_latent_vector_size,
        args.g_feature_map_filters,
        args.g_conv_kernel_size,
        args.g_conv_stride,
        args.d_feature_map_filters,
        args.d_conv_kernel_size,
        args.d_conv_stride,
        args.d_activation_negative_slope,
        args.eval_sample_count,
        args.eval_epoch_frequency
    )

    if args.train:
        gan.train()
| 38.577465 | 81 | 0.680175 | import argparse
import logging
from model import DCGAN_1024
def init_logger():
logging.basicConfig(
format="[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s",
datefmt="%Y/%m/%d %H:%M:%S",
level=logging.INFO
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=27)
parser.add_argument("--ngpu", type=int, default=1)
parser.add_argument("--data_root", type=str, default="data")
parser.add_argument("--data_source_dir", type=str, default="01-cur")
parser.add_argument("--data_target_dir", type=str, default="02-trn")
parser.add_argument("--dataloader_workers", type=int, default=2)
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--learning_rate", type=float, default=0.0002)
parser.add_argument("--adam_beta_1", type=float, default=0.5)
parser.add_argument("--adam_beta_2", type=float, default=0.999)
parser.add_argument("--image_size", type=int, default=1080)
parser.add_argument("--image_channels", type=int, default=3)
parser.add_argument("--g_latent_vector_size", type=int, default=100)
parser.add_argument("--g_feature_map_filters", type=int, default=64)
parser.add_argument("--g_conv_kernel_size", type=int, default=4)
parser.add_argument("--g_conv_stride", type=int, default=2)
parser.add_argument("--d_feature_map_filters", type=int, default=64)
parser.add_argument("--d_conv_kernel_size", type=int, default=4)
parser.add_argument("--d_conv_stride", type=int, default=2)
parser.add_argument("--d_activation_negative_slope", type=float, default=0.2)
parser.add_argument("--eval_sample_count", type=int, default=64)
parser.add_argument("--eval_epoch_frequency", type=int, default=10)
parser.add_argument('--train', action='store_true', help='Train the model')
args = parser.parse_args()
init_logger()
gan = DCGAN_1024(
args.seed,
args.ngpu,
args.data_root,
args.data_source_dir,
args.data_target_dir,
args.dataloader_workers,
args.epochs,
args.batch_size,
args.learning_rate,
args.adam_beta_1,
args.adam_beta_2,
args.image_size,
args.image_channels,
args.g_latent_vector_size,
args.g_feature_map_filters,
args.g_conv_kernel_size,
args.g_conv_stride,
args.d_feature_map_filters,
args.d_conv_kernel_size,
args.d_conv_stride,
args.d_activation_negative_slope,
args.eval_sample_count,
args.eval_epoch_frequency
)
if args.train:
gan.train()
| true | true |
f727406dcaa18843458f6c479462d8f14bb82493 | 2,802 | py | Python | DLCoursera_part1_week4_1.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | DLCoursera_part1_week4_1.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | DLCoursera_part1_week4_1.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | import numpy as np
from dnn_utils import sigmoid,sigmoid_backward,relu,relu_backward
def initialize_two_layer(n_x, n_h, n_y):
    """
    Initialize parameters for a two-layer network.

    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    param -- dict with W1 (n_h, n_x), b1 (n_h, 1), W2 (n_y, n_h), b2 (n_y, 1)
    """
    W1 = np.random.randn(n_h, n_x) * 0.01
    # np.zeros takes a shape *tuple*; np.zeros(n_h, 1) passed 1 as dtype and
    # raised TypeError.
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    param = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return param
def initialize_l_layer(layer_dims):
    """
    Initialize parameters for an L-layer network.

    Arguments:
    layer_dims -- list of layer sizes, including the input layer at index 0

    Returns:
    param -- dict with 'W<l>' of shape (layer_dims[l], layer_dims[l-1]) and
             'b<l>' of shape (layer_dims[l], 1) for l = 1..len(layer_dims)-1
    """
    param = {}
    L = len(layer_dims)
    for l in range(1, L):
        param['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
        # np.zeros takes a shape *tuple*; the original passed 1 as dtype and
        # raised TypeError.
        param['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return param
def linear_forward(W, A, b):
    """
    Compute the linear pre-activation of a layer: Z = W · A + b.
    """
    return np.matmul(W, A) + b
def linear_activation_forward(A_pre, W, b, activation):
    """
    Forward step for one layer: linear transform followed by the chosen
    non-linearity ("Relu" or "sigmoid").

    Returns the post-activation A together with a cache
    ((A_pre, W, b), activation_cache) consumed later by back-propagation.
    """
    # Both branches share the same linear step, so compute it once.
    Z = linear_forward(W, A_pre, b)
    if activation == "Relu":
        A, activation_cache = relu(Z)
    elif activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    return A, ((A_pre, W, b), activation_cache)
def L_model_forward(X, param):
    """
    Forward propagation for an L-layer network:
    [LINEAR -> RELU] * (L-1) -> LINEAR -> SIGMOID.

    Arguments:
    X -- input data, shape (input size, number of examples)
    param -- parameter dict from initialization ('W1', 'b1', ..., 'WL', 'bL')

    Returns:
    AL -- final post-activation value (sigmoid output)
    caches -- list of per-layer caches for back-propagation
    """
    caches = []
    L = len(param) // 2
    A = X
    # Hidden layers use ReLU.  The original passed the bare (undefined) name
    # `Relu` — a NameError — instead of the string tag.
    for l in range(1, L):
        A, cache = linear_activation_forward(A, param['W' + str(l)], param['b' + str(l)], "Relu")
        caches.append(cache)
    # Output layer uses sigmoid with the layer-L parameters; the original
    # reused the loop variable and applied ReLU again.
    AL, cache = linear_activation_forward(A, param['W' + str(L)], param['b' + str(L)], "sigmoid")
    caches.append(cache)
    return AL, caches
def linear_backward(dz, cache):
    """
    Backward step for the linear part of a layer.

    Arguments:
    dz -- gradient of the cost w.r.t. the linear output Z, shape (n_l, m)
    cache -- (A_pre, W, b) tuple stored during the forward pass

    Returns:
    dw -- gradient w.r.t. W, shape (n_l, n_prev)
    db -- gradient w.r.t. b, shape (n_l, 1)
    dA_pre -- gradient w.r.t. the previous activation, shape (n_prev, m)
    """
    A_pre, W, _ = cache
    m = dz.shape[1]
    # dW = dz · A_pre^T / m — the original omitted the transpose, which fails
    # (shape mismatch) for any layer whose input and output sizes differ.
    dw = np.dot(dz, A_pre.T) / m
    # db must be summed per unit over the examples axis (keeping the (n_l, 1)
    # column shape), not collapsed over the whole matrix.
    db = np.sum(dz, axis=1, keepdims=True) / m
    # dA_pre = W^T · dz — the original multiplied by W un-transposed.
    dA_pre = np.dot(W.T, dz)
    return dw, db, dA_pre
def linear_activation_backward(dA, cache, activation):
    """
    Backward step for one LINEAR -> ACTIVATION layer.

    Undoes the chosen non-linearity ("Relu" or "sigmoid") and then the linear
    transform, returning (dw, db, dA_pre).
    """
    linear_cache, activation_cache = cache
    if activation == "Relu":
        dz = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dz = sigmoid_backward(dA, activation_cache)
    return linear_backward(dz, linear_cache)
def L_model_backward(AL, Y, caches):
    """
    Backward propagation for the whole L-layer model.

    Arguments:
    AL -- probability vector from the forward pass
    Y -- true label vector, same shape as AL
    caches -- list of caches from L_model_forward

    Returns:
    grads -- dict with 'dw<l>', 'db<l>' and 'dA<l>' entries per layer
    """
    grads = {}
    L = len(caches)
    # Derivative of the cross-entropy cost w.r.t. AL.  The original bound this
    # to `dAl` but then used the undefined name `dAL` (NameError).
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    grads['dw' + str(L)], grads['db' + str(L)], grads['dA' + str(L)] = linear_activation_backward(dAL, caches[-1], "sigmoid")
    for l in reversed(range(L - 1)):
        cache = caches[l]
        grads['dw' + str(l + 1)], grads['db' + str(l + 1)], grads['dA' + str(l + 1)] = linear_activation_backward(grads['dA' + str(l + 2)],
                                                                                                                 cache, "Relu")
    return grads
def update_param(param, grads, learning_rate):
    """
    Perform one gradient-descent update on all parameters.

    Arguments:
    param -- dict with 'W<l>' / 'b<l>' parameter arrays
    grads -- dict with 'dw<l>' / 'db<l>' gradients, as produced by
             L_model_backward
    learning_rate -- step size

    Returns:
    param -- the updated parameter dict (also updated in place)
    """
    L = len(param) // 2
    for l in range(1, L + 1):
        # The original indexed grads with 'W<l>' / 'b<l>', but
        # L_model_backward stores gradients under 'dw<l>' / 'db<l>'
        # (KeyError at runtime).
        param['W' + str(l)] = param['W' + str(l)] - learning_rate * grads['dw' + str(l)]
        param['b' + str(l)] = param['b' + str(l)] - learning_rate * grads['db' + str(l)]
    return param
| 22.062992 | 115 | 0.662384 | import numpy as np
from dnn_utils import sigmoid,sigmoid_backward,relu,relu_backward
def initialize_two_layer(n_x,n_h,n_y):
W1 = np.random.randn(n_h,n_x) * 0.01
b1 = np.zeros(n_h,1)
W2 = np.random.randn(n_y,n_h) * 0.01
b2 = np.zeros(n_y,1)
param = {"W1":W1,"b1":b1,"W2":W2,"b2":b2}
return param
def initialize_l_layer(layer_dims):
param = {}
L = len(layer_dims)
for l in range(1, L):
param['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1]) * 0.01
param['b' + str(l)] = np.zeros(layer_dims[l],1)
return param
def linear_forward(W,A,b):
Z = np.dot(W,A) + b
return Z
def linear_activation_forward(A_pre,W,b,activation):
if activation == "Relu":
Z = linear_forward(W,A_pre,b)
A,activation_cache = relu(Z)
elif activation == "sigmoid":
Z = linear_forward(W,A_pre,b)
A,activation_cache = sigmoid(Z)
backward_used_cache = (A_pre,W,b)
cache = (backward_used_cache,activation_cache)
return A,cache
def L_model_forward(X,param):
caches = []
L = len(param) // 2
A = X
for l in range(1,L):
A,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)
caches.append(cache)
Al,cache = linear_activation_forward(A,param['W'+str(l)],param['b'+str(l)],Relu)
caches.append(cache)
return Al,caches
def linear_backward(dz,cache):
m = dz.shape[1]
dw = np.dot(dz,cache[0]) / m
db = np.sum(dz) / m
dA_pre = np.dot(cache[1],dz)
return dw,db,dA_pre
def linear_activation_backward(dA,cache,activation):
if activation == "Relu":
dz = relu_backward(dA,cache[1])
elif activation == "sigmoid":
dz = sigmoid_backward(dA,cache[1])
dw,db,dA_pre = linear_backward(dz,cache[0])
return dw,db,dA_pre
def L_model_backward(AL,Y,caches):
grads = {}
L = len(caches)
dAl = - (np.divide(Y,AL) - np.divide(1-Y,1-AL))
grads['dw'+str(L)],grads['db'+str(L)],grads['dA'+str(L)] = linear_activation_backward(dAL,caches[-1],"sigmoid")
for l in reversed(range(L-1)):
cache = caches[l]
grads['dw'+str(l+1)],grads['db'+str(l+1)],grads['dA'+str(l+1)] = linear_activation_backward(grads['dA'+str(l+2)],
cache,"Relu")
return grads
def update_param(param,grads,learning_rate):
L = len(param) // 2
for l in range(L):
param['W'+str(l+1)] = param['W'+str(l+1)] - learning_rate * grads['W'+str(l+1)]
param['b'+str(l+1)] = param['b'+str(l+1)] - learning_rate * grads['b'+str(l+1)]
return param
| true | true |
f727406ebc3243dcb2438e089040b82a0966c25b | 9,078 | py | Python | examples/adwords/v201702/extensions/add_site_links_using_feeds.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201702/extensions/add_site_links_using_feeds.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201702/extensions/add_site_links_using_feeds.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a sitelinks feed and associates it with a campaign.
To add sitelinks using the simpler ExtensionSetting services, see:
add_sitelinks.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import re
import uuid
from googleads import adwords
from googleads import errors
# See the Placeholder reference page for a list of all the placeholder types and
# fields.
# https://developers.google.com/adwords/api/docs/appendix/placeholders.html
# These IDs are string-typed because the SOAP payload expects strings.
PLACEHOLDER_SITELINKS = '1'
PLACEHOLDER_FIELD_SITELINK_LINK_TEXT = '1'
PLACEHOLDER_FIELD_SITELINK_FINAL_URLS = '5'
PLACEHOLDER_FIELD_LINE_2_TEXT = '3'
PLACEHOLDER_FIELD_LINE_3_TEXT = '4'

# Replace with the ID of the campaign the sitelinks feed should be attached to.
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
  """Creates a sitelinks feed, fills it, maps it to the SITELINKS placeholder
  and attaches it to `campaign_id`, restricted to mobile devices.

  Args:
    client: an initialized AdWordsClient instance.
    campaign_id: ID of the campaign the feed is associated with.

  Raises:
    errors.GoogleAdsError: if any mutate call returns no 'value' entry.
  """
  # Initialize appropriate service.
  feed_service = client.GetService('FeedService', version='v201702')
  feed_item_service = client.GetService('FeedItemService', version='v201702')
  feed_mapping_service = client.GetService(
      'FeedMappingService', version='v201702')
  campaign_feed_service = client.GetService(
      'CampaignFeedService', version='v201702')

  # Accumulates feed / attribute / feed-item IDs created below.
  sitelinks_data = {}

  # Create site links feed first.
  site_links_feed = {
      'name': 'Feed For Site Links #%s' % uuid.uuid4(),
      'attributes': [
          {'type': 'STRING', 'name': 'Link Text'},
          {'type': 'URL_LIST', 'name': 'Link Final URLs'},
          {'type': 'STRING', 'name': 'Line 2 Description'},
          {'type': 'STRING', 'name': 'Line 3 Description'}
      ]
  }

  response = feed_service.mutate([
      {'operator': 'ADD', 'operand': site_links_feed}
  ])

  if 'value' in response:
    feed = response['value'][0]
    # The server assigns attribute IDs; cache them for feed items/mappings.
    link_text_feed_attribute_id = feed['attributes'][0]['id']
    final_url_feed_attribute_id = feed['attributes'][1]['id']
    line_2_feed_attribute_id = feed['attributes'][2]['id']
    line_3_feed_attribute_id = feed['attributes'][3]['id']
    print ('Feed with name \'%s\' and ID \'%s\' was added with' %
           (feed['name'], feed['id']))
    print ('\tText attribute ID \'%s\' and Final URL attribute ID \'%s\'.' %
           (link_text_feed_attribute_id, final_url_feed_attribute_id))
    print ('\tLine 2 attribute ID \'%s\' and Line 3 attribute ID \'%s\'.' %
           (line_2_feed_attribute_id, line_3_feed_attribute_id))
    sitelinks_data['feedId'] = feed['id']
    sitelinks_data['linkTextFeedId'] = link_text_feed_attribute_id
    sitelinks_data['finalUrlFeedId'] = final_url_feed_attribute_id
    sitelinks_data['line2FeedId'] = line_2_feed_attribute_id
    sitelinks_data['line3FeedId'] = line_3_feed_attribute_id
  else:
    raise errors.GoogleAdsError('No feeds were added.')

  # Create site links feed items.
  items_data = [
      {'text': 'Home', 'finalUrls': 'http://www.example.com',
       'line2': 'Home line 2', 'line3': 'Home line 3'},
      {'text': 'Stores', 'finalUrls': 'http://www.example.com/stores',
       'line2': 'Stores line 2', 'line3': 'Stores line 3'},
      {'text': 'On Sale', 'finalUrls': 'http://www.example.com/sale',
       'line2': 'On Sale line 2', 'line3': 'On Sale line 3'},
      {'text': 'Support', 'finalUrls': 'http://www.example.com/support',
       'line2': 'Support line 2', 'line3': 'Support line 3'},
      {'text': 'Products', 'finalUrls': 'http://www.example.com/products',
       'line2': 'Products line 2', 'line3': 'Products line 3'},
      {'text': 'About Us', 'finalUrls': 'http://www.example.com/about',
       'line2': 'About line 2', 'line3': 'About line 3', 'locationId': '21137'}
  ]

  feed_items = []
  for item in items_data:
    feed_item = {
        'feedId': sitelinks_data['feedId'],
        'attributeValues': [
            {
                'feedAttributeId': sitelinks_data['linkTextFeedId'],
                'stringValue': item['text']
            },
            {
                'feedAttributeId': sitelinks_data['finalUrlFeedId'],
                'stringValues': [item['finalUrls']]
            },
            {
                'feedAttributeId': sitelinks_data['line2FeedId'],
                'stringValue': item['line2']
            },
            {
                'feedAttributeId': sitelinks_data['line3FeedId'],
                'stringValue': item['line3']
            }
        ],
        # Optional: use the 'startTime' and 'endTime' keys to specify the time
        # period for the feed to deliver. The example below will make the feed
        # start now and stop in one month.
        # Make sure you specify the datetime in the customer's time zone. You
        # can retrieve this from customer['dateTimeZone'].
        #
        # ['startTime']: datetime.datetime.now().strftime('%Y%m%d %H%M%S')
        # ['endTime']: (datetime.datetime.now() +
        #               relativedelta(months=1)).strftime('%Y%m%d %H%M%S')
    }

    # Use geographical targeting on a feed.
    # The IDs can be found in the documentation or retrieved with the
    # LocationCriterionService.
    if 'locationId' in item:
      feed_item['geoTargeting'] = {
          'id': item['locationId']
      }
      feed_item['geoTargetingRestriction'] = {
          'geoRestriction': 'LOCATION_OF_PRESENCE'
      }
    feed_items.append(feed_item)

  feed_items_operations = [{'operator': 'ADD', 'operand': item} for item
                           in feed_items]

  response = feed_item_service.mutate(feed_items_operations)

  if 'value' in response:
    sitelinks_data['feedItemIds'] = []
    for feed_item in response['value']:
      print 'Feed item with ID %s was added.' % feed_item['feedItemId']
      sitelinks_data['feedItemIds'].append(feed_item['feedItemId'])
  else:
    raise errors.GoogleAdsError('No feed items were added.')

  # Create site links feed mapping: ties each feed attribute to the sitelink
  # placeholder field AdWords renders it as.
  feed_mapping = {
      'placeholderType': PLACEHOLDER_SITELINKS,
      'feedId': sitelinks_data['feedId'],
      'attributeFieldMappings': [
          {
              'feedAttributeId': sitelinks_data['linkTextFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_LINK_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['finalUrlFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_FINAL_URLS
          },
          {
              'feedAttributeId': sitelinks_data['line2FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_2_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['line3FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_3_TEXT
          }
      ]
  }

  response = feed_mapping_service.mutate([
      {'operator': 'ADD', 'operand': feed_mapping}
  ])

  if 'value' in response:
    feed_mapping = response['value'][0]
    print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
           ' with ID %s.' %
           (feed_mapping['feedMappingId'], feed_mapping['placeholderType'],
            feed_mapping['feedId']))
  else:
    raise errors.GoogleAdsError('No feed mappings were added.')

  # Construct a matching function that associates the sitelink feeditems to the
  # campaign, and set the device preference to Mobile. For more details, see the
  # matching function guide:
  # https://developers.google.com/adwords/api/docs/guides/feed-matching-functions
  # NOTE: the re.sub strips '[', ']' and the Python 2 long suffix 'L' from the
  # stringified ID list to produce a comma-separated list of IDs.
  matching_function_string = (
      'AND(IN(FEED_ITEM_ID, {%s}), EQUALS(CONTEXT.DEVICE, \'Mobile\'))' %
      re.sub(r'\[|\]|L', '', str(sitelinks_data['feedItemIds'])))

  campaign_feed = {
      'feedId': sitelinks_data['feedId'],
      'campaignId': campaign_id,
      'matchingFunction': {'functionString': matching_function_string},
      # Specifying placeholder types on the CampaignFeed allows the same feed
      # to be used for different placeholders in different Campaigns.
      'placeholderTypes': [PLACEHOLDER_SITELINKS]
  }

  response = campaign_feed_service.mutate([
      {'operator': 'ADD', 'operand': campaign_feed}
  ])

  if 'value' in response:
    campaign_feed = response['value'][0]
    print ('Campaign with ID %s was associated with feed with ID %s.' %
           (campaign_feed['campaignId'], campaign_feed['feedId']))
  else:
    raise errors.GoogleAdsError('No campaign feeds were added.')
if __name__ == '__main__':
  # Initialize client object; credentials come from the googleads.yaml file
  # (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, CAMPAIGN_ID)
| 38.303797 | 81 | 0.649372 |
"""This example adds a sitelinks feed and associates it with a campaign.
To add sitelinks using the simpler ExtensionSetting services, see:
add_sitelinks.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import re
import uuid
from googleads import adwords
from googleads import errors
PLACEHOLDER_SITELINKS = '1'
PLACEHOLDER_FIELD_SITELINK_LINK_TEXT = '1'
PLACEHOLDER_FIELD_SITELINK_FINAL_URLS = '5'
PLACEHOLDER_FIELD_LINE_2_TEXT = '3'
PLACEHOLDER_FIELD_LINE_3_TEXT = '4'
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
  """Creates a sitelinks feed, fills it, maps it to the SITELINKS placeholder
  and attaches it to `campaign_id`, restricted to mobile devices.

  Args:
    client: an initialized AdWordsClient instance.
    campaign_id: ID of the campaign the feed is associated with.

  Raises:
    errors.GoogleAdsError: if any mutate call returns no 'value' entry.
  """
  # Initialize appropriate services.
  feed_service = client.GetService('FeedService', version='v201702')
  feed_item_service = client.GetService('FeedItemService', version='v201702')
  feed_mapping_service = client.GetService(
      'FeedMappingService', version='v201702')
  campaign_feed_service = client.GetService(
      'CampaignFeedService', version='v201702')

  # Accumulates feed / attribute / feed-item IDs created below.
  sitelinks_data = {}

  # Create the site links feed first.
  site_links_feed = {
      'name': 'Feed For Site Links #%s' % uuid.uuid4(),
      'attributes': [
          {'type': 'STRING', 'name': 'Link Text'},
          {'type': 'URL_LIST', 'name': 'Link Final URLs'},
          {'type': 'STRING', 'name': 'Line 2 Description'},
          {'type': 'STRING', 'name': 'Line 3 Description'}
      ]
  }

  response = feed_service.mutate([
      {'operator': 'ADD', 'operand': site_links_feed}
  ])

  if 'value' in response:
    feed = response['value'][0]
    # The server assigns attribute IDs; cache them for feed items/mappings.
    link_text_feed_attribute_id = feed['attributes'][0]['id']
    final_url_feed_attribute_id = feed['attributes'][1]['id']
    line_2_feed_attribute_id = feed['attributes'][2]['id']
    line_3_feed_attribute_id = feed['attributes'][3]['id']
    print ('Feed with name \'%s\' and ID \'%s\' was added with' %
           (feed['name'], feed['id']))
    print ('\tText attribute ID \'%s\' and Final URL attribute ID \'%s\'.' %
           (link_text_feed_attribute_id, final_url_feed_attribute_id))
    print ('\tLine 2 attribute ID \'%s\' and Line 3 attribute ID \'%s\'.' %
           (line_2_feed_attribute_id, line_3_feed_attribute_id))
    sitelinks_data['feedId'] = feed['id']
    sitelinks_data['linkTextFeedId'] = link_text_feed_attribute_id
    sitelinks_data['finalUrlFeedId'] = final_url_feed_attribute_id
    sitelinks_data['line2FeedId'] = line_2_feed_attribute_id
    sitelinks_data['line3FeedId'] = line_3_feed_attribute_id
  else:
    raise errors.GoogleAdsError('No feeds were added.')

  # Create the site links feed items.
  items_data = [
      {'text': 'Home', 'finalUrls': 'http://www.example.com',
       'line2': 'Home line 2', 'line3': 'Home line 3'},
      {'text': 'Stores', 'finalUrls': 'http://www.example.com/stores',
       'line2': 'Stores line 2', 'line3': 'Stores line 3'},
      {'text': 'On Sale', 'finalUrls': 'http://www.example.com/sale',
       'line2': 'On Sale line 2', 'line3': 'On Sale line 3'},
      {'text': 'Support', 'finalUrls': 'http://www.example.com/support',
       'line2': 'Support line 2', 'line3': 'Support line 3'},
      {'text': 'Products', 'finalUrls': 'http://www.example.com/products',
       'line2': 'Products line 2', 'line3': 'Products line 3'},
      {'text': 'About Us', 'finalUrls': 'http://www.example.com/about',
       'line2': 'About line 2', 'line3': 'About line 3', 'locationId': '21137'}
  ]

  feed_items = []
  for item in items_data:
    feed_item = {
        'feedId': sitelinks_data['feedId'],
        'attributeValues': [
            {
                'feedAttributeId': sitelinks_data['linkTextFeedId'],
                'stringValue': item['text']
            },
            {
                'feedAttributeId': sitelinks_data['finalUrlFeedId'],
                'stringValues': [item['finalUrls']]
            },
            {
                'feedAttributeId': sitelinks_data['line2FeedId'],
                'stringValue': item['line2']
            },
            {
                'feedAttributeId': sitelinks_data['line3FeedId'],
                'stringValue': item['line3']
            }
        ],
        # Optional: use the 'startTime' and 'endTime' keys to specify the time
        # period for the feed to deliver. Specify the datetime in the
        # customer's time zone; you
        # can retrieve this from customer['dateTimeZone'].
        #
        # ['startTime']: datetime.datetime.now().strftime('%Y%m%d %H%M%S')
        # ['endTime']: (datetime.datetime.now() +
        #               relativedelta(months=1)).strftime('%Y%m%d %H%M%S')
    }

    # Use geographical targeting on a feed.
    # The IDs can be found in the documentation or retrieved with the
    # LocationCriterionService.
    if 'locationId' in item:
      feed_item['geoTargeting'] = {
          'id': item['locationId']
      }
      feed_item['geoTargetingRestriction'] = {
          'geoRestriction': 'LOCATION_OF_PRESENCE'
      }
    feed_items.append(feed_item)

  feed_items_operations = [{'operator': 'ADD', 'operand': item} for item
                           in feed_items]

  response = feed_item_service.mutate(feed_items_operations)

  if 'value' in response:
    sitelinks_data['feedItemIds'] = []
    for feed_item in response['value']:
      print 'Feed item with ID %s was added.' % feed_item['feedItemId']
      sitelinks_data['feedItemIds'].append(feed_item['feedItemId'])
  else:
    raise errors.GoogleAdsError('No feed items were added.')

  # Create the site links feed mapping: ties each feed attribute to the
  # sitelink placeholder field AdWords renders it as.
  feed_mapping = {
      'placeholderType': PLACEHOLDER_SITELINKS,
      'feedId': sitelinks_data['feedId'],
      'attributeFieldMappings': [
          {
              'feedAttributeId': sitelinks_data['linkTextFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_LINK_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['finalUrlFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_FINAL_URLS
          },
          {
              'feedAttributeId': sitelinks_data['line2FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_2_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['line3FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_3_TEXT
          }
      ]
  }

  response = feed_mapping_service.mutate([
      {'operator': 'ADD', 'operand': feed_mapping}
  ])

  if 'value' in response:
    feed_mapping = response['value'][0]
    print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
           ' with ID %s.' %
           (feed_mapping['feedMappingId'], feed_mapping['placeholderType'],
            feed_mapping['feedId']))
  else:
    raise errors.GoogleAdsError('No feed mappings were added.')

  # Construct a matching function that associates the sitelink feeditems to
  # the campaign, restricted to Mobile devices. See the matching function
  # guide:
  # https://developers.google.com/adwords/api/docs/guides/feed-matching-functions
  # NOTE: the re.sub strips '[', ']' and the Python 2 long suffix 'L' from the
  # stringified ID list to produce a comma-separated list of IDs.
  matching_function_string = (
      'AND(IN(FEED_ITEM_ID, {%s}), EQUALS(CONTEXT.DEVICE, \'Mobile\'))' %
      re.sub(r'\[|\]|L', '', str(sitelinks_data['feedItemIds'])))

  campaign_feed = {
      'feedId': sitelinks_data['feedId'],
      'campaignId': campaign_id,
      'matchingFunction': {'functionString': matching_function_string},
      # Specifying placeholder types on the CampaignFeed allows the same feed
      # to be used for different placeholders in different Campaigns.
      'placeholderTypes': [PLACEHOLDER_SITELINKS]
  }

  response = campaign_feed_service.mutate([
      {'operator': 'ADD', 'operand': campaign_feed}
  ])

  if 'value' in response:
    campaign_feed = response['value'][0]
    print ('Campaign with ID %s was associated with feed with ID %s.' %
           (campaign_feed['campaignId'], campaign_feed['feedId']))
  else:
    raise errors.GoogleAdsError('No campaign feeds were added.')
if __name__ == '__main__':
  # Initialize client object; credentials come from the googleads.yaml file.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, CAMPAIGN_ID)
f727409d08bf28ed9c3b7b788835af2b38189f4f | 15,624 | py | Python | word2vec_basic.py | rmodi6/word-representations | 4f9a13cee9ff60ce3c667c833330b59de774ed39 | [
"MIT"
] | null | null | null | word2vec_basic.py | rmodi6/word-representations | 4f9a13cee9ff60ce3c667c833330b59de774ed39 | [
"MIT"
] | null | null | null | word2vec_basic.py | rmodi6/word-representations | 4f9a13cee9ff60ce3c667c833330b59de774ed39 | [
"MIT"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os, sys
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import loss_func as tf_func
import pickle
from collections import namedtuple
# Bundles every tensor/op the training loop and checkpointing code need,
# as returned by build_model().
Word2Vec = namedtuple('Word2Vec', ['train_inputs', 'train_labels', 'loss', 'optimizer', 'global_step',
                                   'embeddings', 'normalized_embeddings', 'valid_embeddings', 'similarity',
                                   'saver', 'summary', 'summary_writer'])
def maybe_create_path(path):
    """Create the directory `path` (single level) if it does not exist yet."""
    if os.path.exists(path):
        return
    os.mkdir(path)
    print("Created a path: %s" % path)
def maybe_download(filename, expected_bytes):
    """Fetch `filename` from the module-level `url` when missing locally,
    then verify the downloaded file is exactly `expected_bytes` long."""
    already_present = os.path.exists(filename)
    if not already_present:
        print('Downloading %s'%(url+filename))
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    statinfo = os.stat(filename)
    if statinfo.st_size != expected_bytes:
        print(statinfo.st_size)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first member of the zip archive `filename` and return its
    contents split into a list of whitespace-separated tokens."""
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        raw = archive.read(first_member)
    return tf.compat.as_str(raw).split()
def build_dataset(words):
    """Encode the corpus as integer word IDs.

    Keeps the (vocabulary_size - 1) most frequent words (module-level
    `vocabulary_size`); every other word maps to ID 0 ('UNK').

    Returns:
        data: the corpus as a list of word IDs.
        count: list of [word, frequency] pairs; count[0] is ['UNK', unk_count].
        dictionary: word -> ID mapping.
        reverse_dictionary: ID -> word mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = {word: rank for rank, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for word in words:
        index = dictionary.get(word)
        if index is None:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reverse_dictionary
def generate_batch(data, batch_size, num_skips, skip_window):
    """Generate one skip-gram training batch by sliding a window over `data`.

    Reads and advances the module-level cursor `data_index`. For each center
    word, all 2*skip_window neighbouring words become (center, context)
    pairs. Example with "The quick [brown fox jumps] over ...": center 'fox'
    yields (fox, brown) and (fox, jumps); the window then slides right by one
    word until `batch_size` pairs are collected.

    Args:
        data: the corpus as a list of word IDs.
        batch_size: number of training pairs to produce.
        num_skips: number of samples to draw per window. This implementation
            uses every neighbour exactly once, so it effectively assumes
            num_skips == 2 * skip_window (as configured in __main__).
        skip_window: words considered left and right of the center word, so
            the full window size is skip_window * 2 + 1.

    Returns:
        batch: np.ndarray of shape [batch_size] with center-word IDs.
        labels: np.ndarray of shape [batch_size, 1] with context (target)
            word IDs.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # Initialize batch_count to 0
    batch_count = 0
    while batch_count < batch_size:  # Continue until batch_size pairs generated
        # Re-initialize data_index so there are skip_window words on either
        # side of it (also wraps the cursor back near the start of `data`).
        if (data_index - skip_window) < 0 or (data_index + skip_window) >= len(data):
            data_index = skip_window
        left_context_word = data_index - 1  # index walking left of center
        right_context_word = data_index + 1  # index walking right of center
        for x in range(skip_window):  # emit one left + one right pair per step
            batch[batch_count] = data[data_index]  # center word
            labels[batch_count, 0] = data[left_context_word]  # left target
            batch[batch_count+1] = data[data_index]  # center word again
            labels[batch_count+1, 0] = data[right_context_word]  # right target
            batch_count += 2  # two pairs added (left + right)
            left_context_word -= 1  # move left index outward
            right_context_word += 1  # move right index outward
        data_index += 1  # slide the window: next word becomes the center
    return batch, labels  # Return the generated batches and labels
def build_model(sess, graph, loss_model):
    """Builds the word2vec tensor graph.

    Args:
        sess: tf.Session the variables are initialized in.
        graph: tf.Graph to build the ops into.
        loss_model: 'cross_entropy' or 'nce'; selects the loss from loss_func.

    Reads module-level hyperparameters (batch_size, vocabulary_size,
    embedding_size, num_sampled, unigram_prob, valid_examples).

    Returns:
        A Word2Vec namedtuple holding every tensor/op the training loop needs.
    """
    model = None
    with graph.as_default():
        # Ops and variables pinned to the CPU because of missing GPU implementation
        with tf.device('/cpu:0'):
            # Input data.
            train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
            train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
            valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

            global_step = tf.Variable(0, trainable=False)

            # Look up embeddings for inputs.
            embeddings = tf.Variable(
                tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)

            # Output-side (softmax) weight matrix.
            sm_weights = tf.Variable(
                tf.truncated_normal([vocabulary_size, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))

            # Get context embeddings from labels.
            true_w = tf.nn.embedding_lookup(sm_weights, train_labels)
            true_w = tf.reshape(true_w, [-1, embedding_size])

            # Construct the variables for the NCE loss.
            nce_weights = tf.Variable(
                tf.truncated_normal([vocabulary_size, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

            if loss_model == 'cross_entropy':
                loss = tf.reduce_mean(tf_func.cross_entropy_loss(embed, true_w))
            else:
                # Sample negative examples with unigram probability.
                # NOTE(review): samples are drawn once at graph-build time, not
                # per training step.
                sample = np.random.choice(vocabulary_size, num_sampled, p=unigram_prob, replace=False)
                loss = tf.reduce_mean(tf_func.nce_loss(embed, nce_weights, nce_biases, train_labels, sample, unigram_prob))

            # tf.summary.scalar('loss', loss)

            # Construct the SGD optimizer using a learning rate of 1.0.
            optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss, global_step=global_step)

            # Compute the cosine similarity between minibatch examples and all embeddings.
            norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
            normalized_embeddings = embeddings / norm
            valid_embeddings = tf.nn.embedding_lookup(
                normalized_embeddings, valid_dataset)
            similarity = tf.matmul(
                valid_embeddings, normalized_embeddings, transpose_b=True)

            saver = tf.train.Saver(tf.global_variables())

        # Save summary (disabled).
        # summary = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(summary_path + '/summary', sess.graph)
        summary = None
        summary_writer = None

        tf.global_variables_initializer().run()
        print("Initialized")

        model = Word2Vec(train_inputs, train_labels, loss, optimizer, global_step, embeddings,
                         normalized_embeddings, valid_embeddings, similarity, saver, summary, summary_writer)

    return model
def load_pretrained_model(sess, model, pretrained_model_path):
    """Restore model variables from the latest checkpoint found under
    `pretrained_model_path`, if any.

    Args:
        sess: active tf.Session whose graph contains the model's variables.
        model: Word2Vec namedtuple; `model.saver` performs the restore.
        pretrained_model_path: directory containing TF checkpoint files.
    """
    # Bug fix: the existence check previously tested the unrelated global
    # `filename` (the text8 corpus zip) instead of the checkpoint directory,
    # so a missing pretrained directory was never reported here.
    if not os.path.exists(pretrained_model_path):
        print("Missing pre-trained model: [%s]"%(pretrained_model_path))
        return

    ckpt = tf.train.get_checkpoint_state(pretrained_model_path)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(sess, ckpt.model_checkpoint_path)
def train(sess, model, data, dictionary, batch_size, num_skips, skip_window,
          max_num_steps, checkpoint_step, loss_model):
    """Run the SGD training loop for `max_num_steps` steps.

    Periodically prints the running average loss and, every
    `checkpoint_step` steps, the nearest neighbours of the validation words
    (reads module-level valid_size / valid_examples / reverse_dictionary).

    Returns:
        The final L2-normalized embedding matrix as a numpy array.
    """
    average_loss_step = max(checkpoint_step/10, 100)

    average_loss = 0
    for step in xrange(max_num_steps):
        batch_inputs, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)
        feed_dict = {model.train_inputs.name: batch_inputs, model.train_labels.name: batch_labels}

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run())
        # _, loss_val, summary = sess.run([model.optimizer, model.loss, model.summary], feed_dict=feed_dict)
        _, loss_val = sess.run([model.optimizer, model.loss], feed_dict=feed_dict)
        average_loss += loss_val

        if step % average_loss_step == 0:
            if step > 0:
                average_loss /= average_loss_step
            # The average loss is an estimate of the loss over the last
            # `average_loss_step` batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0

            # model.summary_writer.add_summary(summary, model.global_step.eval())
            # model.summary_writer.flush()

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % checkpoint_step == 0:
            sim = model.similarity.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)

            # chkpt_path = os.path.join(checkpoint_model_path, 'w2v_%s.cpkt'%(loss_model))
            # model.saver.save(sess, chkpt_path, global_step=model.global_step.eval())

    # model.summary_writer.close()

    # Saving the final embedding to a file
    final_embeddings = model.normalized_embeddings.eval()

    return final_embeddings
if __name__ == '__main__':
    # Loss model selection: 'cross_entropy' by default, 'nce' when passed as
    # the first CLI argument.
    loss_model = 'cross_entropy'
    if len(sys.argv) > 1:
        if sys.argv[1] == 'nce':
            loss_model = 'nce'

    ####################################################################################
    # Step 1: Download the data.
    url = 'http://mattmahoney.net/dc/'
    filename = maybe_download('text8.zip', 31344016)

    words = read_data(filename)
    print('Data size', len(words))

    ####################################################################################
    # Step 2: Build the dictionary and replace rare words with UNK token.
    vocabulary_size = 100000

    data, count, dictionary, reverse_dictionary = build_dataset(words)
    del words  # Hint to reduce memory.
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

    # Calculate the probability of unigrams (used by the NCE loss sampler).
    unigram_cnt = [c for w, c in count]
    total = sum(unigram_cnt)
    unigram_prob = [c*1.0/total for c in unigram_cnt]

    # Cursor used by generate_batch to walk through the corpus.
    data_index = 0

    ####################################################################################
    # Step 3: Test the function that generates a training batch for the skip-gram model.
    # Uncomment below to check batch output:
    # batch, labels = generate_batch(data, batch_size=8, num_skips=2, skip_window=1)
    # for i in range(8):
    #     print(batch[i], reverse_dictionary[batch[i]],
    #           '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

    ####################################################################################
    # Hyper Parameters to config
    batch_size = 128
    embedding_size = 128  # Dimension of the embedding vector.
    skip_window = 4       # How many words to consider left and right.
    num_skips = 8         # How many times to reuse an input to generate a label.

    # We pick a random validation set to sample nearest neighbors. Here we limit the
    # validation samples to the words that have a low numeric ID, which by
    # construction are also the most frequent.
    valid_size = 16       # Random set of words to evaluate similarity on.
    valid_window = 100    # Only pick dev samples in the head of the distribution.
    valid_examples = np.random.choice(valid_window, valid_size, replace=False)
    num_sampled = 64      # Number of negative examples to sample.

    # summary_path = './summary_%s'%(loss_model)
    pretrained_model_path = './pretrained/'
    checkpoint_model_path = './checkpoints_%s/'%(loss_model)
    model_path = './models'

    # Maximum training step and checkpoint interval.
    max_num_steps = 200001
    checkpoint_step = 50000

    graph = tf.Graph()
    with tf.Session(graph=graph) as sess:
        ####################################################################################
        # Step 4: Build and train a skip-gram model.
        model = build_model(sess, graph, loss_model)

        # You must start with the pretrained model.
        # If you want to resume from your checkpoints, change this path name.
        load_pretrained_model(sess, model, pretrained_model_path)

        ####################################################################################
        # Step 6: Begin training.
        maybe_create_path(checkpoint_model_path)
        embeddings = train(sess, model, data, dictionary, batch_size, num_skips, skip_window,
                           max_num_steps, checkpoint_step, loss_model)

        ####################################################################################
        # Step 7: Save the trained model (dictionary, step count, embeddings).
        trained_steps = model.global_step.eval()

        maybe_create_path(model_path)
        model_filepath = os.path.join(model_path, 'word2vec_%s.model'%(loss_model))
        print("Saving word2vec model as [%s]"%(model_filepath))
        pickle.dump([dictionary, trained_steps, embeddings], open(model_filepath, 'w'))
| 37.467626 | 113 | 0.662186 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os, sys
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
import loss_func as tf_func
import pickle
from collections import namedtuple
# Bundles every tensor/op the training loop and checkpointing code need,
# as returned by build_model().
Word2Vec = namedtuple('Word2Vec', ['train_inputs', 'train_labels', 'loss', 'optimizer', 'global_step',
                                   'embeddings', 'normalized_embeddings', 'valid_embeddings', 'similarity',
                                   'saver', 'summary', 'summary_writer'])
def maybe_create_path(path):
    """Create the directory `path` (single level) if it does not exist yet."""
    if os.path.exists(path):
        return
    os.mkdir(path)
    print("Created a path: %s" % path)
def maybe_download(filename, expected_bytes):
    """Fetch `filename` from the module-level `url` when missing locally,
    then verify the downloaded file is exactly `expected_bytes` long."""
    already_present = os.path.exists(filename)
    if not already_present:
        print('Downloading %s'%(url+filename))
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    statinfo = os.stat(filename)
    if statinfo.st_size != expected_bytes:
        print(statinfo.st_size)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    print('Found and verified', filename)
    return filename
# Read the data into a list of strings.
def read_data(filename):
    """Extract the first member of the zip archive `filename` and return its
    contents split into a list of whitespace-separated tokens."""
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        raw = archive.read(first_member)
    return tf.compat.as_str(raw).split()
def build_dataset(words):
    """Encode the corpus as integer word IDs.

    Keeps the (vocabulary_size - 1) most frequent words (module-level
    `vocabulary_size`); every other word maps to ID 0 ('UNK').

    Returns:
        data: the corpus as a list of word IDs.
        count: list of [word, frequency] pairs; count[0] is ['UNK', unk_count].
        dictionary: word -> ID mapping.
        reverse_dictionary: ID -> word mapping.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = {word: rank for rank, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for word in words:
        index = dictionary.get(word)
        if index is None:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reverse_dictionary
def generate_batch(data, batch_size, num_skips, skip_window):
    """Generate one skip-gram training batch by sliding a window over `data`.

    Reads and advances the module-level cursor `data_index`. For each center
    word, all 2*skip_window neighbouring words become (center, context)
    pairs; the window then slides right one word until `batch_size` pairs
    are collected. This implementation uses every neighbour exactly once,
    so it effectively assumes num_skips == 2 * skip_window.

    Returns:
        batch: np.ndarray [batch_size] of center-word IDs.
        labels: np.ndarray [batch_size, 1] of context (target) word IDs.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # Number of (center, context) pairs emitted so far.
    batch_count = 0
    while batch_count < batch_size:
        # Keep skip_window words available on either side of the cursor
        # (also wraps the cursor back near the start of `data`).
        if (data_index - skip_window) < 0 or (data_index + skip_window) >= len(data):
            data_index = skip_window
        left_context_word = data_index - 1   # index walking left of center
        right_context_word = data_index + 1  # index walking right of center
        for x in range(skip_window):  # emit one left + one right pair per step
            batch[batch_count] = data[data_index]
            labels[batch_count, 0] = data[left_context_word]
            batch[batch_count+1] = data[data_index]
            labels[batch_count+1, 0] = data[right_context_word]
            batch_count += 2             # two pairs added (left + right)
            left_context_word -= 1       # move left index outward
            right_context_word += 1      # move right index outward
        data_index += 1  # slide the window: next word becomes the center
    return batch, labels
def build_model(sess, graph, loss_model):
    """Assemble the word2vec training graph on CPU and wrap it in a Word2Vec record.

    :param sess: TF session the variables are initialised in.
    :param graph: tf.Graph to build the ops into.
    :param loss_model: 'cross_entropy' selects the softmax cross-entropy loss;
        any other value selects NCE with ``num_sampled`` negative samples.
    NOTE(review): relies on module-level names (batch_size, vocabulary_size,
    embedding_size, num_sampled, unigram_prob, valid_examples, tf_func,
    Word2Vec) — confirm they are defined before this is called.
    """
    model = None
    with graph.as_default():
        with tf.device('/cpu:0'):
            # Placeholders for one batch of (center word, context word) pairs.
            train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
            train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
            valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
            global_step = tf.Variable(0, trainable=False)
            # Input embedding table, initialised uniformly in [-1, 1).
            embeddings = tf.Variable(
                tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)
            # Output weights used by the cross-entropy loss path.
            sm_weights = tf.Variable(
                tf.truncated_normal([vocabulary_size, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            true_w = tf.nn.embedding_lookup(sm_weights, train_labels)
            true_w = tf.reshape(true_w, [-1, embedding_size])
            # Separate weights/biases for the NCE loss path.
            nce_weights = tf.Variable(
                tf.truncated_normal([vocabulary_size, embedding_size],
                                    stddev=1.0 / math.sqrt(embedding_size)))
            nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
            if loss_model == 'cross_entropy':
                loss = tf.reduce_mean(tf_func.cross_entropy_loss(embed, true_w))
            else:
                # Negative samples are drawn once (outside the graph) from the
                # unigram distribution, without replacement.
                sample = np.random.choice(vocabulary_size, num_sampled, p=unigram_prob, replace=False)
                loss = tf.reduce_mean(tf_func.nce_loss(embed, nce_weights, nce_biases, train_labels, sample, unigram_prob))
            optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss, global_step=global_step)
            # L2-normalised embeddings, used for the cosine-similarity validation check.
            norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
            normalized_embeddings = embeddings / norm
            valid_embeddings = tf.nn.embedding_lookup(
                normalized_embeddings, valid_dataset)
            similarity = tf.matmul(
                valid_embeddings, normalized_embeddings, transpose_b=True)
            saver = tf.train.Saver(tf.global_variables())
        # Summaries are intentionally disabled in this build.
        summary = None
        summary_writer = None
        tf.global_variables_initializer().run()
        print("Initialized")
        model = Word2Vec(train_inputs, train_labels, loss, optimizer, global_step, embeddings,
                         normalized_embeddings, valid_embeddings, similarity, saver, summary, summary_writer)
    return model
def load_pretrained_model(sess, model, pretrained_model_path):
    """Restore model variables from the latest checkpoint under ``pretrained_model_path``.

    Returns silently (leaving the model at its current initialisation) when the
    path does not exist or no valid checkpoint is found there.
    """
    # Bug fix: the original tested the unrelated module-global `filename`
    # instead of the path that was actually passed in.
    if not os.path.exists(pretrained_model_path):
        print("Missing pre-trained model: [%s]" % (pretrained_model_path))
        return
    ckpt = tf.train.get_checkpoint_state(pretrained_model_path)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(sess, ckpt.model_checkpoint_path)
def train(sess, model, data, dictionary, batch_size, num_skips, skip_window,
          max_num_steps, checkpoint_step, loss_model):
    """Run SGD for ``max_num_steps`` batches and return the final normalised embeddings.

    Prints a running average loss periodically and, every ``checkpoint_step``
    steps, the nearest neighbours of the validation words.
    NOTE(review): relies on the module globals reverse_dictionary, valid_size
    and valid_examples, and on Python 2's ``xrange`` — confirm before reuse.
    """
    # How often to report the averaged loss (at least every 100 steps).
    average_loss_step = max(checkpoint_step/10, 100)
    average_loss = 0
    for step in xrange(max_num_steps):
        batch_inputs, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)
        feed_dict = {model.train_inputs.name: batch_inputs, model.train_labels.name: batch_labels}
        _, loss_val = sess.run([model.optimizer, model.loss], feed_dict=feed_dict)
        average_loss += loss_val
        if step % average_loss_step == 0:
            if step > 0:
                average_loss /= average_loss_step
            # Estimate of the loss over the last window of steps.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0
        if step % checkpoint_step == 0:
            # Expensive: evaluates cosine similarity against the whole vocabulary.
            sim = model.similarity.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbours to display
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
    final_embeddings = model.normalized_embeddings.eval()
    return final_embeddings
if __name__ == '__main__':
    # Default to the softmax cross-entropy loss; passing `nce` as the first
    # CLI argument switches to noise-contrastive estimation. Any other
    # argument is silently ignored.
    loss_model = 'cross_entropy'
    if len(sys.argv) > 1:
        if sys.argv[1] == 'nce':
            loss_model = 'nce'
| true | true |
f7274148a77ac245be2e506c83b47f5520833782 | 2,404 | py | Python | components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/util/asyncscheduledtask.py | gayangunarathne/private-paas | d4dd794a7dcf46312d17e81fe0442e42d30c8c63 | [
"Apache-2.0"
] | null | null | null | components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/util/asyncscheduledtask.py | gayangunarathne/private-paas | d4dd794a7dcf46312d17e81fe0442e42d30c8c63 | [
"Apache-2.0"
] | null | null | null | components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/util/asyncscheduledtask.py | gayangunarathne/private-paas | d4dd794a7dcf46312d17e81fe0442e42d30c8c63 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from threading import Thread
class AbstractAsyncScheduledTask:
    """Contract for tasks that a ScheduledExecutor runs at a fixed interval.

    Subclasses implement :meth:`execute_task` with the work to perform on
    each tick.
    """

    def execute_task(self):
        """Perform one scheduled run; must be overridden by subclasses."""
        raise NotImplementedError
class ScheduledExecutor(Thread):
    """Fires a task on its own worker thread every ``delay`` seconds until
    :meth:`terminate` is called."""

    def __init__(self, delay, task):
        """
        :param int delay: seconds to sleep between consecutive executions
        :param AbstractAsyncScheduledTask task: task whose ``execute_task``
            is launched on each tick
        """
        Thread.__init__(self)
        self.delay = delay          # :type : int
        self.task = task            # :type : AbstractAsyncScheduledTask
        self.terminated = False     # :type : bool

    def run(self):
        """Tick loop: sleep, then run the task asynchronously so a slow task
        cannot delay the schedule."""
        while not self.terminated:
            time.sleep(self.delay)
            Thread(target=self.task.execute_task).start()

    def terminate(self):
        """Request shutdown; takes effect within at most ``delay`` seconds."""
        self.terminated = True
| 32.931507 | 110 | 0.669301 |
import time
from threading import Thread
class AbstractAsyncScheduledTask:
    """Contract followed by tasks executed periodically by a ScheduledExecutor."""

    def execute_task(self):
        """Override with the work to run on each scheduled tick."""
        raise NotImplementedError
class ScheduledExecutor(Thread):
    """Executes ``task`` every ``delay`` seconds on a fresh thread until terminated."""

    def __init__(self, delay, task):
        """
        :param delay: seconds between executions
        :param task: object exposing execute_task() (AbstractAsyncScheduledTask)
        """
        Thread.__init__(self)
        self.delay = delay
        self.task = task
        self.terminated = False

    def run(self):
        # Sleep first, then launch the task on its own thread so a slow task
        # cannot delay the schedule.
        while not self.terminated:
            time.sleep(self.delay)
            task_thread = Thread(target=self.task.execute_task)
            task_thread.start()

    def terminate(self):
        # Takes effect after at most `delay` seconds (flag checked at loop top).
        self.terminated = True
| true | true |
f727415cbcd7b0a3044c71aebd591a61b3989d00 | 393 | py | Python | techtest/asgi.py | rising-entropy/Techcurve-Test | eeefd14d1a83451b9b5333f582ed6cc12efca44c | [
"MIT"
] | null | null | null | techtest/asgi.py | rising-entropy/Techcurve-Test | eeefd14d1a83451b9b5333f582ed6cc12efca44c | [
"MIT"
] | null | null | null | techtest/asgi.py | rising-entropy/Techcurve-Test | eeefd14d1a83451b9b5333f582ed6cc12efca44c | [
"MIT"
] | 1 | 2021-03-26T11:39:15.000Z | 2021-03-26T11:39:15.000Z | """
ASGI config for techtest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'techtest.settings')

# Module-level ASGI callable that servers (e.g. uvicorn/daphne) import.
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 |
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'techtest.settings')

# Module-level ASGI callable imported by the ASGI server.
application = get_asgi_application()
| true | true |
f72741a466183d81d1fca4196b837b1a9535f959 | 6,357 | py | Python | read_xml_all/calcul_matrix_compare_ce_good_192matrix.py | daniel20162016/my-first | f9554dd476302b26e8a296393025f150922f349c | [
"MIT"
] | null | null | null | read_xml_all/calcul_matrix_compare_ce_good_192matrix.py | daniel20162016/my-first | f9554dd476302b26e8a296393025f150922f349c | [
"MIT"
] | null | null | null | read_xml_all/calcul_matrix_compare_ce_good_192matrix.py | daniel20162016/my-first | f9554dd476302b26e8a296393025f150922f349c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
# Input audio, its word-timing XML, and the word whose five occurrences we analyse.
filename = 'francois_filon_pure_3.wav'
filename_1 = 'francois_filon_pure_3.xml'
word = 'ce'
wave_signal_float, framerate, word_start_point, word_length_point, word_end_point = read_wav_xml_good_1(filename, filename_1, word)
XJ_1 = wave_signal_float
fs = framerate

# Window geometry in samples: 8 windows per occurrence, each t_step+1 samples
# long, consecutive windows shifted by t_entre_step samples.
t_step = 1920
t_entre_step = 1440

# Start/end sample indices (1-based) of the five occurrences of the word.
# The *_2 end points are kept for parity with the original script but unused below.
t_du_1_1 = int(word_start_point[0]); t_du_1_2 = int(word_end_point[0])
t_du_2_1 = int(word_start_point[1]); t_du_2_2 = int(word_end_point[1])
t_du_3_1 = int(word_start_point[2]); t_du_3_2 = int(word_end_point[2])
t_du_4_1 = int(word_start_point[3]); t_du_4_2 = int(word_end_point[3])
t_du_5_1 = int(word_start_point[4]); t_du_5_2 = int(word_end_point[4])


def _word_feature_vector(t_start):
    """Return the 192-value feature vector (8 windows x 24 normalised FFT
    bands) for the word occurrence starting at sample index ``t_start``.

    Replaces the five copy-pasted per-occurrence sections of the original
    script; window i covers samples
    [t_start + i*t_entre_step - 1, t_start + i*t_entre_step + t_step),
    exactly as before (the i == 0 case reduces to the original first window).
    """
    features = np.zeros([192])
    for i in range(0, 8):
        segment = XJ_1[(t_start + t_entre_step * i - 1):(t_start + t_step + t_entre_step * i)]
        bands, _, _ = matrix_24_2(segment, fs)
        bands = max_matrix_norm(bands)
        for j in range(0, 24):
            features[24 * i + j] = bands[j]
    return features


# One 192-long vector per occurrence; variable names kept so the .npz layout
# (arr_0 .. arr_4) is unchanged.
matrix_all_step_new_1 = _word_feature_vector(t_du_1_1)
matrix_all_step_new_2 = _word_feature_vector(t_du_2_1)
matrix_all_step_new_3 = _word_feature_vector(t_du_3_1)
matrix_all_step_new_4 = _word_feature_vector(t_du_4_1)
matrix_all_step_new_5 = _word_feature_vector(t_du_5_1)

np.savez('ce_compare_192_matrix.npz', matrix_all_step_new_1, matrix_all_step_new_2, matrix_all_step_new_3, matrix_all_step_new_4, matrix_all_step_new_5)
| 39.240741 | 147 | 0.532012 |
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# Input audio, its word-timing XML, and the word whose five occurrences we analyse.
filename = 'francois_filon_pure_3.wav'
filename_1 = 'francois_filon_pure_3.xml'
word = 'ce'
wave_signal_float, framerate, word_start_point, word_length_point, word_end_point = read_wav_xml_good_1(filename, filename_1, word)
XJ_1 = wave_signal_float
fs = framerate

# Window geometry in samples: 8 windows per occurrence, each t_step+1 samples
# long, consecutive windows shifted by t_entre_step samples.
t_step = 1920
t_entre_step = 1440

# Start/end sample indices (1-based) of the five occurrences of the word.
# The *_2 end points are kept for parity with the original script but unused below.
t_du_1_1 = int(word_start_point[0]); t_du_1_2 = int(word_end_point[0])
t_du_2_1 = int(word_start_point[1]); t_du_2_2 = int(word_end_point[1])
t_du_3_1 = int(word_start_point[2]); t_du_3_2 = int(word_end_point[2])
t_du_4_1 = int(word_start_point[3]); t_du_4_2 = int(word_end_point[3])
t_du_5_1 = int(word_start_point[4]); t_du_5_2 = int(word_end_point[4])


def _word_feature_vector(t_start):
    """Return the 192-value feature vector (8 windows x 24 normalised FFT
    bands) for the word occurrence starting at sample index ``t_start``.

    Replaces the five copy-pasted per-occurrence sections; window i covers
    samples [t_start + i*t_entre_step - 1, t_start + i*t_entre_step + t_step),
    exactly as in the original code.
    """
    features = np.zeros([192])
    for i in range(0, 8):
        segment = XJ_1[(t_start + t_entre_step * i - 1):(t_start + t_step + t_entre_step * i)]
        bands, _, _ = matrix_24_2(segment, fs)
        bands = max_matrix_norm(bands)
        for j in range(0, 24):
            features[24 * i + j] = bands[j]
    return features


# One 192-long vector per occurrence; variable names kept so the .npz layout
# (arr_0 .. arr_4) is unchanged.
matrix_all_step_new_1 = _word_feature_vector(t_du_1_1)
matrix_all_step_new_2 = _word_feature_vector(t_du_2_1)
matrix_all_step_new_3 = _word_feature_vector(t_du_3_1)
matrix_all_step_new_4 = _word_feature_vector(t_du_4_1)
matrix_all_step_new_5 = _word_feature_vector(t_du_5_1)

np.savez('ce_compare_192_matrix.npz', matrix_all_step_new_1, matrix_all_step_new_2, matrix_all_step_new_3, matrix_all_step_new_4, matrix_all_step_new_5)
| true | true |
f7274274d7a0048e67b5610c186be3f936227e5f | 1,037 | py | Python | debussy_concert/data_ingestion/config/movement_parameters/time_partitioned.py | DotzInc/debussy_concert | a28d7ca01814f24ffa75cfece758d619b71509f2 | [
"Apache-2.0"
] | 3 | 2022-03-23T19:16:25.000Z | 2022-03-30T18:12:19.000Z | debussy_concert/data_ingestion/config/movement_parameters/time_partitioned.py | DotzInc/debussy_concert | a28d7ca01814f24ffa75cfece758d619b71509f2 | [
"Apache-2.0"
] | null | null | null | debussy_concert/data_ingestion/config/movement_parameters/time_partitioned.py | DotzInc/debussy_concert | a28d7ca01814f24ffa75cfece758d619b71509f2 | [
"Apache-2.0"
] | 1 | 2022-03-23T20:14:48.000Z | 2022-03-23T20:14:48.000Z | from dataclasses import dataclass
from debussy_concert.core.config.movement_parameters.base import MovementParametersBase
@dataclass(frozen=True)
class BigQueryDataPartitioning:
    """Immutable description of how ingested data is partitioned in BigQuery."""
    partitioning_type: str
    # Partition layout of the source files on GCS -- presumably a path
    # template; confirm against callers.
    gcs_partition_schema: str
    partition_field: str
    destination_partition: str
@dataclass(frozen=True)
class BigQueryTimeDataPartitioning(BigQueryDataPartitioning):
    """Time-based partitioning: adds the granularity of the time partitions."""
    partition_granularity: str
@dataclass(frozen=True)
class TimePartitionedDataIngestionMovementParameters(MovementParametersBase):
    """Movement parameters for time-partitioned data ingestion.

    ``data_partitioning`` may arrive as a plain mapping (e.g. parsed config);
    ``__post_init__`` coerces it into a BigQueryTimeDataPartitioning instance.
    """
    extract_connection_id: str
    data_partitioning: BigQueryTimeDataPartitioning

    def __post_init__(self):
        if not isinstance(self.data_partitioning, BigQueryTimeDataPartitioning):
            # Frozen dataclasses forbid plain attribute assignment, so bypass
            # __setattr__ deliberately (see https://stackoverflow.com/a/54119384).
            object.__setattr__(
                self, 'data_partitioning',
                BigQueryTimeDataPartitioning(**self.data_partitioning))
| 34.566667 | 87 | 0.802314 | from dataclasses import dataclass
from debussy_concert.core.config.movement_parameters.base import MovementParametersBase
@dataclass(frozen=True)
class BigQueryDataPartitioning:
partitioning_type: str
gcs_partition_schema: str
partition_field: str
destination_partition: str
@dataclass(frozen=True)
class BigQueryTimeDataPartitioning(BigQueryDataPartitioning):
partition_granularity: str
@dataclass(frozen=True)
class TimePartitionedDataIngestionMovementParameters(MovementParametersBase):
extract_connection_id: str
data_partitioning: BigQueryTimeDataPartitioning
def __post_init__(self):
if isinstance(self.data_partitioning, BigQueryTimeDataPartitioning):
return
data_partitioning = BigQueryTimeDataPartitioning(**self.data_partitioning)
object.__setattr__(self, 'data_partitioning', data_partitioning)
| true | true |
f72742b11c9975baf67b3cee27f9d5f9ddcb878a | 1,593 | py | Python | userbot/plugins/instastory_x3.py | x3raqee/x3raqe | d062ace8d69895a8ab80a003fc76da63e2b63a1d | [
"Apache-2.0"
] | null | null | null | userbot/plugins/instastory_x3.py | x3raqee/x3raqe | d062ace8d69895a8ab80a003fc76da63e2b63a1d | [
"Apache-2.0"
] | null | null | null | userbot/plugins/instastory_x3.py | x3raqee/x3raqe | d062ace8d69895a8ab80a003fc76da63e2b63a1d | [
"Apache-2.0"
] | 1 | 2021-04-27T23:28:43.000Z | 2021-04-27T23:28:43.000Z | # @x3raqe
#ممول محمد
"""QuotLy: Avaible commands: .انستا
"""
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="ستوري ?(.*)"))
async def _(event):
    """Forward a replied-to message to @x3storybot and relay the bot's reply
    into the chat (presumably an Instagram-story download — confirm).

    All user-facing strings are intentionally left in their original wording.
    """
    if event.fwd_from:
        return
    # Require a reply containing text to forward to the bot.
    if not event.reply_to_msg_id:
        await event.edit("``` ~ @X3RAQE - .```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.edit("``` ~ @X3RAQE - ```")
        return
    chat = "@x3storybot"
    sender = reply_message.sender
    if reply_message.sender.bot:
        await event.edit("``` ~ @X3RAQE - ```")
        return
    await event.edit("`جار ارسال لك التحميل من @x3storybot`")
    async with event.client.conversation(chat) as conv:
        try:
            # Register the waiter before forwarding so the bot's reply (from
            # user id 1077724863) cannot be missed, then await it.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=1077724863))
            await event.client.forward_messages(chat, reply_message)
            response = await response
        except YouBlockedUserError:
            await event.reply("```Please unblock me (@x3storybot) u Nigga```")
            return
        if response.text.startswith("Hi!"):
            # Greeting reply means the bot was blocked by forward-privacy settings.
            await event.edit("```Can you kindly disable your forward privacy settings for good?```")
        else:
            await event.delete()
            await event.client.send_message(event.chat_id, response.message)
| 37.046512 | 102 | 0.629002 |
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="ستوري ?(.*)"))
async def _(event):
    """Forward a replied-to message to @x3storybot and relay the bot's reply
    back into the chat. User-facing strings are left in their original wording.
    """
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.edit("``` ~ @X3RAQE - .```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.edit("``` ~ @X3RAQE - ```")
        return
    chat = "@x3storybot"
    sender = reply_message.sender
    if reply_message.sender.bot:
        await event.edit("``` ~ @X3RAQE - ```")
        return
    await event.edit("`جار ارسال لك التحميل من @x3storybot`")
    async with event.client.conversation(chat) as conv:
        try:
            # Register the waiter before forwarding so the reply cannot be missed.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=1077724863))
            await event.client.forward_messages(chat, reply_message)
            response = await response
        except YouBlockedUserError:
            await event.reply("```Please unblock me (@x3storybot) u Nigga```")
            return
        if response.text.startswith("Hi!"):
            # Greeting reply means forward-privacy settings are blocking the bot.
            await event.edit("```Can you kindly disable your forward privacy settings for good?```")
        else:
            await event.delete()
            await event.client.send_message(event.chat_id, response.message)
| true | true |
f72743000a24fcbc0e4390eb46261f96678ebe0b | 956 | py | Python | jdcloud_sdk/services/vod/models/UpdateTranscodeTemplateGroupReqData.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/vod/models/UpdateTranscodeTemplateGroupReqData.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/vod/models/UpdateTranscodeTemplateGroupReqData.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class UpdateTranscodeTemplateGroupReqData(object):
    """Request payload for updating a VOD transcode template group."""

    def __init__(self, groupName=None, templates=None):
        """
        :param groupName: (Optional) name of the transcode template group
        :param templates: (Optional) templates belonging to the group
        """
        self.groupName, self.templates = groupName, templates
| 31.866667 | 75 | 0.722803 |
class UpdateTranscodeTemplateGroupReqData(object):
    """Request payload for updating a VOD transcode template group."""

    def __init__(self, groupName=None, templates=None):
        """
        :param groupName: (Optional) transcode template group name
        :param templates: (Optional) templates in the group
        """
        self.groupName = groupName
        self.templates = templates
| true | true |
f72743d6ec89246b1658151bdccfda8fc5b489b1 | 8,498 | py | Python | docs/conf.py | Akhail/Tebless | 87faff5547f168d0cf2d5caaf313c1efe1c19950 | [
"MIT"
] | 5 | 2017-09-20T02:12:25.000Z | 2019-10-22T14:12:07.000Z | docs/conf.py | mdbetancourt/Tebless | 87faff5547f168d0cf2d5caaf313c1efe1c19950 | [
"MIT"
] | 3 | 2021-06-14T14:20:53.000Z | 2021-11-15T17:47:37.000Z | docs/conf.py | Akhail/Tebless | 87faff5547f168d0cf2d5caaf313c1efe1c19950 | [
"MIT"
] | 1 | 2021-04-13T14:03:53.000Z | 2021-04-13T14:03:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tebless documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)  # docs/ lives one level below the package root

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import tebless  # noqa: E402 -- must come after the sys.path tweak above
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tebless'
copyright = u"2017, Michel Betancourt"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = tebless.__version__
# The full version, including alpha/beta/rc tags.
release = tebless.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'teblessdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'tebless.tex',
u'Tebless Documentation',
u'Michel Betancourt', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tebless',
u'Tebless Documentation',
[u'Michel Betancourt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tebless',
u'Tebless Documentation',
u'Michel Betancourt',
'tebless',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.6787 | 76 | 0.717581 |
import sys
import os
import sphinx_rtd_theme
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
import tebless
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Tebless'
copyright = u"2017, Michel Betancourt"
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = tebless.__version__
# The full version, including alpha/beta/rc tags.
release = tebless.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'teblessdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'tebless.tex',
u'Tebless Documentation',
u'Michel Betancourt', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tebless',
u'Tebless Documentation',
[u'Michel Betancourt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tebless',
u'Tebless Documentation',
u'Michel Betancourt',
'tebless',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f72744a63d45a288c51ef43928f1452981437cd1 | 2,703 | py | Python | snake.py | leonardoarthur/SnakeGame | 106d3d238fd0d15091aa25d1770886961cedcc73 | [
"MIT"
] | 1 | 2021-05-03T02:03:36.000Z | 2021-05-03T02:03:36.000Z | snake.py | leonardoarthur/SnakeGame | 106d3d238fd0d15091aa25d1770886961cedcc73 | [
"MIT"
] | null | null | null | snake.py | leonardoarthur/SnakeGame | 106d3d238fd0d15091aa25d1770886961cedcc73 | [
"MIT"
] | null | null | null | import pygame
import random
pygame.init()
azul = (50, 100, 213)
laranja = (205, 102, 0)
verde = (0, 255, 0)
amarelo = (255, 255, 102)
dimensoes = (600, 600)
x = 300
y = 300
d = 20
lista_cobra = [[x, y]]
dx = 0
dy = 0
x_comida = round(random.randrange(0, 600 - d) /20) * 20
y_comida = round(random.randrange(0, 600 - d) /20) * 20
fonte = pygame.font.SysFont("hack", 35)
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption('Snake')
tela.fill(azul)
clock = pygame.time.Clock()
def desenha_cobra(lista_cobra):
tela.fill(azul)
for unidade in lista_cobra:
pygame.draw.rect(tela, laranja, [unidade[0], unidade[1], d, d])
def mover_cobra(dx, dy, lista_cobra):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
dx = -d
dy = 0
elif event.key == pygame.K_RIGHT:
dx = d
dy = 0
elif event.key == pygame.K_UP:
dx = 0
dy = -d
elif event.key == pygame.K_DOWN:
dx = 0
dy = d
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
return dx, dy, lista_cobra
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1]== y_comida:
lista_cobra.append([x_novo, y_novo])
x_comida = round(random.randrange(0, 600 - d) / 20) * 20
y_comida = round(random.randrange(0, 600 - d) / 20) * 20
pygame.draw.rect(tela, verde, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra
def verifica_parede(lista_cobra):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
raise Exception
def verifica_modeu_cobra(lista_cobra):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x, y in corpo:
if x == head[0] and y == head[1]:
raise Exception
def atualizar_pontos(lista_cobra):
pts = str(len(lista_cobra))
escore = fonte.render("pontuação: " + pts, True, amarelo)
tela.blit(escore, [0, 0])
while True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra = mover_cobra(dx, dy, lista_cobra)
x_comida, y_comida, lista_cobra = verifica_comida(dx, dy, x_comida, y_comida, lista_cobra)
verifica_parede(lista_cobra)
atualizar_pontos(lista_cobra)
verifica_modeu_cobra(lista_cobra)
print(lista_cobra)
clock.tick(10) | 24.572727 | 94 | 0.607103 | import pygame
import random
pygame.init()
azul = (50, 100, 213)
laranja = (205, 102, 0)
verde = (0, 255, 0)
amarelo = (255, 255, 102)
dimensoes = (600, 600)
x = 300
y = 300
d = 20
lista_cobra = [[x, y]]
dx = 0
dy = 0
x_comida = round(random.randrange(0, 600 - d) /20) * 20
y_comida = round(random.randrange(0, 600 - d) /20) * 20
fonte = pygame.font.SysFont("hack", 35)
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption('Snake')
tela.fill(azul)
clock = pygame.time.Clock()
def desenha_cobra(lista_cobra):
tela.fill(azul)
for unidade in lista_cobra:
pygame.draw.rect(tela, laranja, [unidade[0], unidade[1], d, d])
def mover_cobra(dx, dy, lista_cobra):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
dx = -d
dy = 0
elif event.key == pygame.K_RIGHT:
dx = d
dy = 0
elif event.key == pygame.K_UP:
dx = 0
dy = -d
elif event.key == pygame.K_DOWN:
dx = 0
dy = d
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
return dx, dy, lista_cobra
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1]== y_comida:
lista_cobra.append([x_novo, y_novo])
x_comida = round(random.randrange(0, 600 - d) / 20) * 20
y_comida = round(random.randrange(0, 600 - d) / 20) * 20
pygame.draw.rect(tela, verde, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra
def verifica_parede(lista_cobra):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
raise Exception
def verifica_modeu_cobra(lista_cobra):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x, y in corpo:
if x == head[0] and y == head[1]:
raise Exception
def atualizar_pontos(lista_cobra):
pts = str(len(lista_cobra))
escore = fonte.render("pontuação: " + pts, True, amarelo)
tela.blit(escore, [0, 0])
while True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra = mover_cobra(dx, dy, lista_cobra)
x_comida, y_comida, lista_cobra = verifica_comida(dx, dy, x_comida, y_comida, lista_cobra)
verifica_parede(lista_cobra)
atualizar_pontos(lista_cobra)
verifica_modeu_cobra(lista_cobra)
print(lista_cobra)
clock.tick(10) | true | true |
f7274749a010a84dccc4f71883f73ec4a8832b8a | 417 | py | Python | glitter/migrations/0004_object_id_required.py | dhamaniasad/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 3 | 2017-06-01T16:22:18.000Z | 2018-08-22T21:45:55.000Z | glitter/migrations/0004_object_id_required.py | blancltd/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 85 | 2016-02-25T10:34:03.000Z | 2017-04-03T11:07:59.000Z | glitter/migrations/0004_object_id_required.py | blancltd/django-glitter | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | [
"BSD-3-Clause"
] | 1 | 2016-08-02T08:21:19.000Z | 2016-08-02T08:21:19.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('glitter', '0003_remove_empty_contentblocks'),
]
operations = [
migrations.AlterField(
model_name='contentblock',
name='object_id',
field=models.PositiveIntegerField(),
),
]
| 20.85 | 55 | 0.621103 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('glitter', '0003_remove_empty_contentblocks'),
]
operations = [
migrations.AlterField(
model_name='contentblock',
name='object_id',
field=models.PositiveIntegerField(),
),
]
| true | true |
f72747c52a599d07bf9a7350a23de712aeb51d69 | 3,190 | py | Python | app/result/views.py | Ravishrks/examin | 974f8d86ca116b3135a482e8e81532a40ea187c3 | [
"MIT"
] | null | null | null | app/result/views.py | Ravishrks/examin | 974f8d86ca116b3135a482e8e81532a40ea187c3 | [
"MIT"
] | null | null | null | app/result/views.py | Ravishrks/examin | 974f8d86ca116b3135a482e8e81532a40ea187c3 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.views import View
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import HttpResponse
from result.models import ResponseSheet
import os
import subprocess
def result(request):
return render(request, 'user/dashboard.html')
def checkResponseSheet(request, exam_id):
my_response_sheet = ResponseSheet.objects.filter(exam__pk = exam_id)
# testing with one entry
print(my_response_sheet)
for sheet in my_response_sheet:
# sheet = my_response_sheet.first()
new_file = f'{sheet.pk}{sheet.exam.pk}{sheet.question.pk}{sheet.profile.user.username}'
my_programme = sheet.response
# taking decision based on programme type
if sheet.question.question_type == "Programme":
my_language_type = sheet.language_type[1:]
programe_file_location = f'result/program/{my_language_type}/files/{new_file}{sheet.language_type}'
output_file_location = f'result/program/{my_language_type}/output/{new_file}{sheet.language_type}.txt'
error_file_location = f'result/program/{my_language_type}/error/{new_file}{sheet.language_type}.txt'
sh_file_location = f'result/program/{my_language_type}/sh/{new_file}{sheet.language_type}.sh'
if sheet.language_type == ".js":
print("It's js bro")
with open(programe_file_location, 'w') as f:
f.write(my_programme)
# create shell script files
with open(sh_file_location, 'w') as sh:
shell_cmd = f'#!/bin/sh\nnode {programe_file_location} > {output_file_location}\nnode {programe_file_location} 2> {error_file_location}'
sh.write(shell_cmd)
subprocess.run(["chmod","777",sh_file_location])
subprocess.run(["chmod","777",programe_file_location])
subprocess.run(["chmod","777",output_file_location])
subprocess.run(["chmod","777",error_file_location])
subprocess.run([sh_file_location])
# Save output or error to response file
with open(output_file_location) as rf:
read_file = rf.read()
sheet.output = read_file
sheet.save()
with open(error_file_location) as ef:
read_file_error = ef.read()
sheet.error = read_file_error
sheet.save()
elif sheet.language_type == ".c":
print("It's c bro")
elif sheet.language_type == ".cpp":
print("It's c++ bro")
elif sheet.language_type == ".py":
print("It's python bro")
elif sheet.language_type == ".php":
print("It's php bro")
elif sheet.language_type == ".java":
print("It's java bro")
return HttpResponse("Checked!")
| 34.673913 | 156 | 0.601254 | from django.shortcuts import render, redirect
from django.views import View
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import HttpResponse
from result.models import ResponseSheet
import os
import subprocess
def result(request):
return render(request, 'user/dashboard.html')
def checkResponseSheet(request, exam_id):
my_response_sheet = ResponseSheet.objects.filter(exam__pk = exam_id)
print(my_response_sheet)
for sheet in my_response_sheet:
new_file = f'{sheet.pk}{sheet.exam.pk}{sheet.question.pk}{sheet.profile.user.username}'
my_programme = sheet.response
if sheet.question.question_type == "Programme":
my_language_type = sheet.language_type[1:]
programe_file_location = f'result/program/{my_language_type}/files/{new_file}{sheet.language_type}'
output_file_location = f'result/program/{my_language_type}/output/{new_file}{sheet.language_type}.txt'
error_file_location = f'result/program/{my_language_type}/error/{new_file}{sheet.language_type}.txt'
sh_file_location = f'result/program/{my_language_type}/sh/{new_file}{sheet.language_type}.sh'
if sheet.language_type == ".js":
print("It's js bro")
with open(programe_file_location, 'w') as f:
f.write(my_programme)
# create shell script files
with open(sh_file_location, 'w') as sh:
shell_cmd = f'
sh.write(shell_cmd)
subprocess.run(["chmod","777",sh_file_location])
subprocess.run(["chmod","777",programe_file_location])
subprocess.run(["chmod","777",output_file_location])
subprocess.run(["chmod","777",error_file_location])
subprocess.run([sh_file_location])
# Save output or error to response file
with open(output_file_location) as rf:
read_file = rf.read()
sheet.output = read_file
sheet.save()
with open(error_file_location) as ef:
read_file_error = ef.read()
sheet.error = read_file_error
sheet.save()
elif sheet.language_type == ".c":
print("It's c bro")
elif sheet.language_type == ".cpp":
print("It's c++ bro")
elif sheet.language_type == ".py":
print("It's python bro")
elif sheet.language_type == ".php":
print("It's php bro")
elif sheet.language_type == ".java":
print("It's java bro")
return HttpResponse("Checked!")
| true | true |
f727483e267d49216f2db46205941f51cd603a86 | 544 | py | Python | src/manage.py | tegarty/socialrating | b80888ee8e637bd0a5517614c78235d563fead2e | [
"BSD-3-Clause"
] | 1 | 2019-02-03T17:17:02.000Z | 2019-02-03T17:17:02.000Z | src/manage.py | tegarty/socialrating | b80888ee8e637bd0a5517614c78235d563fead2e | [
"BSD-3-Clause"
] | null | null | null | src/manage.py | tegarty/socialrating | b80888ee8e637bd0a5517614c78235d563fead2e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialrating.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34 | 76 | 0.689338 |
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialrating.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f7274a0d65eb66f00c4a41033040a9f2ae9f8cac | 18,672 | py | Python | autobahn/wamp/interfaces.py | meejah/AutobahnPython | 54da8882eea3f4b1da62a6d3481556ab77720d41 | [
"MIT"
] | null | null | null | autobahn/wamp/interfaces.py | meejah/AutobahnPython | 54da8882eea3f4b1da62a6d3481556ab77720d41 | [
"MIT"
] | null | null | null | autobahn/wamp/interfaces.py | meejah/AutobahnPython | 54da8882eea3f4b1da62a6d3481556ab77720d41 | [
"MIT"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import abc
import six
# Public names exported by ``from autobahn.wamp.interfaces import *``.
__all__ = [
    'IObjectSerializer',
    'ISerializer',
    'ITransport',
    'ITransportHandler',
    'ISession',
    'IApplicationSession',
]
@six.add_metaclass(abc.ABCMeta)
class IObjectSerializer(object):
    """
    Raw Python object serialization and deserialization. Object serializers are
    used by classes implementing WAMP serializers, that is instances of
    :class:`autobahn.wamp.interfaces.ISerializer`, to convert between native
    Python objects and the byte strings actually sent over a transport.
    """
    @abc.abstractproperty
    def BINARY(self):
        """
        Flag (read-only) to indicate if serializer requires a binary clean
        transport or if UTF8 transparency is sufficient: ``True`` means the
        serialized payload is raw binary, ``False`` means the payload is
        UTF-8 encoded text.
        """
    @abc.abstractmethod
    def serialize(self, obj):
        """
        Serialize an object to a byte string.

        :param obj: Object to serialize.
        :type obj: Any serializable type.

        :returns: bytes -- Serialized byte string.
        """
    @abc.abstractmethod
    def unserialize(self, payload):
        """
        Unserialize objects from a byte string. A single payload may carry
        more than one serialized object, hence a list is returned.

        :param payload: Objects to unserialize.
        :type payload: bytes

        :returns: list -- List of (raw) objects unserialized.
        """
@six.add_metaclass(abc.ABCMeta)
class ISerializer(object):
    """
    WAMP message serialization and deserialization: converts between WAMP
    message objects and the wire-level payloads exchanged over a transport.
    """
    @abc.abstractproperty
    def MESSAGE_TYPE_MAP(self):
        """
        Mapping of WAMP message type codes to WAMP message classes.
        """
    @abc.abstractproperty
    def SERIALIZER_ID(self):
        """
        The WAMP serialization format ID.
        """
    @abc.abstractmethod
    def serialize(self, message):
        """
        Serializes a WAMP message to bytes for sending over a transport.

        :param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
        :type message: obj

        :returns: tuple -- A pair ``(payload, is_binary)``, where ``payload``
            is the serialized byte string and ``is_binary`` flags whether the
            payload is raw binary (versus UTF-8 encoded text).
        """
    @abc.abstractmethod
    def unserialize(self, payload, is_binary):
        """
        Deserialize bytes from a transport and parse into WAMP messages.

        :param payload: Byte string from wire.
        :type payload: bytes

        :param is_binary: Type of payload. True if payload is a binary string, else
           the payload is UTF-8 encoded Unicode text.
        :type is_binary: bool

        :returns: list -- List of ``a.w.m.Message`` objects.
        """
@six.add_metaclass(abc.ABCMeta)
class ITransport(object):
    """
    A WAMP transport is a bidirectional, full-duplex, reliable, ordered,
    message-based channel.
    """
    @abc.abstractmethod
    def send(self, message):
        """
        Send a WAMP message over the transport to the peer. If the transport is
        not open, this raises :class:`autobahn.wamp.exception.TransportLost`.
        Returns a deferred/future when the message has been processed and more
        messages may be sent. When send() is called while a previous deferred/future
        has not yet fired, the send will fail immediately.

        :param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
        :type message: obj

        :returns: obj -- A Deferred/Future that fires once the message has been
            processed and the transport is ready to accept the next message.

        :raises: :class:`autobahn.wamp.exception.TransportLost` if the transport
            is not open.
        """
    @abc.abstractmethod
    def is_open(self):
        """
        Check if the transport is open for messaging.

        :returns: bool -- ``True``, if the transport is open.
        """
    @abc.abstractmethod
    def close(self):
        """
        Close the transport regularly. The transport will perform any
        closing handshake if applicable. This should be used for any
        application initiated closing.
        """
    @abc.abstractmethod
    def abort(self):
        """
        Abort the transport abruptly. The transport will be destroyed as
        fast as possible, and without playing nice to the peer. This should
        only be used in case of fatal errors, protocol violations or possible
        detected attacks.
        """
    @abc.abstractmethod
    def get_channel_id(self):
        """
        Return the unique channel ID of the underlying transport. This is used to
        mitigate credential forwarding man-in-the-middle attacks when running
        application level authentication (eg WAMP-cryptosign) which are decoupled
        from the underlying transport.

        The channel ID is only available when running over TLS (either WAMP-WebSocket
        or WAMP-RawSocket). It is not available for non-TLS transports (plain TCP or
        Unix domain sockets). It is also not available for WAMP-over-HTTP/Longpoll.
        Further, it is currently unimplemented for asyncio (only works on Twisted).

        The channel ID is computed as follows:

        - for a client, the SHA256 over the "TLS Finished" message sent by the client
          to the server is returned.
        - for a server, the SHA256 over the "TLS Finished" message the server expected
          the client to send

        Note: this is similar to `tls-unique` as described in RFC5929, but instead
        of returning the raw "TLS Finished" message, it returns a SHA256 over such a
        message. The reason is that we use the channel ID mainly with WAMP-cryptosign,
        which is based on Ed25519, where keys are always 32 bytes. And having a channel ID
        which is always 32 bytes (independent of the TLS ciphers/hashfuns in use) allows
        use to easily XOR channel IDs with Ed25519 keys and WAMP-cryptosign challenges.

        WARNING: For safe use of this (that is, for safely binding app level authentication
        to the underlying transport), you MUST use TLS, and you SHOULD deactivate both
        TLS session renegotiation and TLS session resumption.

        References:

        - https://tools.ietf.org/html/rfc5056
        - https://tools.ietf.org/html/rfc5929
        - http://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Connection.get_finished
        - http://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Connection.get_peer_finished

        :returns: The channel ID (if available) of the underlying WAMP transport. The
            channel ID is a 32 bytes value, or ``None`` when unavailable.
        :rtype: binary or None
        """
@six.add_metaclass(abc.ABCMeta)
class ITransportHandler(object):
    """
    Receiver side of a WAMP transport: gets notified of transport lifecycle
    events and of each incoming WAMP message.
    """
    @abc.abstractproperty
    def transport(self):
        """
        When the transport this handler is attached to is currently open, this property
        can be read from. The property should be considered read-only. When the transport
        is gone, this property is set to None.
        """
    @abc.abstractmethod
    def on_open(self, transport):
        """
        Callback fired when transport is open. May run asynchronously. The transport
        is considered running and is_open() would return true, as soon as this callback
        has completed successfully.

        :param transport: An instance that implements :class:`autobahn.wamp.interfaces.ITransport`
        :type transport: obj
        """
    @abc.abstractmethod
    def on_message(self, message):
        """
        Callback fired when a WAMP message was received. May run asynchronously. The callback
        should return or fire the returned deferred/future when it's done processing the message.
        In particular, an implementation of this callback must not access the message afterwards.

        :param message: An instance that implements :class:`autobahn.wamp.interfaces.IMessage`
        :type message: obj
        """
    @abc.abstractmethod
    def on_close(self, was_clean):
        """
        Callback fired when the transport has been closed.

        :param was_clean: Indicates if the transport has been closed regularly
            (clean close) as opposed to an abrupt loss of the connection.
        :type was_clean: bool
        """
@six.add_metaclass(abc.ABCMeta)
class ISession(object):
    """
    Base interface for WAMP sessions: lifecycle callbacks and methods for
    joining/leaving a realm and managing the underlying transport.
    """
    @abc.abstractmethod
    def on_connect(self):
        """
        Callback fired when the transport this session will run over has been established.
        """
    @abc.abstractmethod
    def join(self, realm):
        """
        Attach the session to the given realm. A session is open as soon as it is attached to a realm.

        :param realm: The URI of the WAMP realm to attach the session to.
        :type realm: unicode
        """
    @abc.abstractmethod
    def on_challenge(self, challenge):
        """
        Callback fired when the peer demands authentication.

        May return a Deferred/Future.

        :param challenge: The authentication challenge.
        :type challenge: Instance of :class:`autobahn.wamp.types.Challenge`.
        """
    @abc.abstractmethod
    def on_join(self, details):
        """
        Callback fired when WAMP session has been established.

        May return a Deferred/Future.

        :param details: Session information.
        :type details: Instance of :class:`autobahn.wamp.types.SessionDetails`.
        """
    @abc.abstractmethod
    def leave(self, reason=None, message=None):
        """
        Actively close this WAMP session.

        :param reason: An optional URI for the closing reason.
        :type reason: str

        :param message: An optional (human readable) closing message, intended for
            logging purposes.
        :type message: str

        :return: may return a Future/Deferred that fires when we've disconnected
        """
    @abc.abstractmethod
    def on_leave(self, details):
        """
        Callback fired when WAMP session has is closed

        :param details: Close information.
        :type details: Instance of :class:`autobahn.wamp.types.CloseDetails`.
        """
    @abc.abstractmethod
    def disconnect(self):
        """
        Close the underlying transport.
        """
    @abc.abstractmethod
    def is_connected(self):
        """
        Check if the underlying transport is connected.

        :returns: bool -- ``True``, if the underlying transport is connected.
        """
    @abc.abstractmethod
    def is_attached(self):
        """
        Check if the session has currently joined a realm.

        :returns: bool -- ``True``, if the session is attached to a realm.
        """
    @abc.abstractmethod
    def on_disconnect(self):
        """
        Callback fired when underlying transport has been closed.
        """
@six.add_metaclass(abc.ABCMeta)
class IApplicationSession(ISession):
    """
    Interface for WAMP client peers implementing the four different
    WAMP roles (caller, callee, publisher, subscriber).
    """
    @abc.abstractmethod
    def define(self, exception, error=None):
        """
        Defines an exception for a WAMP error in the context of this WAMP session.

        :param exception: The exception class to define an error mapping for.
        :type exception: A class that derives of ``Exception``.
        :param error: The URI (or URI pattern) the exception class should be mapped for.
            Iff the ``exception`` class is decorated, this must be ``None``.
        :type error: str
        """
    @abc.abstractmethod
    def call(self, procedure, *args, **kwargs):
        """
        Call a remote procedure.

        This will return a Deferred/Future, that when resolved, provides the actual result
        returned by the called remote procedure.

        - If the result is a single positional return value, it'll be returned "as-is".
        - If the result contains multiple positional return values or keyword return values,
          the result is wrapped in an instance of :class:`autobahn.wamp.types.CallResult`.
        - If the call fails, the returned Deferred/Future will be rejected with an instance
          of :class:`autobahn.wamp.exception.ApplicationError`.

        If ``kwargs`` contains an ``options`` keyword argument that is an instance of
        :class:`autobahn.wamp.types.CallOptions`, this will provide specific options for
        the call to perform.

        When the *Caller* and *Dealer* implementations support canceling of calls, the call may
        be canceled by canceling the returned Deferred/Future.

        :param procedure: The URI of the remote procedure to be called, e.g. ``u"com.myapp.hello"``.
        :type procedure: unicode
        :param args: Any positional arguments for the call.
        :type args: list
        :param kwargs: Any keyword arguments for the call.
        :type kwargs: dict
        :returns: A Deferred/Future for the call result -
        :rtype: instance of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
        """
    @abc.abstractmethod
    def register(self, endpoint, procedure=None, options=None):
        """
        Register a procedure for remote calling.

        When ``endpoint`` is a callable (function, method or object that implements ``__call__``),
        then ``procedure`` must be provided and an instance of
        :tx:`twisted.internet.defer.Deferred` (when running on **Twisted**) or an instance
        of :py:class:`asyncio.Future` (when running on **asyncio**) is returned.

        - If the registration *succeeds* the returned Deferred/Future will *resolve* to
          an object that implements :class:`autobahn.wamp.interfaces.IRegistration`.
        - If the registration *fails* the returned Deferred/Future will *reject* with an
          instance of :class:`autobahn.wamp.exception.ApplicationError`.

        When ``endpoint`` is an object, then each of the object's methods that is decorated
        with :func:`autobahn.wamp.register` is automatically registered and a (single)
        DeferredList or Future is returned that gathers all individual underlying Deferreds/Futures.

        :param endpoint: The endpoint called under the procedure.
        :type endpoint: callable or object
        :param procedure: When ``endpoint`` is a callable, the URI (or URI pattern)
            of the procedure to register for. When ``endpoint`` is an object,
            the argument is ignored (and should be ``None``).
        :type procedure: unicode
        :param options: Options for registering.
        :type options: instance of :class:`autobahn.wamp.types.RegisterOptions`.
        :returns: A registration or a list of registrations (or errors)
        :rtype: instance(s) of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
        """
    @abc.abstractmethod
    def publish(self, topic, *args, **kwargs):
        """
        Publish an event to a topic.

        If ``kwargs`` contains an ``options`` keyword argument that is an instance of
        :class:`autobahn.wamp.types.PublishOptions`, this will provide
        specific options for the publish to perform.

        .. note::
           By default, publications are non-acknowledged and the publication can
           fail silently, e.g. because the session is not authorized to publish
           to the topic.

        When publication acknowledgement is requested via ``options.acknowledge == True``,
        this function returns a Deferred/Future:

        - If the publication succeeds the Deferred/Future will resolve to an object
          that implements :class:`autobahn.wamp.interfaces.IPublication`.
        - If the publication fails the Deferred/Future will reject with an instance
          of :class:`autobahn.wamp.exception.ApplicationError`.

        :param topic: The URI of the topic to publish to, e.g. ``u"com.myapp.mytopic1"``.
        :type topic: unicode
        :param args: Arbitrary application payload for the event (positional arguments).
        :type args: list
        :param kwargs: Arbitrary application payload for the event (keyword arguments).
        :type kwargs: dict
        :returns: Acknowledgement for acknowledge publications - otherwise nothing.
        :rtype: ``None`` or instance of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
        """
    @abc.abstractmethod
    def subscribe(self, handler, topic=None, options=None):
        """
        Subscribe to a topic for receiving events.

        When ``handler`` is a callable (function, method or object that implements ``__call__``),
        then `topic` must be provided and an instance of
        :tx:`twisted.internet.defer.Deferred` (when running on **Twisted**) or an instance
        of :class:`asyncio.Future` (when running on **asyncio**) is returned.

        - If the subscription succeeds the Deferred/Future will resolve to an object
          that implements :class:`autobahn.wamp.interfaces.ISubscription`.
        - If the subscription fails the Deferred/Future will reject with an instance
          of :class:`autobahn.wamp.exception.ApplicationError`.

        When ``handler`` is an object, then each of the object's methods that is decorated
        with :func:`autobahn.wamp.subscribe` is automatically subscribed as event handlers,
        and a list of Deferreds/Futures is returned that each resolves or rejects as above.

        :param handler: The event handler to receive events.
        :type handler: callable or object
        :param topic: When ``handler`` is a callable, the URI (or URI pattern)
            of the topic to subscribe to. When ``handler`` is an object, this
            value is ignored (and should be ``None``).
        :type topic: unicode
        :param options: Options for subscribing.
        :type options: An instance of :class:`autobahn.wamp.types.SubscribeOptions`.
        :returns: A single Deferred/Future or a list of such objects
        :rtype: instance(s) of :tx:`twisted.internet.defer.Deferred` / :py:class:`asyncio.Future`
        """
| 37.569416 | 106 | 0.658044 | true | true | |
f7274b9a30433a0e80ac0f1cf680738f5e8edb50 | 20,559 | py | Python | PyDSS/pyPostprocessor/PostprocessScripts/DERMSOptimizer_helper_modules/opt_funcs.py | daniel-thom/PyDSS | 8c7ae2d3a17d596b42a92e33f7d29329e26fbc30 | [
"BSD-3-Clause"
] | 1 | 2020-11-25T17:52:53.000Z | 2020-11-25T17:52:53.000Z | PyDSS/pyPostprocessor/PostprocessScripts/DERMSOptimizer_helper_modules/opt_funcs.py | daniel-thom/PyDSS | 8c7ae2d3a17d596b42a92e33f7d29329e26fbc30 | [
"BSD-3-Clause"
] | null | null | null | PyDSS/pyPostprocessor/PostprocessScripts/DERMSOptimizer_helper_modules/opt_funcs.py | daniel-thom/PyDSS | 8c7ae2d3a17d596b42a92e33f7d29329e26fbc30 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T19:52:02.000Z | 2020-07-23T19:52:02.000Z | import numpy as np
from scipy.sparse import lil_matrix
import scipy.sparse.linalg as sp
import scipy.sparse as sparse
import math
import csv
import matplotlib.pyplot as plt
def linear_powerflow_model(Y00, Y01, Y10, Y11_inv, I_coeff, V1, slack_no):
    """
    Build fixed-point linearization coefficients for voltages and branch
    currents around the operating point ``V1``.

    :param Y00/Y01/Y10: partitions of the bus admittance matrix (slack vs. rest).
    :param Y11_inv: inverse of the non-slack admittance partition.
    :param I_coeff: branch-current coefficient matrix; may be empty to skip
        the current linearization.
    :param V1: complex node voltages at the linearization point.
    :param slack_no: number of slack-bus nodes (they occupy the first entries).
    :returns: tuple of coefficients so that approximately
        V    = coeff_V_P*P + coeff_V_Q*Q + coeff_Vm,
        Vmag = coeff_Vmag_P*P + coeff_Vmag_Q*Q + coeff_Vmag_k,
        I    = coeff_I_P*P + coeff_I_Q*Q + coeff_I_const.
    """
    # voltage linearization
    V1_conj = np.conj(V1[slack_no:])
    V1_conj_inv = 1 / V1_conj
    coeff_V = Y11_inv * V1_conj_inv
    coeff_V_P = coeff_V
    coeff_V_Q = -1j*coeff_V
    # no-load voltage contribution from the slack bus
    coeff_Vm = -np.dot(Y11_inv,np.dot(Y10,V1[:slack_no]))
    # voltage magnitude linearization
    m = coeff_Vm
    m_inv = 1 / coeff_Vm
    coeff_Vmag_k = abs(m)
    A = (np.multiply(coeff_V.transpose(),m_inv)).transpose()
    coeff_Vmag_P = (np.multiply(A.real.transpose(),coeff_Vmag_k)).transpose()
    coeff_Vmag_Q = (np.multiply((-1j*A).real.transpose(),coeff_Vmag_k)).transpose()
    # current linearization (skipped when no branch coefficient matrix is given)
    if len(I_coeff):
        coeff_I_P = np.dot(I_coeff[:,slack_no:],coeff_V_P)
        coeff_I_Q = np.dot(I_coeff[:,slack_no:],coeff_V_Q)
        coeff_I_const = np.dot(I_coeff[:,slack_no:],coeff_Vm) + np.dot(I_coeff[:,:slack_no],V1[:slack_no])
    else:
        coeff_I_P = []
        coeff_I_Q = []
        coeff_I_const = []
    return coeff_V_P, coeff_V_Q, coeff_Vm, coeff_Vmag_P, coeff_Vmag_Q, coeff_Vmag_k, coeff_I_P, coeff_I_Q, coeff_I_const
def validate_linear_model(coeff_Vp, coeff_Vq, coeff_Vm, PQ_node, slack_number):
    """
    Evaluate the linearized voltage model at the injections ``PQ_node``.

    Returns ``[V_cal, v_cal_1]``: the magnitude-style evaluation using separate
    real/imaginary parts, and the complex evaluation using the conjugated
    injection vector.  Injections are scaled by 1000 (presumably kW/kVar to
    W/Var — confirm against the S base used elsewhere).
    """
    pq = np.asarray(PQ_node)[slack_number:] * 1000
    V_cal = coeff_Vm + np.dot(coeff_Vp, pq.real) + np.dot(coeff_Vq, pq.imag)
    v_cal_1 = coeff_Vm + np.dot(coeff_Vp, np.conj(pq))
    return [V_cal, v_cal_1]
def check_VI_correct(V1,PQ_node,slack_number,coeff_V,coeff_Vm,coeff_Vmag_P,coeff_Vmag_Q,coeff_Vmag_k,Y10,Y11,coeff_I_P, coeff_I_Q, coeff_I_const,I_coeff):
    """
    Diagnostic: compare the linearized voltage/current model against the exact
    power-flow quantities and dump the per-node/per-branch errors to CSV files
    ('voltage_diff.csv', 'voltageMag_diff.csv', 'currentBus_diff.csv',
    'current_diff.csv') in the working directory.  Prints each error sum.
    """
    # complex-voltage linearization error (percent, relative to the true |V|)
    V1_linear = np.dot(coeff_V,np.conj(PQ_node[slack_number:]*1000)) + coeff_Vm
    V1_linear = list(V1_linear)
    Vdiff = list(map(lambda x: abs(x[0]-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_linear)))
    print(sum(Vdiff))
    with open('voltage_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Vdiff)
        f.close()  # redundant inside the with-block, kept as in the original
    # voltage-magnitude linearization error (percent)
    V1_mag_linear = np.dot(coeff_Vmag_P,(PQ_node[slack_number:]*1000).real) + np.dot(coeff_Vmag_Q,(PQ_node[slack_number:]*1000).imag) + coeff_Vmag_k
    V1_mag_linear = list(V1_mag_linear)
    Vdiff = list(map(lambda x: abs(abs(x[0])-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_mag_linear)))
    print(sum(Vdiff))
    with open('voltageMag_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Vdiff)
        f.close()
    # bus injection currents: S/V conjugate vs. Y*V reconstruction
    Ibus = list(map(lambda x: (x[0]*1000/x[1]).conjugate(),zip(list(PQ_node)[slack_number:],V1[slack_number:])))
    Ibus_cal_0 = np.dot(Y10,V1[0:slack_number])
    Ibus_cal_1 = np.dot(Y11,V1[slack_number:])
    Ibus_cal = list(map(lambda x: x[0]+x[1],zip(Ibus_cal_0,Ibus_cal_1)))
    Idiff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibus,Ibus_cal)))
    print(sum(Idiff))
    with open('currentBus_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Idiff)
        f.close()
    # branch currents: exact vs. linear-voltage based
    Ibranch = np.dot(I_coeff,V1)
    Ibranch_cal = np.dot(I_coeff[:,slack_number:],V1_linear)+np.dot(I_coeff[:,0:slack_number],V1[:slack_number])
    Ibranch_diff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibranch,Ibranch_cal)))
    print(sum(Ibranch_diff))
    with open('current_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Ibranch_diff)
        f.close()
def costFun(x, dual_upper, dual_lower, v1_pu, Ppv_max, coeff_p, coeff_q, NPV, control_bus_index, Vupper, Vlower, dual_current, ThermalLimit, I1_mag):
    """
    Evaluate the PV objective and its Lagrangian.

    Returns ``[f1, f]`` where ``f1`` is the quadratic curtailment plus
    reactive-injection cost and ``f`` additionally carries the dual-weighted
    voltage-band and squared-current violation penalties.
    """
    # quadratic cost: curtailment (Pmax - P)^2 plus reactive injection Q^2
    f1 = 0
    for k in range(NPV):
        curtail = Ppv_max[k] - x[k]
        f1 += coeff_p * curtail * curtail + coeff_q * x[k + NPV] * x[k + NPV]
    # dual-weighted voltage-band violations at the monitored buses
    v_evaluate = [v1_pu[k] for k in control_bus_index]
    over = np.array([max(v - Vupper, 0) for v in v_evaluate])
    under = np.array([max(Vlower - v, 0) for v in v_evaluate])
    f2 = f1 + np.dot(dual_upper, over) + np.dot(dual_lower, under)
    # dual-weighted squared-current excess over the thermal limits
    excess = np.array([max(i * i - lim * lim, 0) for i, lim in zip(I1_mag, ThermalLimit)])
    f3 = np.dot(dual_current, excess)
    return [f1, f2 + f3]
def PV_costFun_gradient(x, coeff_p, coeff_q, Pmax):
    """
    Gradient of the PV cost function w.r.t. the stacked decision vector.

    :param x: stacked vector [P_1..P_n, Q_1..Q_n] in kW/kVar.
    :param coeff_p: weight on the curtailment term coeff_p*(Pmax-P)^2.
    :param coeff_q: weight on the reactive term coeff_q*Q^2.
    :param Pmax: available PV real power per unit, in kW.
    :returns: gradient array, scaled to W/Var (the *1000 factors).
    """
    grad = np.zeros(len(x))
    half = len(x) // 2  # hoisted: the original recomputed int(len(x)/2) three times per iteration
    for ii in range(half):
        # d/dP of coeff_p*(Pmax-P)^2 and d/dQ of coeff_q*Q^2, in W-scale
        grad[ii] = -2 * coeff_p * (Pmax[ii] * 1000 - x[ii] * 1000)
        grad[ii + half] = 2 * coeff_q * x[ii + half] * 1000
    return grad
def voltage_constraint_gradient(AllNodeNames, node_withPV, dual_upper, dual_lower, coeff_Vmag_p, coeff_Vmag_q):
    """
    Dual-weighted gradients of the voltage-magnitude constraints w.r.t. the
    stacked [P; Q] vector.

    Fixes vs. original: (1) the slack-bus removal used to alias and mutate the
    caller's ``AllNodeNames`` list in place; a slice copy is taken instead.
    (2) the gradient accumulators were integer-typed matrices, silently
    truncating every assigned float; they are float-typed now.

    :returns: [grad_upper, grad_lower], each a (2n x 1) matrix.
    """
    node_noslackbus = AllNodeNames[3:]  # copy; do not mutate the caller's list
    n = len(node_noslackbus)
    grad_upper = np.matrix([0.0] * n * 2).transpose()
    grad_lower = np.matrix([0.0] * n * 2).transpose()
    count = 0
    for node in node_noslackbus:
        if node in node_withPV:
            # NOTE(review): count advances only on PV nodes, as in the original —
            # confirm coeff columns are indeed ordered by PV node.
            grad_upper[count] = dual_upper.transpose() * coeff_Vmag_p[:, count]
            grad_upper[count + n] = dual_upper.transpose() * coeff_Vmag_q[:, count]
            grad_lower[count] = -dual_lower.transpose() * coeff_Vmag_p[:, count]
            grad_lower[count + n] = -dual_lower.transpose() * coeff_Vmag_q[:, count]
            count = count + 1
    return [grad_upper, grad_lower]
def current_constraint_gradient(AllNodeNames, node_withPV, dual_upper, coeff_Imag_p, coeff_Imag_q):
    """
    Dual-weighted gradient of the branch-current constraints w.r.t. the
    stacked [P; Q] vector.

    Fixes vs. original: (1) slack-bus removal no longer mutates the caller's
    ``AllNodeNames`` list; (2) the accumulator is float-typed instead of an
    int matrix that truncated assigned gradients.
    """
    node_noslackbus = AllNodeNames[3:]  # copy; do not mutate the caller's list
    n = len(node_noslackbus)
    grad_upper = np.matrix([0.0] * n * 2).transpose()
    count = 0
    for node in node_noslackbus:
        if node in node_withPV:
            grad_upper[count] = dual_upper.transpose() * coeff_Imag_p[:, count]
            grad_upper[count + n] = dual_upper.transpose() * coeff_Imag_q[:, count]
            count = count + 1
    return grad_upper
# =========================================Yiyun's Notes===========================================#
# PV_costFun_gradient, voltage_constraint_gradient, current_constraint_gradient and project_PV..
# ... are set up for updating the PV decision variables in eq(10)
# =================================================================================================#
def voltage_constraint(V1_mag):
    """
    Stacked voltage-band constraint values g = [V - 1.05; 0.95 - V] (<= 0 when
    the band is respected).

    Fix vs. original: the old body did ``g = V1_mag - 1.05`` followed by
    ``g.append(0.95 - V1_mag)`` — an ndarray has no ``append`` and a plain list
    does not support the subtraction, so it raised for every input type.
    """
    v = np.asarray(V1_mag, dtype=float)
    return np.concatenate((v - 1.05, 0.95 - v))
def current_constraint(I1_mag, Imax):
    """
    Branch-current constraint value(s) g = I - Imax, wrapped in a single-element
    list (power flow assumed single-directional, as in the original).
    """
    return [I1_mag - Imax]
def project_dualvariable(mu):
    """
    Project a dual vector onto the nonnegative orthant, in place, and return it.

    Duals of canonical-form inequality constraints must satisfy mu >= 0.
    """
    for idx in range(len(mu)):
        if mu[idx] < 0:
            mu[idx] = 0
    return mu
def project_PV(x, Pmax, Sinv):
    """
    Project the stacked [P; Q] setpoints onto each inverter's feasible set,
    in place: P is clamped to [0, Pmax], then |Q| to the remaining apparent
    power headroom sqrt(S^2 - P^2).

    :returns: [x, total available P, total available Q].
    """
    total_p = 0
    total_q = 0
    num = len(Sinv)
    for k in range(num):
        # clamp real power into [0, Pmax]
        if x[k] > Pmax[k]:
            x[k] = Pmax[k]
        elif x[k] < 0:
            x[k] = 0
        # reactive capability from the remaining apparent-power headroom
        if Sinv[k] > x[k]:
            q_cap = math.sqrt(Sinv[k] * Sinv[k] - x[k] * x[k])
        else:
            q_cap = 0
        if x[k + num] > q_cap:
            x[k + num] = q_cap
        elif x[k + num] < -q_cap:
            x[k + num] = -q_cap
        total_p += Pmax[k]
        total_q += q_cap
    return [x, total_p, total_q]
def dual_update(mu, coeff_mu, constraint):
    """
    One projected subgradient ascent step on a dual variable:
    mu <- max(mu + coeff_mu * g, 0), elementwise.
    """
    stepped = mu + coeff_mu * constraint
    # duals of inequality constraints stay in the nonnegative orthant
    return project_dualvariable(stepped)
def matrix_cal_for_subPower(V0, Y00, Y01, Y11, V1_noload):
    """
    Precompute the constant pieces K and g of the substation-power
    linearization from the slack voltages ``V0`` and admittance partitions.

    :returns: [K, g] with K = diag(V0)*conj(Y01)*inv(conj(Y11)) and
        g = diag(V0)*conj(Y00)*conj(V0)^T + diag(V0)*conj(Y01)*conj(V1_noload).
    """
    # 3x3 diagonal matrix carrying the three slack-phase voltages
    diag_V0 = np.matrix(np.zeros((3, 3), dtype=complex))
    for ph in range(3):
        diag_V0[ph, ph] = V0[ph]
    K = diag_V0 * Y01.conj() * np.linalg.inv(Y11.conj())
    g = (diag_V0 * Y00.conj() * np.matrix(V0).transpose().conj()
         + diag_V0 * Y01.conj() * V1_noload.conj())
    return [K, g]
def subPower_PQ(V1, PQ_node, K, g):
    """
    Linearized substation injections from nodal injections.

    :returns: [P0, Q0, M] with P0/Q0 in kW/kVar and M = K * inv(diag(V1)).
    """
    # complex diagonal of the current node voltages
    diag_V1 = np.matrix(np.diag(np.asarray(V1, dtype=complex)))
    M = K * np.linalg.inv(diag_V1)
    MR = M.real
    MI = M.imag
    # evaluate in W/Var, then scale back to kW/kVar
    P0 = (g.real + (MR.dot(PQ_node.real) * 1000 - MI.dot(PQ_node.imag) * 1000)) / 1000
    Q0 = (g.imag + (MR.dot(PQ_node.imag) * 1000 + MI.dot(PQ_node.real) * 1000)) / 1000
    return [P0, Q0, M]
def sub_costFun_gradient(x, sub_ref, coeff_sub, sub_measure, M, node_withPV):
    """
    Gradient of the substation-power tracking cost
    coeff_sub * sum_phase (P_measured - P_ref)^2 w.r.t. the stacked [P; Q]
    decision vector, using the sensitivity matrix M (3 x nodes).

    Fix vs. original: the gradient accumulators were built as integer
    matrices (``np.matrix([0]*len(x))``), so every assigned float sensitivity
    was silently truncated toward zero; they are float-typed now.
    """
    npv = len(node_withPV)
    grad_a = np.matrix([0.0] * len(x)).transpose()
    grad_b = np.matrix([0.0] * len(x)).transpose()
    grad_c = np.matrix([0.0] * len(x)).transpose()
    MR = M.real
    MI = M.imag
    count = 0
    for node in node_withPV:
        col = int(node)
        # per-phase sensitivities of substation P to this node's P (rows a/b/c)
        grad_a[count] = -MR[0, col]
        grad_b[count] = -MR[1, col]
        grad_c[count] = -MR[2, col]
        # ... and to this node's Q
        grad_a[count + npv] = MI[0, col]
        grad_b[count + npv] = MI[1, col]
        grad_c[count + npv] = MI[2, col]
        count = count + 1
    res = coeff_sub * ((sub_measure[0] - sub_ref[0]) * 1000 * grad_a
                       + (sub_measure[1] - sub_ref[1]) * 1000 * grad_b
                       + (sub_measure[2] - sub_ref[2]) * 1000 * grad_c)
    return res / 1000
def projection(x, xmax, xmin):
    """
    Clamp each entry of the matrix/array ``x`` into [xmin[i], xmax[i]],
    in place, and return ``x``.
    """
    idx = 0
    while idx < len(x):
        if x.item(idx) > xmax[idx]:
            x[idx] = xmax[idx]
        # re-read after the upper clamp, mirroring the original sequential checks
        if x.item(idx) < xmin[idx]:
            x[idx] = xmin[idx]
        idx += 1
    return x
class DERMS:
    """
    Zone-level DER management: tracks the zone's PV units, monitored buses and
    monitored branch elements, reads measurements from OpenDSS objects
    (:meth:`monitor`) and performs one primal-dual optimization step per call
    (:meth:`control`) to produce new PV real/reactive setpoints.
    """
    def __init__(self, pvData, controlbus, controlelem, controlelem_limit, sub_node_names, sub_elem_names):
        """
        :param pvData: dict with keys "pvName", "pvLocation", "pvSize",
            "inverterSize" describing all PVs in the zone.
        :param controlbus: names of all controlled (voltage-monitored) nodes.
        :param controlelem: names of branch elements with current limits.
        :param controlelem_limit: current limits matching ``controlelem``.
        :param sub_node_names: names of all nodes in the zone (superset of
            ``controlbus``).
        :param sub_elem_names: names of all branch elements in the zone.
        """
        self.PV_name = pvData["pvName"]
        self.PV_location = pvData["pvLocation"]
        self.PV_size = pvData["pvSize"]
        self.inverter_size = pvData["inverterSize"]
        self.control_bus = controlbus
        sub_node_names = [ii.upper() for ii in sub_node_names]
        self.controlbus_index = [sub_node_names.index(ii.upper()) for ii in controlbus]  # control bus index in the sub system (number)
        PVbus_index = []
        for bus in self.PV_location:
            temp = bus.split('.')
            if len(temp) == 1:
                # no phase suffix -> treat as three-phase: expand to .1 .2 .3
                temp = temp + ['1', '2', '3']
            for ii in range(len(temp) - 1):
                PVbus_index.append(sub_node_names.index((temp[0] + '.' + temp[ii + 1]).upper()))
        self.PVbus_index = PVbus_index
        self.control_elem = controlelem
        self.controlelem_limit = controlelem_limit
        self.controlelem_index = [sub_elem_names.index(ii) for ii in controlelem]  # control branches index in the sub system (number)
    def monitor(self, dss, dssObjects, PVSystem_1phase):
        """
        Read the current PV powers, monitored bus voltage magnitudes (pu) and
        monitored branch current magnitudes from the OpenDSS objects.

        :returns: [PV bus names, (nPV x 2) array of [P, Q], voltages, currents]
        """
        PVpowers = []
        for pv in PVSystem_1phase["Name"].tolist():
            nPhases = dssObjects["Generators"][pv].GetValue("phases")
            power = dssObjects["Generators"][pv].GetValue("Powers")
            # "Powers" interleaves P,Q per phase; average over phases
            PVpowers.append([sum(power[::2]) / nPhases, sum(power[1::2]) / nPhases])
        PVpowers = np.asarray(PVpowers)
        Vmes = []
        for bus in self.control_bus:
            busName = bus.split('.')[0].lower()
            Vmag = dssObjects["Buses"][busName].GetValue("puVmagAngle")[::2]
            allbusnode = dss.Bus.Nodes()
            phase = bus.split('.')[1]
            index = allbusnode.index(int(phase))
            Vnode = Vmag[index]
            Vmes.append(Vnode)
        Imes = []
        for elem in self.control_elem:
            className = elem.split('.')[0] + "s"
            I = dssObjects[className][elem].GetValue("CurrentsMagAng")[::2][:3]  # TODO: Why is there a hardcoded [:3] ?
            Imes.append(I)
        return [self.PV_location, PVpowers, Vmes, Imes]
    def control(self, linear_PF_coeff, Options, stepsize, mu0, Vlimit, PVpower, Imes, Vmes, PV_Pmax_forecast):
        """
        One primal-dual update of the PV setpoints.

        :param linear_PF_coeff: coefficients from ``linear_powerflow_model``
            (indices 3/4 voltage-magnitude, 6/7 current sensitivities).
        :param Options: dict with cost weights "coeff_p", "coeff_q".
        :param stepsize: [stepsize_xp, stepsize_xq, stepsize_mu].
        :param mu0: duals from the previous step
            [mu_Vmag_upper0, mu_Vmag_lower0, mu_I0].
        :param Vlimit: [Vupper, Vlower] allowed voltage band (pu).
        :param PVpower: measured per-PV [P, Q] (load sign convention; negated
            below to get injections).
        :param Imes/Vmes: measured branch currents / bus voltages.
        :param PV_Pmax_forecast: forecast available PV real power (kW).
        :returns: [x1, mu1] — new stacked [P; Q] setpoints (kW/kVar) and duals.
        """
        coeff_p = Options["coeff_p"]
        coeff_q = Options["coeff_q"]
        PVname = self.PV_name
        NPV = len(PVname)
        x0 = np.zeros(2 * NPV)
        for ii in range(NPV):
            x0[ii] = -PVpower[ii][0]  # in kW
            x0[ii + NPV] = -PVpower[ii][1]  # in kVar
        #coeff_V_P = linear_PF_coeff[0]
        #coeff_V_Q = linear_PF_coeff[1]
        #coeff_Vm = linear_PF_coeff[2]
        coeff_Vmag_P = linear_PF_coeff[3]
        coeff_Vmag_Q = linear_PF_coeff[4]
        #coeff_Vmag_k = linear_PF_coeff[5]
        coeff_I_P = linear_PF_coeff[6]
        coeff_I_Q = linear_PF_coeff[7]
        #coeff_I_const = linear_PF_coeff[8]
        stepsize_xp = stepsize[0]
        stepsize_xq = stepsize[1]
        stepsize_mu = stepsize[2]
        Vupper = Vlimit[0]
        Vlower = Vlimit[1]
        controlbus_index = self.controlbus_index
        PVbus_index = self.PVbus_index
        controlelem_index = self.controlelem_index
        PV_inverter_size = self.inverter_size
        Imes_limit = self.controlelem_limit
        mu_Vmag_upper0 = mu0[0]
        mu_Vmag_lower0 = mu0[1]
        mu_I0 = mu0[2]
        #print([max(mu_Vmag_upper0),max(mu_Vmag_lower0)])
        # compute gradient of the Lagrangian w.r.t. the stacked [P; Q] vector
        PVcost_fun_gradient = PV_costFun_gradient(x0, coeff_p, coeff_q, PV_Pmax_forecast)
        Vmag_upper_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0),
                                              np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index], [ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0)),axis=0)
        Vmag_lower_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0),
                                              np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0)),axis=0)
        Vmag_gradient = Vmag_upper_gradient - Vmag_lower_gradient
        if len(mu_I0)>0 :
            # gradient of the squared-current penalty |I|^2 <= limit^2
            temp_real = mu_I0 * np.array(Imes.real)
            temp_imag = mu_I0 * np.array(Imes.imag)
            I_gradient_real = np.concatenate((np.dot(
                coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
                temp_real), np.dot(
                coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
                temp_real)), axis=0)
            I_gradient_imag = np.concatenate((np.dot(
                coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
                temp_imag), np.dot(
                coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
                temp_imag)), axis=0)
            I_gradient = 2 * I_gradient_real + 2 * I_gradient_imag
        else:
            I_gradient = 0
        gradient = PVcost_fun_gradient + Vmag_gradient + I_gradient / 1000
        # primal descent step, then projection onto each inverter's capability set
        x1 = np.concatenate([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient[NPV:]])
        #print('solved: '+str(sum(x1[0:NPV]))+','+str(sum(x1[NPV:]))) # in kW/kVar
        [x1, Pmax_allPV, Qmax_allPV] = project_PV(x1, PV_Pmax_forecast, PV_inverter_size)
        #print('Available P = '+str(Pmax_allPV)+' , Available Q = '+str(Qmax_allPV))
        #print('projected: ' + str(sum(x1[0:NPV])) + ',' + str(sum(x1[NPV:]))) # in kW/kVar
        x1 = np.array([round(ii, 5) for ii in x1])
        # dual ascent on the voltage-band multipliers, projected to mu >= 0
        mu_Vmag_lower1 = mu_Vmag_lower0 + stepsize_mu * (Vlower - np.array(Vmes))
        mu_Vmag_upper1 = mu_Vmag_upper0 + stepsize_mu * (np.array(Vmes) - Vupper)
        mu_Vmag_lower1 = project_dualvariable(mu_Vmag_lower1)
        mu_Vmag_upper1 = project_dualvariable(mu_Vmag_upper1)
        if mu_I0:
            # dual ascent on the squared-current multipliers (scaled step)
            mu_I1 = mu_I0 + stepsize_mu / 300 * np.array(list(map(lambda x: x[0] * x[0] - x[1] * x[1], zip(Imes, Imes_limit))))
            mu_I1 = project_dualvariable(mu_I1)
        else:
            mu_I1 = mu_I0
        mu1 = [mu_Vmag_upper1,mu_Vmag_lower1,mu_I1]
        return [x1,mu1]
| 45.686667 | 182 | 0.555329 | import numpy as np
from scipy.sparse import lil_matrix
import scipy.sparse.linalg as sp
import scipy.sparse as sparse
import math
import csv
import matplotlib.pyplot as plt
def linear_powerflow_model(Y00, Y01, Y10, Y11_inv, I_coeff, V1, slack_no):
    """
    Build fixed-point linearization coefficients for voltages and branch
    currents around the operating point ``V1``.

    :param Y00/Y01/Y10: partitions of the bus admittance matrix (slack vs. rest).
    :param Y11_inv: inverse of the non-slack admittance partition.
    :param I_coeff: branch-current coefficient matrix; may be empty to skip
        the current linearization.
    :param V1: complex node voltages at the linearization point.
    :param slack_no: number of slack-bus nodes (they occupy the first entries).
    :returns: tuple of coefficients so that approximately
        V    = coeff_V_P*P + coeff_V_Q*Q + coeff_Vm,
        Vmag = coeff_Vmag_P*P + coeff_Vmag_Q*Q + coeff_Vmag_k,
        I    = coeff_I_P*P + coeff_I_Q*Q + coeff_I_const.
    """
    # voltage linearization
    V1_conj = np.conj(V1[slack_no:])
    V1_conj_inv = 1 / V1_conj
    coeff_V = Y11_inv * V1_conj_inv
    coeff_V_P = coeff_V
    coeff_V_Q = -1j*coeff_V
    # no-load voltage contribution from the slack bus
    coeff_Vm = -np.dot(Y11_inv,np.dot(Y10,V1[:slack_no]))
    # voltage magnitude linearization
    m = coeff_Vm
    m_inv = 1 / coeff_Vm
    coeff_Vmag_k = abs(m)
    A = (np.multiply(coeff_V.transpose(),m_inv)).transpose()
    coeff_Vmag_P = (np.multiply(A.real.transpose(),coeff_Vmag_k)).transpose()
    coeff_Vmag_Q = (np.multiply((-1j*A).real.transpose(),coeff_Vmag_k)).transpose()
    # current linearization (skipped when no branch coefficient matrix is given)
    if len(I_coeff):
        coeff_I_P = np.dot(I_coeff[:,slack_no:],coeff_V_P)
        coeff_I_Q = np.dot(I_coeff[:,slack_no:],coeff_V_Q)
        coeff_I_const = np.dot(I_coeff[:,slack_no:],coeff_Vm) + np.dot(I_coeff[:,:slack_no],V1[:slack_no])
    else:
        coeff_I_P = []
        coeff_I_Q = []
        coeff_I_const = []
    return coeff_V_P, coeff_V_Q, coeff_Vm, coeff_Vmag_P, coeff_Vmag_Q, coeff_Vmag_k, coeff_I_P, coeff_I_Q, coeff_I_const
def validate_linear_model(coeff_Vp, coeff_Vq, coeff_Vm, PQ_node, slack_number):
    """
    Evaluate the linearized voltage model at the injections ``PQ_node``.

    Returns ``[V_cal, v_cal_1]``: the magnitude-style evaluation using separate
    real/imaginary parts, and the complex evaluation using the conjugated
    injection vector.  Injections are scaled by 1000 (presumably kW/kVar to
    W/Var — confirm against the S base used elsewhere).
    """
    pq = np.asarray(PQ_node)[slack_number:] * 1000
    V_cal = coeff_Vm + np.dot(coeff_Vp, pq.real) + np.dot(coeff_Vq, pq.imag)
    v_cal_1 = coeff_Vm + np.dot(coeff_Vp, np.conj(pq))
    return [V_cal, v_cal_1]
def check_VI_correct(V1,PQ_node,slack_number,coeff_V,coeff_Vm,coeff_Vmag_P,coeff_Vmag_Q,coeff_Vmag_k,Y10,Y11,coeff_I_P, coeff_I_Q, coeff_I_const,I_coeff):
    """
    Diagnostic: compare the linearized voltage/current model against the exact
    power-flow quantities and dump the per-node/per-branch errors to CSV files
    ('voltage_diff.csv', 'voltageMag_diff.csv', 'currentBus_diff.csv',
    'current_diff.csv') in the working directory.  Prints each error sum.
    """
    # complex-voltage linearization error (percent, relative to the true |V|)
    V1_linear = np.dot(coeff_V,np.conj(PQ_node[slack_number:]*1000)) + coeff_Vm
    V1_linear = list(V1_linear)
    Vdiff = list(map(lambda x: abs(x[0]-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_linear)))
    print(sum(Vdiff))
    with open('voltage_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Vdiff)
        f.close()  # redundant inside the with-block, kept as in the original
    # voltage-magnitude linearization error (percent)
    V1_mag_linear = np.dot(coeff_Vmag_P,(PQ_node[slack_number:]*1000).real) + np.dot(coeff_Vmag_Q,(PQ_node[slack_number:]*1000).imag) + coeff_Vmag_k
    V1_mag_linear = list(V1_mag_linear)
    Vdiff = list(map(lambda x: abs(abs(x[0])-x[1])/abs(x[0])*100,zip(V1[slack_number:],V1_mag_linear)))
    print(sum(Vdiff))
    with open('voltageMag_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Vdiff)
        f.close()
    # bus injection currents: S/V conjugate vs. Y*V reconstruction
    Ibus = list(map(lambda x: (x[0]*1000/x[1]).conjugate(),zip(list(PQ_node)[slack_number:],V1[slack_number:])))
    Ibus_cal_0 = np.dot(Y10,V1[0:slack_number])
    Ibus_cal_1 = np.dot(Y11,V1[slack_number:])
    Ibus_cal = list(map(lambda x: x[0]+x[1],zip(Ibus_cal_0,Ibus_cal_1)))
    Idiff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibus,Ibus_cal)))
    print(sum(Idiff))
    with open('currentBus_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Idiff)
        f.close()
    # branch currents: exact vs. linear-voltage based
    Ibranch = np.dot(I_coeff,V1)
    Ibranch_cal = np.dot(I_coeff[:,slack_number:],V1_linear)+np.dot(I_coeff[:,0:slack_number],V1[:slack_number])
    Ibranch_diff = list(map(lambda x: abs(x[0]-x[1]),zip(Ibranch,Ibranch_cal)))
    print(sum(Ibranch_diff))
    with open('current_diff.csv','w') as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(Ibranch_diff)
        f.close()
def costFun(x, dual_upper, dual_lower, v1_pu, Ppv_max, coeff_p, coeff_q, NPV, control_bus_index, Vupper, Vlower, dual_current, ThermalLimit, I1_mag):
    """
    Evaluate the PV objective and its Lagrangian.

    Returns ``[f1, f]`` where ``f1`` is the quadratic curtailment plus
    reactive-injection cost and ``f`` additionally carries the dual-weighted
    voltage-band and squared-current violation penalties.
    """
    # quadratic cost: curtailment (Pmax - P)^2 plus reactive injection Q^2
    f1 = 0
    for k in range(NPV):
        curtail = Ppv_max[k] - x[k]
        f1 += coeff_p * curtail * curtail + coeff_q * x[k + NPV] * x[k + NPV]
    # dual-weighted voltage-band violations at the monitored buses
    v_evaluate = [v1_pu[k] for k in control_bus_index]
    over = np.array([max(v - Vupper, 0) for v in v_evaluate])
    under = np.array([max(Vlower - v, 0) for v in v_evaluate])
    f2 = f1 + np.dot(dual_upper, over) + np.dot(dual_lower, under)
    # dual-weighted squared-current excess over the thermal limits
    excess = np.array([max(i * i - lim * lim, 0) for i, lim in zip(I1_mag, ThermalLimit)])
    f3 = np.dot(dual_current, excess)
    return [f1, f2 + f3]
def PV_costFun_gradient(x, coeff_p, coeff_q, Pmax):
    """
    Gradient of the PV cost function w.r.t. the stacked decision vector.

    :param x: stacked vector [P_1..P_n, Q_1..Q_n] in kW/kVar.
    :param coeff_p: weight on the curtailment term coeff_p*(Pmax-P)^2.
    :param coeff_q: weight on the reactive term coeff_q*Q^2.
    :param Pmax: available PV real power per unit, in kW.
    :returns: gradient array, scaled to W/Var (the *1000 factors).
    """
    grad = np.zeros(len(x))
    half = len(x) // 2  # hoisted: the original recomputed int(len(x)/2) three times per iteration
    for ii in range(half):
        # d/dP of coeff_p*(Pmax-P)^2 and d/dQ of coeff_q*Q^2, in W-scale
        grad[ii] = -2 * coeff_p * (Pmax[ii] * 1000 - x[ii] * 1000)
        grad[ii + half] = 2 * coeff_q * x[ii + half] * 1000
    return grad
def voltage_constraint_gradient(AllNodeNames, node_withPV, dual_upper, dual_lower, coeff_Vmag_p, coeff_Vmag_q):
    """
    Dual-weighted gradients of the voltage-magnitude constraints w.r.t. the
    stacked [P; Q] vector.

    Fixes vs. original: (1) the slack-bus removal used to alias and mutate the
    caller's ``AllNodeNames`` list in place; a slice copy is taken instead.
    (2) the gradient accumulators were integer-typed matrices, silently
    truncating every assigned float; they are float-typed now.

    :returns: [grad_upper, grad_lower], each a (2n x 1) matrix.
    """
    node_noslackbus = AllNodeNames[3:]  # copy; do not mutate the caller's list
    n = len(node_noslackbus)
    grad_upper = np.matrix([0.0] * n * 2).transpose()
    grad_lower = np.matrix([0.0] * n * 2).transpose()
    count = 0
    for node in node_noslackbus:
        if node in node_withPV:
            # NOTE(review): count advances only on PV nodes, as in the original —
            # confirm coeff columns are indeed ordered by PV node.
            grad_upper[count] = dual_upper.transpose() * coeff_Vmag_p[:, count]
            grad_upper[count + n] = dual_upper.transpose() * coeff_Vmag_q[:, count]
            grad_lower[count] = -dual_lower.transpose() * coeff_Vmag_p[:, count]
            grad_lower[count + n] = -dual_lower.transpose() * coeff_Vmag_q[:, count]
            count = count + 1
    return [grad_upper, grad_lower]
def current_constraint_gradient(AllNodeNames, node_withPV, dual_upper, coeff_Imag_p, coeff_Imag_q):
    """
    Dual-weighted gradient of the branch-current constraints w.r.t. the
    stacked [P; Q] vector.

    Fixes vs. original: (1) slack-bus removal no longer mutates the caller's
    ``AllNodeNames`` list; (2) the accumulator is float-typed instead of an
    int matrix that truncated assigned gradients.
    """
    node_noslackbus = AllNodeNames[3:]  # copy; do not mutate the caller's list
    n = len(node_noslackbus)
    grad_upper = np.matrix([0.0] * n * 2).transpose()
    count = 0
    for node in node_noslackbus:
        if node in node_withPV:
            grad_upper[count] = dual_upper.transpose() * coeff_Imag_p[:, count]
            grad_upper[count + n] = dual_upper.transpose() * coeff_Imag_q[:, count]
            count = count + 1
    return grad_upper
# =========================================Yiyun's Notes===========================================
def voltage_constraint(V1_mag):
    """
    Stacked voltage-band constraint values g = [V - 1.05; 0.95 - V] (<= 0 when
    the band is respected).

    Fix vs. original: the old body did ``g = V1_mag - 1.05`` followed by
    ``g.append(0.95 - V1_mag)`` — an ndarray has no ``append`` and a plain list
    does not support the subtraction, so it raised for every input type.
    """
    v = np.asarray(V1_mag, dtype=float)
    return np.concatenate((v - 1.05, 0.95 - v))
def current_constraint(I1_mag, Imax):
    """
    Branch-current constraint value(s) g = I - Imax, wrapped in a single-element
    list (power flow assumed single-directional, as in the original).
    """
    return [I1_mag - Imax]
def project_dualvariable(mu):
    """
    Project a dual vector onto the nonnegative orthant, in place, and return it.

    Duals of canonical-form inequality constraints must satisfy mu >= 0.
    """
    for idx in range(len(mu)):
        if mu[idx] < 0:
            mu[idx] = 0
    return mu
def project_PV(x,Pmax,Sinv):
Qavailable = 0
Pavailable = 0
num = len(Sinv)
for ii in range(num):
if x[ii] > Pmax[ii]:
x[ii] = Pmax[ii]
elif x[ii] < 0:
x[ii] = 0
if Sinv[ii] > x[ii]:
Qmax = math.sqrt(Sinv[ii]*Sinv[ii]-x[ii]*x[ii])
else:
Qmax = 0
if x[ii+num] > Qmax:
x[ii+num] = Qmax
elif x[ii+num] < -Qmax:
x[ii+num] = -Qmax
Pavailable = Pavailable + Pmax[ii]
Qavailable = Qavailable + Qmax
return [x,Pavailable,Qavailable]
def dual_update(mu,coeff_mu,constraint):
mu_new = mu + coeff_mu*constraint
mu_new = project_dualvariable(mu_new)
# normal way for update Lagrangian variable is by the sub-gradient of cost function
# Here is the equation (11) in the draft paper
# =================================================================================================#
return mu_new
def matrix_cal_for_subPower(V0, Y00, Y01, Y11, V1_noload):
diag_V0 = np.matrix([[complex(0, 0)] * 3] * 3)
diag_V0[0, 0] = V0[0]
diag_V0[1, 1] = V0[1]
diag_V0[2, 2] = V0[2]
K = diag_V0 * Y01.conj() * np.linalg.inv(Y11.conj())
g = diag_V0 * Y00.conj() * np.matrix(V0).transpose().conj() + diag_V0 * Y01.conj() * V1_noload.conj()
return[K,g]
def subPower_PQ(V1, PQ_node, K, g):
diag_V1 = np.matrix([[complex(0, 0)] * len(V1)] * len(V1))
for ii in range(len(V1)):
diag_V1[ii, ii] = V1[ii]
M = K * np.linalg.inv(diag_V1)
MR = M.real
MI = M.imag
P0 = g.real + (MR.dot(PQ_node.real)*1000 - MI.dot(PQ_node.imag)*1000)
Q0 = g.imag + (MR.dot(PQ_node.imag)*1000 + MI.dot(PQ_node.real)*1000)
P0 = P0/1000
Q0 = Q0/1000 # convert to kW/kVar
# =========================================Yiyun's Notes===========================================
return [P0, Q0, M]
def sub_costFun_gradient(x, sub_ref, coeff_sub, sub_measure, M, node_withPV):
grad_a = np.matrix([0] * len(x)).transpose()
grad_b = np.matrix([0] * len(x)).transpose()
grad_c = np.matrix([0] * len(x)).transpose()
MR = M.real
MI = M.imag
count = 0
for node in node_withPV:
grad_a[count] = -MR[0, int(node)]
grad_b[count] = -MR[1, int(node)]
grad_c[count] = -MR[2, int(node)]
grad_a[count + len(node_withPV)] = MI[0, int(node)]
grad_b[count + len(node_withPV)] = MI[1, int(node)]
grad_c[count + len(node_withPV)] = MI[2, int(node)]
count = count + 1
res = coeff_sub * ((sub_measure[0] - sub_ref[0]) *1000* grad_a + (sub_measure[1] - sub_ref[1])*1000 * grad_b
+ (sub_measure[2] - sub_ref[2])*1000 * grad_c)
res = res/1000
return res
def projection(x,xmax,xmin):
for ii in range(len(x)):
if x.item(ii) > xmax[ii]:
x[ii] = xmax[ii]
if x.item(ii) < xmin[ii]:
x[ii] = xmin[ii]
return x
class DERMS:
def __init__(self, pvData,controlbus,controlelem,controlelem_limit,sub_node_names,sub_elem_names):
self.PV_name = pvData["pvName"]
self.PV_location = pvData["pvLocation"]
self.PV_size = pvData["pvSize"]
self.inverter_size = pvData["inverterSize"]
self.control_bus = controlbus
sub_node_names = [ii.upper() for ii in sub_node_names]
self.controlbus_index = [sub_node_names.index(ii.upper()) for ii in controlbus]
PVbus_index = []
for bus in self.PV_location:
temp = bus.split('.')
if len(temp) == 1:
temp = temp + ['1', '2', '3']
for ii in range(len(temp) - 1):
PVbus_index.append(sub_node_names.index((temp[0] + '.' + temp[ii + 1]).upper()))
# adding .1 .2 .3 following the number to recognize the three phases.
# =================================================================================================#
self.PVbus_index = PVbus_index
self.control_elem = controlelem
self.controlelem_limit = controlelem_limit
self.controlelem_index = [sub_elem_names.index(ii) for ii in controlelem] # control branches index in the sub system (number)
def monitor(self, dss, dssObjects, PVSystem_1phase):
PVpowers = []
for pv in PVSystem_1phase["Name"].tolist():
nPhases = dssObjects["Generators"][pv].GetValue("phases")
power = dssObjects["Generators"][pv].GetValue("Powers")
PVpowers.append([sum(power[::2])/nPhases, sum(power[1::2])/nPhases])
PVpowers = np.asarray(PVpowers)
Vmes = []
for bus in self.control_bus:
busName = bus.split('.')[0].lower()
Vmag = dssObjects["Buses"][busName].GetValue("puVmagAngle")[::2]
allbusnode = dss.Bus.Nodes()
phase = bus.split('.')[1]
index = allbusnode.index(int(phase))
Vnode = Vmag[index]
Vmes.append(Vnode)
Imes = []
for elem in self.control_elem:
className = elem.split('.')[0] + "s"
I = dssObjects[className][elem].GetValue("CurrentsMagAng")[::2][:3] #TODO: Why is there a hardcoded [:3] ?
Imes.append(I)
return [self.PV_location,PVpowers,Vmes,Imes]
def control(self, linear_PF_coeff, Options,stepsize,mu0,Vlimit,PVpower,Imes,Vmes,PV_Pmax_forecast):
coeff_p = Options["coeff_p"]
coeff_q = Options["coeff_q"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# linear_PF_coeff is the linear power flow model coefficients for the zone, and linear power flow model
# coefficients are the result vector from function "linear_powerflow_model"
# coeff_p, coeff_q are constant coefficients in PV cost function
# stepsize is a vector of stepsize constants
# mu0 is the dual variable from last time step: mu_Vmag_upper0, mu_Vmag_lower0, mu_I0
# Vlimit is the allowed voltage limit: Vupper and Vlower
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PVname = self.PV_name
NPV = len(PVname)
x0 = np.zeros(2 * NPV)
for ii in range(NPV):
x0[ii] = -PVpower[ii][0] # in kW
x0[ii + NPV] = -PVpower[ii][1] # in kVar
#coeff_V_P = linear_PF_coeff[0]
#coeff_V_Q = linear_PF_coeff[1]
#coeff_Vm = linear_PF_coeff[2]
coeff_Vmag_P = linear_PF_coeff[3]
coeff_Vmag_Q = linear_PF_coeff[4]
#coeff_Vmag_k = linear_PF_coeff[5]
coeff_I_P = linear_PF_coeff[6]
coeff_I_Q = linear_PF_coeff[7]
#coeff_I_const = linear_PF_coeff[8]
stepsize_xp = stepsize[0]
stepsize_xq = stepsize[1]
stepsize_mu = stepsize[2]
Vupper = Vlimit[0]
Vlower = Vlimit[1]
controlbus_index = self.controlbus_index
PVbus_index = self.PVbus_index
controlelem_index = self.controlelem_index
PV_inverter_size = self.inverter_size
Imes_limit = self.controlelem_limit
mu_Vmag_upper0 = mu0[0]
mu_Vmag_lower0 = mu0[1]
mu_I0 = mu0[2]
#print([max(mu_Vmag_upper0),max(mu_Vmag_lower0)])
# compute gradient
PVcost_fun_gradient = PV_costFun_gradient(x0, coeff_p, coeff_q, PV_Pmax_forecast)
Vmag_upper_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0),
np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index], [ii for ii in PVbus_index])].transpose(), mu_Vmag_upper0)),axis=0)
Vmag_lower_gradient = np.concatenate((np.dot(coeff_Vmag_P[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0),
np.dot(coeff_Vmag_Q[np.ix_([ii for ii in controlbus_index],[ii for ii in PVbus_index])].transpose(), mu_Vmag_lower0)),axis=0)
Vmag_gradient = Vmag_upper_gradient - Vmag_lower_gradient
if len(mu_I0)>0 :
temp_real = mu_I0 * np.array(Imes.real)
temp_imag = mu_I0 * np.array(Imes.imag)
I_gradient_real = np.concatenate((np.dot(
coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
temp_real), np.dot(
coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].real.transpose(),
temp_real)), axis=0)
I_gradient_imag = np.concatenate((np.dot(
coeff_I_P[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
temp_imag), np.dot(
coeff_I_Q[np.ix_([ii for ii in controlelem_index], [ii for ii in PVbus_index])].imag.transpose(),
temp_imag)), axis=0)
I_gradient = 2 * I_gradient_real + 2 * I_gradient_imag
else:
I_gradient = 0
gradient = PVcost_fun_gradient + Vmag_gradient + I_gradient / 1000
# compute x1, mu1
x1 = np.concatenate([x0[:NPV] - stepsize_xp * gradient[:NPV], x0[NPV:] - stepsize_xq * gradient[NPV:]])
#print('solved: '+str(sum(x1[0:NPV]))+','+str(sum(x1[NPV:]))) # in kW/kVar
[x1, Pmax_allPV, Qmax_allPV] = project_PV(x1, PV_Pmax_forecast, PV_inverter_size)
#print('Available P = '+str(Pmax_allPV)+' , Available Q = '+str(Qmax_allPV))
#print('projected: ' + str(sum(x1[0:NPV])) + ',' + str(sum(x1[NPV:]))) # in kW/kVar
x1 = np.array([round(ii, 5) for ii in x1])
mu_Vmag_lower1 = mu_Vmag_lower0 + stepsize_mu * (Vlower - np.array(Vmes))
mu_Vmag_upper1 = mu_Vmag_upper0 + stepsize_mu * (np.array(Vmes) - Vupper)
mu_Vmag_lower1 = project_dualvariable(mu_Vmag_lower1)
mu_Vmag_upper1 = project_dualvariable(mu_Vmag_upper1)
if mu_I0:
mu_I1 = mu_I0 + stepsize_mu / 300 * np.array(list(map(lambda x: x[0] * x[0] - x[1] * x[1], zip(Imes, Imes_limit))))
mu_I1 = project_dualvariable(mu_I1)
else:
mu_I1 = mu_I0
mu1 = [mu_Vmag_upper1,mu_Vmag_lower1,mu_I1]
# =========================================Yiyun's Notes===========================================
return [x1,mu1]
| true | true |
f7274bbaa9b6a7c957dcc7b0ce646d02630be40a | 956 | py | Python | link_lang_spacy.py | bothub-it/bothub-nlp-ai-platform | 94f1fae57b8e81ed5f71839df6d47b1ee0df53f6 | [
"MIT"
] | null | null | null | link_lang_spacy.py | bothub-it/bothub-nlp-ai-platform | 94f1fae57b8e81ed5f71839df6d47b1ee0df53f6 | [
"MIT"
] | 2 | 2020-06-23T13:57:20.000Z | 2022-02-09T23:39:15.000Z | link_lang_spacy.py | bothub-it/bothub-nlp-ai-platform | 94f1fae57b8e81ed5f71839df6d47b1ee0df53f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import plac
import importlib
from pathlib import Path
from spacy.util import get_package_path
from spacy.compat import symlink_to
@plac.annotations(
lang=plac.Annotation(help='Language code'),
lang_path=plac.Annotation(help='Language path'))
def link_lang_spacy(lang, lang_path):
origin_path = os.path.join(
str(get_package_path('spacy').resolve()),
'lang',
lang,
)
try:
symlink_to(
Path(origin_path),
os.path.abspath(lang_path),
)
try:
importlib.import_module('spacy.lang.{}'.format(lang))
print('link created')
except Exception as e:
print('link not created')
raise e
except Exception as e:
print('error to create link to {} from {}'.format(lang, lang_path))
raise e
if __name__ == '__main__':
plac.call(link_lang_spacy, sys.argv[1:]) | 25.157895 | 75 | 0.623431 |
import os
import sys
import plac
import importlib
from pathlib import Path
from spacy.util import get_package_path
from spacy.compat import symlink_to
@plac.annotations(
lang=plac.Annotation(help='Language code'),
lang_path=plac.Annotation(help='Language path'))
def link_lang_spacy(lang, lang_path):
origin_path = os.path.join(
str(get_package_path('spacy').resolve()),
'lang',
lang,
)
try:
symlink_to(
Path(origin_path),
os.path.abspath(lang_path),
)
try:
importlib.import_module('spacy.lang.{}'.format(lang))
print('link created')
except Exception as e:
print('link not created')
raise e
except Exception as e:
print('error to create link to {} from {}'.format(lang, lang_path))
raise e
if __name__ == '__main__':
plac.call(link_lang_spacy, sys.argv[1:]) | true | true |
f7274bcbf40683a5d160d96b2f06a72cde94327d | 4,927 | py | Python | backend/DankiBackEnd/settings.py | danielpassy/Translang-Deck | 60057dd4eecc929682bb5d154656380b05d040c5 | [
"MIT"
] | 1 | 2020-12-29T16:00:13.000Z | 2020-12-29T16:00:13.000Z | backend/DankiBackEnd/settings.py | danielpassy/Translang-Deck | 60057dd4eecc929682bb5d154656380b05d040c5 | [
"MIT"
] | 8 | 2020-12-08T23:20:01.000Z | 2021-01-28T22:23:22.000Z | backend/DankiBackEnd/settings.py | danielpassy/Translang-Deck | 60057dd4eecc929682bb5d154656380b05d040c5 | [
"MIT"
] | 1 | 2020-12-08T23:38:49.000Z | 2020-12-08T23:38:49.000Z | """
Django settings for DankiBackEnd project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from os import path, getcwd
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
"SECRET_KEY", "(ac&ri0xuv9_!o#$$=$g#po&mkasdasdqwejqpoweaqaky-glk+vi^^!ka9f8%+$7"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["translang.live", "www.translang.live", "localhost"]
# ALLOWED_HOSTS_ENV = os.environ.get('ALLOWED_HOST')
# Application definition
INSTALLED_APPS = [
"rest_framework",
"rest_framework.authtoken",
"backend",
# default apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "DankiBackEnd.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "DankiBackEnd.wsgi.application"
APPEND_SLASH = False
# logs
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": "INFO", "handlers": ["file"]},
"handlers": {
"file": {
"level": "INFO",
"class": "logging.FileHandler",
"filename": "C:/Users/Daniel/Documents/Apps/Anki_Card_Builder/Translang-Deck/backend/logs/django.log",
"formatter": "app",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
},
},
"loggers": {
"backend.views": {
"handlers": ["file", "console"],
"level": "INFO",
"propagate": True,
},
# "django": {
# "handlers": ["console"],
# "level": "INFO",
# "level": "DEBUG",
# },
},
"formatters": {
"app": {
"format": (
u"%(asctime)s [%(levelname)-8s] "
"(%(module)s.%(funcName)s) %(message)s"
),
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
# 'rest_framework.authentication.TokenAuthentication'
],
"DEFAULT_PARSER_CLASSES": [
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.MultiPartParser",
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = "backend.User"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
MEDIA_ROOT = path.join(BASE_DIR, "decks/", "outputdeck/")
STATIC_ROOT = path.join(BASE_DIR, "static/")
STATICFILE_DIRS = (path.join(BASE_DIR, "static/"),)
MEDIA_URL = "/decks/"
STATIC_URL = "/static/"
| 26.632432 | 114 | 0.635681 |
from pathlib import Path
from os import path, getcwd
import os
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
SECRET_KEY = os.environ.get(
"SECRET_KEY", "(ac&ri0xuv9_!o#$$=$g#po&mkasdasdqwejqpoweaqaky-glk+vi^^!ka9f8%+$7"
)
DEBUG = True
ALLOWED_HOSTS = ["translang.live", "www.translang.live", "localhost"]
# ALLOWED_HOSTS_ENV = os.environ.get('ALLOWED_HOST')
# Application definition
INSTALLED_APPS = [
"rest_framework",
"rest_framework.authtoken",
"backend",
# default apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "DankiBackEnd.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "DankiBackEnd.wsgi.application"
APPEND_SLASH = False
# logs
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": "INFO", "handlers": ["file"]},
"handlers": {
"file": {
"level": "INFO",
"class": "logging.FileHandler",
"filename": "C:/Users/Daniel/Documents/Apps/Anki_Card_Builder/Translang-Deck/backend/logs/django.log",
"formatter": "app",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
},
},
"loggers": {
"backend.views": {
"handlers": ["file", "console"],
"level": "INFO",
"propagate": True,
},
# "django": {
# "handlers": ["console"],
# "level": "INFO",
# "level": "DEBUG",
# },
},
"formatters": {
"app": {
"format": (
u"%(asctime)s [%(levelname)-8s] "
"(%(module)s.%(funcName)s) %(message)s"
),
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
# 'rest_framework.authentication.TokenAuthentication'
],
"DEFAULT_PARSER_CLASSES": [
"rest_framework.parsers.JSONParser",
"rest_framework.parsers.MultiPartParser",
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = "backend.User"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
MEDIA_ROOT = path.join(BASE_DIR, "decks/", "outputdeck/")
STATIC_ROOT = path.join(BASE_DIR, "static/")
STATICFILE_DIRS = (path.join(BASE_DIR, "static/"),)
MEDIA_URL = "/decks/"
STATIC_URL = "/static/"
| true | true |
f7274bf22c6d405fafa87ecd7084bc1ec5559d84 | 4,583 | py | Python | examples/amac/fund_spider.py | acracker/ruia | b973a47270f72cc16344ac203c00ee4f6d835c04 | [
"MIT"
] | null | null | null | examples/amac/fund_spider.py | acracker/ruia | b973a47270f72cc16344ac203c00ee4f6d835c04 | [
"MIT"
] | null | null | null | examples/amac/fund_spider.py | acracker/ruia | b973a47270f72cc16344ac203c00ee4f6d835c04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-01-17 13:49
# @Author : pang
# @File : fund.py
# @Software: PyCharm
import datetime
import os
import asyncio
import re
import logging
import time
import random
import aiohttp
from motor.motor_asyncio import AsyncIOMotorClient
from ruia import Request, Spider, Response
try:
from items import FundInfoItemV1
from settings import *
except ImportError:
import sys
sys.path[0] = os.path.dirname(os.path.abspath(__file__))
from items import FundInfoItemV1
from settings import *
# http://gs.amac.org.cn/amac-infodisc/api/pof/fund?rand=0.03935877331629101&page=0&size=20
"""
从协会网站抓取所有基金基本信息, 不包括扩展信息.
"""
class FundSpider(Spider):
request_config = {
'RETRIES': 1,
'DELAY': 1,
'TIMEOUT': 20
}
name = 'fund_spider'
concurrency = 3
kwargs = {
'proxy': HTTP_PROXY,
}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive', 'Content-Length': '2', 'Content-Type': 'application/json', 'Host': 'gs.amac.org.cn',
'Origin': 'http://gs.amac.org.cn', 'Referer': 'http://gs.amac.org.cn/amac-infodisc/res/pof/fund/index.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'}
def __init__(self, middleware=None, loop=None, is_async_start=False):
super().__init__(middleware, loop, is_async_start)
self.client = AsyncIOMotorClient(MONGODB_URL, io_loop=loop)
self.db_name = DB_NAME
self.fund_collection = self.client[self.db_name]['fund']
async def get_total_pages(self):
url = "http://gs.amac.org.cn/amac-infodisc/api/pof/fund?rand={rand}&page=0&size=20".format(rand=random.random())
request = self.make_requests_from_url(url, data=b"{}", method="POST", res_type='json')
resp = await request.fetch()
if resp.status == 200:
if 'totalPages' in resp.html:
return resp.html['totalPages']
raise ValueError('failed to get total pages.')
async def start_requests(self):
num = await self.get_total_pages()
for i in range(num):
url = "http://gs.amac.org.cn/amac-infodisc/api/pof/fund?rand={rand}&page={page}&size=20".format(rand=random.random(), page=i)
yield self.make_requests_from_url(url, data=b"{}", method="POST", res_type='json')
async def parse(self, response: Response):
try:
data = response.html
if data is None or 'content' not in data:
return None
data = data['content']
update_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for item in data:
row = dict()
row['register_number'] = item['fundNo']
row['full_name'] = str(item['fundName']).replace(' ', '')
row['company_name'] = item['managerName']
row['manager_type'] = item['managerType']
row['status'] = item['workingState']
try:
row['establish_date'] = datetime.datetime.fromtimestamp(item['establishDate'] / 1000).strftime("%Y%m%d")
except:
row['establish_date'] = item['establishDate']
row['company_url'] = item['managerUrl']
row['mandator_name'] = item['mandatorName']
row['last_quarter_update'] = item['lastQuarterUpdate']
row['is_depute_manage'] = item['isDeputeManage']
try:
row['put_on_record_date'] = datetime.datetime.fromtimestamp(item['putOnRecordDate'] / 1000).strftime("%Y%m%d")
except:
row['put_on_record_date'] = item['putOnRecordDate']
row['update_time'] = update_time
s = time.time()
await self.fund_collection.update_one({'register_number': row['register_number'], 'full_name': row['full_name']}, {'$set': row}, upsert=True)
e = time.time()
self.logger.info("采集基金[%s]信息, 存储耗时:%s s" % (row['register_number'], round(e - s, 2)))
except Exception as e:
self.logger.info("采集失败. url:%s" % response.url)
self.logger.exception(e)
await self.stop()
if __name__ == '__main__':
FundSpider.start()
| 40.201754 | 157 | 0.592625 |
import datetime
import os
import asyncio
import re
import logging
import time
import random
import aiohttp
from motor.motor_asyncio import AsyncIOMotorClient
from ruia import Request, Spider, Response
try:
from items import FundInfoItemV1
from settings import *
except ImportError:
import sys
sys.path[0] = os.path.dirname(os.path.abspath(__file__))
from items import FundInfoItemV1
from settings import *
class FundSpider(Spider):
request_config = {
'RETRIES': 1,
'DELAY': 1,
'TIMEOUT': 20
}
name = 'fund_spider'
concurrency = 3
kwargs = {
'proxy': HTTP_PROXY,
}
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive', 'Content-Length': '2', 'Content-Type': 'application/json', 'Host': 'gs.amac.org.cn',
'Origin': 'http://gs.amac.org.cn', 'Referer': 'http://gs.amac.org.cn/amac-infodisc/res/pof/fund/index.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'}
def __init__(self, middleware=None, loop=None, is_async_start=False):
super().__init__(middleware, loop, is_async_start)
self.client = AsyncIOMotorClient(MONGODB_URL, io_loop=loop)
self.db_name = DB_NAME
self.fund_collection = self.client[self.db_name]['fund']
async def get_total_pages(self):
url = "http://gs.amac.org.cn/amac-infodisc/api/pof/fund?rand={rand}&page=0&size=20".format(rand=random.random())
request = self.make_requests_from_url(url, data=b"{}", method="POST", res_type='json')
resp = await request.fetch()
if resp.status == 200:
if 'totalPages' in resp.html:
return resp.html['totalPages']
raise ValueError('failed to get total pages.')
async def start_requests(self):
num = await self.get_total_pages()
for i in range(num):
url = "http://gs.amac.org.cn/amac-infodisc/api/pof/fund?rand={rand}&page={page}&size=20".format(rand=random.random(), page=i)
yield self.make_requests_from_url(url, data=b"{}", method="POST", res_type='json')
async def parse(self, response: Response):
try:
data = response.html
if data is None or 'content' not in data:
return None
data = data['content']
update_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for item in data:
row = dict()
row['register_number'] = item['fundNo']
row['full_name'] = str(item['fundName']).replace(' ', '')
row['company_name'] = item['managerName']
row['manager_type'] = item['managerType']
row['status'] = item['workingState']
try:
row['establish_date'] = datetime.datetime.fromtimestamp(item['establishDate'] / 1000).strftime("%Y%m%d")
except:
row['establish_date'] = item['establishDate']
row['company_url'] = item['managerUrl']
row['mandator_name'] = item['mandatorName']
row['last_quarter_update'] = item['lastQuarterUpdate']
row['is_depute_manage'] = item['isDeputeManage']
try:
row['put_on_record_date'] = datetime.datetime.fromtimestamp(item['putOnRecordDate'] / 1000).strftime("%Y%m%d")
except:
row['put_on_record_date'] = item['putOnRecordDate']
row['update_time'] = update_time
s = time.time()
await self.fund_collection.update_one({'register_number': row['register_number'], 'full_name': row['full_name']}, {'$set': row}, upsert=True)
e = time.time()
self.logger.info("采集基金[%s]信息, 存储耗时:%s s" % (row['register_number'], round(e - s, 2)))
except Exception as e:
self.logger.info("采集失败. url:%s" % response.url)
self.logger.exception(e)
await self.stop()
if __name__ == '__main__':
FundSpider.start()
| true | true |
f7274c9f9d6c7bc1762947fcfb7aab6dfe8470bc | 687 | py | Python | profiles/models.py | Samyak-jain09/QnA | 5044a947b84834cfc36554053a18cc1b12ad0f0e | [
"MIT"
] | null | null | null | profiles/models.py | Samyak-jain09/QnA | 5044a947b84834cfc36554053a18cc1b12ad0f0e | [
"MIT"
] | null | null | null | profiles/models.py | Samyak-jain09/QnA | 5044a947b84834cfc36554053a18cc1b12ad0f0e | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from sorl.thumbnail import ImageField
# Create your models here.
class Profile(models.Model):
user= models.OneToOneField(
User,
on_delete=models.CASCADE,
related_name="profile"
)
image = ImageField(upload_to='profiles')
def __str__(self) :
return self.user.username
@receiver(post_save,sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
| 24.535714 | 62 | 0.672489 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from sorl.thumbnail import ImageField
class Profile(models.Model):
user= models.OneToOneField(
User,
on_delete=models.CASCADE,
related_name="profile"
)
image = ImageField(upload_to='profiles')
def __str__(self) :
return self.user.username
@receiver(post_save,sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
| true | true |
f7274dc7c0bed6089595d1474055de95254fbc71 | 13,661 | py | Python | pyzombie/Handler.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | null | null | null | pyzombie/Handler.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | 1 | 2019-12-30T19:30:01.000Z | 2019-12-30T19:30:29.000Z | pyzombie/Handler.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
"""pyzombie HTTP RESTful resource handler."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
__all__ = ['Handler']
import sys
import os
from datetime import datetime
import mimetypes
import hashlib
import re
import cgi
import cgitb
import http.client
from .ZombieConfig import config, datadir
from .Executable import Executable
#cgitb.enable()
###
### TODO
###
### Pay attention to If-Modified-Since to allow return of 304 Not Modified
### Pay attention to If-None-Match to allow return of 304 Not Modified
### Pay attention to If-Unmodified-Since
### Pay attention to If-Modified-Since
# Buffer size in bytes for chunked reads/writes.
# NOTE(review): neither constant is referenced in this part of the module;
# presumably used by response-streaming code elsewhere -- confirm usage.
CHUNK_SIZE = 256
# Sentinel string -- presumably marks a handler/stream as already flushed.
FLUSHED = "Flushed"
class Handler:
"""Holds all the information necessary to handle a single resource dispatch.
Properties
----------
executable
The Executable object for this handler. In rare cases no executable
can be determined so this will return None.
"""
@classmethod
def initdispatch(cls, regex, allow, help):
cls.regex = re.compile(regex)
cls.allow = allow
cls.help = help
return cls
@classmethod
def match(cls, path):
"""Check to see if the path is recognized by the dispatch handler,
if so then return a dictionary of recognized parts, otherwise
return None."""
ret = None
mo = cls.regex.match(path)
if mo != None:
ret = mo.groupdict()
return ret
def __init__(self, req, urlargs):
self.req = req
self.urlargs = urlargs
self.content = "Single"
self.nocache = False
self.__status = None
self.headers = {}
self.lines = []
    @property
    def status(self):
        """HTTP response status for this dispatch; None until assigned."""
        return self.__status
    @status.setter
    def status(self, value):
        # Plain pass-through setter; no validation is performed here.
        self.__status = value
    @property
    def startstamp(self):
        """Server start timestamp object (``req.server.stamp``)."""
        return self.req.server.stamp
    @property
    def startstamprfc850(self):
        """Date string from the request's ``date_time_string()``.

        NOTE(review): despite the name, ``date_time_string()`` called with no
        argument formats the *current* time, not the server start stamp --
        confirm the intended behavior.
        """
        return self.req.date_time_string()
    @property
    def datadir(self):
        """Root data directory as resolved by ``ZombieConfig.datadir()``."""
        return datadir()
@property
def executable(self, mediatype=None):
if not hasattr(self, "_Handler__executable"):
self.initexecutable()
return self.__executable
@property
def accept(self):
"""Return an ordered set of media types that will be accepted."""
if not hasattr(self, "acceptset"):
astr = self.req.headers["Accept"]
if astr is None:
astr = "text/html"
self.acceptset = self.__parseq(astr)
self.acceptset.append(None)
return self.acceptset
@property
def acceptlanguage(self):
"""Return an ordered set of languages that will be accepted."""
if not hasattr(self, "acceptlangset"):
astr = self.req.headers["Accept-Language"]
if astr is None:
astr = "en"
self.acceptlangset = self.__parseq(astr)
self.acceptlangset.append(None)
return self.acceptlangset
@property
def acceptencoding(self):
"""Return an ordered set of langauges that will be accepted."""
if not hasattr(self, "acceptencset"):
astr = self.req.headers["Accept-Encoding"]
if astr is None:
astr = ""
self.acceptencset = self.__parseq(astr)
self.acceptencset.append(None)
return self.acceptencset
def __parseq(self, astr):
qre = re.compile(r"([a-zA-Z*]+/[a-zA-Z*]+)(\s*;\s*q=(\d+(\.\d+))?)?")
astr = astr.split(",")
aset = ["DUMMY"]
weight = [0.0]
for a in astr:
q = 1.0
m = qre.match(a.strip())
if m:
a = m.group(1)
if m.group(3):
q = float(m.group(3))
for i, w in enumerate(weight):
if q > w:
aset.insert(i, a)
weight.insert(i, q)
break
return aset[:-1]
def initexecutable(self, mediatype=None):
"""This will initialize the executable property with a given media
type. Generally using the executable property directly will give
correct results. This is really only used when POST of a new exectuable
occurs."""
if hasattr(self, "_Handler__executable"):
raise AttributeError("Executable property is already initialized.")
if 'execname' in self.urlargs:
name = self.urlargs['execname']
else:
name = Executable.createname()
self.__executable = Executable.getcached(name, mediatype)
def serverurl(self, path):
"""Given a path to a resource create a full URL to that resource.
Parameters
----------
path
The relative path on the server to the resource.
Return
------
The URL that can be given to this server to find the given resource.
"""
return "http://{0}:{1}/{2}".format(
self.req.server.server_name,
self.req.server.server_port,
path)
def rfile_safe(self):
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
if sys.version_info >= (3, 2):
return self.req.rfile
else:
return HttpServerFP(self.req)
def multipart(self):
ctype, pdict = cgi.parse_header(self.req.headers['Content-Type'])
if ctype != 'multipart/form-data':
self.error(http.client.UNSUPPORTED_MEDIA_TYPE)
return None
fp = self.rfile_safe()
fs = cgi.FieldStorage(fp=fp, headers=self.req.headers,
environ={'REQUEST_METHOD':'POST'}, strict_parsing=True)
return fs
def readline(self):
"""Read a single line from the input stream in decoded format."""
pass
def writeline(self, line):
"""Write a single line of text to the output stream."""
self.lines.append(line)
def writelines(self, lines):
"""Write a string one line at a time to the output stream."""
for l in lines.splitlines():
self.writeline(l)
def writefile(self, path):
"""Read and then write the file from the given path to the output
stream. This will write all the headers before the file. If there is
an error reading the file then the appropriate HTTP error code will
be sent.
This is meant for static files. Dynamic files should use writeline
or writelines to operate.
Parameters
----------
path
The normalized path to the file.
"""
if os.path.isfile(path):
mediatype, enc = mimetypes.guess_type(path)
self.writefp(open(path, "rb"), mediatype=mediatype, enc=enc)
else:
self.error(http.client.NOT_FOUND)
def writefp(self, fp, mediatype="text/plain", enc=None, chunked=None):
"""Read from the given file object and write the data to the output
stream. If this is chunked then this will not return until the input
file object is closed.
Parameters
----------
fp
The file type object to read from.
chunked
If not ``None`` then the data should be sent in a chunked manner,
and the value should be a function that returns a boolean value
to indicate all data has been sent. The default is no chunked.
"""
self.req.send_response(http.client.OK)
self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagestatic))
self.req.send_header("Last-Modified", self.req.date_time_string())
if mediatype == None:
self.req.send_header("Content-Type", "application/octet-stream")
else:
if mediatype in ["text/plain", "text/html"]:
mediatype = "{0};UTF-8".format(mediatype)
self.req.send_header("Content-Type", mediatype)
if enc != None:
self.req.send_header("Content-Encoding", enc)
if chunked is not None:
self.__etag_init()
self.content = "Chunked"
self.req.send_header("Transfer-Encoding", "chunked")
self.req.end_headers()
length = 0
done = False
while not done:
data = fp.read(CHUNK_SIZE)
while not data and not done:
data = fp.read(CHUNK_SIZE)
done = chunked()
if data:
datalen = len(data)
length = length + datalen
self.__etag_feed(data)
self.req.wfile.write("{0:x}".format(datalen).encode("UTF-8"))
self.req.wfile.write(os.linesep.encode("UTF-8"))
if isinstance(data, str):
self.req.wfile.write(data.encode("UTF-8"))
elif isinstance(data, bytes):
self.req.wfile.write(data)
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.req.wfile.write(b"0")
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagedynamic))
self.req.send_header("ETag", self.__etag_value())
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.content = FLUSHED
else:
data = fp.read()
self.req.send_header("ETag", self.etag(data))
self.req.send_header("Content-Length", len(data))
self.req.end_headers()
self.req.wfile.write(data)
self.content = FLUSHED
def error(self, code, message=None):
self.req.send_error(code, message=message)
self.content = FLUSHED
def flush(self):
"""Flush the headers if they have not been written and all the lines
that have been written to the http output stream."""
if self.content == FLUSHED:
return
self.lines.append("")
buf = os.linesep.join(self.lines).encode("UTF-8")
self.lines = []
if not self.nocache:
if "Cache-Control" not in self.headers:
self.headers["Cache-Control"] = "public max-age={0}".format(self.req.server.maxagedynamic)
if "ETag" not in self.headers:
self.headers["ETag"] = self.etag(buf)
if self.content in ["Headers", "Single", "Chunked"]:
self.req.send_response(self.status)
for k in self.headers:
self.req.send_header(k, self.headers[k])
if self.content == "Headers":
self.req.end_headers()
self.content = FLUSHED
elif self.content == "Single":
self.req.send_header("Content-Length", len(buf))
self.req.end_headers()
self.req.wfile.write(buf)
self.content = FLUSHED
elif self.content == "Chunked":
pass
def etag(self, data):
"""Build an ETag representation for the data associated with the given
name."""
self.__etag_init()
self.__etag_feed(data)
return self.__etag_value()
def __etag_init(self):
self.__etag = hashlib.md5()
def __etag_feed(self, data):
if isinstance(data, str):
self.__etag.update(data.encode("UTF-8"))
elif isinstance(data, bytes):
self.__etag.update(data)
else:
self.__etag.update(str(data).encode("UTF-8"))
def __etag_value(self):
return self.__etag.hexdigest()
def __getitem__(self, key):
return self.headers[key]
def __setitem__(self, key, value):
self.headers[key] = value
class HttpServerFP():
    """Adapter around the http.server request ``rfile``.

    Reads never run past the declared Content-Length (a synthetic EOF),
    and the bytes read are decoded from UTF-8 into ``str``.
    """
    def __init__(self, req):
        self.req = req
        self.clen = int(self.req.headers['Content-Length'])
        self.rfile = self.req.rfile
    def read(self, size=-1):
        """Read up to ``size`` characters; a negative ``size`` reads the
        remainder of the declared body. Returns '' once exhausted."""
        remaining = self.clen
        if size < 0 or size > remaining:
            size = remaining
        if size <= 0:
            return ''
        chunk = self.rfile.read(size)
        self.clen -= len(chunk)
        return str(chunk, 'UTF-8')
| 33.982587 | 109 | 0.566284 |
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
__all__ = ['Handler']
import sys
import os
from datetime import datetime
import mimetypes
import hashlib
import re
import cgi
import cgitb
import http.client
from .ZombieConfig import config, datadir
from .Executable import Executable
rlargs):
self.req = req
self.urlargs = urlargs
self.content = "Single"
self.nocache = False
self.__status = None
self.headers = {}
self.lines = []
@property
def status(self):
return self.__status
@status.setter
def status(self, value):
self.__status = value
@property
def startstamp(self):
return self.req.server.stamp
@property
def startstamprfc850(self):
return self.req.date_time_string()
@property
def datadir(self):
return datadir()
@property
def executable(self, mediatype=None):
if not hasattr(self, "_Handler__executable"):
self.initexecutable()
return self.__executable
@property
def accept(self):
if not hasattr(self, "acceptset"):
astr = self.req.headers["Accept"]
if astr is None:
astr = "text/html"
self.acceptset = self.__parseq(astr)
self.acceptset.append(None)
return self.acceptset
@property
def acceptlanguage(self):
if not hasattr(self, "acceptlangset"):
astr = self.req.headers["Accept-Language"]
if astr is None:
astr = "en"
self.acceptlangset = self.__parseq(astr)
self.acceptlangset.append(None)
return self.acceptlangset
@property
def acceptencoding(self):
if not hasattr(self, "acceptencset"):
astr = self.req.headers["Accept-Encoding"]
if astr is None:
astr = ""
self.acceptencset = self.__parseq(astr)
self.acceptencset.append(None)
return self.acceptencset
def __parseq(self, astr):
qre = re.compile(r"([a-zA-Z*]+/[a-zA-Z*]+)(\s*;\s*q=(\d+(\.\d+))?)?")
astr = astr.split(",")
aset = ["DUMMY"]
weight = [0.0]
for a in astr:
q = 1.0
m = qre.match(a.strip())
if m:
a = m.group(1)
if m.group(3):
q = float(m.group(3))
for i, w in enumerate(weight):
if q > w:
aset.insert(i, a)
weight.insert(i, q)
break
return aset[:-1]
def initexecutable(self, mediatype=None):
if hasattr(self, "_Handler__executable"):
raise AttributeError("Executable property is already initialized.")
if 'execname' in self.urlargs:
name = self.urlargs['execname']
else:
name = Executable.createname()
self.__executable = Executable.getcached(name, mediatype)
def serverurl(self, path):
return "http://{0}:{1}/{2}".format(
self.req.server.server_name,
self.req.server.server_port,
path)
def rfile_safe(self):
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
if sys.version_info >= (3, 2):
return self.req.rfile
else:
return HttpServerFP(self.req)
def multipart(self):
ctype, pdict = cgi.parse_header(self.req.headers['Content-Type'])
if ctype != 'multipart/form-data':
self.error(http.client.UNSUPPORTED_MEDIA_TYPE)
return None
fp = self.rfile_safe()
fs = cgi.FieldStorage(fp=fp, headers=self.req.headers,
environ={'REQUEST_METHOD':'POST'}, strict_parsing=True)
return fs
def readline(self):
pass
def writeline(self, line):
self.lines.append(line)
def writelines(self, lines):
for l in lines.splitlines():
self.writeline(l)
def writefile(self, path):
if os.path.isfile(path):
mediatype, enc = mimetypes.guess_type(path)
self.writefp(open(path, "rb"), mediatype=mediatype, enc=enc)
else:
self.error(http.client.NOT_FOUND)
def writefp(self, fp, mediatype="text/plain", enc=None, chunked=None):
self.req.send_response(http.client.OK)
self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagestatic))
self.req.send_header("Last-Modified", self.req.date_time_string())
if mediatype == None:
self.req.send_header("Content-Type", "application/octet-stream")
else:
if mediatype in ["text/plain", "text/html"]:
mediatype = "{0};UTF-8".format(mediatype)
self.req.send_header("Content-Type", mediatype)
if enc != None:
self.req.send_header("Content-Encoding", enc)
if chunked is not None:
self.__etag_init()
self.content = "Chunked"
self.req.send_header("Transfer-Encoding", "chunked")
self.req.end_headers()
length = 0
done = False
while not done:
data = fp.read(CHUNK_SIZE)
while not data and not done:
data = fp.read(CHUNK_SIZE)
done = chunked()
if data:
datalen = len(data)
length = length + datalen
self.__etag_feed(data)
self.req.wfile.write("{0:x}".format(datalen).encode("UTF-8"))
self.req.wfile.write(os.linesep.encode("UTF-8"))
if isinstance(data, str):
self.req.wfile.write(data.encode("UTF-8"))
elif isinstance(data, bytes):
self.req.wfile.write(data)
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.req.wfile.write(b"0")
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagedynamic))
self.req.send_header("ETag", self.__etag_value())
self.req.wfile.write(os.linesep.encode("UTF-8"))
self.content = FLUSHED
else:
data = fp.read()
self.req.send_header("ETag", self.etag(data))
self.req.send_header("Content-Length", len(data))
self.req.end_headers()
self.req.wfile.write(data)
self.content = FLUSHED
def error(self, code, message=None):
self.req.send_error(code, message=message)
self.content = FLUSHED
def flush(self):
if self.content == FLUSHED:
return
self.lines.append("")
buf = os.linesep.join(self.lines).encode("UTF-8")
self.lines = []
if not self.nocache:
if "Cache-Control" not in self.headers:
self.headers["Cache-Control"] = "public max-age={0}".format(self.req.server.maxagedynamic)
if "ETag" not in self.headers:
self.headers["ETag"] = self.etag(buf)
if self.content in ["Headers", "Single", "Chunked"]:
self.req.send_response(self.status)
for k in self.headers:
self.req.send_header(k, self.headers[k])
if self.content == "Headers":
self.req.end_headers()
self.content = FLUSHED
elif self.content == "Single":
self.req.send_header("Content-Length", len(buf))
self.req.end_headers()
self.req.wfile.write(buf)
self.content = FLUSHED
elif self.content == "Chunked":
pass
def etag(self, data):
self.__etag_init()
self.__etag_feed(data)
return self.__etag_value()
def __etag_init(self):
self.__etag = hashlib.md5()
def __etag_feed(self, data):
if isinstance(data, str):
self.__etag.update(data.encode("UTF-8"))
elif isinstance(data, bytes):
self.__etag.update(data)
else:
self.__etag.update(str(data).encode("UTF-8"))
def __etag_value(self):
return self.__etag.hexdigest()
def __getitem__(self, key):
return self.headers[key]
def __setitem__(self, key, value):
self.headers[key] = value
class HttpServerFP():
def __init__(self, req):
self.req = req
self.clen = int(self.req.headers['Content-Length'])
self.rfile = self.req.rfile
def read(self, size=-1):
if size < 0:
size = self.clen
if size > self.clen:
size = self.clen
ret = ''
if size > 0:
ret = self.rfile.read(size)
self.clen = self.clen - len(ret)
ret = str(ret, 'UTF-8')
return ret
| true | true |
f7274e486eedd05c1c6a0c85be46f9c9e766ce1c | 1,073 | py | Python | SimpleShadowsocksSubscribeServer/views/subscribe.py | TomCzHen/py4S | c07b0a05c798809ef95e8ef47e87c877b82358fd | [
"MIT"
] | 1 | 2019-06-05T16:05:28.000Z | 2019-06-05T16:05:28.000Z | SimpleShadowsocksSubscribeServer/views/subscribe.py | TomCzHen/py4S | c07b0a05c798809ef95e8ef47e87c877b82358fd | [
"MIT"
] | null | null | null | SimpleShadowsocksSubscribeServer/views/subscribe.py | TomCzHen/py4S | c07b0a05c798809ef95e8ef47e87c877b82358fd | [
"MIT"
] | null | null | null | import uuid
from sanic import response
from sanic.exceptions import abort
from sanic.request import Request
from sanic.views import HTTPMethodView as SanicHTTPView
from ..cache import cache
class SubscribeView(SanicHTTPView):
    """Serve a subscription file for the subscription identified by ``uid``.

    The client must supply the matching ``token`` query argument; an
    optional ``max`` argument caps the number of entries. Unknown uids,
    wrong tokens, and HTML-accepting (browser) requests get a 404.
    """
    async def get(self, request: Request, uid: uuid):
        """Return the subscription file as an attachment download.

        Aborts with 404 for an unknown ``uid``, a wrong ``token``, or a
        client whose Accept header includes ``text/html``.
        """
        token = request.args.get('token')
        num = request.args.get('max', '99')
        try:
            # Parse via float first so values like "10.5" are accepted;
            # non-numeric input falls back to 0 instead of raising.
            num = float(num)
        except ValueError:
            num = 0
        else:
            num = int(num)
        # cache.get returns None for unknown uids; abort raises, so the
        # `or` never actually yields None past this line.
        subscribe = await cache.get(key=str(uid)) or abort(404)
        # The Accept header may be absent entirely; default to '' so the
        # split below cannot raise AttributeError on None.
        accept_contents = request.headers.get('accept', '').split(',')
        if 'text/html' in accept_contents:
            abort(404)
        if subscribe.token == token:
            subscribe_file = await subscribe.output_file(num)
            return response.raw(
                subscribe_file,
                content_type='application/octet-stream; charset=utf-8',
                headers={"Content-Disposition": f"attachment; filename={uid}.txt"}
            )
        else:
            abort(404)
| 26.825 | 82 | 0.59739 | import uuid
from sanic import response
from sanic.exceptions import abort
from sanic.request import Request
from sanic.views import HTTPMethodView as SanicHTTPView
from ..cache import cache
class SubscribeView(SanicHTTPView):
async def get(self, request: Request, uid: uuid):
token = request.args.get('token')
num = request.args.get('max', '99')
try:
num = float(num)
except ValueError:
num = 0
else:
num = int(num)
subscribe = await cache.get(key=str(uid)) or abort(404)
accept_contents = request.headers.get('accept').split(',')
if 'text/html' in accept_contents:
abort(404)
if subscribe.token == token:
subscribe_file = await subscribe.output_file(num)
return response.raw(
subscribe_file,
content_type='application/octet-stream; charset=utf-8',
headers={"Content-Disposition": f"attachment; filename={uid}.txt"}
)
else:
abort(404)
| true | true |
f7274e9c19524a8d036639345aeb06261fec049e | 5,177 | py | Python | tests/unit_tests/test_data_photon.py | norberto-schmidt/openmc | ff4844303154a68027b9c746300f5704f73e0875 | [
"MIT"
] | 262 | 2018-08-09T21:27:03.000Z | 2022-03-24T05:02:10.000Z | tests/unit_tests/test_data_photon.py | norberto-schmidt/openmc | ff4844303154a68027b9c746300f5704f73e0875 | [
"MIT"
] | 753 | 2018-08-03T15:26:57.000Z | 2022-03-29T23:54:48.000Z | tests/unit_tests/test_data_photon.py | norberto-schmidt/openmc | ff4844303154a68027b9c746300f5704f73e0875 | [
"MIT"
] | 196 | 2018-08-06T13:41:14.000Z | 2022-03-29T20:47:12.000Z | #!/usr/bin/env python
from collections.abc import Mapping, Callable
import os
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import openmc.data
@pytest.fixture(scope='module')
def elements_endf():
    """Dictionary of element ENDF data indexed by atomic symbol."""
    base = os.environ['OPENMC_ENDF_DATA']
    atomic_numbers = {'H': 1, 'O': 8, 'Al': 13, 'Cu': 29, 'Ag': 47, 'U': 92, 'Pu': 94}
    data = {}
    for symbol, z in atomic_numbers.items():
        # Pair the photoatomic file with its atomic relaxation file.
        photoat_path = os.path.join(base, 'photoat', f'photoat-{z:03}_{symbol}_000.endf')
        relax_path = os.path.join(base, 'atomic_relax', f'atom-{z:03}_{symbol}_000.endf')
        data[symbol] = openmc.data.IncidentPhoton.from_endf(photoat_path, relax_path)
    return data
@pytest.fixture()
def element(request, elements_endf):
    """Element ENDF data, selected by symbol via indirect parametrization."""
    return elements_endf[request.param]
@pytest.mark.parametrize(
    'element, atomic_number', [
        ('Al', 13),
        ('Cu', 29),
        ('Pu', 94)
    ],
    indirect=['element']
)
def test_attributes(element, atomic_number):
    """The parsed IncidentPhoton data carries the element's atomic number."""
    assert element.atomic_number == atomic_number
@pytest.mark.parametrize(
    'element, subshell, binding_energy, num_electrons', [
        ('H', 'K', 13.61, 1.0),
        ('O', 'L3', 14.15, 2.67),
        ('U', 'P2', 34.09, 2.0)
    ],
    indirect=['element']
)
def test_atomic_relaxation(element, subshell, binding_energy, num_electrons):
    """Subshell binding energies and electron counts match the reference values."""
    atom_relax = element.atomic_relaxation
    assert isinstance(atom_relax, openmc.data.photon.AtomicRelaxation)
    assert subshell in atom_relax.subshells
    assert atom_relax.binding_energy[subshell] == binding_energy
    assert atom_relax.num_electrons[subshell] == num_electrons
@pytest.mark.parametrize('element', ['Al', 'Cu', 'Pu'], indirect=True)
def test_transitions(element):
    """Transition tables are non-empty 4-column DataFrames whose
    probabilities sum to one."""
    tables = element.atomic_relaxation.transitions
    assert isinstance(tables, Mapping)
    assert tables
    for table in tables.values():
        assert isinstance(table, pd.core.frame.DataFrame)
        assert len(table.columns) == 4
        assert sum(table['probability']) == pytest.approx(1.0)
@pytest.mark.parametrize(
    'element, I, i_shell, ionization_energy, num_electrons', [
        ('H', 19.2, 0, 13.6, 1),
        ('O', 95.0, 2, 13.62, 4),
        ('U', 890.0, 25, 6.033, -3)
    ],
    indirect=['element']
)
def test_bremsstrahlung(element, I, i_shell, ionization_energy, num_electrons):
    """Bremsstrahlung data exposes I, per-shell quantities, monotone energy
    grids normalized to [0, 1], and a 200x30 DCS table."""
    brems = element.bremsstrahlung
    assert isinstance(brems, Mapping)
    assert brems['I'] == I
    assert brems['num_electrons'][i_shell] == num_electrons
    assert brems['ionization_energy'][i_shell] == ionization_energy
    # Both energy grids must be strictly increasing.
    assert np.all(np.diff(brems['electron_energy']) > 0.0)
    assert np.all(np.diff(brems['photon_energy']) > 0.0)
    assert brems['photon_energy'][0] == 0.0
    assert brems['photon_energy'][-1] == 1.0
    assert brems['dcs'].shape == (200, 30)
@pytest.mark.parametrize(
    'element, n_shell', [
        ('H', 1),
        ('O', 3),
        ('Al', 5)
    ],
    indirect=['element']
)
def test_compton_profiles(element, n_shell):
    """Compton profile data has one callable J per shell and consistent
    per-shell array lengths."""
    profile = element.compton_profiles
    assert profile
    assert isinstance(profile, Mapping)
    assert all(isinstance(x, Callable) for x in profile['J'])
    assert all(len(x) == n_shell for x in profile.values())
@pytest.mark.parametrize(
    'element, reaction', [
        ('Cu', 541),
        ('Ag', 502),
        ('Pu', 504)
    ],
    indirect=['element']
)
def test_reactions(element, reaction):
    """Reactions are PhotonReaction objects keyed by MT number."""
    reactions = element.reactions
    assert all(isinstance(x, openmc.data.PhotonReaction) for x in reactions.values())
    assert reaction in reactions
    # An MT absent from photoatomic data must raise KeyError on lookup.
    with pytest.raises(KeyError):
        reactions[18]
@pytest.mark.parametrize('element', ['Pu'], indirect=True)
def test_export_to_hdf5(tmpdir, element):
    """Round-trip Pu photon data through HDF5 and compare cross sections,
    scattering factors, and selected tables between the two copies."""
    filename = str(tmpdir.join('tmp.h5'))
    element.export_to_hdf5(filename)
    assert os.path.exists(filename)
    # Read in data from hdf5
    element2 = openmc.data.IncidentPhoton.from_hdf5(filename)
    # Check for some cross section and datasets of element and element2
    energy = np.logspace(np.log10(1.0), np.log10(1.0e10), num=100)
    for mt in (502, 504, 515, 517, 522, 541, 570):
        xs = element[mt].xs(energy)
        xs2 = element2[mt].xs(energy)
        assert np.allclose(xs, xs2)
    assert element[502].scattering_factor == element2[502].scattering_factor
    assert element.atomic_relaxation.transitions['O3'].equals(
        element2.atomic_relaxation.transitions['O3'])
    assert (element.compton_profiles['binding_energy'] ==
            element2.compton_profiles['binding_energy']).all()
    assert (element.bremsstrahlung['electron_energy'] ==
            element2.bremsstrahlung['electron_energy']).all()
    # Export to hdf5 again (overwrite must succeed)
    element2.export_to_hdf5(filename, 'w')
def test_photodat_only(run_in_tmpdir):
    """Photoatomic data can be loaded without an atomic relaxation file
    and exported to HDF5."""
    photoatomic_file = (Path(os.environ['OPENMC_ENDF_DATA'])
                        / 'photoat' / 'photoat-001_H_000.endf')
    data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)
    data.export_to_hdf5('tmp.h5', 'w')
from collections.abc import Mapping, Callable
import os
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import openmc.data
@pytest.fixture(scope='module')
def elements_endf():
endf_data = os.environ['OPENMC_ENDF_DATA']
elements = {'H': 1, 'O': 8, 'Al': 13, 'Cu': 29, 'Ag': 47, 'U': 92, 'Pu': 94}
data = {}
for symbol, Z in elements.items():
p_file = 'photoat-{:03}_{}_000.endf'.format(Z, symbol)
p_path = os.path.join(endf_data, 'photoat', p_file)
a_file = 'atom-{:03}_{}_000.endf'.format(Z, symbol)
a_path = os.path.join(endf_data, 'atomic_relax', a_file)
data[symbol] = openmc.data.IncidentPhoton.from_endf(p_path, a_path)
return data
@pytest.fixture()
def element(request, elements_endf):
return elements_endf[request.param]
@pytest.mark.parametrize(
'element, atomic_number', [
('Al', 13),
('Cu', 29),
('Pu', 94)
],
indirect=['element']
)
def test_attributes(element, atomic_number):
assert element.atomic_number == atomic_number
@pytest.mark.parametrize(
'element, subshell, binding_energy, num_electrons', [
('H', 'K', 13.61, 1.0),
('O', 'L3', 14.15, 2.67),
('U', 'P2', 34.09, 2.0)
],
indirect=['element']
)
def test_atomic_relaxation(element, subshell, binding_energy, num_electrons):
atom_relax = element.atomic_relaxation
assert isinstance(atom_relax, openmc.data.photon.AtomicRelaxation)
assert subshell in atom_relax.subshells
assert atom_relax.binding_energy[subshell] == binding_energy
assert atom_relax.num_electrons[subshell] == num_electrons
@pytest.mark.parametrize('element', ['Al', 'Cu', 'Pu'], indirect=True)
def test_transitions(element):
transitions = element.atomic_relaxation.transitions
assert transitions
assert isinstance(transitions, Mapping)
for matrix in transitions.values():
assert isinstance(matrix, pd.core.frame.DataFrame)
assert len(matrix.columns) == 4
assert sum(matrix['probability']) == pytest.approx(1.0)
@pytest.mark.parametrize(
'element, I, i_shell, ionization_energy, num_electrons', [
('H', 19.2, 0, 13.6, 1),
('O', 95.0, 2, 13.62, 4),
('U', 890.0, 25, 6.033, -3)
],
indirect=['element']
)
def test_bremsstrahlung(element, I, i_shell, ionization_energy, num_electrons):
brems = element.bremsstrahlung
assert isinstance(brems, Mapping)
assert brems['I'] == I
assert brems['num_electrons'][i_shell] == num_electrons
assert brems['ionization_energy'][i_shell] == ionization_energy
assert np.all(np.diff(brems['electron_energy']) > 0.0)
assert np.all(np.diff(brems['photon_energy']) > 0.0)
assert brems['photon_energy'][0] == 0.0
assert brems['photon_energy'][-1] == 1.0
assert brems['dcs'].shape == (200, 30)
@pytest.mark.parametrize(
'element, n_shell', [
('H', 1),
('O', 3),
('Al', 5)
],
indirect=['element']
)
def test_compton_profiles(element, n_shell):
profile = element.compton_profiles
assert profile
assert isinstance(profile, Mapping)
assert all(isinstance(x, Callable) for x in profile['J'])
assert all(len(x) == n_shell for x in profile.values())
@pytest.mark.parametrize(
'element, reaction', [
('Cu', 541),
('Ag', 502),
('Pu', 504)
],
indirect=['element']
)
def test_reactions(element, reaction):
reactions = element.reactions
assert all(isinstance(x, openmc.data.PhotonReaction) for x in reactions.values())
assert reaction in reactions
with pytest.raises(KeyError):
reactions[18]
@pytest.mark.parametrize('element', ['Pu'], indirect=True)
def test_export_to_hdf5(tmpdir, element):
filename = str(tmpdir.join('tmp.h5'))
element.export_to_hdf5(filename)
assert os.path.exists(filename)
element2 = openmc.data.IncidentPhoton.from_hdf5(filename)
energy = np.logspace(np.log10(1.0), np.log10(1.0e10), num=100)
for mt in (502, 504, 515, 517, 522, 541, 570):
xs = element[mt].xs(energy)
xs2 = element2[mt].xs(energy)
assert np.allclose(xs, xs2)
assert element[502].scattering_factor == element2[502].scattering_factor
assert element.atomic_relaxation.transitions['O3'].equals(
element2.atomic_relaxation.transitions['O3'])
assert (element.compton_profiles['binding_energy'] ==
element2.compton_profiles['binding_energy']).all()
assert (element.bremsstrahlung['electron_energy'] ==
element2.bremsstrahlung['electron_energy']).all()
element2.export_to_hdf5(filename, 'w')
def test_photodat_only(run_in_tmpdir):
endf_dir = Path(os.environ['OPENMC_ENDF_DATA'])
photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf'
data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)
data.export_to_hdf5('tmp.h5', 'w') | true | true |
f7274ee40debdeaa4b9393203cab8d398b64d837 | 756 | py | Python | v_python/conftest.py | spcartman/selenium_full_course | 673f25dcf2340c0c14666c7a91f774fd7659f0b1 | [
"MIT"
] | null | null | null | v_python/conftest.py | spcartman/selenium_full_course | 673f25dcf2340c0c14666c7a91f774fd7659f0b1 | [
"MIT"
] | null | null | null | v_python/conftest.py | spcartman/selenium_full_course | 673f25dcf2340c0c14666c7a91f774fd7659f0b1 | [
"MIT"
] | null | null | null | import pytest
import json
from os import path
from fixture.fixture import Fixture
with open(path.join(path.dirname(path.abspath(__file__)), 'config.json')) as f:
config = json.load(f)
@pytest.fixture(scope="session")
def app(request):
fixture = Fixture(admin_root=config['admin']['url'],
admin_countries_url=config['admin']['countries_url'],
admin_zones_url=config['admin']['zones_url'],
admin_catalog_url=config['admin']['catalog_url'],
admin_name=config['admin']['name'],
admin_password=config['admin']['password'],
shop_root=config['shop']['url'])
request.addfinalizer(fixture.destroy)
return fixture
| 34.363636 | 79 | 0.611111 | import pytest
import json
from os import path
from fixture.fixture import Fixture
with open(path.join(path.dirname(path.abspath(__file__)), 'config.json')) as f:
config = json.load(f)
@pytest.fixture(scope="session")
def app(request):
fixture = Fixture(admin_root=config['admin']['url'],
admin_countries_url=config['admin']['countries_url'],
admin_zones_url=config['admin']['zones_url'],
admin_catalog_url=config['admin']['catalog_url'],
admin_name=config['admin']['name'],
admin_password=config['admin']['password'],
shop_root=config['shop']['url'])
request.addfinalizer(fixture.destroy)
return fixture
| true | true |
f7275019da64b65a5d33210b547cf8cc8acaac87 | 1,094 | py | Python | bitoptions/widgets.py | amateja/django-bitoptions | 6e2d00d8b5d16c4a678a319c06313de6d2c75eda | [
"Unlicense"
] | 3 | 2016-03-16T03:11:01.000Z | 2016-12-31T06:20:57.000Z | bitoptions/widgets.py | amateja/django-bitoptions | 6e2d00d8b5d16c4a678a319c06313de6d2c75eda | [
"Unlicense"
] | 6 | 2018-06-12T12:40:22.000Z | 2018-06-18T09:46:46.000Z | bitoptions/widgets.py | amateja/django-bitoptions | 6e2d00d8b5d16c4a678a319c06313de6d2c75eda | [
"Unlicense"
] | null | null | null | from django import forms
from .utils import number2powers, BitOptions
class BitOptionsWidget(forms.CheckboxSelectMultiple):
"""
Default BitOptionsField widget to present every option (bit) as checkbox.
"""
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value of
this widget.
"""
return sum(map(int, super(BitOptionsWidget, self).value_from_datadict(
data, files, name)))
def render(self, name, value, attrs=None, renderer=None):
"""
Returns HTML for the widget, as a Unicode string.
"""
if isinstance(value, BitOptions):
value = list(number2powers(value.value))
elif isinstance(value, int):
value = list(number2powers(value))
try:
return super(BitOptionsWidget, self).render(name, value,
attrs, renderer)
except TypeError:
return super(BitOptionsWidget, self).render(name, value, attrs)
| 34.1875 | 79 | 0.606947 | from django import forms
from .utils import number2powers, BitOptions
class BitOptionsWidget(forms.CheckboxSelectMultiple):
def value_from_datadict(self, data, files, name):
return sum(map(int, super(BitOptionsWidget, self).value_from_datadict(
data, files, name)))
def render(self, name, value, attrs=None, renderer=None):
if isinstance(value, BitOptions):
value = list(number2powers(value.value))
elif isinstance(value, int):
value = list(number2powers(value))
try:
return super(BitOptionsWidget, self).render(name, value,
attrs, renderer)
except TypeError:
return super(BitOptionsWidget, self).render(name, value, attrs)
| true | true |
f72753101e10bb8565a1a21aa5043b6b642088a5 | 2,482 | py | Python | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_learned_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_learned_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway_learned_routes.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayLearnedRoutesResult',
'AwaitableGetVirtualNetworkGatewayLearnedRoutesResult',
'get_virtual_network_gateway_learned_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayLearnedRoutesResult:
    """
    List of virtual network gateway routes
    """
    def __init__(__self__, value=None):
        # `value` is filled in by the Pulumi runtime from the invoke response.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
        """
        List of gateway routes
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(GetVirtualNetworkGatewayLearnedRoutesResult):
    # Awaitable wrapper so the (already-resolved) invoke result can be
    # used with `await`: __await__ yields nothing and immediately
    # returns a plain result object built from the stored value.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayLearnedRoutesResult(
            value=self.value)
def get_virtual_network_gateway_learned_routes(resource_group_name: Optional[str] = None,
                                               virtual_network_gateway_name: Optional[str] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayLearnedRoutesResult:
    """
    List of virtual network gateway routes
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (a default
        instance with the SDK version filled in is used when omitted).
    :return: An awaitable wrapper around the learned-routes result.
    """
    # Build the raw argument mapping expected by the ARM invoke.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the provider version to the SDK's own version when unset.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20180601:getVirtualNetworkGatewayLearnedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayLearnedRoutesResult).value
    return AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(
        value=__ret__.value)
| 37.044776 | 183 | 0.711523 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayLearnedRoutesResult',
'AwaitableGetVirtualNetworkGatewayLearnedRoutesResult',
'get_virtual_network_gateway_learned_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayLearnedRoutesResult:
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(GetVirtualNetworkGatewayLearnedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayLearnedRoutesResult(
value=self.value)
def get_virtual_network_gateway_learned_routes(resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayLearnedRoutesResult:
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180601:getVirtualNetworkGatewayLearnedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayLearnedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayLearnedRoutesResult(
value=__ret__.value)
| true | true |
f727534c7c0bee61c6e00e981435b67795e4a56e | 9,381 | py | Python | 2016/maryland/scripts/maryland_election.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | null | null | null | 2016/maryland/scripts/maryland_election.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | 2 | 2016-08-12T20:52:24.000Z | 2021-02-08T20:24:27.000Z | 2016/maryland/scripts/maryland_election.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | 1 | 2018-08-17T21:16:30.000Z | 2018-08-17T21:16:30.000Z | """
id,
date,
name,
election_type,
state_id,
is_statewide,
registration_info,
absentee_ballot_info,
results_uri,
polling_hours,
has_election_day_registration,
registration_deadline,
absentee_request_deadline,
hours_open_id
"""
import datetime
import csv
import config
from maryland_polling_location import PollingLocationTxt
import pandas as pd
class ElectionTxt(object):
def __init__(self, base_df, state_feed):
self.base_df = base_df
self.state_feed = state_feed
#print self.base_df
#print state_feed
def create_election_id(self, index):
"""Leading zeroes are added, if necessary, to maintain a
consistent id length.
"""
if index <= 9:
index_str = '000' + str(index)
elif index in range(10, 100):
index_str = '00' + str(index)
elif index in range(100, 1000):
index_str = '0' + str(index)
else:
index_str = str(index)
return 'e' + str(index_str)
def get_date(self):
"""#"""
return '2016-11-08'
def get_name(self):
"""#"""
return "2016 General Election"
def get_election_name(self):
return "2016 General"
def get_election_type(self):
"""#"""
return 'federal'
#def get_state_id(self):
# """#"""
# get state name, lower()
# pass
def create_state_id(self):
"""Creates the state_id by matching a key in the state_dict and retrieving
and modifying its value. A '0' is added, if necessary, to maintain a
consistent id length.
"""
# TODO: use fips code
for key, value in config.fips_dict.iteritems():
if key == config.state.lower():
print key, value
state_num = value
if state_num <=9:
state_num = '0' + str(state_num)
else:
state_num = str(state_num)
return 'st' + state_num
def is_statewide(self):
"""#"""
return 'true'
def registration_info(self):
"""#"""
return ''
def absentee_ballot_info(self):
"""#"""
return ''
def results_uri(self):
"""#"""
return ''
def polling_hours(self, hours):
"""Takes hours from polling_location."""
return hours
def has_election_day_registration(self):
"""#"""
return 'false'
def registration_deadline(self, index):
"""Grab registration_deadline from state_feed document."""
for index, row in self.state_feed.iterrows():
if row['office_name'] == config.state:
return row['registration_deadline']
else:
print 'Missing value at row ' + str(index) + '.'
return ''
def absentee_request_deadline(self, index):
"""Grab ballot_request_deadline_display from state_feed document."""
for index, row in self.state_feed.iterrows():
if row['office_name'] == config.state:
return row['ballot_request_deadline']
else:
print 'Missing value at row ' + str(index) + '.'
return ''
def hours_open_id(self):
"""#"""
return ''
def build_election_txt(self):
"""
New columns that match the 'schedule.txt' template are inserted into the DataFrame, apply() is
used to run methods that generate the values for each row of the new columns.
"""
self.base_df['id'] = self.base_df.apply(
lambda row: self.create_election_id(row['index']), axis=1)
self.base_df['date'] = self.base_df.apply(
lambda row: self.get_date(), axis=1)
self.base_df['name'] = self.base_df.apply(
lambda row: self.get_name(), axis=1)
self.base_df['election_type'] = self.base_df.apply(
lambda row: self.get_election_type(), axis=1)
self.base_df['state_id'] = self.base_df.apply(
lambda row: self.create_state_id(), axis=1)
self.base_df['is_statewide'] = self.base_df.apply(
lambda row: self.is_statewide(), axis=1)
self.base_df['registration_info'] = self.base_df.apply(
lambda row: self.registration_info(), axis=1)
self.base_df['absentee_ballot_info'] = self.base_df.apply(
lambda row: self.absentee_ballot_info(), axis=1)
self.base_df['results_uri'] = self.base_df.apply(
lambda row: self.results_uri(), axis=1)
self.base_df['polling_hours'] = self.base_df.apply(
lambda row: self.polling_hours(row['hours']), axis=1)
self.base_df['has_election_day_registration'] = self.base_df.apply(
lambda row: self.has_election_day_registration(), axis=1)
#
self.base_df['registration_deadline'] = self.base_df.apply(
lambda row: self.registration_deadline(row['index']), axis=1)
self.base_df['absentee_request_deadline'] = self.base_df.apply(
lambda row: self.absentee_request_deadline(row['index']), axis=1)
self.base_df['hours_open_id'] = self.base_df.apply(
lambda row: self.hours_open_id(), axis=1)
#print self.base_df
return self.base_df
def write(self):
et = self.build_election_txt()
et.drop(['loc_name', 'county', 'location', 'adr_1', 'adr_2', 'adr_3', 'city', 'state', 'zip', 'dirs',
'services', 'start_date', 'end_date', 'st_id', 'start_time', 'end_time',
'index', 'address_line', 'directions',
'hours', 'photo_uri', 'hours_open_id', 'is_drop_box', 'is_early_voting', 'lat', 'long', 'latlng', 'source_id'], inplace=True, axis=1)
cols = ["id", "date", "name", "election_type", "state_id", "is_statewide", "registration_info",
'absentee_ballot_info', 'results_uri', "polling_hours", 'has_election_day_registration', 'registration_deadline',
'absentee_request_deadline', 'hours_open_id']
et = et.reindex(columns=cols)
#print et
et.to_csv(config.output + 'election.txt', index=False, encoding='utf-8') # send to txt file
et.to_csv(config.output + 'election.csv', index=False, encoding='utf-8') # send to csv file
# def write_election_txt(self):
# output_path = "/home/acg/democracyworks/hand-collection-to-vip/minnesota/output/election.txt"
# try:
## f = open(output_path, 'ab')
# fieldnames = ['id', 'date', 'name', 'election_type', 'state_id', 'is_statewide',
# 'registration_info', 'absentee_ballot_info', 'results_uri',
# 'polling_hours', 'has_election_day_registration', 'registration_deadline',
# 'absentee_request_deadline', 'hours_open_id']
# writer = csv.DictWriter(f, fieldnames=fieldnames)
# writer.writeheader()
# writer.writerow({'id': self.create_id(),
# 'date': self.get_date(),
# 'name': self.get_name(),
# 'election_type': self.get_election_type(),
# 'state_id': self.create_state_id(),
# 'is_statewide': self.is_statewide(),
# 'registration_info': '',
# 'absentee_ballot_info': '',
# 'results_uri': self.results_uri(),
# 'polling_hours': '',
# 'has_election_day_registration': self.has_election_day_registration(),
# 'registration_deadline': self.registration_deadline(),
# 'absentee_request_deadline': self.absentee_request_deadline(),
# 'hours_open_id': self.hours_open_id()
# })
# finally:
# f.close()
if __name__ == '__main__':
    state_feed_file = 'state_feed_info.csv'
    # Intermediate early-voting document produced by the polling-location
    # pipeline (see maryland_polling_location).
    early_voting_path =config.output + "intermediate_doc.csv"
    #early_voting_path = "/Users/danielgilberg/Development/hand-collection-to-vip/polling_location/polling_location_input/kansas_early_voting_info.csv"
    # Column names for the early-voting CSV; the file's own header row
    # is skipped (skiprows=1) and replaced with these names.
    colnames = ['loc_name', 'county', 'location', 'adr_1', 'adr_2', 'adr_3', 'city', 'state', 'zip', 'dirs',
                'services', 'start_date', 'end_date', 'st_id', 'start_time', 'end_time',
                'index', 'address_line', 'directions',
                'hours', 'photo_uri', 'hours_open_id', 'is_drop_box', 'is_early_voting', 'lat', 'long', 'latlng', 'source_id']
    early_voting_df = pd.read_csv(early_voting_path, names=colnames, encoding='utf-8', skiprows=1)
    # 1-based row index used to build election ids.
    early_voting_df['index'] = early_voting_df.index + 1
    state_feed_path = config.input + state_feed_file
    # Column names for the per-state feed metadata CSV.
    colnames = ['office_name', 'ocd_division', 'same_day_reg', 'election_date', 'election_name', 'registration_deadline',
                "registration_deadline_display", 'ballot_request_deadline', 'ballot_request_deadline_display']
    state_feed_df = pd.read_csv(state_feed_path, names=colnames, encoding='utf-8', skiprows=1)
    state_feed_df['index'] = state_feed_df.index + 1
    # print state_feed_df
    et = ElectionTxt(early_voting_df, state_feed_df)
    et.write()
id,
date,
name,
election_type,
state_id,
is_statewide,
registration_info,
absentee_ballot_info,
results_uri,
polling_hours,
has_election_day_registration,
registration_deadline,
absentee_request_deadline,
hours_open_id
"""
import datetime
import csv
import config
from maryland_polling_location import PollingLocationTxt
import pandas as pd
class ElectionTxt(object):
def __init__(self, base_df, state_feed):
self.base_df = base_df
self.state_feed = state_feed
def create_election_id(self, index):
"""Leading zeroes are added, if necessary, to maintain a
consistent id length.
"""
if index <= 9:
index_str = '000' + str(index)
elif index in range(10, 100):
index_str = '00' + str(index)
elif index in range(100, 1000):
index_str = '0' + str(index)
else:
index_str = str(index)
return 'e' + str(index_str)
def get_date(self):
"""#"""
return '2016-11-08'
def get_name(self):
"""#"""
return "2016 General Election"
def get_election_name(self):
return "2016 General"
def get_election_type(self):
"""#"""
return 'federal'
def create_state_id(self):
"""Creates the state_id by matching a key in the state_dict and retrieving
and modifying its value. A '0' is added, if necessary, to maintain a
consistent id length.
"""
for key, value in config.fips_dict.iteritems():
if key == config.state.lower():
print key, value
state_num = value
if state_num <=9:
state_num = '0' + str(state_num)
else:
state_num = str(state_num)
return 'st' + state_num
def is_statewide(self):
"""#"""
return 'true'
def registration_info(self):
"""#"""
return ''
def absentee_ballot_info(self):
"""#"""
return ''
def results_uri(self):
"""#"""
return ''
def polling_hours(self, hours):
"""Takes hours from polling_location."""
return hours
def has_election_day_registration(self):
"""#"""
return 'false'
def registration_deadline(self, index):
"""Grab registration_deadline from state_feed document."""
for index, row in self.state_feed.iterrows():
if row['office_name'] == config.state:
return row['registration_deadline']
else:
print 'Missing value at row ' + str(index) + '.'
return ''
def absentee_request_deadline(self, index):
"""Grab ballot_request_deadline_display from state_feed document."""
for index, row in self.state_feed.iterrows():
if row['office_name'] == config.state:
return row['ballot_request_deadline']
else:
print 'Missing value at row ' + str(index) + '.'
return ''
def hours_open_id(self):
"""#"""
return ''
def build_election_txt(self):
"""
New columns that match the 'schedule.txt' template are inserted into the DataFrame, apply() is
used to run methods that generate the values for each row of the new columns.
"""
self.base_df['id'] = self.base_df.apply(
lambda row: self.create_election_id(row['index']), axis=1)
self.base_df['date'] = self.base_df.apply(
lambda row: self.get_date(), axis=1)
self.base_df['name'] = self.base_df.apply(
lambda row: self.get_name(), axis=1)
self.base_df['election_type'] = self.base_df.apply(
lambda row: self.get_election_type(), axis=1)
self.base_df['state_id'] = self.base_df.apply(
lambda row: self.create_state_id(), axis=1)
self.base_df['is_statewide'] = self.base_df.apply(
lambda row: self.is_statewide(), axis=1)
self.base_df['registration_info'] = self.base_df.apply(
lambda row: self.registration_info(), axis=1)
self.base_df['absentee_ballot_info'] = self.base_df.apply(
lambda row: self.absentee_ballot_info(), axis=1)
self.base_df['results_uri'] = self.base_df.apply(
lambda row: self.results_uri(), axis=1)
self.base_df['polling_hours'] = self.base_df.apply(
lambda row: self.polling_hours(row['hours']), axis=1)
self.base_df['has_election_day_registration'] = self.base_df.apply(
lambda row: self.has_election_day_registration(), axis=1)
self.base_df['registration_deadline'] = self.base_df.apply(
lambda row: self.registration_deadline(row['index']), axis=1)
self.base_df['absentee_request_deadline'] = self.base_df.apply(
lambda row: self.absentee_request_deadline(row['index']), axis=1)
self.base_df['hours_open_id'] = self.base_df.apply(
lambda row: self.hours_open_id(), axis=1)
return self.base_df
def write(self):
et = self.build_election_txt()
et.drop(['loc_name', 'county', 'location', 'adr_1', 'adr_2', 'adr_3', 'city', 'state', 'zip', 'dirs',
'services', 'start_date', 'end_date', 'st_id', 'start_time', 'end_time',
'index', 'address_line', 'directions',
'hours', 'photo_uri', 'hours_open_id', 'is_drop_box', 'is_early_voting', 'lat', 'long', 'latlng', 'source_id'], inplace=True, axis=1)
cols = ["id", "date", "name", "election_type", "state_id", "is_statewide", "registration_info",
'absentee_ballot_info', 'results_uri', "polling_hours", 'has_election_day_registration', 'registration_deadline',
'absentee_request_deadline', 'hours_open_id']
et = et.reindex(columns=cols)
et.to_csv(config.output + 'election.txt', index=False, encoding='utf-8')
et.to_csv(config.output + 'election.csv', index=False, encoding='utf-8')
'__main__':
state_feed_file = 'state_feed_info.csv'
early_voting_path =config.output + "intermediate_doc.csv"
colnames = ['loc_name', 'county', 'location', 'adr_1', 'adr_2', 'adr_3', 'city', 'state', 'zip', 'dirs',
'services', 'start_date', 'end_date', 'st_id', 'start_time', 'end_time',
'index', 'address_line', 'directions',
'hours', 'photo_uri', 'hours_open_id', 'is_drop_box', 'is_early_voting', 'lat', 'long', 'latlng', 'source_id']
early_voting_df = pd.read_csv(early_voting_path, names=colnames, encoding='utf-8', skiprows=1)
early_voting_df['index'] = early_voting_df.index + 1
state_feed_path = config.input + state_feed_file
colnames = ['office_name', 'ocd_division', 'same_day_reg', 'election_date', 'election_name', 'registration_deadline',
"registration_deadline_display", 'ballot_request_deadline', 'ballot_request_deadline_display']
state_feed_df = pd.read_csv(state_feed_path, names=colnames, encoding='utf-8', skiprows=1)
state_feed_df['index'] = state_feed_df.index + 1
et = ElectionTxt(early_voting_df, state_feed_df)
et.write() | false | true |
f727542bee6fa712929c024c3afa8e0aad944e62 | 1,255 | py | Python | imaging_db/database/dataset.py | czbiohub/imagingDB | 1b1a58df3dbc94a43fb842cad198fb6c1c833326 | [
"MIT"
] | 5 | 2018-11-07T15:37:41.000Z | 2020-05-27T10:29:02.000Z | imaging_db/database/dataset.py | czbiohub/imagingDB | 1b1a58df3dbc94a43fb842cad198fb6c1c833326 | [
"MIT"
] | 39 | 2018-11-07T21:06:42.000Z | 2019-09-30T21:33:31.000Z | imaging_db/database/dataset.py | czbiohub/imagingDB | 1b1a58df3dbc94a43fb842cad198fb6c1c833326 | [
"MIT"
] | 2 | 2019-11-04T22:25:04.000Z | 2020-11-04T04:13:20.000Z | # coding=utf-8
from datetime import datetime
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, DateTime
from imaging_db.database.base import Base
def _serial_to_date_time(dataset_serial):
substrs = dataset_serial.split("-")
date_time = datetime(int(substrs[1]), # year
int(substrs[2]), # month
int(substrs[3]), # day
int(substrs[4]), # hour
int(substrs[5]), # minute
int(substrs[6]), # second
)
return date_time
class DataSet(Base):
    """ORM model for an imaging dataset (table 'data_set')."""
    __tablename__ = 'data_set'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Serial of the form '<prefix>-YYYY-MM-DD-HH-MM-SS'; its timestamp
    # is also parsed into date_time (see _serial_to_date_time).
    dataset_serial = Column(String)
    description = Column(String)
    microscope = Column(String)
    # Whether the dataset was uploaded as individual frames.
    frames = Column(Boolean)
    # Acquisition timestamp decoded from dataset_serial.
    date_time = Column(DateTime)
    # Optional self-reference to a parent dataset.
    parent_id = Column(Integer, ForeignKey("data_set.id"))
    def __init__(self, dataset_serial, description, microscope, frames, parent_id):
        """Store the given fields and derive date_time from the serial."""
        self.dataset_serial = dataset_serial
        self.description = description
        self.microscope = microscope
        self.frames = frames
        self.date_time = _serial_to_date_time(dataset_serial)
        self.parent_id = parent_id
| 32.179487 | 83 | 0.620717 |
from datetime import datetime
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, DateTime
from imaging_db.database.base import Base
def _serial_to_date_time(dataset_serial):
substrs = dataset_serial.split("-")
date_time = datetime(int(substrs[1]),
int(substrs[2]),
int(substrs[3]),
int(substrs[4]),
int(substrs[5]),
int(substrs[6]),
)
return date_time
class DataSet(Base):
__tablename__ = 'data_set'
id = Column(Integer, primary_key=True)
dataset_serial = Column(String)
description = Column(String)
microscope = Column(String)
frames = Column(Boolean)
date_time = Column(DateTime)
parent_id = Column(Integer, ForeignKey("data_set.id"))
def __init__(self, dataset_serial, description, microscope, frames, parent_id):
self.dataset_serial = dataset_serial
self.description = description
self.microscope = microscope
self.frames = frames
self.date_time = _serial_to_date_time(dataset_serial)
self.parent_id = parent_id
| true | true |
f7275494b8cb9e77d969fcf6476d10ca21e0e71f | 3,818 | py | Python | salt/sdb/confidant.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | salt/sdb/confidant.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | salt/sdb/confidant.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2017-01-05T16:14:59.000Z | 2019-01-31T23:15:25.000Z | # -*- coding: utf-8 -*-
'''
An SDB module for getting credentials from confidant.
Configuring the Confidant module
================================
The module can be configured via sdb in the minion config:
.. code-block:: yaml
confidant:
driver: confidant
# The URL of the confidant web service
url: 'https://confidant-production.example.com'
# The context to use for KMS authentication
auth_context:
from: example-production-iad
to: confidant-production-iad
user_type: service
# The KMS master key to use for authentication
auth_key: "alias/authnz"
# Cache file for KMS auth token
token_cache_file: /run/confidant/confidant_token
# The duration of the validity of a token, in minutes
token_duration: 60
# key, keyid and region can be defined in the profile, but it's generally
# best to use IAM roles or environment variables for AWS auth.
keyid: 98nh9h9h908h09kjjk
key: jhf908gyeghehe0he0g8h9u0j0n0n09hj09h0
region: us-east-1
:depends: confidant-common, confidant-client
Module Documentation
====================
'''
from __future__ import absolute_import
# Import python libs
import logging
import copy
# Import third party libs
try:
import confidant.client
import confidant.formatter
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Set up logging
log = logging.getLogger(__name__)
__virtualname__ = 'confidant'
def __virtual__():
    '''
    Only load this module when the confidant libraries are importable.
    '''
    return __virtualname__ if HAS_LIBS else False
def get(key, profile=None):
    '''
    Read pillar data from Confidant via its API.
    CLI Example:
        salt myminion sdb.get 'sdb://confidant/credentials'
    Valid keys are: credentials, credentials_metadata, result. credentials
    returns a dict of joined credential_pairs, credentials_metadata returns a
    dict of metadata relevant to the credentials mapped to the confidant
    service, and result returns a bool that can be used to determine if the sdb
    call succeeded or failed to fetch credentials from confidant (or from local
    cache). If result is false, the data in credentials or credentials_metadata
    can't be trusted.
    '''
    # default to returning failure
    ret = {'result': False, 'credentials': None, 'credentials_metadata': None}
    # Work on a copy so the caller's profile dict is never mutated.
    profile_data = copy.deepcopy(profile)
    if profile_data.get('disabled', False):
        # Profile explicitly disabled: report success with empty data.
        ret['result'] = True
        return ret.get(key)
    token_version = profile_data.get('token_version', 1)
    try:
        # These three settings are mandatory; 'from' names the IAM role
        # used for KMS auth. TypeError covers a non-dict profile.
        url = profile_data['url']
        auth_key = profile_data['auth_key']
        auth_context = profile_data['auth_context']
        role = auth_context['from']
    except (KeyError, TypeError):
        msg = ('profile has undefined url, auth_key or auth_context')
        log.debug(msg)
        return ret.get(key)
    # Optional settings with sane defaults.
    region = profile_data.get('region', 'us-east-1')
    token_duration = profile_data.get('token_duration', 60)
    retries = profile_data.get('retries', 5)
    token_cache_file = profile_data.get('token_cache_file')
    backoff = profile_data.get('backoff', 1)
    client = confidant.client.ConfidantClient(
        url,
        auth_key,
        auth_context,
        token_lifetime=token_duration,
        token_version=token_version,
        token_cache_file=token_cache_file,
        region=region,
        retries=retries,
        backoff=backoff
    )
    try:
        # decrypt_blind=True decrypts credentials client-side via KMS.
        data = client.get_service(
            role,
            decrypt_blind=True
        )
    except confidant.client.TokenCreationError:
        # KMS auth token could not be created; report failure.
        return ret.get(key)
    if not data['result']:
        return ret.get(key)
    # Re-shape the service response into the documented key layout.
    ret = confidant.formatter.combined_credential_pair_format(data)
    ret['result'] = True
    return ret.get(key)
| 29.828125 | 79 | 0.678104 |
from __future__ import absolute_import
import logging
import copy
try:
import confidant.client
import confidant.formatter
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = 'confidant'
def __virtual__():
if HAS_LIBS:
return __virtualname__
else:
return False
def get(key, profile=None):
ret = {'result': False, 'credentials': None, 'credentials_metadata': None}
profile_data = copy.deepcopy(profile)
if profile_data.get('disabled', False):
ret['result'] = True
return ret.get(key)
token_version = profile_data.get('token_version', 1)
try:
url = profile_data['url']
auth_key = profile_data['auth_key']
auth_context = profile_data['auth_context']
role = auth_context['from']
except (KeyError, TypeError):
msg = ('profile has undefined url, auth_key or auth_context')
log.debug(msg)
return ret.get(key)
region = profile_data.get('region', 'us-east-1')
token_duration = profile_data.get('token_duration', 60)
retries = profile_data.get('retries', 5)
token_cache_file = profile_data.get('token_cache_file')
backoff = profile_data.get('backoff', 1)
client = confidant.client.ConfidantClient(
url,
auth_key,
auth_context,
token_lifetime=token_duration,
token_version=token_version,
token_cache_file=token_cache_file,
region=region,
retries=retries,
backoff=backoff
)
try:
data = client.get_service(
role,
decrypt_blind=True
)
except confidant.client.TokenCreationError:
return ret.get(key)
if not data['result']:
return ret.get(key)
ret = confidant.formatter.combined_credential_pair_format(data)
ret['result'] = True
return ret.get(key)
| true | true |
f72755a1c07aa293fbde7de86964d0198d4ff90b | 2,102 | py | Python | hair_seg/evaluate.py | eric91sanchez/hair_seg | 4f688daac0ec4ea906ff0462ae51634293e35447 | [
"MIT"
] | 4 | 2021-03-04T05:57:45.000Z | 2022-02-15T17:40:57.000Z | hair_seg/evaluate.py | vadik6666/hair-seg | 4f688daac0ec4ea906ff0462ae51634293e35447 | [
"MIT"
] | 4 | 2021-06-08T22:43:59.000Z | 2022-03-12T00:51:40.000Z | hair_seg/evaluate.py | vadik6666/hair_seg | 4f688daac0ec4ea906ff0462ae51634293e35447 | [
"MIT"
] | null | null | null | """
Evaluate
"""
import re
import math
import datetime
import random
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from loss import iou_loss, HairMattingLoss, acc_loss, F1_loss
from utils import create_multi_figure
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
def evalTest(test_data, model, args):
    """Evaluate `model` on `test_data` and print average loss/IoU/acc/F1.

    Runs under torch.no_grad() so no autograd graphs are built during
    evaluation (the original accumulated graphs and wasted memory).
    """
    testloader = DataLoader(test_data, batch_size=4, shuffle=False)
    hairmat_loss = HairMattingLoss(args.grad_lambda)
    total_loss, total_iou, total_acc, total_f1 = 0, 0, 0, 0
    # NOTE(review): model.eval() is intentionally NOT called here to
    # preserve existing behavior -- confirm whether the model uses
    # dropout/batch-norm before adding it.
    with torch.no_grad():
        for batch in testloader:
            image, mask = (i.to(DEVICE) for i in batch)
            pred = model(image)
            total_loss += hairmat_loss(pred, mask, image).item()
            total_iou += iou_loss(pred, mask).item()
            total_acc += acc_loss(pred, mask).item()
            total_f1 += F1_loss(pred, mask).item()
    num_batches = len(testloader)
    print("Testing Loss: ", total_loss / num_batches)
    print("Testing IOU: ", total_iou / num_batches)
    print("Testing Acc: ", total_acc / num_batches)
    print("Testing F1: ", total_f1 / num_batches)
def evaluateOne(img, model, absolute=True):
    """Run the model on a single image and save the dyed visualisation.

    With `absolute=True` the predicted mask is binarised at 0.5;
    otherwise only values below 0.4 are zeroed (soft mask). The figure
    is written to 'result.jpg'.
    """
    batch = img.to(DEVICE).unsqueeze(0)
    mask_pred = model(batch)
    if absolute:
        # Hard 0/1 mask.
        hair = mask_pred > 0.5
        mask_pred[hair] = 1.0
        mask_pred[~hair] = 0.0
    else:
        # Soft mask: suppress low-confidence pixels only.
        mask_pred[mask_pred < 0.4] = 0
    create_multi_figure([[batch[0], mask_pred[0]]], dye=True)
    plt.savefig("result.jpg")
def evaluate(test_data, model, num, absolute=True):
    """Visualise predictions for `num` randomly sampled test examples.

    Each figure row holds (image, ground-truth mask, prediction); the
    combined figure is saved to 'result.jpg'. Samples are drawn with
    replacement, so duplicates are possible.
    """
    rows = []
    for _ in range(num):
        sample = test_data[random.randint(0, len(test_data) - 1)]
        image, mask = (t.to(DEVICE).unsqueeze(0) for t in sample)
        pred = model(image)
        if absolute:
            hair = pred > 0.5
            pred[hair] = 1.0
            pred[~hair] = 0.0
        else:
            pred[pred < 0.4] = 0
        # Drop the batch dimension for plotting.
        rows.append([image[0], mask[0], pred[0]])
    create_multi_figure(rows, dye=True)
    plt.savefig("result.jpg")
| 26.948718 | 73 | 0.62274 |
import re
import math
import datetime
import random
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from loss import iou_loss, HairMattingLoss, acc_loss, F1_loss
from utils import create_multi_figure
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
def evalTest(test_data, model, args):
testloader = DataLoader(test_data, batch_size=4, shuffle=False)
hairmat_loss = HairMattingLoss(args.grad_lambda)
total_loss, total_iou, total_acc, total_f1 = 0, 0, 0, 0
for batch in testloader:
image, mask = (i.to(DEVICE) for i in batch)
pred = model(image)
total_loss += hairmat_loss(pred, mask, image).item()
iloss = iou_loss(pred, mask).item()
total_iou += iloss
aloss = acc_loss(pred, mask).item()
total_acc += aloss
floss = F1_loss(pred, mask).item()
total_f1 += floss
print("Testing Loss: ", total_loss / len(testloader))
print("Testing IOU: ", total_iou / len(testloader))
print("Testing Acc: ", total_acc / len(testloader))
print("Testing F1: ", total_f1 / len(testloader))
def evaluateOne(img, model, absolute=True):
img = img.to(DEVICE).unsqueeze(0)
pred = model(img)
if absolute:
pred[pred > 0.5] = 1.0
pred[pred <= 0.5] = 0.0
else:
pred[pred < 0.4] = 0
rows = [[img[0], pred[0]]]
create_multi_figure(rows, dye=True)
plt.savefig("result.jpg")
def evaluate(test_data, model, num, absolute=True):
rows = [None] * num
for i in range(num):
idx = random.randint(0, len(test_data) - 1)
image, mask = (i.to(DEVICE).unsqueeze(0) for i in test_data[idx])
pred = model(image)
if absolute:
pred[pred > 0.5] = 1.0
pred[pred <= 0.5] = 0.0
else:
pred[pred < 0.4] = 0
rows[i] = [image[0], mask[0], pred[0]]
create_multi_figure(rows, dye=True)
plt.savefig("result.jpg")
| true | true |
f72755d6fecdb1531c133393712bc30acc965025 | 5,519 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_network_interface_load_balancers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_network_interface_load_balancers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations_async/_network_interface_load_balancers_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
# Generic return type for the optional `cls` response-transformer callback.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations:
    """NetworkInterfaceLoadBalancersOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name: str,
        network_interface_name: str,
        **kwargs
    ) -> AsyncIterable["models.NetworkInterfaceLoadBalancerListResult"]:
        """List all load balancers in a network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceLoadBalancerListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceLoadBalancerListResult"]
        # Default HTTP-status -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword argument.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        def prepare_request(next_link=None):
            # First page: build the URL from the operation's URL template.
            # Subsequent pages: follow the service-supplied next_link verbatim
            # (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; apply the optional `cls` transformer to the
            # element list before handing it to the pager.
            deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Issue one GET per page; any status other than 200 is mapped to
            # the appropriate exception via error_map.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
| 48.412281 | 196 | 0.673854 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations:
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceLoadBalancerListResult"]:
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}
| true | true |
f727572624c9b1104e87598335f97c84bacafded | 842 | py | Python | src/testing/migrations/0006_auto_20210617_1656.py | DiceNameIsMy/testing_sitev2 | c973f796bd1bd7cfcfc53298a3884b92d2a36d27 | [
"MIT"
] | 1 | 2021-06-29T09:47:25.000Z | 2021-06-29T09:47:25.000Z | src/testing/migrations/0006_auto_20210617_1656.py | DiceNameIsMy/testing_sitev2 | c973f796bd1bd7cfcfc53298a3884b92d2a36d27 | [
"MIT"
] | null | null | null | src/testing/migrations/0006_auto_20210617_1656.py | DiceNameIsMy/testing_sitev2 | c973f796bd1bd7cfcfc53298a3884b92d2a36d27 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-17 10:56
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add completion-tracking fields to the ``usertestresult`` model.

    Auto-generated by Django 3.2.4 ``makemigrations`` (see header stamp).
    """
    dependencies = [
        ('testing', '0005_useranswer_is_correct'),
    ]
    operations = [
        migrations.AddField(
            model_name='usertestresult',
            name='end_time',
            # NOTE(review): the literal datetime looks like a one-off default
            # entered at makemigrations time (applies only to pre-existing
            # rows) -- confirm against the model definition.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 17, 17, 56, 54, 101323)),
        ),
        migrations.AddField(
            model_name='usertestresult',
            name='is_completed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='usertestresult',
            name='start_time',
            # NOTE(review): same one-off default pattern as end_time above.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 17, 16, 56, 54, 101302)),
        ),
    ]
| 28.066667 | 99 | 0.599762 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testing', '0005_useranswer_is_correct'),
]
operations = [
migrations.AddField(
model_name='usertestresult',
name='end_time',
field=models.DateTimeField(default=datetime.datetime(2021, 6, 17, 17, 56, 54, 101323)),
),
migrations.AddField(
model_name='usertestresult',
name='is_completed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='usertestresult',
name='start_time',
field=models.DateTimeField(default=datetime.datetime(2021, 6, 17, 16, 56, 54, 101302)),
),
]
| true | true |
f72757698a249ebc1ea831e90b3b83ef1660ea9a | 3,589 | py | Python | google/cloud/recommender/v1beta1/recommender-v1beta1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/recommender/v1beta1/recommender-v1beta1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/recommender/v1beta1/recommender-v1beta1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
# Paths and package metadata used by the lower-bound-checker sessions.
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")

# Sessions that run by default when ``nox`` is invoked with no -s flag.
nox.sessions = [
    "unit",
    "cover",
    "mypy",
    # BUG FIX: a missing comma here caused implicit string concatenation,
    # registering the bogus session name "check_lower_boundsdocs" so that
    # neither check_lower_bounds nor docs ran by default.
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
    """Run the unit test suite under every supported interpreter."""
    # Test-only dependencies first, then the package itself (editable).
    test_deps = ('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
    session.install(*test_deps)
    session.install('-e', '.')

    # Any positional args narrow the test target within tests/unit.
    target = os.path.join('tests', 'unit', ''.join(session.posargs))
    pytest_args = [
        'py.test',
        '--quiet',
        '--cov=google/cloud/recommender_v1beta1/',
        '--cov-config=.coveragerc',
        '--cov-report=term',
        '--cov-report=html',
        target,
    ]
    session.run(*pytest_args)
@nox.session(python='3.7')
def cover(session):
    """Report coverage aggregated from the unit-test runs, then erase it.

    Fails the session when total coverage is below 100%.
    """
    session.install("coverage", "pytest-cov")
    # Report first (enforcing the threshold), then reset the data files.
    for coverage_cmd in (
        ("coverage", "report", "--show-missing", "--fail-under=100"),
        ("coverage", "erase"),
    ):
        session.run(*coverage_cmd)
@nox.session(python=['3.6', '3.7'])
def mypy(session):
    """Static type-check the ``google`` namespace package with mypy."""
    session.install('mypy', 'types-pkg_resources')
    session.install('.')
    # --explicit-package-bases lets mypy resolve the namespace package layout.
    session.run('mypy', '--explicit-package-bases', 'google')
@nox.session
def update_lower_bounds(session):
    """Rewrite constraints.txt lower bounds to match setup.py."""
    session.install('google-cloud-testutils')
    session.install('.')

    checker_args = [
        'lower-bound-checker', 'update',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    ]
    session.run(*checker_args)
@nox.session
def check_lower_bounds(session):
    """Verify that setup.py lower bounds are reflected in constraints.txt."""
    session.install('google-cloud-testutils')
    session.install('.')

    checker_args = [
        'lower-bound-checker', 'check',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    ]
    session.run(*checker_args)
@nox.session(python='3.6')
def docs(session):
    """Build this library's HTML documentation with Sphinx."""
    session.install("-e", ".")
    session.install("sphinx<3.0.0", "alabaster", "recommonmark")

    # Start from a clean build tree so stale artifacts never leak in.
    build_dir = os.path.join("docs", "_build")
    shutil.rmtree(build_dir, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b", "html",
        "-d", os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
| 26.984962 | 96 | 0.627751 |
import os
import pathlib
import shutil
import subprocess
import sys
import nox
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
nox.sessions = [
"unit",
"cover",
"mypy",
"check_lower_bounds"
"docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/recommender_v1beta1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python='3.7')
def cover(session):
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
session.install('mypy', 'types-pkg_resources')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session
def update_lower_bounds(session):
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'update',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session
def check_lower_bounds(session):
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'check',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session(python='3.6')
def docs(session):
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W",
"-T",
"-N",
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| true | true |
f727578d6163bc38fe54684ae5660bc222bf771a | 9,991 | py | Python | improver_tests/between_thresholds/test_between_thresholds.py | pnijhara/improver | 5961a6fab9a79cd63a943eff07bf79d4e5f0ff03 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/between_thresholds/test_between_thresholds.py | pnijhara/improver | 5961a6fab9a79cd63a943eff07bf79d4e5f0ff03 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/between_thresholds/test_between_thresholds.py | pnijhara/improver | 5961a6fab9a79cd63a943eff07bf79d4e5f0ff03 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the OccurrenceBetweenThresholds plugin"""
import unittest
import iris
import numpy as np
from iris.tests import IrisTest
from improver.between_thresholds import OccurrenceBetweenThresholds
from ..set_up_test_cubes import set_up_percentile_cube, set_up_probability_cube
class Test_process(IrisTest):
    """Test the process method"""
    def setUp(self):
        """Set up a test cube with probability data"""
        # Leading axis is the threshold coordinate: one 3x3 slice per
        # threshold (4 slices to match the 4 thresholds below).
        data = np.array(
            [
                [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
                [[0.9, 0.9, 0.9], [0.8, 0.8, 0.8], [0.7, 0.7, 0.7]],
                [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]],
                [[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.1, 0.2, 0.2]],
            ],
            dtype=np.float32,
        )
        temp_thresholds = np.array([279, 280, 281, 282], dtype=np.float32)
        vis_thresholds = np.array([100, 1000, 5000, 10000], dtype=np.float32)
        self.temp_cube = set_up_probability_cube(data, temp_thresholds)
        # Visibility probabilities are "below threshold", so reuse the same
        # data flipped along the threshold axis.
        self.vis_cube = set_up_probability_cube(
            np.flip(data, axis=0),
            vis_thresholds,
            variable_name="visibility",
            threshold_units="m",
            spp__relative_to_threshold="below",
        )
        # set up a cube of rainfall rates in m s-1 (~1e-8 values)
        self.precip_cube = self.temp_cube.copy()
        self.precip_cube.coord("air_temperature").rename("rainfall_rate")
        self.precip_cube.coord("rainfall_rate").var_name = "threshold"
        self.precip_cube.coord("rainfall_rate").points = np.array(
            [0, 0.25, 0.5, 1], dtype=np.float32
        )
        self.precip_cube.coord("rainfall_rate").units = "mm h-1"
        self.precip_cube.coord("rainfall_rate").convert_units("m s-1")
    def test_above_threshold(self):
        """Test values from an "above threshold" cube"""
        threshold_ranges = [[280, 281], [281, 282]]
        expected_data = np.array(
            [
                [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]],
                [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
            ],
            dtype=np.float32,
        )
        plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), "K")
        result = plugin(self.temp_cube)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(
            result.name(), "probability_of_air_temperature_between_thresholds"
        )
        self.assertArrayAlmostEqual(result.data, expected_data)
        thresh_coord = result.coord("air_temperature")
        # Output threshold points are the upper bound of each range, with the
        # ranges themselves preserved as coordinate bounds.
        self.assertArrayAlmostEqual(thresh_coord.points, [281.0, 282.0])
        self.assertArrayAlmostEqual(thresh_coord.bounds, threshold_ranges)
        self.assertEqual(
            thresh_coord.attributes["spp__relative_to_threshold"], "between_thresholds"
        )
    def test_below_threshold(self):
        """Test values from a "below threshold" cube"""
        threshold_ranges = [[1000, 5000]]
        expected_data = np.array(
            [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32
        )
        plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), "m")
        result = plugin(self.vis_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
        self.assertArrayAlmostEqual(result.coord("visibility").points, [5000.0])
        self.assertArrayAlmostEqual(result.coord("visibility").bounds, threshold_ranges)
    def test_skip_threshold(self):
        """Test calculation works for non-adjacent thresholds"""
        threshold_ranges = [[100, 1000], [1000, 10000]]
        expected_data = np.array(
            [
                [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
                [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],
            ],
            dtype=np.float32,
        )
        plugin = OccurrenceBetweenThresholds(threshold_ranges, "m")
        result = plugin(self.vis_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_threshold_units(self):
        """Test calculation works for thresholds specified in different units
        from the cube data"""
        threshold_ranges = [[0.1, 1], [1, 10]]
        expected_data = np.array(
            [
                [[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
                [[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],
            ],
            dtype=np.float32,
        )
        plugin = OccurrenceBetweenThresholds(threshold_ranges, "km")
        result = plugin(self.vis_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
        # check original cube units are not modified
        self.assertEqual(self.vis_cube.coord("visibility").units, "m")
        # check output cube units match original cube
        self.assertEqual(result.coord("visibility").units, "m")
        self.assertArrayAlmostEqual(result.coord("visibility").points, [1000, 10000])
    def test_error_non_probability_cube(self):
        """Test failure if cube doesn't contain probabilities"""
        perc_cube = set_up_percentile_cube(
            np.ones((3, 3, 3), dtype=np.float32),
            np.array((25, 50, 75), dtype=np.float32),
        )
        plugin = OccurrenceBetweenThresholds([[25, 50]], "K")
        msg = "Input is not a probability cube"
        with self.assertRaisesRegex(ValueError, msg):
            plugin(perc_cube)
    def test_error_between_thresholds_cube(self):
        """Test failure if cube isn't above or below threshold"""
        # use plugin to generate a "between_thresholds" cube...
        between_thresholds_cube = OccurrenceBetweenThresholds(
            [[280, 281], [281, 282]], "K"
        )(self.temp_cube)
        plugin = OccurrenceBetweenThresholds([[281, 282]], "K")
        msg = "Input cube must contain"
        with self.assertRaisesRegex(ValueError, msg):
            plugin(between_thresholds_cube)
    def test_error_thresholds_unavailable(self):
        """Test error if cube doesn't contain the required thresholds"""
        threshold_ranges = [[10, 100], [1000, 30000]]
        plugin = OccurrenceBetweenThresholds(threshold_ranges, "m")
        msg = (
            "visibility threshold 10 m is not available\n"
            "visibility threshold 30000 m is not available"
        )
        with self.assertRaisesRegex(ValueError, msg):
            plugin(self.vis_cube)
    def test_threshold_matching_tolerance(self):
        """Test threshold matching succeeds for absolute values close to
        zero"""
        new_thresholds = np.array([272.15, 273.15, 274.15, 275.15], dtype=np.float32)
        self.temp_cube.coord("air_temperature").points = new_thresholds
        # Ranges in degC; 0 degC maps to 273.15 K, close to float32 rounding.
        threshold_ranges = [[-1, 0], [0, 2]]
        expected_data = np.array(
            [
                [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.3, 0.3, 0.3]],
                [[0.9, 0.9, 0.9], [0.7, 0.7, 0.7], [0.6, 0.5, 0.5]],
            ],
            dtype=np.float32,
        )
        plugin = OccurrenceBetweenThresholds(threshold_ranges, "degC")
        result = plugin(self.temp_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_thresholds_indistinguishable(self):
        """Test behaviour in a case where cube extraction cannot work within a
        tolerance of 1e-5"""
        # set threshold ranges in m s-1
        # (adjacent mm h-1 thresholds differ by only ~1e-7 in m s-1)
        points = self.precip_cube.coord("rainfall_rate").points.copy()
        threshold_ranges = [[points[1], points[2]]]
        msg = "Plugin cannot distinguish between thresholds at"
        with self.assertRaisesRegex(ValueError, msg):
            OccurrenceBetweenThresholds(threshold_ranges, "m s-1")
    def test_original_units_indistinguishable(self):
        """Test cubes where thresholds are indistinguisable in SI units can be
        correctly processed using threshold ranges specified in a unit with
        more than 1e-5 discrimination"""
        expected_data = np.array(
            [[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32
        )
        threshold_ranges = [[0.25, 0.5]]
        plugin = OccurrenceBetweenThresholds(threshold_ranges, "mm h-1")
        result = plugin(self.precip_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
# Allow the test module to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
| 44.404444 | 88 | 0.624362 |
import unittest
import iris
import numpy as np
from iris.tests import IrisTest
from improver.between_thresholds import OccurrenceBetweenThresholds
from ..set_up_test_cubes import set_up_percentile_cube, set_up_probability_cube
class Test_process(IrisTest):
def setUp(self):
data = np.array(
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[0.9, 0.9, 0.9], [0.8, 0.8, 0.8], [0.7, 0.7, 0.7]],
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]],
[[0.0, 0.0, 0.0], [0.1, 0.1, 0.1], [0.1, 0.2, 0.2]],
],
dtype=np.float32,
)
temp_thresholds = np.array([279, 280, 281, 282], dtype=np.float32)
vis_thresholds = np.array([100, 1000, 5000, 10000], dtype=np.float32)
self.temp_cube = set_up_probability_cube(data, temp_thresholds)
self.vis_cube = set_up_probability_cube(
np.flip(data, axis=0),
vis_thresholds,
variable_name="visibility",
threshold_units="m",
spp__relative_to_threshold="below",
)
self.precip_cube = self.temp_cube.copy()
self.precip_cube.coord("air_temperature").rename("rainfall_rate")
self.precip_cube.coord("rainfall_rate").var_name = "threshold"
self.precip_cube.coord("rainfall_rate").points = np.array(
[0, 0.25, 0.5, 1], dtype=np.float32
)
self.precip_cube.coord("rainfall_rate").units = "mm h-1"
self.precip_cube.coord("rainfall_rate").convert_units("m s-1")
def test_above_threshold(self):
threshold_ranges = [[280, 281], [281, 282]]
expected_data = np.array(
[
[[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]],
[[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
],
dtype=np.float32,
)
plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), "K")
result = plugin(self.temp_cube)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(
result.name(), "probability_of_air_temperature_between_thresholds"
)
self.assertArrayAlmostEqual(result.data, expected_data)
thresh_coord = result.coord("air_temperature")
self.assertArrayAlmostEqual(thresh_coord.points, [281.0, 282.0])
self.assertArrayAlmostEqual(thresh_coord.bounds, threshold_ranges)
self.assertEqual(
thresh_coord.attributes["spp__relative_to_threshold"], "between_thresholds"
)
def test_below_threshold(self):
threshold_ranges = [[1000, 5000]]
expected_data = np.array(
[[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32
)
plugin = OccurrenceBetweenThresholds(threshold_ranges.copy(), "m")
result = plugin(self.vis_cube)
self.assertArrayAlmostEqual(result.data, expected_data)
self.assertArrayAlmostEqual(result.coord("visibility").points, [5000.0])
self.assertArrayAlmostEqual(result.coord("visibility").bounds, threshold_ranges)
def test_skip_threshold(self):
threshold_ranges = [[100, 1000], [1000, 10000]]
expected_data = np.array(
[
[[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
[[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],
],
dtype=np.float32,
)
plugin = OccurrenceBetweenThresholds(threshold_ranges, "m")
result = plugin(self.vis_cube)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_threshold_units(self):
threshold_ranges = [[0.1, 1], [1, 10]]
expected_data = np.array(
[
[[0.1, 0.2, 0.3], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]],
[[0.9, 0.8, 0.7], [0.9, 0.8, 0.7], [0.9, 0.8, 0.7]],
],
dtype=np.float32,
)
plugin = OccurrenceBetweenThresholds(threshold_ranges, "km")
result = plugin(self.vis_cube)
self.assertArrayAlmostEqual(result.data, expected_data)
self.assertEqual(self.vis_cube.coord("visibility").units, "m")
self.assertEqual(result.coord("visibility").units, "m")
self.assertArrayAlmostEqual(result.coord("visibility").points, [1000, 10000])
def test_error_non_probability_cube(self):
perc_cube = set_up_percentile_cube(
np.ones((3, 3, 3), dtype=np.float32),
np.array((25, 50, 75), dtype=np.float32),
)
plugin = OccurrenceBetweenThresholds([[25, 50]], "K")
msg = "Input is not a probability cube"
with self.assertRaisesRegex(ValueError, msg):
plugin(perc_cube)
def test_error_between_thresholds_cube(self):
between_thresholds_cube = OccurrenceBetweenThresholds(
[[280, 281], [281, 282]], "K"
)(self.temp_cube)
plugin = OccurrenceBetweenThresholds([[281, 282]], "K")
msg = "Input cube must contain"
with self.assertRaisesRegex(ValueError, msg):
plugin(between_thresholds_cube)
def test_error_thresholds_unavailable(self):
threshold_ranges = [[10, 100], [1000, 30000]]
plugin = OccurrenceBetweenThresholds(threshold_ranges, "m")
msg = (
"visibility threshold 10 m is not available\n"
"visibility threshold 30000 m is not available"
)
with self.assertRaisesRegex(ValueError, msg):
plugin(self.vis_cube)
def test_threshold_matching_tolerance(self):
new_thresholds = np.array([272.15, 273.15, 274.15, 275.15], dtype=np.float32)
self.temp_cube.coord("air_temperature").points = new_thresholds
threshold_ranges = [[-1, 0], [0, 2]]
expected_data = np.array(
[
[[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.3, 0.3, 0.3]],
[[0.9, 0.9, 0.9], [0.7, 0.7, 0.7], [0.6, 0.5, 0.5]],
],
dtype=np.float32,
)
plugin = OccurrenceBetweenThresholds(threshold_ranges, "degC")
result = plugin(self.temp_cube)
self.assertArrayAlmostEqual(result.data, expected_data)
def test_thresholds_indistinguishable(self):
points = self.precip_cube.coord("rainfall_rate").points.copy()
threshold_ranges = [[points[1], points[2]]]
msg = "Plugin cannot distinguish between thresholds at"
with self.assertRaisesRegex(ValueError, msg):
OccurrenceBetweenThresholds(threshold_ranges, "m s-1")
def test_original_units_indistinguishable(self):
expected_data = np.array(
[[0.8, 0.7, 0.6], [0.7, 0.6, 0.5], [0.6, 0.5, 0.4]], dtype=np.float32
)
threshold_ranges = [[0.25, 0.5]]
plugin = OccurrenceBetweenThresholds(threshold_ranges, "mm h-1")
result = plugin(self.precip_cube)
self.assertArrayAlmostEqual(result.data, expected_data)
if __name__ == "__main__":
unittest.main()
| true | true |
f72759c1213da47f3d4defce97fc37be89c0f650 | 142 | py | Python | service_app/service_app/doctype/service/test_service.py | NahomAraya/Erpnext-App | 4648aa95b1b6ebf4ef9f80c2c02dbeb22277531d | [
"MIT"
] | null | null | null | service_app/service_app/doctype/service/test_service.py | NahomAraya/Erpnext-App | 4648aa95b1b6ebf4ef9f80c2c02dbeb22277531d | [
"MIT"
] | null | null | null | service_app/service_app/doctype/service/test_service.py | NahomAraya/Erpnext-App | 4648aa95b1b6ebf4ef9f80c2c02dbeb22277531d | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Appman and Contributors
# See license.txt
# import frappe
import unittest
class TestService(unittest.TestCase):
pass
| 15.777778 | 45 | 0.774648 |
import unittest
class TestService(unittest.TestCase):
pass
| true | true |
f72759c8061e47306fba82f635b426668b33c208 | 2,918 | py | Python | modules/swar/doc/splatted_prod.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | 1 | 2022-03-24T03:35:10.000Z | 2022-03-24T03:35:10.000Z | modules/swar/doc/splatted_prod.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | null | null | null | modules/swar/doc/splatted_prod.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | null | null | null | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'special' : ['swar'],
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 24/02/2011',
'simd_included' : ['#include <nt2/include/functions/prod.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 24/02/2011',
},
'ranges' : {
'default' : [['nt2::Valmin<T>()', 'nt2::Valmax<T>()']],
'real_' : [['T(-100)', 'T(100)']],
'signed_int_' : [],
'unsigned_int_' : [],
},
'specific_values' : {
'default' : {
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'real_' : {
'nt2::Inf<T>()' : {'result' : 'nt2::Inf<r_t>()','ulp_thresh' : '0',},
'nt2::Minf<T>()' : {'result' : 'nt2::Minf<r_t>()','ulp_thresh' : '0',},
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::Nan<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'signed_int_' : {
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
},
'verif_test' : {
'nb_rand' : {
'default' : 'NT2_NB_RANDOM_TEST',
},
'property_call' : {
'default' : ['nt2::splatted_prod(a0)'],
},
'property_value' : {
'default' : ['(a0)'],
},
'ulp_thresh' : {
'default' : ['0.5'],
},
'scalar_simul' :{
'default' : [
" T p= nt2::prod(a0);",
" for(uint32_t i=0; i<cardinal_of<n_t>::value; i++)",
" {",
" NT2_TEST_EQUAL(v[i],p);",
" }",
]
},
},
},
},
]
| 38.906667 | 90 | 0.331391 | [ ty' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'special' : ['swar'],
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 24/02/2011',
'simd_included' : ['#include <nt2/include/functions/prod.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 24/02/2011',
},
'ranges' : {
'default' : [['nt2::Valmin<T>()', 'nt2::Valmax<T>()']],
'real_' : [['T(-100)', 'T(100)']],
'signed_int_' : [],
'unsigned_int_' : [],
},
'specific_values' : {
'default' : {
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'real_' : {
'nt2::Inf<T>()' : {'result' : 'nt2::Inf<r_t>()','ulp_thresh' : '0',},
'nt2::Minf<T>()' : {'result' : 'nt2::Minf<r_t>()','ulp_thresh' : '0',},
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::Nan<T>()' : {'result' : 'nt2::Nan<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
'signed_int_' : {
'nt2::Mone<T>()' : {'result' : 'nt2::Mone<r_t>()','ulp_thresh' : '0',},
'nt2::One<T>()' : {'result' : 'nt2::One<r_t>()','ulp_thresh' : '0',},
'nt2::Zero<T>()' : {'result' : 'nt2::Zero<r_t>()','ulp_thresh' : '0',},
},
},
'verif_test' : {
'nb_rand' : {
'default' : 'NT2_NB_RANDOM_TEST',
},
'property_call' : {
'default' : ['nt2::splatted_prod(a0)'],
},
'property_value' : {
'default' : ['(a0)'],
},
'ulp_thresh' : {
'default' : ['0.5'],
},
'scalar_simul' :{
'default' : [
" T p= nt2::prod(a0);",
" for(uint32_t i=0; i<cardinal_of<n_t>::value; i++)",
" {",
" NT2_TEST_EQUAL(v[i],p);",
" }",
]
},
},
},
},
]
| true | true |
f7275a40150b0c0246d5e22711d983f8a33d9abc | 1,585 | py | Python | tests/service/watcher/test_util.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | 7 | 2021-10-06T17:02:13.000Z | 2022-03-22T10:45:23.000Z | tests/service/watcher/test_util.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | 2 | 2022-03-16T11:20:54.000Z | 2022-03-24T13:54:49.000Z | tests/service/watcher/test_util.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | null | null | null | from unittest import IsolatedAsyncioTestCase
from unittest.mock import patch, AsyncMock, call
from guapow import __app_name__
from guapow.service.watcher import util
class MapProcessesTest(IsolatedAsyncioTestCase):
    """Tests for util.map_processes(), which shells out to `ps` twice (comm, then cmd)."""

    # Patch the syscall helper so no real `ps` is spawned: first tuple is the
    # '%p#%c' (comm) output, second is the '%p#%a' (full cmd) output.
    @patch(f'{__app_name__}.service.watcher.util.async_syscall', side_effect=[(0, " 1 # a \n 2 # b \n"), (0, "1#/bin/a\n 2 # /bin/b -c \n")])
    async def test__must_return_a_dict_with_pids_as_keys_and_tuples_as_values_with_the_cmd_and_comm(self, async_syscall: AsyncMock):
        procs = await util.map_processes()
        # Both ps invocations must happen; order is not guaranteed.
        async_syscall.assert_has_awaits([call('ps -Ao "%p#%c" -ww --no-headers'),
                                         call('ps -Ao "%p#%a" -ww --no-headers')], any_order=True)
        self.assertIsInstance(procs, dict)
        self.assertEqual({1: ('/bin/a', 'a'), 2: ('/bin/b -c', 'b')}, procs)

    # pid 1 has only a comm, pid 2 has only a cmd: neither should be returned.
    @patch(f'{__app_name__}.service.watcher.util.async_syscall', side_effect=[(0, "1#a\n3#c\n"), (0, "\n 2#/bin/b -c \n3#/bin/c\n")])
    async def test__must_not_return_processes_with_comm_or_cmd_missing(self, async_syscall: AsyncMock):
        procs = await util.map_processes()
        self.assertEqual(2, async_syscall.await_count)
        self.assertIsInstance(procs, dict)
        self.assertEqual({3: ('/bin/c', 'c')}, procs)

    # A non-zero exit code from `ps` yields None rather than a partial mapping.
    @patch(f'{__app_name__}.service.watcher.util.async_syscall', return_value=(1, ""))
    async def test__must_return_none_when_the_syscall_fails(self, async_syscall: AsyncMock):
        procs = await util.map_processes()
        self.assertEqual(2, async_syscall.await_count)
        self.assertIsNone(procs)
from unittest.mock import patch, AsyncMock, call
from guapow import __app_name__
from guapow.service.watcher import util
class MapProcessesTest(IsolatedAsyncioTestCase):
@patch(f'{__app_name__}.service.watcher.util.async_syscall', side_effect=[(0, " 1 # a \n 2 # b \n"), (0, "1#/bin/a\n 2 # /bin/b -c \n")])
async def test__must_return_a_dict_with_pids_as_keys_and_tuples_as_values_with_the_cmd_and_comm(self, async_syscall: AsyncMock):
procs = await util.map_processes()
async_syscall.assert_has_awaits([call('ps -Ao "%p#%c" -ww --no-headers'),
call('ps -Ao "%p#%a" -ww --no-headers')], any_order=True)
self.assertIsInstance(procs, dict)
self.assertEqual({1: ('/bin/a', 'a'), 2: ('/bin/b -c', 'b')}, procs)
@patch(f'{__app_name__}.service.watcher.util.async_syscall', side_effect=[(0, "1#a\n3#c\n"), (0, "\n 2#/bin/b -c \n3#/bin/c\n")])
async def test__must_not_return_processes_with_comm_or_cmd_missing(self, async_syscall: AsyncMock):
procs = await util.map_processes()
self.assertEqual(2, async_syscall.await_count)
self.assertIsInstance(procs, dict)
self.assertEqual({3: ('/bin/c', 'c')}, procs)
@patch(f'{__app_name__}.service.watcher.util.async_syscall', return_value=(1, ""))
async def test__must_return_none_when_the_syscall_fails(self, async_syscall: AsyncMock):
procs = await util.map_processes()
self.assertEqual(2, async_syscall.await_count)
self.assertIsNone(procs)
| true | true |
f7275ca4a9614ae34d5dae69713b06a583c3d368 | 179 | py | Python | posts/forms.py | Kelit/My_blog | 891f082ac6b7a02ffbc8d106168cb0fd017ba3ef | [
"Apache-2.0"
] | null | null | null | posts/forms.py | Kelit/My_blog | 891f082ac6b7a02ffbc8d106168cb0fd017ba3ef | [
"Apache-2.0"
] | null | null | null | posts/forms.py | Kelit/My_blog | 891f082ac6b7a02ffbc8d106168cb0fd017ba3ef | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
class PostForm(FlaskForm):
title = StringField('Заголовок')
body = TextAreaField('Текст')
| 22.375 | 46 | 0.77095 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
class PostForm(FlaskForm):
title = StringField('Заголовок')
body = TextAreaField('Текст')
| true | true |
f7275ccef7ac1443c9619306e2735d5ade2696fa | 64 | py | Python | ptfims/__init__.py | rbiswas4/ptfdata | f50efd077bbf091e5108a6c95b0e24e4768ca4e6 | [
"MIT"
] | null | null | null | ptfims/__init__.py | rbiswas4/ptfdata | f50efd077bbf091e5108a6c95b0e24e4768ca4e6 | [
"MIT"
] | null | null | null | ptfims/__init__.py | rbiswas4/ptfdata | f50efd077bbf091e5108a6c95b0e24e4768ca4e6 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .ptfimages import *
| 21.333333 | 38 | 0.84375 | from __future__ import absolute_import
from .ptfimages import *
| true | true |
f7275d33269d0a2ea63c7f66122d5786bb653174 | 870 | py | Python | macWall.py | mathematics128/WinDD_Packaged_Wall | ec136adcf75e3f9c456149b995e2c0744bfe3c61 | [
"MIT"
] | null | null | null | macWall.py | mathematics128/WinDD_Packaged_Wall | ec136adcf75e3f9c456149b995e2c0744bfe3c61 | [
"MIT"
] | null | null | null | macWall.py | mathematics128/WinDD_Packaged_Wall | ec136adcf75e3f9c456149b995e2c0744bfe3c61 | [
"MIT"
] | null | null | null | from os import system, listdir
from PIL import Image
num = int(input('请输入你想生成的缩略图的长: ') )
for pic in listdir('.'):
if pic[-4:] == '.jpg':
tmp_pic = pic[:-4] + '.png'
temp_pic = pic[:-4] + '.bmp'
system('ffmpeg -i ' + pic + ' -vf scale=' + str(num) + ':-1 ' + tmp_pic)
system('ffmpeg -i ' + tmp_pic + ' -vf crop=' + str(num) + ':' + str(num * 0.5625) + ' ' + temp_pic)
img = Image.new('RGB', (num, int(num * 0.5625) ), (0, 0, 0) )
zd = eval(input('请输入图片按顺序对应的字典 (参考theme.json文件) : ') )
name = input('请输入图片的前缀名称: ')
for i in range(len(zd)):
i += 1
box = (int(num / len(zd) ) * (i - 1), 0, num, int(num * 0.5625) )
i = zd[i]
pic = Image.open(name + str(i) + '.bmp')
tmp = pic.crop(box)
img.paste(tmp, box)
pic.close()
system('del *.bmp *.png')
img.save('thumbnail.png')
img.close()
| 33.461538 | 108 | 0.514943 | from os import system, listdir
from PIL import Image
num = int(input('请输入你想生成的缩略图的长: ') )
for pic in listdir('.'):
if pic[-4:] == '.jpg':
tmp_pic = pic[:-4] + '.png'
temp_pic = pic[:-4] + '.bmp'
system('ffmpeg -i ' + pic + ' -vf scale=' + str(num) + ':-1 ' + tmp_pic)
system('ffmpeg -i ' + tmp_pic + ' -vf crop=' + str(num) + ':' + str(num * 0.5625) + ' ' + temp_pic)
img = Image.new('RGB', (num, int(num * 0.5625) ), (0, 0, 0) )
zd = eval(input('请输入图片按顺序对应的字典 (参考theme.json文件) : ') )
name = input('请输入图片的前缀名称: ')
for i in range(len(zd)):
i += 1
box = (int(num / len(zd) ) * (i - 1), 0, num, int(num * 0.5625) )
i = zd[i]
pic = Image.open(name + str(i) + '.bmp')
tmp = pic.crop(box)
img.paste(tmp, box)
pic.close()
system('del *.bmp *.png')
img.save('thumbnail.png')
img.close()
| true | true |
f7275daebdad04b10427a7fe30165f0dd0fc3904 | 533 | py | Python | root/messages.py | FilmyFather/TG-RenameBot | ae5e21c0da7c869c989a4ab7e1c79305f2ad3b61 | [
"MIT"
] | 46 | 2021-05-30T14:35:48.000Z | 2022-02-25T09:58:12.000Z | root/messages.py | FilmyFather/TG-RenameBot | ae5e21c0da7c869c989a4ab7e1c79305f2ad3b61 | [
"MIT"
] | 4 | 2021-08-10T14:11:52.000Z | 2021-12-30T17:59:28.000Z | root/messages.py | FilmyFather/TG-RenameBot | ae5e21c0da7c869c989a4ab7e1c79305f2ad3b61 | [
"MIT"
] | 102 | 2021-05-30T14:11:33.000Z | 2022-03-30T06:36:31.000Z | class Translation(object):
START_TEXT = "**I'm a Rename and Convert Bot\nJust send me any media to change file name.\nUse /help command for more details **"
######################
HELP_USER = """**>>Send File/Video\n>>Select desired Option\n>>And Done wait for it to process files**"""
DOWNLOAD_MSG = "**Downloading **⏬"
DOWNLOAD_FAIL_MSG = "**Failed to Download File**❎"
UPLOAD_MSG = "**Uploading** ⏫"
UPLOAD_FAIL_MSG = "**Failed to Upload File**❎"
UPLOAD_DONE_MSG = "**Uploaded Successfully 💡"
| 53.3 | 134 | 0.634146 | class Translation(object):
START_TEXT = "**I'm a Rename and Convert Bot\nJust send me any media to change file name.\nUse /help command for more details **"
######################
HELP_USER = """**>>Send File/Video\n>>Select desired Option\n>>And Done wait for it to process files**"""
DOWNLOAD_MSG = "**Downloading **⏬"
DOWNLOAD_FAIL_MSG = "**Failed to Download File**❎"
UPLOAD_MSG = "**Uploading** ⏫"
UPLOAD_FAIL_MSG = "**Failed to Upload File**❎"
UPLOAD_DONE_MSG = "**Uploaded Successfully 💡"
| true | true |
f7275dd92e798d7446dcdaf64a7b23e1afc7ec27 | 86,823 | py | Python | phriky_units/test_cps_units_checker.py | unl-nimbus-lab/phriky-units | 16c8cdd91de0899411b139e5a94fcb4ea8104ad2 | [
"MIT"
] | 22 | 2017-07-18T09:39:34.000Z | 2021-09-16T09:41:03.000Z | phriky_units/test_cps_units_checker.py | unl-nimbus-lab/phriky-units | 16c8cdd91de0899411b139e5a94fcb4ea8104ad2 | [
"MIT"
] | 9 | 2016-09-04T13:33:15.000Z | 2018-01-05T22:39:03.000Z | phriky_units/test_cps_units_checker.py | unl-nimbus-lab/phriky-units | 16c8cdd91de0899411b139e5a94fcb4ea8104ad2 | [
"MIT"
] | 4 | 2016-12-07T16:34:57.000Z | 2019-04-03T06:51:55.000Z | #!/usr/local/bin/python
import sys
# sys.path.append('/Users/jore/courses/NIMBUS/RESEARCH/CPS_TYPES/cps_units/')
import unittest
from detect_physical_unit_inconsistencies import CPSUnitsChecker
from unit_error_types import UnitErrorTypes
from unit_error import UnitError
import os
# Module-wide debug switches; individual tests also toggle per-checker flags.
global_debug = False
global_debug_verbose = False
global_debug_AST = False
class TestStringMethods(unittest.TestCase):
def test_function_return_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
cps_unit_checker.debug_scope = False
dump_file = './dump_files_for_tests/test_it_function_return_0.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
units_for_f1 = []
# TEST THAT UNITS ARE ASSIGNED TO FUNCTION
for tw in cps_unit_checker.all_tree_walkers:
so = tw.symbol_helper.function_dictionary['scopeObject']
if so.function:
if so.function.name == 'f1':
units_for_f1 = so.function.return_units
self.assertEquals(units_for_f1, [{'meter': 1}], 'Incorrect units returned for function: f1 . Expected [{\'meter\':1}], received %s' % units_for_f1)
# TEST THAT UNITS ARE RECEIVED TO FUNCTION
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
    def test_function_return_1(self):
        ''' x SHOULD END UP M/S, but so far THERE'S NO MECHANISM FOR PASSING UNITS IN TO A FUNCTION
        '''
        # NOTE(review): this test makes no assertion — it only verifies the
        # checker runs to completion on this dump file without raising.
        cps_unit_checker = CPSUnitsChecker()
        dump_file = './dump_files_for_tests/test_it_function_return_1.cpp.dump'
        source_file = dump_file.replace('.dump','')
        cps_unit_checker.main_run_check(dump_file, source_file)
        for tw in cps_unit_checker.all_tree_walkers:
            so = tw.symbol_helper.function_dictionary['scopeObject']
            if so.function:
                if so.function.name == 'f1':
                    # Collected but never asserted on (see note above the loop).
                    units_for_f1 = so.function.return_units
def test_comparisons_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST= False
dump_file = './dump_files_for_tests/test_it_comparisons_1.cpp.dump'
source_file = './dump_files_for_tests/test_it_comparisons_1.cpp'
cps_unit_checker.main_run_check(dump_file, source_file)
e = cps_unit_checker.errors[0]
# ORACLES
token_left_units_oracle = [{'meter': 1}]
token_right_units_oracle = [{'second': -1, 'meter': 1}]
# ASSERTIONS
self.assertEqual(e.token.str, '>')
self.assertEqual(e.token_left.units, token_left_units_oracle)
self.assertEqual(e.token_right.units, token_right_units_oracle)
def test_logical_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_logical_1.cpp.dump'
source_file = './dump_files_for_tests/test_it_logical_1.cpp'
cps_unit_checker.main_run_check(dump_file, source_file)
# TEST 1
e = cps_unit_checker.errors[0]
# ORACLES
token_right_units_oracle = [{'meter': 1}]
# ASSERTIONS
self.assertEqual(e.linenr, 13)
self.assertEqual(e.token.str, '&&')
self.assertEqual(e.token_right.units, token_right_units_oracle)
# TEST 2
e = cps_unit_checker.errors[1]
# ORACLES
token_left_units_oracle = [{'meter': 1}]
# ASSERTIONS
self.assertEqual(e.linenr, 18)
self.assertEqual(e.token.str, '||')
self.assertEqual(e.token_left.units, token_left_units_oracle)
def test_abs_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
def test_abs_namespace_std_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_namespace_std.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
def test_abs_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 9
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_abs_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr =11
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_multiplication_assignment_in_multi_configurations_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_assignment_in_multi_configurations.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'a_geometry_msgs_Accel.linear.x'
var_linenr = 19
my_oracle = [{'second': -4, 'meter': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_unit_propagation_by_multiplication_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
#cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_unit_propagation_by_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][14]['units']
my_oracle = [{'second': -4, 'meter': 2}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_unit_propagation_by_division_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
#cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_unit_propagation_by_division_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][14]['units']
my_oracle = None
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_mulitple_units_assigned(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiple_units_assigned_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
expected_errors = ["test_it_multiple_units_assigned_1.cpp : 11 MULTIPLE UNITS BY ASSIGNMENT: [{'second': -1, 'meter': 1}, {'second': -2, 'meter': 2}]"]
# self.assertListEqual([e['error_msg'] for e in cps_unit_checker.errors], expected_errors)
# TEST QUANTITY OF ERRORS
self.assertEqual(1, len(cps_unit_checker.errors))
# TEST TyPE OF ERROR
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
# TEST VALUE OF ERROR
var_name = 'a_geometry_msgs_Accel.linear.x'
var_linenr =11
my_oracle = [{'second': -2, 'meter': 1}, {'second': -4, 'meter': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = None
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][14]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][14]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_half_units(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_half_units.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][11]['units']
my_oracle = [{'meter': 0.5}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_atan2_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan2_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][7]['units']
my_oracle = [{'radian': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_atan2_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
#cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan2_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][8]['units']
my_oracle = [{'radian': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_toSec(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_toSec_0.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'duration' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['duration'][7]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'second' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['second'][9]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][12]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][13]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr =10
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 11
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 10
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:%s Expected: %s received %s' % (var_name, my_oracle, actual_units))
def test_floor_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_floor_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr = 8
my_oracle = [{'meter': 1, 'second':-1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ceil_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_ceil_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr = 8
my_oracle = [{'meter': 1, 'second':-1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_acos_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_acos_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_asin_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_asin_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_atan_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ternary_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ternary_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 9
my_oracle = [{'second': -1, 'meter': 1}, {'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_function_args_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# actual_units = None
f = cps_unit_checker.current_configuration.functions[0].arg_units
self.assertEqual(f[0][0]['linenr'], 13)
self.assertEqual(f[0][0]['units'], [{'meter': 1}])
def test_function_args_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for f in cps_unit_checker.current_configuration.functions[0].arg_units:
self.assertEqual(f[0]['linenr'], 13)
self.assertEqual(f[0]['units'], [{'meter': 1}])
def test_function_args_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle_1 = 4
my_oracle_2 = [{'meter': 1}]
actual_units = None
all_units_list = cps_unit_checker.current_configuration.functions[0].arg_units
self.assertEqual(len(all_units_list), my_oracle_1)
for u in all_units_list:
self.assertEqual(u[0]['units'], my_oracle_2)
def test_function_args_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle = [{'meter': 1}]
actual_units = None
for f in cps_unit_checker.current_configuration.functions:
for arg_u in f.arg_units:
for arg_use_on_line in arg_u:
self.assertEqual(arg_use_on_line['units'], my_oracle)
def test_function_args_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle_1 = [{'meter': 1}]
my_oracle_2 = 15
f = cps_unit_checker.current_configuration.functions[0]
self.assertEqual(f.arg_units[0][0]['units'], my_oracle_1)
self.assertEqual(f.arg_units[0][0]['linenr'], my_oracle_2)
def test_division_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'second': 2, 'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr =10
my_oracle = [{'second': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_logical_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_logical_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
def test_error_type_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_error_return_type_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.current_file_under_analysis = dump_file
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
def test_laser_scan_range_size_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 7
my_oracle = None
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
def test_laser_scan_range_size_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 7
my_oracle = [{'meter':1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_duration_isZero_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_duration_isZero_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 6
my_oracle = None
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_duration_isZero_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_duration_isZero_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 6
my_oracle = [{'second':1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_header_include_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/src/test_it_header_include_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
def test_ros_header_include_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/src/test_it_header_include_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(3, len(cps_unit_checker.errors))
# WEAKER - SOMETHING STOCASTIC IS HAPPENING
e = cps_unit_checker.errors[0]
self.assertEqual(7, e.linenr)
self.assertEqual('./dump_files_for_tests/src/../include/test_it_header_include_2.h', e.get_file_URI_where_error_occured())
e = cps_unit_checker.errors[1]
self.assertEqual(5, e.linenr)
self.assertEqual('./dump_files_for_tests/src/test_it_header_include_2.cpp', e.get_file_URI_where_error_occured())
# DON'T ASSIGN UNITS TO ARRAYS WHEN array.empty() IS CALLED
def test_laser_range_empty_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_range_empty_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# DON'T ASSIGN UNITS TO ARRAYS WHEN time.isZero() IS CALLED
def test_ros_isZero_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_isZero_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# DON'T ASSIGN UNITS DURING x = y = z = 0
def test_multiple_initialization_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiple_initialization.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (INT)
def test_it_multiplication_with_constant_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 9
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_constants = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.')
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT)
def test_it_multiplication_with_constant_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 9
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_constants = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.')
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT)
def test_it_operator_with_unknown_variable_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 10
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_unknown_variable = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_unknown_variable']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_unknown_variable, 'Unit inference should be weakened by unknown variable interaction, but is still strong.')
# WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT
def test_it_operator_with_unknown_variable_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
for e in cps_unit_checker.errors:
self.assertTrue(e.is_warning, 'Should be a warning but is not marked as such')
# WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT
def test_it_operator_with_unknown_variable_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 8
my_oracle = [{'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 8
my_oracle = [{'second': -1}, {'second': -1, 'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 9
my_oracle = [{'second': -1 }, {'second': -1, 'meter': 1}]
actual_units = None
is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# PROTECTION AGAINST MULTILINE
def test_it_multiline_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiline_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 25
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'second': -2.0, 'meter': 2.0}, {'second': -3.0, 'meter': 3.0}, {'second': -2.0, 'meter': 1.0}, {'second': -3.0, 'meter': 2.0}, {'second': -4.0, 'meter': 3.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
# KNOW FUNCTION quatToRPY
def test_it_quatToRPY_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quatToRPY_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 17
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# WEAK INFERENCE WARNING
def test_it_weak_inference_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference: %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 19
my_oracle = [{'second': -1.0, 'meter': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# WEAK INFERENCE WARNING
def test_it_weak_inference_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# STRONG INFERENCE BECAUSE ADDITION
def test_it_weak_inference_addition_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
var_name = 'tw.linear.y'
var_linenr = 23
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER
def test_it_weak_inference_addition_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
var_name = 'tw.linear.y'
var_linenr = 23
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER
def test_it_weak_inference_addition_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1.0}, {'second':1.}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertTrue(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - STRONG
def test_it_addition_without_assignment_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK UNKNOWN VARIABLE
def test_it_addition_without_assignment_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK CONSTANT
def test_it_addition_without_assignment_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR SUBTRACTION OF INCOMPATIBLE UNITS - STRONG CONSTANT
def test_it_addition_without_assignment_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# ADDITION OF RADIANS
def test_it_radian_addition_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[1].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# ADDITION OF RADIANS
def test_it_radian_addition_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# MULTIPLICATION OF RADIANS
def test_it_radian_multiplication_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# MULTIPLICATION OF RADIANS 2
def test_it_radian_multiplication_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# MULTIPLICATION OF RADIANS
def test_it_radian_multiplication_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# MULTIPLICATION OF RADIANS 2
def test_it_radian_multiplication_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# getXYZ
def test_it_getXYZ_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(0, len(cps_unit_checker.errors))
# getXYZ
def test_it_getXYZ_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 10
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'meter':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# getXYZ
def test_it_getXYZ_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 10
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'quaternion':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# getXYZ
def test_it_getXYZ_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# getXYZ
def test_it_getXYZ_5 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# QUATERNION ADDITION 1
def test_it_quaternion_addition_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[1].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# QUATERNION ADDITION 2
def test_it_quaternion_addition_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# QUATERNION ADDITION 3
def test_it_quaternion_addition_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION ADDITION 4
def test_it_quaternion_addition_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 1
def test_it_quaternion_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 2
def test_it_quaternion_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 3
def test_it_quaternion_multiplication_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 4
def test_it_quaternion_multiplication_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION CLOSURE
def test_it_quaternion_closed_under_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION CLOSURE
def test_it_quaternion_closed_under_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# RADIAN MULTIPLICATION CLOSURE
def test_it_radian_closed_under_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# RADIAN MULTIPLICATION CLOSURE
def test_it_radian_closed_under_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# dt Heuristic
def test_it_dt_heuristic (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_dt_heuristic_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
    # PLUS-EQUALS (+=) PROPAGATION
def test_it_plus_equals_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_plus_equals_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
    # RANGE HANDLING
def test_it_range_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_range_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# same named argument in interface scope bug
def test_it_scope_bug_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_cppcheck_scope_bug_at_argument_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# Allow running this test module directly (e.g. `python <file>`); unittest
# discovers and runs all TestCase methods.
if __name__ == '__main__':
    unittest.main()
| 52.588128 | 212 | 0.685176 |
import sys
import unittest
from detect_physical_unit_inconsistencies import CPSUnitsChecker
from unit_error_types import UnitErrorTypes
from unit_error import UnitError
import os
# Module-level debug switches.
# NOTE(review): the tests below configure debug flags on each CPSUnitsChecker
# instance directly; these globals appear unused in this chunk -- confirm
# against the rest of the file before removing.
global_debug = False
global_debug_verbose = False
global_debug_AST = False
class TestStringMethods(unittest.TestCase):
def test_function_return_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
cps_unit_checker.debug_scope = False
dump_file = './dump_files_for_tests/test_it_function_return_0.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
units_for_f1 = []
for tw in cps_unit_checker.all_tree_walkers:
so = tw.symbol_helper.function_dictionary['scopeObject']
if so.function:
if so.function.name == 'f1':
units_for_f1 = so.function.return_units
self.assertEquals(units_for_f1, [{'meter': 1}], 'Incorrect units returned for function: f1 . Expected [{\'meter\':1}], received %s' % units_for_f1)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
    def test_function_return_1(self):
        """Run the checker end-to-end on test_it_function_return_1.cpp and
        read f1's inferred return units.

        NOTE(review): this test makes no assertions -- it only verifies the
        run completes without raising. TODO: assert an expected value for
        `units_for_f1`.
        """
        cps_unit_checker = CPSUnitsChecker()
        dump_file = './dump_files_for_tests/test_it_function_return_1.cpp.dump'
        source_file = dump_file.replace('.dump','')
        cps_unit_checker.main_run_check(dump_file, source_file)
        # Scan all tree walkers for the scope object belonging to 'f1'.
        for tw in cps_unit_checker.all_tree_walkers:
            so = tw.symbol_helper.function_dictionary['scopeObject']
            if so.function:
                if so.function.name == 'f1':
                    units_for_f1 = so.function.return_units
def test_comparisons_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST= False
dump_file = './dump_files_for_tests/test_it_comparisons_1.cpp.dump'
source_file = './dump_files_for_tests/test_it_comparisons_1.cpp'
cps_unit_checker.main_run_check(dump_file, source_file)
e = cps_unit_checker.errors[0]
token_left_units_oracle = [{'meter': 1}]
token_right_units_oracle = [{'second': -1, 'meter': 1}]
self.assertEqual(e.token.str, '>')
self.assertEqual(e.token_left.units, token_left_units_oracle)
self.assertEqual(e.token_right.units, token_right_units_oracle)
def test_logical_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_logical_1.cpp.dump'
source_file = './dump_files_for_tests/test_it_logical_1.cpp'
cps_unit_checker.main_run_check(dump_file, source_file)
e = cps_unit_checker.errors[0]
token_right_units_oracle = [{'meter': 1}]
self.assertEqual(e.linenr, 13)
self.assertEqual(e.token.str, '&&')
self.assertEqual(e.token_right.units, token_right_units_oracle)
e = cps_unit_checker.errors[1]
token_left_units_oracle = [{'meter': 1}]
self.assertEqual(e.linenr, 18)
self.assertEqual(e.token.str, '||')
self.assertEqual(e.token_left.units, token_left_units_oracle)
def test_abs_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
dump_file = './dump_files_for_tests/test_it_known_function_abs.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
def test_abs_namespace_std_0(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_namespace_std.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
def test_abs_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 9
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_abs_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_abs_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr =11
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_multiplication_assignment_in_multi_configurations_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_assignment_in_multi_configurations.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'a_geometry_msgs_Accel.linear.x'
var_linenr = 19
my_oracle = [{'second': -4, 'meter': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_unit_propagation_by_multiplication_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
dump_file = './dump_files_for_tests/test_it_unit_propagation_by_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][14]['units']
my_oracle = [{'second': -4, 'meter': 2}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_unit_propagation_by_division_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
dump_file = './dump_files_for_tests/test_it_unit_propagation_by_division_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][14]['units']
my_oracle = None
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_mulitple_units_assigned(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiple_units_assigned_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
expected_errors = ["test_it_multiple_units_assigned_1.cpp : 11 MULTIPLE UNITS BY ASSIGNMENT: [{'second': -1, 'meter': 1}, {'second': -2, 'meter': 2}]"]
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
var_name = 'a_geometry_msgs_Accel.linear.x'
var_linenr =11
my_oracle = [{'second': -2, 'meter': 1}, {'second': -4, 'meter': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][12]['units']
my_oracle = None
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][14]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][14]['units']
my_oracle = [{'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_sqrt_half_units(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_sqrt_half_units.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'x' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['x'][11]['units']
my_oracle = [{'meter': 0.5}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_atan2_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_verbose = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan2_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][7]['units']
my_oracle = [{'radian': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_known_functions_atan2_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan2_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][8]['units']
my_oracle = [{'radian': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_toSec(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_toSec_0.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'duration' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['duration'][7]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'second' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['second'][9]['units']
my_oracle = [{'second': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict and 11 in s.var_ordered_dict['f']:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][12]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][13]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_float_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_float_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if 'f' in s.var_ordered_dict:
actual_units = s.var_ordered_dict['f'][11]['units']
my_oracle = [{'second': -1, 'meter': 1}]
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr =10
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 11
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_pow_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_pow_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 10
my_oracle = [{'meter': 4}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:%s Expected: %s received %s' % (var_name, my_oracle, actual_units))
def test_floor_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_floor_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr = 8
my_oracle = [{'meter': 1, 'second':-1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ceil_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_ceil_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 's'
var_linenr = 8
my_oracle = [{'meter': 1, 'second':-1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_acos_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_acos_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_asin_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_asin_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_atan_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_known_function_atan_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'radian': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ternary_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ternary_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 9
my_oracle = [{'second': -1, 'meter': 1}, {'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_function_args_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
f = cps_unit_checker.current_configuration.functions[0].arg_units
self.assertEqual(f[0][0]['linenr'], 13)
self.assertEqual(f[0][0]['units'], [{'meter': 1}])
def test_function_args_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
for f in cps_unit_checker.current_configuration.functions[0].arg_units:
self.assertEqual(f[0]['linenr'], 13)
self.assertEqual(f[0]['units'], [{'meter': 1}])
def test_function_args_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle_1 = 4
my_oracle_2 = [{'meter': 1}]
actual_units = None
all_units_list = cps_unit_checker.current_configuration.functions[0].arg_units
self.assertEqual(len(all_units_list), my_oracle_1)
for u in all_units_list:
self.assertEqual(u[0]['units'], my_oracle_2)
def test_function_args_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle = [{'meter': 1}]
actual_units = None
for f in cps_unit_checker.current_configuration.functions:
for arg_u in f.arg_units:
for arg_use_on_line in arg_u:
self.assertEqual(arg_use_on_line['units'], my_oracle)
def test_function_args_5(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_function_args_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
my_oracle_1 = [{'meter': 1}]
my_oracle_2 = 15
f = cps_unit_checker.current_configuration.functions[0]
self.assertEqual(f.arg_units[0][0]['units'], my_oracle_1)
self.assertEqual(f.arg_units[0][0]['linenr'], my_oracle_2)
def test_division_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 9
my_oracle = [{'second': 2, 'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_division_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_division_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr =10
my_oracle = [{'second': 2}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_logical_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_logical_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
def test_error_type_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_error_return_type_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.current_file_under_analysis = dump_file
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
def test_laser_scan_range_size_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 7
my_oracle = None
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
def test_laser_scan_range_size_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_laser_scan_range_count_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'x'
var_linenr = 7
my_oracle = [{'meter':1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_duration_isZero_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_duration_isZero_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 6
my_oracle = None
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_duration_isZero_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_duration_isZero_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 't'
var_linenr = 6
my_oracle = [{'second':1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
def test_ros_header_include_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/src/test_it_header_include_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
def test_ros_header_include_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/src/test_it_header_include_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(3, len(cps_unit_checker.errors))
e = cps_unit_checker.errors[0]
self.assertEqual(7, e.linenr)
self.assertEqual('./dump_files_for_tests/src/../include/test_it_header_include_2.h', e.get_file_URI_where_error_occured())
e = cps_unit_checker.errors[1]
self.assertEqual(5, e.linenr)
self.assertEqual('./dump_files_for_tests/src/test_it_header_include_2.cpp', e.get_file_URI_where_error_occured())
def test_laser_range_empty_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_range_empty_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# DON'T ASSIGN UNITS TO ARRAYS WHEN time.isZero() IS CALLED
def test_ros_isZero_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_ros_isZero_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
def test_multiple_initialization_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiple_initialization.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (INT)
def test_it_multiplication_with_constant_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 9
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_constants = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.')
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT)
def test_it_multiplication_with_constant_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiplication_with_constant_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 9
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_constants = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_constants = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_constants']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_constants, 'Unit inference should be weakened by constant interaction, but is still strong.')
# WEAKEN ASSIGNMENT WHEN MULTIPLIED BY A CONSTANT (FLOAT)
def test_it_operator_with_unknown_variable_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# self.assertEqual(0, len(cps_unit_checker.errors))
var_name = 'f'
var_linenr = 10
my_oracle = [{'second':-1}]
actual_units = None
is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
is_unit_propagation_based_on_unknown_variable = s.var_ordered_dict[var_name][var_linenr]['is_unit_propagation_based_on_unknown_variable']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(is_unit_propagation_based_on_unknown_variable, 'Unit inference should be weakened by unknown variable interaction, but is still strong.')
# WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT
def test_it_operator_with_unknown_variable_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
for e in cps_unit_checker.errors:
self.assertTrue(e.is_warning, 'Should be a warning but is not marked as such')
# WEAKEN ERROR WHEN MULTIPLIED BY A CONSTANT
def test_it_operator_with_unknown_variable_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_operator_with_unknown_variable_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 7
my_oracle = [{'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 8
my_oracle = [{'second': -1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 8
my_oracle = [{'second': -1}, {'second': -1, 'meter': 1}]
actual_units = None
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# PROPAGATION ACROSS MIN MAX
def test_it_min_max_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_min_max_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 9
my_oracle = [{'second': -1 }, {'second': -1, 'meter': 1}]
actual_units = None
is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# PROTECTION AGAINST MULTILINE
def test_it_multiline_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_multiline_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'f'
var_linenr = 25
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'second': -2.0, 'meter': 2.0}, {'second': -3.0, 'meter': 3.0}, {'second': -2.0, 'meter': 1.0}, {'second': -3.0, 'meter': 2.0}, {'second': -4.0, 'meter': 3.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
# KNOW FUNCTION quatToRPY
def test_it_quatToRPY_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quatToRPY_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 17
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# WEAK INFERENCE WARNING
def test_it_weak_inference_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference: %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 19
my_oracle = [{'second': -1.0, 'meter': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# WEAK INFERENCE WARNING
def test_it_weak_inference_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(0, len(cps_unit_checker.errors))
# STRONG INFERENCE BECAUSE ADDITION
def test_it_weak_inference_addition_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
var_name = 'tw.linear.y'
var_linenr = 23
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER
def test_it_weak_inference_addition_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
var_name = 'tw.linear.y'
var_linenr = 23
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertTrue(cps_unit_checker.errors[1].was_assigned_mutiple_units)
self.assertFalse(cps_unit_checker.errors[1].is_unit_propagation_based_on_unknown_variable)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# STRONG INFERENCE BECAUSE ADDITION - SWAPPED OPERAND ORDER
def test_it_weak_inference_addition_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_weak_inference_addition_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 22
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'radian':1.0}, {'second':1.}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
self.assertTrue(cps_unit_checker.errors[0].is_unit_propagation_based_on_unknown_variable)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - STRONG
def test_it_addition_without_assignment_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK UNKNOWN VARIABLE
def test_it_addition_without_assignment_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR ADDITION OF INCOMPATIBLE UNITS - WEAK CONSTANT
def test_it_addition_without_assignment_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertTrue(cps_unit_checker.errors[0].is_warning)
# ADDITION STAND ALONE ERROR FOR SUBTRACTION OF INCOMPATIBLE UNITS - STRONG CONSTANT
def test_it_addition_without_assignment_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_addition_without_assignment_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# ADDITION OF RADIANS
def test_it_radian_addition_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[1].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# ADDITION OF RADIANS
def test_it_radian_addition_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# MULTIPLICATION OF RADIANS
def test_it_radian_multiplication_1(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# MULTIPLICATION OF RADIANS 2
def test_it_radian_multiplication_2(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# MULTIPLICATION OF RADIANS
def test_it_radian_multiplication_3(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# MULTIPLICATION OF RADIANS 2
def test_it_radian_multiplication_4(self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_multiplication_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# getXYZ
def test_it_getXYZ_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(0, len(cps_unit_checker.errors))
# getXYZ
def test_it_getXYZ_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
var_name = 'tw.linear.x'
var_linenr = 10
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'meter':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# getXYZ
def test_it_getXYZ_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
var_name = 'tw.linear.x'
var_linenr = 10
my_oracle = [{'second': -1.0, 'meter': 1.0}, {'quaternion':1}]
actual_units = None
# is_unit_propagation_based_on_unknown_variable = False
for s in cps_unit_checker.current_configuration.scopes:
# for v in s.var_ordered_dict:
# print v
if s.className == 'main':
if var_name in s.var_ordered_dict and var_linenr in s.var_ordered_dict[var_name]:
actual_units = s.var_ordered_dict[var_name][var_linenr]['units']
self.assertEquals(actual_units, my_oracle, 'Incorrect units assigned to symbol:x Expected: %s received %s' % (my_oracle, actual_units))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertTrue(cps_unit_checker.errors[0].was_assigned_mutiple_units)
# getXYZ
def test_it_getXYZ_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
# for e in cps_unit_checker.errors:
# print '\nweak inference addition : %s warning:%s ' % (e.var_name, str(e.is_warning))
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# getXYZ
def test_it_getXYZ_5 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_getXYZ_5.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# QUATERNION ADDITION 1
def test_it_quaternion_addition_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(2, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
self.assertEqual(UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS, cps_unit_checker.errors[1].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[1].is_warning)
# QUATERNION ADDITION 2
def test_it_quaternion_addition_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# QUATERNION ADDITION 3
def test_it_quaternion_addition_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION ADDITION 4
def test_it_quaternion_addition_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_addition_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 1
def test_it_quaternion_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 2
def test_it_quaternion_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 3
def test_it_quaternion_multiplication_3 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_3.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION 4
def test_it_quaternion_multiplication_4 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_multiplication_4.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION CLOSURE
def test_it_quaternion_closed_under_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# QUATERNION MULTIPLICATION CLOSURE
def test_it_quaternion_closed_under_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_quaternion_closed_under_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# RADIAN MULTIPLICATION CLOSURE
def test_it_radian_closed_under_multiplication_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# RADIAN MULTIPLICATION CLOSURE
def test_it_radian_closed_under_multiplication_2 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_radian_closed_under_multiplication_2.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(1, len(cps_unit_checker.errors))
self.assertEqual(UnitErrorTypes.VARIABLE_MULTIPLE_UNITS, cps_unit_checker.errors[0].ERROR_TYPE)
self.assertFalse(cps_unit_checker.errors[0].is_warning)
# dt Heuristic
def test_it_dt_heuristic (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_dt_heuristic_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# dt Heuristic
def test_it_plus_equals_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_plus_equals_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# dt Heuristic
def test_it_range_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_range_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# same named argument in interface scope bug
def test_it_scope_bug_1 (self):
cps_unit_checker = CPSUnitsChecker()
cps_unit_checker.debug = False
cps_unit_checker.debug_print_AST = False
dump_file = './dump_files_for_tests/test_it_cppcheck_scope_bug_at_argument_1.cpp.dump'
source_file = dump_file.replace('.dump','')
cps_unit_checker.main_run_check(dump_file, source_file)
self.assertEqual(0, len(cps_unit_checker.errors))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7275de79abc81f51af3a3242318153f50472d2c | 1,465 | py | Python | app/rooms/examples/eg003_export_data_from_room/controller.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | [
"MIT"
] | 21 | 2020-05-13T21:08:44.000Z | 2022-02-18T01:32:16.000Z | app/rooms/examples/eg003_export_data_from_room/controller.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | [
"MIT"
] | 8 | 2020-11-23T09:28:04.000Z | 2022-02-02T12:04:08.000Z | app/rooms/examples/eg003_export_data_from_room/controller.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | [
"MIT"
] | 26 | 2020-05-12T22:20:01.000Z | 2022-03-09T10:57:27.000Z | from docusign_rooms import RoomsApi
from flask import session, request
from ...utils import create_rooms_api_client
class Eg003Controller:
    """Exports field data from a DocuSign Rooms room (code example 003)."""

    @staticmethod
    def get_args():
        """Get required session and request arguments.

        Returns a dict with the account id and access token from the Flask
        session plus the room id posted by the form.
        """
        return {
            "account_id": session["ds_account_id"], # Represents your {ACCOUNT_ID}
            "access_token": session["ds_access_token"], # Represents your {ACCESS_TOKEN}
            "room_id": request.form.get("room_id"),
        }

    @staticmethod
    def get_rooms(args):
        """
        1. Create an API client with headers
        2. Get rooms
        """
        # Step 1. Create an API client with headers
        api_client = create_rooms_api_client(access_token=args["access_token"])

        # Step 2. Get rooms for the account (NB: not room *templates*)
        rooms_api = RoomsApi(api_client)
        rooms = rooms_api.get_rooms(account_id=args["account_id"])
        return rooms.rooms

    @staticmethod
    def worker(args):
        """
        1. Create an API client with headers
        2. Get room field data using SDK
        """
        # Step 1. Create an API client with headers
        api_client = create_rooms_api_client(access_token=args["access_token"])

        # Step 2. Get room field data using SDK
        rooms_api = RoomsApi(api_client)
        response = rooms_api.get_room_field_data(
            room_id=args['room_id'],
            account_id=args["account_id"]
        )
        return response
| 31.170213 | 89 | 0.627304 | from docusign_rooms import RoomsApi
from flask import session, request
from ...utils import create_rooms_api_client
class Eg003Controller:
    """Exports field data from a DocuSign Rooms room (code example 003)."""

    @staticmethod
    def get_args():
        """Collect the session credentials and the posted room id."""
        return {
            "account_id": session["ds_account_id"],
            "access_token": session["ds_access_token"],
            "room_id": request.form.get("room_id"),
        }

    @staticmethod
    def get_rooms(args):
        """Return the list of rooms for the account."""
        # Create an API client carrying the access token, then list rooms.
        api_client = create_rooms_api_client(access_token=args["access_token"])
        rooms_api = RoomsApi(api_client)
        rooms = rooms_api.get_rooms(account_id=args["account_id"])
        return rooms.rooms

    @staticmethod
    def worker(args):
        """Fetch the field data for the selected room via the Rooms SDK."""
        # Create an API client carrying the access token, then query field data.
        api_client = create_rooms_api_client(access_token=args["access_token"])
        rooms_api = RoomsApi(api_client)
        response = rooms_api.get_room_field_data(
            room_id=args['room_id'],
            account_id=args["account_id"]
        )
        return response
| true | true |
f7275f87f8a46c54c59aa154abe1e2df4f3c1c6d | 18,246 | py | Python | tests/core/test_datamodules.py | lsqshr/pytorch-lightning | c6b68883879e38719688865aceac746477f0a9b9 | [
"Apache-2.0"
] | null | null | null | tests/core/test_datamodules.py | lsqshr/pytorch-lightning | c6b68883879e38719688865aceac746477f0a9b9 | [
"Apache-2.0"
] | null | null | null | tests/core/test_datamodules.py | lsqshr/pytorch-lightning | c6b68883879e38719688865aceac746477f0a9b9 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from argparse import ArgumentParser
from typing import Any, Dict
from unittest import mock
from unittest.mock import call, PropertyMock
import pytest
import torch
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import AttributeDict
from pytorch_lightning.utilities.model_helpers import is_overridden
from tests.helpers import BoringDataModule, BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
from tests.helpers.utils import reset_seed
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.node_rank", new_callable=PropertyMock)
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.local_rank", new_callable=PropertyMock)
def test_can_prepare_data(local_rank, node_rank):
    """Check which (rank, prepare_data_per_node) combinations may run prepare_data.

    The datamodule's ``random_full`` attribute is used as evidence that
    ``prepare_data`` actually ran (BoringDataModule sets it there).
    """
    model = BoringModel()
    dm = BoringDataModule()
    trainer = Trainer()
    trainer.model = model
    trainer.datamodule = dm

    # Case 1: per-node preparation.
    # prepare_data_per_node = True
    # local rank = 0 -> allowed to prepare
    trainer.prepare_data_per_node = True

    dm.random_full = None
    dm._has_prepared_data = False
    local_rank.return_value = 0
    assert trainer.local_rank == 0
    assert trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is not None

    # local rank = 1 -> must NOT prepare
    dm.random_full = None
    dm._has_prepared_data = False
    local_rank.return_value = 1
    assert trainer.local_rank == 1
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None

    # Case 2: prepare_data_per_node = False (prepare once across all nodes)
    # global rank = 0 -> allowed to prepare
    dm.random_full = None
    dm._has_prepared_data = False
    trainer.prepare_data_per_node = False
    node_rank.return_value = 0
    local_rank.return_value = 0
    assert trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is not None

    # global rank != 0 (non-zero node or local rank) -> must NOT prepare
    dm.random_full = None
    dm._has_prepared_data = False
    node_rank.return_value = 1
    local_rank.return_value = 0
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None

    node_rank.return_value = 0
    local_rank.return_value = 1
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None

    # Case 3: with a datamodule, the _has_prepared_data flag gates re-preparation.
    # prepare_data_per_node = True
    # local rank = 0
    trainer.prepare_data_per_node = True
    local_rank.return_value = 0

    # prepare_data is overridden and has already been called -> False
    dm._has_prepared_data = True
    assert not trainer.data_connector.can_prepare_data()

    # prepare_data is overridden and has not been called -> True
    dm._has_prepared_data = False
    assert trainer.data_connector.can_prepare_data()

    # prepare_data is not overridden at all -> True
    dm.prepare_data = None
    assert trainer.data_connector.can_prepare_data()
def test_hooks_no_recursion_error():
    """Repeated datamodule instantiation must not blow the recursion limit.

    Hooks used to be appended in cascade on every instantiation, eventually
    raising RecursionError.
    See https://github.com/PyTorchLightning/pytorch-lightning/issues/3652
    """

    class DummyDM(LightningDataModule):
        def setup(self, *args, **kwargs):
            pass

        def prepare_data(self, *args, **kwargs):
            pass

    # 1005 iterations comfortably exceeds the default recursion limit of 1000.
    for _ in range(1005):
        datamodule = DummyDM()
        datamodule.setup()
        datamodule.prepare_data()
def test_helper_boringdatamodule():
    """Smoke test: BoringDataModule prepares and sets up without an explicit stage."""
    dm = BoringDataModule()
    dm.prepare_data()
    dm.setup()
def test_helper_boringdatamodule_with_verbose_setup():
    """Smoke test: BoringDataModule accepts explicit per-stage setup calls."""
    dm = BoringDataModule()
    dm.prepare_data()
    dm.setup("fit")
    dm.setup("test")
def test_data_hooks_called():
    """The has_* state flags must track prepare_data/setup/teardown calls."""
    dm = BoringDataModule()
    hook_stages = ("fit", "test", "validate", "predict")
    all_flags = ["has_prepared_data"]
    all_flags += [f"has_setup_{stage}" for stage in hook_stages]
    all_flags += [f"has_teardown_{stage}" for stage in hook_stages]

    def check(expected_true):
        # Every flag in `expected_true` must be set; every other flag unset.
        for flag in all_flags:
            value = getattr(dm, flag)
            assert value if flag in expected_true else not value

    # fresh datamodule: nothing has run yet
    check(set())
    dm.prepare_data()
    check({"has_prepared_data"})
    # a stage-less setup() flips fit/test/validate, but never predict
    dm.setup()
    check({"has_prepared_data", "has_setup_fit", "has_setup_test", "has_setup_validate"})
    # likewise for a stage-less teardown()
    dm.teardown()
    check(
        {
            "has_prepared_data",
            "has_setup_fit",
            "has_setup_test",
            "has_setup_validate",
            "has_teardown_fit",
            "has_teardown_test",
            "has_teardown_validate",
        }
    )
@pytest.mark.parametrize("use_kwarg", (False, True))
def test_data_hooks_called_verbose(use_kwarg):
    """Stage-specific setup()/teardown() calls must flip only their own flag."""
    dm = BoringDataModule()
    dm.prepare_data()
    stages = ("fit", "validate", "test", "predict")
    # nothing has been set up or torn down yet
    for prefix in ("setup", "teardown"):
        for stage in stages:
            assert not getattr(dm, f"has_{prefix}_{stage}")
    # invoke each hook once per stage and verify the flags flip cumulatively
    for prefix in ("setup", "teardown"):
        hook = getattr(dm, prefix)
        done = []
        for stage in stages:
            # exercise both the positional and the keyword calling convention
            if use_kwarg:
                hook(stage=stage)
            else:
                hook(stage)
            done.append(stage)
            for other in stages:
                flag = getattr(dm, f"has_{prefix}_{other}")
                assert flag if other in done else not flag
def test_dm_add_argparse_args(tmpdir):
    """DataModule constructor arguments are exposed through argparse."""
    parser = ArgumentParser()
    parser = BoringDataModule.add_argparse_args(parser)
    args = parser.parse_args(["--data_dir", str(tmpdir)])
    assert args.data_dir == str(tmpdir)
def test_dm_init_from_argparse_args(tmpdir):
    """A DataModule can be constructed from parsed argparse arguments."""
    parser = ArgumentParser()
    parser = BoringDataModule.add_argparse_args(parser)
    args = parser.parse_args(["--data_dir", str(tmpdir)])
    dm = BoringDataModule.from_argparse_args(args)
    dm.prepare_data()
    dm.setup()
    # the parsed value must round-trip into the constructed datamodule
    assert dm.data_dir == args.data_dir == str(tmpdir)
def test_dm_pickle_after_init():
    """A freshly constructed DataModule must be picklable (needed for ddp spawn)."""
    dm = BoringDataModule()
    pickle.dumps(dm)
def test_train_loop_only(tmpdir):
    """Fit a classifier with all validation/test hooks removed: train loop only."""
    reset_seed()
    dm = ClassifDataModule()
    model = ClassificationModel()
    # Strip every validation and test hook so the trainer runs only training.
    for hook_name in (
        "validation_step",
        "validation_step_end",
        "validation_epoch_end",
        "test_step",
        "test_step_end",
        "test_epoch_end",
    ):
        setattr(model, hook_name, None)
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
    trainer.fit(model, datamodule=dm)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    # the model should have learned something within one epoch
    assert trainer.callback_metrics["train_loss"] < 1.0
def test_train_val_loop_only(tmpdir):
    """Fit a classifier with the validation hooks removed; test hooks stay."""
    reset_seed()
    dm = ClassifDataModule()
    model = ClassificationModel()
    # Disable only the validation hooks.
    for hook_name in ("validation_step", "validation_step_end", "validation_epoch_end"):
        setattr(model, hook_name, None)
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
    trainer.fit(model, datamodule=dm)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    assert trainer.callback_metrics["train_loss"] < 1.0
def test_dm_checkpoint_save(tmpdir):
    """Content written by the datamodule's ``on_save_checkpoint`` must end up in the checkpoint file."""

    class CustomBoringModel(BoringModel):
        def validation_step(self, batch, batch_idx):
            out = super().validation_step(batch, batch_idx)
            # log the metric monitored by the ModelCheckpoint callback below
            self.log("early_stop_on", out["x"])
            return out

    class CustomBoringDataModule(BoringDataModule):
        def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
            # inject a marker keyed by the class name so the test can find it
            checkpoint[self.__class__.__name__] = self.__class__.__name__

        def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
            self.checkpoint_state = checkpoint.get(self.__class__.__name__)

    reset_seed()
    dm = CustomBoringDataModule()
    model = CustomBoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=1,
        weights_summary=None,
        callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on")],
    )
    # fit model
    trainer.fit(model, dm)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
    checkpoint = torch.load(checkpoint_path)
    # the datamodule hook must have written its marker into the saved checkpoint
    assert dm.__class__.__name__ in checkpoint
    assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__
def test_full_loop(tmpdir):
    """End-to-end fit/validate/test run driven by a classification datamodule."""
    reset_seed()
    dm = ClassifDataModule()
    model = ClassificationModel()
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None, deterministic=True)
    # fit model
    trainer.fit(model, dm)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    assert dm.trainer is not None
    # validate
    result = trainer.validate(model, dm)
    assert dm.trainer is not None
    assert result[0]["val_acc"] > 0.7
    # test
    result = trainer.test(model, dm)
    assert dm.trainer is not None
    assert result[0]["test_acc"] > 0.6
@RunIf(min_gpus=1)
@mock.patch("pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module", new_callable=PropertyMock)
def test_dm_apply_batch_transfer_handler(get_module_mock):
    """Datamodule batch-transfer hooks run in order: before -> transfer -> after."""
    expected_device = torch.device("cuda", 0)

    class CustomBatch:
        def __init__(self, data):
            self.samples = data[0]
            self.targets = data[1]

    class CurrentTestDM(LightningDataModule):
        # `rank` increments on every hook call so the invocation order can be asserted
        rank = 0
        transfer_batch_to_device_hook_rank = None
        on_before_batch_transfer_hook_rank = None
        on_after_batch_transfer_hook_rank = None

        def on_before_batch_transfer(self, batch, dataloader_idx):
            assert dataloader_idx == 0
            self.on_before_batch_transfer_hook_rank = self.rank
            self.rank += 1
            batch.samples += 1
            return batch

        def on_after_batch_transfer(self, batch, dataloader_idx):
            assert dataloader_idx == 0
            # by now the batch must already live on the target device
            assert batch.samples.device == batch.targets.device == expected_device
            self.on_after_batch_transfer_hook_rank = self.rank
            self.rank += 1
            batch.targets *= 2
            return batch

        def transfer_batch_to_device(self, batch, device, dataloader_idx):
            assert dataloader_idx == 0
            self.transfer_batch_to_device_hook_rank = self.rank
            self.rank += 1
            batch.samples = batch.samples.to(device)
            batch.targets = batch.targets.to(device)
            return batch

    dm = CurrentTestDM()
    model = BoringModel()
    batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))
    trainer = Trainer(gpus=1)
    # running .fit() would require us to implement custom data loaders, we mock the model reference instead
    get_module_mock.return_value = model
    # Patch all three datamodule hooks onto the model unconditionally.  The former
    # `if is_overridden("transfer_batch_to_device", dm)` guard was dead code: the
    # same attribute was reassigned unconditionally right below it.
    model.on_before_batch_transfer = dm.on_before_batch_transfer
    model.transfer_batch_to_device = dm.transfer_batch_to_device
    model.on_after_batch_transfer = dm.on_after_batch_transfer
    batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
    # each hook ran exactly once, in the documented order
    assert dm.on_before_batch_transfer_hook_rank == 0
    assert dm.transfer_batch_to_device_hook_rank == 1
    assert dm.on_after_batch_transfer_hook_rank == 2
    assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
    # samples were incremented before the transfer, targets doubled after it
    assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))
    assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
def test_dm_reload_dataloaders_every_n_epochs(tmpdir):
    """
    Test datamodule, where trainer argument
    reload_dataloaders_every_n_epochs is set to a non negative integer
    """

    class CustomBoringDataModule(BoringDataModule):
        def __init__(self):
            super().__init__()
            # epochs at which train_dataloader() was requested
            self._epochs_called_for = []

        def train_dataloader(self):
            # the loader must be rebuilt at most once per epoch
            assert self.trainer.current_epoch not in self._epochs_called_for
            self._epochs_called_for.append(self.trainer.current_epoch)
            return super().train_dataloader()

    dm = CustomBoringDataModule()
    model = BoringModel()
    # disable validation and test hooks; only the train loop matters here
    model.validation_step = None
    model.validation_step_end = None
    model.validation_epoch_end = None
    model.test_step = None
    model.test_step_end = None
    model.test_epoch_end = None
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)
    trainer.fit(model, dm)
class DummyDS(torch.utils.data.Dataset):
    """Minimal map-style dataset: 100 items, each the constant 1."""

    def __getitem__(self, index):
        return 1

    def __len__(self):
        return 100
class DummyIDS(torch.utils.data.IterableDataset):
    """Minimal iterable-style dataset yielding a single constant item."""

    def __iter__(self):
        yield 1
@pytest.mark.parametrize("iterable", (False, True))
def test_dm_init_from_datasets_dataloaders(iterable):
    """``LightningDataModule.from_datasets`` must build correctly configured loaders."""
    ds = DummyIDS if iterable else DummyDS
    # single train dataset
    train_ds = ds()
    dm = LightningDataModule.from_datasets(train_ds, batch_size=4, num_workers=0)
    with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
        dm.train_dataloader()
        # shuffling is disabled for iterable datasets, which do not support it
        dl_mock.assert_called_once_with(train_ds, batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True)
    assert dm.val_dataloader() is None
    assert dm.test_dataloader() is None
    # sequence of train datasets -> one DataLoader per dataset
    train_ds_sequence = [ds(), ds()]
    dm = LightningDataModule.from_datasets(train_ds_sequence, batch_size=4, num_workers=0)
    with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
        dm.train_dataloader()
        dl_mock.assert_has_calls(
            [
                call(train_ds_sequence[0], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
                call(train_ds_sequence[1], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
            ]
        )
    assert dm.val_dataloader() is None
    assert dm.test_dataloader() is None
    # val/test datasets only; eval loaders never shuffle
    valid_ds = ds()
    test_ds = ds()
    dm = LightningDataModule.from_datasets(val_dataset=valid_ds, test_dataset=test_ds, batch_size=2, num_workers=0)
    with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
        dm.val_dataloader()
        dl_mock.assert_called_with(valid_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
        dm.test_dataloader()
        dl_mock.assert_called_with(test_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
    assert dm.train_dataloader() is None
    # sequences for val and test as well
    valid_dss = [ds(), ds()]
    test_dss = [ds(), ds()]
    dm = LightningDataModule.from_datasets(train_ds, valid_dss, test_dss, batch_size=4, num_workers=0)
    with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
        dm.val_dataloader()
        dm.test_dataloader()
        dl_mock.assert_has_calls(
            [
                call(valid_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
                call(valid_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
                call(test_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
                call(test_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
            ]
        )
class DataModuleWithHparams(LightningDataModule):
    """DataModule that records all of its constructor arguments as hyperparameters."""

    def __init__(self, arg0, arg1, kwarg0=None):
        super().__init__()
        self.save_hyperparameters()
def test_simple_hyperparameters_saving():
    """``save_hyperparameters`` captures positional and keyword constructor args."""
    data = DataModuleWithHparams(10, "foo", kwarg0="bar")
    assert data.hparams == AttributeDict({"arg0": 10, "arg1": "foo", "kwarg0": "bar"})
| 33.478899 | 120 | 0.71961 |
import pickle
from argparse import ArgumentParser
from typing import Any, Dict
from unittest import mock
from unittest.mock import call, PropertyMock
import pytest
import torch
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import AttributeDict
from pytorch_lightning.utilities.model_helpers import is_overridden
from tests.helpers import BoringDataModule, BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
from tests.helpers.utils import reset_seed
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.node_rank", new_callable=PropertyMock)
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.local_rank", new_callable=PropertyMock)
def test_can_prepare_data(local_rank, node_rank):
    """prepare_data() must only run on the designated rank.

    With ``prepare_data_per_node=True`` only local rank 0 prepares the data;
    with it disabled only global rank 0 (node 0 / local 0) does.
    """
    model = BoringModel()
    dm = BoringDataModule()
    trainer = Trainer()
    trainer.model = model
    trainer.datamodule = dm
    # prepare_data_per_node = True
    # local rank = 0 (True)
    trainer.prepare_data_per_node = True
    dm.random_full = None
    dm._has_prepared_data = False
    local_rank.return_value = 0
    assert trainer.local_rank == 0
    assert trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is not None
    # local rank = 1 (False)
    dm.random_full = None
    dm._has_prepared_data = False
    local_rank.return_value = 1
    assert trainer.local_rank == 1
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None
    # prepare_data_per_node = False (prepare across all nodes)
    # global rank = 0 (True)
    dm.random_full = None
    dm._has_prepared_data = False
    trainer.prepare_data_per_node = False
    node_rank.return_value = 0
    local_rank.return_value = 0
    assert trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is not None
    # global rank = 1 (False): non-zero node rank
    dm.random_full = None
    dm._has_prepared_data = False
    node_rank.return_value = 1
    local_rank.return_value = 0
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None
    # global rank = 1 (False): non-zero local rank
    node_rank.return_value = 0
    local_rank.return_value = 1
    assert not trainer.data_connector.can_prepare_data()
    trainer.data_connector.prepare_data()
    assert dm.random_full is None
    # with a datamodule overriding prepare_data, per-node preparation, local rank 0:
    trainer.prepare_data_per_node = True
    local_rank.return_value = 0
    # prepare_data already ran -> must not run again
    dm._has_prepared_data = True
    assert not trainer.data_connector.can_prepare_data()
    # prepare_data has not run yet -> allowed
    dm._has_prepared_data = False
    assert trainer.data_connector.can_prepare_data()
    # prepare_data not overridden -> always allowed
    dm.prepare_data = None
    assert trainer.data_connector.can_prepare_data()
def test_hooks_no_recursion_error():
class DummyDM(LightningDataModule):
def setup(self, *args, **kwargs):
pass
def prepare_data(self, *args, **kwargs):
pass
for i in range(1005):
dm = DummyDM()
dm.setup()
dm.prepare_data()
def test_helper_boringdatamodule():
dm = BoringDataModule()
dm.prepare_data()
dm.setup()
def test_helper_boringdatamodule_with_verbose_setup():
dm = BoringDataModule()
dm.prepare_data()
dm.setup("fit")
dm.setup("test")
def test_data_hooks_called():
dm = BoringDataModule()
assert not dm.has_prepared_data
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.prepare_data()
assert dm.has_prepared_data
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.setup()
assert dm.has_prepared_data
assert dm.has_setup_fit
assert dm.has_setup_test
assert dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.teardown()
assert dm.has_prepared_data
assert dm.has_setup_fit
assert dm.has_setup_test
assert dm.has_setup_validate
assert not dm.has_setup_predict
assert dm.has_teardown_fit
assert dm.has_teardown_test
assert dm.has_teardown_validate
assert not dm.has_teardown_predict
@pytest.mark.parametrize("use_kwarg", (False, True))
def test_data_hooks_called_verbose(use_kwarg):
dm = BoringDataModule()
dm.prepare_data()
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.setup(stage="fit") if use_kwarg else dm.setup("fit")
assert dm.has_setup_fit
assert not dm.has_setup_validate
assert not dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="validate") if use_kwarg else dm.setup("validate")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert not dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="test") if use_kwarg else dm.setup("test")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="predict") if use_kwarg else dm.setup("predict")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert dm.has_setup_test
assert dm.has_setup_predict
dm.teardown(stage="fit") if use_kwarg else dm.teardown("fit")
assert dm.has_teardown_fit
assert not dm.has_teardown_validate
assert not dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="validate") if use_kwarg else dm.teardown("validate")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert not dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="test") if use_kwarg else dm.teardown("test")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="predict") if use_kwarg else dm.teardown("predict")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert dm.has_teardown_test
assert dm.has_teardown_predict
def test_dm_add_argparse_args(tmpdir):
parser = ArgumentParser()
parser = BoringDataModule.add_argparse_args(parser)
args = parser.parse_args(["--data_dir", str(tmpdir)])
assert args.data_dir == str(tmpdir)
def test_dm_init_from_argparse_args(tmpdir):
parser = ArgumentParser()
parser = BoringDataModule.add_argparse_args(parser)
args = parser.parse_args(["--data_dir", str(tmpdir)])
dm = BoringDataModule.from_argparse_args(args)
dm.prepare_data()
dm.setup()
assert dm.data_dir == args.data_dir == str(tmpdir)
def test_dm_pickle_after_init():
dm = BoringDataModule()
pickle.dumps(dm)
def test_train_loop_only(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.callback_metrics["train_loss"] < 1.0
def test_train_val_loop_only(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.callback_metrics["train_loss"] < 1.0
def test_dm_checkpoint_save(tmpdir):
class CustomBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
out = super().validation_step(batch, batch_idx)
self.log("early_stop_on", out["x"])
return out
class CustomBoringDataModule(BoringDataModule):
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
checkpoint[self.__class__.__name__] = self.__class__.__name__
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.checkpoint_state = checkpoint.get(self.__class__.__name__)
reset_seed()
dm = CustomBoringDataModule()
model = CustomBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=1,
weights_summary=None,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on")],
)
trainer.fit(model, dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
checkpoint = torch.load(checkpoint_path)
assert dm.__class__.__name__ in checkpoint
assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__
def test_full_loop(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None, deterministic=True)
trainer.fit(model, dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert dm.trainer is not None
result = trainer.validate(model, dm)
assert dm.trainer is not None
assert result[0]["val_acc"] > 0.7
result = trainer.test(model, dm)
assert dm.trainer is not None
assert result[0]["test_acc"] > 0.6
@RunIf(min_gpus=1)
@mock.patch("pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module", new_callable=PropertyMock)
def test_dm_apply_batch_transfer_handler(get_module_mock):
expected_device = torch.device("cuda", 0)
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
self.targets = data[1]
class CurrentTestDM(LightningDataModule):
rank = 0
transfer_batch_to_device_hook_rank = None
on_before_batch_transfer_hook_rank = None
on_after_batch_transfer_hook_rank = None
def on_before_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx == 0
self.on_before_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.samples += 1
return batch
def on_after_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx == 0
assert batch.samples.device == batch.targets.device == expected_device
self.on_after_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.targets *= 2
return batch
def transfer_batch_to_device(self, batch, device, dataloader_idx):
assert dataloader_idx == 0
self.transfer_batch_to_device_hook_rank = self.rank
self.rank += 1
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
return batch
dm = CurrentTestDM()
model = BoringModel()
batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))
trainer = Trainer(gpus=1)
get_module_mock.return_value = model
if is_overridden("transfer_batch_to_device", dm):
model.transfer_batch_to_device = dm.transfer_batch_to_device
model.on_before_batch_transfer = dm.on_before_batch_transfer
model.transfer_batch_to_device = dm.transfer_batch_to_device
model.on_after_batch_transfer = dm.on_after_batch_transfer
batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
assert dm.on_before_batch_transfer_hook_rank == 0
assert dm.transfer_batch_to_device_hook_rank == 1
assert dm.on_after_batch_transfer_hook_rank == 2
assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))
assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
def test_dm_reload_dataloaders_every_n_epochs(tmpdir):
class CustomBoringDataModule(BoringDataModule):
def __init__(self):
super().__init__()
self._epochs_called_for = []
def train_dataloader(self):
assert self.trainer.current_epoch not in self._epochs_called_for
self._epochs_called_for.append(self.trainer.current_epoch)
return super().train_dataloader()
dm = CustomBoringDataModule()
model = BoringModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)
trainer.fit(model, dm)
class DummyDS(torch.utils.data.Dataset):
def __getitem__(self, index):
return 1
def __len__(self):
return 100
class DummyIDS(torch.utils.data.IterableDataset):
def __iter__(self):
yield 1
@pytest.mark.parametrize("iterable", (False, True))
def test_dm_init_from_datasets_dataloaders(iterable):
ds = DummyIDS if iterable else DummyDS
train_ds = ds()
dm = LightningDataModule.from_datasets(train_ds, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.train_dataloader()
dl_mock.assert_called_once_with(train_ds, batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True)
assert dm.val_dataloader() is None
assert dm.test_dataloader() is None
train_ds_sequence = [ds(), ds()]
dm = LightningDataModule.from_datasets(train_ds_sequence, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.train_dataloader()
dl_mock.assert_has_calls(
[
call(train_ds_sequence[0], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
call(train_ds_sequence[1], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
]
)
assert dm.val_dataloader() is None
assert dm.test_dataloader() is None
valid_ds = ds()
test_ds = ds()
dm = LightningDataModule.from_datasets(val_dataset=valid_ds, test_dataset=test_ds, batch_size=2, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.val_dataloader()
dl_mock.assert_called_with(valid_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
dm.test_dataloader()
dl_mock.assert_called_with(test_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
assert dm.train_dataloader() is None
valid_dss = [ds(), ds()]
test_dss = [ds(), ds()]
dm = LightningDataModule.from_datasets(train_ds, valid_dss, test_dss, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.val_dataloader()
dm.test_dataloader()
dl_mock.assert_has_calls(
[
call(valid_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(valid_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(test_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(test_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
]
)
class DataModuleWithHparams(LightningDataModule):
def __init__(self, arg0, arg1, kwarg0=None):
super().__init__()
self.save_hyperparameters()
def test_simple_hyperparameters_saving():
data = DataModuleWithHparams(10, "foo", kwarg0="bar")
assert data.hparams == AttributeDict({"arg0": 10, "arg1": "foo", "kwarg0": "bar"})
| true | true |
f7275fe5fae95e29aeb9f0a69b69a28a48dc9e9a | 2,048 | py | Python | jupyter_resource_usage/tests/test_basic.py | fcollonval/jupyter-resource-usage | 77ef2341efdee67a1457b6c6a0de5e001ca4c3aa | [
"BSD-2-Clause"
] | 1 | 2021-03-20T09:24:46.000Z | 2021-03-20T09:24:46.000Z | jupyter_resource_usage/tests/test_basic.py | fcollonval/jupyter-resource-usage | 77ef2341efdee67a1457b6c6a0de5e001ca4c3aa | [
"BSD-2-Clause"
] | null | null | null | jupyter_resource_usage/tests/test_basic.py | fcollonval/jupyter-resource-usage | 77ef2341efdee67a1457b6c6a0de5e001ca4c3aa | [
"BSD-2-Clause"
] | null | null | null | from mock import MagicMock
from mock import patch
class TestBasic:
    """Some basic tests, checking import, making sure APIs remain consistent, etc"""

    def test_import_serverextension(self):
        """Check that serverextension hooks are available"""
        from jupyter_resource_usage import (
            _jupyter_server_extension_paths,
            _jupyter_nbextension_paths,
            load_jupyter_server_extension,
        )

        # hook return values are part of the public extension API; keep them stable
        assert _jupyter_server_extension_paths() == [
            {"module": "jupyter_resource_usage"}
        ]
        assert _jupyter_nbextension_paths() == [
            {
                "section": "notebook",
                "dest": "jupyter_resource_usage",
                "src": "static",
                "require": "jupyter_resource_usage/main",
            }
        ]
        # mock a notebook app
        nbapp_mock = MagicMock()
        nbapp_mock.web_app.settings = {"base_url": ""}
        # mock out the periodic callback and the extension internals for the unit test
        with patch("tornado.ioloop.PeriodicCallback") as periodic_callback_mock, patch(
            "jupyter_resource_usage.ResourceUseDisplay"
        ) as resource_use_display_mock, patch(
            "jupyter_resource_usage.PrometheusHandler"
        ) as prometheus_handler_mock, patch(
            "jupyter_resource_usage.PSUtilMetricsLoader"
        ) as psutil_metrics_loader:
            # load up with mock
            load_jupyter_server_extension(nbapp_mock)
            # assert that we installed the application config in the web app settings
            print(nbapp_mock.web_app.settings)
            assert (
                "jupyter_resource_usage_display_config" in nbapp_mock.web_app.settings
            )
            # assert that we instantiated and started a periodic callback with the
            # fake prometheus handler
            assert periodic_callback_mock.return_value.start.call_count == 1
            assert prometheus_handler_mock.call_count == 1
            prometheus_handler_mock.assert_called_with(
                psutil_metrics_loader(nbapp_mock)
            )
| 35.929825 | 87 | 0.625488 | from mock import MagicMock
from mock import patch
class TestBasic:
def test_import_serverextension(self):
from jupyter_resource_usage import (
_jupyter_server_extension_paths,
_jupyter_nbextension_paths,
load_jupyter_server_extension,
)
assert _jupyter_server_extension_paths() == [
{"module": "jupyter_resource_usage"}
]
assert _jupyter_nbextension_paths() == [
{
"section": "notebook",
"dest": "jupyter_resource_usage",
"src": "static",
"require": "jupyter_resource_usage/main",
}
]
nbapp_mock = MagicMock()
nbapp_mock.web_app.settings = {"base_url": ""}
with patch("tornado.ioloop.PeriodicCallback") as periodic_callback_mock, patch(
"jupyter_resource_usage.ResourceUseDisplay"
) as resource_use_display_mock, patch(
"jupyter_resource_usage.PrometheusHandler"
) as prometheus_handler_mock, patch(
"jupyter_resource_usage.PSUtilMetricsLoader"
) as psutil_metrics_loader:
load_jupyter_server_extension(nbapp_mock)
print(nbapp_mock.web_app.settings)
assert (
"jupyter_resource_usage_display_config" in nbapp_mock.web_app.settings
)
assert periodic_callback_mock.return_value.start.call_count == 1
assert prometheus_handler_mock.call_count == 1
prometheus_handler_mock.assert_called_with(
psutil_metrics_loader(nbapp_mock)
)
| true | true |
f7275ff361eebaef39028e0b71a649811bfb8cc5 | 1,646 | py | Python | multitrackpy/mtt.py | bbo-lab/multitrackpy | a25ebdb94969b0682c851ab69ba5895173b581d0 | [
"BSD-3-Clause"
] | null | null | null | multitrackpy/mtt.py | bbo-lab/multitrackpy | a25ebdb94969b0682c851ab69ba5895173b581d0 | [
"BSD-3-Clause"
] | null | null | null | multitrackpy/mtt.py | bbo-lab/multitrackpy | a25ebdb94969b0682c851ab69ba5895173b581d0 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import h5py
from pprint import pprint
def read_calib(mtt_path):
    """Read the multi-camera calibration for all tracking cameras.

    Args:
        mtt_path: Path to a MultiTrackTracker file (MATLAB v7.3, i.e. HDF5).

    Returns:
        dict with keys:
            'Rglobal': per-camera global rotation matrices ('Rglobal' is stored
                in reverse order in the h5 file, so it is transposed back here),
            'Tglobal': per-camera global translations,
            'cal': list of per-camera calibration dicts with keys 'scaling',
                'icent', 'distortion_coefs', 'sensorsize', 'scale_pixels'.
    """
    # Open read-only and close the handle deterministically (the original
    # implementation leaked the open file).
    with h5py.File(mtt_path, 'r') as mtt_file:
        istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)
        # MATLAB indices are 1-based; convert to 0-based, keep tracking cams only.
        calind = np.squeeze(np.int32(mtt_file['mt']['calind']))[istracking] - 1
        mc = {
            'Rglobal': np.asarray(mtt_file['mt']['mc']['Rglobal']).transpose((0, 2, 1)),  # in reverse order in h5 file!
            'Tglobal': np.asarray(mtt_file['mt']['mc']['Tglobal']),
            'cal': []
        }
        # The per-camera entries are stored as HDF5 object references and must be
        # dereferenced through the file object (mtt_file[ref]).
        cal_refs = mtt_file['mt']['mc']['cal']
        for ci in calind:
            mc['cal'].append({
                'scaling': np.asarray(mtt_file[cal_refs['scaling'][ci, 0]]).T[0],
                'icent': np.asarray(mtt_file[cal_refs['icent'][ci, 0]]).T[0],
                'distortion_coefs': np.asarray(mtt_file[cal_refs['distortion_coefs'][ci, 0]]),
                'sensorsize': np.asarray(mtt_file[cal_refs['sensorsize'][ci, 0]]).T[0],
                'scale_pixels': np.asarray(mtt_file[cal_refs['scale_pixels'][ci, 0]]),
            })
    return mc
def read_video_paths(vid_dir, mtt_path):
    """Return the video file paths of all tracking cameras.

    Video names are stored in the HDF5 file as arrays of character codes and
    are decoded back to strings here.

    Args:
        vid_dir: Directory prefix prepended to each stored video name.
        mtt_path: Path to a MultiTrackTracker file (MATLAB v7.3, i.e. HDF5).

    Returns:
        list[str]: One video path per tracking camera.
    """
    with h5py.File(mtt_path, 'r') as mtt_file:
        istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)
        # .astype(int) instead of the deprecated np.int alias, which was removed
        # in NumPy 1.24 and raises AttributeError there.
        return [
            vid_dir + ''.join(chr(c) for c in mtt_file[mtt_file['mt']['vidname'][0, i]][:].T.astype(int)[0])
            for i in np.where(istracking)[0]
        ]
def read_spacecoords(mtt_path):
mtt_file = h5py.File(mtt_path)
return np.asarray(mtt_file['mt']['objmodel']['space_coord'])
def read_frame_n(mtt_path):
mtt_file = h5py.File(mtt_path)
return len(mtt_file['mt']['t'])
| 36.577778 | 119 | 0.600243 | import numpy as np
import h5py
from pprint import pprint
def read_calib(mtt_path):
mtt_file = h5py.File(mtt_path)
istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)
calind = np.squeeze(np.int32(mtt_file['mt']['calind']))[istracking] - 1
mc = {
'Rglobal': np.asarray(mtt_file['mt']['mc']['Rglobal']).transpose((0, 2, 1)),
'Tglobal': np.asarray(mtt_file['mt']['mc']['Tglobal']),
'cal': []
}
for ci in calind:
mc['cal'].append({
'scaling': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scaling'][ci, 0]]).T[0],
'icent': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['icent'][ci, 0]]).T[0],
'distortion_coefs': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['distortion_coefs'][ci, 0]]),
'sensorsize': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['sensorsize'][ci, 0]]).T[0],
'scale_pixels': np.asarray(mtt_file[mtt_file['mt']['mc']['cal']['scale_pixels'][ci, 0]]),
})
return mc
def read_video_paths(vid_dir, mtt_path):
mtt_file = h5py.File(mtt_path)
istracking = np.squeeze(np.asarray([mtt_file['mt']['cam_istracking']]) == 1)
return [vid_dir + ''.join([chr(c) for c in mtt_file[mtt_file['mt']['vidname'][0, i]][:].T.astype(np.int)[0]]) for i
in np.where(istracking)[0]]
def read_spacecoords(mtt_path):
mtt_file = h5py.File(mtt_path)
return np.asarray(mtt_file['mt']['objmodel']['space_coord'])
def read_frame_n(mtt_path):
mtt_file = h5py.File(mtt_path)
return len(mtt_file['mt']['t'])
| true | true |
f727605ce453fa3224aa6df44afa7128f0668b0c | 2,544 | py | Python | tao_action_recognition/data_generation/split_dataset.py | morrimeg/tao_toolkit_recipes | 011f5426e2cec44af5b686d0c6225836460202f8 | [
"MIT"
] | null | null | null | tao_action_recognition/data_generation/split_dataset.py | morrimeg/tao_toolkit_recipes | 011f5426e2cec44af5b686d0c6225836460202f8 | [
"MIT"
] | null | null | null | tao_action_recognition/data_generation/split_dataset.py | morrimeg/tao_toolkit_recipes | 011f5426e2cec44af5b686d0c6225836460202f8 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import shutil
import sys
root_path = sys.argv[1]
split_files_path = sys.argv[2]
target_train_path = sys.argv[3]
target_test_path = sys.argv[4]
if not os.path.exists(target_train_path):
os.makedirs(target_train_path)
if not os.path.exists(target_test_path):
os.makedirs(target_test_path)
train_cnt = 0
test_cnt = 0
for class_name in os.listdir(root_path):
split_files = os.path.join(split_files_path, class_name + "_test_split1.txt")
cls_train_path = os.path.join(target_train_path, class_name)
cls_test_path = os.path.join(target_test_path, class_name)
if not os.path.exists(cls_train_path):
os.makedirs(cls_train_path)
if not os.path.exists(cls_test_path):
os.makedirs(cls_test_path)
with open(split_files, "r") as f:
split_list = f.readlines()
for line in split_list:
video_name, label = line.split()
video_name = video_name.split(".")[0]
cur_path = os.path.join(root_path, class_name, video_name)
if int(label) == 1:
train_cnt += 1
des_path = os.path.join(target_train_path, class_name, video_name)
shutil.move(cur_path, des_path)
elif int(label) == 2:
test_cnt += 1
des_path = os.path.join(target_test_path, class_name, video_name)
shutil.move(cur_path, des_path)
print("Split 1: \n Train: {}\n Test: {}".format(train_cnt, test_cnt))
| 39.75 | 81 | 0.72445 |
import os
import shutil
import sys
root_path = sys.argv[1]
split_files_path = sys.argv[2]
target_train_path = sys.argv[3]
target_test_path = sys.argv[4]
if not os.path.exists(target_train_path):
os.makedirs(target_train_path)
if not os.path.exists(target_test_path):
os.makedirs(target_test_path)
train_cnt = 0
test_cnt = 0
for class_name in os.listdir(root_path):
split_files = os.path.join(split_files_path, class_name + "_test_split1.txt")
cls_train_path = os.path.join(target_train_path, class_name)
cls_test_path = os.path.join(target_test_path, class_name)
if not os.path.exists(cls_train_path):
os.makedirs(cls_train_path)
if not os.path.exists(cls_test_path):
os.makedirs(cls_test_path)
with open(split_files, "r") as f:
split_list = f.readlines()
for line in split_list:
video_name, label = line.split()
video_name = video_name.split(".")[0]
cur_path = os.path.join(root_path, class_name, video_name)
if int(label) == 1:
train_cnt += 1
des_path = os.path.join(target_train_path, class_name, video_name)
shutil.move(cur_path, des_path)
elif int(label) == 2:
test_cnt += 1
des_path = os.path.join(target_test_path, class_name, video_name)
shutil.move(cur_path, des_path)
print("Split 1: \n Train: {}\n Test: {}".format(train_cnt, test_cnt))
| true | true |
f72760a70f1f2dd346effdba76e317afc3c4c200 | 458 | py | Python | Tests/misc/eexec_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 2,705 | 2016-09-27T10:02:12.000Z | 2022-03-31T09:37:46.000Z | Tests/misc/eexec_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 1,599 | 2016-09-27T09:07:36.000Z | 2022-03-31T23:04:51.000Z | Tests/misc/eexec_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 352 | 2016-10-07T04:18:15.000Z | 2022-03-30T07:35:01.000Z | from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
assert R == 36142
def test_encrypt():
testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
| 28.625 | 74 | 0.68559 | from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
assert R == 36142
def test_encrypt():
testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
| true | true |
f7276149bae5156f7cb9fe99fd0ec9a0e37e83ee | 2,019 | py | Python | jigsaw/migrations/0004_auto_20160315_2014.py | sxyu/Jiggly | af705453902b11d7bc1f298dce4698fdc9a470fe | [
"FSFAP"
] | 3 | 2018-03-29T13:31:31.000Z | 2022-02-26T04:49:40.000Z | jigsaw/migrations/0004_auto_20160315_2014.py | sxyu/Jiggly | af705453902b11d7bc1f298dce4698fdc9a470fe | [
"FSFAP"
] | null | null | null | jigsaw/migrations/0004_auto_20160315_2014.py | sxyu/Jiggly | af705453902b11d7bc1f298dce4698fdc9a470fe | [
"FSFAP"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-16 03:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jigsaw.models
class Migration(migrations.Migration):
dependencies = [
('jigsaw', '0003_auto_20160315_1733'),
]
operations = [
migrations.CreateModel(
name='GameInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public', models.BooleanField(default=False)),
('totaltime', models.IntegerField(default=1800)),
('passedtime', models.IntegerField(default=0, editable=False)),
],
),
migrations.CreateModel(
name='PrintDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(default=jigsaw.models._createId, max_length=32)),
('time', models.DateTimeField(auto_now_add=True)),
('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='prints', to='jigsaw.Round')),
],
),
migrations.RemoveField(
model_name='game',
name='public',
),
migrations.RemoveField(
model_name='player',
name='game',
),
migrations.AddField(
model_name='gameinstance',
name='game',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instances', to='jigsaw.Game'),
),
migrations.AddField(
model_name='player',
name='instance',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='players', to='jigsaw.GameInstance'),
preserve_default=False,
),
]
| 36.709091 | 142 | 0.592372 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jigsaw.models
class Migration(migrations.Migration):
dependencies = [
('jigsaw', '0003_auto_20160315_1733'),
]
operations = [
migrations.CreateModel(
name='GameInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public', models.BooleanField(default=False)),
('totaltime', models.IntegerField(default=1800)),
('passedtime', models.IntegerField(default=0, editable=False)),
],
),
migrations.CreateModel(
name='PrintDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(default=jigsaw.models._createId, max_length=32)),
('time', models.DateTimeField(auto_now_add=True)),
('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='prints', to='jigsaw.Round')),
],
),
migrations.RemoveField(
model_name='game',
name='public',
),
migrations.RemoveField(
model_name='player',
name='game',
),
migrations.AddField(
model_name='gameinstance',
name='game',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instances', to='jigsaw.Game'),
),
migrations.AddField(
model_name='player',
name='instance',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='players', to='jigsaw.GameInstance'),
preserve_default=False,
),
]
| true | true |
f727619381755861c088ab5d8fb34a9eb7540f17 | 341 | py | Python | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
a=0
x=0
while(x<len(nums)):
if nums[x]==val:
nums.pop(x)
x-=1
x+=1
return len(nums) | 21.3125 | 39 | 0.384164 | class Solution(object):
def removeElement(self, nums, val):
a=0
x=0
while(x<len(nums)):
if nums[x]==val:
nums.pop(x)
x-=1
x+=1
return len(nums) | true | true |
f727626a0369587c267d59e56782c18664512665 | 11,871 | py | Python | django/contrib/admin/util.py | t11e/django | 447f5375d378dba3bac1ded0306fa0d1b8ab55a4 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T13:32:33.000Z | 2016-05-08T13:32:33.000Z | django/contrib/admin/util.py | t11e/django | 447f5375d378dba3bac1ded0306fa0d1b8ab55a4 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/admin/util.py | t11e/django | 447f5375d378dba3bac1ded0306fa0d1b8ab55a4 | [
"BSD-3-Clause"
] | null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext, ugettext as _
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.datastructures import SortedDict
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' characters. Similar to urllib.quote, except that the
quoting is slightly different so that it doesn't get automatically
unquoted by the Web browser.
"""
if not isinstance(s, basestring):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
def _format_callback(obj, user, admin_site, levels_to_root, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
admin_url = '%s%s/%s/%s/' % ('../'*levels_to_root,
opts.app_label,
opts.object_name.lower(),
quote(obj._get_pk_val()))
if has_admin:
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe(u'%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return u'%s: %s' % (capfirst(opts.verbose_name),
force_unicode(obj))
def get_deleted_objects(objs, opts, user, admin_site, levels_to_root=4):
"""
Find all objects related to ``objs`` that should also be
deleted. ``objs`` should be an iterable of objects.
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
`levels_to_root` defines the number of directories (../) to reach
the admin root path. In a change_view this is 4, in a change_list
view 2.
This is for backwards compatibility since the options.delete_selected
method uses this function also from a change_list view.
This will not be used if we can reverse the URL.
"""
collector = NestedObjects()
for obj in objs:
# TODO using a private model API!
obj._collect_sub_objects(collector)
perms_needed = set()
to_delete = collector.nested(_format_callback,
user=user,
admin_site=admin_site,
levels_to_root=levels_to_root,
perms_needed=perms_needed)
return to_delete, perms_needed
class NestedObjects(object):
"""
A directed acyclic graph collection that exposes the add() API
expected by Model._collect_sub_objects and can present its data as
a nested list of objects.
"""
def __init__(self):
# Use object keys of the form (model, pk) because actual model
# objects may not be unique
# maps object key to list of child keys
self.children = SortedDict()
# maps object key to parent key
self.parents = SortedDict()
# maps object key to actual object
self.seen = SortedDict()
def add(self, model, pk, obj,
parent_model=None, parent_obj=None, nullable=False):
"""
Add item ``obj`` to the graph. Returns True (and does nothing)
if the item has been seen already.
The ``parent_obj`` argument must already exist in the graph; if
not, it's ignored (but ``obj`` is still added with no
parent). In any case, Model._collect_sub_objects (for whom
this API exists) will never pass a parent that hasn't already
been added itself.
These restrictions in combination ensure the graph will remain
acyclic (but can have multiple roots).
``model``, ``pk``, and ``parent_model`` arguments are ignored
in favor of the appropriate lookups on ``obj`` and
``parent_obj``; unlike CollectedObjects, we can't maintain
independence from the knowledge that we're operating on model
instances, and we don't want to allow for inconsistency.
``nullable`` arg is ignored: it doesn't affect how the tree of
collected objects should be nested for display.
"""
model, pk = type(obj), obj._get_pk_val()
# auto-created M2M models don't interest us
if model._meta.auto_created:
return True
key = model, pk
if key in self.seen:
return True
self.seen.setdefault(key, obj)
if parent_obj is not None:
parent_model, parent_pk = (type(parent_obj),
parent_obj._get_pk_val())
parent_key = (parent_model, parent_pk)
if parent_key in self.seen:
self.children.setdefault(parent_key, list()).append(key)
self.parents.setdefault(key, parent_key)
def _nested(self, key, format_callback=None, **kwargs):
obj = self.seen[key]
if format_callback:
ret = [format_callback(obj, **kwargs)]
else:
ret = [obj]
children = []
for child in self.children.get(key, ()):
children.extend(self._nested(child, format_callback, **kwargs))
if children:
ret.append(children)
return ret
def nested(self, format_callback=None, **kwargs):
"""
Return the graph as a nested list.
Passes **kwargs back to the format_callback as kwargs.
"""
roots = []
for key in self.seen.keys():
if key not in self.parents:
roots.extend(self._nested(key, format_callback, **kwargs))
return roots
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_unicode(opts.verbose_name),
'verbose_name_plural': force_unicode(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
attr = None
try:
label = model._meta.get_field_by_name(name)[0].verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_unicode(model._meta.verbose_name)
elif name == "__str__":
label = smart_str(model._meta.verbose_name)
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = attr.__name__
else:
label = name
if return_attr:
return (label, attr)
else:
return label
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateField) or isinstance(field, models.TimeField):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_unicode(value)
| 35.435821 | 94 | 0.598602 | from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext, ugettext as _
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.datastructures import SortedDict
def quote(s):
if not isinstance(s, basestring):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
def _format_callback(obj, user, admin_site, levels_to_root, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
admin_url = '%s%s/%s/%s/' % ('../'*levels_to_root,
opts.app_label,
opts.object_name.lower(),
quote(obj._get_pk_val()))
if has_admin:
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe(u'%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return u'%s: %s' % (capfirst(opts.verbose_name),
force_unicode(obj))
def get_deleted_objects(objs, opts, user, admin_site, levels_to_root=4):
collector = NestedObjects()
for obj in objs:
# TODO using a private model API!
obj._collect_sub_objects(collector)
perms_needed = set()
to_delete = collector.nested(_format_callback,
user=user,
admin_site=admin_site,
levels_to_root=levels_to_root,
perms_needed=perms_needed)
return to_delete, perms_needed
class NestedObjects(object):
def __init__(self):
# Use object keys of the form (model, pk) because actual model
# objects may not be unique
# maps object key to list of child keys
self.children = SortedDict()
# maps object key to parent key
self.parents = SortedDict()
# maps object key to actual object
self.seen = SortedDict()
def add(self, model, pk, obj,
parent_model=None, parent_obj=None, nullable=False):
model, pk = type(obj), obj._get_pk_val()
# auto-created M2M models don't interest us
if model._meta.auto_created:
return True
key = model, pk
if key in self.seen:
return True
self.seen.setdefault(key, obj)
if parent_obj is not None:
parent_model, parent_pk = (type(parent_obj),
parent_obj._get_pk_val())
parent_key = (parent_model, parent_pk)
if parent_key in self.seen:
self.children.setdefault(parent_key, list()).append(key)
self.parents.setdefault(key, parent_key)
def _nested(self, key, format_callback=None, **kwargs):
obj = self.seen[key]
if format_callback:
ret = [format_callback(obj, **kwargs)]
else:
ret = [obj]
children = []
for child in self.children.get(key, ()):
children.extend(self._nested(child, format_callback, **kwargs))
if children:
ret.append(children)
return ret
def nested(self, format_callback=None, **kwargs):
roots = []
for key in self.seen.keys():
if key not in self.parents:
roots.extend(self._nested(key, format_callback, **kwargs))
return roots
def model_format_dict(obj):
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_unicode(opts.verbose_name),
'verbose_name_plural': force_unicode(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
attr = None
try:
label = model._meta.get_field_by_name(name)[0].verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_unicode(model._meta.verbose_name)
elif name == "__str__":
label = smart_str(model._meta.verbose_name)
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = attr.__name__
else:
label = name
if return_attr:
return (label, attr)
else:
return label
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateField) or isinstance(field, models.TimeField):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_unicode(value)
| true | true |
f72762d80e22c0940fd6c0b7df91a5eb5427ea3d | 18,819 | py | Python | log_complete/model_403.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_403.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_403.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
# Auto-exported PySB model of an extrinsic-apoptosis signaling network:
# ligand/receptor engagement -> FADD -> caspase-8 -> Bid truncation -> Bax
# activation and pore formation -> Smac / cytochrome-c release -> apoptosome
# (Apaf/C9) -> caspase-3 -> PARP cleavage, with XIAP inhibition and a
# caspase-6 -> caspase-8 feedback loop (all encoded in the rules below).
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Instantiate the model; PySB's SelfExporter then injects every component
# declared below into this module's namespace under its given name.
Model()
# --- Monomers -------------------------------------------------------------
# Each site list names the binding sites used by the rules below.  Name
# suffixes (as used by the rules — confirm against the source model): pro =
# proenzyme, A = active, U = uncleaved / T = truncated (Bid), M =
# mitochondrial / C = cytosolic pool (Smac, CytoC), ub = ubiquitinated.
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two Bax-Bax sites (BaxA_1/BaxA_2) so activated Bax can chain
# into the tetrameric pore assembled by the pore_formation_* rules below.
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
# --- Kinetic parameters ---------------------------------------------------
# Rate constants for the rules below (suffixes: 2kf = bimolecular forward,
# 1kf = unimolecular forward, 1kr = unimolecular reverse, 1kc = catalytic
# step).  All are placeholders of 1.0 — presumably to be replaced by a
# downstream parameter-fitting step (verify against the exporting pipeline).
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# Initial copy numbers (per-cell molecule counts) consumed by the Initial
# declarations at the bottom of the file; species that only appear as
# reaction products (active/cleaved forms) start at 0.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
# --- Observables ----------------------------------------------------------
# One observable per monomer.  A monomer pattern with no site conditions
# matches every species containing that monomer, so each *_obs reports the
# total copy number across free and complexed forms.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
# --- Rules ----------------------------------------------------------------
# '|' denotes a reversible rule (kf, kr); '>>' an irreversible step (kc).
# '%' joins monomers into a complex; integer site values are bond indices.
# Receptor module: ligand binds receptor, FADD is recruited, and
# receptor-bound FADD binds and activates procaspase-8 (C8pro -> C8A).
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
# Active caspase-8 truncates Bid (BidU -> BidT) via a two-step
# bind-then-convert catalysis scheme.
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
# Apoptosome module: cytosolic cytochrome c converts ApafI to ApafA, which
# combines with C9 into the apoptosome (Apop); SmacC sequesters Xiap.
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
# Effector caspase module: Apop activates C3 (C3pro -> C3A); Xiap both binds
# Apop and ubiquitinates C3A (-> C3ub); active C3 cleaves PARP.
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# Mitochondrial module: truncated Bid equilibrates to the membrane (BidM),
# activates Bax (BaxM -> BaxA), and activated Bax auto-activates more Bax.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
# Pore assembly: BaxA oligomerizes stepwise (dimer -> trimer -> tetramer)
# through its BaxA_1/BaxA_2 sites.
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Transport: the Bax tetramer pore binds mitochondrial Smac / cytochrome c
# and releases the cytosolic forms (SmacC, CytoCC).
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# Feedback module: C8A also activates C3 directly; C3A activates C6
# (C6pro -> C6A), and C6A activates more C8 (C8pro -> C8A), closing the
# caspase-3 -> caspase-6 -> caspase-8 amplification loop.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions ---------------------------------------------------
# Every monomer starts fully unbound (all sites None) at the copy number
# given by its matching *_0 parameter above; downstream/active species have
# *_0 = 0 and are produced only by the rules.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.354369 | 710 | 0.806525 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
f72765a52a463ef2846870cae24daa680f1ff6c0 | 11,200 | py | Python | toontown/catalog/CatalogAccessoryItemGlobals.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | toontown/catalog/CatalogAccessoryItemGlobals.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/catalog/CatalogAccessoryItemGlobals.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | ATArticle = 0
ATString = 1
ATBasePrice = 2
ATReleased = 3
ATEmblemPrices = 4
AHat = 0
AGlasses = 1
ABackpack = 2
AShoes = 3
APriceBasic = 250
APriceBasicPlus = 400
APriceCool = 800
APriceAwesome = 1500
AccessoryTypes = {101: (AHat,
'hbb1',
APriceBasic,
1),
102: (AHat,
'hsf1',
APriceCool,
5),
103: (AHat,
'hrb1',
APriceBasic,
1),
104: (AHat,
'hsf2',
APriceCool,
0),
105: (AHat,
'hsf3',
APriceCool,
0),
106: (AHat,
'hrb2',
APriceBasicPlus,
3),
107: (AHat,
'hrb3',
APriceBasicPlus,
0),
108: (AHat,
'hht1',
APriceCool,
4),
109: (AHat,
'hht2',
APriceCool,
3),
110: (AHat,
'htp1',
APriceCool,
3),
111: (AHat,
'htp2',
APriceCool,
0),
112: (AHat,
'hav1',
3500,
0),
113: (AHat,
'hfp1',
3500,
0),
114: (AHat,
'hsg1',
3500,
0),
115: (AHat,
'hwt1',
3500,
0),
116: (AHat,
'hfz1',
APriceCool,
5),
117: (AHat,
'hgf1',
APriceCool,
1),
118: (AHat,
'hpt1',
APriceBasicPlus,
1),
119: (AHat,
'hpb1',
APriceBasicPlus,
6),
120: (AHat,
'hcr1',
10000,
5),
121: (AHat,
'hbb2',
APriceBasic,
2),
122: (AHat,
'hbb3',
APriceBasic,
2),
123: (AHat,
'hcw1',
APriceCool,
1),
124: (AHat,
'hpr1',
APriceAwesome,
1),
125: (AHat,
'hpp1',
APriceBasicPlus,
1),
126: (AHat,
'hfs1',
APriceCool,
1),
127: (AHat,
'hsb1',
APriceAwesome,
1),
128: (AHat,
'hst1',
APriceBasicPlus,
1),
129: (AHat,
'hsu1',
APriceCool,
1),
130: (AHat,
'hrb4',
APriceBasic,
1),
131: (AHat,
'hrb5',
APriceBasicPlus,
4),
132: (AHat,
'hrb6',
APriceBasic,
2),
133: (AHat,
'hrb7',
APriceBasicPlus,
6),
134: (AHat,
'hat1',
APriceCool,
2),
135: (AHat,
'hhd1',
APriceCool,
2),
136: (AHat,
'hbw1',
APriceCool,
6),
137: (AHat,
'hch1',
APriceCool,
5),
138: (AHat,
'hdt1',
APriceAwesome,
6),
139: (AHat,
'hft1',
APriceCool,
4),
140: (AHat,
'hfd1',
APriceCool,
6),
141: (AHat,
'hmk1',
APriceAwesome,
2),
142: (AHat,
'hft2',
APriceCool,
6),
143: (AHat,
'hhd2',
APriceCool,
3),
144: (AHat,
'hpc1',
APriceCool,
5),
145: (AHat,
'hrh1',
APriceCool,
2),
146: (AHat,
'hhm1',
2500,
2),
147: (AHat,
'hat2',
APriceCool,
2),
148: (AHat,
'htr1',
10000,
3),
149: (AHat,
'hhm2',
APriceAwesome,
2),
150: (AHat,
'hwz1',
APriceCool,
2),
151: (AHat,
'hwz2',
APriceCool,
2),
152: (AHat,
'hhm3',
APriceAwesome,
6),
153: (AHat,
'hhm4',
APriceAwesome,
5),
154: (AHat,
'hfp2',
APriceCool,
5),
155: (AHat,
'hhm5',
APriceAwesome,
4),
156: (AHat,
'hnp1',
APriceAwesome,
6),
157: (AHat,
'hpc2',
APriceAwesome,
3),
158: (AHat,
'hph1',
APriceAwesome,
4),
159: (AHat,
'hwg1',
APriceCool,
5),
160: (AHat,
'hbb4',
APriceBasic,
5),
161: (AHat,
'hbb5',
APriceBasic,
2),
162: (AHat,
'hbb6',
APriceBasic,
5),
163: (AHat,
'hsl1',
APriceCool,
5),
164: (AHat,
'hfr1',
3000,
4),
165: (AHat,
'hby1',
APriceAwesome,
5),
166: (AHat,
'hrb8',
APriceBasicPlus,
6),
167: (AHat,
'hjh1',
APriceAwesome,
3),
168: (AHat,
'hbb7',
APriceBasic,
6),
169: (AHat,
'hrb9',
APriceBasicPlus,
6),
170: (AHat,
'hwt2',
APriceAwesome,
4),
171: (AHat,
'hhw1',
APriceBasicPlus,
7),
172: (AHat,
'hhw2',
900,
7),
173: (AHat,
'hob1',
APriceAwesome,
6),
174: (AHat,
'hbn1',
APriceAwesome,
8),
175: (AHat,
'hpt2',
APriceCool,
9),
176: (AHat,
'kmh1',
APriceAwesome,
8),
201: (AGlasses,
'grd1',
APriceBasicPlus,
0),
202: (AGlasses,
'gmb1',
APriceCool,
1),
203: (AGlasses,
'gnr1',
APriceCool,
0),
204: (AGlasses,
'gst1',
APriceBasicPlus,
1),
205: (AGlasses,
'g3d1',
APriceCool,
1),
206: (AGlasses,
'gav1',
APriceCool,
1),
207: (AGlasses,
'gce1',
APriceCool,
2),
208: (AGlasses,
'gdk1',
APriceBasic,
1),
209: (AGlasses,
'gjo1',
APriceBasicPlus,
1),
210: (AGlasses,
'gsb1',
APriceAwesome,
1),
211: (AGlasses,
'ggl1',
APriceCool,
6),
212: (AGlasses,
'ggm1',
APriceBasicPlus,
2),
213: (AGlasses,
'ghg1',
APriceAwesome,
3),
214: (AGlasses,
'gie1',
APriceCool,
2),
215: (AGlasses,
'gmt1',
APriceCool,
2),
216: (AGlasses,
'gmt2',
APriceCool,
2),
217: (AGlasses,
'gmt3',
3500,
5),
218: (AGlasses,
'gmt4',
3500,
5),
219: (AGlasses,
'gmt5',
3500,
5),
220: (AGlasses,
'gmn1',
APriceAwesome,
6),
221: (AGlasses,
'gmo1',
APriceAwesome,
4),
222: (AGlasses,
'gsr1',
APriceBasicPlus,
5),
223: (AGlasses,
'ghw1',
APriceBasic,
0),
224: (AGlasses,
'ghw2',
APriceBasic,
7),
225: (AGlasses,
'gag1',
APriceAwesome,
8),
226: (AGlasses,
'ghy1',
APriceAwesome,
8),
301: (ABackpack,
'bpb1',
APriceBasic,
4),
302: (ABackpack,
'bpb2',
APriceBasic,
1),
303: (ABackpack,
'bpb3',
APriceBasic,
5),
304: (ABackpack,
'bpd1',
APriceBasicPlus,
4),
305: (ABackpack,
'bpd2',
APriceBasicPlus,
5),
306: (ABackpack,
'bwg1',
APriceCool,
2),
307: (ABackpack,
'bwg2',
APriceCool,
2),
308: (ABackpack,
'bwg3',
APriceCool,
1),
309: (ABackpack,
'bst1',
APriceAwesome,
1),
310: (ABackpack,
'bfn1',
APriceCool,
1),
311: (ABackpack,
'baw1',
APriceCool,
3),
312: (ABackpack,
'baw2',
APriceAwesome,
2),
313: (ABackpack,
'bwt1',
3000,
3),
314: (ABackpack,
'bwg4',
APriceAwesome,
6),
315: (ABackpack,
'bwg5',
3000,
5),
316: (ABackpack,
'bwg6',
3000,
4),
317: (ABackpack,
'bjp1',
3000,
1),
318: (ABackpack,
'blg1',
APriceCool,
2),
319: (ABackpack,
'bsa1',
2500,
5),
320: (ABackpack,
'bwg7',
APriceAwesome,
6),
321: (ABackpack,
'bsa2',
2000,
2),
322: (ABackpack,
'bsa3',
2000,
2),
323: (ABackpack,
'bap1',
5000,
4),
324: (ABackpack,
'bhw1',
900,
7),
325: (ABackpack,
'bhw2',
APriceBasicPlus,
7),
326: (ABackpack,
'bhw3',
APriceBasicPlus,
7),
327: (ABackpack,
'bhw4',
900,
7),
328: (ABackpack,
'bob1',
3000,
6),
329: (ABackpack,
'bfg1',
3000,
6),
330: (ABackpack,
'bfl1',
APriceAwesome,
8),
401: (AShoes,
'sat1',
APriceBasic,
3),
402: (AShoes,
'sat2',
APriceBasic,
1),
403: (AShoes,
'smb1',
APriceAwesome,
1),
404: (AShoes,
'scs1',
APriceBasicPlus,
6),
405: (AShoes,
'swt1',
APriceBasicPlus,
1),
406: (AShoes,
'smj1',
APriceBasicPlus,
1),
407: (AShoes,
'sdk1',
APriceBasic,
1),
408: (AShoes,
'sat3',
APriceBasic,
1),
409: (AShoes,
'scs2',
APriceBasicPlus,
1),
410: (AShoes,
'scs3',
APriceBasicPlus,
1),
411: (AShoes,
'scs4',
APriceBasicPlus,
1),
412: (AShoes,
'scb1',
APriceAwesome,
1),
413: (AShoes,
'sfb1',
APriceCool,
1),
414: (AShoes,
'sht1',
APriceAwesome,
4),
415: (AShoes,
'smj2',
APriceBasicPlus,
3),
416: (AShoes,
'smj3',
APriceBasicPlus,
4),
417: (AShoes,
'ssb1',
APriceAwesome,
2),
418: (AShoes,
'sts1',
APriceBasic,
5),
419: (AShoes,
'sts2',
APriceBasic,
4),
420: (AShoes,
'scs5',
APriceBasicPlus,
4),
421: (AShoes,
'smb2',
APriceAwesome,
3),
422: (AShoes,
'smb3',
APriceAwesome,
2),
423: (AShoes,
'smb4',
APriceAwesome,
5),
424: (AShoes,
'sfb2',
2000,
6),
425: (AShoes,
'sfb3',
2000,
4),
426: (AShoes,
'sfb4',
2000,
3),
427: (AShoes,
'sfb5',
2000,
5),
428: (AShoes,
'sfb6',
2000,
4),
429: (AShoes,
'slf1',
APriceBasicPlus,
3),
430: (AShoes,
'smj4',
APriceBasicPlus,
2),
431: (AShoes,
'smt1',
APriceAwesome,
4),
432: (AShoes,
'sox1',
APriceAwesome,
5),
433: (AShoes,
'srb1',
APriceAwesome,
6),
434: (AShoes,
'sst1',
3000,
3),
435: (AShoes,
'swb1',
APriceCool,
3),
436: (AShoes,
'swb2',
APriceCool,
4),
437: (AShoes,
'swk1',
APriceAwesome,
3),
438: (AShoes,
'scs6',
APriceBasicPlus,
0),
439: (AShoes,
'smb5',
APriceAwesome,
3),
440: (AShoes,
'sht2',
APriceAwesome,
4),
441: (AShoes,
'srb2',
APriceAwesome,
3),
442: (AShoes,
'sts3',
APriceBasic,
6),
443: (AShoes,
'sts4',
APriceBasic,
3),
444: (AShoes,
'sts5',
APriceBasic,
2),
445: (AShoes,
'srb3',
APriceCool,
5),
446: (AShoes,
'srb4',
APriceCool,
3),
447: (AShoes,
'sat4',
APriceBasic,
3),
448: (AShoes,
'shw1',
APriceCool,
7),
449: (AShoes,
'shw2',
APriceCool,
7)}
| 15.176152 | 29 | 0.413304 | ATArticle = 0
ATString = 1
ATBasePrice = 2
ATReleased = 3
ATEmblemPrices = 4
AHat = 0
AGlasses = 1
ABackpack = 2
AShoes = 3
APriceBasic = 250
APriceBasicPlus = 400
APriceCool = 800
APriceAwesome = 1500
AccessoryTypes = {101: (AHat,
'hbb1',
APriceBasic,
1),
102: (AHat,
'hsf1',
APriceCool,
5),
103: (AHat,
'hrb1',
APriceBasic,
1),
104: (AHat,
'hsf2',
APriceCool,
0),
105: (AHat,
'hsf3',
APriceCool,
0),
106: (AHat,
'hrb2',
APriceBasicPlus,
3),
107: (AHat,
'hrb3',
APriceBasicPlus,
0),
108: (AHat,
'hht1',
APriceCool,
4),
109: (AHat,
'hht2',
APriceCool,
3),
110: (AHat,
'htp1',
APriceCool,
3),
111: (AHat,
'htp2',
APriceCool,
0),
112: (AHat,
'hav1',
3500,
0),
113: (AHat,
'hfp1',
3500,
0),
114: (AHat,
'hsg1',
3500,
0),
115: (AHat,
'hwt1',
3500,
0),
116: (AHat,
'hfz1',
APriceCool,
5),
117: (AHat,
'hgf1',
APriceCool,
1),
118: (AHat,
'hpt1',
APriceBasicPlus,
1),
119: (AHat,
'hpb1',
APriceBasicPlus,
6),
120: (AHat,
'hcr1',
10000,
5),
121: (AHat,
'hbb2',
APriceBasic,
2),
122: (AHat,
'hbb3',
APriceBasic,
2),
123: (AHat,
'hcw1',
APriceCool,
1),
124: (AHat,
'hpr1',
APriceAwesome,
1),
125: (AHat,
'hpp1',
APriceBasicPlus,
1),
126: (AHat,
'hfs1',
APriceCool,
1),
127: (AHat,
'hsb1',
APriceAwesome,
1),
128: (AHat,
'hst1',
APriceBasicPlus,
1),
129: (AHat,
'hsu1',
APriceCool,
1),
130: (AHat,
'hrb4',
APriceBasic,
1),
131: (AHat,
'hrb5',
APriceBasicPlus,
4),
132: (AHat,
'hrb6',
APriceBasic,
2),
133: (AHat,
'hrb7',
APriceBasicPlus,
6),
134: (AHat,
'hat1',
APriceCool,
2),
135: (AHat,
'hhd1',
APriceCool,
2),
136: (AHat,
'hbw1',
APriceCool,
6),
137: (AHat,
'hch1',
APriceCool,
5),
138: (AHat,
'hdt1',
APriceAwesome,
6),
139: (AHat,
'hft1',
APriceCool,
4),
140: (AHat,
'hfd1',
APriceCool,
6),
141: (AHat,
'hmk1',
APriceAwesome,
2),
142: (AHat,
'hft2',
APriceCool,
6),
143: (AHat,
'hhd2',
APriceCool,
3),
144: (AHat,
'hpc1',
APriceCool,
5),
145: (AHat,
'hrh1',
APriceCool,
2),
146: (AHat,
'hhm1',
2500,
2),
147: (AHat,
'hat2',
APriceCool,
2),
148: (AHat,
'htr1',
10000,
3),
149: (AHat,
'hhm2',
APriceAwesome,
2),
150: (AHat,
'hwz1',
APriceCool,
2),
151: (AHat,
'hwz2',
APriceCool,
2),
152: (AHat,
'hhm3',
APriceAwesome,
6),
153: (AHat,
'hhm4',
APriceAwesome,
5),
154: (AHat,
'hfp2',
APriceCool,
5),
155: (AHat,
'hhm5',
APriceAwesome,
4),
156: (AHat,
'hnp1',
APriceAwesome,
6),
157: (AHat,
'hpc2',
APriceAwesome,
3),
158: (AHat,
'hph1',
APriceAwesome,
4),
159: (AHat,
'hwg1',
APriceCool,
5),
160: (AHat,
'hbb4',
APriceBasic,
5),
161: (AHat,
'hbb5',
APriceBasic,
2),
162: (AHat,
'hbb6',
APriceBasic,
5),
163: (AHat,
'hsl1',
APriceCool,
5),
164: (AHat,
'hfr1',
3000,
4),
165: (AHat,
'hby1',
APriceAwesome,
5),
166: (AHat,
'hrb8',
APriceBasicPlus,
6),
167: (AHat,
'hjh1',
APriceAwesome,
3),
168: (AHat,
'hbb7',
APriceBasic,
6),
169: (AHat,
'hrb9',
APriceBasicPlus,
6),
170: (AHat,
'hwt2',
APriceAwesome,
4),
171: (AHat,
'hhw1',
APriceBasicPlus,
7),
172: (AHat,
'hhw2',
900,
7),
173: (AHat,
'hob1',
APriceAwesome,
6),
174: (AHat,
'hbn1',
APriceAwesome,
8),
175: (AHat,
'hpt2',
APriceCool,
9),
176: (AHat,
'kmh1',
APriceAwesome,
8),
201: (AGlasses,
'grd1',
APriceBasicPlus,
0),
202: (AGlasses,
'gmb1',
APriceCool,
1),
203: (AGlasses,
'gnr1',
APriceCool,
0),
204: (AGlasses,
'gst1',
APriceBasicPlus,
1),
205: (AGlasses,
'g3d1',
APriceCool,
1),
206: (AGlasses,
'gav1',
APriceCool,
1),
207: (AGlasses,
'gce1',
APriceCool,
2),
208: (AGlasses,
'gdk1',
APriceBasic,
1),
209: (AGlasses,
'gjo1',
APriceBasicPlus,
1),
210: (AGlasses,
'gsb1',
APriceAwesome,
1),
211: (AGlasses,
'ggl1',
APriceCool,
6),
212: (AGlasses,
'ggm1',
APriceBasicPlus,
2),
213: (AGlasses,
'ghg1',
APriceAwesome,
3),
214: (AGlasses,
'gie1',
APriceCool,
2),
215: (AGlasses,
'gmt1',
APriceCool,
2),
216: (AGlasses,
'gmt2',
APriceCool,
2),
217: (AGlasses,
'gmt3',
3500,
5),
218: (AGlasses,
'gmt4',
3500,
5),
219: (AGlasses,
'gmt5',
3500,
5),
220: (AGlasses,
'gmn1',
APriceAwesome,
6),
221: (AGlasses,
'gmo1',
APriceAwesome,
4),
222: (AGlasses,
'gsr1',
APriceBasicPlus,
5),
223: (AGlasses,
'ghw1',
APriceBasic,
0),
224: (AGlasses,
'ghw2',
APriceBasic,
7),
225: (AGlasses,
'gag1',
APriceAwesome,
8),
226: (AGlasses,
'ghy1',
APriceAwesome,
8),
301: (ABackpack,
'bpb1',
APriceBasic,
4),
302: (ABackpack,
'bpb2',
APriceBasic,
1),
303: (ABackpack,
'bpb3',
APriceBasic,
5),
304: (ABackpack,
'bpd1',
APriceBasicPlus,
4),
305: (ABackpack,
'bpd2',
APriceBasicPlus,
5),
306: (ABackpack,
'bwg1',
APriceCool,
2),
307: (ABackpack,
'bwg2',
APriceCool,
2),
308: (ABackpack,
'bwg3',
APriceCool,
1),
309: (ABackpack,
'bst1',
APriceAwesome,
1),
310: (ABackpack,
'bfn1',
APriceCool,
1),
311: (ABackpack,
'baw1',
APriceCool,
3),
312: (ABackpack,
'baw2',
APriceAwesome,
2),
313: (ABackpack,
'bwt1',
3000,
3),
314: (ABackpack,
'bwg4',
APriceAwesome,
6),
315: (ABackpack,
'bwg5',
3000,
5),
316: (ABackpack,
'bwg6',
3000,
4),
317: (ABackpack,
'bjp1',
3000,
1),
318: (ABackpack,
'blg1',
APriceCool,
2),
319: (ABackpack,
'bsa1',
2500,
5),
320: (ABackpack,
'bwg7',
APriceAwesome,
6),
321: (ABackpack,
'bsa2',
2000,
2),
322: (ABackpack,
'bsa3',
2000,
2),
323: (ABackpack,
'bap1',
5000,
4),
324: (ABackpack,
'bhw1',
900,
7),
325: (ABackpack,
'bhw2',
APriceBasicPlus,
7),
326: (ABackpack,
'bhw3',
APriceBasicPlus,
7),
327: (ABackpack,
'bhw4',
900,
7),
328: (ABackpack,
'bob1',
3000,
6),
329: (ABackpack,
'bfg1',
3000,
6),
330: (ABackpack,
'bfl1',
APriceAwesome,
8),
401: (AShoes,
'sat1',
APriceBasic,
3),
402: (AShoes,
'sat2',
APriceBasic,
1),
403: (AShoes,
'smb1',
APriceAwesome,
1),
404: (AShoes,
'scs1',
APriceBasicPlus,
6),
405: (AShoes,
'swt1',
APriceBasicPlus,
1),
406: (AShoes,
'smj1',
APriceBasicPlus,
1),
407: (AShoes,
'sdk1',
APriceBasic,
1),
408: (AShoes,
'sat3',
APriceBasic,
1),
409: (AShoes,
'scs2',
APriceBasicPlus,
1),
410: (AShoes,
'scs3',
APriceBasicPlus,
1),
411: (AShoes,
'scs4',
APriceBasicPlus,
1),
412: (AShoes,
'scb1',
APriceAwesome,
1),
413: (AShoes,
'sfb1',
APriceCool,
1),
414: (AShoes,
'sht1',
APriceAwesome,
4),
415: (AShoes,
'smj2',
APriceBasicPlus,
3),
416: (AShoes,
'smj3',
APriceBasicPlus,
4),
417: (AShoes,
'ssb1',
APriceAwesome,
2),
418: (AShoes,
'sts1',
APriceBasic,
5),
419: (AShoes,
'sts2',
APriceBasic,
4),
420: (AShoes,
'scs5',
APriceBasicPlus,
4),
421: (AShoes,
'smb2',
APriceAwesome,
3),
422: (AShoes,
'smb3',
APriceAwesome,
2),
423: (AShoes,
'smb4',
APriceAwesome,
5),
424: (AShoes,
'sfb2',
2000,
6),
425: (AShoes,
'sfb3',
2000,
4),
426: (AShoes,
'sfb4',
2000,
3),
427: (AShoes,
'sfb5',
2000,
5),
428: (AShoes,
'sfb6',
2000,
4),
429: (AShoes,
'slf1',
APriceBasicPlus,
3),
430: (AShoes,
'smj4',
APriceBasicPlus,
2),
431: (AShoes,
'smt1',
APriceAwesome,
4),
432: (AShoes,
'sox1',
APriceAwesome,
5),
433: (AShoes,
'srb1',
APriceAwesome,
6),
434: (AShoes,
'sst1',
3000,
3),
435: (AShoes,
'swb1',
APriceCool,
3),
436: (AShoes,
'swb2',
APriceCool,
4),
437: (AShoes,
'swk1',
APriceAwesome,
3),
438: (AShoes,
'scs6',
APriceBasicPlus,
0),
439: (AShoes,
'smb5',
APriceAwesome,
3),
440: (AShoes,
'sht2',
APriceAwesome,
4),
441: (AShoes,
'srb2',
APriceAwesome,
3),
442: (AShoes,
'sts3',
APriceBasic,
6),
443: (AShoes,
'sts4',
APriceBasic,
3),
444: (AShoes,
'sts5',
APriceBasic,
2),
445: (AShoes,
'srb3',
APriceCool,
5),
446: (AShoes,
'srb4',
APriceCool,
3),
447: (AShoes,
'sat4',
APriceBasic,
3),
448: (AShoes,
'shw1',
APriceCool,
7),
449: (AShoes,
'shw2',
APriceCool,
7)}
| true | true |
f727679b242e26a0cafd9fa4682d96dd99b90d0a | 2,309 | py | Python | Extra_python/Scripts/manualDel.py | JPGarCar/HORS | e06d4be00921d09f89406da5e64bbb5717c8bf07 | [
"MIT"
] | 1 | 2019-12-23T22:43:46.000Z | 2019-12-23T22:43:46.000Z | Extra_python/Scripts/manualDel.py | JPGarCar/HORS | e06d4be00921d09f89406da5e64bbb5717c8bf07 | [
"MIT"
] | 18 | 2021-01-15T02:35:48.000Z | 2021-12-08T17:39:51.000Z | Extra_python/Scripts/manualDel.py | JPGarCar/HORS | e06d4be00921d09f89406da5e64bbb5717c8bf07 | [
"MIT"
] | null | null | null | from cs50 import SQL
db = SQL("sqlite:///immuns.db")
global currentUser
def manualDel(number, curUser):
stem = db.execute("SELECT * FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number)
for stoop in stem:
comm = stoop["committee"]
db.execute("UPDATE generalList SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE generalList SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
if comm[-2:] == "MS":
db.execute("UPDATE msen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE mssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE msen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE mssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
elif comm[-2:] == "HS":
db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
else:
db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("DELETE FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number)
currentUser = "GuillermoLopezIndividualGuillermo"
numberList = [1]
for number in numberList:
manualDel(number, currentUser)
| 69.969697 | 149 | 0.672152 | from cs50 import SQL
db = SQL("sqlite:///immuns.db")
global currentUser
def manualDel(number, curUser):
stem = db.execute("SELECT * FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number)
for stoop in stem:
comm = stoop["committee"]
db.execute("UPDATE generalList SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE generalList SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
if comm[-2:] == "MS":
db.execute("UPDATE msen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE mssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE msen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE mssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
elif comm[-2:] == "HS":
db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
else:
db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"])
db.execute("DELETE FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number)
currentUser = "GuillermoLopezIndividualGuillermo"
numberList = [1]
for number in numberList:
manualDel(number, currentUser)
| true | true |
f72767e3fe7d8c9e8e9895a0ec1f3c6a2f6fe9d2 | 1,765 | py | Python | products/migrations/0001_initial.py | UB-ES-2021-A1/wannasell-backend | 84360b2985fc28971867601373697f39303e396b | [
"Unlicense"
] | null | null | null | products/migrations/0001_initial.py | UB-ES-2021-A1/wannasell-backend | 84360b2985fc28971867601373697f39303e396b | [
"Unlicense"
] | 62 | 2021-11-22T21:52:44.000Z | 2021-12-17T15:07:02.000Z | products/migrations/0001_initial.py | UB-ES-2021-A1/wannasell-backend | 84360b2985fc28971867601373697f39303e396b | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-18 11:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=500)),
('description', models.TextField(blank=True, max_length=1000)),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('category', models.CharField(choices=[('CO', 'Coches'), ('MO', 'Motos'), ('MA', 'Moda y Accesorios'), ('IM', 'Immobiliaria'), ('TV', 'TV, Audio y Foto'), ('TE', 'Móviles y Telefonía'), ('IE', 'Informática y Electrónica'), ('DO', 'Deporte y Ocio'), ('BI', 'Bicicletas'), ('CV', 'Consolas y Videojuegos'), ('HJ', 'Hogar y Jardín'), ('ED', 'Electrodomésticos'), ('CU', 'Cine, Libros y Música'), ('NI', 'Niños y Bebés'), ('CC', 'Coleccionismo'), ('CT', 'Construcción y reformas'), ('IN', 'Industria y Agricultura'), ('EM', 'Empleo'), ('SE', 'Servicios'), ('OT', 'Otros')], default='OT', max_length=2)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('product', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='products.product')),
],
),
]
| 51.911765 | 615 | 0.586969 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=500)),
('description', models.TextField(blank=True, max_length=1000)),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('category', models.CharField(choices=[('CO', 'Coches'), ('MO', 'Motos'), ('MA', 'Moda y Accesorios'), ('IM', 'Immobiliaria'), ('TV', 'TV, Audio y Foto'), ('TE', 'Móviles y Telefonía'), ('IE', 'Informática y Electrónica'), ('DO', 'Deporte y Ocio'), ('BI', 'Bicicletas'), ('CV', 'Consolas y Videojuegos'), ('HJ', 'Hogar y Jardín'), ('ED', 'Electrodomésticos'), ('CU', 'Cine, Libros y Música'), ('NI', 'Niños y Bebés'), ('CC', 'Coleccionismo'), ('CT', 'Construcción y reformas'), ('IN', 'Industria y Agricultura'), ('EM', 'Empleo'), ('SE', 'Servicios'), ('OT', 'Otros')], default='OT', max_length=2)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('product', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='products.product')),
],
),
]
| true | true |
f727686cecf1ae4d8cd0131fdaf4bd65fedf922c | 8,921 | py | Python | python_samples/Python_Examples/ibm_db/ibm_db-special_columns.py | adrianmahjour/db2-samples | ff984aec81c5c08ce28443d896c0818cfae4f789 | [
"Apache-2.0"
] | 54 | 2019-08-02T13:15:07.000Z | 2022-03-21T17:36:48.000Z | python_samples/Python_Examples/ibm_db/ibm_db-special_columns.py | junsulee75/db2-samples | d9ee03101cad1f9167eebc1609b4151559124017 | [
"Apache-2.0"
] | 13 | 2019-07-26T13:51:16.000Z | 2022-03-25T21:43:52.000Z | python_samples/Python_Examples/ibm_db/ibm_db-special_columns.py | junsulee75/db2-samples | d9ee03101cad1f9167eebc1609b4151559124017 | [
"Apache-2.0"
] | 75 | 2019-07-20T04:53:24.000Z | 2022-03-23T20:56:55.000Z | #! /usr/bin/python3
#-------------------------------------------------------------------------------------------------#
# NAME: ibm_db-special_columns.py #
# #
# PURPOSE: This program is designed to illustrate how to use the ibm_db.special_columns() API. #
# #
# Additional APIs used: #
# ibm_db.fetch_assoc() #
# #
# USAGE: Log in as a Db2 database instance user (for example, db2inst1) and issue the #
# following command from a terminal window: #
# #
# ./ibm_db-special_columns.py #
# #
#-------------------------------------------------------------------------------------------------#
# DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY #
# #
# (C) COPYRIGHT International Business Machines Corp. 2018, 2019 All Rights Reserved #
# Licensed Materials - Property of IBM #
# #
# US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP #
# Schedule Contract with IBM Corp. #
# #
# The following source code ("Sample") is owned by International Business Machines Corporation #
# or one of its subsidiaries ("IBM") and is copyrighted and licensed, not sold. You may use, #
# copy, modify, and distribute the Sample in any form without payment to IBM, for the purpose #
# of assisting you in the creation of Python applications using the ibm_db library. #
# #
# The Sample code is provided to you on an "AS IS" basis, without warranty of any kind. IBM #
# HEREBY EXPRESSLY DISCLAIMS ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. #
# Some jurisdictions do not allow for the exclusion or limitation of implied warranties, so the #
# above limitations or exclusions may not apply to you. IBM shall not be liable for any damages #
# you suffer as a result of using, copying, modifying or distributing the Sample, even if IBM #
# has been advised of the possibility of such damages. #
#-------------------------------------------------------------------------------------------------#
# Load The Appropriate Python Modules
import sys # Provides Information About Python Interpreter Constants, Functions, & Methods
import ibm_db # Contains The APIs Needed To Work With Db2 Databases
#-------------------------------------------------------------------------------------------------#
# Import The Db2ConnectionMgr Class Definition, Attributes, And Methods That Have Been Defined #
# In The File Named "ibm_db_tools.py"; This Class Contains The Programming Logic Needed To #
# Establish And Terminate A Connection To A Db2 Server Or Database #
#-------------------------------------------------------------------------------------------------#
from ibm_db_tools import Db2ConnectionMgr
#-------------------------------------------------------------------------------------------------#
# Import The ipynb_exit Class Definition, Attributes, And Methods That Have Been Defined In The #
# File Named "ipynb_exit.py"; This Class Contains The Programming Logic Needed To Allow "exit()" #
# Functionality To Work Without Raising An Error Or Stopping The Kernel If The Application Is #
# Invoked In A Jupyter Notebook #
#-------------------------------------------------------------------------------------------------#
from ipynb_exit import exit
# Define And Initialize The Appropriate Variables
dbName = "SAMPLE"
userID = "db2inst1"
passWord = "Passw0rd"
dbConnection = None
schemaName = userID.upper()
tableName = "EMPLOYEE"
resultSet = False
dataRecord = False
sqlDataTypes = {0 : "SQL_UNKNOWN_TYPE", 1 : "SQL_CHAR", 2 : "SQL_NUMERIC", 3 : "SQL_DECIMAL",
4 : "SQL_INTEGER", 5 : "SQL_SMALLINT", 6 : "SQL_FLOAT", 7 : "SQL_REAL", 8 : "SQL_DOUBLE",
9 : "SQL_DATETIME", 12 : "SQL_VARCHAR", 16 : "SQL_BOOLEAN", 19 : "SQL_ROW",
91 : "SQL_TYPE_DATE", 92 : "SQL_TYPE_TIME", 93 : "SQL_TYPE_TIMESTAMP",
95 : "SQL_TYPE_TIMESTAMP_WITH_TIMEZONE", -8 : "SQL_WCHAR", -9 : "SQL_WVARCHAR",
-10 : "SQL_WLONGVARCHAR", -95 : "SQL_GRAPHIC", -96 : "SQL_VARGRAPHIC",
-97 : "SQL_LONGVARGRAPHIC", -98 : "SQL_BLOB", -99 : "SQL_CLOB", -350 : "SQL_DBCLOB",
-360 : "SQL_DECFLOAT", -370 : "SQL_XML", -380 : "SQL_CURSORHANDLE", -400 : "SQL_DATALINK",
-450 : "SQL_USER_DEFINED_TYPE"}
# Create An Instance Of The Db2ConnectionMgr Class And Use It To Connect To A Db2 Database
conn = Db2ConnectionMgr('DB', dbName, '', '', userID, passWord)
conn.openConnection()
if conn.returnCode is True:
dbConnection = conn.connectionID
else:
conn.closeConnection()
exit(-1)
# Attempt To Retrieve Information About Unique Row Identifier Columns That Have Been
# Defined For The Table Specified
print("Obtaining information about unique row identifier columns that have been")
print("defined for the " + schemaName + "." + tableName + " table ... ", end="")
try:
resultSet = ibm_db.special_columns(dbConnection, None, schemaName, tableName, 0)
except Exception:
pass
# If The Information Desired Could Not Be Retrieved, Display An Error Message And Exit
if resultSet is False:
print("\nERROR: Unable to obtain the information desired\n.")
conn.closeConnection()
exit(-1)
# Otherwise, Complete The Status Message
else:
print("Done!\n")
# As Long As There Are Records (That Were Produced By The ibm_db.special_columns API), ...
noData = False
loopCounter = 1
while noData is False:
# Retrieve A Record And Store It In A Python Dictionary
try:
dataRecord = ibm_db.fetch_assoc(resultSet)
except:
pass
# If The Data Could Not Be Retrieved Or If There Was No Data To Retrieve, Set The
# "No Data" Flag And Exit The Loop
if dataRecord is False:
noData = True
# Otherwise, Display The Information Retrieved
else:
# Display Record Header Information
print("Primary key/unique index " + str(loopCounter) + " details:")
print("___________________________________")
# Display The Information Stored In The Data Record Retrieved
print("Column name : {}" .format(dataRecord['COLUMN_NAME']))
print("Data type : {}" .format(dataRecord['TYPE_NAME']))
print("SQL data type : ", end="")
print(sqlDataTypes.get(dataRecord['DATA_TYPE']))
print("Column size : {}" .format(dataRecord['COLUMN_SIZE']))
print("Buffer size : {}" .format(dataRecord['BUFFER_LENGTH']))
print("Scale (decimal digits) : {}" .format(dataRecord['DECIMAL_DIGITS']))
print("Scope : ", end="")
if dataRecord['SCOPE'] == 0:
print("Row ID is valid only while the\n" + " " * 25, end="")
print("cursor is positioned on the row")
elif dataRecord['SCOPE'] == 1:
print("Row ID is valid for the\n" + " " * 25, end="")
print("duration of the transaction")
elif dataRecord['SCOPE'] == 2:
print("Row ID is valid for the\n" + " " * 25, end="")
print("duration of the connection")
# Increment The loopCounter Variable And Print A Blank Line To Separate The
# Records From Each Other
loopCounter += 1
print()
# Close The Database Connection That Was Opened Earlier
conn.closeConnection()
# Return Control To The Operating System
exit()
| 56.462025 | 99 | 0.512723 |
import sys
import ibm_db
from ibm_db_tools import Db2ConnectionMgr
from ipynb_exit import exit
dbName = "SAMPLE"
userID = "db2inst1"
passWord = "Passw0rd"
dbConnection = None
schemaName = userID.upper()
tableName = "EMPLOYEE"
resultSet = False
dataRecord = False
sqlDataTypes = {0 : "SQL_UNKNOWN_TYPE", 1 : "SQL_CHAR", 2 : "SQL_NUMERIC", 3 : "SQL_DECIMAL",
4 : "SQL_INTEGER", 5 : "SQL_SMALLINT", 6 : "SQL_FLOAT", 7 : "SQL_REAL", 8 : "SQL_DOUBLE",
9 : "SQL_DATETIME", 12 : "SQL_VARCHAR", 16 : "SQL_BOOLEAN", 19 : "SQL_ROW",
91 : "SQL_TYPE_DATE", 92 : "SQL_TYPE_TIME", 93 : "SQL_TYPE_TIMESTAMP",
95 : "SQL_TYPE_TIMESTAMP_WITH_TIMEZONE", -8 : "SQL_WCHAR", -9 : "SQL_WVARCHAR",
-10 : "SQL_WLONGVARCHAR", -95 : "SQL_GRAPHIC", -96 : "SQL_VARGRAPHIC",
-97 : "SQL_LONGVARGRAPHIC", -98 : "SQL_BLOB", -99 : "SQL_CLOB", -350 : "SQL_DBCLOB",
-360 : "SQL_DECFLOAT", -370 : "SQL_XML", -380 : "SQL_CURSORHANDLE", -400 : "SQL_DATALINK",
-450 : "SQL_USER_DEFINED_TYPE"}
conn = Db2ConnectionMgr('DB', dbName, '', '', userID, passWord)
conn.openConnection()
if conn.returnCode is True:
dbConnection = conn.connectionID
else:
conn.closeConnection()
exit(-1)
print("Obtaining information about unique row identifier columns that have been")
print("defined for the " + schemaName + "." + tableName + " table ... ", end="")
try:
resultSet = ibm_db.special_columns(dbConnection, None, schemaName, tableName, 0)
except Exception:
pass
if resultSet is False:
print("\nERROR: Unable to obtain the information desired\n.")
conn.closeConnection()
exit(-1)
else:
print("Done!\n")
noData = False
loopCounter = 1
while noData is False:
try:
dataRecord = ibm_db.fetch_assoc(resultSet)
except:
pass
if dataRecord is False:
noData = True
else:
print("Primary key/unique index " + str(loopCounter) + " details:")
print("___________________________________")
print("Column name : {}" .format(dataRecord['COLUMN_NAME']))
print("Data type : {}" .format(dataRecord['TYPE_NAME']))
print("SQL data type : ", end="")
print(sqlDataTypes.get(dataRecord['DATA_TYPE']))
print("Column size : {}" .format(dataRecord['COLUMN_SIZE']))
print("Buffer size : {}" .format(dataRecord['BUFFER_LENGTH']))
print("Scale (decimal digits) : {}" .format(dataRecord['DECIMAL_DIGITS']))
print("Scope : ", end="")
if dataRecord['SCOPE'] == 0:
print("Row ID is valid only while the\n" + " " * 25, end="")
print("cursor is positioned on the row")
elif dataRecord['SCOPE'] == 1:
print("Row ID is valid for the\n" + " " * 25, end="")
print("duration of the transaction")
elif dataRecord['SCOPE'] == 2:
print("Row ID is valid for the\n" + " " * 25, end="")
print("duration of the connection")
loopCounter += 1
print()
conn.closeConnection()
exit()
| true | true |
f72768f88bd6b9a784e985befa28896108619edb | 3,690 | py | Python | tests/integration/test_unsteady_ring_vortex_lattice_method_static_geometry.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | tests/integration/test_unsteady_ring_vortex_lattice_method_static_geometry.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | tests/integration/test_unsteady_ring_vortex_lattice_method_static_geometry.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | """This is a testing case for the unsteady ring vortex lattice method solver with
static geometry.
Based on an equivalent XFLR5 testing case, the expected output for this case is:
CL: 0.588
CDi: 0.011
Cm: -0.197
Note: The expected output was created using XFLR5's inviscid VLM2 analysis type,
which is a ring vortex lattice method solver. The geometry in this case is static.
Therefore the results of this unsteady solver should converge to be close to XFLR5's
static result.
This module contains the following classes:
TestUnsteadyRingVortexLatticeMethodStaticGeometry: This is a class for testing
the unsteady ring vortex lattice method solver on static geometry.
This module contains the following exceptions:
None
This module contains the following functions:
None
"""
import unittest
import pterasoftware as ps
from tests.integration.fixtures import solver_fixtures
class TestUnsteadyRingVortexLatticeMethodStaticGeometry(unittest.TestCase):
"""This is a class for testing the unsteady ring vortex lattice method solver on
static geometry.
This class contains the following public methods:
setUp: This method sets up the test.
tearDown: This method tears down the test.
test_method: This method tests the solver's output.
This class contains the following class attributes:
None
Subclassing:
This class is not meant to be subclassed.
"""
def setUp(self):
"""This method sets up the test.
:return: None
"""
# Create the unsteady method solver.
self.unsteady_ring_vortex_lattice_method_validation_solver = (
solver_fixtures.make_unsteady_ring_vortex_lattice_method_validation_solver_with_static_geometry()
)
def tearDown(self):
"""This method tears down the test.
:return: None
"""
del self.unsteady_ring_vortex_lattice_method_validation_solver
def test_method(self):
"""This method tests the solver's output.
:return: None
"""
# Run the solver.
self.unsteady_ring_vortex_lattice_method_validation_solver.run(
prescribed_wake=True
)
this_solver = self.unsteady_ring_vortex_lattice_method_validation_solver
this_airplane = this_solver.current_airplanes[0]
# Calculate the percent errors of the output.
c_di_expected = 0.011
c_di_calculated = this_airplane.total_near_field_force_coefficients_wind_axes[0]
c_di_error = abs(c_di_calculated - c_di_expected) / c_di_expected
c_l_expected = 0.588
c_l_calculated = this_airplane.total_near_field_force_coefficients_wind_axes[2]
c_l_error = abs(c_l_calculated - c_l_expected) / c_l_expected
c_m_expected = -0.197
c_m_calculated = this_airplane.total_near_field_moment_coefficients_wind_axes[1]
c_m_error = abs(c_m_calculated - c_m_expected) / c_m_expected
# Set the allowable percent error.
allowable_error = 0.10
ps.output.animate(
unsteady_solver=self.unsteady_ring_vortex_lattice_method_validation_solver,
show_wake_vortices=True,
show_delta_pressures=True,
keep_file=False,
)
ps.output.plot_results_versus_time(
unsteady_solver=self.unsteady_ring_vortex_lattice_method_validation_solver
)
# Assert that the percent errors are less than the allowable error.
self.assertTrue(abs(c_di_error) < allowable_error)
self.assertTrue(abs(c_l_error) < allowable_error)
self.assertTrue(abs(c_m_error) < allowable_error)
| 32.946429 | 109 | 0.712737 | import unittest
import pterasoftware as ps
from tests.integration.fixtures import solver_fixtures
class TestUnsteadyRingVortexLatticeMethodStaticGeometry(unittest.TestCase):
def setUp(self):
self.unsteady_ring_vortex_lattice_method_validation_solver = (
solver_fixtures.make_unsteady_ring_vortex_lattice_method_validation_solver_with_static_geometry()
)
def tearDown(self):
del self.unsteady_ring_vortex_lattice_method_validation_solver
def test_method(self):
self.unsteady_ring_vortex_lattice_method_validation_solver.run(
prescribed_wake=True
)
this_solver = self.unsteady_ring_vortex_lattice_method_validation_solver
this_airplane = this_solver.current_airplanes[0]
c_di_expected = 0.011
c_di_calculated = this_airplane.total_near_field_force_coefficients_wind_axes[0]
c_di_error = abs(c_di_calculated - c_di_expected) / c_di_expected
c_l_expected = 0.588
c_l_calculated = this_airplane.total_near_field_force_coefficients_wind_axes[2]
c_l_error = abs(c_l_calculated - c_l_expected) / c_l_expected
c_m_expected = -0.197
c_m_calculated = this_airplane.total_near_field_moment_coefficients_wind_axes[1]
c_m_error = abs(c_m_calculated - c_m_expected) / c_m_expected
allowable_error = 0.10
ps.output.animate(
unsteady_solver=self.unsteady_ring_vortex_lattice_method_validation_solver,
show_wake_vortices=True,
show_delta_pressures=True,
keep_file=False,
)
ps.output.plot_results_versus_time(
unsteady_solver=self.unsteady_ring_vortex_lattice_method_validation_solver
)
self.assertTrue(abs(c_di_error) < allowable_error)
self.assertTrue(abs(c_l_error) < allowable_error)
self.assertTrue(abs(c_m_error) < allowable_error)
| true | true |
f7276a118dd544956c6a75767571b4963130bb32 | 192 | py | Python | setup.py | ericlee0803/lookahead_release | 373295f11be81d82b1c69eeadeec32ae96f26b1f | [
"MIT"
] | 3 | 2020-06-17T20:25:12.000Z | 2020-11-24T17:21:59.000Z | setup.py | ericlee0803/lookahead_release | 373295f11be81d82b1c69eeadeec32ae96f26b1f | [
"MIT"
] | null | null | null | setup.py | ericlee0803/lookahead_release | 373295f11be81d82b1c69eeadeec32ae96f26b1f | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='lookahead',
version='0.0.2',
packages=find_packages(),
install_requires=['scikit-learn', 'scipy', 'numpy', 'qmcpy']
)
| 21.333333 | 64 | 0.671875 | from setuptools import setup, find_packages
setup(
name='lookahead',
version='0.0.2',
packages=find_packages(),
install_requires=['scikit-learn', 'scipy', 'numpy', 'qmcpy']
)
| true | true |
f7276a2c611535e6562f2d0b10dcd978082fd085 | 10,047 | py | Python | gabriel/server/gabriel3/proxy/common.py | lee4138/6d-pose-estimation-with-ml-in-ar | e29162c82c867d4a8177322d7d49a55c5fd90639 | [
"MIT"
] | 7 | 2020-02-04T10:58:58.000Z | 2021-11-26T07:37:22.000Z | gabriel/server/gabriel3/proxy/common.py | buaafw/6d-pose-estimation-with-ml-in-ar | e29162c82c867d4a8177322d7d49a55c5fd90639 | [
"MIT"
] | 1 | 2021-02-19T03:56:10.000Z | 2021-02-19T03:56:10.000Z | gabriel/server/gabriel3/proxy/common.py | buaafw/6d-pose-estimation-with-ml-in-ar | e29162c82c867d4a8177322d7d49a55c5fd90639 | [
"MIT"
] | 8 | 2019-12-05T10:05:36.000Z | 2021-01-27T14:09:53.000Z | #!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
# Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import multiprocessing
import queue
import select
import socket
import struct
import sys
import threading
import time
import traceback
import gabriel3
LOG = gabriel3.logging.getLogger(__name__)
class ProxyError(Exception):
    """Raised for errors originating inside the Gabriel proxy layer."""
class SensorReceiveClient(gabriel3.network.CommonClient):
    """Pulls sensor frames from the control server as fast as possible.

    Each received (header, data) pair is pushed onto ``output_queue`` for a
    consumer thread (e.g. CognitiveProcessThread) to process.
    """

    def __init__(self, control_addr, output_queue):
        gabriel3.network.CommonClient.__init__(self, control_addr)
        self.output_queue = output_queue

    def __repr__(self):
        return "Sensor Receive Client"

    def _handle_input_data(self):
        # Wire format: 4-byte header length, 4-byte data length, header, data.
        (header_size,) = struct.unpack("!I", self._recv_all(4))
        (data_size,) = struct.unpack("!I", self._recv_all(4))
        header_json = json.loads(self._recv_all(header_size))
        data = self._recv_all(data_size)

        # Timestamp arrival for end-to-end latency measurement.
        if gabriel3.Debug.TIME_MEASUREMENT:
            header_json[gabriel3.Protocol_measurement.JSON_KEY_APP_RECV_TIME] = time.time()

        # Token-style buffering: when the queue is full, drop the oldest
        # frame so the newest one is never stalled behind stale data.
        if self.output_queue.full():
            try:
                self.output_queue.get_nowait()
            except queue.Empty:
                pass
        self.output_queue.put((header_json, data))
class CognitiveProcessThread(threading.Thread):
    """Worker thread that runs the real cognitive processing.

    Consumes (header, data) pairs from ``data_queue`` and publishes
    (header_json_str, result) pairs to ``output_queue``.  A concrete
    cognitive engine subclasses this and overrides :meth:`handle`.
    """

    def __init__(self, data_queue, output_queue, engine_id=None):
        self.data_queue = data_queue
        self.output_queue = output_queue
        self.engine_id = engine_id
        self.stop = threading.Event()
        threading.Thread.__init__(self, target=self.run)

    def __repr__(self):
        return "Cognitive Processing Thread"

    def run(self):
        # Poll until terminate() is called; short waits keep shutdown prompt.
        while not self.stop.wait(0.0001):
            try:
                header, data = self.data_queue.get(timeout=0.0001)
            except queue.Empty:
                continue
            if header is None or data is None:
                LOG.warning("header or data in data_queue is not valid!")
                continue

            # The real processing.  The header (a JSON dict) may be mutated
            # directly by the engine -- a design choice kept for backward
            # compatibility.  A None result marks the frame as useless.
            result = self.handle(header, data)
            if result is None:
                continue

            # Tag the (possibly mutated) header and publish the result.
            header[gabriel3.Protocol_client.JSON_KEY_ENGINE_ID] = self.engine_id
            if gabriel3.Debug.TIME_MEASUREMENT:
                header[gabriel3.Protocol_measurement.JSON_KEY_APP_SENT_TIME] = time.time()
            self.output_queue.put((json.dumps(header), result))

        LOG.info("[TERMINATE] Finish %s" % str(self))

    def handle(self, header, data):
        # Default engine does nothing; subclasses implement real processing.
        # header is a JSON-compatible dict.
        return None

    def terminate(self):
        self.stop.set()
class MasterProxyThread(threading.Thread):
    """Thread that distributes incoming data to multiple worker threads.

    Similar to :class:`CognitiveProcessThread`, it takes input from
    ``data_queue``, but subclasses override :meth:`handle` to decide where
    each (header, data) pair is routed instead of producing a result.
    """

    def __init__(self, data_queue, engine_id=None):
        self.data_queue = data_queue
        self.engine_id = engine_id
        self.stop = threading.Event()
        threading.Thread.__init__(self, target=self.run)

    def __repr__(self):
        # BUGFIX: previously returned "Cognitive Processing Thread"
        # (copy-paste), which made termination log lines misleading.
        return "Master Proxy Thread"

    def run(self):
        # Poll until terminate() is called; short waits keep shutdown prompt.
        while not self.stop.wait(0.0001):
            try:
                (header, data) = self.data_queue.get(timeout=0.0001)
                if header is None or data is None:
                    LOG.warning("header or data in data_queue is not valid!")
                    continue
            except queue.Empty:
                continue

            # The real distribution logic; header is a JSON-compatible dict.
            self.handle(header, data)

        LOG.info("[TERMINATE] Finish %s" % str(self))

    def handle(self, header, data):
        # Subclasses implement the routing decision; header is a JSON dict.
        pass

    def terminate(self):
        self.stop.set()
class ResultPublishClient(gabriel3.network.CommonClient):
    """
    This client will publish processed result from @data_queue to the ucomm server.
    """
    def __init__(self, ucomm_addr, data_queue, log_flag = True):
        gabriel3.network.CommonClient.__init__(self, ucomm_addr)
        self.data_queue = data_queue
        if not log_flag:
            # Caller asked for silence: raise the logger threshold above CRITICAL.
            import logging
            LOG.setLevel(logging.CRITICAL + 1)
    def __repr__(self):
        return "Result Publish Client"
    def _handle_queue_data(self):
        """Forward one pending (header, data) result to the ucomm server, if any."""
        try:
            header_str, body = self.data_queue.get(timeout = 0.0001)
        except queue.Empty:
            return
        header_len = len(header_str)
        body_len = len(body)
        # packet format: total size, header size, header, data
        fmt = "!II{}s{}s".format(header_len, body_len)
        packet = struct.pack(fmt,
                             header_len + body_len,
                             header_len,
                             str.encode(header_str),
                             str.encode(body))
        self.sock.sendall(packet)
        LOG.info("sending result to ucomm: %s" % gabriel3.util.print_rtn(json.loads(header_str)))
class DataPublishHandler(gabriel3.network.CommonHandler):
    '''
    Per-connection handler that streams (header, data) items to one offloading
    engine and relays the engine's results into the server's output queue.
    '''
    def setup(self):
        """Handshake: read the engine name, register it, reply with its seq number."""
        LOG.info("New receiver connected to data stream")
        super(DataPublishHandler, self).setup()
        self.data_queue = multiprocessing.Queue(gabriel3.Const.MAX_FRAME_SIZE)
        # receive engine name (length-prefixed with a network-order uint32)
        data_size = struct.unpack("!I", self._recv_all(4))[0]
        self.engine_id = self._recv_all(data_size)
        LOG.info("Got engine name: %s" % self.engine_id)
        self.engine_number = self._register_engine(self.data_queue, self.engine_id)
        # send engine sequence number back
        packet = struct.pack("!I", self.engine_number)
        self.request.send(packet)
        self.wfile.flush()
    def __repr__(self):
        return "Data Publish Server"
    def _handle_queue_data(self):
        """Send one queued item to the engine, then read back and forward its result."""
        try:
            (header, data) = self.data_queue.get(timeout = 0.0001)
            header_str = json.dumps(header)
            # send data: header size, data size, header, data
            packet = struct.pack("!II%ds%ds" % (len(header_str), len(data)), len(header_str), len(data), header_str, data)
            self.request.send(packet)
            self.wfile.flush()
            # receive result (length-prefixed header JSON, then length-prefixed state)
            header_size = struct.unpack("!I", self._recv_all(4))[0]
            header_str = self._recv_all(header_size)
            header = json.loads(header_str)
            state_size = struct.unpack("!I", self._recv_all(4))[0]
            state = self._recv_all(state_size)
            # Tag the result with this engine's identity before publishing.
            header[gabriel3.Protocol_client.JSON_KEY_ENGINE_ID] = self.engine_id
            header[gabriel3.Protocol_client.JSON_KEY_ENGINE_NUMBER] = self.engine_number
            try:
                self.server.output_queue.put_nowait( (header, state) )
            except queue.Full as e:
                LOG.error("%s: output queue shouldn't be full" % self)
        except queue.Empty as e:
            pass
    def _register_engine(self, queue, engine_id):
        '''
        Registers the new engine.
        The data server will publish data to only one engine with the same @engine_id.
        Returns the seq number of current engine among all engines that share the same @engine_id.
        '''
        engine_info = self.server.queue_dict.get(engine_id, None)
        if engine_info is None:
            self.server.queue_dict[engine_id] = {'queues': [queue], 'tokens': [1]}
            return 0
        else:
            engine_info['queues'].append(queue)
            engine_info['tokens'].append(1)
            return len(engine_info['queues']) - 1
    def _unregister_engine(self, queue, engine_id):
        #TODO
        pass
    def terminate(self):
        LOG.info("Offloading engine disconnected from video stream")
        # Bug fix: `engine_id` was a bare undefined name here, raising NameError
        # on every disconnect; use the id captured in setup().
        self._unregister_engine(self.data_queue, self.engine_id)
        super(DataPublishHandler, self).terminate()
class DataPublishServer(gabriel3.network.CommonServer):
    # TCP server accepting offloading-engine connections; each connection is
    # served by @handler (DataPublishHandler), which reaches @queue_dict and
    # @output_queue through `self.server`.
    def __init__(self, port, handler, queue_dict, output_queue):
        gabriel3.network.CommonServer.__init__(self, port, handler) # cannot use super because it's old style class
        LOG.info("* Data publish server(%s) configuration" % str(self.handler))
        LOG.info(" - Open TCP Server at %s" % (str(self.server_address)))
        LOG.info(" - Disable nagle (No TCP delay) : %s" %
                 str(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)))
        LOG.info("-" * 50)
        # engine_id -> {'queues': [...], 'tokens': [...]}; shared with handlers.
        self.queue_dict = queue_dict
        self.output_queue = output_queue
    def terminate(self):
        gabriel3.network.CommonServer.terminate(self)
| 36.270758 | 159 | 0.643973 |
import json
import multiprocessing
import queue
import select
import socket
import struct
import sys
import threading
import time
import traceback
import gabriel3
LOG = gabriel3.logging.getLogger(__name__)
class ProxyError(Exception):
pass
class SensorReceiveClient(gabriel3.network.CommonClient):
def __init__(self, control_addr, output_queue):
gabriel3.network.CommonClient.__init__(self, control_addr)
self.output_queue = output_queue
def __repr__(self):
return "Sensor Receive Client"
def _handle_input_data(self):
header_size = struct.unpack("!I", self._recv_all(4))[0]
data_size = struct.unpack("!I", self._recv_all(4))[0]
header_str = self._recv_all(header_size)
data = self._recv_all(data_size)
header_json = json.loads(header_str)
if gabriel3.Debug.TIME_MEASUREMENT:
header_json[gabriel3.Protocol_measurement.JSON_KEY_APP_RECV_TIME] = time.time()
if self.output_queue.full():
try:
self.output_queue.get_nowait()
except queue.Empty as e:
pass
self.output_queue.put((header_json, data))
class CognitiveProcessThread(threading.Thread):
def __init__(self, data_queue, output_queue, engine_id = None):
self.data_queue = data_queue
self.output_queue = output_queue
self.engine_id = engine_id
self.stop = threading.Event()
threading.Thread.__init__(self, target = self.run)
def __repr__(self):
return "Cognitive Processing Thread"
def run(self):
while(not self.stop.wait(0.0001)):
try:
(header, data) = self.data_queue.get(timeout = 0.0001)
if header is None or data is None:
LOG.warning("header or data in data_queue is not valid!")
continue
except queue.Empty as e:
continue
result = self.handle(header, data)
if result is None:
continue
            rtn_json = header
            rtn_json[gabriel3.Protocol_client.JSON_KEY_ENGINE_ID] = self.engine_id
if gabriel3.Debug.TIME_MEASUREMENT:
rtn_json[gabriel3.Protocol_measurement.JSON_KEY_APP_SENT_TIME] = time.time()
self.output_queue.put( (json.dumps(rtn_json), result) )
LOG.info("[TERMINATE] Finish %s" % str(self))
def handle(self, header, data):
return None
def terminate(self):
self.stop.set()
class MasterProxyThread(threading.Thread):
def __init__(self, data_queue, engine_id = None):
self.data_queue = data_queue
self.engine_id = engine_id
self.stop = threading.Event()
threading.Thread.__init__(self, target = self.run)
def __repr__(self):
return "Cognitive Processing Thread"
def run(self):
while(not self.stop.wait(0.0001)):
try:
(header, data) = self.data_queue.get(timeout = 0.0001)
if header is None or data is None:
LOG.warning("header or data in data_queue is not valid!")
continue
except queue.Empty as e:
continue
            self.handle(header, data)
LOG.info("[TERMINATE] Finish %s" % str(self))
def handle(self, header, data):
pass
def terminate(self):
self.stop.set()
class ResultPublishClient(gabriel3.network.CommonClient):
def __init__(self, ucomm_addr, data_queue, log_flag = True):
gabriel3.network.CommonClient.__init__(self, ucomm_addr)
self.data_queue = data_queue
if not log_flag:
import logging
LOG.setLevel(logging.CRITICAL + 1)
def __repr__(self):
return "Result Publish Client"
def _handle_queue_data(self):
try:
rtn_header, rtn_data = self.data_queue.get(timeout = 0.0001)
total_size = len(rtn_header) + len(rtn_data)
packet = struct.pack("!II{}s{}s".format(len(rtn_header), len(rtn_data)), total_size, len(rtn_header), str.encode(rtn_header), str.encode(rtn_data))
self.sock.sendall(packet)
LOG.info("sending result to ucomm: %s" % gabriel3.util.print_rtn(json.loads(rtn_header)))
except queue.Empty as e:
pass
class DataPublishHandler(gabriel3.network.CommonHandler):
def setup(self):
LOG.info("New receiver connected to data stream")
super(DataPublishHandler, self).setup()
self.data_queue = multiprocessing.Queue(gabriel3.Const.MAX_FRAME_SIZE)
data_size = struct.unpack("!I", self._recv_all(4))[0]
self.engine_id = self._recv_all(data_size)
LOG.info("Got engine name: %s" % self.engine_id)
self.engine_number = self._register_engine(self.data_queue, self.engine_id)
packet = struct.pack("!I", self.engine_number)
self.request.send(packet)
self.wfile.flush()
def __repr__(self):
return "Data Publish Server"
def _handle_queue_data(self):
try:
(header, data) = self.data_queue.get(timeout = 0.0001)
header_str = json.dumps(header)
packet = struct.pack("!II%ds%ds" % (len(header_str), len(data)), len(header_str), len(data), header_str, data)
self.request.send(packet)
self.wfile.flush()
header_size = struct.unpack("!I", self._recv_all(4))[0]
header_str = self._recv_all(header_size)
header = json.loads(header_str)
state_size = struct.unpack("!I", self._recv_all(4))[0]
state = self._recv_all(state_size)
header[gabriel3.Protocol_client.JSON_KEY_ENGINE_ID] = self.engine_id
header[gabriel3.Protocol_client.JSON_KEY_ENGINE_NUMBER] = self.engine_number
try:
self.server.output_queue.put_nowait( (header, state) )
except queue.Full as e:
LOG.error("%s: output queue shouldn't be full" % self)
except queue.Empty as e:
pass
def _register_engine(self, queue, engine_id):
engine_info = self.server.queue_dict.get(engine_id, None)
if engine_info is None:
self.server.queue_dict[engine_id] = {'queues': [queue], 'tokens': [1]}
return 0
else:
engine_info['queues'].append(queue)
engine_info['tokens'].append(1)
return len(engine_info['queues']) - 1
def _unregister_engine(self, queue, engine_id):
#TODO
pass
def terminate(self):
LOG.info("Offloading engine disconnected from video stream")
self._unregister_engine(self.data_queue, engine_id)
super(DataPublishHandler, self).terminate()
class DataPublishServer(gabriel3.network.CommonServer):
def __init__(self, port, handler, queue_dict, output_queue):
gabriel3.network.CommonServer.__init__(self, port, handler) # cannot use super because it's old style class
LOG.info("* Data publish server(%s) configuration" % str(self.handler))
LOG.info(" - Open TCP Server at %s" % (str(self.server_address)))
LOG.info(" - Disable nagle (No TCP delay) : %s" %
str(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)))
LOG.info("-" * 50)
self.queue_dict = queue_dict
self.output_queue = output_queue
def terminate(self):
gabriel3.network.CommonServer.terminate(self)
| true | true |
f7276a69899ad99ba6680fb0b931907cb13b39e3 | 2,402 | py | Python | src/wormhole/test/dilate/test_parse.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 2,801 | 2021-01-10T16:37:14.000Z | 2022-03-31T19:02:50.000Z | src/wormhole/test/dilate/test_parse.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 52 | 2021-01-10T01:54:00.000Z | 2022-03-11T13:12:41.000Z | src/wormhole/test/dilate/test_parse.py | dmgolembiowski/magic-wormhole | d517a10282d5e56f300db462b1a6eec517202af7 | [
"MIT"
] | 106 | 2021-01-21T14:32:22.000Z | 2022-03-18T10:33:09.000Z | from __future__ import print_function, unicode_literals
import mock
from twisted.trial import unittest
from ..._dilation.connection import (parse_record, encode_record,
KCM, Ping, Pong, Open, Data, Close, Ack)
class Parse(unittest.TestCase):
    """Codec tests: parse_record/encode_record cover every dilation record type."""
    def test_parse(self):
        # Byte 0 of each record selects the type; the remaining bytes are payload.
        self.assertEqual(parse_record(b"\x00"), KCM())
        self.assertEqual(parse_record(b"\x01\x55\x44\x33\x22"),
                         Ping(ping_id=b"\x55\x44\x33\x22"))
        self.assertEqual(parse_record(b"\x02\x55\x44\x33\x22"),
                         Pong(ping_id=b"\x55\x44\x33\x22"))
        self.assertEqual(parse_record(b"\x03\x00\x00\x02\x01\x00\x00\x01\x00"),
                         Open(scid=513, seqnum=256))
        self.assertEqual(parse_record(b"\x04\x00\x00\x02\x02\x00\x00\x01\x01dataaa"),
                         Data(scid=514, seqnum=257, data=b"dataaa"))
        self.assertEqual(parse_record(b"\x05\x00\x00\x02\x03\x00\x00\x01\x02"),
                         Close(scid=515, seqnum=258))
        self.assertEqual(parse_record(b"\x06\x00\x00\x01\x03"),
                         Ack(resp_seqnum=259))
        # An unknown type byte must be logged and rejected with ValueError.
        with mock.patch("wormhole._dilation.connection.log.err") as le:
            with self.assertRaises(ValueError):
                parse_record(b"\x07unknown")
        self.assertEqual(le.mock_calls,
                         [mock.call("received unknown message type: {}".format(
                             b"\x07unknown"))])
    def test_encode(self):
        # Inverse direction: each record encodes to its tagged byte string.
        self.assertEqual(encode_record(KCM()), b"\x00")
        self.assertEqual(encode_record(Ping(ping_id=b"ping")), b"\x01ping")
        self.assertEqual(encode_record(Pong(ping_id=b"pong")), b"\x02pong")
        self.assertEqual(encode_record(Open(scid=65536, seqnum=16)),
                         b"\x03\x00\x01\x00\x00\x00\x00\x00\x10")
        self.assertEqual(encode_record(Data(scid=65537, seqnum=17, data=b"dataaa")),
                         b"\x04\x00\x01\x00\x01\x00\x00\x00\x11dataaa")
        self.assertEqual(encode_record(Close(scid=65538, seqnum=18)),
                         b"\x05\x00\x01\x00\x02\x00\x00\x00\x12")
        self.assertEqual(encode_record(Ack(resp_seqnum=19)),
                         b"\x06\x00\x00\x00\x13")
        # Non-record inputs raise TypeError carrying the offending value.
        with self.assertRaises(TypeError) as ar:
            encode_record("not a record")
        self.assertEqual(str(ar.exception), "not a record")
| 53.377778 | 85 | 0.594088 | from __future__ import print_function, unicode_literals
import mock
from twisted.trial import unittest
from ..._dilation.connection import (parse_record, encode_record,
KCM, Ping, Pong, Open, Data, Close, Ack)
class Parse(unittest.TestCase):
def test_parse(self):
self.assertEqual(parse_record(b"\x00"), KCM())
self.assertEqual(parse_record(b"\x01\x55\x44\x33\x22"),
Ping(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x02\x55\x44\x33\x22"),
Pong(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x03\x00\x00\x02\x01\x00\x00\x01\x00"),
Open(scid=513, seqnum=256))
self.assertEqual(parse_record(b"\x04\x00\x00\x02\x02\x00\x00\x01\x01dataaa"),
Data(scid=514, seqnum=257, data=b"dataaa"))
self.assertEqual(parse_record(b"\x05\x00\x00\x02\x03\x00\x00\x01\x02"),
Close(scid=515, seqnum=258))
self.assertEqual(parse_record(b"\x06\x00\x00\x01\x03"),
Ack(resp_seqnum=259))
with mock.patch("wormhole._dilation.connection.log.err") as le:
with self.assertRaises(ValueError):
parse_record(b"\x07unknown")
self.assertEqual(le.mock_calls,
[mock.call("received unknown message type: {}".format(
b"\x07unknown"))])
def test_encode(self):
self.assertEqual(encode_record(KCM()), b"\x00")
self.assertEqual(encode_record(Ping(ping_id=b"ping")), b"\x01ping")
self.assertEqual(encode_record(Pong(ping_id=b"pong")), b"\x02pong")
self.assertEqual(encode_record(Open(scid=65536, seqnum=16)),
b"\x03\x00\x01\x00\x00\x00\x00\x00\x10")
self.assertEqual(encode_record(Data(scid=65537, seqnum=17, data=b"dataaa")),
b"\x04\x00\x01\x00\x01\x00\x00\x00\x11dataaa")
self.assertEqual(encode_record(Close(scid=65538, seqnum=18)),
b"\x05\x00\x01\x00\x02\x00\x00\x00\x12")
self.assertEqual(encode_record(Ack(resp_seqnum=19)),
b"\x06\x00\x00\x00\x13")
with self.assertRaises(TypeError) as ar:
encode_record("not a record")
self.assertEqual(str(ar.exception), "not a record")
| true | true |
f7276c0f73e6ebdbc9164c77803a6a39d802249f | 4,350 | py | Python | tests/test_stream_xep_0047.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_stream_xep_0047.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_stream_xep_0047.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import threading
import time
import unittest
from slixmpp.test import SlixTest
class TestInBandByteStreams(SlixTest):
    # Protocol tests for XEP-0047 (In-Band Bytestreams) driven by SlixTest's
    # scripted send()/recv() stanza matching.
    def setUp(self):
        self.stream_start(plugins=['xep_0047', 'xep_0030'])
    def tearDown(self):
        self.stream_close()
    def testOpenStream(self):
        """Test requesting a stream, successfully"""
        events = []
        def on_stream_start(stream):
            events.append('ibb_stream_start')
        self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
        self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
                                          sid='testing')
        # The plugin must emit the <open> request...
        self.send("""
          <iq type="set" to="tester@localhost/receiver" id="1">
            <open xmlns="http://jabber.org/protocol/ibb"
                  sid="testing"
                  block-size="4096"
                  stanza="iq" />
          </iq>
        """)
        # ...and fire ibb_stream_start once the peer acknowledges.
        self.recv("""
          <iq type="result" id="1"
              to="tester@localhost"
              from="tester@localhost/receiver" />
        """)
        self.assertEqual(events, ['ibb_stream_start'])
    def testAysncOpenStream(self):
        """Test requesting a stream, aysnc"""
        events = set()
        def on_stream_start(stream):
            events.add('ibb_stream_start')
        def stream_callback(iq):
            events.add('callback')
        self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
        self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
                                          sid='testing',
                                          callback=stream_callback)
        self.send("""
          <iq type="set" to="tester@localhost/receiver" id="1">
            <open xmlns="http://jabber.org/protocol/ibb"
                  sid="testing"
                  block-size="4096"
                  stanza="iq" />
          </iq>
        """)
        self.recv("""
          <iq type="result" id="1"
              to="tester@localhost"
              from="tester@localhost/receiver" />
        """)
        # Both the event handler and the explicit callback must have fired.
        self.assertEqual(events, {'ibb_stream_start', 'callback'})
    async def testSendData(self):
        """Test sending data over an in-band bytestream."""
        streams = []
        data = []
        def on_stream_start(stream):
            streams.append(stream)
        def on_stream_data(d):
            data.append(d['data'])
        self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
        self.xmpp.add_event_handler('ibb_stream_data', on_stream_data)
        self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
                                          sid='testing')
        self.send("""
          <iq type="set" to="tester@localhost/receiver" id="1">
            <open xmlns="http://jabber.org/protocol/ibb"
                  sid="testing"
                  block-size="4096"
                  stanza="iq" />
          </iq>
        """)
        self.recv("""
          <iq type="result" id="1"
              to="tester@localhost"
              from="tester@localhost/receiver" />
        """)
        stream = streams[0]
        # Test sending data out
        await stream.send("Testing")
        # "Testing" base64-encodes to VGVzdGluZw==; sequence numbers start at 0.
        self.send("""
          <iq type="set" id="2"
              from="tester@localhost"
              to="tester@localhost/receiver">
            <data xmlns="http://jabber.org/protocol/ibb"
                  seq="0"
                  sid="testing">
              VGVzdGluZw==
            </data>
          </iq>
        """)
        self.recv("""
          <iq type="result" id="2"
              to="tester@localhost"
              from="tester@localhost/receiver" />
        """)
        # Test receiving data
        # aXQgd29ya3Mh is base64 for b'it works!'.
        self.recv("""
          <iq type="set" id="A"
              to="tester@localhost"
              from="tester@localhost/receiver">
            <data xmlns="http://jabber.org/protocol/ibb"
                  seq="0"
                  sid="testing">
              aXQgd29ya3Mh
            </data>
          </iq>
        """)
        self.send("""
          <iq type="result" id="A"
              to="tester@localhost/receiver" />
        """)
        self.assertEqual(data, [b'it works!'])
# Module-level suite consumed by the project's test runner.
suite = unittest.TestLoader().loadTestsFromTestCase(TestInBandByteStreams)
| 27.018634 | 74 | 0.50092 | import asyncio
import threading
import time
import unittest
from slixmpp.test import SlixTest
class TestInBandByteStreams(SlixTest):
def setUp(self):
self.stream_start(plugins=['xep_0047', 'xep_0030'])
def tearDown(self):
self.stream_close()
def testOpenStream(self):
events = []
def on_stream_start(stream):
events.append('ibb_stream_start')
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing')
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
self.assertEqual(events, ['ibb_stream_start'])
def testAysncOpenStream(self):
events = set()
def on_stream_start(stream):
events.add('ibb_stream_start')
def stream_callback(iq):
events.add('callback')
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing',
callback=stream_callback)
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
self.assertEqual(events, {'ibb_stream_start', 'callback'})
async def testSendData(self):
streams = []
data = []
def on_stream_start(stream):
streams.append(stream)
def on_stream_data(d):
data.append(d['data'])
self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)
self.xmpp.add_event_handler('ibb_stream_data', on_stream_data)
self.xmpp['xep_0047'].open_stream('tester@localhost/receiver',
sid='testing')
self.send("""
<iq type="set" to="tester@localhost/receiver" id="1">
<open xmlns="http://jabber.org/protocol/ibb"
sid="testing"
block-size="4096"
stanza="iq" />
</iq>
""")
self.recv("""
<iq type="result" id="1"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
stream = streams[0]
await stream.send("Testing")
self.send("""
<iq type="set" id="2"
from="tester@localhost"
to="tester@localhost/receiver">
<data xmlns="http://jabber.org/protocol/ibb"
seq="0"
sid="testing">
VGVzdGluZw==
</data>
</iq>
""")
self.recv("""
<iq type="result" id="2"
to="tester@localhost"
from="tester@localhost/receiver" />
""")
self.recv("""
<iq type="set" id="A"
to="tester@localhost"
from="tester@localhost/receiver">
<data xmlns="http://jabber.org/protocol/ibb"
seq="0"
sid="testing">
aXQgd29ya3Mh
</data>
</iq>
""")
self.send("""
<iq type="result" id="A"
to="tester@localhost/receiver" />
""")
self.assertEqual(data, [b'it works!'])
suite = unittest.TestLoader().loadTestsFromTestCase(TestInBandByteStreams)
| true | true |
f7276ccb81e55d56903c23dd551e1c32e13eedf8 | 402 | py | Python | Lab-assignment/A-2/digitalRoot.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | 2 | 2020-09-01T04:58:16.000Z | 2021-01-30T03:45:52.000Z | Lab-assignment/A-2/digitalRoot.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | null | null | null | Lab-assignment/A-2/digitalRoot.py | HembramBeta777/Python-Programming | 827611b0613d9d953d13fb04ea9b5c5ac3c510f2 | [
"BSD-3-Clause"
] | null | null | null | # PROGRAM: To find the digital root of an integer
# FILE: digitalRoot.py
# CREATED BY: Santosh Hembram
# DATED: 23-09-20
# Read the integer whose digital root (repeated digit sum) we want.
num = int(input("Enter an integer: "))
temp = num
# Work on the magnitude: with the original loop a negative num never reaches 0
# under floor division (num // 10 sticks at -1), so the program hung forever.
num = abs(num)
digit_sum = num  # renamed from `sum` to avoid shadowing the builtin
# Repeatedly collapse the number into the sum of its digits until < 10.
while digit_sum >= 10:
    digit_sum = 0
    while num != 0:
        digit_sum += num % 10
        num //= 10
    num = digit_sum
print("The digital root of ", temp, "is", digit_sum)
| 15.461538 | 50 | 0.517413 |
num = int(input("Enter an integer: "))
temp = num
sum = 10
while(sum>=10):
sum = 0
while(num!=0):
dg = num % 10
sum = sum + dg
num = num // 10
num = sum
print("The digital root of ",temp,"is",sum)
| true | true |
f7276d8b2cac2a3f653f6513bcab4a0e6a780d71 | 1,583 | py | Python | airflow/migrations/versions/0101_a3bcd0914482_add_data_compressed_to_serialized_dag.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/migrations/versions/0101_a3bcd0914482_add_data_compressed_to_serialized_dag.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/migrations/versions/0101_a3bcd0914482_add_data_compressed_to_serialized_dag.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add data_compressed to serialized_dag
Revision ID: a3bcd0914482
Revises: e655c0453f75
Create Date: 2022-02-03 22:40:59.841119
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a3bcd0914482'
down_revision = 'e655c0453f75'
branch_labels = None
depends_on = None
airflow_version = '2.3.0'
def upgrade():
    """Apply migration a3bcd0914482: make serialized_dag.data nullable and add
    the nullable data_compressed LargeBinary column."""
    with op.batch_alter_table('serialized_dag') as batch_op:
        batch_op.alter_column('data', existing_type=sa.JSON, nullable=True)
        batch_op.add_column(sa.Column('data_compressed', sa.LargeBinary, nullable=True))
def downgrade():
    """Revert migration a3bcd0914482: drop data_compressed and make
    serialized_dag.data NOT NULL again."""
    with op.batch_alter_table('serialized_dag') as batch_op:
        batch_op.alter_column('data', existing_type=sa.JSON, nullable=False)
        batch_op.drop_column('data_compressed')
| 32.979167 | 88 | 0.760581 |
import sqlalchemy as sa
from alembic import op
revision = 'a3bcd0914482'
down_revision = 'e655c0453f75'
branch_labels = None
depends_on = None
airflow_version = '2.3.0'
def upgrade():
with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=True)
batch_op.add_column(sa.Column('data_compressed', sa.LargeBinary, nullable=True))
def downgrade():
with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=False)
batch_op.drop_column('data_compressed')
| true | true |
f7276e95cabbe199d785db08683a32a5ff10b6c4 | 3,062 | py | Python | code/options.py | frizman04/language-style-transfer-python3 | 9110eb9d5b72d2926f805ac258915c0f1a369638 | [
"Apache-2.0"
] | 3 | 2018-07-11T07:41:58.000Z | 2022-02-10T09:34:32.000Z | code/options.py | frizman04/language-style-transfer-python3 | 9110eb9d5b72d2926f805ac258915c0f1a369638 | [
"Apache-2.0"
] | null | null | null | code/options.py | frizman04/language-style-transfer-python3 | 9110eb9d5b72d2926f805ac258915c0f1a369638 | [
"Apache-2.0"
] | null | null | null | import sys
import argparse
import pprint
def load_arguments():
    """Build, parse, pretty-print, and return the command-line options.

    Returns the argparse.Namespace holding data-path, training, model-size,
    and loss-weight settings (defaults shown on each flag below).
    """
    argparser = argparse.ArgumentParser(sys.argv[0])
    # --- data paths / IO ---
    argparser.add_argument('--train',
            type=str,
            default='')
    argparser.add_argument('--dev',
            type=str,
            default='')
    argparser.add_argument('--test',
            type=str,
            default='')
    argparser.add_argument('--online_testing',
            type=bool,
            default=False)
    argparser.add_argument('--output',
            type=str,
            default='')
    argparser.add_argument('--vocab',
            type=str,
            default='')
    argparser.add_argument('--embedding',
            type=str,
            default='')
    argparser.add_argument('--model',
            type=str,
            default='')
    argparser.add_argument('--load_model',
            type=bool,
            default=False)
    # --- training schedule ---
    argparser.add_argument('--batch_size',
            type=int,
            default=64)
    argparser.add_argument('--max_epochs',
            type=int,
            default=20)
    argparser.add_argument('--steps_per_checkpoint',
            type=int,
            default=1000)
    argparser.add_argument('--max_seq_length',
            type=int,
            default=20)
    argparser.add_argument('--max_train_size',
            type=int,
            default=-1)
    argparser.add_argument('--beam',
            type=int,
            default=1)
    argparser.add_argument('--dropout_keep_prob',
            type=float,
            default=0.5)
    # --- model dimensions ---
    argparser.add_argument('--n_layers',
            type=int,
            default=1)
    argparser.add_argument('--dim_y',
            type=int,
            default=200)
    argparser.add_argument('--dim_z',
            type=int,
            default=500)
    argparser.add_argument('--dim_emb',
            type=int,
            default=100)
    argparser.add_argument('--learning_rate',
            type=float,
            default=0.0001)
    #argparser.add_argument('--learning_rate_decay',
    #        type=float,
    #        default=0.5)
    # --- loss weights / annealing ---
    argparser.add_argument('--rho',                    # loss_g - rho * loss_d
            type=float,
            default=1)
    argparser.add_argument('--gamma_init',             # softmax(logit / gamma)
            type=float,
            default=1)
    argparser.add_argument('--gamma_decay',
            type=float,
            default=0.5)
    argparser.add_argument('--gamma_min',
            type=float,
            default=0.001)
    # --- CNN discriminator ---
    argparser.add_argument('--filter_sizes',
            type=str,
            default='3,4,5')
    argparser.add_argument('--n_filters',
            type=int,
            default=128)
    args = argparser.parse_args()
    # Echo the effective configuration for the run log.
    print('------------------------------------------------')
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(vars(args))
    print('------------------------------------------------')
    return args
| 29.728155 | 81 | 0.497061 | import sys
import argparse
import pprint
def load_arguments():
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument('--train',
type=str,
default='')
argparser.add_argument('--dev',
type=str,
default='')
argparser.add_argument('--test',
type=str,
default='')
argparser.add_argument('--online_testing',
type=bool,
default=False)
argparser.add_argument('--output',
type=str,
default='')
argparser.add_argument('--vocab',
type=str,
default='')
argparser.add_argument('--embedding',
type=str,
default='')
argparser.add_argument('--model',
type=str,
default='')
argparser.add_argument('--load_model',
type=bool,
default=False)
argparser.add_argument('--batch_size',
type=int,
default=64)
argparser.add_argument('--max_epochs',
type=int,
default=20)
argparser.add_argument('--steps_per_checkpoint',
type=int,
default=1000)
argparser.add_argument('--max_seq_length',
type=int,
default=20)
argparser.add_argument('--max_train_size',
type=int,
default=-1)
argparser.add_argument('--beam',
type=int,
default=1)
argparser.add_argument('--dropout_keep_prob',
type=float,
default=0.5)
argparser.add_argument('--n_layers',
type=int,
default=1)
argparser.add_argument('--dim_y',
type=int,
default=200)
argparser.add_argument('--dim_z',
type=int,
default=500)
argparser.add_argument('--dim_emb',
type=int,
default=100)
argparser.add_argument('--learning_rate',
type=float,
default=0.0001)
argparser.add_argument('--rho',
type=float,
default=1)
argparser.add_argument('--gamma_init',
type=float,
default=1)
argparser.add_argument('--gamma_decay',
type=float,
default=0.5)
argparser.add_argument('--gamma_min',
type=float,
default=0.001)
argparser.add_argument('--filter_sizes',
type=str,
default='3,4,5')
argparser.add_argument('--n_filters',
type=int,
default=128)
args = argparser.parse_args()
print('------------------------------------------------')
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(args))
print('------------------------------------------------')
return args
| true | true |
f7276ecb99f6a3382aea6076e38bd2ab95a340d1 | 239 | py | Python | tests/configs/fragment_config_overrides.py | aptdamia/jirahub | f12516254784db23367f96ad7e6bb1b127d9de3f | [
"BSD-3-Clause"
] | 10 | 2018-06-18T19:46:21.000Z | 2022-03-03T20:50:15.000Z | tests/configs/fragment_config_overrides.py | aptdamia/jirahub | f12516254784db23367f96ad7e6bb1b127d9de3f | [
"BSD-3-Clause"
] | 19 | 2018-04-13T15:01:51.000Z | 2022-01-20T21:21:15.000Z | tests/configs/fragment_config_overrides.py | aptdamia/jirahub | f12516254784db23367f96ad7e6bb1b127d9de3f | [
"BSD-3-Clause"
] | 9 | 2018-04-04T19:14:21.000Z | 2021-02-25T07:52:12.000Z | # This config is incomplete, but will specify all the required key
# when combined with fragment_config_base.py.
# NOTE(review): `c` is presumably the config object injected by the loading
# framework at exec time (it is not defined in this file) — confirm against
# the loader that executes these fragments.
c.jira.project_key = "TEST"
c.jira.max_retries = 7
c.jira.sync_milestones = False
c.github.repository = "testing/test-repo"
| 26.555556 | 66 | 0.774059 |
c.jira.project_key = "TEST"
c.jira.max_retries = 7
c.jira.sync_milestones = False
c.github.repository = "testing/test-repo"
| true | true |
f7276f21393a47035bbe973f09006fec1326f4ec | 4,735 | py | Python | members/amit/clf/data_generator_binary.py | Leofltt/rg_sound_generation | 8e79b4d9dce028def43284f80521a2ec61d0066c | [
"MIT"
] | null | null | null | members/amit/clf/data_generator_binary.py | Leofltt/rg_sound_generation | 8e79b4d9dce028def43284f80521a2ec61d0066c | [
"MIT"
] | null | null | null | members/amit/clf/data_generator_binary.py | Leofltt/rg_sound_generation | 8e79b4d9dce028def43284f80521a2ec61d0066c | [
"MIT"
] | null | null | null | import random
import shutil
import os
import numpy as np
import data_loader
import audio_processing
from typing import Dict
from loguru import logger
from tqdm import tqdm
from pprint import pprint
class DataGenerator:
    def __init__(self, conf: Dict, batch_size: int = 8):
        """Load the example list described by `conf` and run preprocessing.

        conf must contain at least "csv_file_path" and "base_dir"; other keys
        (e.g. "preprocess_dir", "features", "threshold", "valid_split",
        "reset_data") are consumed by preprocess().
        """
        assert "csv_file_path" in conf
        assert "base_dir" in conf
        self.conf = conf.copy()
        self.batch_size = batch_size
        self.examples = data_loader.data_loader(conf)
        self.num_examples = len(self.examples)
        # class label (binary: 0 / 1) -> list of preprocessed file path prefixes
        self.train = {0: [], 1: []}
        self.valid = {0: [], 1: []}
        self.train_counts = {0: 0, 1: 0}
        self.valid_counts = {0: 0, 1: 0}
        self.num_train = 0
        self.num_valid = 0
        self.classes = [0, 1]
        # filled in by preprocess() once the first example's features are computed
        self.input_shapes = {
            "spec": (),
            "hpss": ()
        }
        logger.info("DataGenerator instantiated")
        self.preprocess()
        logger.info("Preprocessing complete")
def preprocess(self):
logger.info("Preprocessing examples")
logger.info(f"{self.input_shapes['spec']} = Current input shape for spec")
folder = os.path.join(self.conf.get("preprocess_dir"))
if self.conf.get("reset_data"):
if os.path.isdir(folder):
shutil.rmtree(folder)
if not os.path.isdir(folder):
os.mkdir(folder)
min_level = 50 - self.conf.get("threshold")
max_level = 50 + self.conf.get("threshold")
valid_split = int(self.conf.get("valid_split") * 100)
logger.info(f"Min level {min_level}, Max level {max_level}")
for key, value in tqdm(self.examples.items()):
audio_file_name = value["audio_file_name"]
file_path = os.path.join(self.conf.get("base_dir"), f"{audio_file_name}.wav")
current_class = 1
for j, feature in enumerate(self.conf.get("features")):
current_val = int(value[feature])
current_class = -1
if current_val < min_level:
current_class = 0
elif current_val > max_level:
current_class = 1
if current_class == -1:
continue
target_file_path = os.path.join(self.conf.get("preprocess_dir"), audio_file_name)
if not os.path.isfile(f"{target_file_path}.spec.npy"):
spec, hpss = audio_processing.get_features(file_path, self.conf)
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
np.save(f"{target_file_path}.spec", spec)
np.save(f"{target_file_path}.hpss", hpss)
elif len(self.input_shapes["spec"]) == 0:
spec = np.load(f"{target_file_path}.spec.npy")
hpss = np.load(f"{target_file_path}.hpss.npy")
logger.info("Setting input shapes based on previous files")
logger.info(f"{spec.shape}, {hpss.shape}")
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
if random.randint(0, 99) < valid_split:
self.valid[current_class].append(target_file_path)
self.valid_counts[current_class] += 1
else:
self.train[current_class].append(target_file_path)
self.train_counts[current_class] += 1
self.num_train = sum(list(self.train_counts.values()))
self.num_valid = sum(list(self.train_counts.values()))
logger.info("Class counts in training set")
pprint(self.train_counts)
logger.info("Class counts in validation set")
pprint(self.valid_counts)
def generator(self, set_name: str):
assert set_name in ["train", "valid"], "Set name must be either train or valid"
while True:
spec_batch = np.zeros((self.batch_size,) + self.input_shapes["spec"])
hpss_batch = np.zeros((self.batch_size,) + self.input_shapes["hpss"])
y_batch = np.zeros((self.batch_size, ))
current_set = eval(f"self.{set_name}")
for i in range(0, self.batch_size):
target_class = random.choice([0, 1])
example_file = random.choice(current_set[target_class])
example_spec = np.load(f"{example_file}.spec.npy") * self.conf.get("scale_factor")
example_hpss = np.load(f"{example_file}.hpss.npy") * self.conf.get("scale_factor")
spec_batch[i] = example_spec
hpss_batch[i] = example_hpss
y_batch[i] = target_class
yield {"spec": spec_batch, "hpss": hpss_batch}, {"output": y_batch}
| 39.132231 | 98 | 0.583316 | import random
import shutil
import os
import numpy as np
import data_loader
import audio_processing
from typing import Dict
from loguru import logger
from tqdm import tqdm
from pprint import pprint
class DataGenerator:
    """Serves balanced batches of (spectrogram, HPSS) features for a binary classifier.

    Examples are indexed via ``data_loader`` from ``conf["csv_file_path"]``,
    preprocessed once into cached ``.spec.npy`` / ``.hpss.npy`` files under
    ``conf["preprocess_dir"]``, split into train/valid sets, and then served
    forever through :meth:`generator`.
    """

    def __init__(self, conf: Dict, batch_size: int = 8):
        """Load the example index and run the one-off preprocessing pass.

        Args:
            conf: configuration dict; must contain "csv_file_path" and "base_dir".
            batch_size: number of examples per generated batch.
        """
        assert "csv_file_path" in conf
        assert "base_dir" in conf

        self.conf = conf.copy()
        self.batch_size = batch_size
        self.examples = data_loader.data_loader(conf)
        self.num_examples = len(self.examples)
        # Per-class lists of preprocessed feature file paths (0 = low, 1 = high).
        self.train = {0: [], 1: []}
        self.valid = {0: [], 1: []}
        self.train_counts = {0: 0, 1: 0}
        self.valid_counts = {0: 0, 1: 0}
        self.num_train = 0
        self.num_valid = 0
        self.classes = [0, 1]
        # Filled in by preprocess() from the first feature array seen.
        self.input_shapes = {
            "spec": (),
            "hpss": ()
        }
        logger.info("DataGenerator instantiated")
        self.preprocess()
        logger.info("Preprocessing complete")

    def preprocess(self):
        """Extract/cache features and split examples into train/valid sets.

        Examples whose feature value falls inside the ambiguous band
        ``50 +/- conf["threshold"]`` are dropped entirely.
        """
        logger.info("Preprocessing examples")
        logger.info(f"{self.input_shapes['spec']} = Current input shape for spec")
        folder = os.path.join(self.conf.get("preprocess_dir"))
        if self.conf.get("reset_data"):
            # Wipe the feature cache so everything is recomputed below.
            if os.path.isdir(folder):
                shutil.rmtree(folder)
        if not os.path.isdir(folder):
            os.mkdir(folder)
        # Labels below min_level map to class 0, above max_level to class 1.
        min_level = 50 - self.conf.get("threshold")
        max_level = 50 + self.conf.get("threshold")
        valid_split = int(self.conf.get("valid_split") * 100)
        logger.info(f"Min level {min_level}, Max level {max_level}")

        for key, value in tqdm(self.examples.items()):
            audio_file_name = value["audio_file_name"]
            file_path = os.path.join(self.conf.get("base_dir"), f"{audio_file_name}.wav")
            # NOTE(review): only the *last* entry of conf["features"] decides the
            # class because current_class is overwritten every iteration —
            # confirm this is intended before adding more features.
            current_class = 1
            for j, feature in enumerate(self.conf.get("features")):
                current_val = int(value[feature])
                current_class = -1
                if current_val < min_level:
                    current_class = 0
                elif current_val > max_level:
                    current_class = 1
            if current_class == -1:
                # Ambiguous label: skip this example entirely.
                continue
            target_file_path = os.path.join(self.conf.get("preprocess_dir"), audio_file_name)
            if not os.path.isfile(f"{target_file_path}.spec.npy"):
                spec, hpss = audio_processing.get_features(file_path, self.conf)
                self.input_shapes["spec"] = spec.shape
                self.input_shapes["hpss"] = hpss.shape
                np.save(f"{target_file_path}.spec", spec)
                np.save(f"{target_file_path}.hpss", hpss)
            elif len(self.input_shapes["spec"]) == 0:
                # Features were cached by a previous run; recover the shapes once.
                spec = np.load(f"{target_file_path}.spec.npy")
                hpss = np.load(f"{target_file_path}.hpss.npy")
                logger.info("Setting input shapes based on previous files")
                logger.info(f"{spec.shape}, {hpss.shape}")
                self.input_shapes["spec"] = spec.shape
                self.input_shapes["hpss"] = hpss.shape
            # Random percentage split between validation and training sets.
            if random.randint(0, 99) < valid_split:
                self.valid[current_class].append(target_file_path)
                self.valid_counts[current_class] += 1
            else:
                self.train[current_class].append(target_file_path)
                self.train_counts[current_class] += 1

        self.num_train = sum(self.train_counts.values())
        # BUG FIX: num_valid previously summed train_counts again.
        self.num_valid = sum(self.valid_counts.values())
        logger.info("Class counts in training set")
        pprint(self.train_counts)
        logger.info("Class counts in validation set")
        pprint(self.valid_counts)

    def generator(self, set_name: str):
        """Infinite generator of class-balanced batches from one split.

        Args:
            set_name: "train" or "valid".

        Yields:
            ({"spec": array, "hpss": array}, {"output": array}) batch tuples.
        """
        assert set_name in ["train", "valid"], "Set name must be either train or valid"
        # getattr replaces the previous eval(f"self.{set_name}") — same lookup,
        # no arbitrary code evaluation; also hoisted out of the loop.
        current_set = getattr(self, set_name)
        while True:
            spec_batch = np.zeros((self.batch_size,) + self.input_shapes["spec"])
            hpss_batch = np.zeros((self.batch_size,) + self.input_shapes["hpss"])
            y_batch = np.zeros((self.batch_size, ))
            for i in range(0, self.batch_size):
                # Sample the class first so batches stay balanced even when the
                # underlying dataset is not.
                target_class = random.choice([0, 1])
                example_file = random.choice(current_set[target_class])
                example_spec = np.load(f"{example_file}.spec.npy") * self.conf.get("scale_factor")
                example_hpss = np.load(f"{example_file}.hpss.npy") * self.conf.get("scale_factor")
                spec_batch[i] = example_spec
                hpss_batch[i] = example_hpss
                y_batch[i] = target_class
            yield {"spec": spec_batch, "hpss": hpss_batch}, {"output": y_batch}
| true | true |
f727702ee7977a991a5617a1ff32f85463c61f40 | 4,108 | py | Python | feedback_system/findTable.py | bshrram/Graduation-Project---Omnidirectional-Conveyor-Table | 6414fbcb3d53f3c3351c25ac8b48aa73397c250d | [
"MIT"
] | 1 | 2020-09-24T05:06:17.000Z | 2020-09-24T05:06:17.000Z | feedback_system/findTable.py | bshrram/Graduation-Project---Omnidirectional-Conveyor-Table | 6414fbcb3d53f3c3351c25ac8b48aa73397c250d | [
"MIT"
] | null | null | null | feedback_system/findTable.py | bshrram/Graduation-Project---Omnidirectional-Conveyor-Table | 6414fbcb3d53f3c3351c25ac8b48aa73397c250d | [
"MIT"
] | 1 | 2020-12-13T13:31:08.000Z | 2020-12-13T13:31:08.000Z | import numpy as np
import cv2 as cv
# FLANN parameters for binary (ORB) descriptors; algorithm 6 selects the LSH
# index.  The OpenCV docs suggest 12/20/2 — the lower values used here trade
# matching accuracy for speed.
flann_params= dict(algorithm = 6,
                   table_number = 6, # docs example: 12
                   key_size = 12, # docs example: 20
                   multi_probe_level = 1) # docs example: 2
def init_feature():
    """Create the ORB keypoint detector and a FLANN-based descriptor matcher.

    Returns:
        (detector, matcher): an ``cv.ORB`` instance limited to 3000 features
        and a ``cv.FlannBasedMatcher`` configured with the module-level
        LSH ``flann_params`` (suitable for ORB's binary descriptors).
    """
    detector = cv.ORB_create(3000)
    # Removed: unused `norm = cv.NORM_HAMMING` local and the dead commented-out
    # BFMatcher alternative.
    matcher = cv.FlannBasedMatcher(flann_params, {})
    return detector, matcher
def filter_matches(kp1, kp2, matches, ratio = 0.8):
    """Apply Lowe's ratio test to knn matches and collect the survivors.

    Args:
        kp1, kp2: keypoint lists for the query and train images.
        matches: output of ``knnMatch(..., k=2)`` — a list of match pairs.
        ratio: best/second-best distance ratio threshold.

    Returns:
        (p1, p2, kp_pairs): float32 arrays of matched point coordinates and
        the list of corresponding (query_kp, train_kp) tuples.
    """
    kept_query, kept_train = [], []
    for pair in matches:
        if len(pair) != 2:
            continue
        best, second = pair
        # Keep the match only when the best candidate is clearly better
        # than the runner-up.
        if best.distance < second.distance * ratio:
            kept_query.append(kp1[best.queryIdx])
            kept_train.append(kp2[best.trainIdx])
    p1 = np.float32([kp.pt for kp in kept_query])
    p2 = np.float32([kp.pt for kp in kept_train])
    return p1, p2, list(zip(kept_query, kept_train))
# NOTE(review): module-level list that is never updated — explore_match binds
# a *local* `c`, so this stays empty; confirm whether it can be removed.
c = []
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    """Visualize keypoint matches side by side and outline the detected table.

    img1 and img2 are placed next to each other on one canvas; inlier matches
    are drawn as green dots/lines, outliers as red crosses.  When a homography
    H is given, img1's border is projected into img2's half of the canvas.

    Args:
        win: window name for cv.imshow.
        img1, img2: reference image and current frame (BGR).
        kp_pairs: list of (query_kp, train_kp) tuples from filter_matches.
        status: per-pair inlier mask from cv.findHomography (all-ones if None).
        H: 3x3 homography mapping img1 points into img2 coordinates, or None.

    Returns:
        float32 projected corners of img1 in canvas coordinates when H is
        given, otherwise None.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Side-by-side canvas: img1 on the left, img2 shifted right by w1.
    vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1+w2, :3] = img2

    # BUG FIX: corners1 used to be bound only inside the `H is not None`
    # branch, so the final return raised NameError whenever H was None.
    corners1 = None
    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners1 = np.float32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        # NOTE(review): this binds a *local* `c` and does not update the
        # module-level `c` list — confirm whether that global is still needed.
        c = corners
        cv.polylines(vis, [corners], True, (0, 0, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))  # shift into img2's half

    green = (0, 255, 0)
    red = (0, 0, 255)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv.circle(vis, (x1, y1), 2, col, -1)
            cv.circle(vis, (x2, y2), 2, col, -1)
        else:
            # Outliers become small red crosses on both sides of the canvas.
            col = red
            r = 2
            thickness = 3
            cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
            cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
    # Second pass so the green connector lines are drawn on top of the markers.
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv.line(vis, (x1, y1), (x2, y2), green)

    cv.imshow(win, vis)
    return corners1
# Downscale factor (percent) for the reference image; the resize itself is
# currently disabled below.
scale_percent =25
# Reference image of the table that is searched for in every frame.
img1 = cv.imread(cv.samples.findFile('table7A.jpg'))
width = int(img1.shape[1] * scale_percent / 100)
height = int(img1.shape[0] * scale_percent / 100)
#img1 = cv.resize(img1, (width,height))
detector, matcher = init_feature()
# ORB keypoints/descriptors of the reference image, reused for every frame.
kp1, desc1 = detector.detectAndCompute(img1, None)
def getCorners(frame):
    """Locate the reference table image inside `frame`.

    Matches the cached ORB descriptors of the reference image (module-level
    ``kp1``/``desc1``) against the frame, estimates a homography with RANSAC,
    and shows the match visualization.

    Returns:
        The projected table corners (float32, composite-canvas coordinates),
        or None when too few matches were found for homography estimation.
    """
    # ORB keypoints/descriptors of the incoming frame.
    kp2, desc2 = detector.detectAndCompute(frame, None)
    print('matching...')
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
    # Keep only matches passing Lowe's ratio test.
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        # H: 3x3 homography mapping reference-image points into the frame.
        H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
        print('%d / %d inliers/matched' % (np.sum(status), len(status)))
    else:
        # ROBUSTNESS FIX: bail out instead of calling explore_match with
        # H=None, which previously ended in a NameError.
        print('%d matches found, not enough for homography estimation' % len(p1))
        return None
    corners = explore_match('find_table', img1, frame, kp_pairs, status, H)
    return corners
def getTableFromFrame (corners, frame):
    """Rectify the detected table region of `frame` to the reference image size.

    Args:
        corners: four table corners as returned by getCorners(); these are in
            the side-by-side composite canvas coordinates (already offset by
            the reference image width w1).
        frame: current camera frame.

    Returns:
        The warped table image sized (w1, h1) like the reference image img1.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = frame.shape[:2]
    # Rebuild the same composite canvas explore_match() used, because the
    # corner coordinates are expressed in that canvas (frame starts at x=w1).
    vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1+w2, :3] = frame
    pts1 = corners
    # Map the detected quadrilateral onto an upright w1 x h1 rectangle.
    pts2 = np.float32([[0,0],[w1,0],[w1,h1], [0,h1]])
    M = cv.getPerspectiveTransform(pts1,pts2)
    # print((w1, h1))
    dst = cv.warpPerspective(vis, M,(w1,h1))
    return dst
| 31.844961 | 110 | 0.565725 | import numpy as np
import cv2 as cv
# FLANN parameters for binary (ORB) descriptors; algorithm 6 selects the LSH
# index.  The OpenCV docs suggest 12/20/2 — lower values trade accuracy for speed.
flann_params= dict(algorithm = 6,
                   table_number = 6,
                   key_size = 12,
                   multi_probe_level = 1)
def init_feature():
    """Create the ORB keypoint detector and a FLANN-based descriptor matcher.

    Returns:
        (detector, matcher): an ``cv.ORB`` instance limited to 3000 features
        and a ``cv.FlannBasedMatcher`` configured with the module-level
        LSH ``flann_params`` (suitable for ORB's binary descriptors).
    """
    detector = cv.ORB_create(3000)
    # Removed: unused `norm = cv.NORM_HAMMING` local.
    matcher = cv.FlannBasedMatcher(flann_params, {})
    return detector, matcher
def filter_matches(kp1, kp2, matches, ratio = 0.8):
    """Apply Lowe's ratio test to knn matches and collect the survivors.

    Returns:
        (p1, p2, kp_pairs): float32 arrays of matched point coordinates and
        the list of corresponding (query_kp, train_kp) tuples.
    """
    kept_query, kept_train = [], []
    for pair in matches:
        if len(pair) != 2:
            continue
        best, second = pair
        # Keep the match only when the best candidate clearly beats the runner-up.
        if best.distance < second.distance * ratio:
            kept_query.append(kp1[best.queryIdx])
            kept_train.append(kp2[best.trainIdx])
    p1 = np.float32([kp.pt for kp in kept_query])
    p2 = np.float32([kp.pt for kp in kept_train])
    return p1, p2, list(zip(kept_query, kept_train))
# NOTE(review): module-level list that is never updated — explore_match binds
# a *local* `c`, so this stays empty; confirm whether it can be removed.
c = []
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    """Visualize keypoint matches side by side and outline the detected table.

    img1 and img2 are placed next to each other on one canvas; inlier matches
    are drawn as green dots/lines, outliers as red crosses.  When a homography
    H is given, img1's border is projected into img2's half of the canvas.

    Returns:
        float32 projected corners of img1 in canvas coordinates when H is
        given, otherwise None.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Side-by-side canvas: img1 on the left, img2 shifted right by w1.
    vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1+w2, :3] = img2

    # BUG FIX: corners1 used to be bound only inside the `H is not None`
    # branch, so the final return raised NameError whenever H was None.
    corners1 = None
    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners1 = np.float32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        # NOTE(review): binds a *local* `c`; the module-level list stays empty.
        c = corners
        cv.polylines(vis, [corners], True, (0, 0, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))  # shift into img2's half

    green = (0, 255, 0)
    red = (0, 0, 255)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv.circle(vis, (x1, y1), 2, col, -1)
            cv.circle(vis, (x2, y2), 2, col, -1)
        else:
            # Outliers become small red crosses on both sides of the canvas.
            col = red
            r = 2
            thickness = 3
            cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
            cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
    # Second pass so the green connector lines are drawn on top of the markers.
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv.line(vis, (x1, y1), (x2, y2), green)

    cv.imshow(win, vis)
    return corners1
# Downscale factor (percent) for the reference image.
scale_percent =25
# Reference image of the table that is searched for in every frame.
img1 = cv.imread(cv.samples.findFile('table7A.jpg'))
# NOTE(review): width/height are computed but unused — presumably left over
# from a disabled resize step; confirm before removing.
width = int(img1.shape[1] * scale_percent / 100)
height = int(img1.shape[0] * scale_percent / 100)
detector, matcher = init_feature()
# ORB keypoints/descriptors of the reference image, reused for every frame.
kp1, desc1 = detector.detectAndCompute(img1, None)
def getCorners(frame):
    """Locate the reference table image inside `frame`.

    Matches the cached ORB descriptors of the reference image (module-level
    ``kp1``/``desc1``) against the frame, estimates a homography with RANSAC,
    and shows the match visualization.

    Returns:
        The projected table corners (float32, composite-canvas coordinates),
        or None when too few matches were found for homography estimation.
    """
    # ORB keypoints/descriptors of the incoming frame.
    kp2, desc2 = detector.detectAndCompute(frame, None)
    print('matching...')
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
    # Keep only matches passing Lowe's ratio test.
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        # H: 3x3 homography mapping reference-image points into the frame.
        H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
        print('%d / %d inliers/matched' % (np.sum(status), len(status)))
    else:
        # ROBUSTNESS FIX: bail out instead of calling explore_match with
        # H=None, which previously ended in a NameError.
        print('%d matches found, not enough for homography estimation' % len(p1))
        return None
    corners = explore_match('find_table', img1, frame, kp_pairs, status, H)
    return corners
def getTableFromFrame (corners, frame):
    """Rectify the detected table region of `frame` to the reference image size.

    Args:
        corners: four table corners as returned by getCorners(); these are in
            the side-by-side composite canvas coordinates (already offset by
            the reference image width w1).
        frame: current camera frame.

    Returns:
        The warped table image sized (w1, h1) like the reference image img1.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = frame.shape[:2]
    # Rebuild the same composite canvas explore_match() used, because the
    # corner coordinates are expressed in that canvas (frame starts at x=w1).
    vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
    vis[:h1, :w1, :3] = img1
    vis[:h2, w1:w1+w2, :3] = frame
    pts1 = corners
    # Map the detected quadrilateral onto an upright w1 x h1 rectangle.
    pts2 = np.float32([[0,0],[w1,0],[w1,h1], [0,h1]])
    M = cv.getPerspectiveTransform(pts1,pts2)
    dst = cv.warpPerspective(vis, M,(w1,h1))
    return dst
| true | true |
f72771a425e944529c8133e292bae69e1f9dc774 | 1,013 | py | Python | tensorlayer/package_info.py | Officium/tensorlayer | 89bd7646cff2bc77c6569f2a51d48bc1e80229e4 | [
"Apache-2.0"
] | 2 | 2020-10-18T15:43:49.000Z | 2020-10-27T14:52:48.000Z | tensorlayer/package_info.py | sheiiva/tensorlayer | 5d692fe87ac4d4439506b5c4827399fd5a8ab5da | [
"Apache-2.0"
] | null | null | null | tensorlayer/package_info.py | sheiiva/tensorlayer | 5d692fe87ac4d4439506b5c4827399fd5a8ab5da | [
"Apache-2.0"
] | 1 | 2020-10-15T13:15:40.000Z | 2020-10-15T13:15:40.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""Deep learning and Reinforcement learning library for Researchers and Engineers."""
# Version components; bump these for a release.
MAJOR = 2
MINOR = 1
PATCH = 1
PRE_RELEASE = ''
# Canonical version tuple: (major, minor, patch, prerelease).
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)

# "X.Y.Z" without any pre-release suffix.
__shortversion__ = f"{MAJOR}.{MINOR}.{PATCH}"
# Full version string, including the pre-release suffix when present.
__version__ = f"{MAJOR}.{MINOR}.{PATCH}{PRE_RELEASE}"

# Packaging / metadata strings consumed by setup.py.
__package_name__ = 'tensorlayer'
__contact_names__ = 'TensorLayer Contributors'
__contact_emails__ = 'tensorlayer@gmail.com'
__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'
__repository_url__ = 'https://github.com/tensorlayer/tensorlayer'
__download_url__ = 'https://github.com/tensorlayer/tensorlayer'
__description__ = 'High Level Tensorflow Deep Learning Library for Researcher and Engineer.'
__license__ = 'apache'
# Keyword list split across two literals purely for line length.
__keywords__ = ('deep learning, machine learning, computer vision, nlp, '
                'supervised learning, unsupervised learning, reinforcement learning, tensorflow')
| 40.52 | 96 | 0.757157 |
# Version components; bump these for a release.
MAJOR = 2
MINOR = 1
PATCH = 1
PRE_RELEASE = ''
# Canonical version tuple: (major, minor, patch, prerelease).
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# "X.Y.Z" without any pre-release suffix.
__shortversion__ = '.'.join(map(str, VERSION[:3]))
# Full version string, including the pre-release suffix when present.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Packaging / metadata strings consumed by setup.py.
__package_name__ = 'tensorlayer'
__contact_names__ = 'TensorLayer Contributors'
__contact_emails__ = 'tensorlayer@gmail.com'
__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'
__repository_url__ = 'https://github.com/tensorlayer/tensorlayer'
__download_url__ = 'https://github.com/tensorlayer/tensorlayer'
__description__ = 'High Level Tensorflow Deep Learning Library for Researcher and Engineer.'
__license__ = 'apache'
# Keyword list built in two statements purely for line length.
__keywords__ = 'deep learning, machine learning, computer vision, nlp, '
__keywords__ += 'supervised learning, unsupervised learning, reinforcement learning, tensorflow'
| true | true |
f7277265e7244b7ff4545ccd5788d0ae944adadd | 167 | py | Python | setup.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | null | null | null | setup.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | null | null | null | setup.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | null | null | null | from setuptools import setup, find_packages
# Minimal setuptools packaging metadata for the `regulations` package.
setup(
    name="regulations",
    version="0.1.0",
    license="public domain",
    packages=find_packages(),
)
| 18.555556 | 43 | 0.646707 | from setuptools import setup, find_packages
# Minimal setuptools packaging metadata for the `regulations` package.
setup(
    name = "regulations",
    version = "0.1.0",
    license = "public domain",
    packages = find_packages()
)
| true | true |
f7277275036c1c4151a7b860aa018f1c5a139255 | 3,246 | py | Python | scripts/ServerPoolMachinesHealth.py | sumedhpb/TAF | fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449 | [
"Apache-2.0"
] | null | null | null | scripts/ServerPoolMachinesHealth.py | sumedhpb/TAF | fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449 | [
"Apache-2.0"
] | null | null | null | scripts/ServerPoolMachinesHealth.py | sumedhpb/TAF | fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449 | [
"Apache-2.0"
] | null | null | null | from com.jcraft.jsch import JSchException
from com.jcraft.jsch import JSch
from org.python.core.util import FileUtil
from java.time import Duration
from com.couchbase.client.java import Cluster, ClusterOptions
from com.couchbase.client.java.env import ClusterEnvironment
from com.couchbase.client.core.env import TimeoutConfig, IoConfig
import sys
# Servers that could not be reached over SSH; reported at the end of the run.
failed = []
# Set vm.swappiness to 0 and enable ARP notifications (persisted via sysctl.conf).
swapiness_cmd = "echo 0 > /proc/sys/vm/swappiness;echo \"net.ipv4.conf.all.arp_notify = 1\" > /etc/sysctl.conf;echo \"#Set swappiness to 0 to avoid swapping\" >> /etc/sysctl.conf;echo vm.swappiness = 0 >> /etc/sysctl.conf"
# Disable transparent huge pages.
thp_cmd = "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
# Stop and disable firewalld, then list /data as a sanity check.
disable_firewall = "systemctl stop firewalld; systemctl disable firewalld; ls -l /data"
# Raise the open-file-descriptor limit to 500000 (persisted via limits.conf).
ulimit_cmd = "ulimit -n 500000;echo \"* soft nofile 500000\" > /etc/security/limits.conf;echo \"* hard nofile 500000\" >> /etc/security/limits.conf;"
def run(command, server):
    """Run `command` on `server` over SSH as root using JSch (Jython).

    Args:
        command: shell command line to execute remotely.
        server: hostname/IP of the target machine.

    Returns:
        (output, error): lists of stdout and stderr lines.  When the SSH
        connection fails, the server is recorded in the module-level
        `failed` list and two empty lists are returned.
    """
    output = []
    error = []
    jsch = JSch()
    session = jsch.getSession("root", server, 22)
    session.setPassword("couchbase")
    # Skip host-key verification; these are disposable pool machines.
    session.setConfig("StrictHostKeyChecking", "no")
    try:
        session.connect(10000)
    except JSchException:
        failed.append(server)
        # BUG FIX: previously execution fell through to openChannel() on the
        # dead session, producing a misleading secondary JSch error.
        return output, error
    try:
        _ssh_client = session.openChannel("exec")
        _ssh_client.setInputStream(None)
        _ssh_client.setErrStream(None)
        instream = _ssh_client.getInputStream()
        errstream = _ssh_client.getErrStream()
        _ssh_client.setCommand(command)
        _ssh_client.connect()
        # Drain stdout first, then stderr.
        fu1 = FileUtil.wrap(instream)
        for line in fu1.readlines():
            output.append(line)
        fu1.close()
        fu2 = FileUtil.wrap(errstream)
        for line in fu2.readlines():
            error.append(line)
        fu2.close()
        _ssh_client.disconnect()
        session.disconnect()
    except JSchException as e:
        print("JSch exception on %s: %s" % (server, str(e)))
    return output, error
def execute(cmd="free -m"):
    """Run `cmd` on every CentOS server in the QE server pool.

    Queries the `QE-server-pool` bucket (Couchbase Java SDK, Jython) for
    servers in the regression pools, then runs `cmd` on each via run() and
    prints the collected stdout/stderr.

    Args:
        cmd: shell command to execute on each server (default "free -m").
    """
    # Couchbase cluster connection with 20s connect / 10s KV timeouts.
    cluster_env = ClusterEnvironment.builder().ioConfig(IoConfig.numKvConnections(25)).timeoutConfig(TimeoutConfig.builder().connectTimeout(Duration.ofSeconds(20)).kvTimeout(Duration.ofSeconds(10)))
    cluster_options = ClusterOptions.clusterOptions("Administrator", "esabhcuoc").environment(cluster_env.build())
    cluster = Cluster.connect("172.23.104.162", cluster_options)
    # meta().id of each pool document is the server address.
    STATEMENT = "select meta().id from `QE-server-pool` where os='centos' and '12hrreg' in poolId or 'regression' in poolId or 'magmareg' in poolId;"
    result = cluster.query(STATEMENT);
    count = 1
    for server in result.rowsAsObject():
        server = server.get("id")
        print("--+--+--+--+-- %s. SERVER: %s --+--+--+--+--" % (count, server))
        count += 1
        output, error = run(cmd, server)
        if output:
            print(output)
        if error:
            print(error)
# Dispatch on the optional CLI argument; with no argument run the default
# health check ("free -m") on every pooled server.
# BUG FIX: this used to be a bare `except:` around the whole dispatch, so any
# failure inside execute(...) silently triggered a second execute() run with
# the default command.  Test argv length explicitly instead.
if len(sys.argv) > 1:
    param = sys.argv[1]
    if "thp" in param.lower():
        execute(thp_cmd)
    if "firewall" in param.lower():
        execute(disable_firewall)
    if "swapiness" in param.lower():
        execute(swapiness_cmd)
    if "ulimit" in param.lower():
        execute(ulimit_cmd)
else:
    execute()

# Report every server that could not be reached over SSH.
if failed:
    for server in failed:
        print("ssh failed: %s" % server)
| 37.744186 | 222 | 0.66451 | from com.jcraft.jsch import JSchException
from com.jcraft.jsch import JSch
from org.python.core.util import FileUtil
from java.time import Duration
from com.couchbase.client.java import Cluster, ClusterOptions
from com.couchbase.client.java.env import ClusterEnvironment
from com.couchbase.client.core.env import TimeoutConfig, IoConfig
import sys
# Servers that could not be reached over SSH; reported at the end of the run.
failed = []
# BUG FIX: this copy of swapiness_cmd had been truncated at the '#' inside the
# shell string, leaving an unterminated literal (SyntaxError); full value
# restored.  It sets vm.swappiness to 0 and enables ARP notifications.
swapiness_cmd = "echo 0 > /proc/sys/vm/swappiness;echo \"net.ipv4.conf.all.arp_notify = 1\" > /etc/sysctl.conf;echo \"#Set swappiness to 0 to avoid swapping\" >> /etc/sysctl.conf;echo vm.swappiness = 0 >> /etc/sysctl.conf"
# Disable transparent huge pages.
thp_cmd = "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
# Stop and disable firewalld, then list /data as a sanity check.
disable_firewall = "systemctl stop firewalld; systemctl disable firewalld; ls -l /data"
# Raise the open-file-descriptor limit to 500000 (persisted via limits.conf).
ulimit_cmd = "ulimit -n 500000;echo \"* soft nofile 500000\" > /etc/security/limits.conf;echo \"* hard nofile 500000\" >> /etc/security/limits.conf;"
def run(command, server):
    """Run `command` on `server` over SSH as root using JSch (Jython).

    Args:
        command: shell command line to execute remotely.
        server: hostname/IP of the target machine.

    Returns:
        (output, error): lists of stdout and stderr lines.  When the SSH
        connection fails, the server is recorded in the module-level
        `failed` list and two empty lists are returned.
    """
    output = []
    error = []
    jsch = JSch()
    session = jsch.getSession("root", server, 22)
    session.setPassword("couchbase")
    # Skip host-key verification; these are disposable pool machines.
    session.setConfig("StrictHostKeyChecking", "no")
    try:
        session.connect(10000)
    except JSchException:
        failed.append(server)
        # BUG FIX: previously execution fell through to openChannel() on the
        # dead session, producing a misleading secondary JSch error.
        return output, error
    try:
        _ssh_client = session.openChannel("exec")
        _ssh_client.setInputStream(None)
        _ssh_client.setErrStream(None)
        instream = _ssh_client.getInputStream()
        errstream = _ssh_client.getErrStream()
        _ssh_client.setCommand(command)
        _ssh_client.connect()
        # Drain stdout first, then stderr.
        fu1 = FileUtil.wrap(instream)
        for line in fu1.readlines():
            output.append(line)
        fu1.close()
        fu2 = FileUtil.wrap(errstream)
        for line in fu2.readlines():
            error.append(line)
        fu2.close()
        _ssh_client.disconnect()
        session.disconnect()
    except JSchException as e:
        print("JSch exception on %s: %s" % (server, str(e)))
    return output, error
def execute(cmd="free -m"):
    """Run `cmd` on every CentOS server in the QE server pool.

    Queries the `QE-server-pool` bucket (Couchbase Java SDK, Jython) for
    servers in the regression pools, then runs `cmd` on each via run() and
    prints the collected stdout/stderr.

    Args:
        cmd: shell command to execute on each server (default "free -m").
    """
    # Couchbase cluster connection with 20s connect / 10s KV timeouts.
    cluster_env = ClusterEnvironment.builder().ioConfig(IoConfig.numKvConnections(25)).timeoutConfig(TimeoutConfig.builder().connectTimeout(Duration.ofSeconds(20)).kvTimeout(Duration.ofSeconds(10)))
    cluster_options = ClusterOptions.clusterOptions("Administrator", "esabhcuoc").environment(cluster_env.build())
    cluster = Cluster.connect("172.23.104.162", cluster_options)
    # meta().id of each pool document is the server address.
    STATEMENT = "select meta().id from `QE-server-pool` where os='centos' and '12hrreg' in poolId or 'regression' in poolId or 'magmareg' in poolId;"
    result = cluster.query(STATEMENT);
    count = 1
    for server in result.rowsAsObject():
        server = server.get("id")
        print("--+--+--+--+-- %s. SERVER: %s --+--+--+--+--" % (count, server))
        count += 1
        output, error = run(cmd, server)
        if output:
            print(output)
        if error:
            print(error)
# Dispatch on the optional CLI argument; with no argument run the default
# health check ("free -m") on every pooled server.
# BUG FIX: this used to be a bare `except:` around the whole dispatch, so any
# failure inside execute(...) silently triggered a second execute() run with
# the default command.  Test argv length explicitly instead.
if len(sys.argv) > 1:
    param = sys.argv[1]
    if "thp" in param.lower():
        execute(thp_cmd)
    if "firewall" in param.lower():
        execute(disable_firewall)
    if "swapiness" in param.lower():
        execute(swapiness_cmd)
    if "ulimit" in param.lower():
        execute(ulimit_cmd)
else:
    execute()

# Report every server that could not be reached over SSH.
if failed:
    for server in failed:
        print("ssh failed: %s" % server)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.