hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72f74e9499225c66ece09f508b8f7dac132b713 | 1,812 | py | Python | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from experiments.data_model.image_denoising.noise_dataset import NoiseDataSet
from experiments.models_architecture.camera_nlf_flow import generate_nlf_flow
def train_step(in_noise, in_cond_vector):
    """Back-propagate one batch through the global ``flow`` and step ``opt``.

    Relies on the module-level globals defined in the ``__main__`` block:
    ``flow`` (the normalizing flow), ``opt`` (its optimizer) and
    ``loss_list`` (the per-epoch loss accumulator).

    :param in_noise: batch of noise patches (channel-first float tensors).
    :param in_cond_vector: conditioning inputs ``[clean, iso, cam]``.
    """
    opt.zero_grad()
    batch_nll = flow.nll_mean(in_noise, in_cond_vector)
    batch_nll.backward()
    # Record the scalar loss before the parameter update.
    loss_list.append(batch_nll.item())
    opt.step()
if __name__ == '__main__':
    # Training hyper-parameters.
    lr = 1e-4
    patch_size = 32
    n_epochs = 5
    batch_size = 32
    n_iter_per_epoch = 1000  # NOTE(review): unused below — confirm whether epochs were meant to be capped
    input_shape = [4, patch_size, patch_size]  # 4-channel raw patches (SIDD raw data) — presumably packed Bayer; confirm
    trained_alpha = True  # True -> NLF flow; False -> Gaussian baseline (selects save names below)
    flow = generate_nlf_flow(input_shape, trained_alpha)
    opt = torch.optim.Adam(flow.parameters(), lr=lr)
    nds = NoiseDataSet("/data/datasets/SIDD_Medium_Raw/Data", n_pat_per_im=5000)
    nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)
    loss_best = np.inf
    for n in range(n_epochs):
        loss_list = []  # per-epoch accumulator; train_step() appends to it
        for noise, clean, cam, iso in tqdm(nds_dl):
            noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()
            # Dataset yields channel-last batches; permute to channel-first for the flow.
            clean = torch.permute(clean, (0, 3, 1, 2)).float()
            noise = torch.permute(noise, (0, 3, 1, 2)).float()
            cond_vector = [clean, iso, cam]
            train_step(noise, cond_vector)
        loss_current = sum(loss_list) / len(loss_list)
        print(loss_current)
        if loss_current < loss_best:
            # New best epoch-mean NLL: checkpoint under a "*_best" name.
            flow_name = "flow_nlf_best.pt" if trained_alpha else "flow_gaussian_best.pt"
            torch.save(flow.state_dict(), f"./{flow_name}")
            loss_best = loss_current
            print(f"Update Best To:{loss_current}")
    # Always save the last-epoch weights as well, regardless of best-loss tracking.
    flow_name = "flow_nlf.pt" if trained_alpha else "flow_gaussian.pt"
    torch.save(flow.state_dict(), f"./{flow_name}")
| 34.846154 | 94 | 0.667219 | import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from experiments.data_model.image_denoising.noise_dataset import NoiseDataSet
from experiments.models_architecture.camera_nlf_flow import generate_nlf_flow
def train_step(in_noise, in_cond_vector):
opt.zero_grad()
loss = flow.nll_mean(in_noise, in_cond_vector)
loss.backward()
loss_list.append(loss.item())
opt.step()
if __name__ == '__main__':
lr = 1e-4
patch_size = 32
n_epochs = 5
batch_size = 32
n_iter_per_epoch = 1000
input_shape = [4, patch_size, patch_size]
trained_alpha = True
flow = generate_nlf_flow(input_shape, trained_alpha)
opt = torch.optim.Adam(flow.parameters(), lr=lr)
nds = NoiseDataSet("/data/datasets/SIDD_Medium_Raw/Data", n_pat_per_im=5000)
nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)
loss_best = np.inf
for n in range(n_epochs):
loss_list = []
for noise, clean, cam, iso in tqdm(nds_dl):
noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()
clean = torch.permute(clean, (0, 3, 1, 2)).float()
noise = torch.permute(noise, (0, 3, 1, 2)).float()
cond_vector = [clean, iso, cam]
train_step(noise, cond_vector)
loss_current = sum(loss_list) / len(loss_list)
print(loss_current)
if loss_current < loss_best:
flow_name = "flow_nlf_best.pt" if trained_alpha else "flow_gaussian_best.pt"
torch.save(flow.state_dict(), f"./{flow_name}")
loss_best = loss_current
print(f"Update Best To:{loss_current}")
flow_name = "flow_nlf.pt" if trained_alpha else "flow_gaussian.pt"
torch.save(flow.state_dict(), f"./{flow_name}")
| true | true |
f72f75554d82d37ec9ea899454d23be4c88365df | 4,843 | py | Python | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 1 | 2021-04-11T08:27:46.000Z | 2021-04-11T08:27:46.000Z | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 1 | 2022-01-18T12:17:29.000Z | 2022-01-18T12:17:29.000Z | torch/distributed/elastic/metrics/__init__.py | MagiaSN/pytorch | 7513455c743d3d644b45a804902c1a0d14b69f45 | [
"Intel"
] | 2 | 2021-07-02T10:18:21.000Z | 2021-08-18T10:10:28.000Z | #!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Metrics API
**Overview**:
The metrics API in torchelastic is used to publish telemetry metrics.
It is designed to be used by torchelastic's internal modules to
publish metrics for the end user with the goal of increasing visibility
and helping with debugging. However you may use the same API in your
jobs to publish metrics to the same metrics ``sink``.
A ``metric`` can be thought of as timeseries data
and is uniquely identified by the string-valued tuple
``(metric_group, metric_name)``.
torchelastic makes no assumptions about what a ``metric_group`` is
and what relationship it has with ``metric_name``. It is totally up
to the user to use these two fields to uniquely identify a metric.
.. note:: The metric group ``torchelastic`` is reserved by torchelastic for
platform level metrics that it produces.
For instance torchelastic may output the latency (in milliseconds)
of a re-rendezvous operation from the agent as
``(torchelastic, agent.rendezvous.duration.ms)``
A sensible way to use metric groups is to map them to a stage or module
in your job. You may also encode certain high-level properties of
the job, such as the region or stage (dev vs prod).
**Publish Metrics**:
Using torchelastic's metrics API is similar to using python's logging
framework. You first have to configure a metrics handler before
trying to add metric data.
The example below measures the latency for the ``calculate()`` function.
::
import time
import torch.distributed.elastic.metrics as metrics
# makes all metrics other than the one from "my_module" to go /dev/null
metrics.configure(metrics.NullMetricsHandler())
metrics.configure(metrics.ConsoleMetricsHandler(), "my_module")
def my_method():
start = time.time()
calculate()
end = time.time()
metrics.put_metric("calculate_latency", int(end-start), "my_module")
You may also use the `torch.distributed.elastic.metrics.prof` decorator
to conveniently and succinctly profile functions
::
# -- in module examples.foobar --
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricsHandler(), "foobar")
metrics.configure(metrics.ConsoleMetricsHandler(), "Bar")
@metrics.prof
def foo():
pass
class Bar():
@metrics.prof
def baz():
pass
``@metrics.prof`` will publish the following metrics
::
<leaf_module or classname>.success - 1 if the function finished successfully
<leaf_module or classname>.failure - 1 if the function threw an exception
<leaf_module or classname>.duration.ms - function duration in milliseconds
**Configuring Metrics Handler**:
`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting
the added metric values to a particular destination. Metric groups can be
configured with different metric handlers.
By default torchelastic emits all metrics to ``/dev/null``.
By adding the following configuration metrics,
``torchelastic`` and ``my_app`` metric groups will be printed out to
console.
::
import torch.distributed.elastic.metrics as metrics
metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic")
metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app")
**Writing a Custom Metric Handler**:
If you want your metrics to be emitted to a custom location, implement
the `torch.distributed.elastic.metrics.MetricHandler` interface
and configure your job to use your custom metric handler.
Below is a toy example that prints the metrics to ``stdout``
::
import torch.distributed.elastic.metrics as metrics
class StdoutMetricHandler(metrics.MetricHandler):
def emit(self, metric_data):
ts = metric_data.timestamp
group = metric_data.group_name
name = metric_data.name
value = metric_data.value
print(f"[{ts}][{group}]: {name}={value}")
metrics.configure(StdoutMetricHandler(), group="my_app")
Now all metrics in the group ``my_app`` will be printed to stdout as:
::
[1574213883.4182858][my_app]: my_metric=<value>
[1574213940.5237644][my_app]: my_metric=<value>
"""
from typing import Optional
from .api import ( # noqa F401
ConsoleMetricHandler,
MetricData,
MetricHandler,
MetricsConfig,
NullMetricHandler,
configure,
get_elapsed_time_ms,
getStream,
prof,
profile,
publish_metric,
put_metric,
)
def initialize_metrics(cfg: Optional[MetricsConfig] = None):
    """Initialize the metrics subsystem (currently a no-op).

    Kept as a stable public hook for callers; any real initialization, if
    present, may be performed by the optional ``static_init`` import that
    runs at module load time.
    """
    pass
# Best-effort static initialization: pull in platform-specific metrics setup
# when the optional `static_init` module is packaged; skip silently when it
# is not installed.
try:
    from torch.distributed.elastic.metrics.static_init import *  # type: ignore # noqa: F401 F403
except ModuleNotFoundError:
    pass
| 29.530488 | 97 | 0.742308 |
from typing import Optional
from .api import (
ConsoleMetricHandler,
MetricData,
MetricHandler,
MetricsConfig,
NullMetricHandler,
configure,
get_elapsed_time_ms,
getStream,
prof,
profile,
publish_metric,
put_metric,
)
def initialize_metrics(cfg: Optional[MetricsConfig] = None):
pass
try:
from torch.distributed.elastic.metrics.static_init import * FoundError:
pass
| true | true |
f72f76abe1221bf6bd92370f84f0906ef075e999 | 3,133 | py | Python | src/dataset_builder.py | elangovana/large-scale-ptm-ppi | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | 1 | 2022-02-25T22:06:39.000Z | 2022-02-25T22:06:39.000Z | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | import logging
import os
from torch.utils.data import DataLoader
from locator import Locator
class DatasetBuilder:
    """Lazily builds and caches datasets, dataloaders, tokeniser and scorers.

    Concrete dataset / tokeniser factories are resolved by name via
    ``Locator`` so this builder stays decoupled from their implementations.
    Every expensive object is constructed on first access and cached on the
    instance.
    """

    def __init__(self, val_data, dataset_factory_name, tokenisor_factory_name, train_data=None, num_workers=None,
                 batch_size=8, addition_args_dict=None):
        """
        :param val_data: validation data source handed to the dataset factory.
        :param dataset_factory_name: locator name of the dataset factory.
        :param tokenisor_factory_name: locator name of the tokeniser factory.
        :param train_data: optional training data source.
        :param num_workers: dataloader worker count; defaults to
            ``os.cpu_count() - 1``. An explicit ``0`` is honoured (data is
            loaded in the main process).
        :param batch_size: batch size used for both dataloaders.
        :param addition_args_dict: extra kwargs forwarded to the factories.
        """
        self._addition_args_dict = addition_args_dict
        self.train_data = train_data
        self.val_data = val_data
        self.batch_size = batch_size
        self._dataset_factory = Locator().get(dataset_factory_name)
        self._tokenisor_factory = Locator().get(tokenisor_factory_name)
        # Bug fix: the previous `num_workers or os.cpu_count() - 1` treated an
        # explicit num_workers=0 as "unset". Use an identity check instead, and
        # guard os.cpu_count() returning None on platforms where it is unknown.
        if num_workers is None:
            num_workers = (os.cpu_count() or 1) - 1
        self.num_workers = max(num_workers, 0)

        # Lazily-initialised caches.
        self._tokenisor = None
        self._train_dataloader = None
        self._train_dataset = None
        self._val_dataset = None
        self._val_dataloader = None
        self._scorers = None
        self._label_mapper = None

    @property
    def _logger(self):
        return logging.getLogger(__name__)

    def get_tokenisor(self):
        """Return the cached tokeniser, creating it on first use."""
        self._logger.info("Retrieving Tokeniser")
        if self._tokenisor is None:
            self._tokenisor = self._tokenisor_factory.get_tokenisor(**self._addition_args_dict)
        return self._tokenisor

    def get_train_dataset(self):
        """Return the cached training dataset, creating it on first use."""
        if self._train_dataset is None:
            self._train_dataset = self._dataset_factory.get_dataset(self.train_data,
                                                                    preprocessors=self.get_tokenisor(),
                                                                    **self._addition_args_dict)
        return self._train_dataset

    def get_val_dataset(self):
        """Return the cached validation dataset, creating it on first use."""
        if self._val_dataset is None:
            self._val_dataset = self._dataset_factory.get_dataset(self.val_data, preprocessors=self.get_tokenisor(),
                                                                  **self._addition_args_dict)
        return self._val_dataset

    def get_label_mapper(self):
        """Return the cached label mapper obtained from the dataset factory."""
        if self._label_mapper is None:
            self._label_mapper = self._dataset_factory.get_label_mapper()
        return self._label_mapper

    def num_classes(self):
        """Number of target classes, per the label mapper."""
        return self.get_label_mapper().num_classes

    def positive_label_index(self):
        """Index of the positive label, per the label mapper.

        Bug fix: previously read ``self._label_mapper`` directly, raising
        AttributeError when called before ``get_label_mapper()``; now goes
        through the lazy getter, consistent with ``num_classes``.
        """
        return self.get_label_mapper().positive_label_index

    def get_scorers(self):
        """Return the cached scorers provided by the dataset factory."""
        if self._scorers is None:
            self._scorers = self._dataset_factory.get_scorers()
        return self._scorers

    def get_train_dataloader(self):
        """Return the cached (shuffling) training dataloader."""
        if self._train_dataloader is None:
            self._train_dataloader = DataLoader(dataset=self.get_train_dataset(), num_workers=self.num_workers,
                                                batch_size=self.batch_size, shuffle=True)
        return self._train_dataloader

    def get_val_dataloader(self):
        """Return the cached (non-shuffling) validation dataloader."""
        if self._val_dataloader is None:
            self._val_dataloader = DataLoader(dataset=self.get_val_dataset(), num_workers=self.num_workers,
                                              batch_size=self.batch_size, shuffle=False)
        return self._val_dataloader
| 34.811111 | 116 | 0.635812 | import logging
import os
from torch.utils.data import DataLoader
from locator import Locator
class DatasetBuilder:
def __init__(self, val_data, dataset_factory_name, tokenisor_factory_name, train_data=None, num_workers=None,
batch_size=8, addition_args_dict=None):
self._addition_args_dict = addition_args_dict
self.train_data = train_data
self.val_data = val_data
self.batch_size = batch_size
self._dataset_factory = Locator().get(dataset_factory_name)
self._tokenisor_factory = Locator().get(tokenisor_factory_name)
self.num_workers = num_workers or os.cpu_count() - 1
if self.num_workers <= 0:
self.num_workers = 0
self._tokenisor = None
self._train_dataloader = None
self._train_dataset = None
self._val_dataset = None
self._val_dataloader = None
self._scorers = None
self._label_mapper = None
@property
def _logger(self):
return logging.getLogger(__name__)
def get_tokenisor(self):
self._logger.info("Retrieving Tokeniser")
if self._tokenisor is None:
self._tokenisor = self._tokenisor_factory.get_tokenisor(**self._addition_args_dict)
return self._tokenisor
def get_train_dataset(self):
if self._train_dataset is None:
self._train_dataset = self._dataset_factory.get_dataset(self.train_data,
preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._train_dataset
def get_val_dataset(self):
if self._val_dataset is None:
self._val_dataset = self._dataset_factory.get_dataset(self.val_data, preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._val_dataset
def get_label_mapper(self):
if self._label_mapper is None:
self._label_mapper = self._dataset_factory.get_label_mapper()
return self._label_mapper
def num_classes(self):
return self.get_label_mapper().num_classes
def positive_label_index(self):
return self._label_mapper.positive_label_index
def get_scorers(self):
if self._scorers is None:
self._scorers = self._dataset_factory.get_scorers()
return self._scorers
def get_train_dataloader(self):
if self._train_dataloader is None:
self._train_dataloader = DataLoader(dataset=self.get_train_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=True)
return self._train_dataloader
def get_val_dataloader(self):
if self._val_dataloader is None:
self._val_dataloader = DataLoader(dataset=self.get_val_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=False)
return self._val_dataloader
| true | true |
f72f78af5664f6874058767edba4aca0c9a4cc9f | 2,687 | py | Python | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | app/recipe/tests/test_ingredient_api.py | samderlust/recipe-app-api | 44d63426fe2875bd57900203d9dccc14550f1f9d | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITest(TestCase):
    """Tests for the publicly available (unauthenticated) ingredient API."""

    def setUp(self):
        # Fresh unauthenticated client for every test.
        self.client = APIClient()

    def test_login_required(self):
        """An anonymous request to the ingredient list endpoint is rejected."""
        response = self.client.get(INGREDIENTS_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITest(TestCase):
    """Tests for the ingredient API that require an authenticated user."""

    def setUp(self):
        # Authenticate every request in this suite as a fixed test user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@test.com',
            'password'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredient_list(self):
        """Listing ingredients returns all of them, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name='Kale')
        Ingredient.objects.create(user=self.user, name='Salt')

        response = self.client.get(INGREDIENTS_URL)

        expected = IngredientSerializer(
            Ingredient.objects.all().order_by('-name'), many=True
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_ingredients_limited_to_user(self):
        """Only the authenticated user's own ingredients are returned."""
        other_user = get_user_model().objects.create_user(
            'user2@test.com',
            'password'
        )
        Ingredient.objects.create(user=other_user, name='Kale')
        own_ingredient = Ingredient.objects.create(user=self.user, name='Egg')

        response = self.client.get(INGREDIENTS_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_ingredient.name)

    def test_create_ingredient_successful(self):
        """POSTing a valid payload creates the ingredient for this user."""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENTS_URL, payload)

        created = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(created)

    def test_create_ingredient_invalid(self):
        """POSTing an empty name is rejected with HTTP 400."""
        payload = {'name': ''}
        response = self.client.post(INGREDIENTS_URL, payload)

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 30.534091 | 74 | 0.676219 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITest(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITest(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'password'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'user2@test.com',
'password'
)
Ingredient.objects.create(user=user2, name='Kale')
ingredient = Ingredient.objects.create(user=self.user, name='Egg')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| true | true |
f72f7970ff216f32d358651dab960a4f8b67cce2 | 5,358 | py | Python | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | migrations/versions/a725247ae9b2_initial_migration.py | np1e/whoami_backend | d91540885c81194489b4b9d0dc67acfe81a59688 | [
"MIT"
] | null | null | null | """initial migration
Revision ID: a725247ae9b2
Revises:
Create Date: 2021-04-08 08:45:24.584283
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a725247ae9b2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: collections, games, players, characters, votes."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): every ForeignKeyConstraint below is commented out, so no
    # referential integrity is enforced at the database level — presumably to
    # sidestep the game <-> player circular reference during creation; confirm
    # whether the constraints should be restored in a follow-up migration.
    op.create_table('collection',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=30), nullable=False),
    sa.Column('default', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('game',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('key', sa.String(length=16), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.Column('state', sa.Enum('WAITING', 'RUNNING', 'FINISHED', name='gamestate'), nullable=True),
    sa.Column('max_players', sa.Integer(), nullable=True),
    sa.Column('current_player_id', sa.String(length=36), nullable=True),
    sa.Column('awaitingGuessVote', sa.Boolean(), nullable=True),
    #sa.ForeignKeyConstraint(['current_player_id'], ['player._id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id'),
    sa.UniqueConstraint('key')
    )
    op.create_table('image',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('image_url', sa.String(), nullable=True),
    sa.Column('license', sa.String(), nullable=True),
    sa.Column('creator', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_table('player',
    sa.Column('_id', sa.String(length=36), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.Column('game_id', sa.String(length=36), nullable=True),
    sa.Column('connected', sa.Boolean(), nullable=True),
    sa.Column('ready', sa.Boolean(), nullable=True),
    sa.Column('sid', sa.String(), nullable=True),
    sa.Column('is_creator', sa.Boolean(), nullable=True),
    sa.Column('character_id', sa.Integer(), nullable=True),
    sa.Column('guesses', sa.Integer(), nullable=True),
    sa.Column('guessed', sa.Boolean(), nullable=True),
    #sa.ForeignKeyConstraint(['character_id'], ['character.id'], ),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    sa.PrimaryKeyConstraint('_id'),
    sa.UniqueConstraint('_id')
    )
    op.create_table('tag',
    sa.Column('name', sa.String(length=20), nullable=False),
    sa.PrimaryKeyConstraint('name'),
    sa.UniqueConstraint('name')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('username', sa.String(length=18), nullable=False),
    sa.Column('password_hash', sa.String(length=94), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('username')
    )
    op.create_table('character',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(length=80), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=True),
    sa.Column('image_id', sa.String(length=36), nullable=True),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['image_id'], ['image.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Association table: collection <-> tag (many-to-many).
    op.create_table('tags',
    sa.Column('tag_name', sa.String(length=20), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['tag_name'], ['tag.name'], ),
    sa.PrimaryKeyConstraint('tag_name', 'collection_id')
    )
    # Association table: game <-> collection (collections in use by a game).
    op.create_table('used_collections',
    sa.Column('game_id', sa.String(length=36), nullable=False),
    sa.Column('collection_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    sa.PrimaryKeyConstraint('game_id', 'collection_id')
    )
    op.create_table('vote',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('result', sa.Boolean(), nullable=True),
    sa.Column('game_id', sa.String(length=36), nullable=True),
    sa.Column('player_id', sa.String(length=36), nullable=True),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    #sa.ForeignKeyConstraint(['player_id'], ['player._id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table: game <-> vote.
    op.create_table('votes',
    sa.Column('game_id', sa.String(length=36), nullable=False),
    sa.Column('vote_id', sa.Integer(), nullable=False),
    #sa.ForeignKeyConstraint(['game_id'], ['game.id'], ),
    #sa.ForeignKeyConstraint(['vote_id'], ['vote.id'], ),
    sa.PrimaryKeyConstraint('game_id', 'vote_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), association tables first."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('votes')
    op.drop_table('vote')
    op.drop_table('used_collections')
    op.drop_table('tags')
    op.drop_table('character')
    op.drop_table('user')
    op.drop_table('tag')
    op.drop_table('player')
    op.drop_table('image')
    op.drop_table('game')
    op.drop_table('collection')
    # NOTE(review): the 'gamestate' Enum type created in upgrade() is not
    # dropped here; on backends with first-class enum types (e.g. PostgreSQL)
    # it would be left behind after a downgrade — confirm whether that matters.
    # ### end Alembic commands ###
| 39.985075 | 99 | 0.66424 | from alembic import op
import sqlalchemy as sa
revision = 'a725247ae9b2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
('default', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('game',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('key', sa.String(length=16), nullable=True),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('state', sa.Enum('WAITING', 'RUNNING', 'FINISHED', name='gamestate'), nullable=True),
sa.Column('max_players', sa.Integer(), nullable=True),
sa.Column('current_player_id', sa.String(length=36), nullable=True),
sa.Column('awaitingGuessVote', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('key')
)
op.create_table('image',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('image_url', sa.String(), nullable=True),
sa.Column('license', sa.String(), nullable=True),
sa.Column('creator', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('player',
sa.Column('_id', sa.String(length=36), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('game_id', sa.String(length=36), nullable=True),
sa.Column('connected', sa.Boolean(), nullable=True),
sa.Column('ready', sa.Boolean(), nullable=True),
sa.Column('sid', sa.String(), nullable=True),
sa.Column('is_creator', sa.Boolean(), nullable=True),
sa.Column('character_id', sa.Integer(), nullable=True),
sa.Column('guesses', sa.Integer(), nullable=True),
sa.Column('guessed', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('_id'),
sa.UniqueConstraint('_id')
)
op.create_table('tag',
sa.Column('name', sa.String(length=20), nullable=False),
sa.PrimaryKeyConstraint('name'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('username', sa.String(length=18), nullable=False),
sa.Column('password_hash', sa.String(length=94), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('character',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=True),
sa.Column('image_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('tags',
sa.Column('tag_name', sa.String(length=20), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('tag_name', 'collection_id')
)
op.create_table('used_collections',
sa.Column('game_id', sa.String(length=36), nullable=False),
sa.Column('collection_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('game_id', 'collection_id')
)
op.create_table('vote',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('result', sa.Boolean(), nullable=True),
sa.Column('game_id', sa.String(length=36), nullable=True),
sa.Column('player_id', sa.String(length=36), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('votes',
sa.Column('game_id', sa.String(length=36), nullable=False),
sa.Column('vote_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('game_id', 'vote_id')
)
le('image')
op.drop_table('game')
op.drop_table('collection')
| true | true |
f72f7a106d50691196cea4aa0e76813201841350 | 2,499 | py | Python | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | ndscheduler/default_settings.py | SquisEat/ndscheduler | 14df862cdafca37f46009419b7627989978c5803 | [
"BSD-2-Clause"
] | null | null | null | """Default settings."""
import logging
import os
#
# Development mode or production mode
# If DEBUG is True, then auto-reload is enabled, i.e., when code is modified, the server will be
# reloaded immediately
#
DEBUG = True

#
# Static Assets
#
# The web UI is a single page app. All javascript/css files should be in STATIC_DIR_PATH
#
STATIC_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
TEMPLATE_DIR_PATH = STATIC_DIR_PATH
APP_INDEX_PAGE = 'index.html'

#
# Server setup
#
HTTP_PORT = 7777
HTTP_ADDRESS = '127.0.0.1'  # loopback only by default; override to expose the server externally
TORNADO_MAX_WORKERS = 8

#
# ApScheduler settings
#
THREAD_POOL_SIZE = 4
JOB_MAX_INSTANCES = 3  # max concurrently running instances of the same job
JOB_COALESCE = True    # collapse several missed runs of one job into a single run
TIMEZONE = 'UTC'

# When a job is misfired -- a job was to run at a specific time, but due to some
# reason (e.g., scheduler restart), we missed that run.
#
# By default, if a job is misfired within 1 hour, the scheduler will rerun it.
# Otherwise, if it's misfired over 1 hour, the scheduler will not rerun it.
JOB_MISFIRE_GRACE_SEC = 3600

#
# Database settings
#
JOBS_TABLENAME = 'scheduler_jobs'
EXECUTIONS_TABLENAME = 'scheduler_execution'
AUDIT_LOGS_TABLENAME = 'scheduler_jobauditlog'

DATABASE_TABLENAMES = {
    'jobs_tablename': JOBS_TABLENAME,
    'executions_tablename': EXECUTIONS_TABLENAME,
    'auditlogs_tablename': AUDIT_LOGS_TABLENAME
}

# See different database providers in ndscheduler/core/datastore/providers/

# SQLite (the default provider)
#
DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
DATABASE_CONFIG_DICT = {
    'file_path': 'datastore.db'
}

# Postgres (alternative provider — uncomment and fill in to use)
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.postgres.DatastorePostgres'
# DATABASE_CONFIG_DICT = {
#     'user': 'username',
#     'password': '',
#     'hostname': 'localhost',
#     'port': 5432,
#     'database': 'scheduler',
#     'sslmode': 'disable'
# }

# MySQL (alternative provider — uncomment and fill in to use)
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.mysql.DatastoreMySQL'
# DATABASE_CONFIG_DICT = {
#     'user': 'username',
#     'password': '',
#     'hostname': 'localhost',
#     'port': 3306,
#     'database': 'scheduler'
# }

# ndscheduler is based on apscheduler. Here we can customize apscheduler's main scheduler class.
# Please see ndscheduler/core/scheduler/base.py
SCHEDULER_CLASS = 'ndscheduler.corescheduler.core.base.BaseScheduler'

#
# Set logging level
#
logging.getLogger(__name__).setLevel(logging.INFO)

# Packages that contain job classes, e.g., simple_scheduler.jobs
JOB_CLASS_PACKAGES = []
| 24.262136 | 98 | 0.735494 |
import logging
import os
DEBUG = True
STATIC_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
TEMPLATE_DIR_PATH = STATIC_DIR_PATH
APP_INDEX_PAGE = 'index.html'
HTTP_PORT = 7777
HTTP_ADDRESS = '127.0.0.1'
TORNADO_MAX_WORKERS = 8
THREAD_POOL_SIZE = 4
JOB_MAX_INSTANCES = 3
JOB_COALESCE = True
TIMEZONE = 'UTC'
JOB_MISFIRE_GRACE_SEC = 3600
#
# Database settings
#
JOBS_TABLENAME = 'scheduler_jobs'
EXECUTIONS_TABLENAME = 'scheduler_execution'
AUDIT_LOGS_TABLENAME = 'scheduler_jobauditlog'
DATABASE_TABLENAMES = {
'jobs_tablename': JOBS_TABLENAME,
'executions_tablename': EXECUTIONS_TABLENAME,
'auditlogs_tablename': AUDIT_LOGS_TABLENAME
}
# See different database providers in ndscheduler/core/datastore/providers/
# SQLite
#
DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
DATABASE_CONFIG_DICT = {
'file_path': 'datastore.db'
}
# Postgres
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.postgres.DatastorePostgres'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 5432,
# 'database': 'scheduler',
# 'sslmode': 'disable'
# }
# MySQL
#
# DATABASE_CLASS = 'ndscheduler.corescheduler.datastore.providers.mysql.DatastoreMySQL'
# DATABASE_CONFIG_DICT = {
# 'user': 'username',
# 'password': '',
# 'hostname': 'localhost',
# 'port': 3306,
# 'database': 'scheduler'
# }
# ndschedule is based on apscheduler. Here we can customize the apscheduler's main scheduler class
SCHEDULER_CLASS = 'ndscheduler.corescheduler.core.base.BaseScheduler'
logging.getLogger(__name__).setLevel(logging.INFO)
JOB_CLASS_PACKAGES = []
| true | true |
f72f7a7cce9f58fe52ac8e93e43957260898eeda | 1,281 | py | Python | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 25 | 2019-07-05T01:16:18.000Z | 2021-03-22T20:49:25.000Z | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 299 | 2019-03-05T15:15:30.000Z | 2021-04-08T23:25:41.000Z | tests/test_utils/test_import_namespaces.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | 19 | 2019-05-23T17:46:47.000Z | 2021-03-25T06:45:55.000Z | import unittest
from biolinkml.generators.shexgen import ShExGenerator
from tests.test_utils.environment import env
class URLImportTestCase(unittest.TestCase):
@unittest.skipIf(False, "Finish implementing this")
def test_import_from_url(self):
""" Validate namespace bindings """
shex = ShExGenerator(env.input_path('import_test_l2.yaml')).serialize()
self.assertEqual("""BASE <http://example.org/l2/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX l1: <http://example.org/l1/>
PREFIX base: <http://example.org/b/>
l1:Int xsd:integer
base:String xsd:string
base:BaseClass CLOSED {
( $base:BaseClass_tes base:base_slot @base:String ? ;
rdf:type [ base:BaseClass ] ?
)
}
l1:L1Class (
CLOSED {
( $l1:L1Class_tes ( l1:l1_slot1 @base:String ? ;
l1:l1_slot2 @l1:Int ?
) ;
rdf:type [ l1:L1Class ] ?
)
} OR @<L2Class>
)
<L2Class> CLOSED {
( $<L2Class_tes> ( &l1:L1Class_tes ;
rdf:type [ l1:L1Class ] ? ;
<l2_slot1> @base:String ? ;
<l2_slot2> @l1:Int ?
) ;
rdf:type [ <L2Class> ] ?
)
}""", shex.strip())
if __name__ == '__main__':
unittest.main()
| 24.169811 | 79 | 0.613583 | import unittest
from biolinkml.generators.shexgen import ShExGenerator
from tests.test_utils.environment import env
class URLImportTestCase(unittest.TestCase):
@unittest.skipIf(False, "Finish implementing this")
def test_import_from_url(self):
shex = ShExGenerator(env.input_path('import_test_l2.yaml')).serialize()
self.assertEqual("""BASE <http://example.org/l2/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX l1: <http://example.org/l1/>
PREFIX base: <http://example.org/b/>
l1:Int xsd:integer
base:String xsd:string
base:BaseClass CLOSED {
( $base:BaseClass_tes base:base_slot @base:String ? ;
rdf:type [ base:BaseClass ] ?
)
}
l1:L1Class (
CLOSED {
( $l1:L1Class_tes ( l1:l1_slot1 @base:String ? ;
l1:l1_slot2 @l1:Int ?
) ;
rdf:type [ l1:L1Class ] ?
)
} OR @<L2Class>
)
<L2Class> CLOSED {
( $<L2Class_tes> ( &l1:L1Class_tes ;
rdf:type [ l1:L1Class ] ? ;
<l2_slot1> @base:String ? ;
<l2_slot2> @l1:Int ?
) ;
rdf:type [ <L2Class> ] ?
)
}""", shex.strip())
if __name__ == '__main__':
unittest.main()
| true | true |
f72f7ae3ee8c69c4af98bcfa7cfeb00f3ed16fef | 82 | py | Python | demo/demoproject/demoapp/views.py | mrc75/django-easy-reports | 5a92e8e1fd199ee3fd0fdfd5b47d84fe72861a0a | [
"BSD-1-Clause"
] | 2 | 2015-05-28T10:35:54.000Z | 2016-11-18T04:33:26.000Z | demo/demoproject/demoapp/views.py | mrc75/django-easy-reports | 5a92e8e1fd199ee3fd0fdfd5b47d84fe72861a0a | [
"BSD-1-Clause"
] | 1 | 2015-10-25T01:50:04.000Z | 2015-10-25T01:50:04.000Z | demo/demoproject/demoapp/views.py | saxix/django-easy-reports | 81679b0c49d728c198601f7ee3a726a66cae49b5 | [
"BSD-1-Clause"
] | null | null | null | from ereports.views import ReportIndex
class ReportsView(ReportIndex):
pass
| 13.666667 | 38 | 0.792683 | from ereports.views import ReportIndex
class ReportsView(ReportIndex):
pass
| true | true |
f72f7b324f2181b534f98b9140d1f67ec53e65b6 | 597 | py | Python | mms/utils/__init__.py | abhinavs95/mxnet-model-server | 901c1a9a2def8373cd9a91c8d2f47248eed281cc | [
"Apache-2.0"
] | 1 | 2019-01-10T20:56:25.000Z | 2019-01-10T20:56:25.000Z | mms/utils/__init__.py | frankfliu/mxnet-model-server | ce36c9e35efc17efe0fb79bb7019bdf3593131a5 | [
"Apache-2.0"
] | null | null | null | mms/utils/__init__.py | frankfliu/mxnet-model-server | ce36c9e35efc17efe0fb79bb7019bdf3593131a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Util files for MMS
"""
| 42.642857 | 75 | 0.752094 | true | true | |
f72f7b9ba7292667009bfeeb0edba74ee8da34be | 74,707 | py | Python | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/api/workflows.py | itisAliRH/galaxy | b3b693ea0788f773442c8481472a87f43ccb10d7 | [
"CC-BY-3.0"
] | null | null | null | """
API operations for Workflows
"""
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
log = logging.getLogger(__name__)
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
    """Request payload for creating a workflow invocation from a model store.

    Extends the generic store-content source description with an optional
    target history; extra keys are allowed and passed through untouched.
    """

    history_id: Optional[str]  # encoded id of the history to invoke within, if any

    class Config:
        # Allow additional, schema-unknown fields to pass through.
        extra = Extra.allow
class WorkflowsAPIController(
BaseGalaxyAPIController,
UsesStoredWorkflowMixin,
UsesAnnotations,
SharableMixin,
ServesExportStores,
ConsumesModelStores,
):
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
def __init__(self, app: StructuredApp):
super().__init__(app)
self.history_manager = app.history_manager
self.workflow_manager = app.workflow_manager
self.workflow_contents_manager = app.workflow_contents_manager
self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
"""
Get workflows present in the tools panel
GET /api/workflows/menu
"""
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
Save workflow menu to be shown in the tool panel
PUT /api/workflows/menu
"""
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
elif type(workflow_ids) != list:
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
# Decode the encoded workflow ids
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
# This explicit remove seems like a hack, need to figure out
# how to make the association do it automatically.
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
# To ensure id list is unique
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
"""
Displays a collection of workflows.
:param show_published: Optional boolean to include published workflows
If unspecified this behavior depends on whether the request
is coming from an authenticated session. The default is true
for annonymous API requests and false otherwise.
:type show_published: boolean
:param show_hidden: if True, show hidden workflows
:type show_hidden: boolean
:param show_deleted: if True, show deleted workflows
:type show_deleted: boolean
:param show_shared: Optional boolean to include shared workflows.
If unspecified this behavior depends on show_deleted/show_hidden.
Defaulting to false if show_hidden or show_deleted is true or else
false.
:param missing_tools: if True, include a list of missing tools per workflow
:type missing_tools: boolean
"""
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
    @expose_api_anonymous_and_sessionless
    def show(self, trans: GalaxyWebTransaction, id, **kwd):
        """
        GET /api/workflows/{encoded_workflow_id}

        Displays information needed to run a workflow.

        :param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
                         by default.
        :type instance: boolean
        """
        stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
        # Access control: a non-importable workflow is only visible to its owner,
        # admins, or users it has been explicitly shared with.
        if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
            if (
                trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
                .filter_by(user=trans.user, stored_workflow=stored_workflow)
                .count()
                == 0
            ):
                message = "Workflow is neither importable, nor owned by or shared with current user"
                raise exceptions.ItemAccessibilityException(message)
        # "legacy" serialization style is retained for older clients; "instance" is the default.
        if kwd.get("legacy", False):
            style = "legacy"
        else:
            style = "instance"
        version = kwd.get("version")
        if version is None and util.string_as_bool(kwd.get("instance", "false")):
            # A Workflow instance may not be the latest workflow version attached to StoredWorkflow.
            # This figures out the correct version so that we return the correct Workflow and version.
            workflow_id = self.decode_id(id)
            for i, workflow in enumerate(reversed(stored_workflow.workflows)):
                if workflow.id == workflow_id:
                    version = i
                    break
        return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
"""
GET /api/workflows/{encoded_workflow_id}/versions
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Lists all versions of this workflow.
"""
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
    @expose_api
    def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
        """
        POST /api/workflows

        Create workflows in various ways. Exactly one of the creation-mode
        keys (``archive_source``, ``from_history_id``, ``from_path``,
        ``shared_workflow_id``, ``workflow``) must be present in the payload.

        :param from_history_id: Id of history to extract a workflow from.
        :type from_history_id: str

        :param job_ids: If from_history_id is set - optional list of jobs to include when extracting a workflow from history
        :type job_ids: str

        :param dataset_ids: If from_history_id is set - optional list of HDA "hid"s corresponding to workflow inputs when extracting a workflow from history
        :type dataset_ids: str

        :param dataset_collection_ids: If from_history_id is set - optional list of HDCA "hid"s corresponding to workflow inputs when extracting a workflow from history
        :type dataset_collection_ids: str

        :param workflow_name: If from_history_id is set - name of the workflow to create when extracting a workflow from history
        :type workflow_name: str
        """
        ways_to_create = {
            "archive_source",
            "from_history_id",
            "from_path",
            "shared_workflow_id",
            "workflow",
        }

        if trans.user_is_bootstrap_admin:
            raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")

        # Exactly one creation mode must be specified.
        if payload is None or len(ways_to_create.intersection(payload)) == 0:
            message = f"One parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterMissingException(message)

        if len(ways_to_create.intersection(payload)) > 1:
            message = f"Only one parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterInvalidException(message)

        if "archive_source" in payload:
            # Import from a URL, local path (file://), TRS tool, or an uploaded file.
            archive_source = payload["archive_source"]
            archive_file = payload.get("archive_file")
            archive_data = None
            if archive_source:
                validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
                if archive_source.startswith("file://"):
                    # Local path import is delegated to the "workflow" creation path.
                    workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
                    payload["workflow"] = workflow_src
                    return self.__api_import_new_workflow(trans, payload, **kwd)
                elif archive_source == "trs_tool":
                    # Fetch the workflow descriptor from a GA4GH TRS server.
                    trs_server = payload.get("trs_server")
                    trs_tool_id = payload.get("trs_tool_id")
                    trs_version_id = payload.get("trs_version_id")
                    import_source = None
                    archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
                else:
                    try:
                        archive_data = stream_url_to_str(
                            archive_source, trans.app.file_sources, prefix="gx_workflow_download"
                        )
                        import_source = "URL"
                    except Exception:
                        raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
            elif hasattr(archive_file, "file"):
                # Archive came in as an uploaded file rather than a source URI.
                uploaded_file = archive_file.file
                uploaded_file_name = uploaded_file.name
                if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                    archive_data = util.unicodify(uploaded_file.read())
                    import_source = "uploaded file"
                else:
                    raise exceptions.MessageException("You attempted to upload an empty file.")
            else:
                raise exceptions.MessageException("Please provide a URL or file.")
            return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)

        if "from_history_id" in payload:
            # Extract a new workflow from the jobs/datasets of an existing history.
            from_history_id = payload.get("from_history_id")
            from_history_id = self.decode_id(from_history_id)
            history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
            job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
            dataset_ids = payload.get("dataset_ids", [])
            dataset_collection_ids = payload.get("dataset_collection_ids", [])
            workflow_name = payload["workflow_name"]
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.user,
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
            item["url"] = url_for("workflow", id=item["id"])
            return item

        if "from_path" in payload:
            # Server-side path import; optional object_id selects a workflow within the file.
            from_path = payload.get("from_path")
            object_id = payload.get("object_id")
            workflow_src = {"src": "from_path", "path": from_path}
            if object_id is not None:
                workflow_src["object_id"] = object_id
            payload["workflow"] = workflow_src
            return self.__api_import_new_workflow(trans, payload, **kwd)

        if "shared_workflow_id" in payload:
            workflow_id = payload["shared_workflow_id"]
            return self.__api_import_shared_workflow(trans, workflow_id, payload)

        if "workflow" in payload:
            return self.__api_import_new_workflow(trans, payload, **kwd)

        # This was already raised above, but just in case...
        raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
@expose_api_raw_anonymous_and_sessionless
def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
"""
GET /api/workflows/{encoded_workflow_id}/download
Returns a selected workflow.
:type style: str
:param style: Style of export. The default is 'export', which is the meant to be used
with workflow import endpoints. Other formats such as 'instance', 'editor',
'run' are more tied to the GUI and should not be considered stable APIs.
The default format for 'export' is specified by the
admin with the `default_workflow_export_format` config
option. Style can be specified as either 'ga' or 'format2' directly
to be explicit about which format to download.
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
"""
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
style = kwd.get("style", "export")
download_format = kwd.get("format")
version = kwd.get("version")
history_id = kwd.get("history_id")
history = None
if history_id:
history = self.history_manager.get_accessible(
self.decode_id(history_id), trans.user, current_history=trans.history
)
ret_dict = self.workflow_contents_manager.workflow_to_dict(
trans, stored_workflow, style=style, version=version, history=history
)
if download_format == "json-download":
sname = stored_workflow.name
sname = "".join(c in util.FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150]
if ret_dict.get("format-version", None) == "0.1":
extension = "ga"
else:
extension = "gxwf.json"
trans.response.headers[
"Content-Disposition"
] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
trans.response.set_content_type("application/galaxy-archive")
if style == "format2" and download_format != "json-download":
return ordered_dump(ret_dict)
else:
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
"""
DELETE /api/workflows/{encoded_workflow_id}
Deletes a specified workflow
Author: rpark
copied from galaxy.web.controllers.workflows.py (delete)
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
# check to see if user has permissions to selected workflow
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
# Mark a workflow as deleted
stored_workflow.deleted = True
trans.sa_session.flush()
# TODO: Unsure of response message to let api know that a workflow was successfully deleted
return f"Workflow '{stored_workflow.name}' successfully deleted"
    @expose_api
    def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
        """
        POST /api/workflows/upload

        Importing dynamic workflows from the api. Return newly generated workflow id.

        ``payload['workflow']`` is expected to be a json representation of a
        workflow to be inserted into the database.

        Deprecated in favor of POST /api/workflows with encoded 'workflow' in
        payload the same way; delegates to the shared import helper.
        """
        return self.__api_import_new_workflow(trans, payload, **kwd)
@expose_api
def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
"""
PUT /api/workflows/{id}
Update the workflow stored with ``id``.
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing any or all the
:workflow:
the json description of the workflow as would be
produced by GET workflows/<id>/download or
given to `POST workflows`
The workflow contents will be updated to target this.
:name:
optional string name for the workflow, if not present in payload,
name defaults to existing name
:annotation:
optional string annotation for the workflow, if not present in payload,
annotation defaults to existing annotation
:menu_entry:
optional boolean marking if the workflow should appear in the user\'s menu,
if not present, workflow menu entries are not modified
:tags:
optional list containing list of tags to add to the workflow (overwriting
existing tags), if not present, tags are not modified
:from_tool_form:
True iff encoded state coming in is encoded for the tool form.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
workflow_dict = payload.get("workflow", {})
workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
if workflow_dict:
require_flush = False
raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
workflow_dict = raw_workflow_description.as_dict
new_workflow_name = workflow_dict.get("name")
old_workflow = stored_workflow.latest_workflow
name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
steps_updated = "steps" in workflow_dict
if name_updated and not steps_updated:
sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
workflow = old_workflow.copy(user=trans.user)
workflow.stored_workflow = stored_workflow
workflow.name = sanitized_name
stored_workflow.name = sanitized_name
stored_workflow.latest_workflow = workflow
trans.sa_session.add(workflow, stored_workflow)
require_flush = True
if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
stored_workflow.hidden = workflow_dict["hidden"]
require_flush = True
if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
stored_workflow.published = workflow_dict["published"]
require_flush = True
if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
stored_workflow.importable = workflow_dict["importable"]
require_flush = True
if "annotation" in workflow_dict and not steps_updated:
newAnnotation = sanitize_html(workflow_dict["annotation"])
self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
require_flush = True
if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
decoded_id = trans.security.decode_id(id)
if show_in_panel:
workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
if decoded_id not in workflow_ids:
menu_entry = model.StoredWorkflowMenuEntry()
menu_entry.stored_workflow = stored_workflow
stored_workflow_menu_entries.append(menu_entry)
trans.sa_session.add(menu_entry)
require_flush = True
else:
# remove if in list
entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
if decoded_id in entries:
stored_workflow_menu_entries.remove(entries[decoded_id])
require_flush = True
# set tags
if "tags" in workflow_dict:
trans.app.tag_handler.set_tags_from_list(
user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
)
if require_flush:
trans.sa_session.flush()
if "steps" in workflow_dict:
try:
workflow_update_options = WorkflowUpdateOptions(**payload)
workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
trans,
stored_workflow,
raw_workflow_description,
workflow_update_options,
)
except MissingToolsException:
raise exceptions.MessageException(
"This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
)
else:
message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
@expose_api
def refactor(self, trans, id, payload, **kwds):
"""
* PUT /api/workflows/{id}/refactor
updates the workflow stored with ``id``
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing list of actions to apply.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
@expose_api
def build_module(self, trans: GalaxyWebTransaction, payload=None):
"""
POST /api/workflows/build_module
Builds module models for the workflow editor.
"""
if payload is None:
payload = {}
inputs = payload.get("inputs", {})
trans.workflow_building_mode = workflow_building_modes.ENABLED
module = module_factory.from_dict(trans, payload, from_tool_form=True)
if "tool_state" not in payload:
module_state: Dict[str, Any] = {}
populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
module.recover_state(module_state, from_tool_form=True)
return {
"label": inputs.get("__label", ""),
"annotation": inputs.get("__annotation", ""),
"name": module.get_name(),
"tool_state": module.get_state(),
"content_id": module.get_content_id(),
"inputs": module.get_all_inputs(connectable_only=True),
"outputs": module.get_all_outputs(),
"config_form": module.get_config_form(),
"post_job_actions": module.get_post_job_actions(inputs),
}
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
"""
POST /api/workflows/get_tool_predictions
Fetch predicted tools for a workflow
:type payload: dict
:param payload:
a dictionary containing two parameters
'tool_sequence' - comma separated sequence of tool ids
'remote_model_url' - (optional) path to the deep learning model
"""
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
#
# -- Helper methods --
#
def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
payload = payload or {}
try:
data = json.loads(archive_data)
except Exception:
if "GalaxyWorkflow" in archive_data:
data = {"yaml_content": archive_data}
else:
raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
if not data:
raise exceptions.MessageException("The data content is missing.")
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans, raw_workflow_description, workflow_create_options, source=source
)
workflow_id = workflow.id
workflow = workflow.latest_workflow
response = {
"message": f"Workflow '{escape(workflow.name)}' imported successfully.",
"status": "success",
"id": trans.security.encode_id(workflow_id),
}
if workflow.has_errors:
response["message"] = "Imported, but some steps in this workflow have validation errors."
response["status"] = "error"
elif len(workflow.steps) == 0:
response["message"] = "Imported, but this workflow has no steps."
response["status"] = "error"
elif workflow.has_cycles:
response["message"] = "Imported, but this workflow contains cycles."
response["status"] = "error"
return response
def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
data = payload["workflow"]
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans,
raw_workflow_description,
workflow_create_options,
)
# galaxy workflow newly created id
workflow_id = workflow.id
# api encoded, id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["annotations"] = [x.annotation for x in workflow.annotations]
item["url"] = url_for("workflow", id=encoded_id)
item["owner"] = workflow.user.username
item["number_of_steps"] = len(workflow.latest_workflow.steps)
return item
    def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
        # Delegate format normalization (native 'ga' / Format 2 / yaml_content) to the contents manager.
        return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/import
Import a workflow shared by other users.
:param workflow_id: the workflow id (required)
:type workflow_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
# Pull parameters out of payload.
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
try:
stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
except Exception:
raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
if stored_workflow.importable is False:
raise exceptions.ItemAccessibilityException(
"The owner of this workflow has disabled imports via this link."
)
elif stored_workflow.deleted:
raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
imported_workflow = self._import_shared_workflow(trans, stored_workflow)
item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
encoded_id = trans.security.encode_id(imported_workflow.id)
item["url"] = url_for("workflow", id=encoded_id)
return item
    @expose_api
    def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """
        POST /api/workflows/{encoded_workflow_id}/invocations
        Schedule the workflow specified by `workflow_id` to run.
        .. note:: This method takes the same arguments as
            :func:`galaxy.webapps.galaxy.api.workflows.WorkflowsAPIController.create` above.
        :raises: exceptions.MessageException, exceptions.RequestParameterInvalidException
        """
        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
        workflow = stored_workflow.latest_workflow
        run_configs = build_workflow_run_configs(trans, workflow, payload)
        is_batch = payload.get("batch")
        # Multiple run configurations are only allowed when 'batch' is explicitly requested.
        if not is_batch and len(run_configs) != 1:
            raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
        require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
        tools = self.workflow_contents_manager.get_all_tools(workflow)
        # Refuse to invoke when any referenced tool (optionally at its exact version) is missing.
        missing_tools = [
            tool
            for tool in tools
            if not self.app.toolbox.has_tool(
                tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
            )
        ]
        if missing_tools:
            missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
            if require_exact_tool_versions:
                missing_tools_message += ", ".join(
                    [f"{tool['tool_id']} (version {tool['tool_version']})" for tool in missing_tools]
                )
            else:
                missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
            raise exceptions.MessageException(missing_tools_message)
        invocations = []
        for run_config in run_configs:
            workflow_scheduler_id = payload.get("scheduler", None)
            # TODO: workflow scheduler hints
            work_request_params = dict(scheduler=workflow_scheduler_id)
            workflow_invocation = queue_invoke(
                trans=trans,
                workflow=workflow,
                workflow_run_config=run_config,
                request_params=work_request_params,
                flush=False,
            )
            invocations.append(workflow_invocation)
        # Single flush after queuing everything (queue_invoke was called with flush=False above).
        trans.sa_session.flush()
        invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
        # Batch requests return the full list; single requests return the lone invocation.
        if is_batch:
            return invocations
        else:
            return invocations[0]
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations
GET /api/invocations
Get the list of a user's workflow invocations. If workflow_id is supplied
(either via URL or query parameter) it should be an encoded StoredWorkflow id
and returned invocations will be restricted to that workflow. history_id (an encoded
History id) can be used to further restrict the query. If neither a workflow_id or
history_id is supplied, all the current user's workflow invocations will be indexed
(as determined by the invocation being executed on one of the user's histories).
:param workflow_id: an encoded stored workflow id to restrict query to
:type workflow_id: str
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:param history_id: an encoded history id to restrict query to
:type history_id: str
:param job_id: an encoded job id to restrict query to
:type job_id: str
:param user_id: an encoded user id to restrict query to, must be own id if not admin user
:type user_id: str
:param view: level of detail to return per invocation 'element' or 'collection'.
:type view: str
:param step_details: If 'view' is 'element', also include details on individual steps.
:type step_details: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
"""
POST /api/invocations/from_store
Create invocation(s) from a supplied model store.
Input can be an archive describing a Galaxy model store containing an
workflow invocation - for instance one created with with write_store
or prepare_store_download endpoint.
"""
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
def _create_from_store(
self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
):
history = self.history_manager.get_owned(
self.decode_id(payload.history_id), trans.user, current_history=trans.history
)
object_tracker = self.create_objects_from_store(
trans,
payload,
history=history,
)
return self.invocations_service.serialize_workflow_invocations(
object_tracker.invocations_by_key.values(), serialization_params
)
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}
GET /api/invocations/{invocation_id}
Get detailed description of workflow invocation
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_details: fetch details about individual invocation steps
and populate a steps attribute in the resulting
dictionary. Defaults to false.
:type step_details: bool
:param legacy_job_state: If step_details is true, and this is set to true
populate the invocation step state with the job state
instead of the invocation step state. This will also
produce one step per job in mapping jobs to mimic the
older behavior with respect to collections. Partially
scheduled steps may provide incomplete information
and the listed steps outputs are the mapped over
step outputs but the individual job outputs
when this is set - at least for now.
:type legacy_job_state: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
"""
DELETE /api/workflows/{workflow_id}/invocations/{invocation_id}
DELETE /api/invocations/{invocation_id}
Cancel the specified workflow invocation.
:param invocation_id: the usage id (required)
:type invocation_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report
GET /api/invocations/{invocation_id}/report
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report.pdf
GET /api/invocations/{invocation_id}/report.pdf
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
history = workflow_invocation.history
workflow = workflow_invocation.workflow
stored_workflow = workflow.stored_workflow
# pull in the user info from those who the history and workflow has been shared with
contributing_users = [stored_workflow.user]
# may want to extend this to have more reviewers.
reviewing_users = [stored_workflow.user]
encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
encoded_history_id = trans.security.encode_id(history.id)
dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
for i, w in enumerate(reversed(stored_workflow.workflows)):
if workflow == w:
current_version = i
contributors = []
for contributing_user in contributing_users:
contributor = {
"orcid": kwd.get("xref", []),
"name": contributing_user.username,
"affiliation": "",
"contribution": ["authoredBy"],
"email": contributing_user.email,
}
contributors.append(contributor)
reviewers = []
for reviewer in reviewing_users:
reviewer = {
"status": "approved",
"reviewer_comment": "",
"date": workflow_invocation.update_time.isoformat(),
"reviewer": {
"orcid": kwd.get("orcid", []),
"name": contributing_user.username,
"affiliation": "",
"contribution": "curatedBy",
"email": contributing_user.email,
},
}
reviewers.append(reviewer)
provenance_domain = {
"name": workflow.name,
"version": current_version,
"review": reviewers,
"derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
"created": workflow_invocation.create_time.isoformat(),
"modified": workflow_invocation.update_time.isoformat(),
"contributors": contributors,
"license": "https://spdx.org/licenses/CC-BY-4.0.html",
}
keywords = []
for tag in stored_workflow.tags:
keywords.append(tag.user_tname)
for tag in history.tags:
if tag.user_tname not in keywords:
keywords.append(tag.user_tname)
metrics = {}
tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
for step in workflow_invocation.steps:
if step.workflow_step.type == "tool":
workflow_outputs_list, output_list, input_list = set(), [], []
for wo in step.workflow_step.workflow_outputs:
workflow_outputs_list.add(wo.output_name)
for job in step.jobs:
metrics[i] = summarize_job_metrics(trans, job)
for job_input in job.input_datasets:
if hasattr(job_input.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
input_obj = {
# TODO: that should maybe be a step prefix + element identifier where appropriate.
"filename": job_input.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_input.dataset.create_time.isoformat(),
}
input_list.append(input_obj)
for job_output in job.output_datasets:
if hasattr(job_output.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
output_obj = {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
}
output_list.append(output_obj)
if job_output.name in workflow_outputs_list:
output = {
"mediatype": job_output.dataset.extension,
"uri": {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
},
}
output_subdomain.append(output)
workflow_step = step.workflow_step
step_index = workflow_step.order_index
current_step = dict_workflow["steps"][str(step_index)]
pipeline_step = {
"step_number": step_index,
"name": current_step["name"],
"description": current_step["annotation"],
"version": current_step["tool_version"],
"prerequisite": kwd.get("prerequisite", []),
"input_list": input_list,
"output_list": output_list,
}
pipeline_steps.append(pipeline_step)
try:
software_prerequisite = {
"name": current_step["content_id"],
"version": current_step["tool_version"],
"uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
}
if software_prerequisite["uri"]["uri"] not in tools:
software_prerequisites.append(software_prerequisite)
tools.append(software_prerequisite["uri"]["uri"])
except Exception:
continue
if step.workflow_step.type == "data_input" and step.output_datasets:
for output_assoc in step.output_datasets:
encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
for output_dataset_collection_association in step.output_dataset_collections:
encoded_dataset_id = trans.security.encode_id(
output_dataset_collection_association.dataset_collection_id
)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
type="dataset_collection",
qualified=True,
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
usability_domain = []
for a in stored_workflow.annotations:
usability_domain.append(a.annotation)
for h in history.annotations:
usability_domain.append(h.annotation)
parametric_domain = []
for inv_step in workflow_invocation.steps:
try:
for k, v in inv_step.workflow_step.tool_inputs.items():
param, value, step = k, v, inv_step.workflow_step.order_index
parametric_domain.append({"param": param, "value": value, "step": step})
except Exception:
continue
execution_domain = {
"script_access_type": "a_galaxy_workflow",
"script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
"script_driver": "Galaxy",
"software_prerequisites": software_prerequisites,
"external_data_endpoints": [
{"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
kwd.get("external_data_endpoints"),
],
"environment_variables": kwd.get("environment_variables", {}),
}
extension = [
{
"extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
"galaxy_extension": {
"galaxy_url": url_for("/", qualified=True),
"galaxy_version": VERSION,
# TODO:
# 'aws_estimate': aws_estimate,
# 'job_metrics': metrics
},
}
]
error_domain = {
"empirical_error": kwd.get("empirical_error", []),
"algorithmic_error": kwd.get("algorithmic_error", []),
}
bco_dict = {
"provenance_domain": provenance_domain,
"usability_domain": usability_domain,
"extension_domain": extension,
"description_domain": {
"keywords": keywords,
"xref": kwd.get("xref", []),
"platform": ["Galaxy"],
"pipeline_steps": pipeline_steps,
},
"execution_domain": execution_domain,
"parametric_domain": parametric_domain,
"io_domain": {
"input_subdomain": input_subdomain,
"output_subdomain": output_subdomain,
},
"error_domain": error_domain,
}
# Generate etag from the BCO excluding object_id and spec_version, as
# specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
bco_dict.update(
{
"object_id": url_for(
controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
),
"spec_version": spec_version,
"etag": etag,
}
)
return bco_dict
@expose_api
def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute
Return a BioCompute Object for the workflow invocation.
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
and how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
return self._generate_invocation_bco(trans, invocation_id, **kwd)
@expose_api_raw
def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute/download
Returns a selected BioCompute Object as a file for download (HTTP
headers configured with filename and such).
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
and how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
trans.response.set_content_type("application/json")
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def invocation_step(self, trans, invocation_id, step_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
GET /api/invocations/{invocation_id}/steps/{step_id}
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:param payload: payload containing update action information
for running workflow.
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/step_jobs_summary
GET /api/invocations/{invocation_id}/step_jobs_summary
return job state summary info aggregated across per step of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict[]
:returns: an array of job summary object dictionaries for each step
"""
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous_and_sessionless
def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/jobs_summary
GET /api/invocations/{invocation_id}/jobs_summary
return job state summary info aggregated across all current jobs of workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict
:returns: a job summary object merged for all steps in workflow invocation
"""
ids = [self.decode_id(invocation_id)]
types = ["WorkflowInvocation"]
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
@expose_api
def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
"""
PUT /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
PUT /api/invocations/{invocation_id}/steps/{step_id}
Update state of running workflow step invocation - still very nebulous
but this would be for stuff like confirming paused steps can proceed
etc....
:param invocation_id: the usage id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
action = payload.get("action", None)
invocation_step = self.workflow_manager.update_invocation_step(
trans,
decoded_invocation_step_id,
action=action,
)
return self.__encode_invocation_step(trans, invocation_step)
def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
"""Creates a workflow from a dict.
Created workflow is stored in the database and returned.
"""
publish = workflow_create_options.publish
importable = workflow_create_options.is_importable
if publish and not importable:
raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")
workflow_contents_manager = self.app.workflow_contents_manager
raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
trans,
raw_workflow_description,
workflow_create_options,
source=source,
)
if importable:
self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
trans.sa_session.flush()
self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
def __get_stored_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
def __encode_invocation(self, invocation, **kwd):
params = InvocationSerializationParams(**kwd)
return self.invocations_service.serialize_workflow_invocation(invocation, params)
# Reusable FastAPI path-parameter declarations for the workflow endpoints below.
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)
InvocationIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)
# Query-parameter declarations shared by the workflow index endpoint.
DeletedQueryParam: bool = Query(
    default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)
HiddenQueryParam: bool = Query(
    default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)
MissingToolsQueryParam: bool = Query(
    default=False,
    title="Display missing tools",
    description="Whether to include a list of missing tools per workflow entry",
)
ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")
ShowSharedQueryParam: Optional[bool] = Query(
    default=None, title="Include workflows shared with authenticated user.", description=""
)
SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
    default=None,
    title="Sort workflow index by this attribute",
    description="In unspecified, default ordering depends on other parameters but generally the user's own workflows appear first based on update time",
)
SortDescQueryParam: Optional[bool] = Query(
    default=None,
    title="Sort Descending",
    description="Sort in descending order?",
)
# Pagination controls for the index query.
LimitQueryParam: Optional[int] = Query(default=None, title="Limit number of queries.")
OffsetQueryParam: Optional[int] = Query(
    default=0,
    title="Number of workflows to skip in sorted query (to enable pagination).",
)
# Tags understood by the free-text `search` query parameter (e.g. "name:foo", "u:admin").
query_tags = [
    IndexQueryTag("name", "The stored workflow's name.", "n"),
    IndexQueryTag(
        "tag",
        "The workflow's tag, if the tag contains a colon an approach will be made to match the key and value of the tag separately.",
        "t",
    ),
    IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
    IndexQueryTag(
        "is:published",
        "Include only published workflows in the final result. Be sure the the query parameter `show_published` is set to `true` if to include all published workflows and not just the requesting user's.",
    ),
    IndexQueryTag(
        "is:share_with_me",
        "Include only workflows shared with the requesting user. Be sure the the query parameter `show_shared` is set to `true` if to include shared workflows.",
    ),
]
SearchQueryParam: Optional[str] = search_query_param(
    model_name="Stored Workflow",
    tags=query_tags,
    free_text_fields=["name", "tag", "user"],
)
SkipStepCountsQueryParam: bool = Query(
    default=False,
    title="Skip step counts.",
    description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
    """FastAPI route handlers for workflow listing/sharing and invocation export.

    Newer FastAPI-style counterpart of the legacy WSGI controller above;
    delegates all logic to WorkflowsService / InvocationsService.
    """

    service: WorkflowsService = depends(WorkflowsService)
    invocations_service: InvocationsService = depends(InvocationsService)

    @router.get(
        "/api/workflows",
        summary="Lists stored workflows viewable by the user.",
        response_description="A list with summary stored workflow information per viewable entry.",
    )
    def index(
        self,
        response: Response,
        trans: ProvidesUserContext = DependsOnTrans,
        show_deleted: bool = DeletedQueryParam,
        show_hidden: bool = HiddenQueryParam,
        missing_tools: bool = MissingToolsQueryParam,
        show_published: Optional[bool] = ShowPublishedQueryParam,
        show_shared: Optional[bool] = ShowSharedQueryParam,
        sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
        sort_desc: Optional[bool] = SortDescQueryParam,
        limit: Optional[int] = LimitQueryParam,
        offset: Optional[int] = OffsetQueryParam,
        search: Optional[str] = SearchQueryParam,
        skip_step_counts: bool = SkipStepCountsQueryParam,
    ) -> List[Dict[str, Any]]:
        """Lists stored workflows viewable by the user."""
        payload = WorkflowIndexPayload(
            show_published=show_published,
            show_hidden=show_hidden,
            show_deleted=show_deleted,
            show_shared=show_shared,
            missing_tools=missing_tools,
            sort_by=sort_by,
            sort_desc=sort_desc,
            limit=limit,
            offset=offset,
            search=search,
            skip_step_counts=skip_step_counts,
        )
        workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
        # Total (pre-pagination) match count is exposed via a response header.
        response.headers["total_matches"] = str(total_matches)
        return workflows

    @router.get(
        "/api/workflows/{id}/sharing",
        summary="Get the current sharing status of the given item.",
    )
    def sharing(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Return the sharing status of the item."""
        return self.service.shareable_service.sharing(trans, id)

    @router.put(
        "/api/workflows/{id}/enable_link_access",
        summary="Makes this item accessible by a URL link.",
    )
    def enable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Makes this item accessible by a URL link and return the current sharing status."""
        return self.service.shareable_service.enable_link_access(trans, id)

    @router.put(
        "/api/workflows/{id}/disable_link_access",
        summary="Makes this item inaccessible by a URL link.",
    )
    def disable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Makes this item inaccessible by a URL link and return the current sharing status."""
        return self.service.shareable_service.disable_link_access(trans, id)

    @router.put(
        "/api/workflows/{id}/publish",
        summary="Makes this item public and accessible by a URL link.",
    )
    def publish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Makes this item publicly available by a URL link and return the current sharing status."""
        return self.service.shareable_service.publish(trans, id)

    @router.put(
        "/api/workflows/{id}/unpublish",
        summary="Removes this item from the published list.",
    )
    def unpublish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Removes this item from the published list and return the current sharing status."""
        return self.service.shareable_service.unpublish(trans, id)

    @router.put(
        "/api/workflows/{id}/share_with_users",
        summary="Share this item with specific users.",
    )
    def share_with_users(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: ShareWithPayload = Body(...),
    ) -> ShareWithStatus:
        """Shares this item with specific users and return the current sharing status."""
        return self.service.shareable_service.share_with_users(trans, id, payload)

    @router.put(
        "/api/workflows/{id}/slug",
        summary="Set a new slug for this shared item.",
        status_code=status.HTTP_204_NO_CONTENT,
    )
    def set_slug(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: SetSlugPayload = Body(...),
    ):
        """Sets a new slug to access this item by URL. The new slug must be unique."""
        self.service.shareable_service.set_slug(trans, id, payload)
        # 204: success with no response body.
        return Response(status_code=status.HTTP_204_NO_CONTENT)

    @router.post(
        "/api/invocations/{invocation_id}/prepare_store_download",
        summary="Prepare a worklfow invocation export-style download.",
    )
    def prepare_store_download(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
        payload: PrepareStoreDownloadPayload = Body(...),
    ) -> AsyncFile:
        return self.invocations_service.prepare_store_download(
            trans,
            invocation_id,
            payload,
        )

    @router.post(
        "/api/invocations/{invocation_id}/write_store",
        summary="Prepare a worklfow invocation export-style download and write to supplied URI.",
    )
    def write_store(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
        payload: WriteStoreToPayload = Body(...),
    ) -> AsyncTaskResultSummary:
        rval = self.invocations_service.write_store(
            trans,
            invocation_id,
            payload,
        )
        return rval
| 43.66277 | 204 | 0.625136 |
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
# Module-level logger for this API module.
log = logging.getLogger(__name__)
# Router collecting the FastAPI-style workflow endpoints defined in this file.
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
    """Payload model for recreating invocation(s) from a model store."""

    # Encoded id of the history the recreated invocation(s) attach to.
    history_id: Optional[str]

    class Config:
        # Pass through any extra keys (e.g. serialization params share the payload).
        extra = Extra.allow
class WorkflowsAPIController(
    BaseGalaxyAPIController,
    UsesStoredWorkflowMixin,
    UsesAnnotations,
    SharableMixin,
    ServesExportStores,
    ConsumesModelStores,
):
    """Legacy-style API controller exposing workflow CRUD, invocation and
    import/export endpoints under /api/workflows and /api/invocations."""

    # Services injected by the application's dependency container.
    service: WorkflowsService = depends(WorkflowsService)
    invocations_service: InvocationsService = depends(InvocationsService)
    def __init__(self, app: StructuredApp):
        """Cache frequently used managers off the application object."""
        super().__init__(app)
        self.history_manager = app.history_manager
        self.workflow_manager = app.workflow_manager
        self.workflow_contents_manager = app.workflow_contents_manager
        # Helper used by get_tool_predictions to compute tool recommendations.
        self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
elif type(workflow_ids) != list:
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
    @expose_api_anonymous_and_sessionless
    def show(self, trans: GalaxyWebTransaction, id, **kwd):
        """
        GET /api/workflows/{id}

        Return a dict description of the stored workflow. Access requires the
        workflow to be importable, owned by the user, admin rights, or an
        explicit share. ``legacy=True`` selects the legacy dict style;
        ``instance=true`` resolves ``id`` as a specific workflow version.
        """
        stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
        if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
            # Not importable and not owned: allow only when explicitly shared
            # with the requesting user.
            if (
                trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
                .filter_by(user=trans.user, stored_workflow=stored_workflow)
                .count()
                == 0
            ):
                message = "Workflow is neither importable, nor owned by or shared with current user"
                raise exceptions.ItemAccessibilityException(message)
        if kwd.get("legacy", False):
            style = "legacy"
        else:
            style = "instance"
        version = kwd.get("version")
        if version is None and util.string_as_bool(kwd.get("instance", "false")):
            # A concrete Workflow id was requested; map it back to its index
            # among the stored workflow's versions (0 == oldest).
            workflow_id = self.decode_id(id)
            for i, workflow in enumerate(reversed(stored_workflow.workflows)):
                if workflow.id == workflow_id:
                    version = i
                    break
        return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
    @expose_api
    def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
        """
        POST /api/workflows

        Create a workflow from exactly one of: ``archive_source`` (URL,
        file:// path, TRS tool, or uploaded ``archive_file``),
        ``from_history_id`` (extract from a history), ``from_path``,
        ``shared_workflow_id`` (import a shared workflow), or an inline
        ``workflow`` description.

        :raises exceptions.RequestParameterMissingException: none of the
            creation parameters was supplied.
        :raises exceptions.RequestParameterInvalidException: more than one
            creation parameter was supplied.
        """
        ways_to_create = {
            "archive_source",
            "from_history_id",
            "from_path",
            "shared_workflow_id",
            "workflow",
        }
        if trans.user_is_bootstrap_admin:
            raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")
        if payload is None or len(ways_to_create.intersection(payload)) == 0:
            message = f"One parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterMissingException(message)
        if len(ways_to_create.intersection(payload)) > 1:
            message = f"Only one parameter among - {', '.join(ways_to_create)} - must be specified"
            raise exceptions.RequestParameterInvalidException(message)
        if "archive_source" in payload:
            archive_source = payload["archive_source"]
            archive_file = payload.get("archive_file")
            archive_data = None
            if archive_source:
                # URL-style sources are access-checked against the allowlist.
                validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
                if archive_source.startswith("file://"):
                    # Local path: delegate to the from_path code path.
                    workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
                    payload["workflow"] = workflow_src
                    return self.__api_import_new_workflow(trans, payload, **kwd)
                elif archive_source == "trs_tool":
                    # Fetch the descriptor from a TRS (tool registry) server.
                    trs_server = payload.get("trs_server")
                    trs_tool_id = payload.get("trs_tool_id")
                    trs_version_id = payload.get("trs_version_id")
                    import_source = None
                    archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
                else:
                    try:
                        archive_data = stream_url_to_str(
                            archive_source, trans.app.file_sources, prefix="gx_workflow_download"
                        )
                        import_source = "URL"
                    except Exception:
                        raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
            elif hasattr(archive_file, "file"):
                # Multipart upload: read the archive text from the temp file.
                uploaded_file = archive_file.file
                uploaded_file_name = uploaded_file.name
                if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                    archive_data = util.unicodify(uploaded_file.read())
                    import_source = "uploaded file"
                else:
                    raise exceptions.MessageException("You attempted to upload an empty file.")
            else:
                raise exceptions.MessageException("Please provide a URL or file.")
            return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)
        if "from_history_id" in payload:
            # Extract a workflow from jobs/datasets of an accessible history.
            from_history_id = payload.get("from_history_id")
            from_history_id = self.decode_id(from_history_id)
            history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
            job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
            dataset_ids = payload.get("dataset_ids", [])
            dataset_collection_ids = payload.get("dataset_collection_ids", [])
            workflow_name = payload["workflow_name"]
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.user,
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
            item["url"] = url_for("workflow", id=item["id"])
            return item
        if "from_path" in payload:
            # Server-side path import; routed through the inline-workflow path.
            from_path = payload.get("from_path")
            object_id = payload.get("object_id")
            workflow_src = {"src": "from_path", "path": from_path}
            if object_id is not None:
                workflow_src["object_id"] = object_id
            payload["workflow"] = workflow_src
            return self.__api_import_new_workflow(trans, payload, **kwd)
        if "shared_workflow_id" in payload:
            workflow_id = payload["shared_workflow_id"]
            return self.__api_import_shared_workflow(trans, workflow_id, payload)
        if "workflow" in payload:
            return self.__api_import_new_workflow(trans, payload, **kwd)
        raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
    @expose_api_raw_anonymous_and_sessionless
    def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
        """
        GET /api/workflows/{workflow_id}/download

        Serialize the workflow in the requested ``style`` (default 'export').
        ``format=json-download`` additionally sets attachment headers; Format 2
        styles (without json-download) are dumped as ordered YAML, everything
        else as pretty-printed JSON.
        """
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
        style = kwd.get("style", "export")
        download_format = kwd.get("format")
        version = kwd.get("version")
        history_id = kwd.get("history_id")
        history = None
        if history_id:
            history = self.history_manager.get_accessible(
                self.decode_id(history_id), trans.user, current_history=trans.history
            )
        ret_dict = self.workflow_contents_manager.workflow_to_dict(
            trans, stored_workflow, style=style, version=version, history=history
        )
        if download_format == "json-download":
            # Build a filesystem-safe filename from the workflow name.
            sname = stored_workflow.name
            sname = "".join(c in util.FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150]
            if ret_dict.get("format-version", None) == "0.1":
                extension = "ga"
            else:
                extension = "gxwf.json"
            trans.response.headers[
                "Content-Disposition"
            ] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
            trans.response.set_content_type("application/galaxy-archive")
        if style == "format2" and download_format != "json-download":
            return ordered_dump(ret_dict)
        else:
            return format_return_as_json(ret_dict, pretty=True)
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
stored_workflow.deleted = True
trans.sa_session.flush()
return f"Workflow '{stored_workflow.name}' successfully deleted"
    @expose_api
    def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
        """Deprecated alias: delegates to the inline-'workflow' creation path."""
        return self.__api_import_new_workflow(trans, payload, **kwd)
@expose_api
def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
workflow_dict = payload.get("workflow", {})
workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
if workflow_dict:
require_flush = False
raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
workflow_dict = raw_workflow_description.as_dict
new_workflow_name = workflow_dict.get("name")
old_workflow = stored_workflow.latest_workflow
name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
steps_updated = "steps" in workflow_dict
if name_updated and not steps_updated:
sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
workflow = old_workflow.copy(user=trans.user)
workflow.stored_workflow = stored_workflow
workflow.name = sanitized_name
stored_workflow.name = sanitized_name
stored_workflow.latest_workflow = workflow
trans.sa_session.add(workflow, stored_workflow)
require_flush = True
if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
stored_workflow.hidden = workflow_dict["hidden"]
require_flush = True
if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
stored_workflow.published = workflow_dict["published"]
require_flush = True
if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
stored_workflow.importable = workflow_dict["importable"]
require_flush = True
if "annotation" in workflow_dict and not steps_updated:
newAnnotation = sanitize_html(workflow_dict["annotation"])
self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
require_flush = True
if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
decoded_id = trans.security.decode_id(id)
if show_in_panel:
workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
if decoded_id not in workflow_ids:
menu_entry = model.StoredWorkflowMenuEntry()
menu_entry.stored_workflow = stored_workflow
stored_workflow_menu_entries.append(menu_entry)
trans.sa_session.add(menu_entry)
require_flush = True
else:
entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
if decoded_id in entries:
stored_workflow_menu_entries.remove(entries[decoded_id])
require_flush = True
if "tags" in workflow_dict:
trans.app.tag_handler.set_tags_from_list(
user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
)
if require_flush:
trans.sa_session.flush()
if "steps" in workflow_dict:
try:
workflow_update_options = WorkflowUpdateOptions(**payload)
workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
trans,
stored_workflow,
raw_workflow_description,
workflow_update_options,
)
except MissingToolsException:
raise exceptions.MessageException(
"This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
)
else:
message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
@expose_api
def refactor(self, trans, id, payload, **kwds):
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
    @expose_api
    def build_module(self, trans: GalaxyWebTransaction, payload=None):
        """
        POST /api/workflows/build_module

        Build the editor representation (state, inputs, outputs, config form,
        post-job actions) of a single workflow module from a tool-form payload.
        """
        if payload is None:
            payload = {}
        inputs = payload.get("inputs", {})
        trans.workflow_building_mode = workflow_building_modes.ENABLED
        module = module_factory.from_dict(trans, payload, from_tool_form=True)
        if "tool_state" not in payload:
            # No serialized state supplied: derive module state from raw inputs.
            module_state: Dict[str, Any] = {}
            populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
            module.recover_state(module_state, from_tool_form=True)
        return {
            "label": inputs.get("__label", ""),
            "annotation": inputs.get("__annotation", ""),
            "name": module.get_name(),
            "tool_state": module.get_state(),
            "content_id": module.get_content_id(),
            "inputs": module.get_all_inputs(connectable_only=True),
            "outputs": module.get_all_outputs(),
            "config_form": module.get_config_form(),
            "post_job_actions": module.get_post_job_actions(inputs),
        }
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
    def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
        """Create a workflow from raw archive text (JSON or gxformat2 YAML) and
        return an import-status dict; validation problems downgrade the status
        to 'error' without failing the import."""
        payload = payload or {}
        try:
            data = json.loads(archive_data)
        except Exception:
            if "GalaxyWorkflow" in archive_data:
                # Not JSON but looks like a Format 2 (gxformat2) YAML workflow.
                data = {"yaml_content": archive_data}
            else:
                raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
        if not data:
            raise exceptions.MessageException("The data content is missing.")
        raw_workflow_description = self.__normalize_workflow(trans, data)
        workflow_create_options = WorkflowCreateOptions(**payload)
        workflow, missing_tool_tups = self._workflow_from_dict(
            trans, raw_workflow_description, workflow_create_options, source=source
        )
        workflow_id = workflow.id
        workflow = workflow.latest_workflow
        response = {
            "message": f"Workflow '{escape(workflow.name)}' imported successfully.",
            "status": "success",
            "id": trans.security.encode_id(workflow_id),
        }
        if workflow.has_errors:
            response["message"] = "Imported, but some steps in this workflow have validation errors."
            response["status"] = "error"
        elif len(workflow.steps) == 0:
            response["message"] = "Imported, but this workflow has no steps."
            response["status"] = "error"
        elif workflow.has_cycles:
            response["message"] = "Imported, but this workflow contains cycles."
            response["status"] = "error"
        return response
def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
data = payload["workflow"]
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans,
raw_workflow_description,
workflow_create_options,
)
workflow_id = workflow.id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["annotations"] = [x.annotation for x in workflow.annotations]
item["url"] = url_for("workflow", id=encoded_id)
item["owner"] = workflow.user.username
item["number_of_steps"] = len(workflow.latest_workflow.steps)
return item
def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
    def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """Import another user's importable, non-deleted workflow into the
        current user's workflows and return its dict description."""
        try:
            stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
        except Exception:
            raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
        if stored_workflow.importable is False:
            raise exceptions.ItemAccessibilityException(
                "The owner of this workflow has disabled imports via this link."
            )
        elif stored_workflow.deleted:
            raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
        imported_workflow = self._import_shared_workflow(trans, stored_workflow)
        item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
        encoded_id = trans.security.encode_id(imported_workflow.id)
        item["url"] = url_for("workflow", id=encoded_id)
        return item
    @expose_api
    def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
        """
        POST /api/workflows/{workflow_id}/invocations

        Queue one invocation of the workflow's latest version — or several,
        when ``batch`` is set and the payload expands to multiple run
        configs. Fails up front if required tools are missing from the
        toolbox.
        """
        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
        workflow = stored_workflow.latest_workflow
        run_configs = build_workflow_run_configs(trans, workflow, payload)
        is_batch = payload.get("batch")
        if not is_batch and len(run_configs) != 1:
            raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
        require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
        tools = self.workflow_contents_manager.get_all_tools(workflow)
        missing_tools = [
            tool
            for tool in tools
            if not self.app.toolbox.has_tool(
                tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
            )
        ]
        if missing_tools:
            missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
            if require_exact_tool_versions:
                missing_tools_message += ", ".join(
                    [f"{tool['tool_id']} (version {tool['tool_version']})" for tool in missing_tools]
                )
            else:
                missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
            raise exceptions.MessageException(missing_tools_message)
        invocations = []
        for run_config in run_configs:
            workflow_scheduler_id = payload.get("scheduler", None)
            # TODO: workflow scheduler hints
            work_request_params = dict(scheduler=workflow_scheduler_id)
            workflow_invocation = queue_invoke(
                trans=trans,
                workflow=workflow,
                workflow_run_config=run_config,
                request_params=work_request_params,
                flush=False,
            )
            invocations.append(workflow_invocation)
        # Single flush after all invocations are queued (queue_invoke was told
        # not to flush per invocation).
        trans.sa_session.flush()
        invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
        if is_batch:
            return invocations
        else:
            return invocations[0]
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
    def _create_from_store(
        self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
    ):
        """Materialize invocation objects from a model store into the user's
        owned target history and return their serialized representations."""
        history = self.history_manager.get_owned(
            self.decode_id(payload.history_id), trans.user, current_history=trans.history
        )
        object_tracker = self.create_objects_from_store(
            trans,
            payload,
            history=history,
        )
        return self.invocations_service.serialize_workflow_invocations(
            object_tracker.invocations_by_key.values(), serialization_params
        )
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
    def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """
        Assemble an IEEE 2791 BioCompute Object (BCO) dict describing the
        invocation: provenance, description (pipeline steps), execution,
        parametric, io and error domains, plus a Galaxy extension domain and
        an etag over the content.
        """
        decoded_workflow_invocation_id = self.decode_id(invocation_id)
        workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
        history = workflow_invocation.history
        workflow = workflow_invocation.workflow
        stored_workflow = workflow.stored_workflow
        # pull in the user info from those who the history and workflow has been shared with
        contributing_users = [stored_workflow.user]
        # may want to extend this to have more reviewers.
        reviewing_users = [stored_workflow.user]
        encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
        encoded_history_id = trans.security.encode_id(history.id)
        dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
        spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
        # Locate this workflow among the stored versions; index == version number.
        # NOTE(review): 'current_version' remains unset if no element matches,
        # which would raise NameError below — confirm the intended invariant.
        for i, w in enumerate(reversed(stored_workflow.workflows)):
            if workflow == w:
                current_version = i
        contributors = []
        for contributing_user in contributing_users:
            contributor = {
                "orcid": kwd.get("xref", []),
                "name": contributing_user.username,
                "affiliation": "",
                "contribution": ["authoredBy"],
                "email": contributing_user.email,
            }
            contributors.append(contributor)
        reviewers = []
        for reviewer in reviewing_users:
            # NOTE(review): the dict below references 'contributing_user' left
            # over from the previous loop rather than 'reviewer'; harmless while
            # both lists contain only the workflow owner, but verify intent.
            reviewer = {
                "status": "approved",
                "reviewer_comment": "",
                "date": workflow_invocation.update_time.isoformat(),
                "reviewer": {
                    "orcid": kwd.get("orcid", []),
                    "name": contributing_user.username,
                    "affiliation": "",
                    "contribution": "curatedBy",
                    "email": contributing_user.email,
                },
            }
            reviewers.append(reviewer)
        provenance_domain = {
            "name": workflow.name,
            "version": current_version,
            "review": reviewers,
            "derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
            "created": workflow_invocation.create_time.isoformat(),
            "modified": workflow_invocation.update_time.isoformat(),
            "contributors": contributors,
            "license": "https://spdx.org/licenses/CC-BY-4.0.html",
        }
        # Keywords: workflow tags first, then history tags (de-duplicated).
        keywords = []
        for tag in stored_workflow.tags:
            keywords.append(tag.user_tname)
        for tag in history.tags:
            if tag.user_tname not in keywords:
                keywords.append(tag.user_tname)
        metrics = {}
        tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
        for step in workflow_invocation.steps:
            if step.workflow_step.type == "tool":
                workflow_outputs_list, output_list, input_list = set(), [], []
                for wo in step.workflow_step.workflow_outputs:
                    workflow_outputs_list.add(wo.output_name)
                for job in step.jobs:
                    # NOTE(review): 'i' here is left over from the version loop
                    # above, so each job overwrites the same key — presumably
                    # this should be keyed by step or job; confirm.
                    metrics[i] = summarize_job_metrics(trans, job)
                    for job_input in job.input_datasets:
                        if hasattr(job_input.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
                            input_obj = {
                                # TODO: that should maybe be a step prefix + element identifier where appropriate.
                                "filename": job_input.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_input.dataset.create_time.isoformat(),
                            }
                            input_list.append(input_obj)
                    for job_output in job.output_datasets:
                        if hasattr(job_output.dataset, "dataset_id"):
                            encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
                            output_obj = {
                                "filename": job_output.dataset.name,
                                "uri": url_for(
                                    "history_content",
                                    history_id=encoded_history_id,
                                    id=encoded_dataset_id,
                                    qualified=True,
                                ),
                                "access_time": job_output.dataset.create_time.isoformat(),
                            }
                            output_list.append(output_obj)
                            # Outputs marked as workflow outputs also populate the io_domain.
                            if job_output.name in workflow_outputs_list:
                                output = {
                                    "mediatype": job_output.dataset.extension,
                                    "uri": {
                                        "filename": job_output.dataset.name,
                                        "uri": url_for(
                                            "history_content",
                                            history_id=encoded_history_id,
                                            id=encoded_dataset_id,
                                            qualified=True,
                                        ),
                                        "access_time": job_output.dataset.create_time.isoformat(),
                                    },
                                }
                                output_subdomain.append(output)
                workflow_step = step.workflow_step
                step_index = workflow_step.order_index
                current_step = dict_workflow["steps"][str(step_index)]
                pipeline_step = {
                    "step_number": step_index,
                    "name": current_step["name"],
                    "description": current_step["annotation"],
                    "version": current_step["tool_version"],
                    "prerequisite": kwd.get("prerequisite", []),
                    "input_list": input_list,
                    "output_list": output_list,
                }
                pipeline_steps.append(pipeline_step)
                try:
                    # De-duplicate software prerequisites by content id.
                    software_prerequisite = {
                        "name": current_step["content_id"],
                        "version": current_step["tool_version"],
                        "uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
                    }
                    if software_prerequisite["uri"]["uri"] not in tools:
                        software_prerequisites.append(software_prerequisite)
                        tools.append(software_prerequisite["uri"]["uri"])
                except Exception:
                    continue
            if step.workflow_step.type == "data_input" and step.output_datasets:
                for output_assoc in step.output_datasets:
                    encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
            if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
                for output_dataset_collection_association in step.output_dataset_collections:
                    encoded_dataset_id = trans.security.encode_id(
                        output_dataset_collection_association.dataset_collection_id
                    )
                    input_obj = {
                        "filename": step.workflow_step.label,
                        "uri": url_for(
                            "history_content",
                            history_id=encoded_history_id,
                            id=encoded_dataset_id,
                            type="dataset_collection",
                            qualified=True,
                        ),
                        "access_time": step.workflow_step.update_time.isoformat(),
                    }
                    input_subdomain.append(input_obj)
        # Usability domain: annotations from both the workflow and the history.
        usability_domain = []
        for a in stored_workflow.annotations:
            usability_domain.append(a.annotation)
        for h in history.annotations:
            usability_domain.append(h.annotation)
        # Parametric domain: tool input parameters per step (best effort —
        # steps without tool_inputs are skipped).
        parametric_domain = []
        for inv_step in workflow_invocation.steps:
            try:
                for k, v in inv_step.workflow_step.tool_inputs.items():
                    param, value, step = k, v, inv_step.workflow_step.order_index
                    parametric_domain.append({"param": param, "value": value, "step": step})
            except Exception:
                continue
        execution_domain = {
            "script_access_type": "a_galaxy_workflow",
            "script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
            "script_driver": "Galaxy",
            "software_prerequisites": software_prerequisites,
            "external_data_endpoints": [
                {"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
                kwd.get("external_data_endpoints"),
            ],
            "environment_variables": kwd.get("environment_variables", {}),
        }
        extension = [
            {
                "extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
                "galaxy_extension": {
                    "galaxy_url": url_for("/", qualified=True),
                    "galaxy_version": VERSION,
                    # TODO:
                    # 'aws_estimate': aws_estimate,
                    # 'job_metrics': metrics
                },
            }
        ]
        error_domain = {
            "empirical_error": kwd.get("empirical_error", []),
            "algorithmic_error": kwd.get("algorithmic_error", []),
        }
        bco_dict = {
            "provenance_domain": provenance_domain,
            "usability_domain": usability_domain,
            "extension_domain": extension,
            "description_domain": {
                "keywords": keywords,
                "xref": kwd.get("xref", []),
                "platform": ["Galaxy"],
                "pipeline_steps": pipeline_steps,
            },
            "execution_domain": execution_domain,
            "parametric_domain": parametric_domain,
            "io_domain": {
                "input_subdomain": input_subdomain,
                "output_subdomain": output_subdomain,
            },
            "error_domain": error_domain,
        }
        # Generate etag from the BCO excluding object_id and spec_version, as
        # specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
        etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
        bco_dict.update(
            {
                "object_id": url_for(
                    controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
                ),
                "spec_version": spec_version,
                "etag": etag,
            }
        )
        return bco_dict
    @expose_api
    def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Return the invocation's BioCompute Object as a JSON API response."""
        return self._generate_invocation_bco(trans, invocation_id, **kwd)
    @expose_api_raw
    def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Serve the invocation's BioCompute Object as a downloadable JSON attachment."""
        ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
        # Content-Disposition makes browsers save the response instead of rendering it.
        trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
        trans.response.set_content_type("application/json")
        return format_return_as_json(ret_dict, pretty=True)
    @expose_api
    def invocation_step(self, trans, invocation_id, step_id, **kwd):
        """Fetch a single invocation step by its encoded id and return it encoded."""
        decoded_invocation_step_id = self.decode_id(step_id)
        invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
        return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
    @expose_api_anonymous_and_sessionless
    def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
        """Return a single encoded job-states summary dict for the whole invocation."""
        ids = [self.decode_id(invocation_id)]
        types = ["WorkflowInvocation"]
        # fetch_job_states is called with one id/type pair, so exactly one summary comes back.
        return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
    @expose_api
    def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
        """Apply the payload's ``action`` to an invocation step and return the updated, encoded step."""
        decoded_invocation_step_id = self.decode_id(step_id)
        action = payload.get("action", None)
        invocation_step = self.workflow_manager.update_invocation_step(
            trans,
            decoded_invocation_step_id,
            action=action,
        )
        return self.__encode_invocation_step(trans, invocation_step)
    def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
        """Build and persist a workflow from a raw description dict.

        Returns a ``(stored_workflow, missing_tools)`` tuple. Raises
        RequestParameterInvalidException when ``publish`` is requested without
        ``importable`` (published workflows must be importable).
        """
        publish = workflow_create_options.publish
        importable = workflow_create_options.is_importable
        if publish and not importable:
            raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")

        workflow_contents_manager = self.app.workflow_contents_manager
        raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
        created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
            trans,
            raw_workflow_description,
            workflow_create_options,
            source=source,
        )
        if importable:
            # Make the stored workflow accessible (sets slug etc.) before flushing.
            self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
            trans.sa_session.flush()

        self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
        return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
    def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
        """Serialize an invocation step to an 'element'-view dict with encoded ids."""
        return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
    def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
        """Fetch an accessible stored workflow; ``instance=true`` treats the id as a Workflow id."""
        instance = util.string_as_bool(kwd.get("instance", "false"))
        return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
    def __get_stored_workflow(self, trans, workflow_id, **kwd):
        """Fetch a stored workflow without an accessibility check; ``instance=true`` treats the id as a Workflow id."""
        instance = util.string_as_bool(kwd.get("instance", "false"))
        return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
    def __encode_invocation(self, invocation, **kwd):
        """Serialize a workflow invocation using kwd-derived serialization parameters."""
        params = InvocationSerializationParams(**kwd)
        return self.invocations_service.serialize_workflow_invocation(invocation, params)
# ---------------------------------------------------------------------------
# Shared FastAPI path/query parameter declarations for the workflow endpoints.
# ---------------------------------------------------------------------------
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)

InvocationIDPathParam: EncodedDatabaseIdField = Path(
    ..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)

DeletedQueryParam: bool = Query(
    default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)

HiddenQueryParam: bool = Query(
    default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)

MissingToolsQueryParam: bool = Query(
    default=False,
    title="Display missing tools",
    description="Whether to include a list of missing tools per workflow entry",
)

ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")

ShowSharedQueryParam: Optional[bool] = Query(
    default=None, title="Include workflows shared with authenticated user.", description=""
)

SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
    default=None,
    title="Sort workflow index by this attribute",
    # Fixed typo in the user-facing description ("In unspecified" -> "If unspecified").
    description="If unspecified, default ordering depends on other parameters but generally the user's own workflows appear first based on update time",
)

SortDescQueryParam: Optional[bool] = Query(
    default=None,
    title="Sort Descending",
    description="Sort in descending order?",
)

LimitQueryParam: Optional[int] = Query(default=None, title="Limit number of queries.")

OffsetQueryParam: Optional[int] = Query(
    default=0,
    title="Number of workflows to skip in sorted query (to enable pagination).",
)
# Tags understood by the free-text `search` query parameter of the index endpoint.
# (Fixed garbled user-facing descriptions: "the the ... if to include".)
query_tags = [
    IndexQueryTag("name", "The stored workflow's name.", "n"),
    IndexQueryTag(
        "tag",
        "The workflow's tag, if the tag contains a colon an approach will be made to match the key and value of the tag separately.",
        "t",
    ),
    IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
    IndexQueryTag(
        "is:published",
        "Include only published workflows in the final result. Be sure that the query parameter `show_published` is set to `true` to include all published workflows and not just the requesting user's.",
    ),
    IndexQueryTag(
        "is:share_with_me",
        "Include only workflows shared with the requesting user. Be sure that the query parameter `show_shared` is set to `true` to include shared workflows.",
    ),
]
# Free-text search parameter built from the query tags declared above.
SearchQueryParam: Optional[str] = search_query_param(
    model_name="Stored Workflow",
    tags=query_tags,
    free_text_fields=["name", "tag", "user"],
)

SkipStepCountsQueryParam: bool = Query(
    default=False,
    title="Skip step counts.",
    description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
    @router.get(
        "/api/workflows",
        summary="Lists stored workflows viewable by the user.",
        response_description="A list with summary stored workflow information per viewable entry.",
    )
    def index(
        self,
        response: Response,
        trans: ProvidesUserContext = DependsOnTrans,
        show_deleted: bool = DeletedQueryParam,
        show_hidden: bool = HiddenQueryParam,
        missing_tools: bool = MissingToolsQueryParam,
        show_published: Optional[bool] = ShowPublishedQueryParam,
        show_shared: Optional[bool] = ShowSharedQueryParam,
        sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
        sort_desc: Optional[bool] = SortDescQueryParam,
        limit: Optional[int] = LimitQueryParam,
        offset: Optional[int] = OffsetQueryParam,
        search: Optional[str] = SearchQueryParam,
        skip_step_counts: bool = SkipStepCountsQueryParam,
    ) -> List[Dict[str, Any]]:
        """List stored workflows viewable by the user, with filtering/sorting/pagination."""
        payload = WorkflowIndexPayload(
            show_published=show_published,
            show_hidden=show_hidden,
            show_deleted=show_deleted,
            show_shared=show_shared,
            missing_tools=missing_tools,
            sort_by=sort_by,
            sort_desc=sort_desc,
            limit=limit,
            offset=offset,
            search=search,
            skip_step_counts=skip_step_counts,
        )
        workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
        # Total (unpaginated) match count goes in a header so clients can paginate.
        response.headers["total_matches"] = str(total_matches)
        return workflows
    @router.get(
        "/api/workflows/{id}/sharing",
        summary="Get the current sharing status of the given item.",
    )
    def sharing(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Return the current sharing status of the stored workflow."""
        return self.service.shareable_service.sharing(trans, id)
    @router.put(
        "/api/workflows/{id}/enable_link_access",
        summary="Makes this item accessible by a URL link.",
    )
    def enable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Enable URL-link access to the stored workflow and return the new sharing status."""
        return self.service.shareable_service.enable_link_access(trans, id)
    @router.put(
        "/api/workflows/{id}/disable_link_access",
        summary="Makes this item inaccessible by a URL link.",
    )
    def disable_link_access(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Disable URL-link access to the stored workflow and return the new sharing status."""
        return self.service.shareable_service.disable_link_access(trans, id)
    @router.put(
        "/api/workflows/{id}/publish",
        summary="Makes this item public and accessible by a URL link.",
    )
    def publish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Publish the stored workflow and return the new sharing status."""
        return self.service.shareable_service.publish(trans, id)
    @router.put(
        "/api/workflows/{id}/unpublish",
        summary="Removes this item from the published list.",
    )
    def unpublish(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
    ) -> SharingStatus:
        """Unpublish the stored workflow and return the new sharing status."""
        return self.service.shareable_service.unpublish(trans, id)
    @router.put(
        "/api/workflows/{id}/share_with_users",
        summary="Share this item with specific users.",
    )
    def share_with_users(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: ShareWithPayload = Body(...),
    ) -> ShareWithStatus:
        """Share the stored workflow with the users listed in the payload."""
        return self.service.shareable_service.share_with_users(trans, id, payload)
    @router.put(
        "/api/workflows/{id}/slug",
        summary="Set a new slug for this shared item.",
        status_code=status.HTTP_204_NO_CONTENT,
    )
    def set_slug(
        self,
        trans: ProvidesUserContext = DependsOnTrans,
        id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
        payload: SetSlugPayload = Body(...),
    ):
        """Set the stored workflow's sharing slug; responds 204 with no body."""
        self.service.shareable_service.set_slug(trans, id, payload)
        return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post(
"/api/invocations/{invocation_id}/prepare_store_download",
summary="Prepare a worklfow invocation export-style download.",
)
def prepare_store_download(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: PrepareStoreDownloadPayload = Body(...),
) -> AsyncFile:
return self.invocations_service.prepare_store_download(
trans,
invocation_id,
payload,
)
@router.post(
"/api/invocations/{invocation_id}/write_store",
summary="Prepare a worklfow invocation export-style download and write to supplied URI.",
)
def write_store(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: WriteStoreToPayload = Body(...),
) -> AsyncTaskResultSummary:
rval = self.invocations_service.write_store(
trans,
invocation_id,
payload,
)
return rval
| true | true |
f72f7ca31617049ebeb428ce645c75a60f6eb63c | 22,243 | py | Python | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | setup.py | kylemannock/python-games | 1cd25fb4b52c2226dae2d77c996c878bad4bcd65 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | #!/usr/bin/env python
#
# This is the distutils setup script for pygame.
# Full instructions are in https://www.pygame.org/wiki/GettingStarted
#
# To configure, compile, install, just run this script.
# python setup.py install
DESCRIPTION = """Pygame is a Python wrapper module for the
SDL multimedia library. It contains python functions and classes
that will allow you to use SDL's support for playing cdroms,
audio and video output, and keyboard, mouse and joystick input."""
EXTRAS = {}
METADATA = {
"name": "pygame",
"version": "1.9.5.dev0",
"license": "LGPL",
"url": "https://www.pygame.org",
"author": "Pete Shinners, Rene Dudfield, Marcus von Appen, Bob Pendleton, others...",
"author_email": "pygame@seul.org",
"description": "Python Game Development",
"long_description": DESCRIPTION,
}
import sys
import os
def compilation_help():
    """On failure point people to a web page for help.

    Picks a distro/OS-specific compilation guide URL and prints it together
    with a pointer to the contribution docs.
    """
    import platform
    the_system = platform.system()
    if the_system == 'Linux':
        # linux_distribution was removed in Python 3.8, hence the hasattr guard.
        if hasattr(platform, 'linux_distribution'):
            distro = platform.linux_distribution()
            if distro[0] == 'Ubuntu':
                the_system = 'Ubuntu'
            elif distro[0] == 'Debian':
                the_system = 'Debian'

    help_urls = {
        'Linux': 'https://www.pygame.org/wiki/Compilation',
        'Ubuntu': 'https://www.pygame.org/wiki/CompileUbuntu',
        'Debian': 'https://www.pygame.org/wiki/CompileDebian',
        'Windows': 'https://www.pygame.org/wiki/CompileWindows',
        'Darwin': 'https://www.pygame.org/wiki/MacCompile',
    }

    default = 'https://www.pygame.org/wiki/Compilation'
    # Bug fix: look up the distro-refined name computed above, not
    # platform.system() again -- otherwise the Ubuntu/Debian entries were dead.
    url = help_urls.get(the_system, default)

    is_pypy = '__pypy__' in sys.builtin_module_names
    if is_pypy:
        url += '\n    https://www.pygame.org/wiki/CompilePyPy'

    print ('---')
    print ('For help with compilation see:')
    print ('    %s' % url)
    print ('To contribute to pygame development see:')
    print ('    https://www.pygame.org/contribute.html')
    print ('---')
if not hasattr(sys, 'version_info') or sys.version_info < (2,7):
compilation_help()
raise SystemExit("Pygame requires Python version 2.7 or above.")
#get us to the correct directory
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(path)
#os.environ["CFLAGS"] = "-W -Wall -Wpointer-arith -Wcast-qual -Winline " + \
# "-Wcast-align -Wconversion -Wstrict-prototypes " + \
# "-Wmissing-prototypes -Wmissing-declarations " + \
# "-Wnested-externs -Wshadow -Wredundant-decls"
if "-warnings" in sys.argv:
os.environ["CFLAGS"] = "-W -Wimplicit-int " + \
"-Wimplicit-function-declaration " + \
"-Wimplicit -Wmain -Wreturn-type -Wunused -Wswitch " + \
"-Wcomment -Wtrigraphs -Wformat -Wchar-subscripts " + \
"-Wuninitialized -Wparentheses " +\
"-Wpointer-arith -Wcast-qual -Winline -Wcast-align " + \
"-Wconversion -Wstrict-prototypes " + \
"-Wmissing-prototypes -Wmissing-declarations " + \
"-Wnested-externs -Wshadow -Wredundant-decls"
sys.argv.remove ("-warnings")
AUTO_CONFIG = False
if '-auto' in sys.argv:
AUTO_CONFIG = True
sys.argv.remove('-auto')
import os.path, glob, stat, shutil
import distutils.sysconfig
from distutils.core import setup, Extension, Command
from distutils.extension import read_setup_file
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
revision = ''
# Python 3.0 patch
if sys.version_info[0:2] == (3, 0):
import distutils.version
def _cmp(x, y):
try:
if x < y:
return -1
elif x == y:
return 0
return 1
except TypeError:
return NotImplemented
distutils.version.cmp = _cmp
del _cmp
def add_datafiles(data_files, dest_dir, pattern):
    """Add directory structures to data files according to a pattern.

    ``pattern`` is ``(src_dir, elements)`` where each element is either a glob
    string or a nested ``[sub_dir, elements]`` list; matching files are
    appended to ``data_files`` as ``(dest_path, files)`` tuples.
    """
    top_src, top_elements = pattern

    def walk(dest_path, src_path, elements):
        # Collect globs at this level; recurse into nested [dir, elements] pairs.
        matched = []
        for entry in elements:
            if isinstance(entry, list):
                sub_dir, sub_elements = entry
                walk('/'.join([dest_path, sub_dir]),
                     os.path.join(src_path, sub_dir),
                     sub_elements)
            else:
                matched.extend(glob.glob(os.path.join(src_path, entry)))
        if matched:
            data_files.append((dest_path, matched))

    walk(dest_dir, top_src, top_elements)
# allow optionally using setuptools for bdist_egg.
if "-setuptools" in sys.argv:
from setuptools import setup, find_packages
sys.argv.remove ("-setuptools")
from setuptools import setup, find_packages
# NOTE: the bdist_mpkg_support is for darwin.
try:
import bdist_mpkg_support
from setuptools import setup, Extension
except ImportError:
pass
else:
EXTRAS.update({
'options': bdist_mpkg_support.options,
'setup_requires': ['bdist_mpkg>=0.4.2'],
#'install_requires': ['pyobjc'],
#'dependency_links': ['http://rene.f0o.com/~rene/stuff/macosx/']
})
#headers to install
headers = glob.glob(os.path.join('src_c', '*.h'))
headers.remove(os.path.join('src_c', 'scale.h'))
# option for not installing the headers.
if "-noheaders" in sys.argv:
headers = []
sys.argv.remove ("-noheaders")
#sanity check for any arguments
if len(sys.argv) == 1 and sys.stdout.isatty():
if sys.version_info[0] >= 3:
reply = input('\nNo Arguments Given, Perform Default Install? [Y/n]')
else:
reply = raw_input('\nNo Arguments Given, Perform Default Install? [Y/n]')
if not reply or reply[0].lower() != 'n':
sys.argv.append('install')
#make sure there is a Setup file
if AUTO_CONFIG or not os.path.isfile('Setup'):
print ('\n\nWARNING, No "Setup" File Exists, Running "buildconfig/config.py"')
import buildconfig.config
buildconfig.config.main(AUTO_CONFIG)
if '-config' in sys.argv:
sys.exit(0)
print ('\nContinuing With "setup.py"')
try:
s_mtime = os.stat("Setup")[stat.ST_MTIME]
sin_mtime = os.stat(os.path.join('buildconfig', 'Setup.SDL1.in'))[stat.ST_MTIME]
if sin_mtime > s_mtime:
print ('\n\nWARNING, "buildconfig/Setup.SDL1.in" newer than "Setup",'
'you might need to modify "Setup".')
except:
pass
# get compile info for all extensions
try:
extensions = read_setup_file('Setup')
except:
print ("""Error with the "Setup" file,
perhaps make a clean copy from "Setup.in".""")
compilation_help()
raise
#decide whether or not to enable new buffer protocol support
enable_newbuf = False
if sys.version_info >= (2, 6, 0):
try:
sys.pypy_version_info
except AttributeError:
enable_newbuf = True
if enable_newbuf:
enable_newbuf_value = '1'
else:
enable_newbuf_value = '0'
for e in extensions:
e.define_macros.append(('ENABLE_NEWBUF', enable_newbuf_value))
#if new buffer protocol support is disabled then remove the testing framework
if not enable_newbuf:
posn = None
for i, e in enumerate(extensions):
if e.name == 'newbuffer':
posn = i
if (posn is not None):
del extensions[posn]
# if not building font, try replacing with ftfont
alternate_font = os.path.join('src_py', 'font.py')
if os.path.exists(alternate_font):
os.remove(alternate_font)
have_font = False
have_freetype = False
for e in extensions:
if e.name == 'font':
have_font = True
if e.name == '_freetype':
have_freetype = True
if not have_font and have_freetype:
shutil.copyfile(os.path.join('src_py', 'ftfont.py'), alternate_font)
#extra files to install
data_path = os.path.join(distutils.sysconfig.get_python_lib(), 'pygame')
pygame_data_files = []
data_files = [('pygame', pygame_data_files)]
#add files in distribution directory
# pygame_data_files.append('LGPL')
# pygame_data_files.append('readme.html')
# pygame_data_files.append('install.html')
#add non .py files in lib directory
for f in glob.glob(os.path.join('src_py', '*')):
if not f[-3:] == '.py' and not f[-4:] == '.doc' and os.path.isfile(f):
pygame_data_files.append(f)
#tests/fixtures
add_datafiles(data_files, 'pygame/tests',
['test',
[['fixtures',
[['xbm_cursors',
['*.xbm']],
['fonts',
['*.ttf', '*.otf', '*.bdf', '*.png']]]]]])
#examples
add_datafiles(data_files, 'pygame/examples',
['examples',
['readme.rst',
['data',
['*']],
['macosx',
['*.py',
['aliens_app_example',
['*.py',
'README.txt',
['English.lproj',
['aliens.icns',
['MainMenu.nib',
['*']]]]]]]]]])
#docs
add_datafiles(data_files, 'pygame/docs',
['docs',
['*.html', # Navigation and help pages
'*.gif', # pygame logos
'*.js', # For doc search
['ref', # pygame reference
['*.html', # Reference pages
'*.js', # Comments script
'*.json']], # Comment data
['c_api', # pygame C API
['*.html']],
['tut', # Tutorials
['*.html',
['tom',
['*.html',
'*.png']]]],
['_static', # Sphinx added support files
['*.css',
'*.png',
'*.ico',
'*.js']],
['_images', # Sphinx added reST ".. image::" refs
['*.jpg',
'*.png',
'*.gif']],
['_sources', # Used for ref search
['*.txt',
['ref',
['*.txt']]]]]])
#generate the version module
def parse_version(ver):
    """Return the first three numeric components of *ver* as 'X, Y, Z'.

    e.g. '1.9.5.dev0' -> '1, 9, 5'. Used to write the `vernum` tuple literal
    into the generated version module.
    """
    from re import findall
    # Raw string: '\d' in a plain literal is an invalid escape (DeprecationWarning).
    return ', '.join(s for s in findall(r'\d+', ver)[0:3])
def write_version_module(pygame_version, revision):
    """Generate src_py/version.py from the buildconfig/version.py.in template,
    appending `ver`, `vernum` and `rev` constants."""
    vernum = parse_version(pygame_version)
    with open(os.path.join('buildconfig', 'version.py.in'), 'r') as header_file:
        header = header_file.read()
    with open(os.path.join('src_py', 'version.py'), 'w') as version_file:
        version_file.write(header)
        version_file.write('ver = "' + pygame_version + '"\n')
        # vernum is written unquoted: it is a tuple-like literal, e.g. `1, 9, 5`.
        version_file.write('vernum = ' + vernum + '\n')
        version_file.write('rev = "' + revision + '"\n')
write_version_module(METADATA['version'], revision)
#required. This will be filled if doing a Windows build.
cmdclass = {}
#try to find DLLs and copy them too (only on windows)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
#add dependency DLLs to the project
lib_dependencies = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
lib_dependencies[e.name[8:]] = e.libraries
    def dependencies(roots):
        """Return a set of dependencies for the list of library file roots

        The return set is a dictionary keyed on library root name with values of 1.
        """
        root_set = {}
        for root in roots:
            try:
                deps = lib_dependencies[root]
            except KeyError:
                # Not a tracked COPYLIB library; skip it.
                pass
            else:
                # Known library: record it and recursively add its own deps.
                root_set[root] = 1
                root_set.update(dependencies(deps))
        return root_set
the_dlls = {}
required_dlls = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
the_dlls[e.name[8:]] = e.library_dirs[0]
else:
required_dlls.update(dependencies(e.libraries))
# join the required_dlls and the_dlls keys together.
lib_names = {}
for lib in list(required_dlls.keys()) + list(the_dlls.keys()):
lib_names[lib] = 1
for lib in lib_names.keys():
#next DLL; a distutils bug requires the paths to have Windows separators
f = the_dlls[lib].replace('/', os.sep)
if f == '_':
print ("WARNING, DLL for %s library not found." % lib)
else:
pygame_data_files.append(f)
    class WinBuildExt(build_ext):
        """This build_ext sets necessary environment variables for MinGW"""

        # __sdl_lib_dir is possible location of msvcrt replacement import
        # libraries, if they exist. Pygame module base only links to SDL so
        # should have the SDL library directory as its only -L option.
        # NOTE: this loop runs once at class-creation time, capturing the SDL
        # library dir from the 'base' extension into a class attribute.
        for e in extensions:
            if e.name == 'base':
                __sdl_lib_dir = e.library_dirs[0].replace('/', os.sep)
                break
cmdclass['build_ext'] = WinBuildExt
# Add the precompiled smooth scale MMX functions to transform.
def replace_scale_mmx():
for e in extensions:
if e.name == 'transform':
if '64 bit' in sys.version:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win64', 'scale_mmx.obj'))
else:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win32', 'scale_mmx.obj'))
for i in range(len(e.sources)):
if e.sources[i].endswith('scale_mmx.c'):
del e.sources[i]
return
replace_scale_mmx()
#clean up the list of extensions
for e in extensions[:]:
if e.name.startswith('COPYLIB_'):
extensions.remove(e) #don't compile the COPYLIBs, just clean them
else:
e.name = 'pygame.' + e.name #prepend package name on modules
#data installer with improved intelligence over distutils
#data files are copied into the project directory instead
#of willy-nilly
class smart_install_data(install_data):
    """install_data variant that copies data files into the package install
    directory (install_lib) instead of distutils' default data location."""
    def run(self):
        # need to change self.install_dir to the actual library dir
        install_cmd = self.get_finalized_command('install')
        self.install_dir = getattr(install_cmd, 'install_lib')
        return install_data.run(self)
cmdclass['install_data'] = smart_install_data
class OurSdist(sdist):
    """sdist variant that reads its template from buildconfig/ so a
    MANIFEST.in file does not clutter the repository root."""
    def initialize_options(self):
        sdist.initialize_options(self)
        # we do not want MANIFEST.in to appear in the root cluttering up things.
        self.template = os.path.join('buildconfig', 'MANIFEST.in')
cmdclass['sdist'] = OurSdist
if "bdist_msi" in sys.argv:
# if you are making an msi, we want it to overwrite files
# we also want to include the repository revision in the file name
from distutils.command import bdist_msi
import msilib
    class bdist_msi_overwrite_on_install(bdist_msi.bdist_msi):
        """bdist_msi variant that removes obsolete files and forces the MSI to
        overwrite outdated files on install (REINSTALLMODE=amus)."""

        def run(self):
            bdist_msi.bdist_msi.run(self)

            # Remove obsolete files.
            comp = "pygame1"  # Pygame component
            prop = comp  # Directory property
            records = [("surfarray.pyd", comp,
                        "SURFAR~1.PYD|surfarray.pyd", prop, 1),
                       ("sndarray.pyd", comp,
                        "SNDARRAY.PYD|sndarray.pyd", prop, 1),
                       ("camera.pyd", comp, "CAMERA.PYD|camera.pyd", prop, 1),
                       ("color.py", comp, "COLOR.PY|color.py", prop, 1),
                       ("color.pyc", comp, "COLOR.PYC|color.pyc", prop, 1),
                       ("color.pyo", comp, "COLOR.PYO|color.pyo", prop, 1)]
            msilib.add_data(self.db, "RemoveFile", records)

            # Overwrite outdated files.
            fullname = self.distribution.get_fullname()
            installer_name = self.get_installer_filename(fullname)
            print ("changing %s to overwrite files on install" % installer_name)
            msilib.add_data(self.db, "Property", [("REINSTALLMODE", "amus")])
            self.db.Commit()

        def get_installer_filename(self, fullname):
            """Include the repository revision (if known) in the MSI file name."""
            if revision:
                fullname += '-hg_' + revision
            return bdist_msi.bdist_msi.get_installer_filename(self, fullname)
cmdclass['bdist_msi'] = bdist_msi_overwrite_on_install
# test command. For doing 'python setup.py test'
class TestCommand(Command):
    """distutils command for 'python setup.py test': runs the test suite in a
    subprocess and returns its exit status."""
    user_options = [ ]

    def initialize_options(self):
        self._dir = os.getcwd()

    def finalize_options(self):
        pass

    def run(self):
        '''
        runs the tests with default options.
        '''
        import subprocess
        return subprocess.call([sys.executable, os.path.join('test', '__main__.py')])
cmdclass['test'] = TestCommand
class DocsCommand(Command):
    """ For building the pygame documentation with `python setup.py docs`.

    This generates html, and documentation .h header files.
    """
    user_options = [ ]

    def initialize_options(self):
        self._dir = os.getcwd()

    def finalize_options(self):
        pass

    def run(self):
        '''
        runs the tests with default options.
        '''
        docs_help = (
            "Building docs requires Python version 3.6 or above, and sphinx."
        )
        if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
            raise SystemExit(docs_help)

        import subprocess
        try:
            return subprocess.call([
                sys.executable, os.path.join('buildconfig', 'makeref.py')]
            )
        except:
            # Re-raise after pointing at the docs requirements.
            print(docs_help)
            raise
cmdclass['docs'] = DocsCommand
# Prune empty file lists.
date_files = [(path, files) for path, files in data_files if files]
#finally,
#call distutils with all needed info
PACKAGEDATA = {
"cmdclass": cmdclass,
"packages": ['pygame', 'pygame.gp2x', 'pygame.threads',
'pygame.tests',
'pygame.tests.test_utils',
'pygame.tests.run_tests__tests',
'pygame.tests.run_tests__tests.all_ok',
'pygame.tests.run_tests__tests.failures1',
'pygame.tests.run_tests__tests.incomplete',
'pygame.tests.run_tests__tests.infinite_loop',
'pygame.tests.run_tests__tests.print_stderr',
'pygame.tests.run_tests__tests.print_stdout',
'pygame.tests.run_tests__tests.incomplete_todo',
'pygame.tests.run_tests__tests.exclude',
'pygame.tests.run_tests__tests.timeout',
'pygame.tests.run_tests__tests.everything',
'pygame.docs',
'pygame.examples'],
"package_dir": {'pygame': 'src_py',
'pygame.threads': 'src_py/threads',
'pygame.gp2x': 'src_py/gp2x',
'pygame.tests': 'test',
'pygame.docs': 'docs',
'pygame.examples': 'examples'},
"headers": headers,
"ext_modules": extensions,
"data_files": data_files,
"zip_safe": False,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
try:
setup(**PACKAGEDATA)
except:
compilation_help()
raise
def remove_old_files():
    """Remove known-obsolete module files left behind by older pygame installs.

    Locates the installed pygame (preferably by importing it, so files from a
    different installation are never touched) and deletes stale extension and
    .py modules, printing a note for each attempt.
    """
    # pygame could be installed in a weird location because of setuptools or
    # something else. The only sane way seems to be by trying first to import
    # it, and see where the imported one is. Otherwise we might delete some
    # files from another installation.
    try:
        import pygame.base
        use_pygame = True
    except Exception:
        use_pygame = False

    if use_pygame:
        install_path = os.path.split(pygame.base.__file__)[0]
        extension_ext = os.path.splitext(pygame.base.__file__)[1]
    else:
        if not os.path.exists(data_path):
            return
        install_path = data_path
        base_file = glob.glob(os.path.join(data_path, "base*"))
        if not base_file:
            return
        extension_ext = os.path.splitext(base_file[0])[1]

    # here are the .so/.pyd files we need to ask to remove.
    ext_to_remove = ["camera"]

    # here are the .py/.pyo/.pyc files we need to ask to remove.
    py_to_remove = ["color"]
    # (Removed a stray no-op expression `os.path.join(data_path, 'color.py')`.)

    if os.name == "e32":  # Don't warn on Symbian. The color.py is used as a wrapper.
        py_to_remove = []

    # See if any of the files are there.
    extension_files = ["%s%s" % (x, extension_ext) for x in ext_to_remove]
    py_files = ["%s%s" % (x, py_ext)
                for py_ext in [".py", ".pyc", ".pyo"]
                for x in py_to_remove]

    files = py_files + extension_files
    unwanted_files = [os.path.join(install_path, f) for f in files]
    ask_remove = [f for f in unwanted_files if os.path.exists(f)]

    for f in ask_remove:
        try:
            print("trying to remove old file :%s: ..." % f)
            os.remove(f)
            print("Successfully removed :%s:." % f)
        except Exception:
            print("FAILED to remove old file :%s:" % f)
if "install" in sys.argv:
# remove some old files.
# only call after a successful install. Should only reach here if there is
# a successful install... otherwise setup() raises an error.
try:
remove_old_files()
except:
pass
| 32.61437 | 99 | 0.572944 |
DESCRIPTION = """Pygame is a Python wrapper module for the
SDL multimedia library. It contains python functions and classes
that will allow you to use SDL's support for playing cdroms,
audio and video output, and keyboard, mouse and joystick input."""
EXTRAS = {}
METADATA = {
"name": "pygame",
"version": "1.9.5.dev0",
"license": "LGPL",
"url": "https://www.pygame.org",
"author": "Pete Shinners, Rene Dudfield, Marcus von Appen, Bob Pendleton, others...",
"author_email": "pygame@seul.org",
"description": "Python Game Development",
"long_description": DESCRIPTION,
}
import sys
import os
def compilation_help():
    """Print a distro/OS-specific compilation-help URL on build failure."""
    import platform
    the_system = platform.system()
    if the_system == 'Linux':
        # linux_distribution was removed in Python 3.8, hence the hasattr guard.
        if hasattr(platform, 'linux_distribution'):
            distro = platform.linux_distribution()
            if distro[0] == 'Ubuntu':
                the_system = 'Ubuntu'
            elif distro[0] == 'Debian':
                the_system = 'Debian'

    help_urls = {
        'Linux': 'https://www.pygame.org/wiki/Compilation',
        'Ubuntu': 'https://www.pygame.org/wiki/CompileUbuntu',
        'Debian': 'https://www.pygame.org/wiki/CompileDebian',
        'Windows': 'https://www.pygame.org/wiki/CompileWindows',
        'Darwin': 'https://www.pygame.org/wiki/MacCompile',
    }

    default = 'https://www.pygame.org/wiki/Compilation'
    # Bug fix: use the distro-refined name computed above, not
    # platform.system() again -- otherwise the Ubuntu/Debian entries are dead.
    url = help_urls.get(the_system, default)

    is_pypy = '__pypy__' in sys.builtin_module_names
    if is_pypy:
        url += '\n    https://www.pygame.org/wiki/CompilePyPy'

    print ('---')
    print ('For help with compilation see:')
    print ('    %s' % url)
    print ('To contribute to pygame development see:')
    print ('    https://www.pygame.org/contribute.html')
    print ('---')
if not hasattr(sys, 'version_info') or sys.version_info < (2,7):
compilation_help()
raise SystemExit("Pygame requires Python version 2.7 or above.")
#get us to the correct directory
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(path)
#os.environ["CFLAGS"] = "-W -Wall -Wpointer-arith -Wcast-qual -Winline " + \
# "-Wcast-align -Wconversion -Wstrict-prototypes " + \
# "-Wmissing-prototypes -Wmissing-declarations " + \
# "-Wnested-externs -Wshadow -Wredundant-decls"
if "-warnings" in sys.argv:
os.environ["CFLAGS"] = "-W -Wimplicit-int " + \
"-Wimplicit-function-declaration " + \
"-Wimplicit -Wmain -Wreturn-type -Wunused -Wswitch " + \
"-Wcomment -Wtrigraphs -Wformat -Wchar-subscripts " + \
"-Wuninitialized -Wparentheses " +\
"-Wpointer-arith -Wcast-qual -Winline -Wcast-align " + \
"-Wconversion -Wstrict-prototypes " + \
"-Wmissing-prototypes -Wmissing-declarations " + \
"-Wnested-externs -Wshadow -Wredundant-decls"
sys.argv.remove ("-warnings")
AUTO_CONFIG = False
if '-auto' in sys.argv:
AUTO_CONFIG = True
sys.argv.remove('-auto')
import os.path, glob, stat, shutil
import distutils.sysconfig
from distutils.core import setup, Extension, Command
from distutils.extension import read_setup_file
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
revision = ''
# Python 3.0 patch
# Python 3.0 only: distutils there still references the removed builtin
# cmp(), so patch in a three-way comparison for version comparisons.
if sys.version_info[0:2] == (3, 0):
    import distutils.version
    def _cmp(x, y):
        # Classic cmp() contract: -1 / 0 / 1, or NotImplemented when the
        # operands are not orderable.
        try:
            if x < y:
                return -1
            elif x == y:
                return 0
            return 1
        except TypeError:
            return NotImplemented
    distutils.version.cmp = _cmp
    del _cmp  # keep the module namespace clean
def add_datafiles(data_files, dest_dir, pattern):
    """Expand a nested (src_dir, elements) *pattern* into *data_files*.

    *pattern* is a pair [source_dir, elements] where each element is either
    a glob string (matched inside the current source directory) or a nested
    [sub_dir, elements] pair, mirrored under *dest_dir*.  Every directory
    that matches at least one file contributes a (dest_path, files) entry;
    *data_files* is mutated in place.
    """
    top_src, top_elements = pattern

    def _walk(dest_path, src_path, elements):
        matched = []
        for entry in elements:
            if isinstance(entry, list):
                sub_dir, sub_elements = entry
                _walk('/'.join([dest_path, sub_dir]),
                      os.path.join(src_path, sub_dir),
                      sub_elements)
            else:
                matched.extend(glob.glob(os.path.join(src_path, entry)))
        if matched:
            data_files.append((dest_path, matched))

    _walk(dest_dir, top_src, top_elements)
# allow optionally using setuptools for bdist_egg.
if "-setuptools" in sys.argv:
from setuptools import setup, find_packages
sys.argv.remove ("-setuptools")
from setuptools import setup, find_packages
# NOTE: the bdist_mpkg_support is for darwin.
try:
import bdist_mpkg_support
from setuptools import setup, Extension
except ImportError:
pass
else:
EXTRAS.update({
'options': bdist_mpkg_support.options,
'setup_requires': ['bdist_mpkg>=0.4.2'],
#'install_requires': ['pyobjc'],
#'dependency_links': ['http://rene.f0o.com/~rene/stuff/macosx/']
})
#headers to install
headers = glob.glob(os.path.join('src_c', '*.h'))
headers.remove(os.path.join('src_c', 'scale.h'))
# option for not installing the headers.
if "-noheaders" in sys.argv:
headers = []
sys.argv.remove ("-noheaders")
#sanity check for any arguments
if len(sys.argv) == 1 and sys.stdout.isatty():
if sys.version_info[0] >= 3:
reply = input('\nNo Arguments Given, Perform Default Install? [Y/n]')
else:
reply = raw_input('\nNo Arguments Given, Perform Default Install? [Y/n]')
if not reply or reply[0].lower() != 'n':
sys.argv.append('install')
#make sure there is a Setup file
if AUTO_CONFIG or not os.path.isfile('Setup'):
print ('\n\nWARNING, No "Setup" File Exists, Running "buildconfig/config.py"')
import buildconfig.config
buildconfig.config.main(AUTO_CONFIG)
if '-config' in sys.argv:
sys.exit(0)
print ('\nContinuing With "setup.py"')
try:
s_mtime = os.stat("Setup")[stat.ST_MTIME]
sin_mtime = os.stat(os.path.join('buildconfig', 'Setup.SDL1.in'))[stat.ST_MTIME]
if sin_mtime > s_mtime:
print ('\n\nWARNING, "buildconfig/Setup.SDL1.in" newer than "Setup",'
'you might need to modify "Setup".')
except:
pass
# get compile info for all extensions
try:
extensions = read_setup_file('Setup')
except:
print ("""Error with the "Setup" file,
perhaps make a clean copy from "Setup.in".""")
compilation_help()
raise
#decide whether or not to enable new buffer protocol support
enable_newbuf = False
if sys.version_info >= (2, 6, 0):
try:
sys.pypy_version_info
except AttributeError:
enable_newbuf = True
if enable_newbuf:
enable_newbuf_value = '1'
else:
enable_newbuf_value = '0'
for e in extensions:
e.define_macros.append(('ENABLE_NEWBUF', enable_newbuf_value))
#if new buffer protocol support is disabled then remove the testing framework
if not enable_newbuf:
posn = None
for i, e in enumerate(extensions):
if e.name == 'newbuffer':
posn = i
if (posn is not None):
del extensions[posn]
# if not building font, try replacing with ftfont
alternate_font = os.path.join('src_py', 'font.py')
if os.path.exists(alternate_font):
os.remove(alternate_font)
have_font = False
have_freetype = False
for e in extensions:
if e.name == 'font':
have_font = True
if e.name == '_freetype':
have_freetype = True
if not have_font and have_freetype:
shutil.copyfile(os.path.join('src_py', 'ftfont.py'), alternate_font)
#extra files to install
data_path = os.path.join(distutils.sysconfig.get_python_lib(), 'pygame')
pygame_data_files = []
data_files = [('pygame', pygame_data_files)]
#add files in distribution directory
# pygame_data_files.append('LGPL')
# pygame_data_files.append('readme.html')
# pygame_data_files.append('install.html')
#add non .py files in lib directory
for f in glob.glob(os.path.join('src_py', '*')):
if not f[-3:] == '.py' and not f[-4:] == '.doc' and os.path.isfile(f):
pygame_data_files.append(f)
#tests/fixtures
add_datafiles(data_files, 'pygame/tests',
['test',
[['fixtures',
[['xbm_cursors',
['*.xbm']],
['fonts',
['*.ttf', '*.otf', '*.bdf', '*.png']]]]]])
#examples
add_datafiles(data_files, 'pygame/examples',
['examples',
['readme.rst',
['data',
['*']],
['macosx',
['*.py',
['aliens_app_example',
['*.py',
'README.txt',
['English.lproj',
['aliens.icns',
['MainMenu.nib',
['*']]]]]]]]]])
#docs
add_datafiles(data_files, 'pygame/docs',
['docs',
['*.html', # Navigation and help pages
'*.gif', # pygame logos
'*.js', # For doc search
['ref', # pygame reference
['*.html', # Reference pages
'*.js', # Comments script
'*.json']], # Comment data
['c_api', # pygame C API
['*.html']],
['tut', # Tutorials
['*.html',
['tom',
['*.html',
'*.png']]]],
['_static', # Sphinx added support files
['*.css',
'*.png',
'*.ico',
'*.js']],
['_images', # Sphinx added reST ".. image::" refs
['*.jpg',
'*.png',
'*.gif']],
['_sources', # Used for ref search
['*.txt',
['ref',
['*.txt']]]]]])
#generate the version module
def parse_version(ver):
    """Return the first three numeric components of *ver* as 'X, Y, Z'.

    e.g. '1.9.5.dev0' -> '1, 9, 5'.  The result is written verbatim into
    src_py/version.py, where it becomes the ``vernum`` tuple.
    """
    from re import findall
    # Fixed: use a raw string for the regex -- '\d' is an invalid escape
    # sequence in an ordinary string literal (SyntaxWarning on Python 3.12+).
    return ', '.join(s for s in findall(r'\d+', ver)[0:3])
def write_version_module(pygame_version, revision):
    """Generate src_py/version.py from the buildconfig template.

    The template (buildconfig/version.py.in) is copied verbatim and then the
    ver / vernum / rev assignments are appended, so the generated module
    exposes the release string, the numeric version tuple and the VCS
    revision at runtime.
    """
    # 'X, Y, Z' -- written unquoted below, so `vernum` becomes a tuple.
    vernum = parse_version(pygame_version)
    with open(os.path.join('buildconfig', 'version.py.in'), 'r') as header_file:
        header = header_file.read()
    with open(os.path.join('src_py', 'version.py'), 'w') as version_file:
        version_file.write(header)
        version_file.write('ver = "' + pygame_version + '"\n')
        version_file.write('vernum = ' + vernum + '\n')
        version_file.write('rev = "' + revision + '"\n')
write_version_module(METADATA['version'], revision)
#required. This will be filled if doing a Windows build.
cmdclass = {}
#try to find DLLs and copy them too (only on windows)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
#add dependency DLLs to the project
lib_dependencies = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
lib_dependencies[e.name[8:]] = e.libraries
def dependencies(roots):
root_set = {}
for root in roots:
try:
deps = lib_dependencies[root]
except KeyError:
pass
else:
root_set[root] = 1
root_set.update(dependencies(deps))
return root_set
the_dlls = {}
required_dlls = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
the_dlls[e.name[8:]] = e.library_dirs[0]
else:
required_dlls.update(dependencies(e.libraries))
# join the required_dlls and the_dlls keys together.
lib_names = {}
for lib in list(required_dlls.keys()) + list(the_dlls.keys()):
lib_names[lib] = 1
for lib in lib_names.keys():
#next DLL; a distutils bug requires the paths to have Windows separators
f = the_dlls[lib].replace('/', os.sep)
if f == '_':
print ("WARNING, DLL for %s library not found." % lib)
else:
pygame_data_files.append(f)
    class WinBuildExt(build_ext):
        """build_ext subclass that records the SDL library directory."""
        # __sdl_lib_dir is possible location of msvcrt replacement import
        # libraries, if they exist. Pygame module base only links to SDL so
        # should have the SDL library directory as its only -L option.
        # NOTE(review): this loop runs once, at class-definition time, and
        # also leaves `e` behind in the class namespace; the double leading
        # underscore mangles the attribute to _WinBuildExt__sdl_lib_dir.
        for e in extensions:
            if e.name == 'base':
                __sdl_lib_dir = e.library_dirs[0].replace('/', os.sep)
                break
cmdclass['build_ext'] = WinBuildExt
# Add the precompiled smooth scale MMX functions to transform.
    def replace_scale_mmx():
        """Swap transform's scale_mmx.c source for a precompiled object.

        On Windows a prebuilt 32/64-bit scale_mmx.obj is linked into the
        'transform' extension (presumably because the MMX source does not
        build with MSVC -- see buildconfig/obj) and the C source is removed
        from the extension's source list.
        """
        for e in extensions:
            if e.name == 'transform':
                if '64 bit' in sys.version:
                    e.extra_objects.append(
                        os.path.join('buildconfig', 'obj', 'win64', 'scale_mmx.obj'))
                else:
                    e.extra_objects.append(
                        os.path.join('buildconfig', 'obj', 'win32', 'scale_mmx.obj'))
                for i in range(len(e.sources)):
                    if e.sources[i].endswith('scale_mmx.c'):
                        # Return immediately after the first removal so this
                        # index loop never runs over the now-shifted list.
                        del e.sources[i]
                        return
replace_scale_mmx()
#clean up the list of extensions
for e in extensions[:]:
if e.name.startswith('COPYLIB_'):
extensions.remove(e) #don't compile the COPYLIBs, just clean them
else:
e.name = 'pygame.' + e.name
class smart_install_data(install_data):
    """install_data variant that targets the package library directory.

    By default distutils installs data files relative to sys.prefix; this
    redirects them next to the installed modules (install_lib) so runtime
    code can find them with package-relative paths.
    """
    def run(self):
        self.install_dir = self.get_finalized_command('install').install_lib
        return install_data.run(self)
cmdclass['install_data'] = smart_install_data
class OurSdist(sdist):
    """sdist that reads its manifest template from buildconfig/MANIFEST.in
    instead of the default top-level MANIFEST.in."""
    def initialize_options(self):
        sdist.initialize_options(self)
        self.template = os.path.join('buildconfig', 'MANIFEST.in')
cmdclass['sdist'] = OurSdist
if "bdist_msi" in sys.argv:
from distutils.command import bdist_msi
import msilib
    class bdist_msi_overwrite_on_install(bdist_msi.bdist_msi):
        """bdist_msi that makes the installer overwrite files from older
        pygame releases (renamed/moved .pyd and color.py* files) and forces
        REINSTALLMODE so existing files are always replaced."""
        def run(self):
            bdist_msi.bdist_msi.run(self)
            # Patch the generated MSI database: schedule stale files for
            # removal and relax the reinstall policy.
            comp = "pygame1"
            prop = comp
            records = [("surfarray.pyd", comp,
                        "SURFAR~1.PYD|surfarray.pyd", prop, 1),
                       ("sndarray.pyd", comp,
                        "SNDARRAY.PYD|sndarray.pyd", prop, 1),
                       ("camera.pyd", comp, "CAMERA.PYD|camera.pyd", prop, 1),
                       ("color.py", comp, "COLOR.PY|color.py", prop, 1),
                       ("color.pyc", comp, "COLOR.PYC|color.pyc", prop, 1),
                       ("color.pyo", comp, "COLOR.PYO|color.pyo", prop, 1)]
            msilib.add_data(self.db, "RemoveFile", records)
            fullname = self.distribution.get_fullname()
            installer_name = self.get_installer_filename(fullname)
            print ("changing %s to overwrite files on install" % installer_name)
            # "amus": reinstall all files regardless of version/checksum.
            msilib.add_data(self.db, "Property", [("REINSTALLMODE", "amus")])
            self.db.Commit()
        def get_installer_filename(self, fullname):
            # Tag dev builds with the VCS revision, when known.
            if revision:
                fullname += '-hg_' + revision
            return bdist_msi.bdist_msi.get_installer_filename(self, fullname)
cmdclass['bdist_msi'] = bdist_msi_overwrite_on_install
class TestCommand(Command):
    """`setup.py test`: run the pygame test suite in a child interpreter."""
    user_options = []
    def initialize_options(self):
        # Kept for distutils Command compatibility; the runner itself only
        # uses relative paths.
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        import subprocess
        runner = os.path.join('test', '__main__.py')
        return subprocess.call([sys.executable, runner])
cmdclass['test'] = TestCommand
class DocsCommand(Command):
    """`setup.py docs`: regenerate the reference documentation by running
    buildconfig/makeref.py in a child interpreter (requires Python >= 3.6
    and sphinx)."""
    user_options = [ ]
    def initialize_options(self):
        # Kept for distutils Command compatibility.
        self._dir = os.getcwd()
    def finalize_options(self):
        pass
    def run(self):
        docs_help = (
            "Building docs requires Python version 3.6 or above, and sphinx."
        )
        if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
            raise SystemExit(docs_help)
        import subprocess
        try:
            return subprocess.call([
                sys.executable, os.path.join('buildconfig', 'makeref.py')]
            )
        except:
            # Most likely the docs toolchain is missing: remind the user of
            # the requirements, then let the original error surface.
            print(docs_help)
            raise
cmdclass['docs'] = DocsCommand
date_files = [(path, files) for path, files in data_files if files]
PACKAGEDATA = {
"cmdclass": cmdclass,
"packages": ['pygame', 'pygame.gp2x', 'pygame.threads',
'pygame.tests',
'pygame.tests.test_utils',
'pygame.tests.run_tests__tests',
'pygame.tests.run_tests__tests.all_ok',
'pygame.tests.run_tests__tests.failures1',
'pygame.tests.run_tests__tests.incomplete',
'pygame.tests.run_tests__tests.infinite_loop',
'pygame.tests.run_tests__tests.print_stderr',
'pygame.tests.run_tests__tests.print_stdout',
'pygame.tests.run_tests__tests.incomplete_todo',
'pygame.tests.run_tests__tests.exclude',
'pygame.tests.run_tests__tests.timeout',
'pygame.tests.run_tests__tests.everything',
'pygame.docs',
'pygame.examples'],
"package_dir": {'pygame': 'src_py',
'pygame.threads': 'src_py/threads',
'pygame.gp2x': 'src_py/gp2x',
'pygame.tests': 'test',
'pygame.docs': 'docs',
'pygame.examples': 'examples'},
"headers": headers,
"ext_modules": extensions,
"data_files": data_files,
"zip_safe": False,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
try:
setup(**PACKAGEDATA)
except:
compilation_help()
raise
def remove_old_files():
    """Delete leftover modules from previous pygame installs.

    Some modules (camera extension, color.py*) moved or were renamed across
    releases; stale copies left in the install directory would shadow the
    new ones, so they are removed after a successful install.
    """
    # Prefer the actually-importable pygame to locate the install directory...
    try:
        import pygame.base
        use_pygame = 1
    except:
        use_pygame = 0
    if use_pygame:
        install_path= os.path.split(pygame.base.__file__)[0]
        extension_ext = os.path.splitext(pygame.base.__file__)[1]
    else:
        # ...otherwise fall back to the computed site-packages path and infer
        # the extension suffix from whatever base* module is present.
        if not os.path.exists(data_path):
            return
        install_path = data_path
        base_file = glob.glob(os.path.join(data_path, "base*"))
        if not base_file:
            return
        extension_ext = os.path.splitext(base_file[0])[1]
    ext_to_remove = ["camera"]
    py_to_remove = ["color"]
    # NOTE(review): the next expression has no effect -- its result is
    # discarded. Looks like a leftover from an earlier edit.
    os.path.join(data_path, 'color.py')
    if os.name == "e32":
        # presumably the old Symbian ('e32') port keeps color.py -- verify.
        py_to_remove = []
    # See if any of the files are there.
    extension_files = ["%s%s" % (x, extension_ext) for x in ext_to_remove]
    py_files = ["%s%s" % (x, py_ext)
                for py_ext in [".py", ".pyc", ".pyo"]
                for x in py_to_remove]
    files = py_files + extension_files
    unwanted_files = []
    for f in files:
        unwanted_files.append( os.path.join( install_path, f ) )
    ask_remove = []
    for f in unwanted_files:
        if os.path.exists(f):
            ask_remove.append(f)
    for f in ask_remove:
        try:
            print("trying to remove old file :%s: ..." %f)
            os.remove(f)
            print("Successfully removed :%s:." % f)
        except:
            # Non-fatal: report and keep going with the remaining files.
            print("FAILED to remove old file :%s:" % f)
if "install" in sys.argv:
# remove some old files.
# only call after a successful install. Should only reach here if there is
# a successful install... otherwise setup() raises an error.
try:
remove_old_files()
except:
pass
| true | true |
f72f7d71973611e5b1b6ca5c103ced2746353d09 | 2,070 | py | Python | ansible/old/inventory.py | Otus-DevOps-2021-11/ivan32rus_infra | d504a0bf8d98c9850b005390cd93aeea7e81bd1e | [
"MIT"
] | null | null | null | ansible/old/inventory.py | Otus-DevOps-2021-11/ivan32rus_infra | d504a0bf8d98c9850b005390cd93aeea7e81bd1e | [
"MIT"
] | 9 | 2021-12-20T21:16:20.000Z | 2022-02-18T11:56:22.000Z | docker/docker-monolith/infra/ansible/inventory.py | Otus-DevOps-2021-11/ivan32rus_microservices | 88e273124d1e1b39c46c526370b96385779c5a05 | [
"MIT"
] | 1 | 2021-12-18T11:49:30.000Z | 2021-12-18T11:49:30.000Z | #!/usr/bin/env python
'''
Example custom dynamic inventory script for Ansible, in Python.
FOR pyhhon 3.8.10 it's working
used:
https://www.jeffgeerling.com/blog/creating-custom-dynamic-inventories-ansible
'''
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
    """Minimal Ansible dynamic-inventory provider.

    Instantiating the class parses --list/--host from argv, builds the
    matching inventory structure and prints it to stdout as JSON -- exactly
    the contract Ansible expects from a dynamic inventory script.
    """

    def __init__(self):
        self.inventory = {}
        self.read_cli_args()
        if self.args.list:
            # `--list`: the full inventory, including _meta hostvars.
            self.inventory = self.example_inventory()
        else:
            # `--host <name>` (per-host vars already served via _meta) and
            # the no-arguments case both answer with an empty inventory.
            self.inventory = self.empty_inventory()
        print(json.dumps(self.inventory))

    def example_inventory(self):
        """Static example inventory: one group plus per-host vars."""
        return {
            "group": {
                "hosts": [
                    "51.250.4.212",
                    "62.84.113.197"
                ],
                "vars": {
                    "ansible_ssh_user": "ubuntu",
                    "ansible_ssh_private_key_file": "~/.ssh/id_rsa",
                    "example_variable": "value"
                }
            },
            "_meta": {
                "hostvars": {
                    "51.250.4.212": {
                        "reddit": "db"
                    },
                    "62.84.113.197": {
                        "reddit": "app"
                    }
                }
            }
        }

    def empty_inventory(self):
        """Inventory with no hosts (still carries the _meta key)."""
        return {'_meta': {'hostvars': {}}}

    def read_cli_args(self):
        """Parse the --list / --host flags into self.args."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()
# Get the inventory.
ExampleInventory()
| 26.883117 | 77 | 0.528986 |
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
self.inventory = {}
self.read_cli_args()
if self.args.list:
self.inventory = self.example_inventory()
elif self.args.host:
self.inventory = self.empty_inventory()
else:
self.inventory = self.empty_inventory()
print (json.dumps(self.inventory))
def example_inventory(self):
return {
"group": {
"hosts": [
"51.250.4.212",
"62.84.113.197"
],
"vars": {
"ansible_ssh_user": "ubuntu",
"ansible_ssh_private_key_file": "~/.ssh/id_rsa",
"example_variable": "value"
}
},
"_meta": {
"hostvars": {
"51.250.4.212": {
"reddit": "db"
},
"62.84.113.197": {
"reddit": "app"
}
}
}
}
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
ExampleInventory()
| true | true |
f72f7e10becbbc60d0c8669c8b5575f45e441968 | 4,353 | py | Python | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 42 | 2022-02-09T16:36:37.000Z | 2022-03-25T00:25:34.000Z | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 4 | 2022-03-22T13:22:38.000Z | 2022-03-25T16:14:40.000Z | pyrelational/models/mcdropout_model.py | RelationRx/pyrelational | 41ededeff84158bd88b76d39006764de3388c821 | [
"Apache-2.0"
] | 3 | 2022-02-15T17:50:30.000Z | 2022-03-10T18:14:16.000Z | import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
    """
    Generic model wrapper for mcdropout uncertainty estimator
    """

    def __init__(
        self,
        model_class: Type[Module],
        model_config: Union[str, Dict],
        trainer_config: Union[str, Dict],
        n_estimators: int = 10,
        eval_dropout_prob: float = 0.2,
    ):
        super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
        # Fail fast: MC Dropout is meaningless without dropout layers.
        _check_mc_dropout_model(model_class, model_config)
        self.n_estimators = n_estimators
        self.eval_dropout_prob = eval_dropout_prob

    def __call__(self, loader: DataLoader) -> torch.Tensor:
        """
        :param loader: pytorch dataloader
        :return: model predictions, stacked over ``n_estimators``
            stochastic forward passes
        """
        if self.current_model is None:
            raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")
        model = self.current_model
        model.eval()
        with torch.no_grad():
            # Keep dropout sampling active even though the model is in eval
            # mode -- that is what produces the estimator spread.
            _enable_only_dropout_layers(model, self.eval_dropout_prob)
            samples = []
            for _ in range(self.n_estimators):
                batches = [model(x).detach().cpu() for x, _ in loader]
                samples.append(torch.cat(batches, 0))
        return torch.stack(samples)
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
    r"""MC Dropout estimator backed by a pytorch-lightning trainer.

    Combines :class:`GenericMCDropoutModel` (uncertainty estimation via
    stochastic forward passes) with :class:`LightningModel` (training via a
    pytorch-lightning trainer).

    Example::

        wrapper = LightningMCDropoutModel(
            PyLModel,
            model_config={"in_dim": 10, "out_dim": 1},
            trainer_config={"epochs": 100},
            n_estimators=10,
            eval_dropout_prob=0.2,
        )
        wrapper.train(train_loader, valid_loader)
        predictions = wrapper(loader)
        assert predictions.size(0) == 10  # one slice per estimator
    """

    def __init__(
        self,
        model_class: Type[LightningModule],
        model_config: Union[Dict, str],
        trainer_config: Union[Dict, str],
        n_estimators: int = 10,
        eval_dropout_prob: float = 0.2,
    ):
        # Same signature as the generic wrapper; simply forward everything.
        super().__init__(
            model_class,
            model_config,
            trainer_config,
            n_estimators=n_estimators,
            eval_dropout_prob=eval_dropout_prob,
        )
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
| 32.977273 | 115 | 0.623938 | import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
def __init__(
self,
model_class: Type[Module],
model_config: Union[str, Dict],
trainer_config: Union[str, Dict],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
_check_mc_dropout_model(model_class, model_config)
self.n_estimators = n_estimators
self.eval_dropout_prob = eval_dropout_prob
def __call__(self, loader: DataLoader) -> torch.Tensor:
if self.current_model is None:
raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")
predictions = []
model = self.current_model
model.eval()
with torch.no_grad():
_enable_only_dropout_layers(model, self.eval_dropout_prob)
for _ in range(self.n_estimators):
model_prediction = []
for x, _ in loader:
model_prediction.append(model(x).detach().cpu())
predictions.append(torch.cat(model_prediction, 0))
predictions = torch.stack(predictions)
return predictions
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
def __init__(
self,
model_class: Type[LightningModule],
model_config: Union[Dict, str],
trainer_config: Union[Dict, str],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(LightningMCDropoutModel, self).__init__(
model_class,
model_config,
trainer_config,
n_estimators=n_estimators,
eval_dropout_prob=eval_dropout_prob,
)
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
| true | true |
f72f7e5086ec62838452027d2f90f7f9292e8954 | 1,602 | py | Python | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 330 | 2017-02-24T08:34:39.000Z | 2022-02-24T15:41:19.000Z | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 14 | 2017-02-02T00:54:16.000Z | 2021-02-19T16:01:20.000Z | fairml/tests/test_orthogonal_projection.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 70 | 2017-01-31T20:51:10.000Z | 2022-02-17T07:38:52.000Z | from __future__ import division
import pytest
import numpy as np
from random import randint
from fairml.orthogonal_projection import audit_model
from fairml.orthogonal_projection import get_orthogonal_vector
from fairml.utils import mse
from fairml.utils import accuracy
from fairml.utils import detect_feature_sign
from fairml.perturbation_strategies import constant_zero
# let's define a black-box function
def black_box_function(input_data):
    """Toy linear "black box": returns input_data @ weights.

    NOTE(review): relies on a module-level `weights` array that is never
    defined in this file -- a caller/fixture must inject it before invoking
    this function, otherwise it raises NameError.
    """
    if not (input_data.shape[1] == weights.shape[0]):
        raise Exception("problem, misaligned dimensions")
    output = np.dot(input_data, weights)
    return output
def test_orthogonal_projection(number_of_tries=20, size=10000):
    """Orthogonal projection function.

    Checks that get_orthogonal_vector(a, v) is (numerically) orthogonal to
    `a` for normal, binomial and uniform second vectors.
    """
    for i in range(number_of_tries):
        a = np.random.normal(0, 1, size)
        b = np.random.normal(0, 1, size)
        c = np.random.binomial(10, 0.1, size)
        d = np.random.uniform(0, 10, size)
        # Bug fix: orthogonality means |a . v| ~ 0. The original asserted
        # np.dot(...) < 1e-8 without abs(), which any large *negative* dot
        # product would also satisfy, masking real failures.
        # normal-normal check
        orth_b = get_orthogonal_vector(a, b)
        assert abs(np.dot(orth_b, a)) < 1e-8
        # normal-binomial check
        ortho_c = get_orthogonal_vector(a, c)
        assert abs(np.dot(ortho_c, a)) < 1e-8
        # normal-uniform check
        ortho_d = get_orthogonal_vector(a, d)
        assert abs(np.dot(ortho_d, a)) < 1e-8
def test_mse():
    """mse over errors (0.5, 0.5, 0, 1) -> mean of squares = 1.5/4 = 0.375."""
    actual = [3, -0.5, 2, 7]
    predicted = [2.5, 0.0, 2, 8]
    assert mse(actual, predicted) == 0.375
def test_accuracy():
    """Two of the four predictions match, so accuracy is 0.5."""
    predicted = [0, 2, 1, 3]
    expected = [0, 1, 2, 3]
    result = accuracy(predicted, expected)
    print(result)
    assert result == 0.5
| 25.428571 | 63 | 0.671036 | from __future__ import division
import pytest
import numpy as np
from random import randint
from fairml.orthogonal_projection import audit_model
from fairml.orthogonal_projection import get_orthogonal_vector
from fairml.utils import mse
from fairml.utils import accuracy
from fairml.utils import detect_feature_sign
from fairml.perturbation_strategies import constant_zero
def black_box_function(input_data):
if not (input_data.shape[1] == weights.shape[0]):
raise Exception("problem, misaligned dimensions")
output = np.dot(input_data, weights)
return output
def test_orthogonal_projection(number_of_tries=20, size=10000):
for i in range(number_of_tries):
a = np.random.normal(0, 1, size)
b = np.random.normal(0, 1, size)
c = np.random.binomial(10, 0.1, size)
d = np.random.uniform(0, 10, size)
# normal-normal check
orth_b = get_orthogonal_vector(a, b)
assert np.dot(orth_b, a) < 1e-8
# normal- normal check
ortho_c = get_orthogonal_vector(a, c)
assert np.dot(ortho_c, a) < 1e-8
# normal - uniform check
ortho_d = get_orthogonal_vector(a, d)
assert np.dot(ortho_d, a) < 1e-8
def test_mse():
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
test_mse = mse(y_true, y_pred)
assert test_mse == 0.375
def test_accuracy():
y_pred = [0, 2, 1, 3]
y_true = [0, 1, 2, 3]
test_acc = accuracy(y_pred, y_true)
print(test_acc)
assert test_acc == 0.5
| true | true |
f72f8089cc89101fe7a243dfe2b57fdf92cb7ad2 | 22,912 | py | Python | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | 1 | 2019-07-13T12:04:04.000Z | 2019-07-13T12:04:04.000Z | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
# Constants mirrored from the XNU kernel sources (link above each group).
NULL = 0x0
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/eflags.h
EFL_TF = 0x00000100  # RFLAGS trap flag (hardware single-step)
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/vm_param.h
I386_PGBYTES = 4096  # x86 page size
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/EXTERNAL_HEADERS/mach-o/loader.h
MH_MAGIC_64 = 0xFEEDFACF  # 64-bit Mach-O header magic
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/exception_types.h
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/exception.h
EXC_I386_BPTFLT = 0x3  # breakpoint fault (int3)
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/bsd/sys/signal.h
SIGINT = 0x2
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/proc_reg.h
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/machine.h
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
class STUBVM(object):
    def __init__(self, stub, name):
        """Wrap a VM debugging backend.

        `stub` is a backend class/factory (e.g. FDP or VMSN) called with the
        VM name; the resulting object exposes the low-level debugger API
        used throughout this class.
        """
        self.stub = stub(name)
        self.name = name
        # Serializes stub access across debugger callback threads
        # (used via lldbagilityutils.synchronized on the public methods).
        self.lock = threading.RLock()
        # Pending exception info to report to the debugger, if any.
        self._exception = None
        # Software breakpoints installed by the debugger (populated by code
        # outside this chunk).
        self._soft_breakpoints = {}
        self._interrupt_at_next_resume = False
        self._singlestep_at_next_resume = False
        # State for intercepting the guest's KDP traffic -- presumably set by
        # the virtual-memory read/write paths (not visible here).
        self._kdp_vaddr = None
        self._store_kdp_at_next_write_virtual_memory = False
        # When armed, the next "rip" read is reported incremented by one
        # (see read_register for the LLDB/KDP rationale).
        self._return_incremented_at_next_read_register_rip = False
    @lldbagilityutils.indented(logger)
    def _continue_until_kernel_code(self):
        """Run the VM until RIP lands inside the kernel address space.

        Heuristic: set a hardware breakpoint on writes to CR3 -- page-table
        reloads are (almost always) performed by the kernel -- then resume/
        single-step until the stop address is a kernel address.
        """
        logger.debug("_continue_until_kernel_code()")
        # Already in the kernel: nothing to do.
        if _in_kernel_space(self.read_register("rip")):
            return
        # set a breakpoint on writes to the CR3 register (with high probability
        # only the kernel is doing it)
        cr3bp_id = self.stub.SetBreakpoint(
            self.stub.CR_HBP,
            0x0,
            self.stub.WRITE_BP,
            self.stub.VIRTUAL_ADDRESS,
            0x3,
            0x1,
            self.stub.NO_CR3,
        )
        # Ids outside [0, 254] indicate the stub failed to set the
        # breakpoint -- TODO confirm against the FDP API.
        assert 0 <= cr3bp_id <= 254
        # resume the VM execution until reaching kernel code
        while True:
            self.stub.Resume()
            self.stub.WaitForStateChanged()
            if _in_kernel_space(self.read_register("rip")):
                logger.debug("> stopping: 0x{:016x}".format(self.read_register("rip")))
                break
            # Not in the kernel yet: step past the breakpoint hit and retry.
            self.stub.SingleStep()
        self.stub.UnsetBreakpoint(cr3bp_id)
    @lldbagilityutils.indented(logger)
    def _get_active_thread_vaddr(self):
        """Return the kernel virtual address of the currently active thread.

        Mirrors XNU's per-CPU data layout (see the linked sources): the GS
        base points at the current cpu_data_t, whose first 8-byte field is a
        self pointer (cpu_this) and whose second is cpu_active_thread.
        """
        logger.debug("_get_active_thread_vaddr()")
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L392
        # Nested helper taking `self` explicitly; called below as a plain
        # function, not as a method.
        def _get_gs_base(self):
            logger.debug("_get_gs_base()")
            gs_base = self.read_msr64(MSR_IA32_GS_BASE)
            logger.debug("> MSR_IA32_GS_BASE: 0x{:016x}".format(gs_base))
            if not _in_kernel_space(gs_base):
                # Guest stopped in user mode: the kernel's GS base is parked
                # in KERNEL_GS_BASE until the next swapgs.
                gs_base = self.read_msr64(MSR_IA32_KERNEL_GS_BASE)
                logger.debug("> MSR_IA32_KERNEL_GS_BASE: 0x{:016x}".format(gs_base))
            return gs_base
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/mp_desc.c#L476
        cpu_data_vaddr = _get_gs_base(self)
        logger.debug("> cpu_data_vaddr: 0x{:016x}".format(cpu_data_vaddr))
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L149
        cpu_this = lldbagilityutils.u64(self.read_virtual_memory(cpu_data_vaddr, 0x8))
        logger.debug("> cpu_this: 0x{:016x}".format(cpu_this))
        # Sanity check: cpu_data_t starts with a pointer to itself.
        assert cpu_data_vaddr == cpu_this
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L150
        cpu_active_thread = lldbagilityutils.u64(
            self.read_virtual_memory(cpu_data_vaddr + 0x8, 0x8)
        )
        logger.debug("> cpu_active_thread: 0x{:016x}".format(cpu_active_thread))
        return cpu_active_thread
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def complete_attach(self):
        """Finish attaching to the VM and cache kernel facts.

        Halts the guest, clears stale breakpoints and drives execution into
        kernel code, then records the kernel CR3 (page-table base for
        kernel-vaddr translation), the kernel image load address, the
        resulting slide and the kernel version string for later queries.
        """
        logger.debug("complete_attach()")
        self.halt()
        self.unset_all_breakpoints()
        self._continue_until_kernel_code()
        assert _in_kernel_space(self.read_register("rip"))
        self.kernel_cr3 = self.read_register("cr3")
        logger.debug("> kernel_cr3: 0x{:x}".format(self.kernel_cr3))
        self.kernel_load_vaddr = _find_kernel_load_vaddr(self)
        logger.debug("> kernel_load_vaddr: 0x{:016x}".format(self.kernel_load_vaddr))
        self.kernel_slide = _compute_kernel_slide(self.kernel_load_vaddr)
        logger.debug("> kernel_slide: 0x{:x}".format(self.kernel_slide))
        self.kernel_version = _find_kernel_version(self)
        logger.debug("> kernel_version: {}".format(self.kernel_version))
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def get_num_cpus(self):
        """Return the number of virtual CPUs exposed by the target VM."""
        logger.debug("get_num_cpus()")
        return self.stub.GetCpuCount()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_host_info(self):
logger.debug("get_host_info()")
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/kdp/ml/x86_64/kdp_machdep.c#L256
cpus_mask = 0x0
for i in range(self.get_num_cpus()):
cpus_mask |= 1 << i
cpu_type = CPU_TYPE_X86_64
cpu_subtype = CPU_SUBTYPE_X86_ARCH1
return cpus_mask, cpu_type, cpu_subtype
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_kernel_version(self):
logger.debug("get_kernel_version()")
kernel_version = self.kernel_version
if b"stext" not in kernel_version:
logger.debug("> stext")
# return the known kernel load address to make LLDB do less requests
kernel_version += "; stext=0x{:016x}".format(self.kernel_load_vaddr)
return kernel_version
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def read_msr64(self, msr):
        """Read the 64-bit model-specific register `msr` on CPU 0."""
        logger.debug("read_msr64(msr=0x{:x})".format(msr))
        return self.stub.ReadMsr(msr, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_msr64(self, msr, val):
logger.debug("write_msr64(msr=0x{:x}, val=0x{:x})".format(msr, val))
self.stub.WriteMsr(self, msr, val, CpuId=self.stub.CPU0)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def read_register(self, reg):
        """Return the value of register `reg` (e.g. "rip", "cr3", "dr7")."""
        logger.debug("read_register(reg='{}')".format(reg))
        val = getattr(self.stub, reg)
        if reg == "rip" and self._return_incremented_at_next_read_register_rip:
            logger.debug("> _return_incremented_at_next_read_register_rip")
            self._return_incremented_at_next_read_register_rip = False
            # after a breakpoint exception LLDB expects RIP one past the
            # (virtual) INT3 and decrements it itself; see state()
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/MacOSX-Kernel/ThreadKDP.cpp#L157
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp#L571
            return val + 1
        return val
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_registers(self, regs):
logger.debug("read_registers()")
return {reg: self.read_register(reg) for reg in regs}
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def write_register(self, reg, val):
        """Set register `reg` to `val`.

        Writes to RFLAGS are intercepted: setting the trap flag is recorded
        and emulated as a single step at the next resume() instead.
        """
        logger.debug("write_register(reg='{}', val=0x{:x})".format(reg, val))
        if reg == "rflags":
            if val & EFL_TF:
                logger.debug("> _singlestep_at_next_resume")
                self._singlestep_at_next_resume = True
            # disallow changes to RFLAGS
            return
        setattr(self.stub, reg, val)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def write_registers(self, regs):
        """Set several registers from a name -> value mapping."""
        logger.debug("write_registers()")
        for reg, val in regs.items():
            self.write_register(reg, val)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def read_virtual_memory(self, vaddr, nbytes):
        """Read `nbytes` of guest virtual memory at `vaddr`.

        Returns the bytes read, or b"" on failure. Falls back to the kernel
        pmap when reading kernel addresses from user space, and fakes part of
        the kdp struct for LLDBmacros when its address falls in the request.
        """
        logger.debug(
            "read_virtual_memory(vaddr=0x{:016x}, nbytes=0x{:x})".format(vaddr, nbytes)
        )
        data = self.stub.ReadVirtualMemory(vaddr, nbytes)
        if not data and not _in_kernel_space(self.read_register("rip")):
            # if reading fails, it could be the case that we are trying to read kernel
            # virtual addresses from user space (e.g. when LLDB stops in user land and
            # the user loads or uses lldbmacros)
            # in this case, we try the read again but using the kernel pmap
            logger.debug("> using kernel pmap")
            process_cr3 = self.read_register("cr3")
            # switch to kernel pmap
            self.write_register("cr3", self.kernel_cr3)
            # try the read again
            data = self.stub.ReadVirtualMemory(vaddr, nbytes)
            # switch back to the process pmap
            self.write_register("cr3", process_cr3)
        if self._kdp_vaddr and vaddr <= self._kdp_vaddr <= vaddr + nbytes:
            # this request has very likely been generated by LLDBmacros
            logger.debug("> fake kdp struct")
            assert data is not None
            # fill some fields of the empty (since the boot-arg "debug" is probably not set) kdp struct
            saved_state = lldbagilityutils.p64(NULL)
            kdp_thread = lldbagilityutils.p64(self._get_active_thread_vaddr())
            fake_partial_kdp_struct = b"".join((saved_state, kdp_thread))
            kdp_struct_offset = self._kdp_vaddr - vaddr
            # splice the fake struct over the corresponding bytes of the read
            data = (
                data[:kdp_struct_offset]
                + fake_partial_kdp_struct
                + data[kdp_struct_offset + len(fake_partial_kdp_struct) :]
            )
        data = data if data else b""
        logger.debug("> len(data): 0x{:x}".format(len(data)))
        return data
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def write_virtual_memory(self, vaddr, data):
        """Write `data` to guest virtual memory at `vaddr`.

        When armed via store_kdp_at_next_write_virtual_memory(), the write is
        suppressed and its address recorded as the kdp struct location
        instead (see read_virtual_memory()).
        """
        logger.debug("write_virtual_memory(vaddr=0x{:016x}, data=...)".format(vaddr))
        assert self.is_state_halted()
        if self._store_kdp_at_next_write_virtual_memory:
            logger.debug("> _store_kdp_at_next_write_virtual_memory")
            self._store_kdp_at_next_write_virtual_memory = False
            self._kdp_vaddr = vaddr
            return
        return self.stub.WriteVirtualMemory(vaddr, data)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_soft_exec_breakpoint(self, vaddr):
logger.debug("set_soft_exec_breakpoint(vaddr=0x{:016x})".format(vaddr))
assert self.is_state_halted()
id = 0x0
length = 0x1
self._soft_breakpoints[vaddr] = self.stub.SetBreakpoint(
self.stub.SOFT_HBP,
id,
self.stub.EXECUTE_BP,
self.stub.VIRTUAL_ADDRESS,
vaddr,
length,
self.stub.NO_CR3,
)
logger.debug("> bp id: {}".format(self._soft_breakpoints[vaddr]))
return self._soft_breakpoints[vaddr]
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_soft_breakpoint(self, vaddr):
logger.debug("unset_soft_breakpoint(vaddr=0x{:016x})")
assert self.is_state_halted()
try:
id = self._soft_breakpoints[vaddr]
except KeyError:
logger.debug("> no such breakpoint")
else:
del self._soft_breakpoints[vaddr]
return self.stub.UnsetBreakpoint(id)
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_hard_breakpoint(self, trigger, nreg, vaddr):
logger.debug(
"set_hard_exec_breakpoint(trigger='{}', nreg=0x{:016x}, vaddr=0x{:016x})".format(
trigger, nreg, vaddr
)
)
assert self.is_state_halted()
assert trigger in ("e", "w", "rw")
assert 0 <= nreg <= 3
trigger_bitshifts = {nreg: 16 + nreg * 4 for nreg in range(4)}
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
# reset trigger entry for the chosen register to 0b00
ctrl_mask &= ~(0b11 << trigger_bitshifts[nreg])
# set new entry
if trigger == "e":
trigger_entry = 0b00
elif trigger == "w":
trigger_entry = 0b01
elif trigger == "rw":
trigger_entry = 0b11
else:
raise NotImplementedError
ctrl_mask |= trigger_entry << trigger_bitshifts[nreg]
# enable breakpoint globally
ctrl_mask |= 0b10 << status_bitshifts[nreg]
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), vaddr)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_hard_breakpoint(self, nreg):
logger.debug("unset_hard_breakpoint(nreg=0x{:016x})".format(nreg))
assert self.is_state_halted()
assert 0 <= nreg <= 3
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
# disable breakpoint globally and locally
ctrl_mask &= ~(0b11 << status_bitshifts[nreg])
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), 0x0)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_all_breakpoints(self):
logger.debug("unset_all_breakpoints()")
assert self.is_state_halted()
# remove soft breakpoints
self._soft_breakpoints.clear()
self.stub.UnsetAllBreakpoint()
# remove hard breakpoints
self.write_register("dr0", 0x0)
self.write_register("dr1", 0x0)
self.write_register("dr2", 0x0)
self.write_register("dr3", 0x0)
self.write_register("dr6", 0x0)
self.write_register("dr7", 0x0)
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def halt(self):
        """Pause VM execution without reporting an exception to LLDB."""
        logger.debug("halt()")
        self.stub.Pause()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt(self):
        """Pause VM execution, reporting a SIGINT software exception."""
        logger.debug("interrupt()")
        self._exception = (EXC_SOFTWARE, EXC_SOFT_SIGNAL, SIGINT)
        self.halt()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def single_step(self):
        """Execute one instruction, reporting a breakpoint exception."""
        logger.debug("single_step()")
        self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
        self.stub.SingleStep()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def resume(self):
        """Resume VM execution, honouring any deferred interrupt/single-step.

        If a breakpoint is currently hit, single-step over it first so the
        same breakpoint does not immediately retrigger.
        """
        logger.debug("resume()")
        if self._interrupt_at_next_resume:
            logger.debug("> _interrupt_at_next_resume")
            self._interrupt_at_next_resume = False
            self.interrupt()
            return
        if self._singlestep_at_next_resume:
            logger.debug("> _singlestep_at_next_resume")
            self._singlestep_at_next_resume = False
            self.single_step()
            return
        if self.is_breakpoint_hit():
            logger.debug(
                "> state breakpoint hit: 0x{:016x}".format(self.read_register("rip"))
            )
            self.stub.SingleStep()
        self.stub.Resume()
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt_and_take_snapshot(self):
        """Halt the VM (reported as SIGINT) and save a snapshot."""
        logger.debug("interrupt_and_take_snapshot()")
        self.interrupt()
        self.stub.Save()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_restore_last_snapshot(self):
logger.debug("interrupt_and_restore_last_snapshot()")
self.interrupt()
if self.stub.Restore():
# breakpoints are not restored
self._soft_breakpoints.clear()
return True
else:
logger.debug("> could not restore")
return False
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def state(self):
        """Return (stub state, pending exception) and clear the exception."""
        logger.debug("state()")
        if self.is_breakpoint_hit():
            logger.debug("> state breakpoint hit")
            self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
            # the following assumes that the next call to STUBVM.read_register("rip")
            # will be made by LLDB in response to this EXC_BREAKPOINT exception
            self._return_incremented_at_next_read_register_rip = True
        state = (self.stub.GetState(), self._exception)
        self._exception = None
        return state
    @lldbagilityutils.synchronized
    def is_state_changed(self):
        """Truthy if the stub state changed or an exception is pending."""
        return self.stub.GetStateChanged() or self._exception
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def is_state_halted(self):
        """Truthy (non-zero bitmask) if the VM is currently paused."""
        logger.debug("is_state_halted()")
        return self.stub.GetState() & self.stub.STATE_PAUSED
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def is_breakpoint_hit(self):
        """Truthy if a soft or hard breakpoint is currently hit."""
        logger.debug("is_breakpoint_hit()")
        return self.stub.GetState() & (
            self.stub.STATE_BREAKPOINT_HIT | self.stub.STATE_HARD_BREAKPOINT_HIT
        )
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def interrupt_at_next_resume(self):
        """Arrange for the next resume() to interrupt the VM instead."""
        logger.debug("interrupt_at_next_resume()")
        self._interrupt_at_next_resume = True
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def store_kdp_at_next_write_virtual_memory(self):
        """Arrange for the next write_virtual_memory() to record the kdp
        struct address instead of performing the write."""
        logger.debug("store_kdp_at_next_write_virtual_memory()")
        self._store_kdp_at_next_write_virtual_memory = True
    @lldbagilityutils.indented(logger)
    @lldbagilityutils.synchronized
    def abort_store_kdp_at_next_write_virtual_memory(self):
        """Cancel a pending store_kdp_at_next_write_virtual_memory()."""
        logger.debug("abort_store_kdp_at_next_write_virtual_memory()")
        assert not self._kdp_vaddr
        self._store_kdp_at_next_write_virtual_memory = False
def _in_kernel_space(addr):
    """Return True if `addr` lies within the kernel virtual address range."""
    return not (addr < VM_MIN_KERNEL_ADDRESS or addr > VM_MAX_KERNEL_ADDRESS)
@lldbagilityutils.indented(logger)
def _find_kernel_load_vaddr(vm):
    """Return the virtual address at which the kernel Mach-O is loaded.

    Tries the lowglo page first (populated when the "debug" boot-arg is
    set), then falls back to scanning pages backwards from RIP.
    """
    logger.debug("_find_kernel_load_vaddr()")
    assert _in_kernel_space(vm.read_register("rip"))
    @lldbagilityutils.indented(logger)
    def _is_kernel_load_vaddr(vaddr):
        # a candidate must be mapped and start with the 64-bit Mach-O magic
        logger.debug("_is_kernel_load_vaddr()")
        if not _in_kernel_space(vaddr):
            return False
        data = vm.read_virtual_memory(vaddr, 0x4)
        return data and lldbagilityutils.u32(data) == MH_MAGIC_64
    @lldbagilityutils.indented(logger)
    def _get_debug_kernel_load_vaddr():
        logger.debug("_get_debug_kernel_load_vaddr()")
        # from the LLDB documentation: "If the debug flag is included in the
        # boot-args nvram setting, the kernel's load address will be noted
        # in the lowglo page at a fixed address"
        # https://github.com/llvm/llvm-project/blob/llvmorg-8.0.0/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp#L226
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/lowglobals.h#L54
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/pmap.c#L1175
        lgStext_vaddr = 0xFFFFFF8000002010
        data = vm.read_virtual_memory(lgStext_vaddr, 0x8)
        if data:
            vaddr = lldbagilityutils.u64(data)
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            else:
                # probably trying to attach to the target before lgStext is initialised
                return None
        else:
            return None
    @lldbagilityutils.indented(logger)
    def _search_kernel_load_vaddr(start_vaddr):
        logger.debug(
            "_search_kernel_load_vaddr(start_vaddr=0x{:016x})".format(start_vaddr)
        )
        # try to find the load address manually, one page at a time
        assert _in_kernel_space(start_vaddr)
        # round down to the containing page boundary
        vaddr = start_vaddr & ~(I386_PGBYTES - 1)
        while vaddr >= VM_MIN_KERNEL_ADDRESS:
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            vaddr -= I386_PGBYTES
        else:
            # reached the bottom of kernel space without finding the header
            raise AssertionError
    kernel_load_vaddr = _get_debug_kernel_load_vaddr() or _search_kernel_load_vaddr(
        vm.read_register("rip")
    )
    return kernel_load_vaddr
def _compute_kernel_slide(kernel_load_vaddr):
return kernel_load_vaddr - 0xFFFFFF8000200000
@lldbagilityutils.indented(logger)
def _find_kernel_version(vm):
    """Return the b"Darwin Kernel Version ..." string from the kernel image.

    Reads the kernel Mach-O from guest memory in 2 MiB chunks (up to a
    42 MiB cap) and regex-searches for the NUL-terminated version string.
    """
    logger.debug("_find_kernel_version()")
    kernel_macho = b""
    while len(kernel_macho) < 42 * 1024 * 1024:  # a reasonable upper bound?
        buf = b""
        while len(buf) < 2 * 1024 * 1024:
            vaddr = vm.kernel_load_vaddr + len(kernel_macho) + len(buf)
            buf += vm.read_virtual_memory(vaddr, I386_PGBYTES)
        kernel_macho += buf
        try:
            kernel_version = re.search(
                b"(?P<version>Darwin Kernel Version .+?X86_64)\0", kernel_macho
            ).group("version")
        except AttributeError:
            # re.search returned None: not found yet, keep reading
            continue
        else:
            return kernel_version
    else:
        # version string not found within the size cap
        raise AssertionError
class FDPSTUB(FDP):
    """FDP (Fast Debugging Protocol) backend for STUBVM.

    Maps the generic stub constant names used by STUBVM onto their FDP
    values.
    """
    NO_CR3 = FDP.FDP_NO_CR3
    SOFT_HBP = FDP.FDP_SOFTHBP
    CR_HBP = FDP.FDP_CRHBP
    VIRTUAL_ADDRESS = FDP.FDP_VIRTUAL_ADDRESS
    EXECUTE_BP = FDP.FDP_EXECUTE_BP
    WRITE_BP = FDP.FDP_WRITE_BP
    STATE_PAUSED = FDP.FDP_STATE_PAUSED
    STATE_BREAKPOINT_HIT = FDP.FDP_STATE_BREAKPOINT_HIT
    STATE_HARD_BREAKPOINT_HIT = FDP.FDP_STATE_HARD_BREAKPOINT_HIT
    CPU0 = FDP.FDP_CPU0
    def __init__(self, name):
        super(FDPSTUB, self).__init__(name)
        # FDP cannot fully handle SMP guests, so refuse them up front
        assert self.GetCpuCount() == 1, (
            "VMs with more than one CPU are not fully supported by FDP! "
            "Decrease the number of processors in the VM settings"
        )
class VMSNSTUB(VMSN):
    """VMware-snapshot backend for STUBVM.

    Provides the same constant names as FDPSTUB, with the values expected
    by the VMSN implementation.
    """
    NO_CR3 = 0
    SOFT_HBP = 2
    CR_HBP = 0
    VIRTUAL_ADDRESS = 0
    EXECUTE_BP = 0
    WRITE_BP = 0
    STATE_PAUSED = 1
    STATE_BREAKPOINT_HIT = 1
    STATE_HARD_BREAKPOINT_HIT = 0
    CPU0 = FDP.FDP_CPU0
    def __init__(self, name):
        super(VMSNSTUB, self).__init__(name)
| 37.255285 | 148 | 0.659043 |
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
NULL = 0x0
EFL_TF = 0x00000100
I386_PGBYTES = 4096
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
MH_MAGIC_64 = 0xFEEDFACF
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
EXC_I386_BPTFLT = 0x3
SIGINT = 0x2
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
class STUBVM(object):
def __init__(self, stub, name):
self.stub = stub(name)
self.name = name
self.lock = threading.RLock()
self._exception = None
self._soft_breakpoints = {}
self._interrupt_at_next_resume = False
self._singlestep_at_next_resume = False
self._kdp_vaddr = None
self._store_kdp_at_next_write_virtual_memory = False
self._return_incremented_at_next_read_register_rip = False
@lldbagilityutils.indented(logger)
def _continue_until_kernel_code(self):
logger.debug("_continue_until_kernel_code()")
if _in_kernel_space(self.read_register("rip")):
return
cr3bp_id = self.stub.SetBreakpoint(
self.stub.CR_HBP,
0x0,
self.stub.WRITE_BP,
self.stub.VIRTUAL_ADDRESS,
0x3,
0x1,
self.stub.NO_CR3,
)
assert 0 <= cr3bp_id <= 254
while True:
self.stub.Resume()
self.stub.WaitForStateChanged()
if _in_kernel_space(self.read_register("rip")):
logger.debug("> stopping: 0x{:016x}".format(self.read_register("rip")))
break
self.stub.SingleStep()
self.stub.UnsetBreakpoint(cr3bp_id)
@lldbagilityutils.indented(logger)
def _get_active_thread_vaddr(self):
logger.debug("_get_active_thread_vaddr()")
def _get_gs_base(self):
logger.debug("_get_gs_base()")
gs_base = self.read_msr64(MSR_IA32_GS_BASE)
logger.debug("> MSR_IA32_GS_BASE: 0x{:016x}".format(gs_base))
if not _in_kernel_space(gs_base):
gs_base = self.read_msr64(MSR_IA32_KERNEL_GS_BASE)
logger.debug("> MSR_IA32_KERNEL_GS_BASE: 0x{:016x}".format(gs_base))
return gs_base
cpu_data_vaddr = _get_gs_base(self)
logger.debug("> cpu_data_vaddr: 0x{:016x}".format(cpu_data_vaddr))
cpu_this = lldbagilityutils.u64(self.read_virtual_memory(cpu_data_vaddr, 0x8))
logger.debug("> cpu_this: 0x{:016x}".format(cpu_this))
assert cpu_data_vaddr == cpu_this
cpu_active_thread = lldbagilityutils.u64(
self.read_virtual_memory(cpu_data_vaddr + 0x8, 0x8)
)
logger.debug("> cpu_active_thread: 0x{:016x}".format(cpu_active_thread))
return cpu_active_thread
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def complete_attach(self):
logger.debug("complete_attach()")
self.halt()
self.unset_all_breakpoints()
self._continue_until_kernel_code()
assert _in_kernel_space(self.read_register("rip"))
self.kernel_cr3 = self.read_register("cr3")
logger.debug("> kernel_cr3: 0x{:x}".format(self.kernel_cr3))
self.kernel_load_vaddr = _find_kernel_load_vaddr(self)
logger.debug("> kernel_load_vaddr: 0x{:016x}".format(self.kernel_load_vaddr))
self.kernel_slide = _compute_kernel_slide(self.kernel_load_vaddr)
logger.debug("> kernel_slide: 0x{:x}".format(self.kernel_slide))
self.kernel_version = _find_kernel_version(self)
logger.debug("> kernel_version: {}".format(self.kernel_version))
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_num_cpus(self):
logger.debug("get_num_cpus()")
return self.stub.GetCpuCount()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_host_info(self):
logger.debug("get_host_info()")
cpus_mask = 0x0
for i in range(self.get_num_cpus()):
cpus_mask |= 1 << i
cpu_type = CPU_TYPE_X86_64
cpu_subtype = CPU_SUBTYPE_X86_ARCH1
return cpus_mask, cpu_type, cpu_subtype
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_kernel_version(self):
logger.debug("get_kernel_version()")
kernel_version = self.kernel_version
if b"stext" not in kernel_version:
logger.debug("> stext")
kernel_version += "; stext=0x{:016x}".format(self.kernel_load_vaddr)
return kernel_version
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_msr64(self, msr):
logger.debug("read_msr64(msr=0x{:x})".format(msr))
return self.stub.ReadMsr(msr, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_msr64(self, msr, val):
logger.debug("write_msr64(msr=0x{:x}, val=0x{:x})".format(msr, val))
self.stub.WriteMsr(self, msr, val, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_register(self, reg):
logger.debug("read_register(reg='{}')".format(reg))
val = getattr(self.stub, reg)
if reg == "rip" and self._return_incremented_at_next_read_register_rip:
logger.debug("> _return_incremented_at_next_read_register_rip")
self._return_incremented_at_next_read_register_rip = False
return val + 1
return val
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_registers(self, regs):
logger.debug("read_registers()")
return {reg: self.read_register(reg) for reg in regs}
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_register(self, reg, val):
logger.debug("write_register(reg='{}', val=0x{:x})".format(reg, val))
if reg == "rflags":
if val & EFL_TF:
logger.debug("> _singlestep_at_next_resume")
self._singlestep_at_next_resume = True
return
setattr(self.stub, reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_registers(self, regs):
logger.debug("write_registers()")
for reg, val in regs.items():
self.write_register(reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_virtual_memory(self, vaddr, nbytes):
logger.debug(
"read_virtual_memory(vaddr=0x{:016x}, nbytes=0x{:x})".format(vaddr, nbytes)
)
data = self.stub.ReadVirtualMemory(vaddr, nbytes)
if not data and not _in_kernel_space(self.read_register("rip")):
logger.debug("> using kernel pmap")
process_cr3 = self.read_register("cr3")
self.write_register("cr3", self.kernel_cr3)
data = self.stub.ReadVirtualMemory(vaddr, nbytes)
self.write_register("cr3", process_cr3)
if self._kdp_vaddr and vaddr <= self._kdp_vaddr <= vaddr + nbytes:
logger.debug("> fake kdp struct")
assert data is not None
saved_state = lldbagilityutils.p64(NULL)
kdp_thread = lldbagilityutils.p64(self._get_active_thread_vaddr())
fake_partial_kdp_struct = b"".join((saved_state, kdp_thread))
kdp_struct_offset = self._kdp_vaddr - vaddr
data = (
data[:kdp_struct_offset]
+ fake_partial_kdp_struct
+ data[kdp_struct_offset + len(fake_partial_kdp_struct) :]
)
data = data if data else b""
logger.debug("> len(data): 0x{:x}".format(len(data)))
return data
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_virtual_memory(self, vaddr, data):
logger.debug("write_virtual_memory(vaddr=0x{:016x}, data=...)".format(vaddr))
assert self.is_state_halted()
if self._store_kdp_at_next_write_virtual_memory:
logger.debug("> _store_kdp_at_next_write_virtual_memory")
self._store_kdp_at_next_write_virtual_memory = False
self._kdp_vaddr = vaddr
return
return self.stub.WriteVirtualMemory(vaddr, data)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_soft_exec_breakpoint(self, vaddr):
logger.debug("set_soft_exec_breakpoint(vaddr=0x{:016x})".format(vaddr))
assert self.is_state_halted()
id = 0x0
length = 0x1
self._soft_breakpoints[vaddr] = self.stub.SetBreakpoint(
self.stub.SOFT_HBP,
id,
self.stub.EXECUTE_BP,
self.stub.VIRTUAL_ADDRESS,
vaddr,
length,
self.stub.NO_CR3,
)
logger.debug("> bp id: {}".format(self._soft_breakpoints[vaddr]))
return self._soft_breakpoints[vaddr]
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_soft_breakpoint(self, vaddr):
logger.debug("unset_soft_breakpoint(vaddr=0x{:016x})")
assert self.is_state_halted()
try:
id = self._soft_breakpoints[vaddr]
except KeyError:
logger.debug("> no such breakpoint")
else:
del self._soft_breakpoints[vaddr]
return self.stub.UnsetBreakpoint(id)
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def set_hard_breakpoint(self, trigger, nreg, vaddr):
logger.debug(
"set_hard_exec_breakpoint(trigger='{}', nreg=0x{:016x}, vaddr=0x{:016x})".format(
trigger, nreg, vaddr
)
)
assert self.is_state_halted()
assert trigger in ("e", "w", "rw")
assert 0 <= nreg <= 3
trigger_bitshifts = {nreg: 16 + nreg * 4 for nreg in range(4)}
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
ctrl_mask &= ~(0b11 << trigger_bitshifts[nreg])
if trigger == "e":
trigger_entry = 0b00
elif trigger == "w":
trigger_entry = 0b01
elif trigger == "rw":
trigger_entry = 0b11
else:
raise NotImplementedError
ctrl_mask |= trigger_entry << trigger_bitshifts[nreg]
ctrl_mask |= 0b10 << status_bitshifts[nreg]
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), vaddr)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_hard_breakpoint(self, nreg):
logger.debug("unset_hard_breakpoint(nreg=0x{:016x})".format(nreg))
assert self.is_state_halted()
assert 0 <= nreg <= 3
status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
ctrl_mask = self.read_register("dr7")
ctrl_mask &= ~(0b11 << status_bitshifts[nreg])
logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
self.write_register("dr{}".format(nreg), 0x0)
self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_all_breakpoints(self):
logger.debug("unset_all_breakpoints()")
assert self.is_state_halted()
self._soft_breakpoints.clear()
self.stub.UnsetAllBreakpoint()
self.write_register("dr0", 0x0)
self.write_register("dr1", 0x0)
self.write_register("dr2", 0x0)
self.write_register("dr3", 0x0)
self.write_register("dr6", 0x0)
self.write_register("dr7", 0x0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def halt(self):
logger.debug("halt()")
self.stub.Pause()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt(self):
logger.debug("interrupt()")
self._exception = (EXC_SOFTWARE, EXC_SOFT_SIGNAL, SIGINT)
self.halt()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def single_step(self):
logger.debug("single_step()")
self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
self.stub.SingleStep()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def resume(self):
logger.debug("resume()")
if self._interrupt_at_next_resume:
logger.debug("> _interrupt_at_next_resume")
self._interrupt_at_next_resume = False
self.interrupt()
return
if self._singlestep_at_next_resume:
logger.debug("> _singlestep_at_next_resume")
self._singlestep_at_next_resume = False
self.single_step()
return
if self.is_breakpoint_hit():
logger.debug(
"> state breakpoint hit: 0x{:016x}".format(self.read_register("rip"))
)
self.stub.SingleStep()
self.stub.Resume()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_take_snapshot(self):
logger.debug("interrupt_and_take_snapshot()")
self.interrupt()
self.stub.Save()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_restore_last_snapshot(self):
logger.debug("interrupt_and_restore_last_snapshot()")
self.interrupt()
if self.stub.Restore():
self._soft_breakpoints.clear()
return True
else:
logger.debug("> could not restore")
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def state(self):
logger.debug("state()")
if self.is_breakpoint_hit():
logger.debug("> state breakpoint hit")
self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
self._return_incremented_at_next_read_register_rip = True
state = (self.stub.GetState(), self._exception)
self._exception = None
return state
@lldbagilityutils.synchronized
def is_state_changed(self):
return self.stub.GetStateChanged() or self._exception
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_state_halted(self):
logger.debug("is_state_halted()")
return self.stub.GetState() & self.stub.STATE_PAUSED
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_breakpoint_hit(self):
logger.debug("is_breakpoint_hit()")
return self.stub.GetState() & (
self.stub.STATE_BREAKPOINT_HIT | self.stub.STATE_HARD_BREAKPOINT_HIT
)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_at_next_resume(self):
logger.debug("interrupt_at_next_resume()")
self._interrupt_at_next_resume = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def store_kdp_at_next_write_virtual_memory(self):
logger.debug("store_kdp_at_next_write_virtual_memory()")
self._store_kdp_at_next_write_virtual_memory = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def abort_store_kdp_at_next_write_virtual_memory(self):
logger.debug("abort_store_kdp_at_next_write_virtual_memory()")
assert not self._kdp_vaddr
self._store_kdp_at_next_write_virtual_memory = False
def _in_kernel_space(addr):
return VM_MIN_KERNEL_ADDRESS <= addr <= VM_MAX_KERNEL_ADDRESS
@lldbagilityutils.indented(logger)
def _find_kernel_load_vaddr(vm):
logger.debug("_find_kernel_load_vaddr()")
assert _in_kernel_space(vm.read_register("rip"))
@lldbagilityutils.indented(logger)
def _is_kernel_load_vaddr(vaddr):
logger.debug("_is_kernel_load_vaddr()")
if not _in_kernel_space(vaddr):
return False
data = vm.read_virtual_memory(vaddr, 0x4)
return data and lldbagilityutils.u32(data) == MH_MAGIC_64
@lldbagilityutils.indented(logger)
def _get_debug_kernel_load_vaddr():
logger.debug("_get_debug_kernel_load_vaddr()")
# boot-args nvram setting, the kernel's load address will be noted
# in the lowglo page at a fixed address"
# https://github.com/llvm/llvm-project/blob/llvmorg-8.0.0/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp#L226
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/lowglobals.h#L54
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/pmap.c#L1175
lgStext_vaddr = 0xFFFFFF8000002010
data = vm.read_virtual_memory(lgStext_vaddr, 0x8)
if data:
vaddr = lldbagilityutils.u64(data)
if _is_kernel_load_vaddr(vaddr):
return vaddr
else:
# probably trying to attach to the target before lgStext is initialised
return None
else:
return None
@lldbagilityutils.indented(logger)
def _search_kernel_load_vaddr(start_vaddr):
logger.debug(
"_search_kernel_load_vaddr(start_vaddr=0x{:016x})".format(start_vaddr)
)
# try to find the load address manually
assert _in_kernel_space(start_vaddr)
vaddr = start_vaddr & ~(I386_PGBYTES - 1)
while vaddr >= VM_MIN_KERNEL_ADDRESS:
if _is_kernel_load_vaddr(vaddr):
return vaddr
vaddr -= I386_PGBYTES
else:
raise AssertionError
kernel_load_vaddr = _get_debug_kernel_load_vaddr() or _search_kernel_load_vaddr(
vm.read_register("rip")
)
return kernel_load_vaddr
def _compute_kernel_slide(kernel_load_vaddr):
return kernel_load_vaddr - 0xFFFFFF8000200000
@lldbagilityutils.indented(logger)
def _find_kernel_version(vm):
logger.debug("_find_kernel_version()")
kernel_macho = b""
while len(kernel_macho) < 42 * 1024 * 1024: # a reasonable upper bound?
buf = b""
while len(buf) < 2 * 1024 * 1024:
vaddr = vm.kernel_load_vaddr + len(kernel_macho) + len(buf)
buf += vm.read_virtual_memory(vaddr, I386_PGBYTES)
kernel_macho += buf
try:
kernel_version = re.search(
b"(?P<version>Darwin Kernel Version .+?X86_64)\0", kernel_macho
).group("version")
except AttributeError:
continue
else:
return kernel_version
else:
raise AssertionError
class FDPSTUB(FDP):
NO_CR3 = FDP.FDP_NO_CR3
SOFT_HBP = FDP.FDP_SOFTHBP
CR_HBP = FDP.FDP_CRHBP
VIRTUAL_ADDRESS = FDP.FDP_VIRTUAL_ADDRESS
EXECUTE_BP = FDP.FDP_EXECUTE_BP
WRITE_BP = FDP.FDP_WRITE_BP
STATE_PAUSED = FDP.FDP_STATE_PAUSED
STATE_BREAKPOINT_HIT = FDP.FDP_STATE_BREAKPOINT_HIT
STATE_HARD_BREAKPOINT_HIT = FDP.FDP_STATE_HARD_BREAKPOINT_HIT
CPU0 = FDP.FDP_CPU0
def __init__(self, name):
super(FDPSTUB, self).__init__(name)
assert self.GetCpuCount() == 1, (
"VMs with more than one CPU are not fully supported by FDP! "
"Decrease the number of processors in the VM settings"
)
class VMSNSTUB(VMSN):
NO_CR3 = 0
SOFT_HBP = 2
CR_HBP = 0
VIRTUAL_ADDRESS = 0
EXECUTE_BP = 0
WRITE_BP = 0
STATE_PAUSED = 1
STATE_BREAKPOINT_HIT = 1
STATE_HARD_BREAKPOINT_HIT = 0
CPU0 = FDP.FDP_CPU0
def __init__(self, name):
super(VMSNSTUB, self).__init__(name)
| true | true |
f72f83101ec2c8b57466741fe18334c3e7fb24c9 | 3,882 | py | Python | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | null | null | null | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | null | null | null | dewars/zone6.py | drnasmith/flask-ispyb-logistics | 930b2707fc1d679607fc2d8d3d0895edfc944af6 | [
"Apache-2.0"
] | 1 | 2018-10-31T13:53:27.000Z | 2018-10-31T13:53:27.000Z | # System imports
from datetime import datetime
import time
import json
import logging
# Package imports
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
# Local imports
import common
# Blueprint for all zone6 dewar-logistics routes, mounted under /zone6
api = Blueprint('zone6', __name__, url_prefix='/zone6')
# Storage locations are named "<prefix>-<suffix>", e.g. "RACK-A1"
rack_prefix = 'RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
                 'B1', 'B2', 'B3', 'B4',
                 'C1', 'C2', 'C3', 'C4',
                 'D1', 'D2', 'D3', 'D4',
                 'E1', 'E2', 'E3', 'E4',
                 'F1', 'F2', 'F3', 'F4',
                 'G1', 'G2', 'G3', 'G4',
                 'H1', 'H2', 'H3', 'H4',
                 'J1', 'J2', 'J3', 'J4',
                 'K1', 'K2', 'K3', 'K4',
                 'L1', 'L2', 'L3', 'L4',
                 'M1', 'M2', 'M3', 'M4',
                 'N1', 'N2', 'N3', 'N4',
                 'P1', 'P2', 'P3', 'P4',
                 'Q1', 'Q2', 'Q3', 'Q4',
                 'R1', 'R2', 'R3', 'R4',
                 'X1', 'X2', 'X3', 'X4',
                 'X5', 'X6', 'X7', 'X8',
                 'X9', 'X10', 'X11', 'X12',
                 ]
rack_locations = ['-'.join([rack_prefix, suffix])
                  for suffix in rack_suffixes]
# Beamlines served by this zone; each becomes a "BEAMLINE-<NAME>" location
beamlines = ['i03',
             'i04',
             'i04-1',
             'i24',
             ]
beamline_prefix = 'BEAMLINE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
# Additional non-beamline destinations a dewar can be moved to
beamline_locations.extend(['USER-COLLECTION',
                           'STORES-OUT',
                           'ZONE-6-STORE',
                           ])
"""
App to demonstrate use of vuejs
"""
@api.route("/vdewars")
def vdewars():
return render_template('vue-dewars.html', title="Zone6 Dewars", api_prefix="zone6", rack_locations=rack_locations)
@api.route('/')
def index():
    """Render the main dewar-management page for zone 6."""
    page_context = dict(
        title="zone6 Dewar Management",
        rack_locations=rack_locations,
        rack_suffixes=rack_suffixes,
        rack_prefix=rack_prefix,
        beamlines=beamline_locations,
        api_prefix="zone6",
    )
    return render_template('dewars.html', **page_context)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
    """
    API route for dewar management.

    GET    -> list the dewars currently assigned to any zone-6 rack location
    POST   -> move the dewar with the posted barcode to the posted location
    DELETE -> clear whichever dewar occupies the given location
    """
    method = request.method
    if method == "GET":
        # At most one dewar per rack location; simple controller lookup.
        result, status_code = common.find_dewars_by_location(rack_locations)
    elif method == "POST":
        result, status_code = common.update_dewar_location(
            request.form['barcode'], request.form['location'])
    elif method == "DELETE":
        if 'location' in request.form:
            target = request.form['location']
        else:
            # No form payload (e.g. axios request) - fall back to URL params.
            target = request.args.get('location')
        result, status_code = common.remove_dewar_from_location(target)
    else:
        status_code = 501
        result = {'location': '',
                  'barcode': '',
                  'status': 'fail',
                  'reason': 'Method/route not implemented yet'}
    return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
    """
    Look up dewars matching the facility code passed as ?fc=DLS-MS-1234.

    The controller's status code is returned unchanged so the front end
    can show meaningful feedback.
    """
    facility_code = request.args.get('fc')
    result, status_code = common.find_dewar(facility_code)
    return jsonify(result), status_code
| 29.409091 | 118 | 0.516486 |
from datetime import datetime
import time
import json
import logging
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
import common
api = Blueprint('zone6', __name__, url_prefix='/zone6')
rack_prefix = 'RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4',
'C1', 'C2', 'C3', 'C4',
'D1', 'D2', 'D3', 'D4',
'E1', 'E2', 'E3', 'E4',
'F1', 'F2', 'F3', 'F4',
'G1', 'G2', 'G3', 'G4',
'H1', 'H2', 'H3', 'H4',
'J1', 'J2', 'J3', 'J4',
'K1', 'K2', 'K3', 'K4',
'L1', 'L2', 'L3', 'L4',
'M1', 'M2', 'M3', 'M4',
'N1', 'N2', 'N3', 'N4',
'P1', 'P2', 'P3', 'P4',
'Q1', 'Q2', 'Q3', 'Q4',
'R1', 'R2', 'R3', 'R4',
'X1', 'X2', 'X3', 'X4',
'X5', 'X6', 'X7', 'X8',
'X9', 'X10', 'X11', 'X12',
]
rack_locations = ['-'.join([rack_prefix, suffix])
for suffix in rack_suffixes]
beamlines = ['i03',
'i04',
'i04-1',
'i24',
]
beamline_prefix = 'BEAMLINE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
beamline_locations.extend(['USER-COLLECTION',
'STORES-OUT',
'ZONE-6-STORE',
])
@api.route("/vdewars")
def vdewars():
return render_template('vue-dewars.html', title="Zone6 Dewars", api_prefix="zone6", rack_locations=rack_locations)
@api.route('/')
def index():
return render_template('dewars.html',
title="zone6 Dewar Management",
rack_locations=rack_locations,
rack_suffixes=rack_suffixes,
rack_prefix=rack_prefix,
beamlines=beamline_locations,
api_prefix="zone6",
)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
result = {}
status_code = 200
if request.method == "GET":
result, status_code = common.find_dewars_by_location(rack_locations)
elif request.method == "POST":
location = request.form['location']
barcode = request.form['barcode']
result, status_code = common.update_dewar_location(barcode, location)
elif request.method == "DELETE":
try:
location = request.form['location']
except KeyError:
location = request.args.get('location')
result, status_code = common.remove_dewar_from_location(location)
else:
result = {'location': '',
'barcode': '',
'status': 'fail',
'reason': 'Method/route not implemented yet'}
status_code = 501
return jsonify(result), status_code
@api.route('/dewars/find', methods=["GET"])
def find():
facilitycode = request.args.get('fc')
result, status_code = common.find_dewar(facilitycode)
return jsonify(result), status_code
| true | true |
f72f834ad520c083812e4cfa1295c11679786a0f | 7,772 | py | Python | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | 1 | 2019-04-14T16:28:25.000Z | 2019-04-14T16:28:25.000Z | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | null | null | null | video2html.py | stevevai/video2chars | b90a2280d184083a746087d1c12d638afa3da5bb | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import json
import os
import cv2
import numpy as np
from time import time
import webbrowser
play_chars_js = '''
let i = 0;
window.setInterval(function(){
let img = frames[i++];
let html = ""
for(let line of img){
for(let char of line){
let [[r,g,b], ch] = char;
line += '<span style="color:rgb(' + r + ', ' + g + ', '+ b + ');">'+ ch + '</span>'
}
html += "<br>"
}
document.body.innerHTML = html;
}, 1000/fps);
'''
class VideoToHtml:
    """Convert a video into an HTML "character art" animation.

    Frames are sampled from the source video at ``fps_for_html`` frames per
    second and each pixel is rendered as an HTML <span>.  Characters are
    picked from ``pixels`` by brightness (lower gray value -> lighter char).
    """

    # Character ramp ordered light -> dark (index grows with gray value).
    pixels = ".,:!+mw1I?2354KE%8B&$WM@#"

    def __init__(self, video_path, fps_for_html=8, time_interval=None):
        """
        :param video_path: path of the video file
        :param fps_for_html: frame rate of the generated HTML animation
        :param time_interval: optional (start, end) clip range, in seconds
        """
        # Open a VideoCapture on the given file.  NOTE: cv2 returns all of
        # these properties as floats.
        self.cap = cv2.VideoCapture(video_path)
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.frames_count_all = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.resize_width = None
        self.resize_height = None
        self.frames_count = 0
        self.fps_for_html = fps_for_html
        self.time_interval = time_interval

    def set_width(self, width):
        """Shrink output to *width*, keeping the aspect ratio.

        Only downscaling is allowed; returns False when *width* would
        enlarge the frame, True otherwise.
        """
        if width >= self.width:
            return False
        self.resize_width = width
        self.resize_height = int(self.height * (width / self.width))
        return True

    def set_height(self, height):
        """Shrink output to *height*, keeping the aspect ratio.

        Only downscaling is allowed; returns False when *height* would
        enlarge the frame, True otherwise.
        """
        if height >= self.height:
            return False
        self.resize_height = height
        self.resize_width = int(self.width * (height / self.height))
        return True

    def resize(self, img):
        """Resize *img* to the configured size (shrink only, never enlarge)."""
        if not self.resize_width or not self.resize_height:
            # No target size configured: keep the frame as-is.
            return img
        size = (self.resize_width, self.resize_height)
        return cv2.resize(img, size, interpolation=cv2.INTER_AREA)

    def get_img_by_pos(self, pos):
        """Seek to frame index *pos* and read it.

        :return: (ret, frame) - ret is True when a frame was read, frame is
                 a numpy.ndarray image.
        """
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
        ret, frame = self.cap.read()
        return ret, frame

    def get_frame_pos(self):
        """Return the frame indices to sample (a lazy range object)."""
        # Guard against fps_for_html exceeding the source fps, which would
        # otherwise produce step 0 and make range() raise ValueError.
        step = max(1, int(self.fps / self.fps_for_html))
        if not self.time_interval:
            self.frames_count = int(self.frames_count_all / step)  # update count
            # Bug fix: cv2 reports the frame count as a float and range()
            # requires integer arguments.
            return range(0, int(self.frames_count_all), step)
        # A (start, end) clip was requested: convert seconds to frame indices.
        start, end = self.time_interval
        pos_start = int(self.fps * start)
        pos_end = int(self.fps * end)
        self.frames_count = int((pos_end - pos_start) / step)  # update count
        return range(pos_start, pos_end, step)

    def get_imgs(self):
        """Yield the sampled (and resized) frames; release the capture at the end."""
        assert self.cap.isOpened()
        for pos in self.get_frame_pos():
            ret, frame = self.get_img_by_pos(pos)
            if not ret:
                print("读取失败,跳出循环")
                break
            yield self.resize(frame)  # lazy evaluation
        # Free the capture once iteration finishes.
        self.cap.release()

    def get_char(self, gray):
        """Map a gray value (0-255) onto a character from the ramp."""
        percent = gray / 255  # normalize to 0-1
        index = int(percent * (len(self.pixels) - 1))
        return self.pixels[index]

    def get_pix_html(self, r, g, b, gray):
        """Render one pixel as a colored character span.

        Bug fix: the original computed the character but returned nothing.
        """
        char = self.get_char(gray)
        return f'<span style="color:rgb({r}, {g}, {b})">{char}</span>'

    def get_html_pic(self, img, img_id):
        """
        Convert *img* into one frame of the HTML output.

        :return: a <div> string; only the first frame (img_id == 0) is visible.
        """
        hidden = 'hidden="hidden"' if img_id != 0 else ''
        html_pic = [f'<div id="f-{img_id}" {hidden}>']
        # numpy stores images as (height, width, channels) - note the order.
        height, width, channel = img.shape
        # (The original also computed a grayscale copy here but never used
        # it; dropped.  Each pixel is a blank span colored via background.)
        for y in range(height):
            for x in range(width):
                r, g, b = img[y][x]
                pixel_char = f'<span style="background-color:rgb({r}, {g}, {b})"> </span>'
                html_pic.append(pixel_char)
            html_pic.append("<br>")  # line break after each pixel row
        html_pic.append('</div>')
        return "".join(html_pic)

    def write_html(self, file_name):
        """Render the whole video into *file_name* as stacked <div> frames."""
        time_start = time()
        with open(file_name, 'w') as html:
            # A monospace font is mandatory for character art; line-height
            # 0.75 compensates for the font's width/height ratio.
            html.write('<!DOCTYPE html><html>'
                       f'<script>window.fps = {self.fps_for_html};</script>'
                       '<script src="play_chars.js"></script>'
                       '<body style="font-family: monospace;font-size: xx-small;'
                       'text-align: center;line-height: 0.75;font-weight: bolder;">'
                       )
            try:
                i = 0
                for img in self.get_imgs():
                    html.write(self.get_html_pic(img, i))
                    # Bug fix: the original tested `if i % 30:`, which printed
                    # progress on 29 of every 30 frames; report every 30th.
                    if i % 30 == 0:
                        print(f"进度:{i/self.frames_count * 100:.2f}%, 已用时:{time() - time_start:.2f}")
                    i += 1
            finally:
                html.write("</body>"
                           "</html>")

    def get_json_pic(self, img):
        """Experimental: encode *img* as JSON rows of [[r, g, b], char]."""
        json_pic = []
        # numpy stores images as (height, width, channels) - note the order.
        height, width, channel = img.shape
        # Grayscale copy used to pick a character per pixel.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for y in range(height):
            line = []
            for x in range(width):
                r, g, b = img[y][x]
                gray = img_gray[y][x]
                char = self.get_char(gray)
                line.append([[int(r), int(g), int(b)], char])
            json_pic.append(line)
        return json.dumps(json_pic)

    def write_html_with_json(self, file_name):
        """Experimental: render the video as JSON frames replayed by play_chars_js."""
        with open(file_name, 'w') as html:
            # A monospace font is mandatory for character art.
            html.write('<!DOCTYPE html>'
                       '<html>'
                       '<body style="font-family: monospace;font-size: xx-small;text-align: center;line-height: 0.7;">'
                       '</body>'
                       '<script>'
                       'var frames=[\n')
            try:
                i = 0
                for img in self.get_imgs():
                    html.write(f"{self.get_json_pic(img)},\n")
                    # Bug fix: `if i % 20:` printed on 19 of every 20 frames.
                    if i % 20 == 0:
                        print(f"进度:{i/self.frames_count * 100:.2f}%")
                    i += 1
            finally:
                html.write('];'
                           f'var fps={self.fps_for_html};'
                           f'{play_chars_js}'
                           '</script>'
                           '</html>')
def get_file_name(file_path):
    """Return the file name from *file_path* without directory or extension."""
    base_name = os.path.basename(file_path)
    stem, _extension = os.path.splitext(base_name)
    return stem
def main():
    """Demo entry point: set up a converter for a sample clip and open the page."""
    # Path of the source video - replace with your own file.
    source = "BadApple.mp4"
    converter = VideoToHtml(source, fps_for_html=5, time_interval=(20, 30))
    converter.set_width(100)
    html_name = "output/" + get_file_name(source) + ".html"
    # converter.write_html(html_name)
    webbrowser.open(html_name)
if __name__ == "__main__":
main()
| 28.15942 | 119 | 0.523289 |
import json
import os
import cv2
import numpy as np
from time import time
import webbrowser
play_chars_js = '''
let i = 0;
window.setInterval(function(){
let img = frames[i++];
let html = ""
for(let line of img){
for(let char of line){
let [[r,g,b], ch] = char;
line += '<span style="color:rgb(' + r + ', ' + g + ', '+ b + ');">'+ ch + '</span>'
}
html += "<br>"
}
document.body.innerHTML = html;
}, 1000/fps);
'''
class VideoToHtml:
pixels = ".,:!+mw1I?2354KE%8B&$WM@#"
def __init__(self, video_path, fps_for_html=8, time_interval=None):
self.cap = cv2.VideoCapture(video_path)
self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.frames_count_all = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.resize_width = None
self.resize_height = None
self.frames_count = 0
self.fps_for_html = fps_for_html
self.time_interval = time_interval
def set_width(self, width):
if width >= self.width:
return False
else:
self.resize_width = width
self.resize_height = int(self.height * (width / self.width))
return True
def set_height(self, height):
if height >= self.height:
return False
else:
self.resize_height = height
self.resize_width = int(self.width * (height / self.height))
return True
def resize(self, img):
if not self.resize_width or not self.resize_height:
return img
else:
size = (self.resize_width, self.resize_height)
return cv2.resize(img, size, interpolation=cv2.INTER_AREA)
def get_img_by_pos(self, pos):
self.cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
ret, frame = self.cap.read()
return ret, frame
def get_frame_pos(self):
step = int(self.fps / self.fps_for_html)
if not self.time_interval:
self.frames_count = int(self.frames_count_all / step)
return range(0, self.frames_count_all, step)
start, end = self.time_interval
pos_start = int(self.fps * start)
pos_end = int(self.fps * end)
self.frames_count = int((pos_end - pos_start) / step)
return range(pos_start, pos_end, step)
def get_imgs(self):
assert self.cap.isOpened()
for i in self.get_frame_pos():
ret, frame = self.get_img_by_pos(i)
if not ret:
print("读取失败,跳出循环")
break
yield self.resize(frame)
self.cap.release()
def get_char(self, gray):
percent = gray / 255
index = int(percent * (len(self.pixels) - 1))
return self.pixels[index]
def get_pix_html(self, r, g, b, gray):
char = self.get_char(gray)
def get_html_pic(self, img, img_id):
hidden = 'hidden="hidden"' if img_id != 0 else ''
html_pic = [f'<div id="f-{img_id}" {hidden}>']
height, width, channel = img.shape
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for y in range(height):
for x in range(width):
r, g, b = img[y][x]
gray = img_gray[y][x]
pixel_char = f'<span style="background-color:rgb({r}, {g}, {b})"> </span>'
html_pic.append(pixel_char)
html_pic.append("<br>")
html_pic.append('</div>')
return "".join(html_pic)
def write_html(self, file_name):
time_start = time()
with open(file_name, 'w') as html:
html.write('<!DOCTYPE html><html>'
f'<script>window.fps = {self.fps_for_html};</script>'
'<script src="play_chars.js"></script>'
'<body style="font-family: monospace;font-size: xx-small;'
'text-align: center;line-height: 0.75;font-weight: bolder;">'
)
try:
i = 0
for img in self.get_imgs():
html_pic = self.get_html_pic(img, i)
html.write(html_pic)
if i % 30:
print(f"进度:{i/self.frames_count * 100:.2f}%, 已用时:{time() - time_start:.2f}")
i += 1
finally:
html.write("</body>"
"</html>")
def get_json_pic(self, img):
json_pic = []
height, width, channel = img.shape
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for y in range(height):
line = []
for x in range(width):
r, g, b = img[y][x]
gray = img_gray[y][x]
char = self.get_char(gray)
line.append([[int(r), int(g), int(b)], char])
json_pic.append(line)
return json.dumps(json_pic)
def write_html_with_json(self, file_name):
with open(file_name, 'w') as html:
html.write('<!DOCTYPE html>'
'<html>'
'<body style="font-family: monospace;font-size: xx-small;text-align: center;line-height: 0.7;">'
'</body>'
'<script>'
'var frames=[\n')
try:
i = 0
for img in self.get_imgs():
json_pic = self.get_json_pic(img)
html.write(f"{json_pic},\n")
if i % 20:
print(f"进度:{i/self.frames_count * 100:.2f}%")
i += 1
finally:
html.write('];'
f'var fps={self.fps_for_html};'
f'{play_chars_js}'
'</script>'
'</html>')
def get_file_name(file_path):
path, file_name_with_extension = os.path.split(file_path)
file_name, file_extension = os.path.splitext(file_name_with_extension)
return file_name
def main():
video_path = "BadApple.mp4"
video2html = VideoToHtml(video_path, fps_for_html=5, time_interval=(20, 30))
video2html.set_width(100)
html_name = "output/" + get_file_name(video_path) + ".html"
webbrowser.open(html_name)
if __name__ == "__main__":
main()
| true | true |
f72f8513b66880f4db7e38933741e9e12297e8ac | 142 | py | Python | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | data_parse/get_banks_NY.py | hhalim/TargetBanks | 8febd7300f3b01e92641e0f63355d3f66bfe674c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from parse_banks import parse_and_insert
#STATE = TX
# Saved FDIC Summary-of-Deposits page for New York state.
file = '../data/NY_SOD_FDIC.html'
# Parse the saved page and insert the extracted bank rows, tagged 'NY'.
parse_and_insert(file, 'NY')
| 17.75 | 40 | 0.697183 |
from parse_banks import parse_and_insert
file = '../data/NY_SOD_FDIC.html'
parse_and_insert(file, 'NY')
| true | true |
f72f865c6f3e0b0d982fd0cd8b5149aa4a6ebc41 | 12,464 | py | Python | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | 2 | 2018-06-27T02:28:03.000Z | 2020-12-08T19:33:44.000Z | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | null | null | null | tools/gen_esp_err_to_name.py | jkoelker/esp-idf | 4b91c82cc447640e5b61407e810f1d6f3eabd233 | [
"Apache-2.0"
] | 1 | 2021-01-09T16:19:22.000Z | 2021-01-09T16:19:22.000Z | #!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import re
import fnmatch
import string
import collections
import textwrap
# Files that should not be parsed at all (listed relative to IDF_PATH).
ignore_files = [ 'components/mdns/test_afl_fuzz_host/esp32_compat.h' ]
# Macros defined in these headers win when several macros collide on a code.
priority_headers = [ 'components/esp32/include/esp_err.h' ]
err_dict = collections.defaultdict(list)  # error code -> list of ErrItem sharing it
rev_err_dict = dict()  # error name -> numeric error code
unproc_list = list()  # deferred items whose BASE code was unknown when first seen
class ErrItem:
    """
    Contains information about the error:
    - name - error string
    - file - relative path inside the IDF project to the file which defines this error
    - comment - (optional) comment for the error
    - rel_str - (optional) error string which is a base for the error
    - rel_off - (optional) offset in relation to the base error
    """
    def __init__(self, name, file, comment, rel_str = "", rel_off = 0):
        self.name = name
        self.file = file
        self.comment = comment
        self.rel_str = rel_str
        self.rel_off = rel_off
    def __str__(self):
        # Human-readable form used in collision warnings and debug output.
        ret = self.name + " from " + self.file
        if (self.rel_str != ""):
            ret += " is (" + self.rel_str + " + " + str(self.rel_off) + ")"
        if self.comment != "":
            ret += " // " + self.comment
        return ret
    def __cmp__(self, other):
        # Python 2 style three-way compare, used when sorting the items that
        # collide on one error code.  NOTE(review): __cmp__ is ignored by
        # Python 3; this script relies on Python 2 sort semantics.
        # 1) Items defined in priority_headers sort first ...
        if self.file in priority_headers and other.file not in priority_headers:
            return -1
        elif self.file not in priority_headers and other.file in priority_headers:
            return 1
        base = "_BASE"
        # 2) ... then, within the same file, *_BASE macros sort after the
        #    plain error macros ...
        if self.file == other.file:
            if self.name.endswith(base) and not(other.name.endswith(base)):
                return 1
            elif not(self.name.endswith(base)) and other.name.endswith(base):
                return -1
        # 3) ... and everything else falls back to lexicographic order of
        #    the concatenated (file + name) key.
        self_key = self.file + self.name
        other_key = other.file + other.name
        if self_key < other_key:
            return -1
        elif self_key > other_key:
            return 1
        else:
            return 0
class InputError(RuntimeError):
    """Raised for lines of an input file that cannot be processed."""
    def __init__(self, p, e):
        # Prefix the message with the offending path for easy triage.
        message = "{}: {}".format(p, e)
        super(InputError, self).__init__(message)
def process(line, idf_path):
    """
    Process one ``#define`` line of text from file *idf_path* (relative to
    the IDF project).

    Errors whose numeric value is known immediately are stored in the
    global ``err_dict``/``rev_err_dict``; errors expressed relative to a
    BASE macro whose value is not known yet are deferred into the global
    ``unproc_list`` (resolved later by process_remaining_errors).

    Raises InputError for unparsable lines and for defines found in .c
    files (which could not be #included later).
    """
    if idf_path.endswith(".c"):
        # We would not try to include a C file
        raise InputError(idf_path, "This line should be in a header file: %s" % line)
    words = re.split(r' +', line, 2)
    # words[1] is the error name
    # words[2] is the rest of the line (value, base + value, comment)
    # Bug fix: words[2] is accessed below, so three fields are required;
    # the old "< 2" check let two-word lines through to an IndexError.
    if len(words) < 3:
        raise InputError(idf_path, "Error at line %s" % line)
    # (Bug fix: the original blanked `line` here, which made every error
    # message below report an empty line.)
    todo_str = words[2]
    comment = ""
    # identify a possible trailing /*!< ... */ comment
    m = re.search(r'/\*!<(.+?(?=\*/))', todo_str)
    if m:
        # Portability fix: str.strip() instead of the Python2-only
        # string.strip() helper; behavior is identical.
        comment = m.group(1).strip()
        todo_str = todo_str[:m.start()].strip()  # keep just the part before the comment
    # identify possible parentheses ()
    m = re.search(r'\((.+)\)', todo_str)
    if m:
        todo_str = m.group(1)  # keep what is inside the parentheses
    # identify BASE error code, e.g. from the form BASE + 0x01
    m = re.search(r'\s*(\w+)\s*\+(.+)', todo_str)
    if m:
        related = m.group(1)  # BASE
        todo_str = m.group(2)  # keep and process only what is after "BASE +"
    # try to match a hexadecimal number
    m = re.search(r'0x([0-9A-Fa-f]+)', todo_str)
    if m:
        num = int(m.group(1), 16)
    else:
        # Try to match a decimal number. Negative value is possible for some numbers, e.g. ESP_FAIL
        m = re.search(r'(-?[0-9]+)', todo_str)
        if m:
            num = int(m.group(1), 10)
        elif re.match(r'\w+', todo_str):
            # It is possible that there is no number, e.g. #define ERROR BASE
            related = todo_str  # BASE error
            num = 0  # (BASE + 0)
        else:
            raise InputError(idf_path, "Cannot parse line %s" % line)
    try:
        related
    except NameError:
        # `related` was never bound: the value is absolute and can be
        # stored immediately.
        err_dict[num].append(ErrItem(words[1], idf_path, comment))
        rev_err_dict[words[1]] = num
    else:
        # Defer: the real code is BASE + offset and BASE may not be known yet.
        unproc_list.append(ErrItem(words[1], idf_path, comment, related, num))
def process_remaining_errors():
    """
    Create errors which could not be processed before because the error code
    for the BASE error code wasn't known.
    This works for sure only if there is no multiple-time dependency, e.g.:
        #define BASE1 0
        #define BASE2 (BASE1 + 10)
        #define ERROR (BASE2 + 10) - processed successfully only after BASE2
    """
    for item in unproc_list:
        if item.rel_str not in rev_err_dict:
            print(item.rel_str + " referenced by " + item.name + " in " + item.file + " is unknown")
            continue
        # Resolve the deferred code as BASE value + stored offset.
        resolved = rev_err_dict[item.rel_str] + item.rel_off
        err_dict[resolved].append(ErrItem(item.name, item.file, item.comment))
        rev_err_dict[item.name] = resolved
    # All deferred items handled (or reported) - clear the queue in place.
    del unproc_list[:]
def path_to_include(path):
    """
    Turn *path* (relative to the IDF project) into the form usable in a C
    ``#include``.  Using just the filename does not always work because some
    headers are deeper in the tree; this finds an ``include`` parent
    directory and keeps its subdirectories, e.g.
    "components/XY/include/esp32/file.h" becomes "esp32/file.h".
    This only works when the subdirectories are inside an "include"
    directory; other special cases need to be handled here when the
    compiler reports an unknown header file.
    """
    # Portability fix: str.split() instead of the Python2-only
    # string.split() helper; behavior is identical.
    spl_path = path.split(os.sep)
    try:
        i = spl_path.index('include')
    except ValueError:
        # No "include" component in the path -> fall back to the bare filename.
        return os.path.basename(path)
    else:
        # Subdirectories and filename below the "include" directory.
        return os.sep.join(spl_path[i + 1:])
def print_warning(error_list, error_code):
    """Report a set of error macros that all resolve to the same numeric code."""
    print("[WARNING] The following errors have the same code (%d):" % error_code)
    for item in error_list:
        print(" " + str(item))
def max_string_width():
    """Return the length of the longest macro name currently in err_dict."""
    longest = 0
    for items in err_dict.values():
        for entry in items:
            longest = max(longest, len(entry.name))
    return longest
def generate_c_output(fin, fout):
    """
    Writes the output to fout based on the error dictionary err_dict and
    template file fin.  Lines matching the placeholders @COMMENT@,
    @HEADERS@ and @ERROR_ITEMS@ are expanded; all other template lines
    are copied through unchanged.
    """
    # make includes unique by using a set
    includes = set()
    for k in err_dict.keys():
        for e in err_dict[k]:
            includes.add(path_to_include(e.file))
    # The order in a set in non-deterministic therefore it could happen that the
    # include order will be different in other machines and false difference
    # in the output file could be reported. In order to avoid this, the items
    # are sorted in a list.
    include_list = list(includes)
    include_list.sort()
    max_width = max_string_width() + 17 + 1 # length of " ERR_TBL_IT()," with spaces is 17
    max_decdig = max(len(str(k)) for k in err_dict.keys())
    for line in fin:
        if re.match(r'@COMMENT@', line):
            fout.write("//Do not edit this file because it is autogenerated by " + os.path.basename(__file__) + "\n")
        elif re.match(r'@HEADERS@', line):
            # Guard each include so headers of disabled components don't break the build.
            for i in include_list:
                fout.write("#if __has_include(\"" + i + "\")\n#include \"" + i + "\"\n#endif\n")
        elif re.match(r'@ERROR_ITEMS@', line):
            last_file = ""
            for k in sorted(err_dict.keys()):
                if len(err_dict[k]) > 1:
                    # Several macros share this code: sort them (priority
                    # headers first) and warn on stdout.
                    err_dict[k].sort()
                    print_warning(err_dict[k], k)
                for e in err_dict[k]:
                    # Emit a file-name comment whenever the source file changes.
                    if e.file != last_file:
                        last_file = e.file
                        fout.write(" // %s\n" % last_file)
                    table_line = (" ERR_TBL_IT(" + e.name + "), ").ljust(max_width) + "/* " + str(k).rjust(max_decdig)
                    fout.write("# ifdef %s\n" % e.name)
                    fout.write(table_line)
                    hexnum_length = 0
                    if k > 0: # negative number and zero should be only ESP_FAIL and ESP_OK
                        hexnum = " 0x%x" % k
                        hexnum_length = len(hexnum)
                        fout.write(hexnum)
                    if e.comment != "":
                        if len(e.comment) < 50:
                            fout.write(" %s" % e.comment)
                        else:
                            # Long comments are wrapped and aligned under the table line.
                            indent = " " * (len(table_line) + hexnum_length + 1)
                            w = textwrap.wrap(e.comment, width=120, initial_indent = indent, subsequent_indent = indent)
                            # this couldn't be done with initial_indent because there is no initial_width option
                            fout.write(" %s" % w[0].strip())
                            for i in range(1, len(w)):
                                fout.write("\n%s" % w[i])
                    fout.write(" */\n# endif\n")
        else:
            fout.write(line)
def generate_rst_output(fout):
    """Write one reST definition line per known error code into *fout*."""
    for code in sorted(err_dict.keys()):
        # Only the first (highest-priority) item per code is documented.
        item = err_dict[code][0]
        fout.write(':c:macro:`{}` '.format(item.name))
        if code > 0:
            fout.write('**(0x{:x})**'.format(code))
        else:
            fout.write('({:d})'.format(code))
        if len(item.comment) > 0:
            fout.write(': {}'.format(item.comment))
        fout.write('\n\n')
def main():
    """Scan all C/C++ headers under IDF_PATH and regenerate the lookup table.

    Produces either esp_err_to_name.c (default) or, with --rst_output, a
    reStructuredText listing of all error codes for the documentation.
    Requires the IDF_PATH environment variable to be set.
    """
    parser = argparse.ArgumentParser(description='ESP32 esp_err_to_name lookup generator for esp_err_t')
    parser.add_argument('--c_input', help='Path to the esp_err_to_name.c.in template input.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c.in')
    parser.add_argument('--c_output', help='Path to the esp_err_to_name.c output.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c')
    parser.add_argument('--rst_output', help='Generate .rst output and save it into this file')
    args = parser.parse_args()
    # Walk the whole IDF tree looking for .c/.h files that define error macros.
    for root, dirnames, filenames in os.walk(os.environ['IDF_PATH']):
        for filename in fnmatch.filter(filenames, '*.[ch]'):
            full_path = os.path.join(root, filename)
            idf_path = os.path.relpath(full_path, os.environ['IDF_PATH'])
            if idf_path in ignore_files:
                continue
            with open(full_path, "r+") as f:
                for line in f:
                    # match also ESP_OK and ESP_FAIL because some of ESP_ERRs are referencing them
                    if re.match(r"\s*#define\s+(ESP_ERR_|ESP_OK|ESP_FAIL)", line):
                        try:
                            process(str.strip(line), idf_path)
                        except InputError as e:
                            print (e)
    # Resolve items that referenced a BASE macro defined later in the scan.
    process_remaining_errors()
    if args.rst_output is not None:
        with open(args.rst_output, 'w') as fout:
            generate_rst_output(fout)
    else:
        with open(args.c_input, 'r') as fin, open(args.c_output, 'w') as fout:
            generate_c_output(fin, fout)
main()
| 39.948718 | 168 | 0.594352 |
import os
import argparse
import re
import fnmatch
import string
import collections
import textwrap
ignore_files = [ 'components/mdns/test_afl_fuzz_host/esp32_compat.h' ]
priority_headers = [ 'components/esp32/include/esp_err.h' ]
err_dict = collections.defaultdict(list)
rev_err_dict = dict()
unproc_list = list()
class ErrItem:
def __init__(self, name, file, comment, rel_str = "", rel_off = 0):
self.name = name
self.file = file
self.comment = comment
self.rel_str = rel_str
self.rel_off = rel_off
def __str__(self):
ret = self.name + " from " + self.file
if (self.rel_str != ""):
ret += " is (" + self.rel_str + " + " + str(self.rel_off) + ")"
if self.comment != "":
ret += " // " + self.comment
return ret
def __cmp__(self, other):
if self.file in priority_headers and other.file not in priority_headers:
return -1
elif self.file not in priority_headers and other.file in priority_headers:
return 1
base = "_BASE"
if self.file == other.file:
if self.name.endswith(base) and not(other.name.endswith(base)):
return 1
elif not(self.name.endswith(base)) and other.name.endswith(base):
return -1
self_key = self.file + self.name
other_key = other.file + other.name
if self_key < other_key:
return -1
elif self_key > other_key:
return 1
else:
return 0
class InputError(RuntimeError):
def __init__(self, p, e):
super(InputError, self).__init__(p + ": " + e)
def process(line, idf_path):
if idf_path.endswith(".c"):
raise InputError(idf_path, "This line should be in a header file: %s" % line)
words = re.split(r' +', line, 2)
if len(words) < 2:
raise InputError(idf_path, "Error at line %s" % line)
line = ""
todo_str = words[2]
comment = ""
m = re.search(r'/\*!<(.+?(?=\*/))', todo_str)
if m:
comment = string.strip(m.group(1))
todo_str = string.strip(todo_str[:m.start()])
m = re.search(r'\((.+)\)', todo_str)
if m:
todo_str = m.group(1)
m = re.search(r'\s*(\w+)\s*\+(.+)', todo_str)
if m:
related = m.group(1)
todo_str = m.group(2)
m = re.search(r'0x([0-9A-Fa-f]+)', todo_str)
if m:
num = int(m.group(1), 16)
else:
m = re.search(r'(-?[0-9]+)', todo_str)
if m:
num = int(m.group(1), 10)
elif re.match(r'\w+', todo_str):
ed = todo_str
num = 0
else:
raise InputError(idf_path, "Cannot parse line %s" % line)
try:
related
except NameError:
err_dict[num].append(ErrItem(words[1], idf_path, comment))
rev_err_dict[words[1]] = num
else:
unproc_list.append(ErrItem(words[1], idf_path, comment, related, num))
def process_remaining_errors():
for item in unproc_list:
if item.rel_str in rev_err_dict:
base_num = rev_err_dict[item.rel_str]
base = err_dict[base_num][0]
num = base_num + item.rel_off
err_dict[num].append(ErrItem(item.name, item.file, item.comment))
rev_err_dict[item.name] = num
else:
print(item.rel_str + " referenced by " + item.name + " in " + item.file + " is unknown")
del unproc_list[:]
def path_to_include(path):
spl_path = string.split(path, os.sep)
try:
i = spl_path.index('include')
except ValueError:
return os.path.basename(path)
else:
return str(os.sep).join(spl_path[i+1:])
def print_warning(error_list, error_code):
print("[WARNING] The following errors have the same code (%d):" % error_code)
for e in error_list:
print(" " + str(e))
def max_string_width():
max = 0
for k in err_dict.keys():
for e in err_dict[k]:
x = len(e.name)
if x > max:
max = x
return max
def generate_c_output(fin, fout):
includes = set()
for k in err_dict.keys():
for e in err_dict[k]:
includes.add(path_to_include(e.file))
include_list = list(includes)
include_list.sort()
max_width = max_string_width() + 17 + 1
max_decdig = max(len(str(k)) for k in err_dict.keys())
for line in fin:
if re.match(r'@COMMENT@', line):
fout.write("//Do not edit this file because it is autogenerated by " + os.path.basename(__file__) + "\n")
elif re.match(r'@HEADERS@', line):
for i in include_list:
fout.write("#if __has_include(\"" + i + "\")\n#include \"" + i + "\"\n#endif\n")
elif re.match(r'@ERROR_ITEMS@', line):
last_file = ""
for k in sorted(err_dict.keys()):
if len(err_dict[k]) > 1:
err_dict[k].sort()
print_warning(err_dict[k], k)
for e in err_dict[k]:
if e.file != last_file:
last_file = e.file
fout.write(" // %s\n" % last_file)
table_line = (" ERR_TBL_IT(" + e.name + "), ").ljust(max_width) + "/* " + str(k).rjust(max_decdig)
fout.write("# ifdef %s\n" % e.name)
fout.write(table_line)
hexnum_length = 0
if k > 0:
hexnum = " 0x%x" % k
hexnum_length = len(hexnum)
fout.write(hexnum)
if e.comment != "":
if len(e.comment) < 50:
fout.write(" %s" % e.comment)
else:
indent = " " * (len(table_line) + hexnum_length + 1)
w = textwrap.wrap(e.comment, width=120, initial_indent = indent, subsequent_indent = indent)
fout.write(" %s" % w[0].strip())
for i in range(1, len(w)):
fout.write("\n%s" % w[i])
fout.write(" */\n# endif\n")
else:
fout.write(line)
def generate_rst_output(fout):
for k in sorted(err_dict.keys()):
v = err_dict[k][0]
fout.write(':c:macro:`{}` '.format(v.name))
if k > 0:
fout.write('**(0x{:x})**'.format(k))
else:
fout.write('({:d})'.format(k))
if len(v.comment) > 0:
fout.write(': {}'.format(v.comment))
fout.write('\n\n')
def main():
    """Scan $IDF_PATH for ESP_ERR_* defines and generate lookup output.

    Walks every .c/.h file under IDF_PATH, feeds matching #define lines to
    process(), then emits either an .rst listing or the esp_err_to_name.c
    table depending on the command-line arguments.
    """
    parser = argparse.ArgumentParser(description='ESP32 esp_err_to_name lookup generator for esp_err_t')
    parser.add_argument('--c_input', help='Path to the esp_err_to_name.c.in template input.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c.in')
    parser.add_argument('--c_output', help='Path to the esp_err_to_name.c output.', default=os.environ['IDF_PATH'] + '/components/esp32/esp_err_to_name.c')
    parser.add_argument('--rst_output', help='Generate .rst output and save it into this file')
    args = parser.parse_args()
    for root, dirnames, filenames in os.walk(os.environ['IDF_PATH']):
        for filename in fnmatch.filter(filenames, '*.[ch]'):
            full_path = os.path.join(root, filename)
            idf_path = os.path.relpath(full_path, os.environ['IDF_PATH'])
            if idf_path in ignore_files:
                continue
            # NOTE(review): "r+" opens read/write although the file is only
            # read here -- "r" would suffice; verify nothing relies on it.
            with open(full_path, "r+") as f:
                for line in f:
                    # match also ESP_OK and ESP_FAIL because some of ESP_ERRs are referencing them
                    if re.match(r"\s*#define\s+(ESP_ERR_|ESP_OK|ESP_FAIL)", line):
                        try:
                            process(str.strip(line), idf_path)
                        except InputError as e:
                            # Malformed defines are reported but do not abort.
                            print (e)
    process_remaining_errors()
    if args.rst_output is not None:
        with open(args.rst_output, 'w') as fout:
            generate_rst_output(fout)
    else:
        with open(args.c_input, 'r') as fin, open(args.c_output, 'w') as fout:
            generate_c_output(fin, fout)
# Standard script entry point.
if __name__ == "__main__":
    main()
| true | true |
f72f86c3063032f7831abf4c8121553aee852eaf | 5,154 | py | Python | ba5l-linear-space-alignment/lsa_v2.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null | ba5l-linear-space-alignment/lsa_v2.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null | ba5l-linear-space-alignment/lsa_v2.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | [
"MIT"
] | null | null | null | import sys
import math
sys.setrecursionlimit(20)
# 0: down, 1: right, 2: lower right
SIGMA = 5
# Number of columns to keep in the score matrix
BUF_WIDTH = 2
def compute_scores(str1, str2, m, b):
    """Populates the score and backtrack matrices.

    Global-alignment DP using a rolling buffer of BUF_WIDTH (2) columns so
    only O(len(str1)) memory is used.  Uses the module-level scoring dict
    `d` and gap penalty SIGMA.  Returns the buffer column index that holds
    the scores for the final column of str2.

    Args:
      str1: first string.
      str2: second string.
      m: score matrix, shape (len(str1)+1) x BUF_WIDTH; column 0 must be
         pre-initialized by the caller.
      b: backtrack column (0=down/gap in str2, 1=right/gap in str1,
         2=diagonal/match); only the last computed column survives.
    """
    # print '**** %d, %d' %(len(str1), len(str2))
    last_col = 0
    for j in xrange(1, len(str2)+1):
        # k / k_prev alternate between the two buffer columns.
        k = j % BUF_WIDTH
        k_prev = (j - 1) % BUF_WIDTH
        # Initialize the boundaries (top row: j gap insertions).
        m[0][k] = -SIGMA*j
        for i in xrange(1, len(str1)+1):
            # Case 0: gap -- come from above.
            m[i][k] = m[i-1][k] - SIGMA
            b[i] = 0
            # Case 1: gap -- come from the left.
            score = m[i][k_prev] - SIGMA
            if score > m[i][k]:
                m[i][k] = score
                b[i] = 1
            # Case 2: diagonal -- substitution score from dict d.
            score = m[i-1][k_prev] + d[(str1[i-1], str2[j-1])]
            if score > m[i][k]:
                m[i][k] = score
                b[i] = 2
        last_col = k
        # print [m[x][k] for x in range(len(str1)+1)]
    return last_col
def middle_edge(top, bottom, left, right):
    """Returns the middle edge of the area.

    Runs the linear-space (Hirschberg-style) forward pass to the middle
    column and a backward pass from the sink, then picks the row where the
    combined score is maximal.  Returns ((row, mid), (next_row, next_col))
    as the middle edge of the optimal alignment path.  Operates on the
    module-level str1/str2 and scoring globals.
    """
    length = bottom - top +1
    width = right - left + 1
    mid = (left + right) /2
    # l_width = mid - left + 1
    # r_width = right - mid
    # Initialize the matrices.
    m = [[0 for j in range(BUF_WIDTH)] for i in range(length+1)]
    # We need only one column for backtracking.
    b = [0 for i in range(length+1)]
    r = [[0 for j in range(BUF_WIDTH)] for i in range(length+1)]
    # We need only one column for backtracking.
    br = [0 for i in range(length+1)]
    # Boundary column: i leading gaps cost i * SIGMA.
    for i in xrange(1, length+1):
        m[i][0] = -SIGMA*i
    for i in xrange(1, length+1):
        r[i][0] = -SIGMA*i
    # Compute scores from source to mid column.
    stra, strb = str1[top:bottom+1], str2[left:mid+1]
    k = compute_scores(stra, strb, m, b)
    l_mid_list = []
    for i in range(length+1):
        l_mid_list.append(m[i][k])
    # Compute scores from sink to mid column (on reversed strings).
    rev1 = (str1[top:bottom+1])[::-1]
    rev2 = (str2[mid+1:right+1])[::-1]
    k = compute_scores(rev1, rev2, r, br)
    r_mid_list = []
    for i in range(length+1):
        r_mid_list.append(r[i][k])
    rev_r_mid_list = r_mid_list[::-1]
    # Total score through each middle-column node = forward + backward.
    sum_list = []
    for i in range(length):
        sum_list.append(l_mid_list[i]+rev_r_mid_list[i])
    # for x in r:
    #     print x
    # #print max(sum_list)
    # print l_mid_list
    # print r_mid_list
    # print sum_list
    max_index = sum_list.index(max(sum_list))
    y = mid
    r1 = (max_index, y)
    # Read the backward-pass backtrack entry (mirrored row index) to decide
    # the direction of the edge leaving the middle node.
    x = length - max_index
    if br[x] == 2:
        row = max_index + 1
        col = y + 1
    elif br[x] == 1:
        row = max_index
        col = y + 1
    elif br[x] == 0:
        row = max_index + 1
        col = y
    else:
        raise ValueError('Unexpected value: br[%d] = %d' % (x, br[x]))
    r2 = (row, col)
    # print '(%s, %s)' % (str(row), str(col))
    return (r1, r2)
#================ MAIN =======================
# Read the scoring matrix (BLOSUM62): first line is the alphabet header,
# each following row maps (row_char, col_char) -> substitution score in d.
d = dict()
with open('BLOSUM62.txt') as f:
    alphabets = f.readline().strip().split()
    for line in f:
        chars = line.strip().split()
        m = chars[0]
        scores = map(int, chars[1:])
        for i in range(len(scores)):
            d[(m, alphabets[i])] = scores[i]
# Read input strings (file input currently disabled; hard-coded sample used)
#with open('dataset_79_12.txt','r') as f:
##with open('linear_space.txt','r') as f:
##    str1 = f.readline().strip()
##    str2 = f.readline().strip()
str1 = 'PLEASANTLY'
str2 = 'MEANLY'
#===============================================
# Compute and show the middle edge of the full alignment grid.
mid_edge_src, mid_edge_dest = middle_edge(0, len(str1)-1, 0, len(str2)-1)
print mid_edge_src
print mid_edge_dest
##
##print mid_edge_src[0]
def seq_align(src, dest):
    """Emit one alignment step for the grid edge src -> dest.

    src and dest are (row, col) tuples; appends one character or a gap
    '-' to the module-level path1/path2 lists depending on whether the
    edge is vertical, horizontal or diagonal.
    """
    diff0 = dest[0] - src[0]
    # BUG FIX: was `diff1 = dest[1] = src[1]`, which tried to assign into
    # the dest tuple (TypeError) instead of computing the column delta.
    diff1 = dest[1] - src[1]
    if diff0 == 1 and diff1 == 0:
        # Vertical step: gap in the first sequence.
        path1.append('-')
        path2.append(str2[src[0]])
    elif diff0 == 0 and diff1 == 1:
        # Horizontal step: gap in the second sequence.
        path1.append(str1[src[1]])
        path2.append('-')
    elif diff0 == 1 and diff1 == 1:
        # Diagonal step: consume one character from each sequence.
        path1.append(str1[src[1]])
        path2.append(str2[src[0]])
    # NOTE(review): indexing str2 by the row and str1 by the column looks
    # swapped relative to the DP orientation used in middle_edge -- verify
    # against the intended row/column convention.
def LSA(top, bottom, left, right):
    """Recursive linear-space alignment over the sub-grid
    [top..bottom] x [left..right]; finds the middle edge, then recurses on
    the sub-problems on either side of it.

    NOTE(review): the local mid_edge_src/mid_edge_dest shadow the
    module-level variables of the same names; the base case emits nothing
    because the seq_align call is commented out, and sys.setrecursionlimit
    is only 20 -- this routine appears to be work in progress.
    """
    print top, bottom, left, right
    if left == right:
        #seq_align(mid_edge_src,mid_edge_dest)
        return
    mid_edge_src, mid_edge_dest = middle_edge(top, bottom, left, right)
    print mid_edge_src, mid_edge_dest
    midNode, middle = mid_edge_src
    # Left half: up to the middle node.
    LSA(top, midNode, left, middle)
    print '%d, %d, %d, %d****** %s, %s' %(top, bottom, left, right, mid_edge_src, mid_edge_dest)
    # Right half: from the far end of the middle edge.
    LSA(mid_edge_dest[0],bottom,mid_edge_dest[1],right)
# Accumulators for the two aligned output rows, filled by seq_align.
# NOTE(review): they remain empty because the seq_align call inside LSA is
# commented out, so both prints below emit empty lines.
path1 = []
path2 = []
LSA(0, len(str1)-1, 0, len(str2)-1)
print ''.join(path1)
print ''.join(path2)
# mid_edge_src,mid_edge_dest = middle_edge(0, len(str1), 0, len(str2))
# print '=============='
# print mid_edge_src, mid_edge_dest
| 26.56701 | 97 | 0.521925 | import sys
import math
sys.setrecursionlimit(20)
SIGMA = 5
BUF_WIDTH = 2
def compute_scores(str1, str2, m, b):
"""Populates the score and backtrack matrices.
Args:
str1: first string.
str2: second string.
m: score matrix.
b: backtrack matrix.
"""
last_col = 0
for j in xrange(1, len(str2)+1):
k = j % BUF_WIDTH
k_prev = (j - 1) % BUF_WIDTH
m[0][k] = -SIGMA*j
for i in xrange(1, len(str1)+1):
m[i][k] = m[i-1][k] - SIGMA
b[i] = 0
score = m[i][k_prev] - SIGMA
if score > m[i][k]:
m[i][k] = score
b[i] = 1
score = m[i-1][k_prev] + d[(str1[i-1], str2[j-1])]
if score > m[i][k]:
m[i][k] = score
b[i] = 2
last_col = k
return last_col
def middle_edge(top, bottom, left, right):
"""Returns the middle edge of the area."""
length = bottom - top +1
width = right - left + 1
mid = (left + right) /2
m = [[0 for j in range(BUF_WIDTH)] for i in range(length+1)]
b = [0 for i in range(length+1)]
r = [[0 for j in range(BUF_WIDTH)] for i in range(length+1)]
br = [0 for i in range(length+1)]
for i in xrange(1, length+1):
m[i][0] = -SIGMA*i
for i in xrange(1, length+1):
r[i][0] = -SIGMA*i
stra, strb = str1[top:bottom+1], str2[left:mid+1]
k = compute_scores(stra, strb, m, b)
l_mid_list = []
for i in range(length+1):
l_mid_list.append(m[i][k])
rev1 = (str1[top:bottom+1])[::-1]
rev2 = (str2[mid+1:right+1])[::-1]
k = compute_scores(rev1, rev2, r, br)
r_mid_list = []
for i in range(length+1):
r_mid_list.append(r[i][k])
rev_r_mid_list = r_mid_list[::-1]
sum_list = []
for i in range(length):
sum_list.append(l_mid_list[i]+rev_r_mid_list[i])
um_list.index(max(sum_list))
y = mid
r1 = (max_index, y)
x = length - max_index
if br[x] == 2:
row = max_index + 1
col = y + 1
elif br[x] == 1:
row = max_index
col = y + 1
elif br[x] == 0:
row = max_index + 1
col = y
else:
raise ValueError('Unexpected value: br[%d] = %d' % (x, br[x]))
r2 = (row, col)
return (r1, r2)
d = dict()
with open('BLOSUM62.txt') as f:
alphabets = f.readline().strip().split()
for line in f:
chars = line.strip().split()
m = chars[0]
scores = map(int, chars[1:])
for i in range(len(scores)):
d[(m, alphabets[i])] = scores[i]
, len(str2)-1)
print mid_edge_src
print mid_edge_dest
t):
diff0 = dest[0] - src[0]
diff1 = dest[1] = src[1]
if diff0 == 1 and diff1 == 0:
path1.append('-')
path2.append(str2[src[0]])
elif diff0 == 0 and diff1 == 1:
path1.append(str1[src[1]])
path2.append('-')
elif diff0 == 1 and diff1 == 1:
path1.append(str1[src[1]])
path2.append(str2[src[0]])
def LSA(top, bottom, left, right):
print top, bottom, left, right
if left == right:
return
mid_edge_src, mid_edge_dest = middle_edge(top, bottom, left, right)
print mid_edge_src, mid_edge_dest
midNode, middle = mid_edge_src
LSA(top, midNode, left, middle)
print '%d, %d, %d, %d****** %s, %s' %(top, bottom, left, right, mid_edge_src, mid_edge_dest)
LSA(mid_edge_dest[0],bottom,mid_edge_dest[1],right)
path1 = []
path2 = []
LSA(0, len(str1)-1, 0, len(str2)-1)
print ''.join(path1)
print ''.join(path2)
| false | true |
f72f87aeb2af6ecbc535d203c78b35e72e451ee6 | 40,174 | py | Python | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/interval.py | mattboggess/pandas | 5551bcf9d297ea8a0aeffb70b17ae6730e8abf89 | [
"BSD-3-Clause"
] | 1 | 2018-10-14T18:27:49.000Z | 2018-10-14T18:27:49.000Z | """ define the IntervalIndex """
import textwrap
import warnings
import numpy as np
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
is_object_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
    """
    Given an Interval or IntervalIndex, return the corresponding interval with
    closed bounds.

    An open side is tightened to the nearest representable label inside the
    interval (next label after an open left, previous label before an open
    right).
    """
    left, right = interval.left, interval.right
    if interval.open_left:
        left = _get_next_label(left)
    if interval.open_right:
        right = _get_prev_label(right)
    return left, right
def _new_IntervalIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't have
    arguments and breaks __new__

    ``d`` is the keyword dict produced by IntervalIndex.__reduce__
    (left/right arrays plus index attributes).
    """
    return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
    def __new__(cls, data, closed=None, dtype=None, copy=False,
                name=None, verify_integrity=True):
        # Delegate validation and construction to IntervalArray; any error
        # raised there is rewritten to mention IntervalIndex instead.
        if name is None and hasattr(data, 'name'):
            name = data.name
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
                                  verify_integrity=verify_integrity)
        return cls._simple_new(array, name)
    @classmethod
    def _simple_new(cls, array, name, closed=None):
        """
        Construct from an IntervalArray

        Parameters
        ----------
        array : IntervalArray
        name : str
            Attached as result.name
        closed : Any
            Ignored.
        """
        result = IntervalMixin.__new__(cls)
        result._data = array
        result.name = name
        result._reset_identity()
        return result
    @Appender(_index_shared_docs['_shallow_copy'])
    def _shallow_copy(self, left=None, right=None, **kwargs):
        # Shallow-copy the backing IntervalArray (optionally with new
        # left/right bounds) and re-wrap it with this index's attributes.
        result = self._data._shallow_copy(left=left, right=right)
        attributes = self._get_attributes_dict()
        attributes.update(kwargs)
        return self._simple_new(result, **attributes)
    @cache_readonly
    def _isnan(self):
        """Return a mask indicating if each value is NA"""
        # Computed lazily from the left endpoints and memoized in _mask.
        if self._mask is None:
            self._mask = isna(self.left)
        return self._mask
    @cache_readonly
    def _engine(self):
        # IntervalTree backs the overlapping-interval lookups in get_loc.
        return IntervalTree(self.left, self.right, closed=self.closed)
    def __contains__(self, key):
        """
        return a boolean if this key is IN the index

        We *only* accept an Interval

        Parameters
        ----------
        key : Interval

        Returns
        -------
        boolean
        """
        if not isinstance(key, Interval):
            return False
        try:
            self.get_loc(key)
            return True
        except KeyError:
            return False
    def contains(self, key):
        """
        Return a boolean indicating if the key is IN the index

        We accept / allow keys to be not *just* actual
        objects.

        Parameters
        ----------
        key : int, float, Interval

        Returns
        -------
        boolean
        """
        # Unlike __contains__, scalar keys are accepted too: anything that
        # get_loc can resolve counts as contained.
        try:
            self.get_loc(key)
            return True
        except KeyError:
            return False
    @classmethod
    @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
    def from_breaks(cls, breaks, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
                                              dtype=dtype)
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
    def from_arrays(cls, left, right, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray.from_arrays(left, right, closed, copy=copy,
                                              dtype=dtype)
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
    def from_intervals(cls, data, closed=None, name=None, copy=False,
                       dtype=None):
        # Deprecated alternate constructor, kept for backwards compat;
        # forwards to the main IntervalArray constructor.
        msg = ('IntervalIndex.from_intervals is deprecated and will be '
               'removed in a future version; Use IntervalIndex(...) instead')
        warnings.warn(msg, FutureWarning, stacklevel=2)
        with rewrite_exception("IntervalArray", cls.__name__):
            array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
        if name is None and isinstance(data, cls):
            name = data.name
        return cls._simple_new(array, name=name)
    @classmethod
    @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
    def from_tuples(cls, data, closed='right', name=None, copy=False,
                    dtype=None):
        with rewrite_exception("IntervalArray", cls.__name__):
            arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
                                            dtype=dtype)
        return cls._simple_new(arr, name=name)
    @Appender(_interval_shared_docs['to_tuples'] % dict(
        return_type="Index",
        examples="""
        Examples
        --------
        >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
        >>> idx.to_tuples()
        Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
        >>> idx.to_tuples(na_tuple=False)
        Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
    ))
    def to_tuples(self, na_tuple=True):
        tuples = self._data.to_tuples(na_tuple=na_tuple)
        return Index(tuples)
    @cache_readonly
    def _multiindex(self):
        # MultiIndex view over (left, right); backs the uniqueness and
        # monotonicity checks below.
        return MultiIndex.from_arrays([self.left, self.right],
                                      names=['left', 'right'])
    @property
    def left(self):
        """
        Return the left endpoints of each Interval in the IntervalIndex as
        an Index
        """
        return self._data._left
    @property
    def right(self):
        """
        Return the right endpoints of each Interval in the IntervalIndex as
        an Index
        """
        return self._data._right
    @property
    def closed(self):
        """
        Whether the intervals are closed on the left-side, right-side, both or
        neither
        """
        return self._data._closed
    @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
    def set_closed(self, closed):
        if closed not in _VALID_CLOSED:
            msg = "invalid option for 'closed': {closed}"
            raise ValueError(msg.format(closed=closed))
        # Delegate to the array rather than _shallow_copy so the closed-ness
        # change is validated/applied in one place.
        array = self._data.set_closed(closed)
        return self._simple_new(array, self.name)
    @property
    def length(self):
        """
        Return an Index with entries denoting the length of each Interval in
        the IntervalIndex
        """
        return self._data.length
    @property
    def size(self):
        # Avoid materializing ndarray[Interval]
        return self._data.size
    @property
    def shape(self):
        # Avoid materializing ndarray[Interval]
        return self._data.shape
    @property
    def itemsize(self):
        # Deprecated; slated for removal together with Index.itemsize.
        msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
               'a future version')
        warnings.warn(msg, FutureWarning, stacklevel=2)
        # suppress the warning from the underlying left/right itemsize
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return self.left.itemsize + self.right.itemsize
    def __len__(self):
        return len(self.left)
    @cache_readonly
    def values(self):
        """
        Return the IntervalIndex's data as an IntervalArray.
        """
        return self._data
    @cache_readonly
    def _values(self):
        # internal alias for the backing IntervalArray
        return self._data
    @cache_readonly
    def _ndarray_values(self):
        # materializes an object-dtype ndarray of Interval scalars
        return np.array(self._data)
    def __array__(self, result=None):
        """ the array interface, return my values """
        return self._ndarray_values
    def __array_wrap__(self, result, context=None):
        # we don't want the superclass implementation
        return result
    def __reduce__(self):
        # Pickle support: rebuilt via _new_IntervalIndex -> from_arrays.
        d = dict(left=self.left,
                 right=self.right)
        d.update(self._get_attributes_dict())
        return _new_IntervalIndex, (self.__class__, d), None
    @Appender(_index_shared_docs['copy'])
    def copy(self, deep=False, name=None):
        array = self._data.copy(deep=deep)
        attributes = self._get_attributes_dict()
        if name is not None:
            attributes.update(name=name)
        return self._simple_new(array, **attributes)
    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        with rewrite_exception('IntervalArray', self.__class__.__name__):
            new_values = self.values.astype(dtype, copy=copy)
        # Interval-dtype results stay an IntervalIndex; anything else falls
        # back to the base Index implementation.
        if is_interval_dtype(new_values):
            return self._shallow_copy(new_values.left, new_values.right)
        return super(IntervalIndex, self).astype(dtype, copy=copy)
    @cache_readonly
    def dtype(self):
        """Return the dtype object of the underlying data"""
        return self._data.dtype
    @property
    def inferred_type(self):
        """Return a string of the type inferred from the values"""
        return 'interval'
    @Appender(Index.memory_usage.__doc__)
    def memory_usage(self, deep=False):
        # we don't use an explicit engine
        # so return the bytes here
        return (self.left.memory_usage(deep=deep) +
                self.right.memory_usage(deep=deep))
    @cache_readonly
    def mid(self):
        """
        Return the midpoint of each Interval in the IntervalIndex as an Index
        """
        return self._data.mid
    @cache_readonly
    def is_monotonic(self):
        """
        Return True if the IntervalIndex is monotonic increasing (only equal or
        increasing values), else False
        """
        return self._multiindex.is_monotonic
    @cache_readonly
    def is_monotonic_increasing(self):
        """
        Return True if the IntervalIndex is monotonic increasing (only equal or
        increasing values), else False
        """
        return self._multiindex.is_monotonic_increasing
    @cache_readonly
    def is_monotonic_decreasing(self):
        """
        Return True if the IntervalIndex is monotonic decreasing (only equal or
        decreasing values), else False
        """
        return self._multiindex.is_monotonic_decreasing
    @cache_readonly
    def is_unique(self):
        """
        Return True if the IntervalIndex contains unique elements, else False
        """
        return self._multiindex.is_unique
    @cache_readonly
    def is_non_overlapping_monotonic(self):
        # delegated to the backing IntervalArray
        return self._data.is_non_overlapping_monotonic
    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        # Positional (iloc) keys go through the base validation; label keys
        # pass through untouched so interval semantics apply in get_loc.
        if kind == 'iloc':
            return super(IntervalIndex, self)._convert_scalar_indexer(
                key, kind=kind)
        return key
    def _maybe_cast_slice_bound(self, label, side, kind):
        # delegate to the left/right endpoint Index for the given side
        return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
    @Appender(_index_shared_docs['_convert_list_indexer'])
    def _convert_list_indexer(self, keyarr, kind=None):
        """
        we are passed a list-like indexer. Return the
        indexer for matching intervals.
        """
        locs = self.get_indexer_for(keyarr)
        # we have missing values
        if (locs == -1).any():
            raise KeyError
        return locs
    def _maybe_cast_indexed(self, key):
        """
        we need to cast the key, which could be a scalar
        or an array-like to the type of our subtype
        """
        if isinstance(key, IntervalIndex):
            return key
        subtype = self.dtype.subtype
        if is_float_dtype(subtype):
            if is_integer(key):
                key = float(key)
            elif isinstance(key, (np.ndarray, Index)):
                key = key.astype('float64')
        elif is_integer_dtype(subtype):
            if is_integer(key):
                key = int(key)
        return key
    def _check_method(self, method):
        """Reject fill methods: only exact matching is implemented."""
        if method is None:
            return
        if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
            msg = 'method {method} not yet implemented for IntervalIndex'
            raise NotImplementedError(msg.format(method=method))
        raise ValueError("Invalid fill method")
    def _searchsorted_monotonic(self, label, side, exclude_label=False):
        """Binary-search a scalar label against the interval endpoints.

        Only valid for non-overlapping, monotonic indexes; Interval labels
        are not supported here.
        """
        if not self.is_non_overlapping_monotonic:
            raise KeyError('can only get slices from an IntervalIndex if '
                           'bounds are non-overlapping and all monotonic '
                           'increasing or decreasing')
        if isinstance(label, IntervalMixin):
            raise NotImplementedError
        # GH 20921: "not is_monotonic_increasing" for the second condition
        # instead of "is_monotonic_decreasing" to account for single element
        # indexes being both increasing and decreasing
        if ((side == 'left' and self.left.is_monotonic_increasing) or
                (side == 'right' and not self.left.is_monotonic_increasing)):
            sub_idx = self.right
            if self.open_right or exclude_label:
                label = _get_next_label(label)
        else:
            sub_idx = self.left
            if self.open_left or exclude_label:
                label = _get_prev_label(label)
        return sub_idx._searchsorted_monotonic(label, side)
    def _get_loc_only_exact_matches(self, key):
        # Interval keys must match an element exactly (no partial overlap).
        if isinstance(key, Interval):
            if not self.is_unique:
                raise ValueError("cannot index with a slice Interval"
                                 " and a non-unique index")
            # TODO: this expands to a tuple index, see if we can
            # do better
            return Index(self._multiindex.values).get_loc(key)
        raise KeyError
    def _find_non_overlapping_monotonic_bounds(self, key):
        """Return positional (start, stop) bounds for ``key`` on a sorted,
        non-overlapping index; key may be an Interval, slice or scalar."""
        if isinstance(key, IntervalMixin):
            start = self._searchsorted_monotonic(
                key.left, 'left', exclude_label=key.open_left)
            stop = self._searchsorted_monotonic(
                key.right, 'right', exclude_label=key.open_right)
        elif isinstance(key, slice):
            # slice
            start, stop = key.start, key.stop
            if (key.step or 1) != 1:
                raise NotImplementedError("cannot slice with a slice step")
            if start is None:
                start = 0
            else:
                start = self._searchsorted_monotonic(start, 'left')
            if stop is None:
                stop = len(self)
            else:
                stop = self._searchsorted_monotonic(stop, 'right')
        else:
            # scalar or index-like
            start = self._searchsorted_monotonic(key, 'left')
            stop = self._searchsorted_monotonic(key, 'right')
        return start, stop
    def get_loc(self, key, method=None):
        """Get integer location, slice or boolean mask for requested label.

        Parameters
        ----------
        key : label
        method : {None}, optional
            * default: matches where the label is within an interval only.

        Returns
        -------
        loc : int if unique index, slice if monotonic index, else mask

        Examples
        ---------
        >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
        >>> index = pd.IntervalIndex([i1, i2])
        >>> index.get_loc(1)
        0

        You can also supply an interval or an location for a point inside an
        interval.

        >>> index.get_loc(pd.Interval(0, 2))
        array([0, 1], dtype=int64)
        >>> index.get_loc(1.5)
        1

        If a label is in several intervals, you get the locations of all the
        relevant intervals.

        >>> i3 = pd.Interval(0, 2)
        >>> overlapping_index = pd.IntervalIndex([i2, i3])
        >>> overlapping_index.get_loc(1.5)
        array([0, 1], dtype=int64)
        """
        self._check_method(method)
        original_key = key
        key = self._maybe_cast_indexed(key)
        if self.is_non_overlapping_monotonic:
            # Sorted, non-overlapping: resolve via positional bounds.
            if isinstance(key, Interval):
                left = self._maybe_cast_slice_bound(key.left, 'left', None)
                right = self._maybe_cast_slice_bound(key.right, 'right', None)
                key = Interval(left, right, key.closed)
            else:
                key = self._maybe_cast_slice_bound(key, 'left', None)
            start, stop = self._find_non_overlapping_monotonic_bounds(key)
            if start is None or stop is None:
                return slice(start, stop)
            elif start + 1 == stop:
                # exactly one matching interval
                return start
            elif start < stop:
                return slice(start, stop)
            else:
                raise KeyError(original_key)
        else:
            # use the interval tree
            if isinstance(key, Interval):
                left, right = _get_interval_closed_bounds(key)
                return self._engine.get_loc_interval(left, right)
            else:
                return self._engine.get_loc(key)
    def get_value(self, series, key):
        """Select value(s) from ``series`` using this index's semantics for
        ``key`` (boolean mask, list-like, slice, or scalar/Interval)."""
        if com.is_bool_indexer(key):
            loc = key
        elif is_list_like(key):
            loc = self.get_indexer(key)
        elif isinstance(key, slice):
            if not (key.step is None or key.step == 1):
                raise ValueError("cannot support not-default step in a slice")
            try:
                loc = self.get_loc(key)
            except TypeError:
                # we didn't find exact intervals or are non-unique
                msg = "unable to slice with this key: {key}".format(key=key)
                raise ValueError(msg)
        else:
            loc = self.get_loc(key)
        return series.iloc[loc]
    @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
    def get_indexer(self, target, method=None, limit=None, tolerance=None):
        self._check_method(method)
        target = ensure_index(target)
        target = self._maybe_cast_indexed(target)
        if self.equals(target):
            return np.arange(len(self), dtype='intp')
        if self.is_non_overlapping_monotonic:
            start, stop = self._find_non_overlapping_monotonic_bounds(target)
            start_plus_one = start + 1
            # When every target hits at most one interval the result can be
            # formed vectorized; otherwise fall through below.
            if not ((start_plus_one < stop).any()):
                return np.where(start_plus_one == stop, start, -1)
        if not self.is_unique:
            raise ValueError("cannot handle non-unique indices")
        # IntervalIndex
        if isinstance(target, IntervalIndex):
            indexer = self._get_reindexer(target)
        # non IntervalIndex
        else:
            indexer = np.concatenate([self.get_loc(i) for i in target])
        return ensure_platform_int(indexer)
    def _get_reindexer(self, target):
        """
        Return an indexer for a target IntervalIndex with self
        """
        # find the left and right indexers
        lindexer = self._engine.get_indexer(target.left.values)
        rindexer = self._engine.get_indexer(target.right.values)
        # we want to return an indexer on the intervals
        # however, our keys could provide overlapping of multiple
        # intervals, so we iterate thru the indexers and construct
        # a set of indexers
        indexer = []
        n = len(self)
        for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
            target_value = target[i]
            # matching on the lhs bound
            if (lhs != -1 and
                    self.closed == 'right' and
                    target_value.left == self[lhs].right):
                lhs += 1
            # matching on the rhs bound
            if (rhs != -1 and
                    self.closed == 'left' and
                    target_value.right == self[rhs].left):
                rhs -= 1
            # not found
            if lhs == -1 and rhs == -1:
                indexer.append(np.array([-1]))
            elif rhs == -1:
                indexer.append(np.arange(lhs, n))
            elif lhs == -1:
                # care about left/right closed here
                # NOTE(review): indexing self by the *target* position i
                # looks suspect when len(self) != len(target) -- verify.
                value = self[i]
                # target.closed same as self.closed
                if self.closed == target.closed:
                    if target_value.left < value.left:
                        indexer.append(np.array([-1]))
                        continue
                # target.closed == 'left'
                elif self.closed == 'right':
                    if target_value.left <= value.left:
                        indexer.append(np.array([-1]))
                        continue
                # target.closed == 'right'
                elif self.closed == 'left':
                    if target_value.left <= value.left:
                        indexer.append(np.array([-1]))
                        continue
                indexer.append(np.arange(0, rhs + 1))
            else:
                indexer.append(np.arange(lhs, rhs + 1))
        return np.concatenate(indexer)
    @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
    def get_indexer_non_unique(self, target):
        target = self._maybe_cast_indexed(ensure_index(target))
        return super(IntervalIndex, self).get_indexer_non_unique(target)
    @Appender(_index_shared_docs['where'])
    def where(self, cond, other=None):
        if other is None:
            other = self._na_value
        values = np.where(cond, self.values, other)
        return self._shallow_copy(values)
    def delete(self, loc):
        """
        Return a new IntervalIndex with passed location(-s) deleted

        Returns
        -------
        new_index : IntervalIndex
        """
        # Delete from both endpoint Indexes in lockstep.
        new_left = self.left.delete(loc)
        new_right = self.right.delete(loc)
        return self._shallow_copy(new_left, new_right)
    def insert(self, loc, item):
        """
        Return a new IntervalIndex inserting new item at location. Follows
        Python list.append semantics for negative values.  Only Interval
        objects and NA can be inserted into an IntervalIndex

        Parameters
        ----------
        loc : int
        item : object

        Returns
        -------
        new_index : IntervalIndex
        """
        if isinstance(item, Interval):
            if item.closed != self.closed:
                raise ValueError('inserted item must be closed on the same '
                                 'side as the index')
            left_insert = item.left
            right_insert = item.right
        elif is_scalar(item) and isna(item):
            # GH 18295
            left_insert = right_insert = item
        else:
            raise ValueError('can only insert Interval objects and NA into '
                             'an IntervalIndex')
        new_left = self.left.insert(loc, left_insert)
        new_right = self.right.insert(loc, right_insert)
        return self._shallow_copy(new_left, new_right)
    def _as_like_interval_index(self, other):
        """Validate that ``other`` is an IntervalIndex with matching
        closed-ness, for use in set operations; returns it as an Index."""
        self._assert_can_do_setop(other)
        other = ensure_index(other)
        if not isinstance(other, IntervalIndex):
            msg = ('the other index needs to be an IntervalIndex too, but '
                   'was type {}').format(other.__class__.__name__)
            raise TypeError(msg)
        elif self.closed != other.closed:
            msg = ('can only do set operations between two IntervalIndex '
                   'objects that are closed on the same side')
            raise ValueError(msg)
        return other
    def _concat_same_dtype(self, to_concat, name):
        """
        assert that we all have the same .closed
        we allow a 0-len index here as well
        """
        if not len({i.closed for i in to_concat if len(i)}) == 1:
            msg = ('can only append two IntervalIndex objects '
                   'that are closed on the same side')
            raise ValueError(msg)
        return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
    @Appender(_index_shared_docs['take'] % _index_doc_kwargs)
    def take(self, indices, axis=0, allow_fill=True,
             fill_value=None, **kwargs):
        result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
                                 fill_value=fill_value, **kwargs)
        attributes = self._get_attributes_dict()
        return self._simple_new(result, **attributes)
    def __getitem__(self, value):
        # Array results stay an IntervalIndex; scalar results (single
        # Interval or NA) are returned as-is.
        result = self._data[value]
        if isinstance(result, IntervalArray):
            return self._shallow_copy(result)
        else:
            # scalar
            return result
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + ',' + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
"""
Determines if two IntervalIndex objects contain the same elements
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, '.values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = ('can only do {op} between two IntervalIndex '
'objects that have compatible dtypes')
raise TypeError(msg.format(op=op_name))
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
"""helper for interval_range to check type compat of start/end/freq"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
com._any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
                   name=None, closed='right'):
    """
    Return a fixed frequency IntervalIndex
    Parameters
    ----------
    start : numeric or datetime-like, default None
        Left bound for generating intervals
    end : numeric or datetime-like, default None
        Right bound for generating intervals
    periods : integer, default None
        Number of periods to generate
    freq : numeric, string, or DateOffset, default None
        The length of each interval. Must be consistent with the type of start
        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
        for numeric and 'D' for datetime-like.
    name : string, default None
        Name of the resulting IntervalIndex
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.
    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end``, inclusively.
    To learn more about datetime-like frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    Returns
    -------
    rng : IntervalIndex
    Examples
    --------
    Numeric ``start`` and ``end`` is supported.
    >>> pd.interval_range(start=0, end=5)
    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
                  closed='right', dtype='interval[int64]')
    Additionally, datetime-like input is also supported.
    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
                          end=pd.Timestamp('2017-01-04'))
    IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
                   (2017-01-03, 2017-01-04]]
                  closed='right', dtype='interval[datetime64[ns]]')
    The ``freq`` parameter specifies the frequency between the left and right.
    endpoints of the individual intervals within the ``IntervalIndex``. For
    numeric ``start`` and ``end``, the frequency must also be numeric.
    >>> pd.interval_range(start=0, periods=4, freq=1.5)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
                  closed='right', dtype='interval[float64]')
    Similarly, for datetime-like ``start`` and ``end``, the frequency must be
    convertible to a DateOffset.
    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
                          periods=3, freq='MS')
    IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
                   (2017-03-01, 2017-04-01]]
                  closed='right', dtype='interval[datetime64[ns]]')
    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).
    >>> pd.interval_range(start=0, end=6, periods=4)
    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
              closed='right',
              dtype='interval[float64]')
    The ``closed`` parameter specifies which endpoints of the individual
    intervals within the ``IntervalIndex`` are closed.
    >>> pd.interval_range(end=5, periods=4, closed='both')
    IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
                  closed='both', dtype='interval[int64]')
    See Also
    --------
    IntervalIndex : an Index of intervals that are all closed on the same side.
    """
    # box datetime-like scalars (e.g. np.datetime64) into pandas scalars
    start = com.maybe_box_datetimelike(start)
    end = com.maybe_box_datetimelike(end)
    # 'endpoint' only sniffs the regime (numeric vs datetime-like) for the
    # default freq and the branch below; start takes precedence if both given
    endpoint = start if start is not None else end
    # default freq is only filled in when it would not over-determine the
    # start/end/periods/freq combination (exactly three may be specified)
    if freq is None and com._any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else 'D'
    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError('Of the four parameters: start, end, periods, and '
                         'freq, exactly three must be specified')
    if not _is_valid_endpoint(start):
        msg = 'start must be numeric or datetime-like, got {start}'
        raise ValueError(msg.format(start=start))
    elif not _is_valid_endpoint(end):
        msg = 'end must be numeric or datetime-like, got {end}'
        raise ValueError(msg.format(end=end))
    # float periods are accepted and truncated via int(); any other
    # non-integer, non-None periods is rejected
    if is_float(periods):
        periods = int(periods)
    elif not is_integer(periods) and periods is not None:
        msg = 'periods must be a number, got {periods}'
        raise TypeError(msg.format(periods=periods))
    # non-numeric freq (e.g. '5H') must convert to a DateOffset
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError:
            raise ValueError('freq must be numeric or convertible to '
                             'DateOffset, got {freq}'.format(freq=freq))
    # verify type compatibility
    if not all([_is_type_compatible(start, end),
                _is_type_compatible(start, freq),
                _is_type_compatible(end, freq)]):
        raise TypeError("start, end, freq need to be type compatible")
    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1
    if is_number(endpoint):
        # force consistency between start/end/freq (lower end if freq skips it)
        if com._all_not_none(start, end, freq):
            end -= (end - start) % freq
        # compute the period/start/end if unspecified (at most one)
        if periods is None:
            periods = int((end - start) // freq) + 1
        elif start is None:
            start = end - (periods - 1) * freq
        elif end is None:
            end = start + (periods - 1) * freq
        breaks = np.linspace(start, end, periods)
        if all(is_integer(x) for x in com._not_none(start, end, freq)):
            # np.linspace always produces float output
            breaks = maybe_downcast_to_dtype(breaks, 'int64')
    else:
        # delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            range_func = date_range
        else:
            range_func = timedelta_range
        breaks = range_func(start=start, end=end, periods=periods, freq=freq)
    return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| 34.543422 | 84 | 0.599218 | import textwrap
import warnings
import numpy as np
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
is_object_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
def _new_IntervalIndex(cls, d):
return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
_defer_to_indexing = True
_mask = None
def __new__(cls, data, closed=None, dtype=None, copy=False,
name=None, verify_integrity=True):
if name is None and hasattr(data, 'name'):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
verify_integrity=verify_integrity)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
def __contains__(self, key):
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
try:
self.get_loc(key)
return True
except KeyError:
return False
@classmethod
@Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
def from_breaks(cls, breaks, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
def from_arrays(cls, left, right, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(left, right, closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
def from_intervals(cls, data, closed=None, name=None, copy=False,
dtype=None):
msg = ('IntervalIndex.from_intervals is deprecated and will be '
'removed in a future version; Use IntervalIndex(...) instead')
warnings.warn(msg, FutureWarning, stacklevel=2)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
if name is None and isinstance(data, cls):
name = data.name
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
def from_tuples(cls, data, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(arr, name=name)
@Appender(_interval_shared_docs['to_tuples'] % dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
))
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
return self._data._left
@property
def right(self):
return self._data._right
@property
def closed(self):
return self._data._closed
@Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
return self._data.length
@property
def size(self):
return self._data.size
@property
def shape(self):
return self._data.shape
@property
def itemsize(self):
msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
'a future version')
warnings.warn(msg, FutureWarning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return self.left.itemsize + self.right.itemsize
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
return self._data
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
return self._ndarray_values
def __array_wrap__(self, result, context=None):
return result
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
array = self._data.copy(deep=deep)
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
with rewrite_exception('IntervalArray', self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super(IntervalIndex, self).astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
return self._data.dtype
@property
def inferred_type(self):
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
return self._data.mid
@cache_readonly
def is_monotonic(self):
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
locs = self.get_indexer_for(keyarr)
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
msg = 'method {method} not yet implemented for IntervalIndex'
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and not self.left.is_monotonic_increasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
msg = "unable to slice with this key: {key}".format(key=key)
raise ValueError(msg)
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return ensure_platform_int(indexer)
def _get_reindexer(self, target):
# find the left and right indexers
lindexer = self._engine.get_indexer(target.left.values)
rindexer = self._engine.get_indexer(target.right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
# intervals, so we iterate thru the indexers and construct
# a set of indexers
indexer = []
n = len(self)
for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (lhs != -1 and
self.closed == 'right' and
target_value.left == self[lhs].right):
lhs += 1
# matching on the lhs bound
if (rhs != -1 and
self.closed == 'left' and
target_value.right == self[rhs].left):
rhs -= 1
# not found
if lhs == -1 and rhs == -1:
indexer.append(np.array([-1]))
elif rhs == -1:
indexer.append(np.arange(lhs, n))
elif lhs == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, rhs + 1))
else:
indexer.append(np.arange(lhs, rhs + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
'side as the index')
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError('can only insert Interval objects and NA into '
'an IntervalIndex')
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other):
self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
msg = ('the other index needs to be an IntervalIndex too, but '
'was type {}').format(other.__class__.__name__)
raise TypeError(msg)
elif self.closed != other.closed:
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
raise ValueError(msg)
return other
def _concat_same_dtype(self, to_concat, name):
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
fill_value=fill_value, **kwargs)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
# scalar
return result
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + ',' + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, '.values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = ('can only do {op} between two IntervalIndex '
'objects that have compatible dtypes')
raise TypeError(msg.format(op=op_name))
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
com._any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
                   name=None, closed='right'):
    """
    Return a fixed-frequency IntervalIndex.

    Exactly three of ``start``, ``end``, ``periods`` and ``freq`` must be
    specified (``freq`` is defaulted when it can be inferred).  Endpoints may
    be numeric, Timestamp or Timedelta; numeric and datetime-like endpoints
    take different construction paths below.
    """
    start = com.maybe_box_datetimelike(start)
    end = com.maybe_box_datetimelike(end)
    # whichever endpoint was given decides numeric vs. datetime-like handling
    endpoint = start if start is not None else end
    if freq is None and com._any_none(periods, start, end):
        freq = 1 if is_number(endpoint) else 'D'
    if com.count_not_none(start, end, periods, freq) != 3:
        raise ValueError('Of the four parameters: start, end, periods, and '
                         'freq, exactly three must be specified')
    if not _is_valid_endpoint(start):
        msg = 'start must be numeric or datetime-like, got {start}'
        raise ValueError(msg.format(start=start))
    elif not _is_valid_endpoint(end):
        msg = 'end must be numeric or datetime-like, got {end}'
        raise ValueError(msg.format(end=end))
    # float periods are tolerated and truncated to int
    if is_float(periods):
        periods = int(periods)
    elif not is_integer(periods) and periods is not None:
        msg = 'periods must be a number, got {periods}'
        raise TypeError(msg.format(periods=periods))
    if freq is not None and not is_number(freq):
        try:
            freq = to_offset(freq)
        except ValueError:
            raise ValueError('freq must be numeric or convertible to '
                             'DateOffset, got {freq}'.format(freq=freq))
    # verify type compatibility
    if not all([_is_type_compatible(start, end),
                _is_type_compatible(start, freq),
                _is_type_compatible(end, freq)]):
        raise TypeError("start, end, freq need to be type compatible")
    # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
    if periods is not None:
        periods += 1
    if is_number(endpoint):
        # force consistency between start/end/freq (lower end if freq skips it)
        if com._all_not_none(start, end, freq):
            end -= (end - start) % freq
        # compute the period/start/end if unspecified (at most one)
        if periods is None:
            periods = int((end - start) // freq) + 1
        elif start is None:
            start = end - (periods - 1) * freq
        elif end is None:
            end = start + (periods - 1) * freq
        breaks = np.linspace(start, end, periods)
        if all(is_integer(x) for x in com._not_none(start, end, freq)):
            # np.linspace always produces float output
            breaks = maybe_downcast_to_dtype(breaks, 'int64')
    else:
        # delegate to the appropriate range function
        if isinstance(endpoint, Timestamp):
            range_func = date_range
        else:
            range_func = timedelta_range
        breaks = range_func(start=start, end=end, periods=periods, freq=freq)
    return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| true | true |
f72f87d18502022289e2800971f93cfb71acdce7 | 50 | py | Python | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | gateway/__init__.py | RockefellerArchiveCenter/ProjectElectronAPIGateway | 4a35c24fe5c09df4f849d1f74b45af5a49c61ac6 | [
"MIT"
] | null | null | null | default_app_config = 'gateway.apps.GatewayConfig'
| 25 | 49 | 0.84 | default_app_config = 'gateway.apps.GatewayConfig'
| true | true |
f72f8822d3d4ac66a54db0bfc296d706ac05a14c | 1,219 | py | Python | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | setup.py | duyyudus/iconviet-lottery | ade157e050a6dac468a300d37c0c67a1e92e5d30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Dependency groups.  Nothing is required at install time; the extras carry
# the test and local-development tooling.
test_deps = []
dev_deps = ['iconsdk', 'tbears', 'pylint', 'autopep8', 'rope', 'black']
deps = {'test': test_deps, 'dev': dev_deps}

install_requires = []
extra_requires = deps
test_requires = deps['test']

# The long description shown on package indexes is the README, verbatim.
with open('README.adoc') as readme_file:
    long_description = readme_file.read()

setup(
    name='megaloop_lottery',
    version='0.0.1',
    description='A simple and incentived lottery Dapp on ICON network',
    long_description=long_description,
    long_description_content_type='text/asciidoc',
    author='duyyudus',
    author_email='duyyudus@gmail.com',
    url='https://github.com/duyyudus/megaloop-lottery',
    include_package_data=True,
    tests_require=test_requires,
    install_requires=install_requires,
    extras_require=extra_requires,
    license='MIT',
    zip_safe=False,
    keywords='Lottery Dapp',
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Operating System :: POSIX :: Linux',
    ],
)
| 28.348837 | 73 | 0.660377 |
from setuptools import setup, find_packages
deps = {
'test': [],
'dev': ['iconsdk', 'tbears', 'pylint', 'autopep8', 'rope', 'black',],
}
install_requires = []
extra_requires = deps
test_requires = deps['test']
with open('README.adoc') as readme_file:
long_description = readme_file.read()
setup(
name='megaloop_lottery',
version='0.0.1',
description='A simple and incentived lottery Dapp on ICON network',
long_description=long_description,
long_description_content_type='text/asciidoc',
author='duyyudus',
author_email='duyyudus@gmail.com',
url='https://github.com/duyyudus/megaloop-lottery',
include_package_data=True,
tests_require=test_requires,
install_requires=install_requires,
extras_require=extra_requires,
license='MIT',
zip_safe=False,
keywords='Lottery Dapp',
python_requires='>=3.6',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
],
)
| true | true |
f72f888a64c2817014cb56a0375b9b4d1a93dade | 749 | py | Python | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | try.py | charliezjw/Neural-Signal-Decoder | fb0df09ba0314724c7c90141bd47cc8fb0201b7a | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
# NOTE(review): tensorflow is only used by the commented-out prototype below;
# the live code is pure NumPy.
# --- TensorFlow prototype of the same per-row majority vote, kept for
# --- reference:
# a = tf.placeholder(tf.int32, [None, 3])
#
# b = tf.convert_to_tensor(tf.argmax(tf.bincount(a[0])))
# b = tf.stack([b, tf.argmax(tf.bincount(a[1]))], 0)
# for i in range(2, 5):
# max_indx = tf.argmax(tf.bincount(a[i]))
# b = tf.concat([b, [max_indx]], 0)
#
# with tf.Session() as sess:
# t1 = np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]])
# t2, t3 = sess.run([b, max_indx], feed_dict={a: t1})
# print(t2)
# print(t3)
# NumPy version: b[i] is the mode (most frequent value) of row i of `a`;
# `c` holds the expected labels per row.
a = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))
b = np.zeros(a.shape[0])
c = np.asarray([1, 4, 6, 7, 9])
for i in range(a.shape[0]):
    # bincount counts occurrences of each value; argmax picks the mode
    b[i] = np.argmax(np.bincount(a[i]))
print(np.mean(np.equal(b, c))) | 31.208333 | 83 | 0.542056 | import numpy as np
import tensorflow as tf
a = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))
b = np.zeros(a.shape[0])
c = np.asarray([1, 4, 6, 7, 9])
for i in range(a.shape[0]):
b[i] = np.argmax(np.bincount(a[i]))
print(np.mean(np.equal(b, c))) | true | true |
f72f896b8a657f3551e24d80c0f44854acb5a54c | 666 | py | Python | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2019-10-20T02:58:27.000Z | 2019-10-20T02:58:27.000Z | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-20T22:02:21.000Z | 2021-09-21T13:55:41.000Z | pipeline/compilers/livescript.py | jnis77diver/django-pipeline | 8bac57adae84615d9d79ad19b2b591c2e46879f9 | [
"MIT"
] | 1 | 2021-09-18T01:39:48.000Z | 2021-09-18T01:39:48.000Z | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LiveScriptCompiler(SubProcessCompiler):
    """Pipeline compiler that turns LiveScript (``*.ls``) sources into JS."""
    output_extension = 'js'

    def match_file(self, path):
        # Claim only LiveScript sources.
        return path.endswith('.ls')

    def compile_file(self, infile, outfile, outdated=False, force=False):
        # Nothing to do when the compiled output is already up to date.
        if outdated or force:
            command = (
                settings.LIVE_SCRIPT_BINARY,
                "-cp",
                settings.LIVE_SCRIPT_ARGUMENTS,
                infile,
            )
            return self.execute_command(command, stdout_captured=outfile)
| 28.956522 | 73 | 0.672673 | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LiveScriptCompiler(SubProcessCompiler):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.ls')
def compile_file(self, infile, outfile, outdated=False, force=False):
if not outdated and not force:
return
command = (
settings.LIVE_SCRIPT_BINARY,
"-cp",
settings.LIVE_SCRIPT_ARGUMENTS,
infile,
)
return self.execute_command(command, stdout_captured=outfile)
| true | true |
f72f8af5b3ccf2010b8feadf774b09fd508c9661 | 32,775 | py | Python | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | WhoopClient.py | lcintron/WhoopClient | 46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6 | [
"MIT"
] | null | null | null | import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
'''A class to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
    def get_authorization(self, user_ini):
        '''
        Function to get the authorization token and user id.
        This must be completed before a user can query the api
        '''
        # Credentials live in a [whoop] section of the given ini file.
        config = configparser.ConfigParser()
        config.read(user_ini)
        username = config['whoop']['username']
        password = config['whoop']['password']
        headers = {
            "username": username,
            "password": password,
            "grant_type": "password",
            "issueRefresh": False
        }
        # Password-grant OAuth exchange; payload goes in the JSON body
        # (the variable is named `headers` but is sent as json=).
        auth = requests.post("https://api-7.whoop.com/oauth/token",
                             json=headers)
        if auth.status_code == 200:
            content = auth.json()
            user_id = content['user']['id']
            token = content['access_token']
            # Account-creation timestamp; bounds the "pull everything" pulls.
            start_time = content['user']['profile']['createdAt']
            self.whoop_id = user_id
            self.auth_code = 'bearer ' + token
            self.start_datetime = start_time
            print("Whoop: Authentication successful")
        else:
            print(
                "Authentication failed - please double check your credentials")
    def get_keydata_all(self):
        '''
        This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
        In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
        '''
        # start_datetime is set by get_authorization(); it doubles as an
        # "are we authenticated" check here.
        if self.start_datetime:
            if self.all_data is not None:
                ## All data already pulled
                return self.all_data
            else:
                start_date = parser.isoparse(
                    self.start_datetime).replace(tzinfo=None)
                end_time = 'T23:59:59.999Z'
                start_time = 'T00:00:00.000Z'
                # Query in one-week windows from account creation to "now".
                intervals = rrule.rrule(freq=WEEKLY,
                                        interval=1,
                                        until=self.current_datetime,
                                        dtstart=start_date)
                date_range = [[
                    d.strftime('%Y-%m-%d') + start_time,
                    (d +
                     relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                    + end_time
                ] for d in intervals]
                all_data = pd.DataFrame()
                for dates in date_range:
                    cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
                        self.whoop_id, dates[1], dates[0])
                    data = self.pull_api(cycle_url, df=True)
                    all_data = pd.concat([all_data, data])
                all_data.reset_index(drop=True, inplace=True)
                ## fixing the day column so it's not a list
                all_data['days'] = all_data['days'].map(lambda d: d[0])
                all_data.rename(columns={"days": 'day'}, inplace=True)
                ## Putting all time into minutes instead of milliseconds
                sleep_cols = [
                    'qualityDuration', 'needBreakdown.baseline',
                    'needBreakdown.debt', 'needBreakdown.naps',
                    'needBreakdown.strain', 'needBreakdown.total'
                ]
                for sleep_col in sleep_cols:
                    all_data['sleep.' + sleep_col] = all_data[
                        'sleep.' + sleep_col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                ## Making nap variable
                # 0 naps -> 0; 1 nap -> its duration; several -> summed,
                # skipping naps whose qualityDuration is missing.
                all_data['nap_duration'] = all_data['sleep.naps'].apply(
                    lambda x: x[0]['qualityDuration'] / 60000
                    if len(x) == 1 else (sum([
                        y['qualityDuration'] for y in x
                        if y['qualityDuration'] is not None
                    ]) / 60000 if len(x) > 1 else 0))
                all_data.drop(['sleep.naps'], axis=1, inplace=True)
                ## dropping duplicates subsetting because of list columns
                all_data.drop_duplicates(subset=['day', 'sleep.id'],
                                         inplace=True)
                # Cache for later calls and for the activity/sleep getters.
                self.all_data = all_data
                return all_data
        else:
            print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = self.sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
    def get_sleep_all(self):
        '''
        This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
        Each row in the data frame represents one night of sleep
        '''
        if self.auth_code:
            if self.all_data is not None:
                ## use existing
                data = self.all_data
            else:
                ## pull timeframe data
                data = self.get_keydata_all()
            ## getting all the sleep ids
            if self.all_sleep is not None:
                ## All sleep data already pulled
                return self.all_sleep
            else:
                # NaN sleep ids mark days with no recorded sleep; skip them.
                sleep_ids = data['sleep.id'].values.tolist()
                sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
                all_sleep = pd.DataFrame()
                for s in sleep_list:
                    m = self.pull_sleep_main(s)
                    all_sleep = pd.concat([all_sleep, m])
                ## Cleaning sleep data: convert milliseconds to minutes
                sleep_update = [
                    'qualityDuration', 'latency', 'debtPre', 'debtPost',
                    'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
                    'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',
                    'remSleepDuration', 'wakeDuration', 'arousalTime',
                    'noDataDuration', 'creditFromNaps', 'projectedSleep'
                ]
                for col in sleep_update:
                    all_sleep[col] = all_sleep[col].astype(float).apply(
                        lambda x: np.nan if np.isnan(x) else x / 60000)
                all_sleep.drop(['during.bounds'], axis=1, inplace=True)
                # The cache keeps the raw 'events' column (used by
                # get_sleep_events_*); the frame returned to the caller
                # drops it.
                self.all_sleep = all_sleep.copy(deep=True)
                all_sleep.drop(['events'], axis=1, inplace=True)
                return all_sleep
        else:
            print("Whoop: Please run the authorization function first")
    def get_sleep_events_all(self):
        '''
        This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.
        Each row in the data frame represents an individual sleep event within an individual night of sleep.
        Sleep events can be joined against the sleep or main datasets by sleep id.
        All sleep times are returned in minutes.
        '''
        if self.auth_code:
            if self.all_data is not None:
                ## use existing
                data = self.all_data
            else:
                ## pull timeframe data
                data = self.get_keydata_all()
            ## getting all the sleep ids
            if self.all_sleep_events is not None:
                ## All sleep data already pulled
                return self.all_sleep_events
            else:
                if self.all_sleep is not None:
                    # Reuse the per-night pulls cached by get_sleep_all():
                    # expand each night's 'events' list and tag every event
                    # row with its sleep id ('id' column).
                    sleep_events = self.all_sleep[['activityId', 'events']]
                    all_sleep_events = pd.concat([
                        pd.concat([
                            pd.json_normalize(events),
                            pd.DataFrame({'id': len(events) * [sleep]})
                        ],
                                  axis=1) for events, sleep in
                        zip(sleep_events['events'], sleep_events['activityId'])
                    ])
                else:
                    # No sleep cache yet: fetch events per sleep id directly.
                    sleep_ids = data['sleep.id'].values.tolist()
                    sleep_list = [
                        int(x) for x in sleep_ids if pd.isna(x) == False
                    ]
                    all_sleep_events = pd.DataFrame()
                    for s in sleep_list:
                        events = self.pull_sleep_events(s)
                        all_sleep_events = pd.concat(
                            [all_sleep_events, events])
                ## Cleaning sleep events data
                all_sleep_events['during.lower'] = pd.to_datetime(
                    all_sleep_events['during.lower'])
                all_sleep_events['during.upper'] = pd.to_datetime(
                    all_sleep_events['during.upper'])
                all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)
                all_sleep_events['total_minutes'] = all_sleep_events.apply(
                    lambda x: (x['during.upper'] - x['during.lower']
                               ).total_seconds() / 60.0,
                    axis=1)
                self.all_sleep_events = all_sleep_events
                return all_sleep_events
        else:
            print("Whoop: Please run the authorization function first")
    # returnType: None -> list of [date, time, hr] rows;
    #             "df" -> DataFrame with date/time/hr columns;
    #             "json" -> list of {'datetime', 'hr'} dicts.
    def get_hr_all(self, returnType=None):
        '''
        This function will pull every heart rate measurement recorded for the life of WHOOP membership.
        The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
        The measurements are spaced out every ~6 seconds on average.
        To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.
        NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,
        so be careful when you pull, it may take a while.
        '''
        if self.start_datetime:
            athlete_id = self.whoop_id
            start_date = parser.isoparse(
                self.start_datetime).replace(tzinfo=None)
            end_time = 'T23:59:59.999Z'
            start_time = 'T00:00:00.000Z'
            # One-week windows from account creation through "now".
            intervals = rrule.rrule(freq=WEEKLY,
                                    interval=1,
                                    until=self.current_datetime,
                                    dtstart=start_date)
            date_range = [[
                d.strftime('%Y-%m-%d') + start_time,
                (d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                + end_time
            ] for d in intervals]
            hr_list = []
            for dates in date_range:
                start = dates[0]
                end = dates[1]
                ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
                    athlete_id, end, start)
                hr_vals = self.pull_api(ul)['values']
                # API timestamps are epoch milliseconds; split into date+time.
                hr_values = [[
                    datetime.utcfromtimestamp(h['time'] / 1e3).date(),
                    datetime.utcfromtimestamp(h['time'] / 1e3).time(),
                    h['data']
                ] for h in hr_vals]
                hr_list.extend(hr_values)
            if returnType == "df":
                hr_df = pd.DataFrame(hr_list)
                hr_df.columns = ['date', 'time', 'hr']
                return hr_df
            elif returnType == "json":
                hr_json = [{
                    'datetime': str(h[0]) + 'T' + str(h[1]),
                    'hr': h[2]
                } for h in hr_list]
                return hr_json
            else:
                return hr_list
        else:
            print("Please run the authorization function first")
    def get_keydata_timeframe(self,
                              start,
                              end=datetime.strftime(datetime.utcnow(),
                                                    "%Y-%m-%d")):
        '''
        This function returns a dataframe of WHOOP metrics for each day in a specified time period.
        To use this function, provide a start and end date in string format as follows "YYYY-MM-DD".
        If no end date is specified, it will default to today's date.
        In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
        '''
        # NOTE(review): the `end` default is evaluated once at class-definition
        # time, so it goes stale in a long-running process — consider
        # `end=None` resolved to "today" at call time.
        st = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
        if st > e:
            if e > datetime.today():
                print("Please enter an end date earlier than tomorrow")
            else:
                print(
                    "Please enter a start date that is earlier than your end date"
                )
        else:
            if self.auth_code:
                end_time = 'T23:59:59.999Z'
                start_time = 'T00:00:00.000Z'
                # One-week windows covering [start, end].
                intervals = rrule.rrule(freq=WEEKLY,
                                        interval=1,
                                        until=e,
                                        dtstart=st)
                date_range = [[
                    d.strftime('%Y-%m-%d') + start_time,
                    (d +
                     relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                    + end_time
                ] for d in intervals if d <= e]
                time_data = pd.DataFrame()
                for dates in date_range:
                    cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
                        self.whoop_id, dates[1], dates[0])
                    data = self.pull_api(cycle_url, df=True)
                    time_data = pd.concat([time_data, data])
                time_data.reset_index(drop=True, inplace=True)
                ## fixing the day column so it's not a list
                time_data['days'] = time_data['days'].map(lambda d: d[0])
                time_data.rename(columns={"days": 'day'}, inplace=True)
                ## Putting all time into minutes instead of milliseconds
                sleep_cols = [
                    'qualityDuration', 'needBreakdown.baseline',
                    'needBreakdown.debt', 'needBreakdown.naps',
                    'needBreakdown.strain', 'needBreakdown.total'
                ]
                for sleep_col in sleep_cols:
                    time_data['sleep.' + sleep_col] = time_data[
                        'sleep.' + sleep_col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                ## Making nap variable
                time_data['nap_duration'] = time_data['sleep.naps'].apply(
                    lambda x: x[0]['qualityDuration'] / 60000
                    if len(x) == 1 else (sum([
                        y['qualityDuration'] for y in x
                        if y['qualityDuration'] is not None
                    ]) / 60000 if len(x) > 1 else 0))
                time_data.drop(['sleep.naps'], axis=1, inplace=True)
                ## removing duplicates
                time_data.drop_duplicates(subset=['day', 'sleep.id'],
                                          inplace=True)
                return time_data
            else:
                print("Whoop: Please run the authorization function first")
def get_activities_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe
If no end date is specified, it will default to today's date.
'''
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {
sport['id']: sport['name']
for sport in sports
}
self.sport_dict = self.sport_dict
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
## pull timeframe data
data = self.get_keydata_timeframe(start, end)
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z +
1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime(
'%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
    def get_sleep_timeframe(self,
                            start,
                            end=datetime.strftime(datetime.utcnow(),
                                                  "%Y-%m-%d")):
        '''
        This function returns sleep metrics in a data frame, for timeframe specified by the user.
        Each row in the data frame represents one night of sleep.
        If no end date is specified, it will default to today's date.
        All sleep times are returned in minutes.
        '''
        # NOTE(review): the `end` default is evaluated once at class-definition
        # time and goes stale in a long-running process.
        st = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
        if st > e:
            if e > datetime.today():
                print("Whoop: Please enter an end date earlier than tomorrow")
            else:
                print(
                    "Whoop: Please enter a start date that is earlier than your end date"
                )
        else:
            if self.auth_code:
                if self.all_data is not None:
                    ## use existing, restricted to the requested window
                    data = self.all_data
                    data = data[(data.day >= start)
                                & (data.day <= end)].copy(deep=True)
                else:
                    ## pull timeframe data
                    data = self.get_keydata_timeframe(start, end)
                ## getting all the sleep ids (NaN means no sleep that day)
                sleep_ids = data['sleep.id'].values.tolist()
                sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
                if self.all_sleep is not None:
                    ## All sleep data already pulled so just filter
                    all_sleep = self.all_sleep
                    time_sleep = all_sleep[all_sleep.activityId.isin(
                        sleep_list)]
                    return time_sleep
                else:
                    time_sleep = pd.DataFrame()
                    for s in sleep_list:
                        m = self.pull_sleep_main(s)
                        time_sleep = pd.concat([time_sleep, m])
                    ## Cleaning sleep data: convert milliseconds to minutes
                    sleep_update = [
                        'qualityDuration', 'latency', 'debtPre', 'debtPost',
                        'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
                        'timeInBed', 'lightSleepDuration',
                        'slowWaveSleepDuration', 'remSleepDuration',
                        'wakeDuration', 'arousalTime', 'noDataDuration',
                        'creditFromNaps', 'projectedSleep'
                    ]
                    for col in sleep_update:
                        time_sleep[col] = time_sleep[col].astype(float).apply(
                            lambda x: np.nan if np.isnan(x) else x / 60000)
                    time_sleep.drop(['during.bounds', 'events'],
                                    axis=1,
                                    inplace=True)
                    return time_sleep
            else:
                print("Whoop: Please run the authorization function first")
def get_sleep_events_timeframe(self,
start,
end=datetime.strftime(
datetime.utcnow(), "%Y-%m-%d")):
'''
This function returns sleep events in a data frame, for the time frame specified by the user.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
If no end date is specified, it will default to today's date.
'''
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
## pull timeframe data
data = self.get_keydata_timeframe(start, end)
## getting all the sleep ids
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep_events is not None:
## All sleep data already pulled so just filter
all_sleep_events = self.all_sleep_events
time_sleep_events = all_sleep_events[
all_sleep_events.id.isin(sleep_list)]
return time_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
time_sleep = sleep_events[sleep_events.id.isin(
sleep_list)]
time_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(time_sleep['events'], time_sleep['activityId'])
])
else:
time_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
time_sleep_events = pd.concat(
[time_sleep_events, events])
## Cleaning sleep events data
time_sleep_events['during.lower'] = pd.to_datetime(
time_sleep_events['during.lower'])
time_sleep_events['during.upper'] = pd.to_datetime(
time_sleep_events['during.upper'])
time_sleep_events.drop(['during.bounds'],
axis=1,
inplace=True)
time_sleep_events[
'total_minutes'] = time_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
return time_sleep_events
else:
print("Whoop: Please run the authorization function first")
    def get_hr_timeframe(self,
                         start,
                         end=datetime.strftime(datetime.utcnow(), "%Y-%m-%d"),
                         returnType=None):
        '''
        This function will pull every heart rate measurement recorded, for the time frame specified by the user.
        The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
        The measurements are spaced out every ~6 seconds on average.
        To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.
        If no end date is specified, it will default to today's date.
        NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,
        so be careful when you pull, it may take a while.
        '''
        # NOTE(review): the `end` default is evaluated once at class-definition
        # time and goes stale in a long-running process.
        st = datetime.strptime(start, '%Y-%m-%d')
        e = datetime.strptime(end, '%Y-%m-%d')
        if st > e:
            if e > datetime.today():
                print("Whoop: Please enter an end date earlier than tomorrow")
            else:
                print(
                    "Whoop: Please enter a start date that is earlier than your end date"
                )
        else:
            if self.start_datetime:
                athlete_id = self.whoop_id
                # NOTE(review): start_date is computed but never used; the
                # window below is built from st/e instead.
                start_date = parser.isoparse(
                    self.start_datetime).replace(tzinfo=None)
                end_time = 'T23:59:59.999Z'
                start_time = 'T00:00:00.000Z'
                ## using the st and e since it needs the datetime formatted date
                intervals = rrule.rrule(freq=WEEKLY,
                                        interval=1,
                                        until=e,
                                        dtstart=st)
                date_range = [[
                    d.strftime('%Y-%m-%d') + start_time,
                    (d +
                     relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
                    + end_time
                ] for d in intervals]
                hr_list = []
                for dates in date_range:
                    # NOTE(review): the loop rebinds the `start`/`end`
                    # parameters; harmless here because the window list was
                    # already built, but easy to trip over when editing.
                    start = dates[0]
                    end = dates[1]
                    ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
                        athlete_id, end, start)
                    hr_vals = self.pull_api(ul)['values']
                    # API timestamps are epoch milliseconds.
                    hr_values = [[
                        str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),
                        str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),
                        h['data']
                    ] for h in hr_vals]
                    hr_list.extend(hr_values)
                if returnType == "df":
                    hr_df = pd.DataFrame(hr_list)
                    hr_df.columns = ['date', 'time', 'hr']
                    return hr_df
                elif returnType == "json":
                    hr_json = [{
                        'datetime': str(h[0]) + 'T' + str(h[1]),
                        'hr': h[2]
                    } for h in hr_list]
                    return hr_json
                else:
                    return hr_list
            else:
                print("Whoop: Please run the authorization function first")
| 43.993289 | 121 | 0.49251 | import requests
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class WhoopClient:
def __init__(self,
auth_code=None,
whoop_id=None,
current_datetime=datetime.utcnow()):
self.auth_code = auth_code
self.whoop_id = whoop_id
self.current_datetime = current_datetime
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def reset(self):
self.auth_code = None
self.whoop_id = None
self.current_datetime = datetime.utcnow()
self.start_datetime = None
self.all_data = None
self.all_activities = None
self.sport_dict = None
self.all_sleep = None
self.all_sleep_events = None
def pull_api(self, url, df=False):
auth_code = self.auth_code
headers = {'authorization': auth_code}
pull = requests.get(url, headers=headers)
if pull.status_code == 200 and len(pull.content) > 1:
if df:
d = pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
main_df = pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self, sleep_id):
athlete_id = self.whoop_id
sleep = self.pull_api(
'https://api-7.whoop.com/users/{}/sleeps/{}'.format(
athlete_id, sleep_id))
events_df = pd.json_normalize(sleep['events'])
events_df['id'] = sleep_id
return events_df
def get_authorization(self, user_ini):
config = configparser.ConfigParser()
config.read(user_ini)
username = config['whoop']['username']
password = config['whoop']['password']
headers = {
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False
}
auth = requests.post("https://api-7.whoop.com/oauth/token",
json=headers)
if auth.status_code == 200:
content = auth.json()
user_id = content['user']['id']
token = content['access_token']
start_time = content['user']['profile']['createdAt']
self.whoop_id = user_id
self.auth_code = 'bearer ' + token
self.start_datetime = start_time
print("Whoop: Authentication successful")
else:
print(
"Authentication failed - please double check your credentials")
def get_keydata_all(self):
if self.start_datetime:
if self.all_data is not None:
elf.all_data
else:
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
all_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
all_data = pd.concat([all_data, data])
all_data.reset_index(drop=True, inplace=True)
ta['days'].map(lambda d: d[0])
all_data.rename(columns={"days": 'day'}, inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols = [
'qualityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col] = all_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
## Making nap variable
all_data['nap_duration'] = all_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
all_data.drop(['sleep.naps'], axis=1, inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
self.all_data = all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {sport['id']: sport['name'] for sport in sports}
self.sport_dict = self.sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull all data to process activities
data = self.get_keydata_all()
## now process activities data
act_data = pd.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x:
(x['during.upper'] - x['during.lower']).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_all(self):
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
all_sleep = pd.DataFrame()
for s in sleep_list:
m = self.pull_sleep_main(s)
all_sleep = pd.concat([all_sleep, m])
## Cleaning sleep data
sleep_update = [
'qualityDuration', 'latency', 'debtPre', 'debtPost',
'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',
'remSleepDuration', 'wakeDuration', 'arousalTime',
'noDataDuration', 'creditFromNaps', 'projectedSleep'
]
for col in sleep_update:
all_sleep[col] = all_sleep[col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
all_sleep.drop(['during.bounds'], axis=1, inplace=True)
self.all_sleep = all_sleep.copy(deep=True)
all_sleep.drop(['events'], axis=1, inplace=True)
return all_sleep
else:
print("Whoop: Please run the authorization function first")
def get_sleep_events_all(self):
if self.auth_code:
if self.all_data is not None:
## use existing
data = self.all_data
else:
## pull timeframe data
data = self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep_events is not None:
## All sleep data already pulled
return self.all_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
all_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(sleep_events['events'], sleep_events['activityId'])
])
else:
sleep_ids = data['sleep.id'].values.tolist()
sleep_list = [
int(x) for x in sleep_ids if pd.isna(x) == False
]
all_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
all_sleep_events = pd.concat(
[all_sleep_events, events])
## Cleaning sleep events data
all_sleep_events['during.lower'] = pd.to_datetime(
all_sleep_events['during.lower'])
all_sleep_events['during.upper'] = pd.to_datetime(
all_sleep_events['during.upper'])
all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)
all_sleep_events['total_minutes'] = all_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
self.all_sleep_events = all_sleep_events
return all_sleep_events
else:
print("Whoop: Please run the authorization function first")
#returnTYpe = df, json
def get_hr_all(self, returnType=None):
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=self.current_datetime,
dtstart=start_date)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
datetime.utcfromtimestamp(h['time'] / 1e3).date(),
datetime.utcfromtimestamp(h['time'] / 1e3).time(),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Please run the authorization function first")
def get_keydata_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
intervals = rrule.rrule(freq=WEEKLY,
interval=1,
until=e,
dtstart=st)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals if d <= e]
time_data = pd.DataFrame()
for dates in date_range:
cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(
self.whoop_id, dates[1], dates[0])
data = self.pull_api(cycle_url, df=True)
time_data = pd.concat([time_data, data])
time_data.reset_index(drop=True, inplace=True)
## fixing the day column so it's not a list
time_data['days'] = time_data['days'].map(lambda d: d[0])
time_data.rename(columns={"days": 'day'}, inplace=True)
alityDuration', 'needBreakdown.baseline',
'needBreakdown.debt', 'needBreakdown.naps',
'needBreakdown.strain', 'needBreakdown.total'
]
for sleep_col in sleep_cols:
time_data['sleep.' + sleep_col] = time_data[
'sleep.' + sleep_col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
_data['nap_duration'] = time_data['sleep.naps'].apply(
lambda x: x[0]['qualityDuration'] / 60000
if len(x) == 1 else (sum([
y['qualityDuration'] for y in x
if y['qualityDuration'] is not None
]) / 60000 if len(x) > 1 else 0))
time_data.drop(['sleep.naps'], axis=1, inplace=True)
_data.drop_duplicates(subset=['day', 'sleep.id'],
inplace=True)
return time_data
else:
print("Whoop: Please run the authorization function first")
def get_activities_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print(
"Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.sport_dict:
sport_dict = self.sport_dict
else:
sports = self.pull_api('https://api-7.whoop.com/sports')
sport_dict = {
sport['id']: sport['name']
for sport in sports
}
self.sport_dict = self.sport_dict
f.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
d.json_normalize(
data[data['strain.workouts'].apply(len) > 0]
['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper', 'during.lower'
]] = act_data[['during.upper',
'during.lower']].apply(pd.to_datetime)
act_data['total_minutes'] = act_data.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
for z in range(0, 6):
act_data['zone{}_minutes'.format(
z +
1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)
act_data['sport_name'] = act_data.sportId.apply(
lambda x: sport_dict[x])
act_data['day'] = act_data['during.lower'].dt.strftime(
'%Y-%m-%d')
act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities = act_data
return act_data
else:
print("Whoop: Please run the authorization function first")
def get_sleep_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(),
"%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
= data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep is not None:
p
time_sleep = all_sleep[all_sleep.activityId.isin(
sleep_list)]
return time_sleep
else:
time_sleep = pd.DataFrame()
for s in sleep_list:
m = self.pull_sleep_main(s)
time_sleep = pd.concat([time_sleep, m])
sleep_update = [
'qualityDuration', 'latency', 'debtPre', 'debtPost',
'needFromStrain', 'sleepNeed', 'habitualSleepNeed',
'timeInBed', 'lightSleepDuration',
'slowWaveSleepDuration', 'remSleepDuration',
'wakeDuration', 'arousalTime', 'noDataDuration',
'creditFromNaps', 'projectedSleep'
]
for col in sleep_update:
time_sleep[col] = time_sleep[col].astype(float).apply(
lambda x: np.nan if np.isnan(x) else x / 60000)
time_sleep.drop(['during.bounds', 'events'],
axis=1,
inplace=True)
return time_sleep
else:
print("Whoop: Please run the authorization function first")
def get_sleep_events_timeframe(self,
start,
end=datetime.strftime(
datetime.utcnow(), "%Y-%m-%d")):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.auth_code:
if self.all_data is not None:
data = self.all_data
data = data[(data.day >= start)
& (data.day <= end)].copy(deep=True)
else:
data = self.get_keydata_timeframe(start, end)
= data['sleep.id'].values.tolist()
sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]
if self.all_sleep_events is not None:
ll_sleep_events
time_sleep_events = all_sleep_events[
all_sleep_events.id.isin(sleep_list)]
return time_sleep_events
else:
if self.all_sleep is not None:
sleep_events = self.all_sleep[['activityId', 'events']]
time_sleep = sleep_events[sleep_events.id.isin(
sleep_list)]
time_sleep_events = pd.concat([
pd.concat([
pd.json_normalize(events),
pd.DataFrame({'id': len(events) * [sleep]})
],
axis=1) for events, sleep in
zip(time_sleep['events'], time_sleep['activityId'])
])
else:
time_sleep_events = pd.DataFrame()
for s in sleep_list:
events = self.pull_sleep_events(s)
time_sleep_events = pd.concat(
[time_sleep_events, events])
eep_events['during.lower'] = pd.to_datetime(
time_sleep_events['during.lower'])
time_sleep_events['during.upper'] = pd.to_datetime(
time_sleep_events['during.upper'])
time_sleep_events.drop(['during.bounds'],
axis=1,
inplace=True)
time_sleep_events[
'total_minutes'] = time_sleep_events.apply(
lambda x: (x['during.upper'] - x['during.lower']
).total_seconds() / 60.0,
axis=1)
return time_sleep_events
else:
print("Whoop: Please run the authorization function first")
def get_hr_timeframe(self,
start,
end=datetime.strftime(datetime.utcnow(), "%Y-%m-%d"),
returnType=None):
st = datetime.strptime(start, '%Y-%m-%d')
e = datetime.strptime(end, '%Y-%m-%d')
if st > e:
if e > datetime.today():
print("Whoop: Please enter an end date earlier than tomorrow")
else:
print(
"Whoop: Please enter a start date that is earlier than your end date"
)
else:
if self.start_datetime:
athlete_id = self.whoop_id
start_date = parser.isoparse(
self.start_datetime).replace(tzinfo=None)
end_time = 'T23:59:59.999Z'
start_time = 'T00:00:00.000Z'
interval=1,
until=e,
dtstart=st)
date_range = [[
d.strftime('%Y-%m-%d') + start_time,
(d +
relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')
+ end_time
] for d in intervals]
hr_list = []
for dates in date_range:
start = dates[0]
end = dates[1]
ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(
athlete_id, end, start)
hr_vals = self.pull_api(ul)['values']
hr_values = [[
str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),
str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),
h['data']
] for h in hr_vals]
hr_list.extend(hr_values)
if returnType == "df":
hr_df = pd.DataFrame(hr_list)
hr_df.columns = ['date', 'time', 'hr']
return hr_df
elif returnType == "json":
hr_json = [{
'datetime': str(h[0]) + 'T' + str(h[1]),
'hr': h[2]
} for h in hr_list]
return hr_json
else:
return hr_list
else:
print("Whoop: Please run the authorization function first")
| true | true |
f72f8c8616ebfc02596b74d7d800b811e4d3bb3f | 18,406 | py | Python | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | third_party/texar-0.2.0/examples/bert/utils/data_utils.py | swidi/poemo-generation | 3a349ac3a6fc3e82b24410013bced60a24c2d8bf | [
"MIT"
] | null | null | null | """
This is the Data Loading Pipeline for Sentence Classifier Task from
https://github.com/google-research/bert/blob/master/run_classifier.py
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import collections
import sys
sys.path.append(os.path.dirname(__file__))
import tokenization
import tensorflow as tf
class InputExample():
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence.
For single sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second
sequence. Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures():
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class SSTProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
if set_type == 'train' or set_type == 'dev':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[0])
# Single sentence classification, text_b doesn't exist
text_b = None
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
if set_type == 'test':
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[1])
# Single sentence classification, text_b doesn't exist
text_b = None
label = '0' # arbitrary set as 0
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type,
tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")),
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention rule is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# segment_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# sigment_ids: 0 0 0 0 0 0 0
#
# Where "segment_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
# here we disable the verbose printing of the data
if ex_index < 0:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_ids length: %d" % len(input_ids))
tf.logging.info("input_mask: %s" %\
" ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" %\
" ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then
# each token that's truncated likely contains more information than a
# longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def prepare_TFRecord_data(processor, tokenizer,
data_dir, max_seq_length, output_dir):
"""
Args:
processor: Data Preprocessor, which must have get_lables,
get_train/dev/test/examples methods defined.
tokenizer: The Sentence Tokenizer. Generally should be
SentencePiece Model.
data_dir: The input data directory.
max_seq_length: Max sequence length.
batch_size: mini-batch size.
model: `train`, `eval` or `test`.
output_dir: The directory to save the TFRecord in.
"""
label_list = processor.get_labels()
train_examples = processor.get_train_examples(data_dir)
train_file = os.path.join(output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, max_seq_length,
tokenizer, train_file)
eval_examples = processor.get_dev_examples(data_dir)
eval_file = os.path.join(output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list,
max_seq_length, tokenizer, eval_file)
test_examples = processor.get_test_examples(data_dir)
test_file = os.path.join(output_dir, "predict.tf_record")
file_based_convert_examples_to_features(
test_examples, label_list,
max_seq_length, tokenizer, test_file)
| 38.10766 | 80 | 0.60703 |
import os
import csv
import collections
import sys
sys.path.append(os.path.dirname(__file__))
import tokenization
import tensorflow as tf
class InputExample():
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures():
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
    """Base class for dataset converters.

    Subclasses override the get_*_examples hooks to load a specific
    dataset into `InputExample`s; `_read_tsv` is a shared helper for
    tab-separated files.
    """
    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()
    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the test set."""
        raise NotImplementedError()
    def get_labels(self):
        """Gets the list of string labels for this data set."""
        raise NotImplementedError()
    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab-separated file via tf.gfile into a list of rows."""
        with tf.gfile.Open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
class SSTProcessor(DataProcessor):
    """Processor for the SST data set (binary sentence classification).

    train/dev rows are (sentence, label); test rows put the sentence in
    column 1 and carry no label.
    """
    def get_train_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
    def get_test_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
    def get_labels(self):
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s, skipping the header row (i == 0)."""
        examples = []
        if set_type == 'train' or set_type == 'dev':
            for (i, line) in enumerate(lines):
                if i == 0:
                    continue
                guid = "%s-%s" % (set_type, i)
                text_a = tokenization.convert_to_unicode(line[0])
                text_b = None
                label = tokenization.convert_to_unicode(line[1])
                examples.append(InputExample(guid=guid, text_a=text_a,
                                             text_b=text_b, label=label))
        if set_type == 'test':
            for (i, line) in enumerate(lines):
                if i == 0:
                    continue
                guid = "%s-%s" % (set_type, i)
                text_a = tokenization.convert_to_unicode(line[1])
                # Single sentence classification, text_b doesn't exist;
                # unlabeled test data gets a placeholder label "0".
                text_b = None
                label = '0'
                examples.append(InputExample(guid=guid, text_a=text_a,
                                             text_b=text_b, label=label))
        return examples
class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set (three-way entailment).

    Trains on the MultiNLI translation for `self.language` and evaluates
    on xnli.dev.tsv rows matching that language.
    """
    def __init__(self):
        # Language code used to select the training file and filter dev rows.
        self.language = "zh"
    def get_train_examples(self, data_dir):
        lines = self._read_tsv(
            os.path.join(data_dir, "multinli",
                         "multinli.train.%s.tsv" % self.language))
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "train-%d" % (i)
            text_a = tokenization.convert_to_unicode(line[0])
            text_b = tokenization.convert_to_unicode(line[1])
            label = tokenization.convert_to_unicode(line[2])
            # Normalize the "contradictory" label variant to "contradiction".
            if label == tokenization.convert_to_unicode("contradictory"):
                label = tokenization.convert_to_unicode("contradiction")
            examples.append(InputExample(guid=guid, text_a=text_a,
                                         text_b=text_b, label=label))
        return examples
    def get_dev_examples(self, data_dir):
        lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "dev-%d" % (i)
            # Column 0 is the row's language; keep only self.language rows.
            language = tokenization.convert_to_unicode(line[0])
            if language != tokenization.convert_to_unicode(self.language):
                continue
            text_a = tokenization.convert_to_unicode(line[6])
            text_b = tokenization.convert_to_unicode(line[7])
            label = tokenization.convert_to_unicode(line[1])
            examples.append(InputExample(guid=guid, text_a=text_a,
                                         text_b=text_b, label=label))
        return examples
    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (matched splits).

    Sentence pairs come from columns 8/9; the gold label is the last
    column (test rows get a placeholder "contradiction").
    """
    def get_train_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")
    def get_test_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
            "test")
    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s, skipping the header row (i == 0)."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            # Column 0 is the dataset's own example id.
            guid = "%s-%s" % (set_type,
                              tokenization.convert_to_unicode(line[0]))
            text_a = tokenization.convert_to_unicode(line[8])
            text_b = tokenization.convert_to_unicode(line[9])
            if set_type == "test":
                label = "contradiction"
            else:
                label = tokenization.convert_to_unicode(line[-1])
            examples.append(InputExample(guid=guid, text_a=text_a,
                                         text_b=text_b, label=label))
        return examples
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (binary sentence-pair task).

    Sentence pairs come from columns 3/4; the label is column 0 (test
    rows get a placeholder "0").
    """
    def get_train_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev")
    def get_test_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")),
            "test")
    def get_labels(self):
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s, skipping the header row (i == 0)."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = tokenization.convert_to_unicode(line[3])
            text_b = tokenization.convert_to_unicode(line[4])
            if set_type == "test":
                label = "0"
            else:
                label = tokenization.convert_to_unicode(line[0])
            examples.append(InputExample(guid=guid, text_a=text_a,
                                         text_b=text_b, label=label))
        return examples
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (single-sentence acceptability).

    train/dev rows have no header and carry (…, label@1, …, sentence@3);
    test rows have a header and put the sentence in column 1.
    """
    def get_train_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev")
    def get_test_examples(self, data_dir):
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")),
            "test")
    def get_labels(self):
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Builds `InputExample`s; only the test split has a header row."""
        examples = []
        for (i, line) in enumerate(lines):
            if set_type == "test" and i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            if set_type == "test":
                text_a = tokenization.convert_to_unicode(line[1])
                label = "0"
            else:
                text_a = tokenization.convert_to_unicode(line[3])
                label = tokenization.convert_to_unicode(line[1])
            examples.append(InputExample(guid=guid, text_a=text_a,
                                         text_b=None, label=label))
        return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
    """Convert a single `InputExample` into a single `InputFeatures`.

    Layout is the standard BERT packing: [CLS] A… [SEP] (B… [SEP]), with
    segment id 0 for the first sequence and 1 for the second, padded with
    zeros up to max_seq_length.

    Args:
        ex_index: Index of the example in its split (used for debug logging).
        example: `InputExample` providing text_a, optional text_b and label.
        label_list: All label strings; the label id is the index into it.
        max_seq_length: Fixed output length for ids/mask/segments.
        tokenizer: Object with tokenize() and convert_tokens_to_ids().

    Returns:
        An `InputFeatures` with input_ids, input_mask, segment_ids, label_id.
    """
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)
    if tokens_b:
        # Account for [CLS], [SEP], [SEP] with "- 3".
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2".
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:(max_seq_length - 2)]
    # BUG FIX: the buffer initialization had been truncated to a bare
    # `s.append(0)`, so `tokens`/`segment_ids` were never created and the
    # leading [CLS] was missing (NameError at runtime). Restore it.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)
    if tokens_b:
        for token in tokens_b:
            tokens.append(token)
            segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Mask is 1 for real tokens, 0 for padding.
    input_mask = [1] * len(input_ids)
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    label_id = label_map[example.label]
    # NOTE(review): `ex_index < 0` can never be true for an enumerate index,
    # so this debug logging is dead; upstream BERT uses `< 5` — confirm
    # intent before enabling. Kept as-is to preserve behavior.
    if ex_index < 0:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_ids length: %d" % len(input_ids))
        tf.logging.info("input_mask: %s" %\
            " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" %\
            " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
    feature = InputFeatures(input_ids=input_ids,
                            input_mask=input_mask,
                            segment_ids=segment_ids,
                            label_id=label_id)
    return feature
def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a list of `InputExample`s and write them to a TFRecord file.

    Each example becomes one tf.train.Example with int64 features
    input_ids, input_mask, segment_ids and label_ids.
    """
    writer = tf.python_io.TFRecordWriter(output_file)
    # BUG FIX: the writer was never closed, leaking the file handle and
    # risking unflushed records; guarantee close() even on error.
    try:
        for (ex_index, example) in enumerate(examples):
            feature = convert_single_example(ex_index, example, label_list,
                                             max_seq_length, tokenizer)
            def create_int_feature(values):
                return tf.train.Feature(
                    int64_list=tf.train.Int64List(value=list(values)))
            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature([feature.label_id])
            tf_example = tf.train.Example(
                features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        writer.close()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
# longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def prepare_TFRecord_data(processor, tokenizer,
                          data_dir, max_seq_length, output_dir):
    """Write the processor's train/dev/test splits to TFRecord files.

    Produces train.tf_record, eval.tf_record and predict.tf_record in
    output_dir, in that order.
    """
    label_list = processor.get_labels()
    splits = (
        (processor.get_train_examples, "train.tf_record"),
        (processor.get_dev_examples, "eval.tf_record"),
        (processor.get_test_examples, "predict.tf_record"),
    )
    for load_examples, record_name in splits:
        split_examples = load_examples(data_dir)
        record_path = os.path.join(output_dir, record_name)
        file_based_convert_examples_to_features(
            split_examples, label_list, max_seq_length,
            tokenizer, record_path)
| true | true |
f72f8d11340df1f1d9eb56840c1b60800b76a5a8 | 827 | py | Python | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null |
class Calculator(object):
    """Evaluate a space-separated infix expression such as "2 + 3 * 4".

    Supports +, -, *, / with the usual precedence (* and / before + and -),
    evaluated left to right within each precedence tier.
    """

    def evaluate(self, string):
        """Parse and evaluate `string`, returning an int or float result."""
        tokens = self._tokenize(string)
        tokens = self._reduce(tokens, ("*", "/"))
        tokens = self._reduce(tokens, ("+", "-"))
        return tokens[0]

    @staticmethod
    def _tokenize(string):
        """Split on spaces into operator strings and numeric values.

        FIX: the original left signed tokens like "-3" as strings (only
        str.isdigit()/'.'-detection was used), which later crashed the
        arithmetic; int()/float() handle signs directly. Integers stay int
        so integer-only expressions keep their original result type.
        Debug print() calls from the original were removed.
        """
        tokens = []
        for tok in string.split(" "):
            if tok in ("+", "-", "*", "/"):
                tokens.append(tok)
            else:
                try:
                    tokens.append(int(tok))
                except ValueError:
                    tokens.append(float(tok))
        return tokens

    @staticmethod
    def _reduce(tokens, ops):
        """Collapse every operator in `ops` left-to-right in one pass.

        Replaces the original O(n^2) rescan-after-each-operation loops
        (which also shadowed their loop variable `i`).
        """
        result = [tokens[0]]
        i = 1
        while i < len(tokens):
            op, rhs = tokens[i], tokens[i + 1]
            if op in ops:
                lhs = result.pop()
                if op == "*":
                    result.append(lhs * rhs)
                elif op == "/":
                    result.append(lhs / rhs)
                elif op == "+":
                    result.append(lhs + rhs)
                else:
                    result.append(lhs - rhs)
            else:
                result.append(op)
                result.append(rhs)
            i += 2
        return result
class Calculator(object):
    """Evaluates a space-separated infix expression like "2 + 3 * 4",
    applying * and / before + and -, left to right within each tier."""
    def evaluate(self, string):
        # NOTE(review): the two print() calls look like leftover debugging.
        print(string)
        # Convert digit-only tokens to int, tokens containing '.' to float;
        # operator tokens stay strings. NOTE(review): signed tokens such as
        # "-3" would also stay strings and break the arithmetic below.
        cmd = [int(s) if s.isdigit() else s for s in string.split(" ")]
        cmd = [float(s) if isinstance(s, str) and s.find('.') != -1 else s for s in cmd]
        print(cmd)
        # One outer pass per '*'/'/' operator; each inner scan collapses the
        # leftmost one in place, then restarts (outer `i` is shadowed by the
        # inner enumerate index on purpose? — it is never read).
        for i in range(sum([1 if s == '*' or s == '/' else 0 for s in cmd])):
            for i, p in enumerate(cmd):
                if p == '*':
                    cmd[i - 1] = cmd[i - 1] * cmd[i + 1]
                    del cmd[i:i+2]
                    break
                elif p == '/':
                    cmd[i - 1] = cmd[i - 1] / cmd[i + 1]
                    del cmd[i:i+2]
                    break
        # Same scheme for the lower-precedence '+'/'-' operators.
        for i in range(sum([1 if s == '+' or s == '-' else 0 for s in cmd])):
            for i, p in enumerate(cmd):
                if p == '+':
                    cmd[i - 1] = cmd[i - 1] + cmd[i + 1]
                    del cmd[i:i+2]
                    break
                elif p == '-':
                    cmd[i - 1] = cmd[i - 1] - cmd[i + 1]
                    del cmd[i:i+2]
                    break
        # All operators consumed: the single remaining element is the result.
        return cmd[0]
f72f8d12e338c378aeef5bdcbc21e4ce4e2a2aa1 | 7,335 | py | Python | src/vmware/azext_vmware/_help.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | 1 | 2021-04-22T09:20:58.000Z | 2021-04-22T09:20:58.000Z | src/vmware/azext_vmware/_help.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | 1 | 2020-07-30T06:44:01.000Z | 2020-07-30T06:44:01.000Z | src/vmware/azext_vmware/_help.py | Juliehzl/azure-cli-extensions | b0b33f4d45c2e4c50ece782851291d967e1f36e2 | [
"MIT"
] | 1 | 2020-11-09T17:17:42.000Z | 2020-11-09T17:17:42.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['vmware'] = """
type: group
short-summary: Commands to manage Azure VMware Solution.
"""
helps['vmware private-cloud'] = """
type: group
short-summary: Commands to manage private clouds.
"""
helps['vmware cluster'] = """
type: group
short-summary: Commands to manage clusters in a private cloud.
"""
helps['vmware authorization'] = """
type: group
short-summary: Commands to manage the authorizations of an ExpressRoute Circuit for a private cloud.
"""
helps['vmware hcx-enterprise-site'] = """
type: group
short-summary: Commands to manage HCX Enterprise Sites in a private cloud.
"""
helps['vmware location'] = """
type: group
short-summary: Commands to check availability by location.
"""
helps['vmware datastore'] = """
type: group
short-summary: Commands to manage a datastore in a private cloud cluster.
"""
helps['vmware cluster create'] = """
type: command
short-summary: Create a cluster in a private cloud. The maximum number of clusters is 4.
"""
helps['vmware cluster delete'] = """
type: command
short-summary: Delete a cluster in a private cloud.
"""
helps['vmware cluster list'] = """
type: command
short-summary: List clusters in a private cloud.
"""
helps['vmware cluster show'] = """
type: command
short-summary: Show details of a cluster in a private cloud.
"""
helps['vmware cluster update'] = """
type: command
short-summary: Update a cluster in a private cloud.
"""
helps['vmware private-cloud addidentitysource'] = """
type: command
short-summary: Add a vCenter Single Sign On Identity Source to a private cloud.
"""
helps['vmware private-cloud create'] = """
type: command
short-summary: Create a private cloud.
"""
helps['vmware private-cloud delete'] = """
type: command
short-summary: Delete a private cloud.
"""
helps['vmware private-cloud deleteidentitysource'] = """
type: command
short-summary: Delete a vCenter Single Sign On Identity Source for a private cloud.
"""
helps['vmware private-cloud list'] = """
type: command
short-summary: List the private clouds.
"""
helps['vmware private-cloud listadmincredentials'] = """
type: command
short-summary: List the admin credentials for the private cloud.
"""
helps['vmware private-cloud show'] = """
type: command
short-summary: Show details of a private cloud.
"""
helps['vmware private-cloud update'] = """
type: command
short-summary: Update a private cloud.
"""
helps['vmware private-cloud rotate-vcenter-password'] = """
type: command
short-summary: Rotate the vCenter password.
examples:
- name: Rotate the vCenter password.
text: az vmware private-cloud rotate-vcenter-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware private-cloud rotate-nsxt-password'] = """
type: command
short-summary: Rotate the NSX-T Manager password.
examples:
- name: Rotate the NSX-T Manager password.
text: az vmware private-cloud rotate-nsxt-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware authorization create'] = """
type: command
short-summary: Create an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization list'] = """
type: command
short-summary: List authorizations for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization show'] = """
type: command
short-summary: Show details of an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization delete'] = """
type: command
short-summary: Delete an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware hcx-enterprise-site create'] = """
type: command
short-summary: Create an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site list'] = """
type: command
short-summary: List HCX Enterprise Sites in a private cloud.
"""
helps['vmware hcx-enterprise-site show'] = """
type: command
short-summary: Show details of an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site delete'] = """
type: command
short-summary: Delete an HCX Enterprise Site in a private cloud.
"""
helps['vmware location checkquotaavailability'] = """
type: command
short-summary: Return quota for subscription by region.
"""
helps['vmware location checktrialavailability'] = """
type: command
short-summary: Return trial status for subscription by region.
"""
helps['vmware datastore create'] = """
type: command
short-summary: Create a datastore in a private cloud cluster.
examples:
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 --lun-name lun0
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore with multiple endpoints.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 10.10.0.2:3260 --lun-name lun0
- name: Create a new Microsoft.NetApp provided NetApp volume based NFSv3 datastore.
text: az vmware datastore create --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --nfs-file-path ANFVol1FilePath --nfs-provider-ip 10.10.0.1
"""
helps['vmware datastore show'] = """
type: command
short-summary: Show details of a datastore in a private cloud cluster.
examples:
- name: Show the details of an iSCSI or NFS based datastore.
text: az vmware datastore show --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore list'] = """
type: command
short-summary: List datastores in a private cloud cluster.
examples:
- name: List all iSCSI or NFS based datastores under Cluster-1.
text: az vmware datastore list --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore delete'] = """
type: command
short-summary: Delete a datastore in a private cloud cluster.
examples:
- name: Delete an iSCSI or NFS based datastore.
text: az vmware datastore delete --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
| 34.763033 | 204 | 0.671438 |
from knack.help_files import helps
helps['vmware'] = """
type: group
short-summary: Commands to manage Azure VMware Solution.
"""
helps['vmware private-cloud'] = """
type: group
short-summary: Commands to manage private clouds.
"""
helps['vmware cluster'] = """
type: group
short-summary: Commands to manage clusters in a private cloud.
"""
helps['vmware authorization'] = """
type: group
short-summary: Commands to manage the authorizations of an ExpressRoute Circuit for a private cloud.
"""
helps['vmware hcx-enterprise-site'] = """
type: group
short-summary: Commands to manage HCX Enterprise Sites in a private cloud.
"""
helps['vmware location'] = """
type: group
short-summary: Commands to check availability by location.
"""
helps['vmware datastore'] = """
type: group
short-summary: Commands to manage a datastore in a private cloud cluster.
"""
helps['vmware cluster create'] = """
type: command
short-summary: Create a cluster in a private cloud. The maximum number of clusters is 4.
"""
helps['vmware cluster delete'] = """
type: command
short-summary: Delete a cluster in a private cloud.
"""
helps['vmware cluster list'] = """
type: command
short-summary: List clusters in a private cloud.
"""
helps['vmware cluster show'] = """
type: command
short-summary: Show details of a cluster in a private cloud.
"""
helps['vmware cluster update'] = """
type: command
short-summary: Update a cluster in a private cloud.
"""
helps['vmware private-cloud addidentitysource'] = """
type: command
short-summary: Add a vCenter Single Sign On Identity Source to a private cloud.
"""
helps['vmware private-cloud create'] = """
type: command
short-summary: Create a private cloud.
"""
helps['vmware private-cloud delete'] = """
type: command
short-summary: Delete a private cloud.
"""
helps['vmware private-cloud deleteidentitysource'] = """
type: command
short-summary: Delete a vCenter Single Sign On Identity Source for a private cloud.
"""
helps['vmware private-cloud list'] = """
type: command
short-summary: List the private clouds.
"""
helps['vmware private-cloud listadmincredentials'] = """
type: command
short-summary: List the admin credentials for the private cloud.
"""
helps['vmware private-cloud show'] = """
type: command
short-summary: Show details of a private cloud.
"""
helps['vmware private-cloud update'] = """
type: command
short-summary: Update a private cloud.
"""
helps['vmware private-cloud rotate-vcenter-password'] = """
type: command
short-summary: Rotate the vCenter password.
examples:
- name: Rotate the vCenter password.
text: az vmware private-cloud rotate-vcenter-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware private-cloud rotate-nsxt-password'] = """
type: command
short-summary: Rotate the NSX-T Manager password.
examples:
- name: Rotate the NSX-T Manager password.
text: az vmware private-cloud rotate-nsxt-password --resource-group MyResourceGroup --private-cloud MyPrivateCloud
"""
helps['vmware authorization create'] = """
type: command
short-summary: Create an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization list'] = """
type: command
short-summary: List authorizations for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization show'] = """
type: command
short-summary: Show details of an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware authorization delete'] = """
type: command
short-summary: Delete an authorization for an ExpressRoute Circuit in a private cloud.
"""
helps['vmware hcx-enterprise-site create'] = """
type: command
short-summary: Create an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site list'] = """
type: command
short-summary: List HCX Enterprise Sites in a private cloud.
"""
helps['vmware hcx-enterprise-site show'] = """
type: command
short-summary: Show details of an HCX Enterprise Site in a private cloud.
"""
helps['vmware hcx-enterprise-site delete'] = """
type: command
short-summary: Delete an HCX Enterprise Site in a private cloud.
"""
helps['vmware location checkquotaavailability'] = """
type: command
short-summary: Return quota for subscription by region.
"""
helps['vmware location checktrialavailability'] = """
type: command
short-summary: Return trial status for subscription by region.
"""
helps['vmware datastore create'] = """
type: command
short-summary: Create a datastore in a private cloud cluster.
examples:
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 --lun-name lun0
- name: Create a new Microsoft.StoragePool provided disk pool based iSCSI datastore with multiple endpoints.
text: az vmware datastore create --name iSCSIDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --endpoints 10.10.0.1:3260 10.10.0.2:3260 --lun-name lun0
- name: Create a new Microsoft.NetApp provided NetApp volume based NFSv3 datastore.
text: az vmware datastore create --name ANFDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud --nfs-file-path ANFVol1FilePath --nfs-provider-ip 10.10.0.1
"""
helps['vmware datastore show'] = """
type: command
short-summary: Show details of a datastore in a private cloud cluster.
examples:
- name: Show the details of an iSCSI or NFS based datastore.
text: az vmware datastore show --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore list'] = """
type: command
short-summary: List datastores in a private cloud cluster.
examples:
- name: List all iSCSI or NFS based datastores under Cluster-1.
text: az vmware datastore list --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
helps['vmware datastore delete'] = """
type: command
short-summary: Delete a datastore in a private cloud cluster.
examples:
- name: Delete an iSCSI or NFS based datastore.
text: az vmware datastore delete --name MyCloudSANDatastore1 --resource-group MyResourceGroup --cluster Cluster-1 --private-cloud MyPrivateCloud
"""
| true | true |
f72f8d9198afb6938ccdaa898c7765d5213e95ab | 1,699 | py | Python | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 4 | 2015-10-19T10:49:47.000Z | 2017-03-22T13:06:32.000Z | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 1 | 2015-04-16T17:37:15.000Z | 2015-04-17T08:05:21.000Z | docs/source/example_models.py | 2degrees/djeneralize | ef456292397888bd0476786d9f4ab2854b3604ef | [
"BSD-3-Clause"
] | 3 | 2015-08-12T12:28:03.000Z | 2015-09-30T09:45:40.000Z | from django.db import models
from djeneralize.models import BaseGeneralizationModel
from djeneralize.fields import SpecializedForeignKey
#{ General model
class WritingImplement(BaseGeneralizationModel):
    # Fields shared by every specialization of a writing implement.
    name = models.CharField(max_length=30)
    length = models.IntegerField()
    # Optional link to the (specialized) holder containing this implement.
    # NOTE(review): no on_delete argument — required on Django >= 2.0;
    # confirm the project's Django version.
    holder = SpecializedForeignKey(
        'WritingImplementHolder', null=True, blank=True)
    def __unicode__(self):
        return self.name
#{ Direct children of WritingImplement, i.e. first specialization
class Pencil(WritingImplement):
    lead = models.CharField(max_length=2)  # e.g. HB, B2, H5
    class Meta:
        specialization = 'pencil'
class Pen(WritingImplement):
    ink_colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pen'
#{ Grand-children of WritingImplement, i.e. second degree of specialization
class FountainPen(Pen):
    nib_width = models.DecimalField(max_digits=3, decimal_places=2)
    class Meta:
        specialization = 'fountain_pen'
class BallPointPen(Pen):
    replaceable_insert = models.BooleanField(default=False)
    class Meta:
        specialization = 'ballpoint_pen'
#{ Writing implement holders general model
class WritingImplementHolder(BaseGeneralizationModel):
    name = models.CharField(max_length=30)
    def __unicode__(self):
        return self.name
#{ Writing implement holders specializations
class StationaryCupboard(WritingImplementHolder):
    # NOTE(review): "Stationary" is presumably a misspelling of
    # "Stationery"; the identifier and specialization string are kept
    # as-is since renaming would change the schema/API.
    volume = models.FloatField()
    class Meta:
        specialization = 'stationary_cupboard'
class PencilCase(WritingImplementHolder):
    colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pencil_case'
#}
from django.db import models
from djeneralize.models import BaseGeneralizationModel
from djeneralize.fields import SpecializedForeignKey
# General (base) model for all writing implements.
class WritingImplement(BaseGeneralizationModel):
    name = models.CharField(max_length=30)
    length = models.IntegerField()
    # Optional link to the (specialized) holder containing this implement.
    holder = SpecializedForeignKey(
        'WritingImplementHolder', null=True, blank=True)
    def __unicode__(self):
        return self.name
# First-degree specializations of WritingImplement.
class Pencil(WritingImplement):
    lead = models.CharField(max_length=2)  # e.g. HB, B2, H5
    class Meta:
        specialization = 'pencil'
class Pen(WritingImplement):
    ink_colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pen'
# Second-degree specializations (children of Pen).
class FountainPen(Pen):
    nib_width = models.DecimalField(max_digits=3, decimal_places=2)
    class Meta:
        specialization = 'fountain_pen'
class BallPointPen(Pen):
    replaceable_insert = models.BooleanField(default=False)
    class Meta:
        specialization = 'ballpoint_pen'
# General (base) model for writing implement holders.
class WritingImplementHolder(BaseGeneralizationModel):
    name = models.CharField(max_length=30)
    def __unicode__(self):
        return self.name
# Holder specializations.
class StationaryCupboard(WritingImplementHolder):
    volume = models.FloatField()
    class Meta:
        specialization = 'stationary_cupboard'
class PencilCase(WritingImplementHolder):
    colour = models.CharField(max_length=30)
    class Meta:
        specialization = 'pencil_case'
f72f8e30417092439e50e50b97f2efb35888640b | 2,204 | py | Python | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 2 | 2019-11-02T23:40:23.000Z | 2019-12-01T22:24:57.000Z | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | null | null | null | laikaboss/modules/explode_re_sub.py | sandialabs/laikaboss | 3064ac1176911651d61c5176e9bd83eacec36b16 | [
"Apache-2.0"
] | 3 | 2017-08-09T23:58:40.000Z | 2019-12-01T22:25:06.000Z | # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import binascii
from laikaboss.si_module import SI_MODULE
from laikaboss.objectmodel import ModuleObject, ExternalVars
import laikaboss.util
class EXPLODE_RE_SUB(SI_MODULE):
    '''
    Laika module that rewrites a scanned buffer with re.sub and emits the
    result as a child object for further scanning.

    Options (module args, with config fallbacks):
      pattern / resub_pattern           - regex to search for (utf-8 encoded)
      replacement / resub_resplacement  - replacement text (utf-8 encoded).
                                          NOTE(review): "resplacement" looks
                                          like a typo, but it is the config
                                          key deployments already use, so it
                                          is kept for compatibility.
      pattern_hex / resub_pattern_hex   - hex-encoded pattern override
      replacement_hex / resub_replacement_hex - hex-encoded replacement override
      name / resub_name                 - suffix for the child object filename
    '''
    def __init__(self,):
        self.module_name = "EXPLODE_RE_SUB"
        # Compiled-pattern cache plus the source bytes it was built from.
        self.re_pattern = None
        self._re_pattern_src = None
    def _run(self, scanObject, result, depth, args):
        moduleResult = []
        buffer = scanObject.buffer
        pattern = laikaboss.util.get_option(args, 'pattern', 'resub_pattern', "uhm").encode('utf-8')
        replacement = laikaboss.util.get_option(args, 'replacement', 'resub_resplacement', "").encode('utf-8')
        pattern_hex = laikaboss.util.get_option(args, 'pattern_hex', 'resub_pattern_hex', "")
        if pattern_hex:
            pattern = binascii.unhexlify(pattern_hex)
        replacement_hex = laikaboss.util.get_option(args, 'replacement_hex', 'resub_replacement_hex', "")
        if replacement_hex:
            replacement = binascii.unhexlify(replacement_hex)
        name = laikaboss.util.get_option(args, 'name', 'resub_name', "resub")
        # BUG FIX: the pattern used to be compiled only on the first call,
        # so a different pattern supplied on a later scan was silently
        # ignored. Recompile whenever the effective pattern bytes change.
        if self.re_pattern is None or self._re_pattern_src != pattern:
            self.re_pattern = re.compile(pattern)
            self._re_pattern_src = pattern
        newdata = self.re_pattern.sub(replacement, buffer)
        moduleResult.append(ModuleObject(buffer=newdata,externalVars=ExternalVars(filename=scanObject.filename + "_" + name)))
        return moduleResult
| 36.733333 | 126 | 0.696915 |
import re
import binascii
from laikaboss.si_module import SI_MODULE
from laikaboss.objectmodel import ModuleObject, ExternalVars
import laikaboss.util
# Laika module that rewrites a scanned buffer with re.sub and emits the
# result as a child object for further scanning.
class EXPLODE_RE_SUB(SI_MODULE):
    def __init__(self,):
        self.module_name = "EXPLODE_RE_SUB"
        # Lazily-compiled regex, built on first _run invocation.
        self.re_pattern = None
    def _run(self, scanObject, result, depth, args):
        moduleResult = []
        buffer = scanObject.buffer
        # Pattern/replacement come from module args with config fallbacks;
        # NOTE(review): 'resub_resplacement' looks like a typo, but it is
        # the key existing configs use.
        pattern = laikaboss.util.get_option(args, 'pattern', 'resub_pattern', "uhm").encode('utf-8')
        replacement = laikaboss.util.get_option(args, 'replacement', 'resub_resplacement', "").encode('utf-8')
        # Hex-encoded variants override the plain-text options when set.
        pattern_hex = laikaboss.util.get_option(args, 'pattern_hex', 'resub_pattern_hex', "")
        if pattern_hex:
            pattern = binascii.unhexlify(pattern_hex)
        replacement_hex = laikaboss.util.get_option(args, 'replacement_hex', 'resub_replacement_hex', "")
        if replacement_hex:
            replacement = binascii.unhexlify(replacement_hex)
        name = laikaboss.util.get_option(args, 'name', 'resub_name', "resub")
        # NOTE(review): the pattern is compiled only once; a different
        # pattern passed on a later call is silently ignored — confirm
        # whether per-call patterns are expected.
        if not self.re_pattern:
            self.re_pattern = re.compile(pattern)
        newdata = self.re_pattern.sub(replacement, buffer)
        # Emit the rewritten buffer as a child object named <file>_<name>.
        moduleResult.append(ModuleObject(buffer=newdata,externalVars=ExternalVars(filename=scanObject.filename + "_" + name)))
        return moduleResult
| true | true |
f72f8e94a0df815f7d517e2b81ffc86c5c545f07 | 2,893 | py | Python | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple_dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
  """Checks that multiple_dispatch picks the right path for numpy vs TF values."""
  def test_dynamic_is_python(self):
    # Plain numpy/Python objects: dynamic_is must follow *identity* (is),
    # not equality -- two equal-valued arrays are still distinct objects.
    a = np.eye(3)
    also_a = a                  # same object (identity holds)
    not_actually_a = np.eye(3)  # equal values, different object
    should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
    should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
    should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
    should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
    self.assertTrue(should_be_true1)
    self.assertTrue(should_be_true2)
    self.assertFalse(should_be_false1)
    self.assertFalse(should_be_false2)
  def test_dynamic_is_tf(self):
    # Same identity semantics when the operands are TF tensors; a default
    # session is installed in case dispatch needs to evaluate anything.
    with Session().as_default():
      a = constant([2.0])
      also_a = a
      not_actually_a = constant([2.0])
      should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
      should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
      should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
      should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
      self.assertTrue(should_be_true1)
      self.assertTrue(should_be_true2)
      self.assertFalse(should_be_false1)
      self.assertFalse(should_be_false2)
  def test_run_cond_python(self):
    # Python bool predicate: run_cond behaves like a plain if/else and
    # unpacks the single-element tuple returned by the branch functions.
    true_fn = lambda: (2,)
    false_fn = lambda: (3,)
    self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)
    self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)
  def test_run_cond_tf(self):
    # Tensor predicate: run_cond should build a graph-mode cond whose output
    # evaluates to the chosen branch's value.
    true_fn = lambda: (constant(2),)
    false_fn = lambda: (constant(3),)
    with Session() as sess:
      out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
      self.assertEqual(sess.run(out), 2)
      out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
      self.assertEqual(sess.run(out), 3)
if __name__ == '__main__':
test.main()
| 38.065789 | 80 | 0.733495 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
def test_dynamic_is_python(self):
a = np.eye(3)
also_a = a
not_actually_a = np.eye(3)
should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
self.assertTrue(should_be_true1)
self.assertTrue(should_be_true2)
self.assertFalse(should_be_false1)
self.assertFalse(should_be_false2)
def test_dynamic_is_tf(self):
with Session().as_default():
a = constant([2.0])
also_a = a
not_actually_a = constant([2.0])
should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
self.assertTrue(should_be_true1)
self.assertTrue(should_be_true2)
self.assertFalse(should_be_false1)
self.assertFalse(should_be_false2)
def test_run_cond_python(self):
true_fn = lambda: (2,)
false_fn = lambda: (3,)
self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)
self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)
def test_run_cond_tf(self):
true_fn = lambda: (constant(2),)
false_fn = lambda: (constant(3),)
with Session() as sess:
out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
self.assertEqual(sess.run(out), 2)
out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
self.assertEqual(sess.run(out), 3)
if __name__ == '__main__':
test.main()
| true | true |
f72f8f3096b8b8f70f02fbf592b95d3864e87c01 | 594 | py | Python | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | 3 | 2022-03-10T08:13:07.000Z | 2022-03-10T08:13:13.000Z | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | null | null | null | papy/misc.py | ArcturusB/papy | 360e4cc6b5c8473f8a5e8bce3153931f1a54a558 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import functools
import time
class Chrono():
    """Minimal wall-clock stopwatch that prints lap and total elapsed times."""
    def __init__(self, msg=None):
        """Start timing; optionally print *msg* as a start banner."""
        if msg:
            print(msg)
        start = time.time()
        self.t0 = start  # epoch of construction (reference for totals)
        self.t = start   # epoch of the most recent lap
    def lap(self, name=None):
        """Print time since the last lap (and since start), then reset the lap clock."""
        now = time.time()
        if name:
            print(name, end=': ')
        print('{:.2g} s (total: {:.2g} s)'.format(now - self.t, now - self.t0))
        self.t = now
def cached_property(func):
    """Read-only @property whose getter is memoized with functools.lru_cache().

    NOTE(review): the lru_cache is keyed on the instance, so cached instances
    are kept alive for the lifetime of the cache -- acceptable for long-lived
    objects, but confirm before using on many short-lived instances.
    """
    memoized = functools.lru_cache()(func)
    return property(memoized)
| 24.75 | 74 | 0.552189 |
import functools
import time
class Chrono():
def __init__(self, msg=None):
if msg:
print(msg)
self.t0 = time.time()
self.t = self.t0
def lap(self, name=None):
now = time.time()
if name:
print(name, end=': ')
msg = '{:.2g} s (total: {:.2g} s)'
msg = msg.format(now - self.t, now - self.t0)
print(msg)
self.t = now
def cached_property(func):
return property(functools.lru_cache()(func))
| true | true |
f72f90fd5e586ab49bbf8330d152dd3b15c6712f | 2,341 | py | Python | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T00:55:25.000Z | 2020-12-15T00:55:25.000Z | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | """deserialize auto-icd models and provide a consistent interface"""
import typing as t
import json
import pickle
from pathlib import Path
import numpy as np
import onnxruntime as rt
APP_ROOT = Path("./app")
ASSETS_DIR = APP_ROOT/"assets"
class AutoICDModel:
    """Common interface for ONNX-serialized auto-ICD models.

    Subclasses implement ``__call__`` to map free clinical text to a set of
    ICD-9 codes using ``self.sess`` (an onnxruntime inference session).
    """
    def __init__(self, onnx_model_fp):
        # Fail fast if the serialized model file is missing.
        assert onnx_model_fp.exists()
        self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))
    def __call__(self, free_text: str) -> t.Set[str]:
        # BUG FIX: this was misspelled ``___call__`` (three leading
        # underscores), so it was never the call dunder -- instances were not
        # callable and this NotImplementedError guard could never fire.
        raise NotImplementedError("Subclasses just provide model interaction logic!")
# class KissModel(AutoICDModel):
# def __init__(self, onnx_model_fp, icd9_codes: t.List[str]):
# """because we are only loading a few codes,
# we need to know which ones in otder to decode
# decode the model output, which is a 1x|icd9_codes| matrix"""
# super().__init__(onnx_model_fp)
# self.icd9_codes = icd9_codes
# def ___call__(self, free_text: str) -> t.Set[str]:
# X = np.array([[free_text]])
# predictions, predictions_proba \
# = sess.run(None, {"free_text_input": X})[0]
# codes_predicted = [
# code for prediction, code in zip(predictions, self.icd9_codes)
# if prediction == 1 # i.e., if the code is predicted to be present
# ]
# codes2predicted_proba = {
# code: proba for code, proba in zip(self.icd9_codes, predictions_proba)
# }
# return codes_predicted, codes2predicted_proba
# def get_kiss_model():
# onnx_model_fp = ASSETS_DIR/"kiss_model.onnx"
# with open(ASSETS_DIR/"kiss_model.onnx.metadata.json") as f:
# icd9_codes = json.load(f)["icd9_codes_relevant"]
# model = KissModel(onnx_model_fp, icd9_codes)
# return model
class KissModel:
    """Kiss Model using pickle for persistence.

    Loads a pickled sklearn-style classifier plus the list of ICD-9 codes it
    was trained on, and maps free text to per-code probabilities.
    """
    def __init__(self):
        # Metadata JSON carries the ordered list of codes the model predicts.
        with open(ASSETS_DIR/"kiss_model.pkl.metadata.json") as f_meta:
            self.icd9_codes = json.load(f_meta)["icd9_codes_relevant"]
        # SECURITY NOTE: pickle.loads executes arbitrary code on load --
        # only ever ship trusted model files in ASSETS_DIR.
        with open(ASSETS_DIR/"kiss_model.pkl", "rb") as f:
            self.model = pickle.loads(f.read())
    def __call__(self, free_text: str):
        X = np.array([free_text])
        # assumes self.model.predict_proba returns one (n_samples, 2) array
        # per code (multi-output classifier); [:, 0, 1] takes the positive-
        # class probability for the single input sample -- TODO confirm
        # against the training pipeline.
        predicted_codes_proba = self.model.predict_proba(X)
        return np.array([proba.tolist() for proba in predicted_codes_proba])[:,0,1]
import typing as t
import json
import pickle
from pathlib import Path
import numpy as np
import onnxruntime as rt
APP_ROOT = Path("./app")
ASSETS_DIR = APP_ROOT/"assets"
class AutoICDModel:
def __init__(self, onnx_model_fp):
assert onnx_model_fp.exists()
self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))
def ___call__(self, free_text: str) -> t.Set[str]:
raise NotImplementedError("Subclasses just provide model interaction logic!")
# we need to know which ones in otder to decode
# decode the model output, which is a 1x|icd9_codes| matrix"""
it__(self):
with open(ASSETS_DIR/"kiss_model.pkl.metadata.json") as f_meta:
self.icd9_codes = json.load(f_meta)["icd9_codes_relevant"]
with open(ASSETS_DIR/"kiss_model.pkl", "rb") as f:
self.model = pickle.loads(f.read())
def __call__(self, free_text: str):
X = np.array([free_text])
predicted_codes_proba = self.model.predict_proba(X)
return np.array([proba.tolist() for proba in predicted_codes_proba])[:,0,1] | true | true |
f72f9151e8655b5897d9a3dbddc1abcb2e46e0c6 | 1,689 | py | Python | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | 2 | 2017-07-04T11:51:30.000Z | 2019-10-07T08:04:12.000Z | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | null | null | null | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | 2 | 2020-09-29T06:56:07.000Z | 2020-10-02T21:49:31.000Z |
# Import Python modules.
import sys
# Import application modules.
import assets
import database
import blueprints
# Import basic Sanic modules.
from sanic import Sanic
# Get the required Jinja2 module for rendering templates.
import jinja2 as j2
# Enabling async template execution which allows you to take advantage of newer
# Python features requires Python 3.6 or later.
enable_async = sys.version_info >= (3, 6)
# Create a new Sanic application (register=False keeps it out of Sanic's
# global app registry).
app = Sanic(name="catlinman.com", register=False)
# Setup the static directory.
app.static("/static", "./static")
# Load the template environment with async and autoescaping enabled;
# trim/lstrip keep Jinja block tags from leaking whitespace into the output.
template_env = j2.Environment(
    loader=j2.PackageLoader("app", "templates"),
    autoescape=j2.select_autoescape(["html", "xml"]),
    enable_async=enable_async,
    trim_blocks=True,
    lstrip_blocks=True
)
app.config.template_env = template_env
# Add middleware blueprints to this project (registered first so request
# middleware wraps all later routes).
app.blueprint(blueprints.middleware)
# Add all blueprints to this project.
app.blueprint(blueprints.root)
app.blueprint(blueprints.user)
app.blueprint(blueprints.about)
app.blueprint(blueprints.blog)
app.blueprint(blueprints.contact)
app.blueprint(blueprints.error)
app.blueprint(blueprints.gallery)
app.blueprint(blueprints.project)
# Load data blueprints into the data route.
app.blueprint(blueprints.location, url_prefix='/data')
app.blueprint(blueprints.psa, url_prefix='/data')
app.blueprint(blueprints.template, url_prefix='/data')
if __name__ == "__main__":
    # Build all our assets.
    assets.build_assets()
    # Run the main application (single worker, local-only bind).
    app.run(
        host="127.0.0.1",
        port=24070,
        workers=1,
        debug=False
    )
| 25.590909 | 79 | 0.748372 |
import sys
import assets
import database
import blueprints
from sanic import Sanic
import jinja2 as j2
enable_async = sys.version_info >= (3, 6)
app = Sanic(name="catlinman.com", register=False)
app.static("/static", "./static")
template_env = j2.Environment(
loader=j2.PackageLoader("app", "templates"),
autoescape=j2.select_autoescape(["html", "xml"]),
enable_async=enable_async,
trim_blocks=True,
lstrip_blocks=True
)
app.config.template_env = template_env
app.blueprint(blueprints.middleware)
app.blueprint(blueprints.root)
app.blueprint(blueprints.user)
app.blueprint(blueprints.about)
app.blueprint(blueprints.blog)
app.blueprint(blueprints.contact)
app.blueprint(blueprints.error)
app.blueprint(blueprints.gallery)
app.blueprint(blueprints.project)
app.blueprint(blueprints.location, url_prefix='/data')
app.blueprint(blueprints.psa, url_prefix='/data')
app.blueprint(blueprints.template, url_prefix='/data')
if __name__ == "__main__":
assets.build_assets()
app.run(
host="127.0.0.1",
port=24070,
workers=1,
debug=False
)
| true | true |
f72f91b9b9f83a577df103957548819b165bf8d5 | 16,826 | py | Python | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | null | null | null | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | 80 | 2018-07-17T20:10:20.000Z | 2021-08-17T15:38:24.000Z | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
import codecs
import glob
import os
import sys
import time
import serial
# =============local library imports ==========================
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
    """Return candidate serial-port device names for the current platform."""
    if sys.platform == 'win32':
        # Windows exposes serial devices as COM1..COM256.
        return ['COM{}'.format(n + 1) for n in range(256)]
    # POSIX: collect device nodes created by the adapters this project uses.
    usb, furpi, pychron, slab = (
        glob.glob(pat)
        for pat in ('/dev/tty.usb*', '/dev/furpi.*', '/dev/pychron.*', '/dev/tty.SLAB*')
    )
    # Keyspan adapters enumerate differently on macOS vs. linux.
    keyspan_pat = '/dev/tty.U*' if sys.platform == 'darwin' else '/dev/ttyU*'
    keyspan = glob.glob(keyspan_pat)
    return keyspan + usb + furpi + pychron + slab
class SerialCommunicator(Communicator):
    """
    Base Class for devices that communicate using a rs232 serial port.
    Using Keyspan serial converter is the best option for a Mac
    class is built on top of pyserial. Pyserial is used to create a handle and
    this class uses the handle to read and write.
    handles are created when a serial device is opened
    setup args are loaded using load(). this method should be overwritten to
    load specific items.
    NOTE(review): this class uses the pyserial 2.x camelCase API
    (isOpen/inWaiting/getBaudrate/setBaudrate/flushInput); pyserial 3.x
    renamed or removed these -- confirm the pinned pyserial version before
    upgrading.
    """
    # char_write = False
    _auto_find_handle = False   # when True, probe every port for the device (_find_handle)
    _auto_write_handle = False  # when True, write a discovered port back to the config file
    # connection parameters; populated by load()/load_comdict(), passed to serial.Serial
    baudrate = None
    port = None
    bytesize = None
    parity = None
    stopbits = None
    timeout = None
    # query/expected-response pair used by _find_handle to identify the device;
    # id_response may also be a callable predicate on the response
    id_query = ''
    id_response = ''
    read_delay = None                # ms to sleep before each read loop starts
    read_terminator = None           # byte(s) marking end-of-response
    read_terminator_position = None  # optional index at which to test for the terminator
    clear_output = False             # flush in/out buffers before every ask()
    _config = None
    # attribute names included in the communications report
    _comms_report_attrs = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')
    @property
    def address(self):
        # the port name doubles as this communicator's address
        return self.port
    def test_connection(self):
        # a non-None handle means open() succeeded at least once
        return self.handle is not None
    def reset(self):
        """Bounce the serial connection: close, drop baudrate to 0, reopen at the original rate."""
        handle = self.handle
        try:
            isopen = handle.isOpen()
            orate = handle.getBaudrate()
            if isopen:
                handle.close()
            # toggling baudrate through 0 forces the driver to re-latch settings
            handle.setBaudrate(0)
            handle.open()
            time.sleep(0.1)
            handle.close()
            handle.setBaudrate(orate)
            if isopen:
                handle.open()
        except Exception:
            self.warning('failed to reset connection')
    def close(self):
        """Close the underlying pyserial handle if one exists."""
        if self.handle:
            self.debug('closing handle {}'.format(self.handle))
            self.handle.close()
    def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
        """Programmatic alternative to load(): set connection parameters directly."""
        self.baudrate = baudrate
        self.port = port
        self.set_parity(parity)
        self.set_stopbits(stopbits)
        self.bytesize = bytesize
    def load(self, config, path):
        """Populate connection parameters from the [Communications] section of a config file."""
        self.config_path = path
        self._config = config
        self.set_attribute(config, 'port', 'Communications', 'port')
        self.set_attribute(config, 'baudrate', 'Communications', 'baudrate',
                           cast='int', optional=True)
        self.set_attribute(config, 'bytesize', 'Communications', 'bytesize',
                           cast='int', optional=True)
        self.set_attribute(config, 'timeout', 'Communications', 'timeout',
                           cast='float', optional=True)
        self.set_attribute(config, 'clear_output', 'Communications', 'clear_output',
                           cast='boolean', optional=True)
        parity = self.config_get(config, 'Communications', 'parity', optional=True)
        self.set_parity(parity)
        stopbits = self.config_get(config, 'Communications', 'stopbits', optional=True)
        self.set_stopbits(stopbits)
        self.set_attribute(config, 'read_delay', 'Communications', 'read_delay',
                           cast='float', optional=True, default=25)
        self.set_attribute(config, 'read_terminator', 'Communications', 'terminator',
                           optional=True, default=None)
        self.set_attribute(config, 'read_terminator_position', 'Communications', 'terminator_position',
                           optional=True, default=None, cast='int')
        self.set_attribute(config, 'write_terminator', 'Communications', 'write_terminator',
                           optional=True, default=b'\r')
        # translate symbolic terminator names from the config into real bytes
        if self.write_terminator == 'CRLF':
            self.write_terminator = b'\r\n'
        if self.read_terminator == 'CRLF':
            self.read_terminator = b'\r\n'
        if self.read_terminator == 'ETX':
            self.read_terminator = chr(3)
    def set_parity(self, parity):
        """Map a parity name ('even', 'odd', 'none', ...) onto the serial.PARITY_* constant."""
        if parity:
            self.parity = getattr(serial, 'PARITY_%s' % parity.upper())
    def set_stopbits(self, stopbits):
        """Map '1'/1 or '2'/2 (or a name) onto the serial.STOPBITS_* constant."""
        if stopbits:
            if stopbits in ('1', 1):
                stopbits = 'ONE'
            elif stopbits in ('2', 2):
                stopbits = 'TWO'
            self.stopbits = getattr(serial, 'STOPBITS_{}'.format(stopbits.upper()))
    def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
        """Write *cmd* to the device without waiting for a response."""
        if self.handle is None:
            if verbose:
                info = 'no handle'
                self.log_tell(cmd, info)
            return
        with self._lock:
            self._write(cmd, is_hex=is_hex)
            if verbose:
                self.log_tell(cmd, info)
    def read(self, nchars=None, *args, **kw):
        """Read from the device: exactly *nchars* bytes, or until a terminator."""
        with self._lock:
            if nchars is not None:
                r = self._read_nchars(nchars)
            else:
                r = self._read_terminator(*args, **kw)
            return r
    def ask(self, cmd, is_hex=False, verbose=True, delay=None,
            replace=None, remove_eol=True, info=None, nbytes=None,
            handshake_only=False,
            handshake=None,
            read_terminator=None,
            terminator_position=None,
            nchars=None):
        """Write *cmd* and return the device's response.

        The read strategy is chosen by the keyword arguments, in priority
        order: is_hex (fixed nbytes) > handshake > nchars > terminator scan.
        """
        if self.handle is None:
            if verbose:
                x = prep_str(cmd.strip())
                self.info('no handle {}'.format(x))
            return
        if not self.handle.isOpen():
            return
        with self._lock:
            if self.clear_output:
                # drop any stale bytes from previous exchanges
                self.handle.flushInput()
                self.handle.flushOutput()
            cmd = self._write(cmd, is_hex=is_hex)
            if cmd is None:
                # write failed (or simulation); no point reading
                return
            # NOTE: local 're' shadows the re module within this method
            if is_hex:
                if nbytes is None:
                    nbytes = 8
                re = self._read_hex(nbytes=nbytes, delay=delay)
            elif handshake is not None:
                re = self._read_handshake(handshake, handshake_only, delay=delay)
            elif nchars is not None:
                re = self._read_nchars(nchars)
            else:
                re = self._read_terminator(delay=delay,
                                           terminator=read_terminator,
                                           terminator_position=terminator_position)
            if remove_eol and not is_hex:
                re = remove_eol_func(re)
            if verbose:
                pre = process_response(re, replace, remove_eol=not is_hex)
                self.log_response(cmd, pre, info)
            return re
    def open(self, **kw):
        """
            Use pyserial to create a handle connected to port with baudrate
            default handle parameters
            baudrate=9600
            bytesize=EIGHTBITS
            parity= PARITY_NONE
            stopbits= STOPBITS_ONE
            timeout=None
            Returns True when a handle was created (sets self.simulation
            accordingly).
        """
        port = kw.get('port')
        if port is None:
            port = self.port
        if port is None:
            self.warning('Port not set')
            return False
        # #on windows device handles probably handled differently
        if sys.platform == 'darwin':
            port = '/dev/tty.{}'.format(port)
        kw['port'] = port
        # fill any parameters not passed explicitly from the instance attributes
        for key in ['baudrate', 'bytesize', 'parity', 'stopbits', 'timeout']:
            v = kw.get(key)
            if v is None:
                v = getattr(self, key)
            if v is not None:
                kw[key] = v
        pref = kw.pop('prefs', None)
        if pref is not None:
            pref = pref.serial_preference
            self._auto_find_handle = pref.auto_find_handle
            self._auto_write_handle = pref.auto_write_handle
        # assume simulation until a real handle is successfully created
        self.simulation = True
        if self._validate_address(port):
            try_connect = True
            while try_connect:
                try:
                    self.debug('Connection parameters={}'.format(kw))
                    self.handle = serial.Serial(**kw)
                    try_connect = False
                    self.simulation = False
                except serial.serialutil.SerialException:
                    try_connect = False
                    self.debug_exception()
        elif self._auto_find_handle:
            # configured port missing; optionally probe all ports for the device
            self._find_handle(**kw)
        self.debug('Serial device: {}'.format(self.handle))
        return self.handle is not None  # connected is true if handle is not None
    def _get_report_value(self, key):
        # prefer the live handle's value (it reflects the actual connection)
        c, value = super(SerialCommunicator, self)._get_report_value(key)
        if self.handle:
            value = getattr(self.handle, key)
        return c, value
    def _find_handle(self, **kw):
        """Probe every available port, issuing id_query until id_response matches."""
        found = False
        self.simulation = False
        self.info('Trying to find correct port')
        port = None
        for port in get_ports():
            self.info('trying port {}'.format(port))
            kw['port'] = port
            try:
                self.handle = serial.Serial(**kw)
            except serial.SerialException:
                continue
            r = self.ask(self.id_query)
            # use id_response as a callable to do device specific
            # checking
            if callable(self.id_response):
                if self.id_response(r):
                    found = True
                    self.simulation = False
                    break
            if r == self.id_response:
                found = True
                self.simulation = False
                break
        if not found:
            # update the port
            if self._auto_write_handle and port:
                # port in form
                # /dev/tty.USAXXX1.1
                p = os.path.split(port)[-1]
                # remove tty.
                p = p[4:]
                # NOTE(review): no value is passed for the 'port' option here
                # (and the section is 'Communication', not 'Communications' as
                # used in load()) -- looks like 'p' was meant to be written;
                # confirm before relying on auto-write.
                self._config.set('Communication', 'port', )
                self.write_configuration(self._config, self.config_path)
            self.handle = None
            self.simulation = True
    def _validate_address(self, port):
        """
            use glob to check the available serial ports
            valid ports start with /dev/tty.U or /dev/tty.usbmodem
            Returns True when *port* is currently present; otherwise logs the
            valid choices and returns None (falsy).
        """
        valid = get_ports()
        if port in valid:
            return True
        else:
            msg = '{} is not a valid port address'.format(port)
            self.warning(msg)
            if not valid:
                self.warning('No valid ports')
            else:
                self.warning('======== Valid Ports ========')
                for v in valid:
                    self.warning(v)
                self.warning('=============================')
    def _write(self, cmd, is_hex=False):
        """
            use the serial handle to write the cmd to the serial buffer
            Returns the bytes actually written (terminator appended), or None
            on write failure / simulation.
        """
        if not self.simulation:
            if not isinstance(cmd, bytes):
                cmd = bytes(cmd, 'utf-8')
            if is_hex:
                # cmd is an ascii-hex string; send the raw decoded bytes
                cmd = codecs.decode(cmd, 'hex')
            else:
                wt = self.write_terminator
                if wt is not None:
                    if not isinstance(wt, bytes):
                        wt = bytes(wt, 'utf-8')
                    cmd += wt
            try:
                self.handle.write(cmd)
            except (serial.serialutil.SerialException, OSError, IOError, ValueError) as e:
                self.warning('Serial Communicator write execption: {}'.format(e))
                return
            return cmd
    def _read_nchars(self, nchars, timeout=1, delay=None):
        # read until exactly nchars bytes have accumulated (or timeout)
        return self._read_loop(lambda r: self._get_nchars(nchars, r), delay, timeout)
    def _read_hex(self, nbytes=8, timeout=1, delay=None):
        # fixed-length binary response; see _get_nbytes for the byte/char note
        return self._read_loop(lambda r: self._get_nbytes(nbytes, r), delay, timeout)
    def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
        """Read expecting an ACK/NAK pair; optionally stop as soon as ACK arrives."""
        def hfunc(r):
            terminated = False
            ack, r = self._check_handshake(handshake)
            if handshake_only and ack:
                r = handshake[0]
                terminated = True
            elif ack and r is not None:
                terminated = True
            return r, terminated
        return self._read_loop(hfunc, delay, timeout)
    def _read_terminator(self, timeout=1, delay=None,
                         terminator=None, terminator_position=None):
        """Accumulate bytes until one of the terminator sequences is seen."""
        if terminator is None:
            terminator = self.read_terminator
        if terminator_position is None:
            terminator_position = self.read_terminator_position
        if terminator is None:
            # common line endings, tried in order
            terminator = (b'\r\x00', b'\r\n', b'\r', b'\n')
        if not isinstance(terminator, (list, tuple)):
            terminator = (terminator,)
        def func(r):
            terminated = False
            try:
                inw = self.handle.inWaiting()
                r += self.handle.read(inw)
                if r and r.strip():
                    for ti in terminator:
                        if terminator_position:
                            # terminator expected at a fixed index, not at the end
                            terminated = r[terminator_position] == ti
                        else:
                            if isinstance(ti, str):
                                ti = ti.encode()
                            terminated = r.endswith(ti)
                        if terminated:
                            break
            except BaseException as e:
                self.warning(e)
            return r, terminated
        return self._read_loop(func, delay, timeout)
    def _get_nbytes(self, *args, **kw):
        """
            1 byte == 2 chars
        """
        return self._get_nchars(*args, **kw)
    def _get_nchars(self, nchars, r):
        # pull at most what is waiting, capped at the chars still needed
        handle = self.handle
        inw = handle.inWaiting()
        c = min(inw, nchars - len(r))
        r += handle.read(c)
        return r[:nchars], len(r) >= nchars
    def _check_handshake(self, handshake_chrs):
        # returns (ack_received, remainder_of_buffer) or (False, None)
        ack, nak = handshake_chrs
        inw = self.handle.inWaiting()
        r = self.handle.read(inw)
        if r:
            return ack == r[0], r[1:]
        return False, None
    def _read_loop(self, func, delay, timeout=1):
        """Poll *func* every 10 ms until it reports termination or *timeout* seconds elapse."""
        if delay is not None:
            time.sleep(delay / 1000.)
        elif self.read_delay:
            time.sleep(self.read_delay / 1000.)
        r = b''
        st = time.time()
        handle = self.handle
        ct = time.time()
        while ct - st < timeout:
            if not handle.isOpen():
                break
            try:
                r, isterminated = func(r)
                if isterminated:
                    break
            except (ValueError, TypeError):
                # transient decode/slice errors while bytes are still arriving
                pass
            time.sleep(0.01)
            ct = time.time()
        if ct - st > timeout:
            l = len(r) if r else 0
            self.info('timed out. {}s r={}, len={}'.format(timeout, r, l))
        return r
if __name__ == '__main__':
    # Ad-hoc hardware smoke test: open a fixed usbmodem port, send 'A' once,
    # then poll with '1' ten times at one-second intervals.
    s = SerialCommunicator()
    s.read_delay = 0
    s.port = 'usbmodemfd1221'
    s.open()
    time.sleep(2)
    s.tell('A', verbose=False)
    for i in range(10):
        print('dddd', s.ask('1', verbose=False))
        time.sleep(1)
        # s.tell('ddd', verbose=False)
        # print s.ask('ddd', verbose=False)
# ===================== EOF ==========================================
| 31.927894 | 103 | 0.524664 |
import codecs
import glob
import os
import sys
import time
import serial
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
if sys.platform == 'win32':
ports = ['COM{}'.format(i+1) for i in range(256)]
else:
usb = glob.glob('/dev/tty.usb*')
furpi = glob.glob('/dev/furpi.*')
pychron = glob.glob('/dev/pychron.*')
slab = glob.glob('/dev/tty.SLAB*')
if sys.platform == 'darwin':
keyspan = glob.glob('/dev/tty.U*')
else:
keyspan = glob.glob('/dev/ttyU*')
ports = keyspan + usb + furpi + pychron + slab
return ports
class SerialCommunicator(Communicator):
_auto_find_handle = False
_auto_write_handle = False
baudrate = None
port = None
bytesize = None
parity = None
stopbits = None
timeout = None
id_query = ''
id_response = ''
read_delay = None
read_terminator = None
read_terminator_position = None
clear_output = False
_config = None
_comms_report_attrs = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')
@property
def address(self):
return self.port
def test_connection(self):
return self.handle is not None
def reset(self):
handle = self.handle
try:
isopen = handle.isOpen()
orate = handle.getBaudrate()
if isopen:
handle.close()
handle.setBaudrate(0)
handle.open()
time.sleep(0.1)
handle.close()
handle.setBaudrate(orate)
if isopen:
handle.open()
except Exception:
self.warning('failed to reset connection')
def close(self):
if self.handle:
self.debug('closing handle {}'.format(self.handle))
self.handle.close()
def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
self.baudrate = baudrate
self.port = port
self.set_parity(parity)
self.set_stopbits(stopbits)
self.bytesize = bytesize
def load(self, config, path):
self.config_path = path
self._config = config
self.set_attribute(config, 'port', 'Communications', 'port')
self.set_attribute(config, 'baudrate', 'Communications', 'baudrate',
cast='int', optional=True)
self.set_attribute(config, 'bytesize', 'Communications', 'bytesize',
cast='int', optional=True)
self.set_attribute(config, 'timeout', 'Communications', 'timeout',
cast='float', optional=True)
self.set_attribute(config, 'clear_output', 'Communications', 'clear_output',
cast='boolean', optional=True)
parity = self.config_get(config, 'Communications', 'parity', optional=True)
self.set_parity(parity)
stopbits = self.config_get(config, 'Communications', 'stopbits', optional=True)
self.set_stopbits(stopbits)
self.set_attribute(config, 'read_delay', 'Communications', 'read_delay',
cast='float', optional=True, default=25)
self.set_attribute(config, 'read_terminator', 'Communications', 'terminator',
optional=True, default=None)
self.set_attribute(config, 'read_terminator_position', 'Communications', 'terminator_position',
optional=True, default=None, cast='int')
self.set_attribute(config, 'write_terminator', 'Communications', 'write_terminator',
optional=True, default=b'\r')
if self.write_terminator == 'CRLF':
self.write_terminator = b'\r\n'
if self.read_terminator == 'CRLF':
self.read_terminator = b'\r\n'
if self.read_terminator == 'ETX':
self.read_terminator = chr(3)
def set_parity(self, parity):
if parity:
self.parity = getattr(serial, 'PARITY_%s' % parity.upper())
def set_stopbits(self, stopbits):
if stopbits:
if stopbits in ('1', 1):
stopbits = 'ONE'
elif stopbits in ('2', 2):
stopbits = 'TWO'
self.stopbits = getattr(serial, 'STOPBITS_{}'.format(stopbits.upper()))
def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
if self.handle is None:
if verbose:
info = 'no handle'
self.log_tell(cmd, info)
return
with self._lock:
self._write(cmd, is_hex=is_hex)
if verbose:
self.log_tell(cmd, info)
def read(self, nchars=None, *args, **kw):
with self._lock:
if nchars is not None:
r = self._read_nchars(nchars)
else:
r = self._read_terminator(*args, **kw)
return r
def ask(self, cmd, is_hex=False, verbose=True, delay=None,
replace=None, remove_eol=True, info=None, nbytes=None,
handshake_only=False,
handshake=None,
read_terminator=None,
terminator_position=None,
nchars=None):
if self.handle is None:
if verbose:
x = prep_str(cmd.strip())
self.info('no handle {}'.format(x))
return
if not self.handle.isOpen():
return
with self._lock:
if self.clear_output:
self.handle.flushInput()
self.handle.flushOutput()
cmd = self._write(cmd, is_hex=is_hex)
if cmd is None:
return
if is_hex:
if nbytes is None:
nbytes = 8
re = self._read_hex(nbytes=nbytes, delay=delay)
elif handshake is not None:
re = self._read_handshake(handshake, handshake_only, delay=delay)
elif nchars is not None:
re = self._read_nchars(nchars)
else:
re = self._read_terminator(delay=delay,
terminator=read_terminator,
terminator_position=terminator_position)
if remove_eol and not is_hex:
re = remove_eol_func(re)
if verbose:
pre = process_response(re, replace, remove_eol=not is_hex)
self.log_response(cmd, pre, info)
return re
def open(self, **kw):
port = kw.get('port')
if port is None:
port = self.port
if port is None:
self.warning('Port not set')
return False
= '/dev/tty.{}'.format(port)
kw['port'] = port
for key in ['baudrate', 'bytesize', 'parity', 'stopbits', 'timeout']:
v = kw.get(key)
if v is None:
v = getattr(self, key)
if v is not None:
kw[key] = v
pref = kw.pop('prefs', None)
if pref is not None:
pref = pref.serial_preference
self._auto_find_handle = pref.auto_find_handle
self._auto_write_handle = pref.auto_write_handle
self.simulation = True
if self._validate_address(port):
try_connect = True
while try_connect:
try:
self.debug('Connection parameters={}'.format(kw))
self.handle = serial.Serial(**kw)
try_connect = False
self.simulation = False
except serial.serialutil.SerialException:
try_connect = False
self.debug_exception()
elif self._auto_find_handle:
self._find_handle(**kw)
self.debug('Serial device: {}'.format(self.handle))
return self.handle is not None
def _get_report_value(self, key):
c, value = super(SerialCommunicator, self)._get_report_value(key)
if self.handle:
value = getattr(self.handle, key)
return c, value
def _find_handle(self, **kw):
found = False
self.simulation = False
self.info('Trying to find correct port')
port = None
for port in get_ports():
self.info('trying port {}'.format(port))
kw['port'] = port
try:
self.handle = serial.Serial(**kw)
except serial.SerialException:
continue
r = self.ask(self.id_query)
if callable(self.id_response):
if self.id_response(r):
found = True
self.simulation = False
break
if r == self.id_response:
found = True
self.simulation = False
break
if not found:
if self._auto_write_handle and port:
p = os.path.split(port)[-1]
p = p[4:]
self._config.set('Communication', 'port', )
self.write_configuration(self._config, self.config_path)
self.handle = None
self.simulation = True
def _validate_address(self, port):
valid = get_ports()
if port in valid:
return True
else:
msg = '{} is not a valid port address'.format(port)
self.warning(msg)
if not valid:
self.warning('No valid ports')
else:
self.warning('======== Valid Ports ========')
for v in valid:
self.warning(v)
self.warning('=============================')
    def _write(self, cmd, is_hex=False):
        """Encode ``cmd`` and write it to the serial handle.

        Text commands are UTF-8 encoded and get the configured write
        terminator appended; hex commands are decoded from their hex string
        form instead. Returns the bytes actually written, or None on a write
        error or in simulation mode.
        """
        if not self.simulation:
            if not isinstance(cmd, bytes):
                cmd = bytes(cmd, 'utf-8')
            if is_hex:
                cmd = codecs.decode(cmd, 'hex')
            else:
                wt = self.write_terminator
                if wt is not None:
                    if not isinstance(wt, bytes):
                        wt = bytes(wt, 'utf-8')
                    cmd += wt
            try:
                self.handle.write(cmd)
            except (serial.serialutil.SerialException, OSError, IOError, ValueError) as e:
                self.warning('Serial Communicator write execption: {}'.format(e))
                return
            return cmd
def _read_nchars(self, nchars, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nchars(nchars, r), delay, timeout)
def _read_hex(self, nbytes=8, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nbytes(nbytes, r), delay, timeout)
    def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
        """Read until an ACK/NAK handshake is resolved.

        ``handshake`` is an (ACK, NAK) pair. With ``handshake_only`` the ACK
        character itself is returned as soon as it is seen; otherwise reading
        continues until a payload accompanies the ACK.
        """
        def hfunc(r):
            terminated = False
            # NOTE(review): the accumulated buffer ``r`` is replaced on every
            # poll by _check_handshake's remainder -- confirm that partial
            # payloads cannot straddle two polls.
            ack, r = self._check_handshake(handshake)
            if handshake_only and ack:
                r = handshake[0]
                terminated = True
            elif ack and r is not None:
                terminated = True
            return r, terminated
        return self._read_loop(hfunc, delay, timeout)
    def _read_terminator(self, timeout=1, delay=None,
                         terminator=None, terminator_position=None):
        """Read until one of the terminator sequences is seen.

        Falls back to the instance's configured terminator, then to a default
        set of CR/LF variants. With ``terminator_position`` set, the byte at
        that index is compared instead of testing the suffix.
        """
        if terminator is None:
            terminator = self.read_terminator
        if terminator_position is None:
            terminator_position = self.read_terminator_position
        if terminator is None:
            terminator = (b'\r\x00', b'\r\n', b'\r', b'\n')
        # Normalize a single terminator to a tuple so the loop below works.
        if not isinstance(terminator, (list, tuple)):
            terminator = (terminator,)
        def func(r):
            terminated = False
            try:
                inw = self.handle.inWaiting()
                r += self.handle.read(inw)
                if r and r.strip():
                    for ti in terminator:
                        if terminator_position:
                            # NOTE(review): on Python 3, r[i] is an int while
                            # ti is bytes/str, so this comparison may always
                            # be False -- confirm against real callers.
                            terminated = r[terminator_position] == ti
                        else:
                            if isinstance(ti, str):
                                ti = ti.encode()
                            terminated = r.endswith(ti)
                            if terminated:
                                break
            except BaseException as e:
                self.warning(e)
            return r, terminated
        return self._read_loop(func, delay, timeout)
    def _get_nbytes(self, *args, **kw):
        # Bytes and characters are accumulated identically; delegate.
        return self._get_nchars(*args, **kw)
    def _get_nchars(self, nchars, r):
        """Accumulator for ``_read_loop``: append up to ``nchars - len(r)``
        waiting characters to ``r``. Terminated once ``nchars`` collected.
        """
        handle = self.handle
        inw = handle.inWaiting()
        # Never read past the requested total length.
        c = min(inw, nchars - len(r))
        r += handle.read(c)
        return r[:nchars], len(r) >= nchars
    def _check_handshake(self, handshake_chrs):
        """Read all waiting bytes and test whether the first one is the ACK
        character. Returns (ack_seen, remainder), or (False, None) when
        nothing was waiting.

        NOTE(review): on Python 3, ``r[0]`` is an int; this assumes the
        handshake characters are supplied in a comparable form -- confirm
        with callers.
        """
        ack, nak = handshake_chrs
        inw = self.handle.inWaiting()
        r = self.handle.read(inw)
        if r:
            return ack == r[0], r[1:]
        return False, None
    def _read_loop(self, func, delay, timeout=1):
        """Poll ``func`` until it reports termination or ``timeout`` elapses.

        ``func`` takes the bytes accumulated so far and returns
        ``(new_buffer, terminated)``. ``delay`` (milliseconds) or the
        instance's ``read_delay`` is slept once before polling begins.
        Returns whatever was accumulated, even on timeout.
        """
        if delay is not None:
            time.sleep(delay / 1000.)
        elif self.read_delay:
            time.sleep(self.read_delay / 1000.)
        r = b''
        st = time.time()
        handle = self.handle
        ct = time.time()
        while ct - st < timeout:
            if not handle.isOpen():
                break
            try:
                r, isterminated = func(r)
                if isterminated:
                    break
            except (ValueError, TypeError):
                # Transient decode/compare glitches: keep polling.
                pass
            # Small sleep to avoid busy-waiting on the serial buffer.
            time.sleep(0.01)
            ct = time.time()
        if ct - st > timeout:
            l = len(r) if r else 0
            self.info('timed out. {}s r={}, len={}'.format(timeout, r, l))
        return r
# Manual smoke test: talk to a device on a hard-coded port, priming it with
# 'A' and then polling it ten times at roughly 1 Hz.
if __name__ == '__main__':
    s = SerialCommunicator()
    s.read_delay = 0
    s.port = 'usbmodemfd1221'
    s.open()
    time.sleep(2)
    s.tell('A', verbose=False)
    for i in range(10):
        print('dddd', s.ask('1', verbose=False))
        time.sleep(1)
| true | true |
f72f91d02075c3865e71042a4c4631a5ce5c09f9 | 6,749 | py | Python | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 345 | 2015-01-03T19:19:27.000Z | 2022-03-20T11:00:50.000Z | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 73 | 2015-01-06T14:54:02.000Z | 2022-03-11T23:11:34.000Z | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 148 | 2015-01-07T16:30:08.000Z | 2022-03-25T21:20:58.000Z | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.settings import VARIANT
from lfs.core.signals import featured_changed
from lfs.core.utils import LazyEncoder
from lfs.marketing.models import FeaturedProduct
@permission_required("manage_shop")
def manage_featured(request, template_name="manage/marketing/featured.html"):
    """Renders the featured-products management page.

    NOTE(review): this returns the rendered template as a *string*
    (``render_to_string``), not an HttpResponse -- confirm callers wrap it.
    """
    inline = manage_featured_inline(request, as_string=True)
    # Options for the page-size selector; the session remembers the choice.
    amount_options = []
    for value in (10, 25, 50, 100):
        amount_options.append({
            "value": value,
            "selected": value == request.session.get("featured-amount")
        })
    return render_to_string(template_name, request=request, context={
        "featured_inline": inline,
        "amount_options": amount_options,
    })
@permission_required("manage_shop")
def manage_featured_inline(request, as_string=False, template_name="manage/marketing/featured_inline.html"):
    """Renders the featured-products table and the paginated list of
    products still available for adding, applying name/SKU and category
    filters.

    Returns the rendered HTML string when ``as_string`` is True, otherwise a
    JSON HttpResponse for the AJAX UI.
    """
    featured = FeaturedProduct.objects.all()
    featured_ids = [f.product.id for f in featured]
    r = request.POST if request.method == 'POST' else request.GET
    s = request.session
    # If we get the parameter ``keep-filters`` or ``page`` we take the
    # filters out of the request resp. session. The request takes precedence.
    # The ``page`` parameter is given if the user clicks on the next/previous
    # page links. The ``keep-filters`` parameter is given when the user
    # adds/removes products. In this way we keep the current filters when
    # needed. If the whole page is reloaded there is no ``keep-filters`` or
    # ``page`` and all filters are reset as they should be.
    if r.get("keep-filters") or r.get("page"):
        page = r.get("page", s.get("featured_products_page", 1))
        filter_ = r.get("filter", s.get("filter"))
        category_filter = r.get("featured_category_filter", s.get("featured_category_filter"))
    else:
        page = r.get("page", 1)
        filter_ = r.get("filter")
        category_filter = r.get("featured_category_filter")
    # The current filters are saved in any case for later use.
    s["featured_products_page"] = page
    s["filter"] = filter_
    s["featured_category_filter"] = category_filter
    try:
        s["featured-amount"] = int(r.get("featured-amount", s.get("featured-amount")))
    except TypeError:
        # No amount in request or session yet: fall back to the default.
        s["featured-amount"] = 25
    filters = Q()
    if filter_:
        # Match by name or SKU; variants may inherit either from the parent.
        filters &= Q(name__icontains=filter_)
        filters |= Q(sku__icontains=filter_)
        filters |= (Q(sub_type=VARIANT) & Q(active_sku=False) & Q(parent__sku__icontains=filter_))
        filters |= (Q(sub_type=VARIANT) & Q(active_name=False) & Q(parent__name__icontains=filter_))
    if category_filter:
        if category_filter == "None":
            filters &= Q(categories=None)
        elif category_filter == "All":
            pass
        else:
            # First we collect all sub categories and use the `in` operator
            category = lfs_get_object_or_404(Category, pk=category_filter)
            categories = [category]
            categories.extend(category.get_all_children())
            filters &= Q(categories__in=categories)
    # Products already featured are excluded from the "addable" list.
    products = Product.objects.filter(filters).exclude(pk__in=featured_ids)
    paginator = Paginator(products, s["featured-amount"])
    total = products.count()
    try:
        page = paginator.page(page)
    except EmptyPage:
        page = 0
    result = render_to_string(template_name, request=request, context={
        "featured": featured,
        "total": total,
        "page": page,
        "paginator": paginator,
        "filter": filter_
    })
    if as_string:
        return result
    else:
        return HttpResponse(
            json.dumps({
                "html": [["#featured-inline", result]],
            }), content_type='application/json')
# Actions
@permission_required("manage_shop")
def add_featured(request):
    """Adds featured products for the given ids (within the request body)."""
    product_keys = [key for key in request.POST.keys()
                    if key.startswith("product")]
    for key in product_keys:
        product_id = key.split("-")[1]
        FeaturedProduct.objects.create(product_id=product_id)
    # Re-normalize positions after the inserts.
    _update_positions()
    html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Featured product has been added.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
@permission_required("manage_shop")
def update_featured(request):
    """Saves or removes featured products for the passed ids (request body).

    With ``action=remove`` every ``product-<id>`` key is deleted; otherwise
    every ``position-<id>`` key updates the corresponding position. In both
    cases positions are re-normalized and the refreshed inline HTML is
    returned as JSON.
    """
    if request.POST.get("action") == "remove":
        # Track the last successfully removed item so the signal below can
        # never raise NameError (previously ``featured`` was referenced
        # unconditionally and was unbound when no product key matched).
        featured = None
        for temp_id in request.POST.keys():
            if not temp_id.startswith("product"):
                continue
            temp_id = temp_id.split("-")[1]
            try:
                featured = FeaturedProduct.objects.get(pk=temp_id)
                featured.delete()
            except (FeaturedProduct.DoesNotExist, ValueError):
                pass
        _update_positions()
        if featured is not None:
            featured_changed.send(featured)
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been removed.")
        }, cls=LazyEncoder)
    else:
        for temp_id in request.POST.keys():
            if not temp_id.startswith("position"):
                continue
            temp_id = temp_id.split("-")[1]
            featured = FeaturedProduct.objects.get(pk=temp_id)
            # Update position
            position = request.POST.get("position-%s" % temp_id)
            featured.position = position
            featured.save()
        _update_positions()
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been updated.")
        }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def _update_positions():
    """Re-number all featured products with evenly spaced positions
    (10, 20, 30, ...) so later drag/drop inserts have room in between.
    """
    for index, item in enumerate(FeaturedProduct.objects.all(), start=1):
        item.position = index * 10
        item.save()
| 33.577114 | 108 | 0.645281 | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.settings import VARIANT
from lfs.core.signals import featured_changed
from lfs.core.utils import LazyEncoder
from lfs.marketing.models import FeaturedProduct
@permission_required("manage_shop")
def manage_featured(request, template_name="manage/marketing/featured.html"):
    """Renders the featured-products management page.

    NOTE(review): returns the rendered template as a string
    (``render_to_string``), not an HttpResponse -- confirm callers wrap it.
    """
    inline = manage_featured_inline(request, as_string=True)
    # Options for the page-size selector; the session remembers the choice.
    amount_options = []
    for value in (10, 25, 50, 100):
        amount_options.append({
            "value": value,
            "selected": value == request.session.get("featured-amount")
        })
    return render_to_string(template_name, request=request, context={
        "featured_inline": inline,
        "amount_options": amount_options,
    })
@permission_required("manage_shop")
def manage_featured_inline(request, as_string=False, template_name="manage/marketing/featured_inline.html"):
    """Renders the featured-products table and the paginated list of
    products still available for adding, honoring name/SKU and category
    filters kept in the session.

    Returns the rendered HTML string when ``as_string`` is True, otherwise a
    JSON HttpResponse for the AJAX UI.
    """
    featured = FeaturedProduct.objects.all()
    featured_ids = [f.product.id for f in featured]
    r = request.POST if request.method == 'POST' else request.GET
    s = request.session
    # ``keep-filters``/``page`` signal a partial update: reuse the stored
    # filters (request values take precedence). A full reload resets them.
    if r.get("keep-filters") or r.get("page"):
        page = r.get("page", s.get("featured_products_page", 1))
        filter_ = r.get("filter", s.get("filter"))
        category_filter = r.get("featured_category_filter", s.get("featured_category_filter"))
    else:
        page = r.get("page", 1)
        filter_ = r.get("filter")
        category_filter = r.get("featured_category_filter")
    # Persist the current filters for the next partial update.
    s["featured_products_page"] = page
    s["filter"] = filter_
    s["featured_category_filter"] = category_filter
    try:
        s["featured-amount"] = int(r.get("featured-amount", s.get("featured-amount")))
    except TypeError:
        # No amount in request or session yet: fall back to the default.
        s["featured-amount"] = 25
    filters = Q()
    if filter_:
        # Match by name or SKU; variants may inherit either from the parent.
        filters &= Q(name__icontains=filter_)
        filters |= Q(sku__icontains=filter_)
        filters |= (Q(sub_type=VARIANT) & Q(active_sku=False) & Q(parent__sku__icontains=filter_))
        filters |= (Q(sub_type=VARIANT) & Q(active_name=False) & Q(parent__name__icontains=filter_))
    if category_filter:
        if category_filter == "None":
            filters &= Q(categories=None)
        elif category_filter == "All":
            pass
        else:
            # Include the chosen category and all of its descendants.
            category = lfs_get_object_or_404(Category, pk=category_filter)
            categories = [category]
            categories.extend(category.get_all_children())
            filters &= Q(categories__in=categories)
    # Products already featured are excluded from the "addable" list.
    products = Product.objects.filter(filters).exclude(pk__in=featured_ids)
    paginator = Paginator(products, s["featured-amount"])
    total = products.count()
    try:
        page = paginator.page(page)
    except EmptyPage:
        page = 0
    result = render_to_string(template_name, request=request, context={
        "featured": featured,
        "total": total,
        "page": page,
        "paginator": paginator,
        "filter": filter_
    })
    if as_string:
        return result
    else:
        return HttpResponse(
            json.dumps({
                "html": [["#featured-inline", result]],
            }), content_type='application/json')
@permission_required("manage_shop")
def add_featured(request):
    """Adds featured products for the given ids (within the request body)."""
    for temp_id in request.POST.keys():
        if temp_id.startswith("product") is False:
            continue
        temp_id = temp_id.split("-")[1]
        FeaturedProduct.objects.create(product_id=temp_id)
    # Re-normalize positions after the inserts.
    _update_positions()
    html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
    result = json.dumps({
        "html": html,
        "message": _(u"Featured product has been added.")
    }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
@permission_required("manage_shop")
def update_featured(request):
    """Saves or removes featured products for the passed ids (request body).

    With ``action=remove`` every ``product-<id>`` key is deleted; otherwise
    every ``position-<id>`` key updates the corresponding position. In both
    cases positions are re-normalized and the refreshed inline HTML is
    returned as JSON.
    """
    if request.POST.get("action") == "remove":
        # Track the last successfully removed item so the signal below can
        # never raise NameError (previously ``featured`` was referenced
        # unconditionally and was unbound when no product key matched).
        featured = None
        for temp_id in request.POST.keys():
            if not temp_id.startswith("product"):
                continue
            temp_id = temp_id.split("-")[1]
            try:
                featured = FeaturedProduct.objects.get(pk=temp_id)
                featured.delete()
            except (FeaturedProduct.DoesNotExist, ValueError):
                pass
        _update_positions()
        if featured is not None:
            featured_changed.send(featured)
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been removed.")
        }, cls=LazyEncoder)
    else:
        for temp_id in request.POST.keys():
            if not temp_id.startswith("position"):
                continue
            temp_id = temp_id.split("-")[1]
            featured = FeaturedProduct.objects.get(pk=temp_id)
            position = request.POST.get("position-%s" % temp_id)
            featured.position = position
            featured.save()
        _update_positions()
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been updated.")
        }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def _update_positions():
    """Re-number all featured products with evenly spaced positions
    (10, 20, 30, ...)."""
    for i, featured in enumerate(FeaturedProduct.objects.all()):
        featured.position = (i + 1) * 10
        featured.save()
| true | true |
f72f92bc7ae99fd0568a2619f8d80e2d1390372f | 2,592 | py | Python | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import boto3
import clamav
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import CLAMAVLIB_PATH
from common import get_timestamp
import shutil
def lambda_handler(event, context):
    """AWS Lambda entry point: refresh the ClamAV definition files.

    Wipes the local definition directory, runs freshclam (twice if needed to
    keep ``main`` in its compressed .cvd form), then uploads the refreshed
    definitions to S3. ``event``/``context`` are the standard Lambda
    arguments and are unused here.
    """
    # s3 = boto3.resource("s3")
    s3_client = boto3.client("s3")
    print("Script starting at %s\n" % (get_timestamp()))
    # Start from a clean slate so stale or partial definitions never linger.
    # NOTE(review): rmtree of subdirectories while os.walk is in progress
    # relies on walk tolerating vanished directories -- confirm acceptable.
    for root, dirs, files in os.walk(AV_DEFINITION_PATH):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))
    # to_download = clamav.update_defs_from_s3(
    #     s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX
    # )
    print("Skipping clamav definition download %s\n" % (get_timestamp()))
    # for download in to_download.values():
    #     s3_path = download["s3_path"]
    #     local_path = download["local_path"]
    #     print("Downloading definition file %s from s3://%s" % (local_path, s3_path))
    #     s3.Bucket(AV_DEFINITION_S3_BUCKET).download_file(s3_path, local_path)
    #     print("Downloading definition file %s complete!" % (local_path))
    clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    # If main.cvd gets updated (very rare), we will need to force freshclam
    # to download the compressed version to keep file sizes down.
    # The existence of main.cud is the trigger to know this has happened.
    if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cud")):
        os.remove(os.path.join(AV_DEFINITION_PATH, "main.cud"))
        if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cvd")):
            os.remove(os.path.join(AV_DEFINITION_PATH, "main.cvd"))
        clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    clamav.upload_defs_to_s3(
        s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH
    )
    print("Script finished at %s\n" % get_timestamp())
| 39.272727 | 87 | 0.717978 |
import os
import boto3
import clamav
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import CLAMAVLIB_PATH
from common import get_timestamp
import shutil
def lambda_handler(event, context):
    """AWS Lambda entry point: refresh the ClamAV definition files.

    Wipes the local definition directory, runs freshclam (twice if needed to
    keep ``main`` in its compressed .cvd form), then uploads the refreshed
    definitions to S3.
    """
    s3_client = boto3.client("s3")
    print("Script starting at %s\n" % (get_timestamp()))
    # Start from a clean slate so stale or partial definitions never linger.
    for root, dirs, files in os.walk(AV_DEFINITION_PATH):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))
    print("Skipping clamav definition download %s\n" % (get_timestamp()))
    clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    # main.cud appearing means main.cvd was re-expanded; remove both and
    # rerun freshclam so the compressed .cvd form is restored.
    if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cud")):
        os.remove(os.path.join(AV_DEFINITION_PATH, "main.cud"))
        if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cvd")):
            os.remove(os.path.join(AV_DEFINITION_PATH, "main.cvd"))
        clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    clamav.upload_defs_to_s3(
        s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH
    )
    print("Script finished at %s\n" % get_timestamp())
| true | true |
f72f92c99cf839ad598a33d59f04bbdac9db7a62 | 867 | py | Python | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | """
A NeuralNet is just a collection of layers.
It behaves a lot like a layer itself, although
we're not going to make it one.
"""
from typing import Sequence, Iterator, Tuple
from .tensor import Tensor
from .layers import Layer
class NeuralNet:
    """A neural network: an ordered collection of layers.

    It behaves much like a layer itself -- data flows forward through the
    layers in order, and gradients flow backward in reverse order.
    """
    def __init__(self, layers: Sequence[Layer]) -> None:
        self.layers = layers
    def forward(self, inputs: Tensor) -> Tensor:
        """Push ``inputs`` through every layer, in order."""
        out = inputs
        for layer in self.layers:
            out = layer.forward(out)
        return out
    def backward(self, grad: Tensor) -> Tensor:
        """Propagate ``grad`` back through the layers, last to first."""
        g = grad
        for layer in reversed(self.layers):
            g = layer.backward(g)
        return g
    def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]:
        """Yield each layer's (parameter, gradient) pairs."""
        for layer in self.layers:
            for name, param in layer.params.items():
                yield param, layer.grads[name]
| 27.967742 | 66 | 0.635525 | from typing import Sequence, Iterator, Tuple
from .tensor import Tensor
from .layers import Layer
class NeuralNet:
    """A neural net is just an ordered collection of layers; it behaves a
    lot like a layer itself."""
    def __init__(self, layers: Sequence[Layer]) -> None:
        self.layers = layers
    def forward(self, inputs: Tensor) -> Tensor:
        """Feed ``inputs`` through each layer in order."""
        for layer in self.layers:
            inputs = layer.forward(inputs)
        return inputs
    def backward(self, grad: Tensor) -> Tensor:
        """Propagate ``grad`` backwards through the layers in reverse order."""
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        return grad
    def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]:
        """Yield (param, grad) pairs from every layer."""
        for layer in self.layers:
            for name, param in layer.params.items():
                grad = layer.grads[name]
                yield param, grad
| true | true |
f72f93203fa30524663dd1ad94aabbba9deee380 | 5,466 | py | Python | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | 1 | 2020-11-08T03:12:22.000Z | 2020-11-08T03:12:22.000Z | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | null | null | null | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | null | null | null | def breast_cancer(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
from talos.model import lr_normalizer, early_stopper, hidden_layers
from talos.metrics.keras_metrics import matthews, precision, recall, f1score
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['last_activation']))
model.compile(optimizer=params['optimizer']
(lr=lr_normalizer(params['lr'],
params['optimizer'])),
loss=params['losses'],
metrics=['acc',
f1score,
recall,
precision,
matthews])
results = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val],
callbacks=[early_stopper(params['epochs'],
mode='moderate',
monitor='val_f1score')])
return results, model
def cervical_cancer(x_train, y_train, x_val, y_val, params):
    """Talos model template for the cervical-cancer dataset.

    Identical in structure to :func:`breast_cancer`: a dense binary
    classifier driven entirely by the ``params`` dict, trained with early
    stopping on val_f1score. Returns (history, model).
    """
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    from talos.metrics.keras_metrics import matthews, precision, recall, f1score
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    model.add(Dropout(params['dropout']))
    # Configurable stack of hidden layers; 1 = width of the final output.
    hidden_layers(model, params, 1)
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer']
                  (lr=lr_normalizer(params['lr'],
                                    params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc',
                           f1score,
                           recall,
                           precision,
                           matthews])
    results = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val],
                        callbacks=[early_stopper(params['epochs'],
                                                 mode='moderate',
                                                 monitor='val_f1score')])
    return results, model
def titanic(x_train, y_train, x_val, y_val, params):
    """Talos model template for the titanic dataset (quick demo model)."""
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    # note how instead of passing the value, we pass a dictionary entry
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    # same here, just passing a dictionary entry
    model.add(Dropout(params['dropout']))
    # again, instead of the activation name, we have a dictionary entry
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'],
                  loss=params['losses'],
                  metrics=['acc'])
    # NOTE: epochs is fixed at 2 (fast demo); params['epochs'] is ignored
    # and no early-stopping callback is used in this template.
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=2,
                    verbose=0,
                    validation_data=[x_val, y_val])
    return out, model
def iris(x_train, y_train, x_val, y_val, params):
    """Talos model template for the iris dataset: a dense multi-class
    classifier sized from ``y_train.shape[1]``, configured entirely from
    ``params`` and trained with early stopping. Returns (history, model).
    """
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    # note how instead of passing the value, we pass a dictionary entry
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    # same here, just passing a dictionary entry
    model.add(Dropout(params['dropout']))
    # with this call we can create any number of hidden layers
    hidden_layers(model, params, y_train.shape[1])
    # again, instead of the activation name, we have a dictionary entry
    model.add(Dense(y_train.shape[1],
                    activation=params['last_activation']))
    # here we are using a learning rate boundary
    model.compile(optimizer=params['optimizer']
                  (lr=lr_normalizer(params['lr'],
                                    params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc'])
    # here we are also using the early_stopper function for a callback
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=[x_val, y_val],
                    callbacks=[early_stopper(params['epochs'], mode=[1, 1])])
    return out, model
def breast_cancer(x_train, y_train, x_val, y_val, params):
    """Talos model template for the breast-cancer dataset: a dense binary
    classifier driven by ``params``, trained with early stopping on
    val_f1score. Returns (history, model)."""
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    from talos.metrics.keras_metrics import matthews, precision, recall, f1score
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    model.add(Dropout(params['dropout']))
    hidden_layers(model, params, 1)
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer']
                  (lr=lr_normalizer(params['lr'],
                                    params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc',
                           f1score,
                           recall,
                           precision,
                           matthews])
    results = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val],
                        callbacks=[early_stopper(params['epochs'],
                                                 mode='moderate',
                                                 monitor='val_f1score')])
    return results, model
def cervical_cancer(x_train, y_train, x_val, y_val, params):
    """Talos model template for the cervical-cancer dataset; structurally
    identical to ``breast_cancer``. Returns (history, model)."""
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    from talos.metrics.keras_metrics import matthews, precision, recall, f1score
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    model.add(Dropout(params['dropout']))
    hidden_layers(model, params, 1)
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer']
                  (lr=lr_normalizer(params['lr'],
                                    params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc',
                           f1score,
                           recall,
                           precision,
                           matthews])
    results = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val],
                        callbacks=[early_stopper(params['epochs'],
                                                 mode='moderate',
                                                 monitor='val_f1score')])
    return results, model
def titanic(x_train, y_train, x_val, y_val, params):
    """Talos model template for the titanic dataset (quick demo model).

    NOTE: epochs is fixed at 2; params['epochs'] is ignored here."""
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'],
                  loss=params['losses'],
                  metrics=['acc'])
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=2,
                    verbose=0,
                    validation_data=[x_val, y_val])
    return out, model
def iris(x_train, y_train, x_val, y_val, params):
    """Talos model template for the iris dataset: a dense multi-class
    classifier sized from ``y_train.shape[1]``, trained with early stopping.
    Returns (history, model)."""
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    model.add(Dropout(params['dropout']))
    hidden_layers(model, params, y_train.shape[1])
    model.add(Dense(y_train.shape[1],
                    activation=params['last_activation']))
    model.compile(optimizer=params['optimizer']
                  (lr=lr_normalizer(params['lr'],
                                    params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc'])
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=[x_val, y_val],
                    callbacks=[early_stopper(params['epochs'], mode=[1, 1])])
    return out, model
| true | true |
f72f940275710e56f5500dcf7f4aacb8959a82b9 | 2,593 | py | Python | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sigmoid Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class SigmoidBijectorTest(tf.test.TestCase):
  """Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation."""

  def testBijector(self):
    """Checks forward/inverse and the log-det-Jacobians against scipy."""
    self.assertStartsWith(tfb.Sigmoid().name, "sigmoid")
    x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
    y = special.expit(x)
    # Inverse log-det-Jacobian of the sigmoid: -log(y) - log(1 - y).
    ildj = -np.log(y) - np.log1p(-y)
    bijector = tfb.Sigmoid()
    self.assertAllClose(
        y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)
    self.assertAllClose(
        x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)
    self.assertAllClose(
        ildj,
        self.evaluate(bijector.inverse_log_det_jacobian(
            y, event_ndims=0)), atol=0., rtol=1e-6)
    self.assertAllClose(
        -ildj,
        self.evaluate(bijector.forward_log_det_jacobian(
            x, event_ndims=0)), atol=0., rtol=1e-4)

  def testScalarCongruency(self):
    """Checks forward/inverse consistency over a scalar interval."""
    bijector_test_util.assert_scalar_congruency(
        tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,
        rtol=.1)

  def testBijectiveAndFinite(self):
    """Checks bijectivity/finiteness on a grid avoiding the {0, 1} edges."""
    x = np.linspace(-100., 100., 100).astype(np.float32)
    # Keep y strictly inside (0, 1) so logits stay finite.
    eps = 1e-3
    y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,
        rtol=1e-4)
# Standard TensorFlow test entry point when the file is run as a script.
if __name__ == "__main__":
  tf.test.main()
| 37.042857 | 115 | 0.69302 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow.python.framework import test_util
@test_util.run_all_in_graph_and_eager_modes
class SigmoidBijectorTest(tf.test.TestCase):
def testBijector(self):
self.assertStartsWith(tfb.Sigmoid().name, "sigmoid")
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
ildj = -np.log(y) - np.log1p(-y)
bijector = tfb.Sigmoid()
self.assertAllClose(
y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)
self.assertAllClose(
x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)
self.assertAllClose(
ildj,
self.evaluate(bijector.inverse_log_det_jacobian(
y, event_ndims=0)), atol=0., rtol=1e-6)
self.assertAllClose(
-ildj,
self.evaluate(bijector.forward_log_det_jacobian(
x, event_ndims=0)), atol=0., rtol=1e-4)
def testScalarCongruency(self):
bijector_test_util.assert_scalar_congruency(
tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,
rtol=.1)
def testBijectiveAndFinite(self):
x = np.linspace(-100., 100., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
bijector_test_util.assert_bijective_and_finite(
tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,
rtol=1e-4)
if __name__ == "__main__":
tf.test.main()
| true | true |
f72f9494e109e2041d0b65a04cdffcdfc754e555 | 127 | py | Python | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | 1 | 2020-07-31T17:50:50.000Z | 2020-07-31T17:50:50.000Z | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | 3 | 2021-06-06T18:25:43.000Z | 2021-06-07T00:26:44.000Z | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | null | null | null | from pysparkrpc.server.server import run
from pysparkrpc.server.capture import Capture
__all__ = [
'run', 'Capture'
]
| 18.142857 | 45 | 0.724409 | from pysparkrpc.server.server import run
from pysparkrpc.server.capture import Capture
__all__ = [
'run', 'Capture'
]
| true | true |
f72f94e64373f2e4a2564243a8a3d17883885b79 | 2,599 | py | Python | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | null | null | null | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | 3 | 2022-03-21T18:30:27.000Z | 2022-03-30T18:04:05.000Z | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | null | null | null | import pytest
from ga4gh.testbed.report.summary import Summary
# Argument names for the increment tests: which counter to bump, whether an
# explicit n= keyword is passed, and the expected resulting count.
increment_inputs = "count_type," \
    + "use_n," \
    + "n,"
# One row per (counter, call style): without n the counter bumps by 1;
# with n it bumps by exactly n.
increment_cases = [
    ("unknown", False, 1),
    ("unknown", True, 3),
    ("passed", False, 1),
    ("passed", True, 4),
    ("warned", False, 1),
    ("warned", True, 5),
    ("failed", False, 1),
    ("failed", True, 6),
    ("skipped", False, 1),
    ("skipped", True, 7)
]

# Per-status counts plus the expected get_total() (their sum).
summary_total_inputs = "unknown,passed,warned,failed,skipped,total"
summary_total_cases = [
    (1, 1, 1, 1, 1, 5),
    (10, 4, 6, 7, 12, 39)
]

# Two summaries' counts (order: unknown, passed, warned, failed, skipped)
# and the element-wise sums expected after aggregation.
aggregate_summary_inputs = "counts_a,counts_b,counts_exp"
aggregate_summary_cases = [
    (
        [1, 3, 5, 7, 9],
        [2, 4, 6, 8, 10],
        [3, 7, 11, 15, 19]
    ),
    (
        [15, 9, 6, 12, 13],
        [42, 47, 31, 27, 26],
        [57, 56, 37, 39, 39]
    )
]
@pytest.mark.parametrize(increment_inputs, increment_cases)
def test_summary_increment(count_type, use_n, n):
    """Each increment_<type>() bumps only its own counter, by 1 or by n."""
    summary = Summary()
    bump = getattr(summary, "increment_" + count_type)
    read = getattr(summary, "get_" + count_type)
    if use_n:
        bump(n=n)
    else:
        bump()
    assert read() == n
@pytest.mark.parametrize(summary_total_inputs, summary_total_cases)
def test_summary_get_total(unknown, passed, warned, failed, skipped, total):
    """get_total() is the sum over all five status counters."""
    summary = Summary()
    for status, count in (("unknown", unknown), ("passed", passed),
                          ("warned", warned), ("failed", failed),
                          ("skipped", skipped)):
        getattr(summary, "increment_" + status)(n=count)
    assert summary.get_total() == total
@pytest.mark.parametrize(aggregate_summary_inputs, aggregate_summary_cases)
def test_aggregate_summary(counts_a, counts_b, counts_exp):
    """aggregate_summary() adds another Summary's counters into this one."""
    statuses = ("unknown", "passed", "warned", "failed", "skipped")

    def fill(summary, counts):
        # Load one counter per status, in the canonical field order.
        for status, count in zip(statuses, counts):
            getattr(summary, "increment_" + status)(n=count)

    summary_a = Summary()
    summary_b = Summary()
    fill(summary_a, counts_a)
    fill(summary_b, counts_b)

    summary_a.aggregate_summary(summary_b)
    for status, expected in zip(statuses, counts_exp):
        assert getattr(summary_a, "get_" + status)() == expected
| 28.877778 | 76 | 0.652943 | import pytest
from ga4gh.testbed.report.summary import Summary
increment_inputs = "count_type," \
+ "use_n," \
+ "n,"
increment_cases = [
("unknown", False, 1),
("unknown", True, 3),
("passed", False, 1),
("passed", True, 4),
("warned", False, 1),
("warned", True, 5),
("failed", False, 1),
("failed", True, 6),
("skipped", False, 1),
("skipped", True, 7)
]
summary_total_inputs = "unknown,passed,warned,failed,skipped,total"
summary_total_cases = [
(1, 1, 1, 1, 1, 5),
(10, 4, 6, 7, 12, 39)
]
aggregate_summary_inputs = "counts_a,counts_b,counts_exp"
aggregate_summary_cases = [
(
[1, 3, 5, 7, 9],
[2, 4, 6, 8, 10],
[3, 7, 11, 15, 19]
),
(
[15, 9, 6, 12, 13],
[42, 47, 31, 27, 26],
[57, 56, 37, 39, 39]
)
]
@pytest.mark.parametrize(increment_inputs, increment_cases)
def test_summary_increment(count_type, use_n, n):
summary = Summary()
increment_fn_name = "increment_" + count_type
getter_fn_name = "get_" + count_type
increment_fn = getattr(summary, increment_fn_name)
getter_fn = getattr(summary, getter_fn_name)
if use_n:
increment_fn(n=n)
else:
increment_fn()
assert getter_fn() == n
@pytest.mark.parametrize(summary_total_inputs, summary_total_cases)
def test_summary_get_total(unknown, passed, warned, failed, skipped, total):
summary = Summary()
summary.increment_unknown(n=unknown)
summary.increment_passed(n=passed)
summary.increment_warned(n=warned)
summary.increment_failed(n=failed)
summary.increment_skipped(n=skipped)
assert summary.get_total() == total
@pytest.mark.parametrize(aggregate_summary_inputs, aggregate_summary_cases)
def test_aggregate_summary(counts_a, counts_b, counts_exp):
def prep_summary(summary, counts):
summary.increment_unknown(n=counts[0])
summary.increment_passed(n=counts[1])
summary.increment_warned(n=counts[2])
summary.increment_failed(n=counts[3])
summary.increment_skipped(n=counts[4])
def assert_summary(summary, counts):
assert summary.get_unknown() == counts[0]
assert summary.get_passed() == counts[1]
assert summary.get_warned() == counts[2]
assert summary.get_failed() == counts[3]
assert summary.get_skipped() == counts[4]
summary_a = Summary()
summary_b = Summary()
prep_summary(summary_a, counts_a)
prep_summary(summary_b, counts_b)
summary_a.aggregate_summary(summary_b)
assert_summary(summary_a, counts_exp)
| true | true |
f72f950331335ccf80a3f395f9946878cbb3df84 | 1,153 | py | Python | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | # Etem Kaya 16-Mar-2019
# Solution to Problem-10.
# File name: "plotfunction.py".
# Problem-10: Write a program that displays a plot of the functions x, x2 & 2x
# in the range [0, 4].

# Import matplotlib and numpy packages.
import matplotlib.pyplot as plt
import numpy as np

# Sample the closed interval [0, 4] in 0.5 steps.  np.arange(0.0, 4.0, 0.5)
# stopped at 3.5 and silently clipped the requested range; linspace includes
# both endpoints.
x = np.linspace(0.0, 4.0, 9)

# Define the three functions to compare.
y1 = x       # f(x) = x
y2 = x**2    # f(x) = x^2
y3 = 2**x    # f(x) = 2^x

# Plot one line per function on shared axes.
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)

# Mark intersection points: (1, 1) where y = x meets y = x^2 ('or' = red
# circle marker) and (2, 4) where y = x^2 meets y = 2^x ('bo' = blue circle).
plt.plot(1, 1, 'or')
plt.plot(2, 4, 'bo')

# Configure the graph decorations.
plt.title('Plotting Graph for functions f(x), f(x^2) and f(2^x)')
plt.xlabel('X - Axis')
plt.ylabel('Y - Axis')

# Turn on grid-line visibility.
plt.grid(True)

# One legend entry per plotted line, in plot order.
plt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')

# Render the figure.
plt.show()
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 4.0, 0.5)
y1 = x
y2 = x**2
y3 = 2**x
lt.plot(x, y3)
plt.plot(1, 1, 'or')
plt.plot(2, 4, 'bo')
ng Graph for functions f(x), f(x^2) and f(2^x)')
plt.xlabel('X - Axis')
plt.ylabel('Y - Axis')
plt.grid(True)
plt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')
| true | true |
f72f95655316cae4f59823e2c006bc6e12d8d83d | 6,245 | py | Python | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | 1 | 2019-10-30T10:06:15.000Z | 2019-10-30T10:06:15.000Z | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null |
#
# Import section
#
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
#
# SOURCE========================
#
# def run_source(my_mode_index=0):
def run_source(my_mode_index=0,energy=20016.1):
    """Return coherent mode `my_mode_index` of the 1D undulator source.

    The expensive coherent-mode decomposition is cached in the module-global
    `coherent_mode_decomposition`.  It is (re)computed when no cached result
    exists yet, or whenever mode 0 is requested (i.e. at the start of a new
    scan over modes, so a changed `energy` takes effect).
    """
    global coherent_mode_decomposition
    try:
        # Requesting mode 0 forces a recompute; otherwise probe the cache
        # (NameError lands in the except branch when it does not exist yet).
        # NOTE(review): bare except also swallows unrelated errors.
        if my_mode_index == 0: raise Exception()
        tmp = coherent_mode_decomposition
    except:
        ########## SOURCE ##########
        #
        # create output_wavefront
        #
        #
        from wofryimpl.propagator.util.undulator_coherent_mode_decomposition_1d import \
            UndulatorCoherentModeDecomposition1D
        # 1D coherent-mode decomposition along the vertical ('V') direction.
        # The numeric values are machine/undulator parameters baked in by the
        # OASYS script generator; only the photon energy is a parameter here.
        coherent_mode_decomposition = UndulatorCoherentModeDecomposition1D(
            electron_energy=6,
            electron_current=0.2,
            undulator_period=0.017,
            undulator_nperiods=117.647,
            K=0.09683,
            photon_energy= energy,
            abscissas_interval=0.0001,
            number_of_points=2500,
            distance_to_screen=100,
            scan_direction='V',
            sigmaxx=3.63641e-06,
            sigmaxpxp=1.37498e-06,
            useGSMapproximation=False, )
        # make calculation
        coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()
        mode_index = 0
        output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)
    # Wavefront of the requested mode (overwrites the mode-0 wavefront
    # computed above when the cache was rebuilt).
    output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)
    return output_wavefront
#
# BEAMLINE========================
#
def run_beamline(output_wavefront):
    """Propagate a source wavefront through the three-element beamline.

    Elements, in order: (1) an ideal screen reached after a 27.066 m drift,
    propagated with the 1D integral method; (2) a 1 mm slit; (3) a 1D mirror
    with a measured surface-error profile.  Returns the wavefront after the
    mirror.  Distances are presumably in meters and angles in radians, as
    elsewhere in this script — TODO confirm against the wofry conventions.
    """
    ########## OPTICAL SYSTEM ##########

    ########## OPTICAL ELEMENT NUMBER 1 ##########
    # Free-space propagation (27.066 m drift) onto an ideal screen.
    input_wavefront = output_wavefront.duplicate()
    from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D

    optical_element = WOScreen1D()
    # drift_before 27.066 m
    #
    # propagating
    #
    #
    propagation_elements = PropagationElements()
    beamline_element = BeamlineElement(optical_element=optical_element,
                                       coordinates=ElementCoordinates(p=27.066000, q=0.000000,
                                                                      angle_radial=numpy.radians(0.000000),
                                                                      angle_azimuthal=numpy.radians(0.000000)))
    propagation_elements.add_beamline_element(beamline_element)
    propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)
    # self.set_additional_parameters(propagation_parameters)
    #
    # Zoom/sampling knobs for the integral propagator.
    propagation_parameters.set_additional_parameters('magnification_x', 20.0)
    propagation_parameters.set_additional_parameters('magnification_N', 1.0)
    #
    propagator = PropagationManager.Instance()
    try:
        # Registering twice raises; ignore if INTEGRAL_1D is already known.
        propagator.add_propagator(Integral1D())
    except:
        pass
    output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
                                                 handler_name='INTEGRAL_1D')

    ########## OPTICAL ELEMENT NUMBER 2 ##########
    # 1 mm square aperture (±0.5 mm), applied in place (no drift).
    input_wavefront = output_wavefront.duplicate()
    from syned.beamline.shape import Rectangle

    boundary_shape = Rectangle(-0.0005, 0.0005, -0.0005, 0.0005)
    from wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D

    optical_element = WOSlit1D(boundary_shape=boundary_shape)
    # no drift in this element
    output_wavefront = optical_element.applyOpticalElement(input_wavefront)

    ########## OPTICAL ELEMENT NUMBER 3 ##########
    # Grazing-incidence mirror with a measured height-error profile.
    # NOTE(review): error_file is an absolute path on the author's machine;
    # this function fails elsewhere unless that file exists.
    input_wavefront = output_wavefront.duplicate()
    from orangecontrib.esrf.wofry.util.mirror import WOMirror1D

    optical_element = WOMirror1D.create_from_keywords(
        name='',
        shape=0,
        p_focus=44.54,
        q_focus=45.4695,
        grazing_angle_in=0.0025,
        p_distance=17.474,
        q_distance=11.3,
        zoom_factor=2,
        error_flag=1,
        error_file='/home/srio/Oasys/dabam_profile_140461924578000.dat',
        error_file_oversampling_factor=30,
        mirror_length=0,
        mirror_points=0,
        write_profile=0)
    # no drift in this element
    output_wavefront = optical_element.applyOpticalElement(input_wavefront)
    return output_wavefront
#
# MAIN FUNCTION========================
#
# def main():
def main(energy=20016.064):
    """Run the partially coherent simulation for one photon energy.

    Propagates the first 10 coherent modes of the source through the
    beamline, accumulates them in a TallyCoherentModes object, and writes
    the spectral density and mode occupation to fixed-name .dat files
    (the caller renames them per energy).

    Parameters
    ----------
    energy : float
        Photon energy in eV forwarded to run_source().
    """
    # Removed: unused `from srxraylib.plot.gol import plot, plot_image`
    # (all plotting calls were commented out) and the dead plot calls.
    from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes

    tally = TallyCoherentModes()
    # 10 modes is the truncation used for this source decomposition.
    for my_mode_index in range(10):
        output_wavefront = run_source(my_mode_index=my_mode_index, energy=energy)
        output_wavefront = run_beamline(output_wavefront)
        tally.append(output_wavefront)

    tally.save_spectral_density(filename="id09_3mrad_spectral_density.dat")
    tally.save_occupation(filename="id09_3mrad_occupation.dat")
#
# MAIN========================
#
# One run at the default energy.  NOTE(review): its output files are
# immediately overwritten by the first iteration of the scan below, so this
# call looks redundant — confirm it can be dropped.
main()
#
# MAIN========================
#
import os

# Energy = numpy.linspace(18000,22000,50)
# Scan 100 photon energies across 18.5-20.5 keV.
Energy = numpy.linspace(18500,20500,100)
for energy in Energy:
    main(energy)
    # Archive the fixed-name outputs under results/, tagged with the
    # (truncated) photon energy in eV.
    command = "mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat" % energy
    print(command)
    os.system(command)
    command = "mv id09_3mrad_occupation.dat results/occupation_%4d.dat" % energy
    print(command)
    os.system(command)
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
def run_source(my_mode_index=0,energy=20016.1):
global coherent_mode_decomposition
try:
if my_mode_index == 0: raise Exception()
tmp = coherent_mode_decomposition
except:
n_energy=6,
electron_current=0.2,
undulator_period=0.017,
undulator_nperiods=117.647,
K=0.09683,
photon_energy= energy,
abscissas_interval=0.0001,
number_of_points=2500,
distance_to_screen=100,
scan_direction='V',
sigmaxx=3.63641e-06,
sigmaxpxp=1.37498e-06,
useGSMapproximation=False, )
coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()
mode_index = 0
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)
return output_wavefront
def run_beamline(output_wavefront):
efront, propagation_elements=propagation_elements)
propagation_parameters.set_additional_parameters('magnification_x', 20.0)
propagation_parameters.set_additional_parameters('magnification_N', 1.0)
propagator = PropagationManager.Instance()
try:
propagator.add_propagator(Integral1D())
except:
pass
output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
handler_name='INTEGRAL_1D')
e_oversampling_factor=30,
mirror_length=0,
mirror_points=0,
write_profile=0)
output_wavefront = optical_element.applyOpticalElement(input_wavefront)
return output_wavefront
def main(energy=20016.064):
from srxraylib.plot.gol import plot, plot_image
from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes
tally = TallyCoherentModes()
for my_mode_index in range(10):
output_wavefront = run_source(my_mode_index=my_mode_index,energy=energy)
output_wavefront = run_beamline(output_wavefront)
tally.append(output_wavefront)
tally.save_spectral_density(filename="id09_3mrad_spectral_density.dat")
tally.save_occupation(filename="id09_3mrad_occupation.dat")
main()
import os
Energy = numpy.linspace(18500,20500,100)
for energy in Energy:
main(energy)
command = "mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat" % energy
print(command)
os.system(command)
command = "mv id09_3mrad_occupation.dat results/occupation_%4d.dat" % energy
print(command)
os.system(command) | true | true |
f72f95ac268fc32ff366dbf8047445bc5328d793 | 3,775 | py | Python | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | null | null | null | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | 6 | 2021-03-19T07:57:21.000Z | 2021-09-22T19:14:19.000Z | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | null | null | null | """
Django settings for myblog_project project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the DJANGO_SECRET_KEY environment variable; the hard-coded value is
# kept only as a fallback so existing deployments keep working unchanged.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY', 'n$b=m%w%ynh82g66$o67=+t&a1n&r19%aggblv#f0nxw&2i_%e')

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG defaults to on; set the DEBUG env var to 0 to disable.
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
# DEBUG = True

# Hosts this site may serve: the EC2 instance (name and IP), the API
# domain, and localhost for development.
ALLOWED_HOSTS = [
    'ec2-3-22-55-28.us-east-2.compute.amazonaws.com',
    '3.22.55.28',
    'api.kevinccapatinta.com',
    '127.0.0.1'
]


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'myblog_api',
    'markdownfield',
    'corsheaders',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must run before CommonMiddleware so CORS headers are
    # added to responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

# Front-end origins allowed to make cross-origin requests to this API.
CORS_ORIGIN_WHITELIST = [
    "http://localhost:3000",
    "https://master.d2iflgr89yzlqi.amplifyapp.com",
    "https://kevinccapatinta.com",
    "https://www.kevinccapatinta.com",
]

ROOT_URLCONF = 'myblog_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myblog_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'

# Custom user model defined in the API app.
AUTH_USER_MODEL = 'myblog_api.UserProfile'

# SITE_URL = "http://localhost:3000"
# Public front-end URL (e.g. used when building absolute links).
SITE_URL = "https://www.kevinccapatinta.com"

STATIC_ROOT = 'static/'
| 25.33557 | 91 | 0.692715 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'n$b=m%w%ynh82g66$o67=+t&a1n&r19%aggblv#f0nxw&2i_%e'
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
# DEBUG = True
ALLOWED_HOSTS = [
'ec2-3-22-55-28.us-east-2.compute.amazonaws.com',
'3.22.55.28',
'api.kevinccapatinta.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'myblog_api',
'markdownfield',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = [
"http://localhost:3000",
"https://master.d2iflgr89yzlqi.amplifyapp.com",
"https://kevinccapatinta.com",
"https://www.kevinccapatinta.com",
]
ROOT_URLCONF = 'myblog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'myblog_api.UserProfile'
# SITE_URL = "http://localhost:3000"
SITE_URL = "https://www.kevinccapatinta.com"
STATIC_ROOT = 'static/'
| true | true |
f72f97f7bd6a90c32e6a928e7162e7ae0f2fb559 | 2,394 | py | Python | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | 1 | 2020-10-20T19:09:27.000Z | 2020-10-20T19:09:27.000Z | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | null | null | null | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | null | null | null |
def main():
    """CLI entry point: parse arguments and dispatch to the right converter."""
    import argparse

    from .Image import Image
    from .Video import Video
    from .Live import Live

    parser = argparse.ArgumentParser(
        prog='to-ascii',
        description='A tool which can convert videos, images, gifs, and even live video to ascii art!'
    )

    # Flag definitions (same flags, defaults and help text as before).
    parser.add_argument('-t', '--type', type=str, choices=['image', 'video', 'live'], dest='filetype', help='The type of file', action='store', required=True)
    parser.add_argument('-f', '--file', type=str, dest='filename', help='The name of the file to convert', action='store', required=True)
    parser.add_argument('-s', '--scale', type=float, dest='scale', default=.1, help='The scale of the final dimensions', action='store')
    parser.add_argument('-w', '--width-stretch', type=float, dest='width_stretch', default=2, help='Scale which only applies to the width', action='store')
    parser.add_argument('-g', '--gradient', type=str, dest='gradient', default='0', help='The gradient pattern which will be used', action='store')
    parser.add_argument('-r', '--fps', type=int, dest='fps', default=30, help='The FPS cap which will be used when viewing video and live video', action='store')

    opts = parser.parse_args()

    # A purely numeric gradient selects a built-in gradient by index.
    try:
        opts.gradient = int(opts.gradient)
    except ValueError:
        pass

    # Keyword arguments common to all three converters.
    common = dict(scale=opts.scale, w_stretch=opts.width_stretch,
                  gradient=opts.gradient, verbose=True)

    if opts.filetype == 'live':
        # For live mode the "file" is a capture-device index; anything
        # non-numeric falls back to the default camera (0).
        try:
            source = int(opts.filename)
        except ValueError:
            source = 0

        live_view = Live(source, fps=opts.fps, **common)
        try:
            live_view.view()
        except KeyboardInterrupt:
            pass
        except Exception as e:
            print(f'ERROR (Please report this!): {e}')
        return

    if opts.filetype == 'video':
        converter = Video(opts.filename, **common)
    else:
        converter = Image(opts.filename, **common)

    try:
        converter.convert()
        if opts.filetype == 'video':
            converter.view(opts.fps)
        else:
            converter.view()
    except KeyboardInterrupt:
        print('Exiting...')
# Allow running the CLI module directly as a script.
if __name__ == '__main__':
    main()
| 38.612903 | 161 | 0.631161 |
def main():
import argparse
from .Image import Image
from .Video import Video
from .Live import Live
parser = argparse.ArgumentParser(
prog='to-ascii',
description='A tool which can convert videos, images, gifs, and even live video to ascii art!'
)
parser.add_argument('-t', '--type', type=str, choices=['image', 'video', 'live'], dest='filetype', help='The type of file', action='store', required=True)
parser.add_argument('-f', '--file', type=str, dest='filename', help='The name of the file to convert', action='store', required=True)
parser.add_argument('-s', '--scale', type=float, dest='scale', default=.1, help='The scale of the final dimensions', action='store')
parser.add_argument('-w', '--width-stretch', type=float, dest='width_stretch', default=2, help='Scale which only applies to the width', action='store')
parser.add_argument('-g', '--gradient', type=str, dest='gradient', default='0', help='The gradient pattern which will be used', action='store')
parser.add_argument('-r', '--fps', type=int, dest='fps', default=30, help='The FPS cap which will be used when viewing video and live video', action='store')
args = parser.parse_args()
try:
args.gradient = int(args.gradient)
except ValueError:
pass
if args.filetype == 'live':
try:
source = int(args.filename)
except ValueError:
source = 0
l = Live(source, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, fps=args.fps, verbose=True)
try:
l.view()
except KeyboardInterrupt:
return
except Exception as e:
print(f'ERROR (Please report this!): {e}')
return
return
elif args.filetype == 'video':
c = Video(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
else:
c = Image(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
try:
c.convert()
if args.filetype == 'video':
c.view(args.fps)
else:
c.view()
except KeyboardInterrupt:
print('Exiting...')
if __name__ == '__main__':
main()
| true | true |
f72f99ed631de8235fb5965c7d3a5d3202831bf1 | 1,133 | py | Python | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | null | null | null | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | null | null | null | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | 1 | 2022-01-29T07:48:26.000Z | 2022-01-29T07:48:26.000Z | #! /usr/bin/env python3
import os
import subprocess
import json
import sys
import time
# Sampling window passed to mongotop; kept as a string because it is used
# directly as a command-line argument.
seconds = '60'
# Output location: results are published to <prefix>/<filename> via an
# atomic rename in saveMongoData().
prefix = "/var/tmp"
filename = "mongotopy.json"
def reformat(data):
    """Flatten mongotop's nested `totals` mapping into a list of records.

    Each nonzero read/write time/count becomes one dict identifying the
    database, collection, operation and field; zero values are dropped.
    The namespace is split on the first '.' only, so dotted collection
    names are preserved.
    """
    rows = []
    for namespace, ops in data['totals'].items():
        database, coll = namespace.split(".", 1)
        for op in ("read", "write"):
            for field in ("time", "count"):
                value = ops[op][field]
                if value:
                    rows.append({"database": database, "coll": coll,
                                 "op": op, "field": field, "value": value})
    return rows
def saveMongoData():
    """Run mongotop once and atomically publish its totals as JSON.

    Invokes ``mongotop`` for a single row covering ``seconds`` seconds,
    flattens the result with reformat(), writes it to a temp file in
    ``prefix`` and renames it over the target so readers never observe
    a partially written document.

    Raises:
        subprocess.CalledProcessError: if mongotop exits non-zero.
        ValueError: if mongotop's stdout is not valid JSON.
    """
    mongocall = subprocess.Popen(
        ['mongotop', '--host=localhost', '--json', '--rowcount=1', seconds],
        stdout=subprocess.PIPE,
        # Keep stderr separate: merging it into stdout (stderr=STDOUT, the
        # previous behaviour) corrupts the JSON payload whenever mongotop
        # prints warnings or connection notices.
        stderr=subprocess.PIPE)
    stdout, stderr = mongocall.communicate()
    if mongocall.returncode != 0:
        raise subprocess.CalledProcessError(
            mongocall.returncode, mongocall.args, output=stdout, stderr=stderr)
    mongodata = reformat(json.loads(stdout.decode("utf-8")))
    tmp_path = os.path.join(prefix, 'tmpFile')
    with open(tmp_path, 'w') as f:
        f.write(json.dumps(mongodata))
    # os.rename() is atomic on POSIX for same-filesystem paths, so consumers
    # of the published file never see a half-written snapshot.
    os.rename(tmp_path, os.path.join(prefix, filename))
# Poll forever: refresh the exported mongotop snapshot, then sleep.
# Any failure (mongotop missing, bad JSON, filesystem error) is printed
# to stderr and retried on the next cycle instead of killing the daemon.
while True:
    try:
        saveMongoData()
    except Exception as e:
        print(e, file=sys.stderr)
    time.sleep(3*60)  # sample every 3 minutes
| 27.634146 | 151 | 0.606355 |
import os
import subprocess
import json
import sys
import time
seconds = '60'
prefix = "/var/tmp"
filename = "mongotopy.json"
def reformat(data):
formatted = []
data = data['totals']
for dbcoll in data:
database, coll = dbcoll.split(".",1)
for op in ["read", "write"]:
for field in ["time", "count"]:
if data[dbcoll][op][field]:
formatted.append({"database":database, "coll":coll, "op":op, "field": field, "value":data[dbcoll][op][field]})
return formatted
def saveMongoData():
mongocall = subprocess.Popen(['mongotop', '--host=localhost', '--json', '--rowcount=1', seconds], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout,stderr = mongocall.communicate()
mongodata = reformat(json.loads(stdout.decode("utf-8")))
with open(prefix + '/' + 'tmpFile', 'w') as f:
f.write(json.dumps(mongodata))
os.rename(prefix + '/' + 'tmpFile', prefix + '/' + filename)
while True:
try:
saveMongoData()
except Exception as e:
print(e, file=sys.stderr)
time.sleep(3*60)
| true | true |
f72f9b9c8047300fbbc175a3ab3dd4814d1b1ad2 | 588 | py | Python | spam_v1/spam_last.py | Alpha-Demon404/RE-14 | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 39 | 2020-02-26T09:44:36.000Z | 2022-03-23T00:18:25.000Z | spam_v1/spam_last.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 15 | 2020-05-14T10:07:26.000Z | 2022-01-06T02:55:32.000Z | spam_v1/spam_last.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 41 | 2020-03-16T22:36:38.000Z | 2022-03-17T14:47:19.000Z | import base64
print(base64.b64decode('KoQKiESIgQWZk5WZTBSbhB3UigCI05WayBHIgAiC0hXZ05SKpkSK5kTO5kTO5kDLwgCdulGZuFmcu02bk5WYyhic0N3KuF2clBHLv5GK0FWby9mZuISf71TZnF2czVWbm03e94GZzl2ctZSdrFWbyFmZ9IXZk5WZzZSOxkTM1tWYtJXYmBHdv1DZ3BnJ1tWYtJXYmBHdvlGch1jclNXd/AHaw5yctN3Lt92YuUmb5RWYu5Cc09WLpBXYv8iOwRHdoJCK0V2ZuMHdzVWdxVmcgACIKoTZ1JHVgUGbph2dKkiIuxlbc5GXhlGcvR3dvJ3ZlRmbvpXZy9SbvNmLlJWd0V3b59yL6MHc0RHauxlbcJXZrNWYIBSY0NXdKBSZilmcjNnY1Nlbc5GXisSKiAiOg4WYzVGUigCd1Bnbp91dhJHKlR3b1FnLilGbsJXd94WYzVGcKkiIgoDIpIjNrgCIu9GclxWZUBybOJCK0VHcul2X3Fmc98mbK02bk5WYyxiYpxGbyVHLzR3clVXclJHI0J3bw1Wa'[::-1]))
| 196 | 573 | 0.972789 | import base64
print(base64.b64decode('KoQKiESIgQWZk5WZTBSbhB3UigCI05WayBHIgAiC0hXZ05SKpkSK5kTO5kTO5kDLwgCdulGZuFmcu02bk5WYyhic0N3KuF2clBHLv5GK0FWby9mZuISf71TZnF2czVWbm03e94GZzl2ctZSdrFWbyFmZ9IXZk5WZzZSOxkTM1tWYtJXYmBHdv1DZ3BnJ1tWYtJXYmBHdvlGch1jclNXd/AHaw5yctN3Lt92YuUmb5RWYu5Cc09WLpBXYv8iOwRHdoJCK0V2ZuMHdzVWdxVmcgACIKoTZ1JHVgUGbph2dKkiIuxlbc5GXhlGcvR3dvJ3ZlRmbvpXZy9SbvNmLlJWd0V3b59yL6MHc0RHauxlbcJXZrNWYIBSY0NXdKBSZilmcjNnY1Nlbc5GXisSKiAiOg4WYzVGUigCd1Bnbp91dhJHKlR3b1FnLilGbsJXd94WYzVGcKkiIgoDIpIjNrgCIu9GclxWZUBybOJCK0VHcul2X3Fmc98mbK02bk5WYyxiYpxGbyVHLzR3clVXclJHI0J3bw1Wa'[::-1]))
| true | true |
f72f9baea74b1b509ef292d0c804f4d84afcdecf | 1,633 | py | Python | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | """
Test cfdi/utils/cfdi_amounts
"""
import os
import pytest
from tests.resources import scenarios
from cfdi.utils import cfdi_amounts as cfdia
@pytest.fixture(scope='session')
def dir_path():
    """Absolute directory containing this test module (session-scoped)."""
    module_path = os.path.realpath(__file__)
    return os.path.dirname(module_path)
def test_get_directory_cfdi_amounts(dir_path):
    """Exercise get_directory_cfdi_amounts against every configured scenario."""
    amount_keys = (
        'subtotal_cfdi_amount',
        'discount_cfdi_amount',
        'iva_cfdi_amount',
        'total_cfdi_amount',
    )
    for scenario in scenarios.CFDI_AMOUNTS:
        target_dir = os.path.join(dir_path, scenario['payload']['dir_path'])
        result = cfdia.get_directory_cfdi_amounts(target_dir)
        print(result)
        if scenario['error']:
            # Failure path: status 1, no data, and a captured Exception.
            assert result['status'] == 1
            assert result['info'] is None
            for key in amount_keys:
                assert result[key] is None
            assert isinstance(result['error'], Exception)
        else:
            # Success path: status 0 with float totals matching the scenario.
            assert result['status'] == 0
            assert isinstance(result['info'], list)
            for key in amount_keys:
                assert isinstance(result[key], float)
            for key in ('iva_cfdi_amount',
                        'total_cfdi_amount',
                        'subtotal_cfdi_amount'):
                assert result[key] == scenario[key]
| 34.020833 | 68 | 0.621555 | import os
import pytest
from tests.resources import scenarios
from cfdi.utils import cfdi_amounts as cfdia
@pytest.fixture(scope='session')
def dir_path():
return os.path.dirname(
os.path.realpath(__file__)
)
def test_get_directory_cfdi_amounts(dir_path):
for scenario in scenarios.CFDI_AMOUNTS:
abs_dir_path = os.path.join(
dir_path, scenario['payload']['dir_path']
)
result = cfdia.get_directory_cfdi_amounts(
abs_dir_path
)
print(result)
if scenario['error']:
assert result['status'] == 1
assert result['info'] is None
assert result['subtotal_cfdi_amount'] is None
assert result['discount_cfdi_amount'] is None
assert result['iva_cfdi_amount'] is None
assert result['total_cfdi_amount'] is None
assert isinstance(result['error'], Exception)
else:
assert result['status'] == 0
assert isinstance(result['info'], list)
assert isinstance(result['subtotal_cfdi_amount'], float)
assert isinstance(result['discount_cfdi_amount'], float)
assert isinstance(result['iva_cfdi_amount'], float)
assert isinstance(result['total_cfdi_amount'], float)
assert result['iva_cfdi_amount'] == \
scenario['iva_cfdi_amount']
assert result['total_cfdi_amount'] == \
scenario['total_cfdi_amount']
assert result['subtotal_cfdi_amount'] == \
scenario['subtotal_cfdi_amount']
| true | true |
f72f9ceda3ef7eb7bbb0856f097daa4acc3c96a8 | 2,564 | py | Python | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | 1 | 2021-11-02T17:58:29.000Z | 2021-11-02T17:58:29.000Z | """
End-to-end API tests for images. Can be used to verify a live deployment is
functioning as designed. Run with the `pytest -s` command from this directory.
"""
import json
import xml.etree.ElementTree as ET
from test.constants import API_URL
from test.media_integration import (
detail,
report,
search,
search_consistency,
search_quotes,
search_special_chars,
stats,
thumb,
)
from urllib.parse import urlencode
import pytest
import requests
@pytest.fixture
def image_fixture():
    """Fetch a live image-search result for 'dog' and return the parsed JSON."""
    url = f"{API_URL}/v1/images?q=dog"
    # verify=False: test deployments may run with self-signed certificates.
    response = requests.get(url, verify=False)
    assert response.status_code == 200
    return json.loads(response.text)
# Thin wrappers around the shared media-integration helpers, specialised
# for the "images" media type.
def test_search(image_fixture):
    search(image_fixture)
def test_search_quotes():
    search_quotes("images", "dog")
def test_search_with_special_characters():
    search_special_chars("images", "dog")
def test_search_consistency():
    # Compare results across several consecutive result pages.
    n_pages = 5
    search_consistency("images", n_pages)
def test_image_detail(image_fixture):
    detail("images", image_fixture)
def test_image_stats():
    stats("images")
def test_image_thumb(image_fixture):
    thumb(image_fixture)
# NOTE(review): name says "audio" but this exercises the images report
# endpoint -- presumably a copy-paste from the audio suite; renaming would
# change the pytest test ID, so it is only flagged here.
def test_audio_report(image_fixture):
    report("images", image_fixture)
def test_oembed_endpoint_for_json():
    """The oEmbed endpoint returns JSON metadata (the default format)."""
    image_url = "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63"
    query = urlencode({"url": image_url})  # 'format' defaults to json
    response = requests.get(
        f"{API_URL}/v1/images/oembed?{query}", verify=False
    )
    assert response.status_code == 200
    assert response.headers["Content-Type"] == "application/json"
    body = response.json()
    assert body["width"] == 1276
    assert body["height"] == 1536
    assert body["license_url"] == "https://creativecommons.org/licenses/by-nc-nd/4.0/"
def test_oembed_endpoint_for_xml():
    """The oEmbed endpoint returns XML metadata when format=xml is requested."""
    image_url = "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63"
    query = urlencode({"url": image_url, "format": "xml"})
    response = requests.get(
        f"{API_URL}/v1/images/oembed?{query}", verify=False
    )
    assert response.status_code == 200
    assert response.headers["Content-Type"] == "application/xml; charset=utf-8"
    root = ET.fromstring(response.content)
    tree = ET.ElementTree(root)
    assert tree.find("width").text == "1276"
    assert tree.find("height").text == "1536"
    expected_license = "https://creativecommons.org/licenses/by-nc-nd/4.0/"
    assert tree.find("license_url").text == expected_license
| 25.137255 | 88 | 0.693058 |
import json
import xml.etree.ElementTree as ET
from test.constants import API_URL
from test.media_integration import (
detail,
report,
search,
search_consistency,
search_quotes,
search_special_chars,
stats,
thumb,
)
from urllib.parse import urlencode
import pytest
import requests
@pytest.fixture
def image_fixture():
response = requests.get(f"{API_URL}/v1/images?q=dog", verify=False)
assert response.status_code == 200
parsed = json.loads(response.text)
return parsed
def test_search(image_fixture):
search(image_fixture)
def test_search_quotes():
search_quotes("images", "dog")
def test_search_with_special_characters():
search_special_chars("images", "dog")
def test_search_consistency():
n_pages = 5
search_consistency("images", n_pages)
def test_image_detail(image_fixture):
detail("images", image_fixture)
def test_image_stats():
stats("images")
def test_image_thumb(image_fixture):
thumb(image_fixture)
def test_audio_report(image_fixture):
report("images", image_fixture)
def test_oembed_endpoint_for_json():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
parsed = response.json()
assert parsed["width"] == 1276
assert parsed["height"] == 1536
assert parsed["license_url"] == "https://creativecommons.org/licenses/by-nc-nd/4.0/"
def test_oembed_endpoint_for_xml():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
"format": "xml",
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/xml; charset=utf-8"
response_body_as_xml = ET.fromstring(response.content)
xml_tree = ET.ElementTree(response_body_as_xml)
assert xml_tree.find("width").text == "1276"
assert xml_tree.find("height").text == "1536"
assert (
xml_tree.find("license_url").text
== "https://creativecommons.org/licenses/by-nc-nd/4.0/"
)
| true | true |
f72f9ee2b24540a19798faddf50ec1865ed092d8 | 22,235 | py | Python | DolfinPDESolver.py | WilliamPJSmith/CM4-A | bf2a0f2a49ea7e77454bacba25e6cbb2f282572f | [
"Unlicense"
] | null | null | null | DolfinPDESolver.py | WilliamPJSmith/CM4-A | bf2a0f2a49ea7e77454bacba25e6cbb2f282572f | [
"Unlicense"
] | null | null | null | DolfinPDESolver.py | WilliamPJSmith/CM4-A | bf2a0f2a49ea7e77454bacba25e6cbb2f282572f | [
"Unlicense"
] | null | null | null | """
DolfinPDESolver.py
==================
A python class structure written to interface CellModeller4 with the FEniCs/Dolfin finite element library.
Intended application: hybrid modelling of a microbial biofilm.
- Update: parameter input streamlined. New moving boundary mesh type.
- Update: added in-built test functions.
Created: W. P. J. Smith, 13.01.15
Updated: W. P. J. Smith, 22.03.15
Updated: W. P. J. Smith, 23.03.15
"""
try:
from dolfin import *
except ImportError:
print "Error: could not import dolfin library."
print "Try calling $ source /Applications/FEniCS.app/Contents/Resources/share/fenics/TestFenicsPath.conf "
import numpy
import math
from pyopencl.array import vec
class DolfinSolver:
def __init__(self, solverParams):
"""
Initialise the dolfin solver using a dictionary of params.
"""
# extract fixed params from params dictionary
self.pickleSteps = solverParams['pickleSteps']
self.h = solverParams['h']
self.origin = solverParams['origin']
self.N_x = int(solverParams['N_x'])
self.N_y = int(solverParams['N_y'])
self.L_x = solverParams['L_x']
self.L_y = solverParams['L_y']
self.u0 = solverParams['u0']
self.K = solverParams['K']
self.mu_eff = solverParams['mu_eff']
self.delta = solverParams['delta']
# some params we will have to calculate on the fly: set them to 0 for now
self.N_z = 0 # number of *canonical* elements in z
self.Lz_b = 0.0 # height at which to apply bulk boundary condition
self.W = 0.0 # thickness of mesh buffer layer
# some attributes that we'll update on the fly: set them to None for now
self.boundaryCondition = None
self.mesh = None
self.V = None
self.solution = None
def SolvePDE(self, centers, vols, filename, dir, stepNum=0):
"""
High-level function to be called during the function.
"""
# get height of highest cell in domain
max_height = 0.0
for center in centers:
hz = center[2]
if hz > max_height:
max_height = hz
print 'max height is %f' % max_height
# update mesh, function space and BCs
# TODO: add a better toggle here
self.mesh = self.DoubleBufferedMesh(max_height)
#self.mesh = self.RegularMesh()
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
#G = self.VolumeFractionOnElements()
#g = Function(self.V, name = "Volume fraction")
#g.interpolate(G)
#self.WriteFieldToFile(dir+filename+'_VolFracsCheck'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
if stepNum % self.pickleSteps == 0:
self.WriteFieldToFile(dir+filename+'.pvd', self.solution)
# interpolate solution to cell centres
u_local = self.InterpolateToCenters(centers)
return u_local
def NewtonIterator(self):
"""
A Newton iterator for solving non-linear problems.
/!\ Assumes that function space (V), boundaryCondition, vol_fracs are up-to-date.
"""
# Define variational problem
u = Function(self.V, name = "Nutrient")
v = TestFunction(self.V)
F = dot(grad(u), grad(v))*dx - self.MonodNutrientSink(u)*v*dx
# Call built-in Newton solver
#set_log_level(PROGRESS) # very detailed info for testing
set_log_level(WARNING) # near-silent info for simulations
#set_log_active(False) # suppress solver text
solve(F == 0, u, self.boundaryCondition, solver_parameters = {"newton_solver":
{"relative_tolerance": 1e-6}})
self.solution = u
def set_bcs(self):
"""
Initialise boundary conditions on the mesh.
/!\ Assumes that global variable Lz_b is up-to-date.
"""
dbc = TopDirichletBoundary()
self.boundaryCondition = DirichletBC(self.V, Constant(self.u0), dbc)
def DoubleBufferedMesh(self, max_height):
"""
Given a boundary height Lz_b, returns a FEniCS mesh object with
- canonical elements in the bottom of the cell domain (cells are always counted onto the same mesh)
- buffer elements at the top of the cell domain (so upper boundary can have an arbitrary height.
Having a double buffer layer avoids generating low-volume elements if Lz_b ~ n*h, but adds the constraint that
delta >= 2*h.
/!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
"""
global Lz_b
# Read off fixed parameters
L_x = self.L_x; N_x = self.N_x
L_y = self.L_y; N_y = self.N_y
delta = self.delta
h = self.h
# Calculate DBM dimensions
Lz_b = max_height + delta # height in um at which to apply bulk BC
A = int(Lz_b // h) # number of whole cubes that fit under Lz_b
B = Lz_b % h # remainder for this division
W = B + h # thickness of buffer layer in um
# Update mesh variables
self.Lz_b = Lz_b
self.W = W
self.N_z = A-1
self.L_z = (A-1)*h
# Create the node cloud and connectivity
P = N_x+1; Q = N_y+1; R = A+2;
cloud = self.GetDBLNodeCloud(P, Q, R)
TRI = self.GetNodeConnectivity(P, Q, R)
# Reformat the arrays to a datatype that FEniCS likes
cells = numpy.array(TRI, dtype=numpy.uintp)
nodes = numpy.array(cloud, dtype=numpy.double)
# Pass the node and cell arrays to the FEniCS mesh editor
mesh = Mesh(); editor = MeshEditor()
editor.open(mesh, 3, 3); editor.init_vertices(nodes.shape[0]); editor.init_cells(cells.shape[0])
[editor.add_vertex(i,n) for i,n in enumerate(nodes)]
[editor.add_cell(i,n) for i,n in enumerate(cells)]
editor.close()
return mesh
def RegularMesh(self):
"""
/!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
Assumes that L_z (not user-specified) is the same as L_x.
"""
global Lz_b
# Update mesh variables: choose z coordinates from x coordinates.
# L_z (counting) and Lz_b (boundary) are now the same and equal to L_x.
# We're not building a mesh manually, so we don't need to define W.
Lz_b = self.L_x
self.Lz_b = Lz_b
self.N_z = int(self.N_x)
self.L_z = Lz_b
# use an inbuilt regular mesh
p0 = self.origin
mesh = BoxMesh(p0[0],p0[1],p0[2],self.L_x,self.L_y,self.L_z,self.N_x,self.N_y,self.N_z)
return mesh
def GetDBLNodeCloud(self, P, Q, R):
"""
Compute node locations for a double-buffer-layer mesh
"""
x = numpy.linspace(0.0, self.L_x, num = P)
y = numpy.linspace(0.0, self.L_y, num = Q)
z = numpy.linspace(0.0, (self.N_z+2)*self.h, num = R)
(X, Y, Z) = numpy.meshgrid(x, y, z, indexing ='ij')
# Move the top two layers to make the buffer layer
Z[:,:,-1] = self.Lz_b;
Z[:,:,-2] = self.Lz_b - 0.5*self.W;
# Flatten into a 3-by-(Num_nodes) array
cloud = numpy.vstack((X.flatten('F'), Y.flatten('F'), Z.flatten('F'))).T
return cloud
def GetNodeConnectivity(self, P, Q, R):
"""
Compute the connectivity TRI of a regular grid of points
"""
# Create an P-by-Q-by-R array of integers, numbering along x then y then z
(pp,qq,rr) = numpy.meshgrid(range(0,P),range(0,Q),range(0,R),indexing='ij');
inds = numpy.vstack((pp.flatten('F'), qq.flatten('F'), rr.flatten('F'))).T
# In each direction, remove the last set of nodes (non-origin nodes)
mask = ((inds[:,0]==self.N_x) + (inds[:,1]==self.N_y) + (inds[:,2]==self.N_z+2) == False)
inds_p = inds[mask]
nods_p = inds_p[:,0] + P*inds_p[:,1] + P*Q*inds_p[:,2]
# Compute the stencil defining the 6 tetrahedra associated with a given origin
stencil = self.GetConnectivityStencil(P, Q)
# For each origin node, define the 6 associated elements; compile to list TRI
K = numpy.tile(nods_p.T, (6, 1))
TRI = (numpy.tile(K.flatten('F'), (4,1)) + numpy.tile(stencil, (len(nods_p),1) ).T).T
return TRI
def GetConnectivityStencil(self, P, Q):
"""
Given the vertices of a cube, group these points into 6 identical tetrahedra
"""
stencil = numpy.array([[0, 1, P+1, P*(Q+1)+1], \
[0, 1, P*Q+1, P*(Q+1)+1], \
[0, P*Q, P*Q+1, P*(Q+1)+1], \
[0, P, P+1, P*(Q+1)+1], \
[0, P*Q, P*(Q+1), P*(Q+1)+1], \
[0, P, P*(Q+1), P*(Q+1)+1]])
return stencil
def GetTetrahedronIndex(self, Point, cubeOrigin):
"""
Given mesh cube, assign which tetrahedron a point is in.
/!\ Assumes tetrahedron is part of a cube.
"""
Origin = cubeOrigin
p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
a_x = Origin[0]; a_y = Origin[1]; a_z = Origin[2]
dx = p_x - a_x
dy = p_y - a_y
dz = p_z - a_z
t = 1*(dy - dz > 0) + 2*(dz - dx > 0) + 4*(dx - dy > 0)
conv_vec = [3,4,5,1,0,2]
return conv_vec[t-1]
    def GetCubeIndex(self, Point):
        """
        Given mesh dimensions, assign which cube a point is in.

        Returns the global (flattened) index of the cube containing Point,
        together with the coordinates of that cube's origin corner.
        /!\ Assumes the domain origin is at [0,0,0]; a point at or above
        L_z (or outside [0,L_x] x [0,L_y]) yields an out-of-range index --
        TODO confirm callers only pass points inside the canonical region.
        """
        p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
        p = int(numpy.floor(p_x*self.N_x / float(self.L_x))) # index along x
        q = int(numpy.floor(p_y*self.N_y / float(self.L_y))) # index along y
        r = int(numpy.floor(p_z*self.N_z / float(self.L_z))) # index along z
        c = p + q*self.N_x + r*self.N_x*self.N_y # global index of this cube
        cubeOrigin = [p*self.L_x/float(self.N_x),\
                      q*self.L_y/float(self.N_y),\
                      r*self.L_z/float(self.N_z)] # coordinates of this cube's origin
        return int(c), cubeOrigin
def GetElementIndex(self, point):
"""
Get tetrahedron and cube indices and calculate global element index.
"""
[c, cubeOrigin] = self.GetCubeIndex(point)
t = self.GetTetrahedronIndex(point,cubeOrigin)
return t + 6*c
def AssignElementsToData(self, centers):
"""
Sort cell centres into their respective mesh elements.
"""
N = centers.shape[0]
elements = numpy.zeros((N), numpy.int32)
for i in range(0,N):
point = centers[i]
elements[i] = self.GetElementIndex(point)
return elements
def GetVolFracs(self, centers, vols):
"""
Create a global list of the cell volume fractions in mesh elements.
Assumes that self.mesh and self.h are up-to-date.
/!\ Exports the array vol_fracs as a global array, for use by VolumeFraction.
"""
global vol_fracs
# assign elements of cells
elements = self.AssignElementsToData(centers)
# need to define volume fraction for every element in the mesh
# (not just the canonical elements for counting)
num_elements = self.mesh.num_cells()
# sum cell volumes over each element
v = math.pow(self.h, 3) / 6.0
vol_fracs = numpy.bincount(elements,vols,num_elements) / v
def InterpolateToCenters(self, centers):
"""
Interpolate a solution object u onto a list of cell coordinates
"""
u = self.solution
data_t = tuple(map(tuple, centers)) # Convert to tuple format
u_local = numpy.zeros((len(data_t),),numpy.float64) # preallocate solution array
for i in range(0,len(data_t)): # loop over all cells
u_local[i] = u(data_t[i][0:3]) # extrapolate solution value at cell centre
return u_local
def WriteFieldToFile(self, filename, u):
"""
Export the PDE solution as a pvd mesh.
"""
print "Writing fields..."
File(filename) << u
print 'Done.'
def MonodNutrientSink(self, u):
"""
Monod function with which to build RHS.
"""
a = Constant(self.mu_eff)
b = Constant(self.K)
return -1 * a * u * VolumeFraction() / (b + u)
def VolumeFractionOnElements(self):
"""
Monod function with which to build RHS.
"""
return VolumeFraction()
# ==================== In-built test functions ====================== #
def TestProblem_A(self, dir, filename):
"""
Solves the homogenised reaction-diffusion problem on a standard mesh.
Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
You can check this by eye, since we export the volume fraction function too.
"""
# intiate mesh (regular mesh uses L_z = L_x)
self.mesh = self.RegularMesh()
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
N = self.mesh.num_cells()
print 'We have %i elements in the mesh' % N
print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
print 'Finally, mesh parameter h is %f' % self.h
# load some imaginary cells onto the mesh
centers = numpy.zeros((N,), vec.float4)
for cell_no in range(N):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N,))
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_B(self, dir, filename, max_height):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
You can check this by eye, since we export the volume fraction function too.
"""
# intiate mesh
self.mesh = self.DoubleBufferedMesh(max_height)
# The following are now defined (as attributes of self):
# o L_z is the canonical height
# o Lz_b is the boundary height
# o N_z is the number of canonical cubes in the z direction
# o W is the thickness of the buffer layer in um.
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
# Number of elements to fill (canonical only, only up to L_z = 10)
N_can = 3*self.N_x*self.N_y*(self.N_z+1)
# load some imaginary cells onto the mesh
centers = numpy.zeros((N_can,), vec.float4)
for cell_no in range(N_can):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N_can,))
# Use cell centres to evaluate volume occupancy of mesh
# Note that Vol fracs must define an occupancy for EVERY element in the mesh - not just canonical ones.
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_C(self, dir, filename, max_height):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
You can check this by eye, since we export the volume fraction function too.
/!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
"""
# Make sure that the periodic boundary mapping function can see domain sizes
global L_x
global L_y
L_x = self.L_x
L_y = self.L_y
# intiate mesh and function space
self.mesh = self.DoubleBufferedMesh(max_height)
# define an X-Y periodic function space
pbc = XYPeriodicDomain()
self.V = FunctionSpace(self.mesh, "CG", 1, constrained_domain=pbc)
# set boundary conditions in this mesh
self.set_bcs()
# Number of elements to fill (canonical only, only up to L_z = 10)
N_can = 3*self.N_x*self.N_y*(self.N_z+1)
# load some imaginary cells onto the mesh
centers = numpy.zeros((N_can,), vec.float4)
for cell_no in range(N_can):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N_can,))
# Use cell centres to evaluate volume occupancy of mesh
# Note that Vol fracs must define an occupancy for EVERY element in the mesh - not just canonical ones.
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_D(self, dir, filename, centers, vols):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
This time, we use realistic cell date, imported as centers and vols.
/!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
"""
# Make sure that the periodic boundary mapping function can see domain sizes
global L_x
global L_y
L_x = self.L_x
L_y = self.L_y
# compute max height
max_height = 0.0
for center in centers:
hz = center[2]
if hz > max_height:
max_height = hz
print 'max height is %f' % max_height
# intiate mesh and function space
self.mesh = self.DoubleBufferedMesh(max_height)
# define a non-periodic function space
self.V = FunctionSpace(self.mesh, "CG", 1)
#pbc = XYPeriodicDomain() # doesn't make sense if cell data are not periodic
#self.V = FunctionSpace(self.mesh, "CG", 1, constrained_domain=pbc)
# set boundary conditions in this mesh
self.set_bcs()
# compute volume fractions
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_A_old(self, num_refinements, dir, filename):
"""
Solves the homogenised reaction-diffusion problem on a standard mesh.
This mesh can be refined as many times as desired.
Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
You can check this by eye, since we export the volume fraction function too.
There's something odd going on with this one: volume fractions are assigned incorrectly if number of refinements > 1.
"""
# intiate mesh (regular mesh uses L_z = L_x)
self.mesh = self.RegularMesh()
# refine mesh, updating mesh parameter as we go
for i in range(0,num_refinements):
self.mesh = refine(self.mesh)
self.h = 0.5*self.h
self.N_x = 2*self.N_x
self.N_y = 2*self.N_y
self.N_z = 2*self.N_z
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
N = self.mesh.num_cells()
print 'With %i refinement step(s), we have %i elements in the mesh' % (num_refinements, N)
print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
print 'Finally, mesh parameter h is %f' % self.h
# load some imaginary cells onto the mesh
centers = numpy.zeros((N,), vec.float4)
for cell_no in range(N):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N,))
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
# ============= Supporting classes for defining the PDE ============= #
class TopDirichletBoundary(SubDomain):
    # Marks the top face of the mesh (z = Lz_b), where the bulk nutrient
    # concentration u0 is imposed as a Dirichlet condition (see set_bcs).
    def inside(self, x, on_boundary):
        """
        Determine whether point x lies on the Dirichlet Boundary subdomain.
        /!\ Assumes Lz_b is supplied as a global variable.
        """
        global Lz_b
        return bool(near(x[2],Lz_b) and on_boundary)
class XYPeriodicDomain(SubDomain):
    # Periodic boundary conditions in x and y: the x = L_x and y = L_y
    # faces (slaves) are identified with the x = 0 and y = 0 faces (masters).
    def inside(self, x, on_boundary):
        """
        Return true if we are on either of the two master boundaries.
        /!\ Assumes that origin = [0,0,z]!
        """
        return bool((near(x[0], 0.0) or near(x[1], 0.0)) and on_boundary)
    def map(self, x, y):
        """
        Map points on the slave boundaries to the corresponding master boundaries.
        /!\ Takes L_x and L_y as global variables.
        """
        global L_x
        global L_y
        if near(x[0], L_x) and near(x[1], L_y):
            # Edge shared by both slave faces: wrap in x and y.
            y[0] = x[0] - L_x
            y[1] = x[1] - L_y
            y[2] = x[2]
        elif near(x[0], L_x):
            # x = L_x face: wrap in x only.
            y[0] = x[0] - L_x
            y[1] = x[1]
            y[2] = x[2]
        elif near(x[1], L_y):
            # y = L_y face: wrap in y only.
            y[0] = x[0]
            y[1] = x[1] - L_y
            y[2] = x[2]
        else:
            # All other points map to themselves.
            y[0] = x[0]
            y[1] = x[1]
            y[2] = x[2]
# Piecewise-constant FEniCS Expression exposing the per-element cell volume
# fractions (computed by DolfinSolver.GetVolFracs) to the variational form.
class VolumeFraction(Expression):
    def eval_cell(self, value, x, ufc_cell):
        """
        Evaluate the cell volume fraction for this mesh element.
        /!\ Assumes vol_fracs is being supplied as a global variable.
        """
        global vol_fracs
        # One value per mesh element, looked up by the element (cell) index.
        value[0] = vol_fracs[ufc_cell.index]
| 32.650514 | 119 | 0.671104 | """
DolfinPDESolver.py
==================
A python class structure written to interface CellModeller4 with the FEniCs/Dolfin finite element library.
Intended application: hybrid modelling of a microbial biofilm.
- Update: parameter input streamlined. New moving boundary mesh type.
- Update: added in-built test functions.
Created: W. P. J. Smith, 13.01.15
Updated: W. P. J. Smith, 22.03.15
Updated: W. P. J. Smith, 23.03.15
"""
try:
from dolfin import *
except ImportError:
print "Error: could not import dolfin library."
print "Try calling $ source /Applications/FEniCS.app/Contents/Resources/share/fenics/TestFenicsPath.conf "
import numpy
import math
from pyopencl.array import vec
class DolfinSolver:
def __init__(self, solverParams):
"""
Initialise the dolfin solver using a dictionary of params.
"""
self.pickleSteps = solverParams['pickleSteps']
self.h = solverParams['h']
self.origin = solverParams['origin']
self.N_x = int(solverParams['N_x'])
self.N_y = int(solverParams['N_y'])
self.L_x = solverParams['L_x']
self.L_y = solverParams['L_y']
self.u0 = solverParams['u0']
self.K = solverParams['K']
self.mu_eff = solverParams['mu_eff']
self.delta = solverParams['delta']
self.N_z = 0
self.Lz_b = 0.0
self.W = 0.0
self.boundaryCondition = None
self.mesh = None
self.V = None
self.solution = None
def SolvePDE(self, centers, vols, filename, dir, stepNum=0):
"""
High-level function to be called during the function.
"""
# get height of highest cell in domain
max_height = 0.0
for center in centers:
hz = center[2]
if hz > max_height:
max_height = hz
print 'max height is %f' % max_height
# update mesh, function space and BCs
# TODO: add a better toggle here
self.mesh = self.DoubleBufferedMesh(max_height)
#self.mesh = self.RegularMesh()
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
#G = self.VolumeFractionOnElements()
#g = Function(self.V, name = "Volume fraction")
#g.interpolate(G)
#self.WriteFieldToFile(dir+filename+'_VolFracsCheck'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
if stepNum % self.pickleSteps == 0:
self.WriteFieldToFile(dir+filename+'.pvd', self.solution)
# interpolate solution to cell centres
u_local = self.InterpolateToCenters(centers)
return u_local
    def NewtonIterator(self):
        """
        A Newton iterator for solving non-linear problems.
        Stores the converged field in self.solution.
        /!\ Assumes that function space (V), boundaryCondition, vol_fracs are up-to-date.
        """
        # Define variational problem
        u = Function(self.V, name = "Nutrient")
        v = TestFunction(self.V)
        # Weak form: diffusion balanced against the Monod uptake sink.
        F = dot(grad(u), grad(v))*dx - self.MonodNutrientSink(u)*v*dx
        # Call built-in Newton solver
        #set_log_level(PROGRESS) # very detailed info for testing
        set_log_level(WARNING) # near-silent info for simulations
        #set_log_active(False) # suppress solver text
        solve(F == 0, u, self.boundaryCondition, solver_parameters = {"newton_solver":
        {"relative_tolerance": 1e-6}})
        self.solution = u
def set_bcs(self):
"""
Initialise boundary conditions on the mesh.
/!\ Assumes that global variable Lz_b is up-to-date.
"""
dbc = TopDirichletBoundary()
self.boundaryCondition = DirichletBC(self.V, Constant(self.u0), dbc)
    def DoubleBufferedMesh(self, max_height):
        """
        Given a boundary height Lz_b, returns a FEniCS mesh object with
        - canonical elements in the bottom of the cell domain (cells are always counted onto the same mesh)
        - buffer elements at the top of the cell domain (so upper boundary can have an arbitrary height.
        Having a double buffer layer avoids generating low-volume elements if Lz_b ~ n*h, but adds the constraint that
        delta >= 2*h.
        /!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
        """
        global Lz_b
        # Read off fixed parameters
        L_x = self.L_x; N_x = self.N_x
        L_y = self.L_y; N_y = self.N_y
        delta = self.delta
        h = self.h
        # Calculate DBM dimensions
        Lz_b = max_height + delta # height in um at which to apply bulk BC
        A = int(Lz_b // h) # number of whole cubes that fit under Lz_b
        B = Lz_b % h # remainder for this division
        W = B + h # thickness of buffer layer in um
        # Update mesh variables
        self.Lz_b = Lz_b
        self.W = W
        # Only the bottom A-1 cube layers are canonical; the rest is buffer.
        self.N_z = A-1
        self.L_z = (A-1)*h
        # Create the node cloud and connectivity
        # (P, Q, R are node counts along x, y, z: one more than cube counts.)
        P = N_x+1; Q = N_y+1; R = A+2;
        cloud = self.GetDBLNodeCloud(P, Q, R)
        TRI = self.GetNodeConnectivity(P, Q, R)
        # Reformat the arrays to a datatype that FEniCS likes
        cells = numpy.array(TRI, dtype=numpy.uintp)
        nodes = numpy.array(cloud, dtype=numpy.double)
        # Pass the node and cell arrays to the FEniCS mesh editor
        mesh = Mesh(); editor = MeshEditor()
        editor.open(mesh, 3, 3); editor.init_vertices(nodes.shape[0]); editor.init_cells(cells.shape[0])
        # (The list comprehensions below are used purely for their side
        # effects on the editor.)
        [editor.add_vertex(i,n) for i,n in enumerate(nodes)]
        [editor.add_cell(i,n) for i,n in enumerate(cells)]
        editor.close()
        return mesh
    def RegularMesh(self):
        """
        Build a regular FEniCS BoxMesh spanning the whole (cubic) domain.
        /!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
        Assumes that L_z (not user-specified) is the same as L_x.
        """
        global Lz_b
        # Update mesh variables: choose z coordinates from x coordinates.
        # L_z (counting) and Lz_b (boundary) are now the same and equal to L_x.
        # We're not building a mesh manually, so we don't need to define W.
        Lz_b = self.L_x
        self.Lz_b = Lz_b
        self.N_z = int(self.N_x)
        self.L_z = Lz_b
        # use an inbuilt regular mesh: corners p0 -> (L_x, L_y, L_z) with
        # N_x * N_y * N_z divisions (legacy positional BoxMesh signature).
        p0 = self.origin
        mesh = BoxMesh(p0[0],p0[1],p0[2],self.L_x,self.L_y,self.L_z,self.N_x,self.N_y,self.N_z)
        return mesh
    def GetDBLNodeCloud(self, P, Q, R):
        """
        Compute node locations for a double-buffer-layer mesh.
        P, Q, R are the node counts along x, y, z respectively.
        """
        x = numpy.linspace(0.0, self.L_x, num = P)
        y = numpy.linspace(0.0, self.L_y, num = Q)
        z = numpy.linspace(0.0, (self.N_z+2)*self.h, num = R)
        (X, Y, Z) = numpy.meshgrid(x, y, z, indexing ='ij')
        # Move the top two layers to make the buffer layer
        Z[:,:,-1] = self.Lz_b;
        Z[:,:,-2] = self.Lz_b - 0.5*self.W;
        # Flatten into a (Num_nodes)-by-3 array. Fortran-order flattening
        # must match the node numbering used in GetNodeConnectivity.
        cloud = numpy.vstack((X.flatten('F'), Y.flatten('F'), Z.flatten('F'))).T
        return cloud
    def GetNodeConnectivity(self, P, Q, R):
        """
        Compute the connectivity TRI of a regular grid of points.
        Returns one row of four node indices per tetrahedral element.
        """
        # Create an P-by-Q-by-R array of integers, numbering along x then y then z
        (pp,qq,rr) = numpy.meshgrid(range(0,P),range(0,Q),range(0,R),indexing='ij');
        inds = numpy.vstack((pp.flatten('F'), qq.flatten('F'), rr.flatten('F'))).T
        # In each direction, remove the last set of nodes (non-origin nodes).
        # ('+' on the boolean arrays acts as a logical OR here.)
        mask = ((inds[:,0]==self.N_x) + (inds[:,1]==self.N_y) + (inds[:,2]==self.N_z+2) == False)
        inds_p = inds[mask]
        # Flat node index: +1 steps x, +P steps y, +P*Q steps z.
        nods_p = inds_p[:,0] + P*inds_p[:,1] + P*Q*inds_p[:,2]
        # Compute the stencil defining the 6 tetrahedra associated with a given origin
        stencil = self.GetConnectivityStencil(P, Q)
        # For each origin node, define the 6 associated elements; compile to list TRI
        K = numpy.tile(nods_p.T, (6, 1))
        TRI = (numpy.tile(K.flatten('F'), (4,1)) + numpy.tile(stencil, (len(nods_p),1) ).T).T
        return TRI
def GetConnectivityStencil(self, P, Q):
"""
Given the vertices of a cube, group these points into 6 identical tetrahedra
"""
stencil = numpy.array([[0, 1, P+1, P*(Q+1)+1], \
[0, 1, P*Q+1, P*(Q+1)+1], \
[0, P*Q, P*Q+1, P*(Q+1)+1], \
[0, P, P+1, P*(Q+1)+1], \
[0, P*Q, P*(Q+1), P*(Q+1)+1], \
[0, P, P*(Q+1), P*(Q+1)+1]])
return stencil
def GetTetrahedronIndex(self, Point, cubeOrigin):
"""
Given mesh cube, assign which tetrahedron a point is in.
/!\ Assumes tetrahedron is part of a cube.
"""
Origin = cubeOrigin
p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
a_x = Origin[0]; a_y = Origin[1]; a_z = Origin[2]
dx = p_x - a_x
dy = p_y - a_y
dz = p_z - a_z
t = 1*(dy - dz > 0) + 2*(dz - dx > 0) + 4*(dx - dy > 0)
conv_vec = [3,4,5,1,0,2]
return conv_vec[t-1]
def GetCubeIndex(self, Point):
"""
Given mesh dimensions, assign which cube a point is in.
"""
p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
p = int(numpy.floor(p_x*self.N_x / float(self.L_x))) # index along x
q = int(numpy.floor(p_y*self.N_y / float(self.L_y))) # index along y
r = int(numpy.floor(p_z*self.N_z / float(self.L_z))) # index along z
c = p + q*self.N_x + r*self.N_x*self.N_y # global index of this cube
cubeOrigin = [p*self.L_x/float(self.N_x),\
q*self.L_y/float(self.N_y),\
r*self.L_z/float(self.N_z)] # coordinates of this cube's origin
return int(c), cubeOrigin
def GetElementIndex(self, point):
"""
Get tetrahedron and cube indices and calculate global element index.
"""
[c, cubeOrigin] = self.GetCubeIndex(point)
t = self.GetTetrahedronIndex(point,cubeOrigin)
return t + 6*c
def AssignElementsToData(self, centers):
"""
Sort cell centres into their respective mesh elements.
"""
N = centers.shape[0]
elements = numpy.zeros((N), numpy.int32)
for i in range(0,N):
point = centers[i]
elements[i] = self.GetElementIndex(point)
return elements
    def GetVolFracs(self, centers, vols):
        """
        Create a global list of the cell volume fractions in mesh elements.
        Assumes that self.mesh and self.h are up-to-date.
        /!\ Exports the array vol_fracs as a global array, for use by VolumeFraction.
        """
        global vol_fracs
        elements = self.AssignElementsToData(centers)
        num_elements = self.mesh.num_cells()
        # Volume of one tetrahedral element (six per cube of side h).
        v = math.pow(self.h, 3) / 6.0
        # bincount sums the cell volumes landing in each element; dividing
        # by the element volume turns those sums into occupancy fractions.
        vol_fracs = numpy.bincount(elements,vols,num_elements) / v
def InterpolateToCenters(self, centers):
"""
Interpolate a solution object u onto a list of cell coordinates
"""
u = self.solution
data_t = tuple(map(tuple, centers))
u_local = numpy.zeros((len(data_t),),numpy.float64)
for i in range(0,len(data_t)):
u_local[i] = u(data_t[i][0:3])
return u_local
def WriteFieldToFile(self, filename, u):
"""
Export the PDE solution as a pvd mesh.
"""
print "Writing fields..."
File(filename) << u
print 'Done.'
def MonodNutrientSink(self, u):
"""
Monod function with which to build RHS.
"""
a = Constant(self.mu_eff)
b = Constant(self.K)
return -1 * a * u * VolumeFraction() / (b + u)
    def VolumeFractionOnElements(self):
        """
        Return the piecewise-constant volume-fraction Expression, e.g. for
        interpolation onto the function space and export to file.
        """
        return VolumeFraction()
    def TestProblem_A(self, dir, filename):
        """
        Solves the homogenised reaction-diffusion problem on a standard mesh.
        Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
        You can check this by eye, since we export the volume fraction function too.
        """
        self.mesh = self.RegularMesh()
        self.V = FunctionSpace(self.mesh, "CG", 1)
        self.set_bcs()
        N = self.mesh.num_cells()
        print 'We have %i elements in the mesh' % N
        print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
        print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
        print 'Finally, mesh parameter h is %f' % self.h
        # Place one imaginary cell at the centroid of every mesh element.
        centers = numpy.zeros((N,), vec.float4)
        for cell_no in range(N):
            centroid = Cell(self.mesh, cell_no).midpoint()
            centers[cell_no][0] = centroid.x()
            centers[cell_no][1] = centroid.y()
            centers[cell_no][2] = centroid.z()
        # Give each imaginary cell exactly the volume of one element, so the
        # occupancy of every element should come out as 1.
        element_volume = math.pow(self.h, 3) / 6.0
        vols = element_volume * numpy.ones((N,))
        # Use cell centres to evaluate volume occupancy of mesh
        self.GetVolFracs(centers, vols)
        # Export a meshfile showing element occupancies
        G = self.VolumeFractionOnElements()
        g = Function(self.V, name = "Volume fraction")
        g.interpolate(G)
        self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
        # call a solver and save the solution
        self.NewtonIterator()
        self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
    def TestProblem_B(self, dir, filename, max_height):
        """
        Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
        Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
        You can check this by eye, since we export the volume fraction function too.
        """
        # initiate mesh
        self.mesh = self.DoubleBufferedMesh(max_height)
        # The following are now defined (as attributes of self):
        # o L_z is the canonical height
        # o Lz_b is the boundary height
        # o N_z is the number of canonical cubes in the z direction
        # o W is the thickness of the buffer layer in um.
        # set the function space and boundary conditions in this mesh
        self.V = FunctionSpace(self.mesh, "CG", 1)
        self.set_bcs()
        # Number of elements to fill (canonical only, only up to L_z = 10)
        N_can = 3*self.N_x*self.N_y*(self.N_z+1)
        # load some imaginary cells onto the mesh
        centers = numpy.zeros((N_can,), vec.float4)
        for cell_no in range(N_can):
            centroid = Cell(self.mesh, cell_no).midpoint()
            centers[cell_no][0] = centroid.x()
            centers[cell_no][1] = centroid.y()
            centers[cell_no][2] = centroid.z()
        # Give each cell the volume of the element it's in
        element_volume = math.pow(self.h, 3) / 6.0
        vols = element_volume * numpy.ones((N_can,))
        # Evaluate occupancies, export them, then solve and save the field.
        self.GetVolFracs(centers, vols)
        G = self.VolumeFractionOnElements()
        g = Function(self.V, name = "Volume fraction")
        g.interpolate(G)
        self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
        self.NewtonIterator()
        self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
    def TestProblem_C(self, dir, filename, max_height):
        """
        Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
        Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
        You can check this by eye, since we export the volume fraction function too.
        /!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
        """
        # Make sure that the periodic boundary mapping function can see domain sizes
        global L_x
        global L_y
        L_x = self.L_x
        L_y = self.L_y
        # initiate mesh and function space
        self.mesh = self.DoubleBufferedMesh(max_height)
        # define an X-Y periodic function space
        pbc = XYPeriodicDomain()
        self.V = FunctionSpace(self.mesh, "CG", 1, constrained_domain=pbc)
        # set boundary conditions in this mesh
        self.set_bcs()
        # Number of elements to fill (canonical only, only up to L_z = 10)
        N_can = 3*self.N_x*self.N_y*(self.N_z+1)
        # load some imaginary cells onto the mesh
        centers = numpy.zeros((N_can,), vec.float4)
        for cell_no in range(N_can):
            centroid = Cell(self.mesh, cell_no).midpoint()
            centers[cell_no][0] = centroid.x()
            centers[cell_no][1] = centroid.y()
            centers[cell_no][2] = centroid.z()
        # Give each cell the volume of the element it's in
        element_volume = math.pow(self.h, 3) / 6.0
        vols = element_volume * numpy.ones((N_can,))
        # Evaluate occupancies, export them, then solve and save the field.
        self.GetVolFracs(centers, vols)
        G = self.VolumeFractionOnElements()
        g = Function(self.V, name = "Volume fraction")
        g.interpolate(G)
        self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
        self.NewtonIterator()
        self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
    def TestProblem_D(self, dir, filename, centers, vols):
        """
        Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
        This time, we use realistic cell data, imported as centers and vols.
        /!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
        """
        # Make sure that the periodic boundary mapping function can see domain sizes
        global L_x
        global L_y
        L_x = self.L_x
        L_y = self.L_y
        # compute max height
        max_height = 0.0
        for center in centers:
            hz = center[2]
            if hz > max_height:
                max_height = hz
        print 'max height is %f' % max_height
        # initiate mesh and function space
        self.mesh = self.DoubleBufferedMesh(max_height)
        # define a non-periodic function space
        self.V = FunctionSpace(self.mesh, "CG", 1)
        #pbc = XYPeriodicDomain() # doesn't make sense if cell data are not periodic
        self.set_bcs()
        # Evaluate occupancies from the real cell data, export them, then
        # solve and save the field.
        self.GetVolFracs(centers, vols)
        G = self.VolumeFractionOnElements()
        g = Function(self.V, name = "Volume fraction")
        g.interpolate(G)
        self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
        self.NewtonIterator()
        self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
    def TestProblem_A_old(self, num_refinements, dir, filename):
        """
        Solves the homogenised reaction-diffusion problem on a standard mesh.
        This mesh can be refined as many times as desired.
        Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
        You can check this by eye, since we export the volume fraction function too.
        There's something odd going on with this one: volume fractions are assigned incorrectly if number of refinements > 1.
        """
        # initiate mesh (regular mesh uses L_z = L_x)
        self.mesh = self.RegularMesh()
        # refine mesh, updating mesh parameter as we go
        # NOTE(review): dolfin's refine() need not reproduce the doubled
        # counting grid assumed below — plausibly the cause of the known
        # >1-refinement bug; confirm before relying on this path.
        for i in range(0,num_refinements):
            self.mesh = refine(self.mesh)
            self.h = 0.5*self.h
            self.N_x = 2*self.N_x
            self.N_y = 2*self.N_y
            self.N_z = 2*self.N_z
        # set the function space and boundary conditions in this mesh
        self.V = FunctionSpace(self.mesh, "CG", 1)
        self.set_bcs()
        N = self.mesh.num_cells()
        print 'With %i refinement step(s), we have %i elements in the mesh' % (num_refinements, N)
        print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
        print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
        print 'Finally, mesh parameter h is %f' % self.h
        # load some imaginary cells onto the mesh
        centers = numpy.zeros((N,), vec.float4)
        for cell_no in range(N):
            centroid = Cell(self.mesh, cell_no).midpoint()
            centers[cell_no][0] = centroid.x()
            centers[cell_no][1] = centroid.y()
            centers[cell_no][2] = centroid.z()
        # Give each cell the volume of the element it's in
        element_volume = math.pow(self.h, 3) / 6.0
        vols = element_volume * numpy.ones((N,))
        # Evaluate occupancies, export them, then solve and save the field.
        self.GetVolFracs(centers, vols)
        G = self.VolumeFractionOnElements()
        g = Function(self.V, name = "Volume fraction")
        g.interpolate(G)
        self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
        self.NewtonIterator()
        self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
class TopDirichletBoundary(SubDomain):
    """Dirichlet subdomain: the flat plane at the top of the simulation box."""
    def inside(self, x, on_boundary):
        """
        Return True when point x sits on the top (Dirichlet) boundary plane.
        /!\ Reads the boundary height Lz_b from a global variable, which is
        exported by the mesh-building routines.
        """
        global Lz_b
        on_top_plane = near(x[2], Lz_b)
        return bool(on_boundary and on_top_plane)
class XYPeriodicDomain(SubDomain):
    """Constrained subdomain that makes the solution periodic in x and y."""
    def inside(self, x, on_boundary):
        """
        Return True on the two master boundaries (the planes x=0 and y=0).
        /!\ Assumes that the domain origin is [0,0,z]!
        """
        return bool((near(x[0], 0.0) or near(x[1], 0.0)) and on_boundary)
    def map(self, x, y):
        """
        Map a point x on a slave boundary (x=L_x and/or y=L_y) onto the
        matching master boundary, writing the image into y.
        /!\ Takes L_x and L_y as global variables.
        """
        global L_x
        global L_y
        # Wrap each horizontal coordinate independently; z is copied through.
        # This covers all four cases of the original branch ladder (both
        # slaves, x-slave only, y-slave only, neither).
        y[0] = x[0] - L_x if near(x[0], L_x) else x[0]
        y[1] = x[1] - L_y if near(x[1], L_y) else x[1]
        y[2] = x[2]
# Piecewise-constant FEniCS Expression exposing the per-element cell volume
# fractions (computed by DolfinSolver.GetVolFracs) to the variational form.
class VolumeFraction(Expression):
    def eval_cell(self, value, x, ufc_cell):
        """
        Evaluate the cell volume fraction for this mesh element.
        /!\ Assumes vol_fracs is being supplied as a global variable.
        """
        global vol_fracs
        # One value per mesh element, looked up by the element (cell) index.
        value[0] = vol_fracs[ufc_cell.index]
| false | true |
f72fa0af1e6f927dec3c1c91f5deebd3c63f8593 | 10,076 | py | Python | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (c) 2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for metadata handling.
"""
import io
import json
import os
import tempfile
import unittest
import pickle
import numpy as np
import python_jsonschema_objects as pjs
import msprime
import tskit
class TestMetadataHdf5RoundTrip(unittest.TestCase):
    """
    Tests that we can encode metadata under various formats and this will
    successfully round-trip through the HDF5 format.
    """
    def setUp(self):
        # Create a named temp file for the dump/load cycle; close the raw fd
        # immediately so tskit can reopen the path itself.
        fd, self.temp_file = tempfile.mkstemp(prefix="msp_hdf5meta_test_")
        os.close(fd)
    def tearDown(self):
        os.unlink(self.temp_file)
    def test_json(self):
        ts = msprime.simulate(10, random_seed=1)
        tables = ts.dump_tables()
        nodes = tables.nodes
        # For each node, we create some Python metadata that can be JSON encoded.
        metadata = [
            {"one": j, "two": 2 * j, "three": list(range(j))} for j in range(len(nodes))]
        # Pack the JSON strings into the flattened column + offsets form
        # expected by set_columns.
        encoded, offset = tskit.pack_strings(map(json.dumps, metadata))
        nodes.set_columns(
            flags=nodes.flags, time=nodes.time, population=nodes.population,
            metadata_offset=offset, metadata=encoded)
        self.assertTrue(np.array_equal(nodes.metadata_offset, offset))
        self.assertTrue(np.array_equal(nodes.metadata, encoded))
        ts1 = tables.tree_sequence()
        for j, node in enumerate(ts1.nodes()):
            decoded_metadata = json.loads(node.metadata.decode())
            self.assertEqual(decoded_metadata, metadata[j])
        # Round-trip through the on-disk format and compare tables.
        ts1.dump(self.temp_file)
        ts2 = tskit.load(self.temp_file)
        self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
    def test_pickle(self):
        ts = msprime.simulate(10, random_seed=1)
        tables = ts.dump_tables()
        # For each node, we create some Python metadata that can be pickled
        metadata = [
            {"one": j, "two": 2 * j, "three": list(range(j))}
            for j in range(ts.num_nodes)]
        encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))
        tables.nodes.set_columns(
            flags=tables.nodes.flags, time=tables.nodes.time,
            population=tables.nodes.population,
            metadata_offset=offset, metadata=encoded)
        self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))
        self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))
        ts1 = tables.tree_sequence()
        for j, node in enumerate(ts1.nodes()):
            decoded_metadata = pickle.loads(node.metadata)
            self.assertEqual(decoded_metadata, metadata[j])
        # Round-trip through the on-disk format and compare tables.
        ts1.dump(self.temp_file)
        ts2 = tskit.load(self.temp_file)
        self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
class ExampleMetadata(object):
    """
    Minimal picklable value holder used as a metadata payload in tests.
    """
    def __init__(self, one=None, two=None):
        self.one, self.two = one, two
class TestMetadataPickleDecoding(unittest.TestCase):
    """
    Tests that pickled metadata attached to nodes, sites and mutations can
    be recovered intact from a tree sequence.
    """
    def _metadata_fixture(self):
        # Shared fixture: an ExampleMetadata instance plus its pickled bytes.
        metadata = ExampleMetadata(one="node1", two="node2")
        return metadata, pickle.dumps(metadata)

    def _verify_roundtrip(self, metadata, raw):
        # The stored bytes must unpickle back to an equivalent object.
        recovered = pickle.loads(raw)
        self.assertEqual(recovered.one, metadata.one)
        self.assertEqual(recovered.two, metadata.two)

    def test_nodes(self):
        metadata, pickled = self._metadata_fixture()
        tables = tskit.TableCollection(sequence_length=1)
        tables.nodes.add_row(time=0.125, metadata=pickled)
        ts = tables.tree_sequence()
        node = ts.node(0)
        self.assertEqual(node.time, 0.125)
        self.assertEqual(node.metadata, pickled)
        self._verify_roundtrip(metadata, node.metadata)

    def test_sites(self):
        metadata, pickled = self._metadata_fixture()
        tables = tskit.TableCollection(sequence_length=1)
        tables.sites.add_row(position=0.1, ancestral_state="A", metadata=pickled)
        ts = tables.tree_sequence()
        site = ts.site(0)
        self.assertEqual(site.position, 0.1)
        self.assertEqual(site.ancestral_state, "A")
        self.assertEqual(site.metadata, pickled)
        self._verify_roundtrip(metadata, site.metadata)

    def test_mutations(self):
        metadata, pickled = self._metadata_fixture()
        tables = tskit.TableCollection(sequence_length=1)
        tables.nodes.add_row(time=0)
        tables.sites.add_row(position=0.1, ancestral_state="A")
        tables.mutations.add_row(site=0, node=0, derived_state="T", metadata=pickled)
        ts = tables.tree_sequence()
        mutation = ts.site(0).mutations[0]
        self.assertEqual(mutation.site, 0)
        self.assertEqual(mutation.node, 0)
        self.assertEqual(mutation.derived_state, "T")
        self.assertEqual(mutation.metadata, pickled)
        self._verify_roundtrip(metadata, mutation.metadata)
class TestJsonSchemaDecoding(unittest.TestCase):
    """
    Tests in which use json-schema to decode the metadata.
    """
    schema = """{
        "title": "Example Metadata",
        "type": "object",
        "properties": {
            "one": {"type": "string"},
            "two": {"type": "string"}
        },
        "required": ["one", "two"]
    }"""
    def test_nodes(self):
        tables = tskit.TableCollection(sequence_length=1)
        # Build a Python class from the JSON schema via
        # python_jsonschema_objects, and encode an instance as JSON bytes.
        builder = pjs.ObjectBuilder(json.loads(self.schema))
        ns = builder.build_classes()
        metadata = ns.ExampleMetadata(one="node1", two="node2")
        encoded = json.dumps(metadata.as_dict()).encode()
        tables.nodes.add_row(time=0.125, metadata=encoded)
        ts = tables.tree_sequence()
        node = ts.node(0)
        self.assertEqual(node.time, 0.125)
        self.assertEqual(node.metadata, encoded)
        # Decoding through the schema-generated class validates the payload.
        decoded = ns.ExampleMetadata.from_json(node.metadata.decode())
        self.assertEqual(decoded.one, metadata.one)
        self.assertEqual(decoded.two, metadata.two)
class TestLoadTextMetadata(unittest.TestCase):
    """
    Tests that use the load_text interface.
    """
    def test_individuals(self):
        individuals = io.StringIO("""\
        id  flags location     metadata
        0   1     0.0,1.0,0.0  abc
        1   1     1.0,2.0      XYZ+
        2   0     2.0,3.0,0.0  !@#$%^&*()
        """)
        # base64_metadata=False: the metadata column is taken verbatim.
        i = tskit.parse_individuals(
            individuals, strict=False, encoding='utf8', base64_metadata=False)
        expected = [(1, [0.0, 1.0, 0.0], 'abc'),
                    (1, [1.0, 2.0], 'XYZ+'),
                    (0, [2.0, 3.0, 0.0], '!@#$%^&*()')]
        for a, b in zip(expected, i):
            self.assertEqual(a[0], b.flags)
            self.assertEqual(len(a[1]), len(b.location))
            for x, y in zip(a[1], b.location):
                self.assertEqual(x, y)
            self.assertEqual(a[2].encode('utf8'),
                             b.metadata)
    def test_nodes(self):
        nodes = io.StringIO("""\
        id  is_sample   time    metadata
        0   1           0   abc
        1   1           0   XYZ+
        2   0           1   !@#$%^&*()
        """)
        n = tskit.parse_nodes(
            nodes, strict=False, encoding='utf8', base64_metadata=False)
        expected = ['abc', 'XYZ+', '!@#$%^&*()']
        for a, b in zip(expected, n):
            self.assertEqual(a.encode('utf8'),
                             b.metadata)
    def test_sites(self):
        sites = io.StringIO("""\
        position    ancestral_state metadata
        0.1         A   abc
        0.5         C   XYZ+
        0.8         G   !@#$%^&*()
        """)
        s = tskit.parse_sites(
            sites, strict=False, encoding='utf8', base64_metadata=False)
        expected = ['abc', 'XYZ+', '!@#$%^&*()']
        for a, b in zip(expected, s):
            self.assertEqual(a.encode('utf8'),
                             b.metadata)
    def test_mutations(self):
        mutations = io.StringIO("""\
        site    node    derived_state   metadata
        0       2       C               mno
        0       3       G               )(*&^%$#@!
        """)
        m = tskit.parse_mutations(
            mutations, strict=False, encoding='utf8', base64_metadata=False)
        expected = ['mno', ')(*&^%$#@!']
        for a, b in zip(expected, m):
            self.assertEqual(a.encode('utf8'),
                             b.metadata)
    def test_populations(self):
        populations = io.StringIO("""\
        id    metadata
        0     mno
        1     )(*&^%$#@!
        """)
        p = tskit.parse_populations(
            populations, strict=False, encoding='utf8', base64_metadata=False)
        expected = ['mno', ')(*&^%$#@!']
        for a, b in zip(expected, p):
            self.assertEqual(a.encode('utf8'),
                             b.metadata)
| 38.166667 | 89 | 0.609071 |
import io
import json
import os
import tempfile
import unittest
import pickle
import numpy as np
import python_jsonschema_objects as pjs
import msprime
import tskit
class TestMetadataHdf5RoundTrip(unittest.TestCase):
    """
    Tests that metadata encoded as JSON or pickle round-trips through the
    on-disk tree sequence format.
    """
    def setUp(self):
        # Named temp file for the dump/load cycle; close the raw fd so the
        # library can reopen the path itself.
        fd, self.temp_file = tempfile.mkstemp(prefix="msp_hdf5meta_test_")
        os.close(fd)
    def tearDown(self):
        os.unlink(self.temp_file)
    def test_json(self):
        """JSON-encoded per-node metadata survives dump/load."""
        ts = msprime.simulate(10, random_seed=1)
        tables = ts.dump_tables()
        nodes = tables.nodes
        # One JSON-encodable dict per node.
        metadata = [
            {"one": j, "two": 2 * j, "three": list(range(j))} for j in range(len(nodes))]
        encoded, offset = tskit.pack_strings(map(json.dumps, metadata))
        nodes.set_columns(
            flags=nodes.flags, time=nodes.time, population=nodes.population,
            metadata_offset=offset, metadata=encoded)
        self.assertTrue(np.array_equal(nodes.metadata_offset, offset))
        self.assertTrue(np.array_equal(nodes.metadata, encoded))
        ts1 = tables.tree_sequence()
        for j, node in enumerate(ts1.nodes()):
            decoded_metadata = json.loads(node.metadata.decode())
            self.assertEqual(decoded_metadata, metadata[j])
        ts1.dump(self.temp_file)
        ts2 = tskit.load(self.temp_file)
        self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
    def test_pickle(self):
        """Pickled per-node metadata survives dump/load."""
        ts = msprime.simulate(10, random_seed=1)
        tables = ts.dump_tables()
        # One picklable dict per node.
        metadata = [
            {"one": j, "two": 2 * j, "three": list(range(j))}
            for j in range(ts.num_nodes)]
        encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))
        tables.nodes.set_columns(
            flags=tables.nodes.flags, time=tables.nodes.time,
            population=tables.nodes.population,
            metadata_offset=offset, metadata=encoded)
        self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))
        self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))
        ts1 = tables.tree_sequence()
        for j, node in enumerate(ts1.nodes()):
            decoded_metadata = pickle.loads(node.metadata)
            self.assertEqual(decoded_metadata, metadata[j])
        ts1.dump(self.temp_file)
        ts2 = tskit.load(self.temp_file)
        self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
class ExampleMetadata(object):
    """
    Minimal picklable value holder used as a metadata payload in tests.
    """
    def __init__(self, one=None, two=None):
        self.one, self.two = one, two
class TestMetadataPickleDecoding(unittest.TestCase):
    """
    Tests that pickled metadata attached to nodes, sites and mutations can
    be recovered intact from a tree sequence.
    """
    def test_nodes(self):
        tables = tskit.TableCollection(sequence_length=1)
        metadata = ExampleMetadata(one="node1", two="node2")
        pickled = pickle.dumps(metadata)
        tables.nodes.add_row(time=0.125, metadata=pickled)
        ts = tables.tree_sequence()
        node = ts.node(0)
        self.assertEqual(node.time, 0.125)
        # Stored bytes are returned verbatim and unpickle to an equal object.
        self.assertEqual(node.metadata, pickled)
        unpickled = pickle.loads(node.metadata)
        self.assertEqual(unpickled.one, metadata.one)
        self.assertEqual(unpickled.two, metadata.two)
    def test_sites(self):
        tables = tskit.TableCollection(sequence_length=1)
        metadata = ExampleMetadata(one="node1", two="node2")
        pickled = pickle.dumps(metadata)
        tables.sites.add_row(position=0.1, ancestral_state="A", metadata=pickled)
        ts = tables.tree_sequence()
        site = ts.site(0)
        self.assertEqual(site.position, 0.1)
        self.assertEqual(site.ancestral_state, "A")
        self.assertEqual(site.metadata, pickled)
        unpickled = pickle.loads(site.metadata)
        self.assertEqual(unpickled.one, metadata.one)
        self.assertEqual(unpickled.two, metadata.two)
    def test_mutations(self):
        tables = tskit.TableCollection(sequence_length=1)
        metadata = ExampleMetadata(one="node1", two="node2")
        pickled = pickle.dumps(metadata)
        # A mutation needs a node and a site to attach to.
        tables.nodes.add_row(time=0)
        tables.sites.add_row(position=0.1, ancestral_state="A")
        tables.mutations.add_row(site=0, node=0, derived_state="T", metadata=pickled)
        ts = tables.tree_sequence()
        mutation = ts.site(0).mutations[0]
        self.assertEqual(mutation.site, 0)
        self.assertEqual(mutation.node, 0)
        self.assertEqual(mutation.derived_state, "T")
        self.assertEqual(mutation.metadata, pickled)
        unpickled = pickle.loads(mutation.metadata)
        self.assertEqual(unpickled.one, metadata.one)
        self.assertEqual(unpickled.two, metadata.two)
class TestJsonSchemaDecoding(unittest.TestCase):
    """
    Tests that metadata can be encoded/decoded via classes generated from a
    JSON schema (python_jsonschema_objects).
    """
    schema = """{
        "title": "Example Metadata",
        "type": "object",
        "properties": {
            "one": {"type": "string"},
            "two": {"type": "string"}
        },
        "required": ["one", "two"]
    }"""
    def test_nodes(self):
        tables = tskit.TableCollection(sequence_length=1)
        # Build a Python class from the schema and encode an instance as
        # JSON bytes.
        builder = pjs.ObjectBuilder(json.loads(self.schema))
        ns = builder.build_classes()
        metadata = ns.ExampleMetadata(one="node1", two="node2")
        encoded = json.dumps(metadata.as_dict()).encode()
        tables.nodes.add_row(time=0.125, metadata=encoded)
        ts = tables.tree_sequence()
        node = ts.node(0)
        self.assertEqual(node.time, 0.125)
        self.assertEqual(node.metadata, encoded)
        # Decoding through the schema-generated class validates the payload.
        decoded = ns.ExampleMetadata.from_json(node.metadata.decode())
        self.assertEqual(decoded.one, metadata.one)
        self.assertEqual(decoded.two, metadata.two)
class TestLoadTextMetadata(unittest.TestCase):
def test_individuals(self):
individuals = io.StringIO("""\
id flags location metadata
0 1 0.0,1.0,0.0 abc
1 1 1.0,2.0 XYZ+
2 0 2.0,3.0,0.0 !@#$%^&*()
""")
i = tskit.parse_individuals(
individuals, strict=False, encoding='utf8', base64_metadata=False)
expected = [(1, [0.0, 1.0, 0.0], 'abc'),
(1, [1.0, 2.0], 'XYZ+'),
(0, [2.0, 3.0, 0.0], '!@#$%^&*()')]
for a, b in zip(expected, i):
self.assertEqual(a[0], b.flags)
self.assertEqual(len(a[1]), len(b.location))
for x, y in zip(a[1], b.location):
self.assertEqual(x, y)
self.assertEqual(a[2].encode('utf8'),
b.metadata)
def test_nodes(self):
nodes = io.StringIO("""\
id is_sample time metadata
0 1 0 abc
1 1 0 XYZ+
2 0 1 !@#$%^&*()
""")
n = tskit.parse_nodes(
nodes, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, n):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_sites(self):
sites = io.StringIO("""\
position ancestral_state metadata
0.1 A abc
0.5 C XYZ+
0.8 G !@#$%^&*()
""")
s = tskit.parse_sites(
sites, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, s):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_mutations(self):
mutations = io.StringIO("""\
site node derived_state metadata
0 2 C mno
0 3 G )(*&^%$#@!
""")
m = tskit.parse_mutations(
mutations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, m):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_populations(self):
populations = io.StringIO("""\
id metadata
0 mno
1 )(*&^%$#@!
""")
p = tskit.parse_populations(
populations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, p):
self.assertEqual(a.encode('utf8'),
b.metadata)
| true | true |
f72fa0e8e5609366cc6f530508b8be45c3bde4a6 | 410 | py | Python | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | def square_of_sum(number):
count = 0
for i in range(1,number + 1):
count += i
c_squared = count**2
return c_squared
def sum_of_squares(number):
total = 0
for i in range(1,number + 1):
total = total + (i**2)
return total
def difference(number):
first = square_of_sum(number)
second = sum_of_squares(number)
total = abs(first - second)
return total
| 20.5 | 35 | 0.617073 | def square_of_sum(number):
count = 0
for i in range(1,number + 1):
count += i
c_squared = count**2
return c_squared
def sum_of_squares(number):
total = 0
for i in range(1,number + 1):
total = total + (i**2)
return total
def difference(number):
first = square_of_sum(number)
second = sum_of_squares(number)
total = abs(first - second)
return total
| true | true |
f72fa13b43a3421e3c2d1f7f4c41c033295dd9e3 | 385 | py | Python | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 143 | 2020-06-10T06:07:26.000Z | 2022-03-02T10:09:16.000Z | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 1 | 2020-06-12T21:52:57.000Z | 2020-06-12T21:52:57.000Z | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 6 | 2020-06-11T19:21:52.000Z | 2021-12-21T08:33:27.000Z | import uvicorn
class Server(uvicorn.Server):
async def startup(self, sockets=None):
await super().startup(sockets=sockets)
for f in self.config.loaded_app.startup_funcs:
await f()
async def shutdown(self, sockets=None):
await super().shutdown(sockets=sockets)
for f in self.config.loaded_app.shutdown_funcs:
await f()
| 27.5 | 55 | 0.654545 | import uvicorn
class Server(uvicorn.Server):
async def startup(self, sockets=None):
await super().startup(sockets=sockets)
for f in self.config.loaded_app.startup_funcs:
await f()
async def shutdown(self, sockets=None):
await super().shutdown(sockets=sockets)
for f in self.config.loaded_app.shutdown_funcs:
await f()
| true | true |
f72fa17a88f924a7ba488b256d1381abe02cf435 | 381 | py | Python | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | 1 | 2020-09-05T03:07:35.000Z | 2020-09-05T03:07:35.000Z | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | null | null | null | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | null | null | null | from utils import Compression
class Vertex(object):
def __init__(self, identifier, ctype):
self.id = identifier
self.type = ctype
def compress(self):
return Compression.compress(self)
@staticmethod
def decompress(val):
return Compression.decompress(val)
def __repr__(self):
return '{}-{}'.format(self.id, self.type)
| 21.166667 | 49 | 0.643045 | from utils import Compression
class Vertex(object):
def __init__(self, identifier, ctype):
self.id = identifier
self.type = ctype
def compress(self):
return Compression.compress(self)
@staticmethod
def decompress(val):
return Compression.decompress(val)
def __repr__(self):
return '{}-{}'.format(self.id, self.type)
| true | true |
f72fa21904e69b4faea9e41fe537a9887bcd643e | 2,662 | py | Python | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | 1 | 2021-03-10T04:00:01.000Z | 2021-03-10T04:00:01.000Z | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | import unittest
class Tester(unittest.TestCase):
def test_zip(self):
"""
zip takes to arrays and makes an array of tuples where tuple 1 is a
tuple composed of element 1 of array 1 and 2, etc...
"""
# combines to arrays into one array of tuples
self.assertEqual(
zip(sorted(set('qwerty')), sorted(set('asdfgh'))),
[('e', 'a'), ('q', 'd'), ('r', 'f'),
('t', 'g'), ('w', 'h'), ('y', 's')]
)
questions = ['name', 'quest', 'favorite color', "WILL GET SKIPPED"]
answers = ['lancelot', 'the holy grail', 'blue']
self.assertEqual(
zip(questions, answers),
[('name', 'lancelot'), ('quest', 'the holy grail'),
('favorite color', 'blue')]
)
a = [1, 2]
b = [(1), (2)]
c = [(1,), (2,)]
d = [(1, 1), (2, 2)]
self.assertEquals(
zip(a, d),
zip(b, d),
[(1, (1, 1)), (2, (2, 2))]
)
self.assertEquals(
zip(a, b),
[(1, 1), (2, 2)],
)
self.assertEquals(
zip(a, c),
zip(b, c),
[(1, (1,)), (2, (2,))],
)
self.assertEquals(
zip(c, d),
[((1,), (1, 1)), ((2,), (2, 2))],
)
def test_any(self):
"""
any([array])
=> takes an array and returns true if any of the elements are true
"""
self.assertEquals(any([True, False]), True)
self.assertEquals(any([None, "apple"]), True)
self.assertEquals(any([False, False]), False)
self.assertEquals(any([None, ""]), False)
def test_enumerate_and_string_sets(self):
"""
* set('string') => returns a set of the charcacters of the string,
it also skips any duplicate characters.
* enumerate(<list>) => returns a list of the following nature:
[(1, <first_element_of_list>), ..., (N, <Nth_element_of_list>)]
* <dict>.items() => returns a list of the following nature:
[(key, value), ...]
"""
# generates an itterator that returns [(index, value), ....]
char_list = [(index, v) for index, v in enumerate(sorted(set('abca')))]
self.assertEquals(
{0: "a", 1: 'b', 2: 'c'}.items(),
char_list
)
def test_reverse_enumerate_and_string_sets(self):
self.assertEquals(
[x for x in reversed(sorted(set(('aceg'*4) + ('bdfh'*3))))],
list(reversed(sorted(set('abcdefgh'))))
)
if __name__ == "__main__":
unittest.main()
| 30.25 | 79 | 0.472953 | import unittest
class Tester(unittest.TestCase):
def test_zip(self):
self.assertEqual(
zip(sorted(set('qwerty')), sorted(set('asdfgh'))),
[('e', 'a'), ('q', 'd'), ('r', 'f'),
('t', 'g'), ('w', 'h'), ('y', 's')]
)
questions = ['name', 'quest', 'favorite color', "WILL GET SKIPPED"]
answers = ['lancelot', 'the holy grail', 'blue']
self.assertEqual(
zip(questions, answers),
[('name', 'lancelot'), ('quest', 'the holy grail'),
('favorite color', 'blue')]
)
a = [1, 2]
b = [(1), (2)]
c = [(1,), (2,)]
d = [(1, 1), (2, 2)]
self.assertEquals(
zip(a, d),
zip(b, d),
[(1, (1, 1)), (2, (2, 2))]
)
self.assertEquals(
zip(a, b),
[(1, 1), (2, 2)],
)
self.assertEquals(
zip(a, c),
zip(b, c),
[(1, (1,)), (2, (2,))],
)
self.assertEquals(
zip(c, d),
[((1,), (1, 1)), ((2,), (2, 2))],
)
def test_any(self):
self.assertEquals(any([True, False]), True)
self.assertEquals(any([None, "apple"]), True)
self.assertEquals(any([False, False]), False)
self.assertEquals(any([None, ""]), False)
def test_enumerate_and_string_sets(self):
char_list = [(index, v) for index, v in enumerate(sorted(set('abca')))]
self.assertEquals(
{0: "a", 1: 'b', 2: 'c'}.items(),
char_list
)
def test_reverse_enumerate_and_string_sets(self):
self.assertEquals(
[x for x in reversed(sorted(set(('aceg'*4) + ('bdfh'*3))))],
list(reversed(sorted(set('abcdefgh'))))
)
if __name__ == "__main__":
unittest.main()
| true | true |
f72fa31aa61c2010032ba331da9c46f0c28c8f64 | 586 | py | Python | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 13 | 2021-07-24T20:49:35.000Z | 2021-08-21T18:15:16.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 2 | 2021-08-17T17:11:09.000Z | 2021-09-01T19:05:17.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 1 | 2021-07-29T16:36:35.000Z | 2021-07-29T16:36:35.000Z | from dataclasses import dataclass
from typing import Optional, List
@dataclass
class RecipeConstraints:
meal: Optional[str] = None
ingredients: Optional[List[str]] = None
@dataclass
class Printable:
title: str = ""
ingredients: str = ""
preparation: str = ""
error_message: Optional[str] = None
warning_message: Optional[str] = None
info_message: Optional[str] = None
@dataclass
class FetchingError(Exception):
message: str = "An error ocurred"
@dataclass
class PrintInterrupt(Exception):
printable: Printable
| 20.206897 | 44 | 0.677474 | from dataclasses import dataclass
from typing import Optional, List
@dataclass
class RecipeConstraints:
meal: Optional[str] = None
ingredients: Optional[List[str]] = None
@dataclass
class Printable:
title: str = ""
ingredients: str = ""
preparation: str = ""
error_message: Optional[str] = None
warning_message: Optional[str] = None
info_message: Optional[str] = None
@dataclass
class FetchingError(Exception):
message: str = "An error ocurred"
@dataclass
class PrintInterrupt(Exception):
printable: Printable
| true | true |
f72fa471e47a678d1cdcba520ccfb797969a10c8 | 43 | py | Python | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-09-13T17:51:55.000Z | 2020-11-25T18:47:12.000Z | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T19:18:15.000Z | 2021-06-01T21:48:12.000Z | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | null | null | null | from . import platform
from . import utils
| 14.333333 | 22 | 0.767442 | from . import platform
from . import utils
| true | true |
f72fa4cf65c6c8ffeabe5891fa79f70b7f2fe473 | 8,228 | py | Python | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 10 | 2017-10-17T04:35:44.000Z | 2021-03-19T21:12:15.000Z | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 224 | 2017-10-18T18:33:48.000Z | 2022-02-02T03:33:04.000Z | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 1 | 2018-08-02T03:10:25.000Z | 2018-08-02T03:10:25.000Z | import re
from dateutil.parser import parse
from django.utils import timezone as tz
from .base_csv_importer import BaseCsvImporter
from app.constants.item_map import ITEM_MAP
from app.enums import ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
class HistoricalDataImporter(BaseCsvImporter):
"""Takes 10b format file path and imports into the database using the 10x
format into the appropriate tables.
:param str csvfile: csvfile path
"""
bulk_model = Item
def parse_row(self, row):
donor = self._goc_donor(self._parse_donor(row))
donation = self._goc_donation(self._parse_donation(row), donor)
device_type = self._goc_device_type(self._parse_device_type(row))
device = self._goc_item_device(
self._parse_item_device(row), device_type)
self.model_bulk.append(
self._new_item(self._parse_item(row), donation, device))
def _parse_donor(self, row):
"""Takes a row and parses relevant Donor data into a dict.
:param dict row: A CSV row dict
:return: Donor related data dict
:rtype: dict
"""
receipt_option_f = {
"notneeded": "REFUSED",
"email": "EMAIL",
"mail": "MAIL"
}.get(re.sub("[^a-zA-Z]+", "", row["TRV"]).lower(), "EMAIL")
documented_at_f = self._parse_date(row["Date"])
postal_f = re.sub("[^a-zA-Z0-9 ]+", "", row["Postal Code"]).upper()[:7]
return {
"donor_name": row["Donor Name"],
"contact_name": row.get("Contact", row["Donor Name"]),
"email": row["Email"],
"want_receipt": receipt_option_f,
"telephone_number": row["Telephone"],
"mobile_number": row["Mobile"],
"address_line_one": row["Address"],
"address_line_two": row.get("Unit", ""),
"city": row["City"],
"province": row["Prov."],
"postal_code": postal_f,
"customer_ref": row["CustRef"],
"documented_at": documented_at_f
}
def _parse_donation(self, row):
"""Takes a csv row and parses relevant Donation data into a dict.
:param dict row: A CSV row dict
:return: Donation related data dict
:rtype: dict
"""
donate_date_f = documented_at_f = self._parse_date(row["Date"])
return {
"tax_receipt_no": row["TR#"],
"pledge_date": donate_date_f,
"donate_date": donate_date_f,
"test_date": donate_date_f,
"valuation_date": donate_date_f,
"pick_up": row["PPC"],
"source": "HISTORICAL_DATA", # Fixed
"documented_at": documented_at_f,
"tax_receipt_created_at": tz.now()
}
def _parse_device_type(self, row):
"""Takes a csv row and parses relevant ItemDeviceType data into a dict.
:param dict row: A CSV row dict
:return: ItemDeviceType related data dict
:rtype: dict
"""
dtype = ITEM_MAP.get(row["Item Description"].lower(), None)
if dtype is None:
return {
"category": "not categorized",
"device_type": row["Item Description"],
}
return dtype
def _parse_item_device(self, row):
"""Takes a csv row and parses relevant ItemDevice data into a dict.
:param dict row: A CSV row dict
:return: ItemDevice related data dict
:rtype: dict
"""
return {
"make": row["Manufacturer"],
"model": row["Model"],
"cpu_type": "",
"speed": "",
"memory": None,
"hd_size": None,
"screen_size": "",
"hdd_serial_number": "",
"operating_system": ""
}
def _parse_item(self, row):
"""Takes a csv row and parses relevant Item data into a dict.
:param dict row: A CSV row dict
:return: Item related data dict
:rtype: dict
"""
working_f = row["Working"].lower() == "y"
donate_date_f = documented_at_f = self._parse_date(row["Date"])
batch_f = "" if row["Batch"] == "0" else row["Batch"]
particulars_f = row["Item Particulars"]
if particulars_f == "0":
particulars_f = ""
qty_f = int(row.get("Qty", 0))
try:
value_f = float(re.sub("[^0-9|.]", "", row["Value"]))
except ValueError:
value_f = 0.0
value_per_f = round(value_f / qty_f, 2)
return {
"serial_number": "",
"asset_tag": "",
"particulars": particulars_f,
"quantity": row["Qty"],
"working": working_f,
"condition": row["Condition"],
"quality": row["Quality"],
"batch": batch_f,
"value": str(value_per_f),
"verified": True,
"documented_at": documented_at_f,
"status": ItemStatusEnum.RECEIVED.name,
"notes": particulars_f,
"valuation_date": donate_date_f,
# "weight":
# "valuation_supporting_doc":
}
def _goc_donor(self, data):
"""get_or_create a Donor.
:param dict row: A Donor dict
:return: Donor object
:rtype: app.models.Donor instance
"""
try:
donor = Donor.objects.filter(
donor_name=data['donor_name'],
contact_name=data['contact_name'],
email=data['email'],
want_receipt=data['want_receipt'],
telephone_number=data['telephone_number'],
mobile_number=data['mobile_number'],
address_line_one=data['address_line_one'],
address_line_two=data['address_line_two'],
city=data['city'],
province=data['province'],
postal_code=data['postal_code'],
).first()
if donor is None:
raise Donor.DoesNotExist
except Exception:
donor = Donor.objects.create(**data)
return donor
def _goc_donation(self, data, donor):
"""get_or_create a Donation.
:param dict row: A Donation dict
:param obj donor: app.model.Donor object
:return: Donation object
:rtype: app.models.Donation instance
"""
try:
# Match by tax receipt number rather than full donation data
d = Donation.objects.get(tax_receipt_no=data.get("tax_receipt_no"))
except Exception:
d = Donation.objects.create(donor=donor, **data)
return d
def _goc_device_type(self, data):
"""get_or_create a ItemDeviceType.
:param dict row: A ItemDeviceType dict
:return: ItemDeviceType object
:rtype: app.models.ItemDeviceType instance
"""
dtype, unique = ItemDeviceType.objects.get_or_create(**data)
return dtype
def _goc_item_device(self, data, dtype):
"""get_or_create a ItemDevice.
:param dict row: A ItemDevice dict
:param obj device_type: app.model.ItemDeviceType object
:return: ItemDevice object
:rtype: app.models.ItemDevice instance
"""
i, unique = ItemDevice.objects.get_or_create(dtype=dtype, **data)
return i
def _new_item(self, data, donation, device):
"""Initialize a new Item object.
:param dict row: A Item dict
:param obj donation: app.model.Donation object
:param obj device: app.model.ItemDevice object
:return: Item object
:rtype: app.models.Item instance
"""
try:
i = Item(donation=donation, device=device, **data)
i.clean_fields()
except Exception as e:
self.logger.error(f"Item Data: {i.underscore_serialize()}")
raise e
return i
@staticmethod
def _parse_date(date_f):
""" Takes dynamic date formats and unifies them into Y-m-d format
"""
date = parse(date_f, dayfirst=True)
return date.strftime('%Y-%m-%d')
| 34.426778 | 79 | 0.565386 | import re
from dateutil.parser import parse
from django.utils import timezone as tz
from .base_csv_importer import BaseCsvImporter
from app.constants.item_map import ITEM_MAP
from app.enums import ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
class HistoricalDataImporter(BaseCsvImporter):
bulk_model = Item
def parse_row(self, row):
donor = self._goc_donor(self._parse_donor(row))
donation = self._goc_donation(self._parse_donation(row), donor)
device_type = self._goc_device_type(self._parse_device_type(row))
device = self._goc_item_device(
self._parse_item_device(row), device_type)
self.model_bulk.append(
self._new_item(self._parse_item(row), donation, device))
def _parse_donor(self, row):
receipt_option_f = {
"notneeded": "REFUSED",
"email": "EMAIL",
"mail": "MAIL"
}.get(re.sub("[^a-zA-Z]+", "", row["TRV"]).lower(), "EMAIL")
documented_at_f = self._parse_date(row["Date"])
postal_f = re.sub("[^a-zA-Z0-9 ]+", "", row["Postal Code"]).upper()[:7]
return {
"donor_name": row["Donor Name"],
"contact_name": row.get("Contact", row["Donor Name"]),
"email": row["Email"],
"want_receipt": receipt_option_f,
"telephone_number": row["Telephone"],
"mobile_number": row["Mobile"],
"address_line_one": row["Address"],
"address_line_two": row.get("Unit", ""),
"city": row["City"],
"province": row["Prov."],
"postal_code": postal_f,
"customer_ref": row["CustRef"],
"documented_at": documented_at_f
}
def _parse_donation(self, row):
donate_date_f = documented_at_f = self._parse_date(row["Date"])
return {
"tax_receipt_no": row["TR#"],
"pledge_date": donate_date_f,
"donate_date": donate_date_f,
"test_date": donate_date_f,
"valuation_date": donate_date_f,
"pick_up": row["PPC"],
"source": "HISTORICAL_DATA",
"documented_at": documented_at_f,
"tax_receipt_created_at": tz.now()
}
def _parse_device_type(self, row):
dtype = ITEM_MAP.get(row["Item Description"].lower(), None)
if dtype is None:
return {
"category": "not categorized",
"device_type": row["Item Description"],
}
return dtype
def _parse_item_device(self, row):
return {
"make": row["Manufacturer"],
"model": row["Model"],
"cpu_type": "",
"speed": "",
"memory": None,
"hd_size": None,
"screen_size": "",
"hdd_serial_number": "",
"operating_system": ""
}
def _parse_item(self, row):
working_f = row["Working"].lower() == "y"
donate_date_f = documented_at_f = self._parse_date(row["Date"])
batch_f = "" if row["Batch"] == "0" else row["Batch"]
particulars_f = row["Item Particulars"]
if particulars_f == "0":
particulars_f = ""
qty_f = int(row.get("Qty", 0))
try:
value_f = float(re.sub("[^0-9|.]", "", row["Value"]))
except ValueError:
value_f = 0.0
value_per_f = round(value_f / qty_f, 2)
return {
"serial_number": "",
"asset_tag": "",
"particulars": particulars_f,
"quantity": row["Qty"],
"working": working_f,
"condition": row["Condition"],
"quality": row["Quality"],
"batch": batch_f,
"value": str(value_per_f),
"verified": True,
"documented_at": documented_at_f,
"status": ItemStatusEnum.RECEIVED.name,
"notes": particulars_f,
"valuation_date": donate_date_f,
}
def _goc_donor(self, data):
try:
donor = Donor.objects.filter(
donor_name=data['donor_name'],
contact_name=data['contact_name'],
email=data['email'],
want_receipt=data['want_receipt'],
telephone_number=data['telephone_number'],
mobile_number=data['mobile_number'],
address_line_one=data['address_line_one'],
address_line_two=data['address_line_two'],
city=data['city'],
province=data['province'],
postal_code=data['postal_code'],
).first()
if donor is None:
raise Donor.DoesNotExist
except Exception:
donor = Donor.objects.create(**data)
return donor
def _goc_donation(self, data, donor):
try:
d = Donation.objects.get(tax_receipt_no=data.get("tax_receipt_no"))
except Exception:
d = Donation.objects.create(donor=donor, **data)
return d
def _goc_device_type(self, data):
dtype, unique = ItemDeviceType.objects.get_or_create(**data)
return dtype
def _goc_item_device(self, data, dtype):
i, unique = ItemDevice.objects.get_or_create(dtype=dtype, **data)
return i
def _new_item(self, data, donation, device):
try:
i = Item(donation=donation, device=device, **data)
i.clean_fields()
except Exception as e:
self.logger.error(f"Item Data: {i.underscore_serialize()}")
raise e
return i
@staticmethod
def _parse_date(date_f):
date = parse(date_f, dayfirst=True)
return date.strftime('%Y-%m-%d')
| true | true |
f72fa56028b7563a6c7ecc30355b5545eb740df6 | 16,939 | py | Python | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | # https://github.com/arXiv/arxiv-base@32e6ad0
"""
Copyright 2017 Cornell University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""Parse Authors lines to extract author and affiliation data."""
import re
import os
import gzip
import json
from itertools import dropwhile
from typing import Dict, Iterator, List, Tuple
from multiprocessing import Pool, cpu_count
# from arxiv_public_data.tex2utf import tex2utf
# from arxiv_public_data.config import LOGGER, DIR_OUTPUT
# logger = LOGGER.getChild('authorsplit')
# Surname prefixes (nobiliary particles, "Mac", etc.) recognized when a full
# name is split into keyname + firstnames; used case-insensitively inside the
# name-splitting regexes in _parse_author_affil_split().
PREFIX_MATCH = 'van|der|de|la|von|del|della|da|mac|ter|dem|di|vaziri'
"""
Takes data from an Author: line in the current arXiv abstract
file and returns a structured set of data:
author_list_ptr = [
[ author1_keyname, author1_firstnames, author1_suffix, affil1, affil2 ] ,
   [ author2_keyname, author2_firstnames, author2_suffix, affil1 ] ,
   [ author3_keyname, author3_firstnames, author3_suffix ]
]
Abstracted from Dienst software for OAI1 and other uses. This
routine should just go away when a better metadata structure is
adopted that deals with names and affiliations properly.
Must remember that there is at least one person one the archive
who has only one name, this should clearly be considered the key name.
Code originally written by Christina Scovel, Simeon Warner Dec99/Jan00
2000-10-16 - separated.
2000-12-07 - added support for suffix
2003-02-14 - get surname prefixes from arXiv::Filters::Index [Simeon]
2007-10-01 - created test script, some tidying [Simeon]
2018-05-25 - Translated from Perl to Python [Brian C.]
"""
def parse_author_affil(authors: str) -> List[List[str]]:
    """
    Parse an authors line into structured author/affiliation entries.

    Each entry in the returned list has at least three elements:
    keyname, firstname(s) and suffix. The keyname always has content,
    the other two may be empty strings. Elements after the first three
    (zero or more) are affiliations.

    This wraps the two-stage process: first split the authors line into
    per-author entries, then back-propagate affiliations to earlier
    authors that lack them. Use the split stage alone for display when
    back-propagated affiliations are not wanted.

    :param authors: string of authors from abs file or similar
    :return:
        Structured data of the form::

            [
              [ author1_keyname, author1_firstnames, author1_suffix, affil1, affil2 ],
              [ author2_keyname, author2_firstnames, author2_suffix, affil1 ],
              [ author3_keyname, author3_firstnames, author3_suffix ]
            ]
    """
    split_result = _parse_author_affil_split(authors)
    return _parse_author_affil_back_propagate(
        author_list=split_result['author_list'],
        back_prop=split_result['back_prop'])
def _parse_author_affil_split(author_line: str) -> Dict:
    """
    Split author line into author and affiliation data.

    Take author line, tidy spacing and punctuation, and then split up into
    individual author and affiliation data. Has special cases to avoid
    splitting an initial collaboration name and records in the returned
    'back_prop' count the fact that affiliations should not be back
    propagated to collaboration names.

    Does not handle multiple collaboration names.

    :param author_line: raw authors string from an abs file or similar
    :return: dict with 'author_list' (list of [keyname, firstnames, suffix,
        affil...] entries) and 'back_prop' (number of leading collaboration
        entries that must not receive back-propagated affiliations)
    """
    # Empty/None input: nothing to parse.
    if not author_line:
        return {'author_list': [], 'back_prop': 0}
    names: List[str] = split_authors(author_line)
    if not names:
        return {'author_list': [], 'back_prop': 0}
    names = _remove_double_commas(names)
    # get rid of commas at back
    namesIter: Iterator[str] = reversed(
        list(dropwhile(lambda x: x == ',', reversed(names))))
    # get rid of commas at front
    names = list(dropwhile(lambda x: x == ',', namesIter))
    # Extract all names (all parts not starting with comma or paren)
    names = list(map(_tidy_name, filter(
        lambda x: re.match('^[^](,]', x), names)))
    # Drop any "et al." tokens; they are not author names.
    names = list(filter(lambda n: not re.match(
        r'^\s*et\.?\s+al\.?\s*', n, flags=re.IGNORECASE), names))
    # Peel off a leading "XYZ Collaboration/Group/Team" entry, if present,
    # and remember how many such entries to protect from back-propagation.
    (names, author_list,
     back_propagate_affiliations_to) = _collaboration_at_start(names)
    # Separate enumerated affiliations of the form "((1) affil1 (2) affil2)"
    # at the end of the line, keyed by their number.
    (enumaffils) = _enum_collaboration_at_end(author_line)
    # Split name into keyname and firstnames/initials.
    # Deal with different patterns in turn: prefixes, suffixes, plain
    # and single name.  Patterns are tried in order; first match wins.
    patterns = [('double-prefix',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(' +
                 PREFIX_MATCH + r')\s(\S+)$'),
                ('name-prefix-name',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(\S+)$'),
                ('name-name-prefix',
                 r'^(.*)\s+(\S+)\s(I|II|III|IV|V|Sr|Jr|Sr\.|Jr\.)$'),
                ('name-name',
                 r'^(.*)\s+(\S+)$'), ]
    # Now go through names in turn and try to get affiliations
    # to go with them
    for name in names:
        # Lazily evaluate each pattern against the name; stop at the first
        # that matches (or fall through to 'default' for single-word names).
        pattern_matches = ((mtype, re.match(m, name, flags=re.IGNORECASE))
                           for (mtype, m) in patterns)
        (mtype, match) = next(((mtype, m)
                               for (mtype, m) in pattern_matches
                               if m is not None), ('default', None))
        if match is None:
            # Single name only: treat it as the keyname.
            author_entry = [name, '', '']
        elif mtype == 'double-prefix':
            # e.g. "J. van der Waals" -> keyname "van der Waals"
            s = '{} {} {}'.format(match.group(
                2), match.group(3), match.group(4))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-prefix-name':
            # e.g. "L. van Beethoven" -> keyname "van Beethoven"
            s = '{} {}'.format(match.group(2), match.group(3))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-name-prefix':
            # Suffix form, e.g. "John Smith Jr." -> suffix "Jr."
            author_entry = [match.group(2), match.group(1), match.group(3)]
        elif mtype == 'name-name':
            # Plain "firstnames keyname" form.
            author_entry = [match.group(2), match.group(1), '']
        else:
            author_entry = [name, '', '']
        # search back in author_line for affiliation
        author_entry = _add_affiliation(
            author_line, enumaffils, author_entry, name)
        author_list.append(author_entry)
    return {'author_list': author_list,
            'back_prop': back_propagate_affiliations_to}
def parse_author_affil_utf(authors: str) -> List:
    """
    Call parse_author_affil() and do TeX to UTF conversion.

    Output structure is the same as parse_author_affil() but every string
    field (names, suffixes, affiliations) is converted from TeX to UTF.

    :param authors: string of authors from abs file or similar
    :return: list of [keyname, firstnames, suffix, affil...] entries in UTF
    """
    if not authors:
        return []
    # The module-level import of tex2utf is commented out at the top of this
    # file, so this function previously raised NameError whenever called.
    # Import lazily here: the module stays importable without tex2utf, and
    # the function works again when the dependency is available.
    from arxiv_public_data.tex2utf import tex2utf
    return [[tex2utf(part) for part in author]
            for author in parse_author_affil(authors)]
def _remove_double_commas(items: List[str]) -> List[str]:
parts: List[str] = []
last = ''
for pt in items:
if pt == ',' and last == ',':
continue
else:
parts.append(pt)
last = pt
return parts
def _tidy_name(name: str) -> str:
name = re.sub(r'\s\s+', ' ', name) # also gets rid of CR
# add space after dot (except in TeX)
name = re.sub(r'(?<!\\)\.(\S)', r'. \g<1>', name)
return name
def _collaboration_at_start(names: List[str]) \
-> Tuple[List[str], List[List[str]], int]:
"""Perform special handling of collaboration at start."""
author_list = []
back_propagate_affiliations_to = 0
while len(names) > 0:
m = re.search(r'([a-z0-9\s]+\s+(collaboration|group|team))',
names[0], flags=re.IGNORECASE)
if not m:
break
# Add to author list
author_list.append([m.group(1), '', ''])
back_propagate_affiliations_to += 1
# Remove from names
names.pop(0)
# Also swallow and following comma or colon
if names and (names[0] == ',' or names[0] == ':'):
names.pop(0)
return names, author_list, back_propagate_affiliations_to
def _enum_collaboration_at_end(author_line: str)->Dict:
"""Get separate set of enumerated affiliations from end of author_line."""
# Now see if we have a separate set of enumerated affiliations
# This is indicated by finding '(\s*('
line_m = re.search(r'\(\s*\((.*)$', author_line)
if not line_m:
return {}
enumaffils = {}
affils = re.sub(r'\s*\)\s*$', '', line_m.group(1))
# Now expect to have '1) affil1 (2) affil2 (3) affil3'
for affil in affils.split('('):
# Now expect `1) affil1 ', discard if no match
m = re.match(r'^(\d+)\)\s*(\S.*\S)\s*$', affil)
if m:
enumaffils[m.group(1)] = re.sub(r'[\.,\s]*$', '', m.group(2))
return enumaffils
def _add_affiliation(author_line: str,
enumaffils: Dict,
author_entry: List[str],
name: str) -> List:
"""
Add author affiliation to author_entry if one is found in author_line.
This should deal with these cases
Smith B(labX) Smith B(1) Smith B(1, 2) Smith B(1 & 2) Smith B(1 and 2)
"""
en = re.escape(name)
namerex = r'{}\s*\(([^\(\)]+)'.format(en.replace(' ', 's*'))
m = re.search(namerex, author_line, flags=re.IGNORECASE)
if not m:
return author_entry
# Now see if we have enumerated references (just commas, digits, &, and)
affils = m.group(1).rstrip().lstrip()
affils = re.sub(r'(&|and)/,', ',', affils, flags=re.IGNORECASE)
if re.match(r'^[\d,\s]+$', affils):
for affil in affils.split(','):
if affil in enumaffils:
author_entry.append(enumaffils[affil])
else:
author_entry.append(affils)
return author_entry
def _parse_author_affil_back_propagate(author_list: List[List[str]],
back_prop: int) -> List[List[str]]:
"""Back propagate author affiliation.
Take the author list structure generated by parse_author_affil_split(..)
and propagate affiliation information backwards to preceeding author
entries where none was give. Stop before entry $back_prop to avoid
adding affiliation information to collaboration names.
given, eg:
a.b.first, c.d.second (affil)
implies
a.b.first (affil), c.d.second (affil)
and in more complex cases:
a.b.first, c.d.second (1), e.f.third, g.h.forth (2,3)
implies
a.b.first (1), c.d.second (1), e.f.third (2,3), g.h.forth (2,3)
"""
last_affil: List[str] = []
for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1):
author_entry = author_list[x]
if len(author_entry) > 3: # author has affiliation,store
last_affil = author_entry
elif last_affil:
# author doesn't have affil but later one did => copy
author_entry.extend(last_affil[3:])
return author_list
def split_authors(authors: str) -> List:
    """
    Split author string into authors entity lists.

    Take an author line as a string and return a reference to a list of the
    different name and affiliation blocks. While this does normalize spacing
    and 'and', it is a key feature that the set of strings returned can be
    concatenated to reproduce the original authors line. This code thus
    provides a very graceful degradation for badly formatted authors lines,
    as the text at least shows up.
    """
    if not authors:
        return []
    # split authors field into blocks with boundaries of ( and )
    aus = re.split(r'(\(|\))', authors)
    aus = list(filter(lambda x: x != '', aus))
    blocks = []
    if len(aus) == 1:
        blocks.append(authors)
    else:
        c = ''
        depth = 0
        for bit in aus:
            if bit == '':
                continue
            if bit == '(':  # track open parentheses
                depth += 1
                if depth == 1:
                    blocks.append(c)
                    c = '('
                else:
                    c = c + bit
            elif bit == ')':  # track close parentheses
                depth -= 1
                c = c + bit
                if depth == 0:
                    blocks.append(c)
                    c = ''
                else:  # haven't closed, so keep accumulating
                    continue
            else:
                c = c + bit
        if c:
            blocks.append(c)
    listx = []
    for block in blocks:
        block = re.sub(r'\s+', ' ', block)
        if re.match(r'^\(', block):  # it is a comment
            listx.append(block)
        else:  # it is a name
            block = re.sub(r',?\s+(and|\&)\s', ',', block)
            names = re.split(r'(,|:)\s*', block)
            for name in names:
                if not name:
                    continue
                name = name.strip()
                if name:
                    listx.append(name)
    # Recombine suffixes (Jr/Sr/II/III/IV...) that were separated with a
    # comma. NOTE: the previous pattern r'^(Jr\.?|Sr\.?\[IV]{2,})$' had a
    # typo ('\[' where '|' was intended) which made 'Sr' and roman-numeral
    # suffixes unreachable; fixed below.
    parts: List[str] = []
    for p in listx:
        if re.match(r'^(Jr\.?|Sr\.?|[IV]{2,})$', p) \
                and len(parts) >= 2 \
                and parts[-1] == ',' \
                and not re.match(r'\)$', parts[-2]):
            separator = parts.pop()
            last = parts.pop()
            parts.append("{}{} {}".format(last, separator, p))
        else:
            parts.append(p)
    return parts
def parse_authorline(authors: str) -> str:
    """
    Convert a complex author line into a simple UTF-8 one.

    The external facing function from this module. The raw metadata
    author line is parsed and re-emitted as 'Last, First; Last, First'.
    Affiliations and suffixes are dropped from the output.

    e.g. 'A. Losev, S. Shadrin, I. Shneiberg'
         -> 'Losev, A.; Shadrin, S.; Shneiberg, I.'
    """
    parsed = parse_author_affil_utf(authors)
    formatted = []
    for entry in parsed:
        # entry[:2] is [keyname, firstnames]; skip empty fields
        name_bits = [bit for bit in entry[:2] if bit]
        formatted.append(', '.join(name_bits))
    return '; '.join(formatted)
def _parse_article_authors(article_author):
    """
    Parse one (arxiv_id, author_line) tuple into [arxiv_id, parsed_authors].

    Any failure is logged and yields '' in place of the parsed list so a
    single bad record cannot abort a batch run.
    """
    try:
        parsed = parse_author_affil_utf(article_author[1])
        return [article_author[0], parsed]
    except Exception as err:
        logger.error(
            "Author split failed for article {}".format(article_author[0]))
        logger.exception(err)
        return [article_author[0], '']
def parse_authorline_parallel(article_authors, n_processes=None):
    """
    Parallelize `parse_authorline` and save the results to disk.

    Parameters
    ----------
    article_authors : list
        list of tuples (arXiv id, author strings from metadata)
    n_processes : int, optional
        number of processes (defaults to os.cpu_count())

    Side effects
    ------------
    Writes a gzipped JSON mapping {arxiv_id: parsed_authors} to
    DIR_OUTPUT/authors-parsed.json.gz.
    """
    logger.info(
        'Parsing author lines for {} articles...'.format(len(article_authors))
    )
    # use the Pool as a context manager so worker processes are always
    # terminated and reclaimed, even if parsing raises (the original
    # never closed the Pool, leaking workers)
    with Pool(n_processes) as pool:
        parsed = pool.map(_parse_article_authors, article_authors)
    outdict = {aid: auth for aid, auth in parsed}
    filename = os.path.join(DIR_OUTPUT, 'authors-parsed.json.gz')
    logger.info('Saving to {}'.format(filename))
    with gzip.open(filename, 'wb') as fout:
        fout.write(json.dumps(outdict).encode('utf-8'))
| 36.040426 | 179 | 0.611724 |
import re
import os
import gzip
import json
from itertools import dropwhile
from typing import Dict, Iterator, List, Tuple
from multiprocessing import Pool, cpu_count
PREFIX_MATCH = 'van|der|de|la|von|del|della|da|mac|ter|dem|di|vaziri'
def parse_author_affil(authors: str) -> List[List[str]]:
    """
    Split an author line into entries and back-propagate affiliations.

    Returns a list of [keyname, firstnames, suffix, affil...] entries.
    """
    split_result = _parse_author_affil_split(authors)
    return _parse_author_affil_back_propagate(**split_result)
def _parse_author_affil_split(author_line: str) -> Dict:
    """
    Split an author line into author entries with affiliations.

    Returns {'author_list': [[keyname, firstnames, suffix, affil...], ...],
             'back_prop': n} where n is the number of leading collaboration
    entries that must not receive back-propagated affiliations.
    """
    if not author_line:
        return {'author_list': [], 'back_prop': 0}
    names: List[str] = split_authors(author_line)
    if not names:
        return {'author_list': [], 'back_prop': 0}
    # collapse runs of comma separators produced by the splitter
    names = _remove_double_commas(names)
    # strip trailing comma tokens ...
    namesIter: Iterator[str] = reversed(
        list(dropwhile(lambda x: x == ',', reversed(names))))
    # ... and leading comma tokens
    names = list(dropwhile(lambda x: x == ',', namesIter))
    # drop tokens whose first char is ']', '(' or ',' (comments / stray
    # separators), then normalize whitespace/dot spacing in the survivors
    names = list(map(_tidy_name, filter(
        lambda x: re.match('^[^](,]', x), names)))
    # discard 'et al.' entries entirely
    names = list(filter(lambda n: not re.match(
        r'^\s*et\.?\s+al\.?\s*', n, flags=re.IGNORECASE), names))
    (names, author_list,
     back_propagate_affiliations_to) = _collaboration_at_start(names)
    # enumerated affiliations, e.g. '((1) affil1 (2) affil2)' at line end
    (enumaffils) = _enum_collaboration_at_end(author_line)
    # name-shape patterns, tried in decreasing order of specificity;
    # PREFIX_MATCH covers 'van', 'der', 'de', ... style surname prefixes
    patterns = [('double-prefix',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(' +
                 PREFIX_MATCH + r')\s(\S+)$'),
                ('name-prefix-name',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(\S+)$'),
                ('name-name-prefix',
                 r'^(.*)\s+(\S+)\s(I|II|III|IV|V|Sr|Jr|Sr\.|Jr\.)$'),
                ('name-name',
                 r'^(.*)\s+(\S+)$'), ]
    for name in names:
        # lazily evaluate the patterns; take the first that matches
        pattern_matches = ((mtype, re.match(m, name, flags=re.IGNORECASE))
                           for (mtype, m) in patterns)
        (mtype, match) = next(((mtype, m)
                               for (mtype, m) in pattern_matches
                               if m is not None), ('default', None))
        # build [keyname, firstname(s), suffix] from the match groups
        if match is None:
            author_entry = [name, '', '']
        elif mtype == 'double-prefix':
            s = '{} {} {}'.format(match.group(
                2), match.group(3), match.group(4))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-prefix-name':
            s = '{} {}'.format(match.group(2), match.group(3))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-name-prefix':
            author_entry = [match.group(2), match.group(1), match.group(3)]
        elif mtype == 'name-name':
            author_entry = [match.group(2), match.group(1), '']
        else:
            author_entry = [name, '', '']
        # append any affiliations found next to this name in the raw line
        author_entry = _add_affiliation(
            author_line, enumaffils, author_entry, name)
        author_list.append(author_entry)
    return {'author_list': author_list,
            'back_prop': back_propagate_affiliations_to}
def parse_author_affil_utf(authors: str) -> List:
    """
    Call parse_author_affil() and do TeX to UTF conversion.

    Output structure is the same but should be in UTF and not TeX.
    """
    if not authors:
        return []
    return list(map(lambda author: list(map(tex2utf, author)),
                    parse_author_affil(authors)))
def _remove_double_commas(items: List[str]) -> List[str]:
parts: List[str] = []
last = ''
for pt in items:
if pt == ',' and last == ',':
continue
else:
parts.append(pt)
last = pt
return parts
def _tidy_name(name: str) -> str:
name = re.sub(r'\s\s+', ' ', name)
name = re.sub(r'(?<!\\)\.(\S)', r'. \g<1>', name)
return name
def _collaboration_at_start(names: List[str]) \
        -> Tuple[List[str], List[List[str]], int]:
    """Peel 'X collaboration/group/team' entries off the front of names.

    Returns the remaining names, the author entries created for the
    collaborations, and how many were created (used as the lower bound
    for affiliation back-propagation).
    """
    author_list = []
    back_propagate_affiliations_to = 0
    while len(names) > 0:
        m = re.search(r'([a-z0-9\s]+\s+(collaboration|group|team))',
                      names[0], flags=re.IGNORECASE)
        if not m:
            break
        # record the collaboration as an author with no firstname/suffix
        author_list.append([m.group(1), '', ''])
        back_propagate_affiliations_to += 1
        names.pop(0)
        # also swallow a following ',' or ':' separator
        if names and (names[0] == ',' or names[0] == ':'):
            names.pop(0)
    return names, author_list, back_propagate_affiliations_to
def _enum_collaboration_at_end(author_line: str)->Dict:
    """Get separate set of enumerated affiliations from end of author_line.

    Indicated by '((', e.g. '... ((1) affil1 (2) affil2)'; returns a dict
    mapping '1' -> 'affil1', etc.
    """
    line_m = re.search(r'\(\s*\((.*)$', author_line)
    if not line_m:
        return {}
    enumaffils = {}
    affils = re.sub(r'\s*\)\s*$', '', line_m.group(1))
    # expect chunks like '1) affil1 '; non-matching chunks are discarded
    for affil in affils.split('('):
        m = re.match(r'^(\d+)\)\s*(\S.*\S)\s*$', affil)
        if m:
            # strip trailing punctuation/whitespace from the affiliation
            enumaffils[m.group(1)] = re.sub(r'[\.,\s]*$', '', m.group(2))
    return enumaffils
def _add_affiliation(author_line: str,
enumaffils: Dict,
author_entry: List[str],
name: str) -> List:
en = re.escape(name)
namerex = r'{}\s*\(([^\(\)]+)'.format(en.replace(' ', 's*'))
m = re.search(namerex, author_line, flags=re.IGNORECASE)
if not m:
return author_entry
# Now see if we have enumerated references (just commas, digits, &, and)
affils = m.group(1).rstrip().lstrip()
affils = re.sub(r'(&|and)/,', ',', affils, flags=re.IGNORECASE)
if re.match(r'^[\d,\s]+$', affils):
for affil in affils.split(','):
if affil in enumaffils:
author_entry.append(enumaffils[affil])
else:
author_entry.append(affils)
return author_entry
def _parse_author_affil_back_propagate(author_list: List[List[str]],
                                       back_prop: int) -> List[List[str]]:
    """Copy affiliations backwards onto authors that lack their own.

    Walks from the end of author_list down to (but not into) the first
    back_prop entries, so collaboration names never gain affiliations.
    e.g. 'a.first, b.second (affil)' implies 'a.first (affil), b.second (affil)'.
    """
    last_affil: List[str] = []
    for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1):
        author_entry = author_list[x]
        if len(author_entry) > 3:  # author has affiliation,store
            last_affil = author_entry
        elif last_affil:
            # author doesn't have affil but later one did => copy
            author_entry.extend(last_affil[3:])
    return author_list
def split_authors(authors: str) -> List:
    """
    Split an author string into name / separator / comment blocks.

    The returned strings can be concatenated (modulo normalized spacing
    and 'and') to reproduce the original line, so badly formatted author
    lines degrade gracefully.
    """
    if not authors:
        return []
    # split the field into blocks with boundaries of ( and )
    aus = re.split(r'(\(|\))', authors)
    aus = list(filter(lambda x: x != '', aus))
    blocks = []
    if len(aus) == 1:
        blocks.append(authors)
    else:
        c = ''
        depth = 0
        for bit in aus:
            if bit == '':
                continue
            if bit == '(':  # track open parentheses
                depth += 1
                if depth == 1:
                    blocks.append(c)
                    c = '('
                else:
                    c = c + bit
            elif bit == ')':  # track close parentheses
                depth -= 1
                c = c + bit
                if depth == 0:
                    blocks.append(c)
                    c = ''
                else:  # haven't closed, so keep accumulating
                    continue
            else:
                c = c + bit
        if c:
            blocks.append(c)
    listx = []
    for block in blocks:
        block = re.sub(r'\s+', ' ', block)
        if re.match(r'^\(', block):  # it is a comment
            listx.append(block)
        else:  # it is a name
            block = re.sub(r',?\s+(and|\&)\s', ',', block)
            names = re.split(r'(,|:)\s*', block)
            for name in names:
                if not name:
                    continue
                name = name.strip()
                if name:
                    listx.append(name)
    # Recombine suffixes (Jr/Sr/II/III/IV...) separated with a comma.
    # NOTE: the previous pattern r'^(Jr\.?|Sr\.?\[IV]{2,})$' had a typo
    # ('\[' where '|' was intended) making 'Sr' and roman-numeral
    # suffixes unreachable; fixed below.
    parts: List[str] = []
    for p in listx:
        if re.match(r'^(Jr\.?|Sr\.?|[IV]{2,})$', p) \
                and len(parts) >= 2 \
                and parts[-1] == ',' \
                and not re.match(r'\)$', parts[-2]):
            separator = parts.pop()
            last = parts.pop()
            parts.append("{}{} {}".format(last, separator, p))
        else:
            parts.append(p)
    return parts
def parse_authorline(authors: str) -> str:
    """Convert a raw author line to a simple UTF-8 'Last, First; ...' string.

    e.g. 'A. Losev, S. Shadrin' -> 'Losev, A.; Shadrin, S.'
    Affiliations and suffixes are dropped from the output.
    """
    names = parse_author_affil_utf(authors)
    return '; '.join([', '.join([q for q in n[:2] if q]) for n in names])
def _parse_article_authors(article_author):
    """Parse one (arxiv_id, author_line) tuple into [arxiv_id, parsed].

    Any failure is logged and yields '' in place of the parsed list so a
    single bad record cannot abort a parallel run.
    """
    try:
        return [article_author[0], parse_author_affil_utf(article_author[1])]
    except Exception as e:
        msg = "Author split failed for article {}".format(article_author[0])
        logger.error(msg)
        logger.exception(e)
        return [article_author[0], '']
def parse_authorline_parallel(article_authors, n_processes=None):
    """
    Parse many author lines in parallel and save the results.

    Parameters
    ----------
    article_authors : list
        list of tuples (arXiv id, author string from the metadata)
    n_processes : int, optional
        number of worker processes (defaults to os.cpu_count())

    Side effects
    ------------
    Writes {arxiv_id: parsed_authors} as gzipped JSON to
    DIR_OUTPUT/authors-parsed.json.gz.
    """
    logger.info(
        'Parsing author lines for {} articles...'.format(len(article_authors))
    )
    # context manager ensures worker processes are terminated and joined
    # even on error (the original never closed the Pool, leaking workers)
    with Pool(n_processes) as pool:
        parsed = pool.map(_parse_article_authors, article_authors)
    outdict = {aid: auth for aid, auth in parsed}
    filename = os.path.join(DIR_OUTPUT, 'authors-parsed.json.gz')
    logger.info('Saving to {}'.format(filename))
    with gzip.open(filename, 'wb') as fout:
        fout.write(json.dumps(outdict).encode('utf-8'))
| true | true |
f72fa578f87bc853943bee3e503ed704b0ffd3fc | 32,916 | py | Python | plugin/lighthouse/composer/shell.py | chubbymaggie/lighthouse | e6c494a0c8dd2aca09b71b981e8c0c03d9078cdd | [
"MIT"
] | 1 | 2017-10-27T23:02:29.000Z | 2017-10-27T23:02:29.000Z | plugin/lighthouse/composer/shell.py | yrp604/lighthouse | b92a25906fb2513d8bfc4454c41e6378984d9ad9 | [
"MIT"
] | null | null | null | plugin/lighthouse/composer/shell.py | yrp604/lighthouse | b92a25906fb2513d8bfc4454c41e6378984d9ad9 | [
"MIT"
] | null | null | null | from .parser import *
from lighthouse.util import *
#------------------------------------------------------------------------------
# Composing Shell
#------------------------------------------------------------------------------
class ComposingShell(QtWidgets.QWidget):
"""
The ComposingShell UI for interactive coverage composition.
This class ties together all the individual components that make up
the Composing Shell, wrapping it up in a nice portable widget. This
includes the label sitting at the head of the shell, the text box
(the shell, a.k.a ComposingLine), and the composition parser.
In theory, multiple ComposingShell objects could be instantiated and
placed in various dialogs, forms, views, etc. These shells are fairly
independent, but obviously must communicate with the director.
"""
def __init__(self, director, model, table=None):
super(ComposingShell, self).__init__()
self.setObjectName(self.__class__.__name__)
# external entities
self._director = director
self._palette = director._palette
self._model = model
self._table = table
# command / input
self._search_text = ""
self._command_timer = QtCore.QTimer()
# the last known user AST
self._last_ast = None
# composition parser related members
self._parser = CompositionParser()
self._parser_error = None
self._parsed_tokens = []
self._shorthand = []
# configure the widget for use
self._ui_init()
#--------------------------------------------------------------------------
# Properties
#--------------------------------------------------------------------------
    @property
    def text(self):
        """
        The current shell contents, as a plain Python str.
        """
        return str(self._line.toPlainText())
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
# initialize a monospace font to use with our widget(s)
self._font = MonospaceFont()
self._font_metrics = QtGui.QFontMetricsF(self._font)
# initialize our ui elements
self._ui_init_shell()
self._ui_init_completer()
self._ui_init_signals()
self._ui_layout()
def _ui_init_shell(self):
"""
Initialize the shell UI elements.
"""
# the composer label at the head of the shell
self._line_label = QtWidgets.QLabel("Composer")
self._line_label.setStyleSheet("QLabel { margin: 0 1ex 0 1ex }")
self._line_label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self._line_label.setFont(self._font)
self._line_label.setFixedWidth(self._line_label.sizeHint().width())
# the text box / shell / ComposingLine
self._line = ComposingLine()
# configure the shell background & default text color
palette = self._line.palette()
palette.setColor(QtGui.QPalette.Base, self._palette.overview_bg)
palette.setColor(QtGui.QPalette.Text, self._palette.composer_fg)
palette.setColor(QtGui.QPalette.WindowText, self._palette.composer_fg)
self._line.setPalette(palette)
def _ui_init_completer(self):
"""
Initialize the coverage hint UI elements.
"""
# NOTE/COMPAT:
if using_pyqt5:
self._completer_model = QtCore.QStringListModel([])
else:
self._completer_model = QtGui.QStringListModel([])
self._completer = QtWidgets.QCompleter(self)
self._completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
self._completer.setModelSorting(QtWidgets.QCompleter.CaseInsensitivelySortedModel)
self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._completer.setModel(self._completer_model)
self._completer.setWrapAround(False)
self._completer.popup().setFont(self._font)
self._completer.setWidget(self._line)
def _ui_init_signals(self):
"""
Connect UI signals.
"""
# text changed in the shell
self._line.textChanged.connect(self._ui_shell_text_changed)
# cursor position changed in the shell
self._line.cursorPositionChanged.connect(self._ui_shell_cursor_changed)
# return key pressed in the shell
self._line.returnPressed.connect(self._ui_shell_return_pressed)
# register for cues from the director
self._director.coverage_created(self._internal_refresh)
self._director.coverage_deleted(self._internal_refresh)
self._director.coverage_modified(self._internal_refresh)
# register for cues from the model
self._model.layoutChanged.connect(self._ui_shell_text_changed)
def _ui_layout(self):
"""
Layout the major UI elements of the widget.
"""
# create a qt layout for the 'compser' (the shell)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0,0,0,0)
#
# Shell Layout:
# [ [ 'Composer' ][ ComposingLine ... ] ]
#
layout.addWidget(self._line_label)
layout.addWidget(self._line)
# apply the widget layout
self.setLayout(layout)
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
    def refresh(self):
        """
        Public refresh of the shell (delegates to the internal refresh).
        """
        self._internal_refresh()
@idafast
def _internal_refresh(self):
"""
Internal refresh of the shell.
"""
self._refresh_hint_list()
def _refresh_hint_list(self):
"""
Refresh the shell coverage hint contents.
"""
# get the most recent coverage strings from the director
detailed_strings = [self._director.get_coverage_string(x) for x in self._director.coverage_names]
self._completer_model.setStringList(detailed_strings)
self._shorthand = [x[0] for x in detailed_strings]
# queue a UI coverage hint if necessary
self._ui_hint_coverage_refresh()
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def _ui_hint_tooltip(self, text, index):
"""
Display a non-intrusive error tooltip to the user.
"""
#
# hide the coverage hint if it is visible. things can look cluttered
# down by the shell if we're trying to show both.
#
self._ui_hint_coverage_hide()
# create a cursor and move it to the parse error location on the shell
cursor_tip = QtGui.QTextCursor(self._line.document())
cursor_tip.setPosition(index)
#
# using our carefully positioned cursor, we can now extract the relative
# pixel position of the parse error on the shell and map its global
# (absolute) pixel position on the screen.
#
position = self._line.mapToGlobal(self._line.cursorRect(cursor_tip).topLeft())
# draw the tooltip at the computed parse error position
x = QtWidgets.QToolTip.showText(position, text)
    def _ui_shell_cursor_changed(self):
        """
        Cursor position changed in the shell; re-evaluate the coverage hint.
        """
        self._ui_hint_coverage_refresh()
def _ui_shell_text_changed(self):
"""
Text changed in the shell.
"""
text = self.text
#
# a Search, eg '/DnsParse_'
#
if self.is_search(text):
self._execute_search(text)
self._highlight_search()
return
# not a search query clear any lingering filters for it
else:
self._model.filter_string("")
#
# a Jump, eg '0x804010a' or 'sub_1400016F0'
#
if self.is_jump(text) and self._table:
self._line_label.setText("Jump")
self._highlight_jump()
return
#
# a Composition, eg '(A | B) - C'
#
self._execute_composition(text)
self._highlight_composition()
self._ui_hint_coverage_refresh()
def _ui_shell_return_pressed(self):
"""
Return / Enter pressed in the shell.
The user pressed 'enter' in the shell, this means we want to try
and save their composition as a new coverage set to the director.
"""
text = self.text
# a search query has no accept state, nothing to do
if self.is_search(text):
return
# jump to the function entry containing the requested address
if self.is_jump(text) and self._table:
self._execute_jump(text)
return
# attempt to save the user crafted composition
self._accept_composition()
#--------------------------------------------------------------------------
# Search
#--------------------------------------------------------------------------
@staticmethod
def is_search(text):
"""
Check if a string (text) looks like a search query.
A search query is used to filter functions listed in the coverage
overview table based on their name.
eg: text = '/DnsParse_'
"""
return (text and text[0] == "/")
def _execute_search(self, text):
"""
Execute the search semantics.
"""
self._search_text = text[1:]
#
# if the user input is only "/" (starting to type something), hint
# that they are entering the Search mode. nothing else to do!
#
if text == "/":
self._line_label.setText("Search")
return
#
# stop an existing command timer if there is one running. we are about
# to schedule a new one or execute inline. so the old/deferred command
# is no longer needed.
#
self._command_timer.stop()
#
# if the functions list is HUGE, we want to defer the filtering until
# we think the user has stopped typing as each pass may take awhile
# to compute (while blocking the main thread...)
#
if self._director.metadata.is_big():
self._command_timer = singleshot(1000, self._execute_search_internal)
self._command_timer.start()
#
# the database is not *massive*, let's execute the search immediately
#
else:
self._execute_search_internal()
# done
return
def _execute_search_internal(self):
"""
Execute the actual search filtering & coverage metrics.
"""
# the given text is a real search query, apply it as a filter now
self._model.filter_string(self._search_text)
# compute coverage % of the visible (filtered) results
percent = self._model.get_modeled_coverage_percent()
# show the coverage % of the search results in the shell label
self._line_label.setText("%1.2f%%" % percent)
def _highlight_search(self):
"""
Syntax highlight a search query.
"""
self._line.setUpdatesEnabled(False)
################# UPDATES DISABLED #################
# clear any existing text colors
self._color_clear()
# color search based on if there are any matching results
if self._model.rowCount():
self._color_text(self._palette.valid_text, start=1)
else:
self._color_text(self._palette.invalid_text, start=1)
################# UPDATES ENABLED #################
self._line.setUpdatesEnabled(True)
# done
return
#--------------------------------------------------------------------------
# Jump
#--------------------------------------------------------------------------
def is_jump(self, text):
"""
Check if a string (text) looks like a jump query.
A jump query is used to jump to a function in the coverage overview
table based on their address.
eg: text = '0x8040100', or 'sub_1400016F0'
"""
return self._compute_jump(text) != 0
def _compute_jump(self, text):
"""
Compute the function address destination of a jump target from a string.
eg: text = '0x8040100', or 'sub_8040100' --> jump to function 0x8040100
"""
text = text.strip()
#
# if the user input is less than two characters, we automatically
# dismiss it as a valid jump target. the primary reasons for this
# is to avoid possible shorthand parsing clashes.
#
# eg: imagine the user has a valid function named 'A' that they want to
# jump to - well we actually choose to ignore that request here.
#
# We favor the importance of shorthand symbols as used in compositions.
#
if len(text) < 2:
return 0
#
# attempt to convert the user input from a hex number eg '0x8040105'
# to its corresponding function address validated by the director
#
try:
address = int(text, 16)
except ValueError:
pass
else:
function_metadata = self._director.metadata.get_function(address)
if function_metadata:
return function_metadata.address
#
# the user string did not translate to a parsable hex number (address)
# or the function it falls within could not be found in the director.
#
# attempt to convert the user input from a function name, eg 'main',
# or 'sub_1400016F0' to a function address validated by the director.
#
# special case to make 'sub_*' prefixed user inputs case insensitive
if text.lower().startswith("sub_"):
text = "sub_" + text[4:].upper()
# look up the text function name within the director's metadata
function_metadata = self._director.metadata.get_function_by_name(text)
if function_metadata:
return function_metadata.address
#
# the user string did not translate to a function name that could
# be found in the director.
#
# failure, the user input (text) isn't a jump ...
return 0
def _execute_jump(self, text):
"""
Execute the jump semantics.
"""
assert self._table
# retrieve the jump target
function_address = self._compute_jump(text)
assert function_address
# select the function entry in the coverage overview table
self._table.selectRow(self._model.func2row[function_address])
self._table.scrollTo(
self._table.currentIndex(),
QtWidgets.QAbstractItemView.PositionAtCenter
)
def _highlight_jump(self):
"""
Syntax highlight a jump query.
"""
self._line.setUpdatesEnabled(False)
################# UPDATES DISABLED #################
# clear any existing text colors
self._color_clear()
# color jump
self._color_text(self._palette.valid_text)
################# UPDATES ENABLED #################
self._line.setUpdatesEnabled(True)
# done
return
#--------------------------------------------------------------------------
# Composition
#--------------------------------------------------------------------------
def _execute_composition(self, text):
"""
Execute a composition query.
"""
# reset the shell head text
self._line_label.setText("Composer")
# attempt to parse & execute a composition
try:
# clear any previous parse attempts/failures
self._parser_error = None
# attempt to parse the user input against the composition grammar
self._parsed_tokens, ast = self._parser.parse(text, self._shorthand)
# if the AST changed since the last parse, inform the director
if not ast_equal(self._last_ast, ast):
self._director.cache_composition(ast)
# save the newly parsed ast
self._last_ast = ast
# parse failure
except ParseError as e:
self._parser_error = e
#
# even though we failed to generate an AST that can be evaluated
# by the director, we still want to save the list of tokens parsed.
# these tokens will still be used for basic syntax highlighting.
#
self._parsed_tokens = e.parsed_tokens
# done
return True
def _highlight_composition(self):
"""
Syntax highlight a composition.
"""
self._line.setUpdatesEnabled(False)
################# UPDATES DISABLED #################
# clear any existing text colors
self._color_clear()
# the parse failed, so there will be invalid text to highlight
if self._parser_error:
self._color_invalid()
# paint any valid tokens
self._color_tokens()
################# UPDATES ENABLED #################
self._line.setUpdatesEnabled(True)
# done
return
def _accept_composition(self):
"""
Save the user crafted composition to the director.
"""
#
# if there's an existing parse error on the shell, there's nothing we
# can do but pop a hint for the user and have them try again
#
if self._parser_error:
self._ui_hint_tooltip("Invalid Composition", self._parser_error.error_index)
return
#
# While the user is picking a name for the new composite, we might as well
# try and cache it asynchronously :-). kick the caching off now.
#
self._director.cache_composition(self._last_ast, force=True)
#
# the user has entered a valid composition that we have parsed. we
# want to save this to the director, but first we need a name for the
# new composition. pop a simple dialog prompting the user for a
# composition name
#
ok, coverage_name = prompt_string(
"Composition Name:",
"Please enter a name for this composition",
"COMP_%s" % self.text
)
# the user did not enter a coverage name or hit cancel - abort the save
if not (ok and coverage_name):
return
#
# all good, ask the director to save the last composition
# composition under the given coverage name
#
self._director.add_composition(coverage_name, self._last_ast)
# switch to the newly created composition
self._director.select_coverage(coverage_name)
#--------------------------------------------------------------------------
# Coverage Hint
#--------------------------------------------------------------------------
def _ui_hint_coverage_refresh(self):
"""
Draw the coverage hint as applicable.
"""
#
# if the shell is not focused (or empty), don't bother to show a hint
# as it frequently gets in the way and is really annoying...
#
if not (self._line.hasFocus() or self.text):
return
# scrape info from the current shell text state
cursor_index = self._line.textCursor().position()
text_token = self._get_cursor_coverage_token(cursor_index)
#
# if the user's text cursor is touching the index that produced the
# parse error (assuming there was one) ...
#
if self._parser_error and self._parser_error.error_index == cursor_index:
#
# if the parse error indicates the parse failed because it expected
# a coverage token but didn't get one, show the complete coverage
# list. The user should know their list of options bro.
#
if self._parser_error.expected == TokenCoverageSingle:
self._ui_hint_coverage_show()
#
# if the user's text cursor is touching a valid coverage token, we want
# to pop a hint that shows the details for the coverage matching that
# explicit token / shorthand. It's a subtle convenience :-)
#
elif text_token and (text_token.type == "COVERAGE_TOKEN"):
self._ui_hint_coverage_show(text_token.value)
#
# if the user's text cursor is not touching any text index of interest,
# there's no reason for us to show any sort of hints. be sure any hints
# are hidden.
#
else:
self._ui_hint_coverage_hide()
# done
return
def _ui_hint_coverage_show(self, prefix=''):
"""
Show the coverage hint at the shell's cursor position.
Optionally, one can specify a prefix (eg, the shorthand 'A') to
limit the scope of coverage items hinted.
"""
#
# if the completer is already visible and showing the requested prefix,
# then we have nothing to do. this will help mitigate refresh flickers
#
if self._completer.popup().isVisible() and \
self._completer.completionPrefix() == prefix:
return
# if there was anything previously selected in the popup, clear it now
self._completer.popup().clearSelection()
# show only hints matching the given prefix
# eg: prefix = 'A' will show only entry 'A - 42.30% - drcov.8...'
self._completer.setCompletionPrefix(prefix)
# specify the position and size of the hint popup
cr = self._line.cursorRect()
cr.setWidth(self._completer.popup().sizeHintForColumn(0))
# show the coverage hint popup
self._completer.complete(cr)
self._completer.popup().repaint() # reduces hint flicker on the Hot Shell
# done
return
def _ui_hint_coverage_hide(self):
"""
Hide the coverage hint.
"""
self._completer.popup().hide()
def _get_cursor_coverage_token(self, index):
"""
Get the coverage token touching the cursor (if there is one).
"""
# iterate through the list of parsed tokens on the line edit / shell
for text_token in self._parsed_tokens:
# skip any non-coverage text tokens
if not text_token.type == "COVERAGE_TOKEN":
continue
# if this coverage text token touches our cursor, return it
if text_token.span[0] <= index <= text_token.span[1]:
return text_token
# no coverage token on either side of the cursor
return None
#--------------------------------------------------------------------------
# Composition Highlighting
#--------------------------------------------------------------------------
    def _color_tokens(self):
        """
        Syntax highlight the valid composition tokens.

        Walks self._parsed_tokens and bolds/colors each token per the
        active palette, using a QTextCursor to apply the formatting.
        Shell signals are blocked for the duration so the repainting
        does not re-trigger textChanged/cursorMoved handlers.
        """
        # more code-friendly, readable aliases
        TOKEN_COLORS = self._palette.TOKEN_COLORS
        #
        # in order to syntax highlight text of interest, we must use a text
        # cursor as the vehicle to move around the text box (shell) and
        # manipulate its contents (eg, painting colors)
        #
        # this is simply the way Qt exposes this functionality
        #
        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()
        # configure text formatting properties we want our cursor to apply
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold) # bolds text we 'type'
        #
        # we are about to start painting our text, but we want to disable the
        # shell from emitting any textChanged/cursorMoved kind of signals
        # that originate from our painting code.
        #
        # we use the blockSignals gateways below to disable/enable the signals
        # for the duration of our painting.
        #
        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################
        # iterate through every parsed token, and paint it
        for token in self._parsed_tokens:
            # if the palette doesn't define a color for this token, ignore it
            if token.type not in TOKEN_COLORS:
                continue
            # alias the start and end indexes of the text token to paint
            token_start, token_end = token.span
            # 'click' and 'drag' to select the token text
            cursor.setPosition(token_start, QtGui.QTextCursor.MoveAnchor)
            cursor.setPosition(token_end, QtGui.QTextCursor.KeepAnchor)
            # configure the colors/style for this explicit token
            # (background painting was deliberately disabled in favor of
            # foreground-only coloring)
            #highlight.setBackground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
            highlight.setForeground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
            cursor.setCharFormat(highlight)
        #
        # we are done painting all the parsed tokens. let's restore the user
        # cursor back to its original state so they are none-the-wiser
        #
        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)
        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)
        # done
        return
    def _color_invalid(self):
        """
        Highlight the invalid (un-parsable) text.

        Precondition: self._parser_error is set (asserted below); its
        error_index marks where the un-parsable text begins.
        Please read through the _color_tokens() function for a more
        complete walkthrough of the text painting process.
        """
        assert self._parser_error
        # the invalid text starts from the token that caused a parse error
        invalid_start = self._parser_error.error_index
        invalid_text = self.text[invalid_start:]
        # no invalid text? nothing to highlight I guess!
        if not invalid_text:
            return
        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()
        # setup the invalid text highlighter
        invalid_color = self._palette.invalid_highlight
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold)
        highlight.setBackground(QtGui.QBrush(QtGui.QColor(invalid_color)))
        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################
        # select the invalid text
        cursor.setPosition(invalid_start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(len(self.text), QtGui.QTextCursor.KeepAnchor)
        # insert a highlighted version of the invalid text
        cursor.setCharFormat(highlight)
        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)
        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)
        # done
        return
#--------------------------------------------------------------------------
# General Highlighting
#--------------------------------------------------------------------------
    def _color_clear(self):
        """
        Clear any existing text colors (delegates to _color_text with
        no color, which resets formatting over the whole line).
        """
        self._color_text()
    def _color_text(self, color=None, start=0, end=0):
        """
        Color shell text with the given color.

        :param color: foreground color to apply; None clears coloring
        :param start: index of the first character to style
        :param end: index to stop styling at; 0 means 'end of input'
        """
        # if no end was specified, apply the style till the end of input
        # (0 is used as a sentinel, so a zero-length span cannot be asked
        # for explicitly)
        if end == 0:
            end = len(self.text)
        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()
        # setup a simple font coloring (or clearing) text format
        simple = QtGui.QTextCharFormat()
        if color:
            simple.setForeground(QtGui.QBrush(QtGui.QColor(color)))
        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################
        # select the entire line
        cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
        # set all the text to the simple format
        cursor.setCharFormat(simple)
        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        self._line.setTextCursor(cursor)
        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)
        # done
        return
#------------------------------------------------------------------------------
# Composing Line
#------------------------------------------------------------------------------
class ComposingLine(QtWidgets.QPlainTextEdit):
    """
    The textbox UI where user compositions are entered (typed).

    While a QLineEdit may appear to be more appropriate for our
    'Composing Shell', its support for syntax highlighting like features
    is completely absent.

    QPlainTextEdit has much better support for coloring or highlighting
    entered text, so we subclass from it and make a best effort attempt
    to make it appear and act like a QLineEdit 'shell'.
    """
    #
    # QLineEdit has a signal called 'returnPressed' which fires when the
    # user hits 'return' or 'enter'. This is a convenient signal, but
    # QPlainTextEdit does *not* have an equivalent.
    #
    # We define and fire this signal ourself for consistency and the same
    # conveniences as the one QLineEdit offers.
    #
    returnPressed = QtCore.pyqtSignal()
    def __init__(self, parent=None):
        super(ComposingLine, self).__init__(parent)
        self.setObjectName(self.__class__.__name__)
        # configure the widget for use
        self._ui_init()
    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------
    def _ui_init(self):
        """
        Initialize UI elements.
        """
        # initialize a monospace font to use with our widget(s)
        self._font = MonospaceFont()
        self._font_metrics = QtGui.QFontMetricsF(self._font)
        self.setFont(self._font)
        # configure the QPlainTextEdit to appear and act as much like a
        # QLineEdit as possible (a single line text box); the block count
        # of 1 is what actually enforces the single-line constraint
        self.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setTabChangesFocus(True)
        self.setMaximumBlockCount(1)
        # set the height of the textbox based on some arbitrary math :D
        # (font height plus the document margins, minus a small fudge)
        LINE_PADDING = self.document().documentMargin()*2
        line_height = self._font_metrics.height() + LINE_PADDING - 2
        self.setFixedHeight(line_height)
    #--------------------------------------------------------------------------
    # QPlainTextEdit Overloads
    #--------------------------------------------------------------------------
    def keyPressEvent(self, e):
        """
        Overload of the key press event.

        :param e: the QKeyEvent delivered by Qt
        """
        # trap the return/enter key event
        if e.key() == QtCore.Qt.Key_Return or \
           e.key() == QtCore.Qt.Key_Enter:
            #
            # fire our convenience signal notifying listeners that the user
            # pressed enter. this signal firing indicates the user is
            # probably trying to complete their query / input.
            #
            self.returnPressed.emit()
            #
            # now we must consume the keypress so it doesn't get passed on
            # to any other widgets/handlers/put in the text box
            #
            e.accept()
        # business as usual
        else:
            super(ComposingLine, self).keyPressEvent(e)
    def timerEvent(self, e):
        """
        Stubbed out to prevent the QPlainTextEdit selection autoscroll.
        """
        return
| 32.981964 | 105 | 0.571363 | from .parser import *
from lighthouse.util import *
class ComposingShell(QtWidgets.QWidget):
def __init__(self, director, model, table=None):
super(ComposingShell, self).__init__()
self.setObjectName(self.__class__.__name__)
self._director = director
self._palette = director._palette
self._model = model
self._table = table
self._search_text = ""
self._command_timer = QtCore.QTimer()
self._last_ast = None
self._parser = CompositionParser()
self._parser_error = None
self._parsed_tokens = []
self._shorthand = []
self._ui_init()
    @property
    def text(self):
        """
        The current contents of the composing shell, as a plain str.
        """
        return str(self._line.toPlainText())
def _ui_init(self):
self._font = MonospaceFont()
self._font_metrics = QtGui.QFontMetricsF(self._font)
self._ui_init_shell()
self._ui_init_completer()
self._ui_init_signals()
self._ui_layout()
def _ui_init_shell(self):
self._line_label = QtWidgets.QLabel("Composer")
self._line_label.setStyleSheet("QLabel { margin: 0 1ex 0 1ex }")
self._line_label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self._line_label.setFont(self._font)
self._line_label.setFixedWidth(self._line_label.sizeHint().width())
self._line = ComposingLine()
palette = self._line.palette()
palette.setColor(QtGui.QPalette.Base, self._palette.overview_bg)
palette.setColor(QtGui.QPalette.Text, self._palette.composer_fg)
palette.setColor(QtGui.QPalette.WindowText, self._palette.composer_fg)
self._line.setPalette(palette)
def _ui_init_completer(self):
if using_pyqt5:
self._completer_model = QtCore.QStringListModel([])
else:
self._completer_model = QtGui.QStringListModel([])
self._completer = QtWidgets.QCompleter(self)
self._completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
self._completer.setModelSorting(QtWidgets.QCompleter.CaseInsensitivelySortedModel)
self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._completer.setModel(self._completer_model)
self._completer.setWrapAround(False)
self._completer.popup().setFont(self._font)
self._completer.setWidget(self._line)
def _ui_init_signals(self):
self._line.textChanged.connect(self._ui_shell_text_changed)
self._line.cursorPositionChanged.connect(self._ui_shell_cursor_changed)
self._line.returnPressed.connect(self._ui_shell_return_pressed)
self._director.coverage_created(self._internal_refresh)
self._director.coverage_deleted(self._internal_refresh)
self._director.coverage_modified(self._internal_refresh)
self._model.layoutChanged.connect(self._ui_shell_text_changed)
def _ui_layout(self):
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0,0,0,0)
layout.addWidget(self._line_label)
layout.addWidget(self._line)
self.setLayout(layout)
def refresh(self):
self._internal_refresh()
@idafast
def _internal_refresh(self):
self._refresh_hint_list()
def _refresh_hint_list(self):
detailed_strings = [self._director.get_coverage_string(x) for x in self._director.coverage_names]
self._completer_model.setStringList(detailed_strings)
self._shorthand = [x[0] for x in detailed_strings]
self._ui_hint_coverage_refresh()
def _ui_hint_tooltip(self, text, index):
#
self._ui_hint_coverage_hide()
# create a cursor and move it to the parse error location on the shell
cursor_tip = QtGui.QTextCursor(self._line.document())
cursor_tip.setPosition(index)
#
# using our carefully positioned cursor, we can now extract the relative
# pixel position of the parse error on the shell and map its global
# (absolute) pixel position on the screen.
#
position = self._line.mapToGlobal(self._line.cursorRect(cursor_tip).topLeft())
# draw the tooltip at the computed parse error position
x = QtWidgets.QToolTip.showText(position, text)
def _ui_shell_cursor_changed(self):
self._ui_hint_coverage_refresh()
def _ui_shell_text_changed(self):
text = self.text
#
# a Search, eg '/DnsParse_'
#
if self.is_search(text):
self._execute_search(text)
self._highlight_search()
return
# not a search query clear any lingering filters for it
else:
self._model.filter_string("")
#
# a Jump, eg '0x804010a' or 'sub_1400016F0'
#
if self.is_jump(text) and self._table:
self._line_label.setText("Jump")
self._highlight_jump()
return
#
# a Composition, eg '(A | B) - C'
#
self._execute_composition(text)
self._highlight_composition()
self._ui_hint_coverage_refresh()
def _ui_shell_return_pressed(self):
text = self.text
# a search query has no accept state, nothing to do
if self.is_search(text):
return
# jump to the function entry containing the requested address
if self.is_jump(text) and self._table:
self._execute_jump(text)
return
# attempt to save the user crafted composition
self._accept_composition()
#--------------------------------------------------------------------------
# Search
#--------------------------------------------------------------------------
@staticmethod
def is_search(text):
return (text and text[0] == "/")
def _execute_search(self, text):
self._search_text = text[1:]
#
# if the user input is only "/" (starting to type something), hint
# that they are entering the Search mode. nothing else to do!
#
if text == "/":
self._line_label.setText("Search")
return
#
# stop an existing command timer if there is one running. we are about
# to schedule a new one or execute inline. so the old/deferred command
# is no longer needed.
#
self._command_timer.stop()
#
# if the functions list is HUGE, we want to defer the filtering until
# we think the user has stopped typing as each pass may take awhile
# to compute (while blocking the main thread...)
#
if self._director.metadata.is_big():
self._command_timer = singleshot(1000, self._execute_search_internal)
self._command_timer.start()
#
# the database is not *massive*, let's execute the search immediately
else:
self._execute_search_internal()
return
def _execute_search_internal(self):
self._model.filter_string(self._search_text)
percent = self._model.get_modeled_coverage_percent()
self._line_label.setText("%1.2f%%" % percent)
def _highlight_search(self):
self._line.setUpdatesEnabled(False)
self._table.scrollTo(
self._table.currentIndex(),
QtWidgets.QAbstractItemView.PositionAtCenter
)
def _highlight_jump(self):
self._line.setUpdatesEnabled(False)
ady visible and showing the requested prefix,
# then we have nothing to do. this will help mitigate refresh flickers
#
if self._completer.popup().isVisible() and \
self._completer.completionPrefix() == prefix:
return
# if there was anything previously selected in the popup, clear it now
self._completer.popup().clearSelection()
# show only hints matching the given prefix
# eg: prefix = 'A' will show only entry 'A - 42.30% - drcov.8...'
self._completer.setCompletionPrefix(prefix)
# specify the position and size of the hint popup
cr = self._line.cursorRect()
cr.setWidth(self._completer.popup().sizeHintForColumn(0))
# show the coverage hint popup
self._completer.complete(cr)
self._completer.popup().repaint() # reduces hint flicker on the Hot Shell
# done
return
def _ui_hint_coverage_hide(self):
self._completer.popup().hide()
def _get_cursor_coverage_token(self, index):
# iterate through the list of parsed tokens on the line edit / shell
for text_token in self._parsed_tokens:
# skip any non-coverage text tokens
if not text_token.type == "COVERAGE_TOKEN":
continue
# if this coverage text token touches our cursor, return it
if text_token.span[0] <= index <= text_token.span[1]:
return text_token
# no coverage token on either side of the cursor
return None
#--------------------------------------------------------------------------
# Composition Highlighting
#--------------------------------------------------------------------------
def _color_tokens(self):
# more code-friendly, readable aliases
TOKEN_COLORS = self._palette.TOKEN_COLORS
#
# in order to syntax highlight text of interest, we must use a text
# cursor as the vehicle to move around the text box (shell) and
# manipulate its contents (eg, painting colors)
#
# this is simply the way Qt exposes this functionality
#
# alias the user cursor, and save its original (current) position
cursor = self._line.textCursor()
cursor_position = cursor.position()
# configure text formatting properties we want our cursor to apply
highlight = QtGui.QTextCharFormat()
highlight.setFontWeight(QtGui.QFont.Bold) # bolds text we 'type'
#
# we are about to start painting our text, but we want to disable the
# shell from emitting any textChanged/cursorMoved kind of signals
# that originate from our painting code.
#
# we use the blockSignals gateways below to disable/enable the signals
# for the duration of our painting.
#
self._line.blockSignals(True)
################# UPDATES DISABLED #################
# iterate through every parsed token, and paint it
for token in self._parsed_tokens:
# if the palette doesn't define a color for this token, ignore it
if token.type not in TOKEN_COLORS:
continue
token_start, token_end = token.span
cursor.setPosition(token_start, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(token_end, QtGui.QTextCursor.KeepAnchor)
highlight.setForeground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
cursor.setCharFormat(highlight)
# cursor back to its original state so they are none-the-wiser
#
cursor.setPosition(cursor_position)
cursor.setCharFormat(QtGui.QTextCharFormat())
self._line.setTextCursor(cursor)
################# UPDATES ENABLED #################
self._line.blockSignals(False)
# done
return
def _color_invalid(self):
assert self._parser_error
# the invalid text starts from the token that caused a parse error
invalid_start = self._parser_error.error_index
invalid_text = self.text[invalid_start:]
# no invalid text? nothing to highlight I guess!
if not invalid_text:
return
# alias the user cursor, and save its original (current) position
cursor = self._line.textCursor()
cursor_position = cursor.position()
# setup the invalid text highlighter
invalid_color = self._palette.invalid_highlight
highlight = QtGui.QTextCharFormat()
highlight.setFontWeight(QtGui.QFont.Bold)
highlight.setBackground(QtGui.QBrush(QtGui.QColor(invalid_color)))
self._line.blockSignals(True)
################# UPDATES DISABLED #################
# select the invalid text
cursor.setPosition(invalid_start, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(len(self.text), QtGui.QTextCursor.KeepAnchor)
# insert a highlighted version of the invalid text
cursor.setCharFormat(highlight)
# reset the cursor position & style
cursor.setPosition(cursor_position)
cursor.setCharFormat(QtGui.QTextCharFormat())
self._line.setTextCursor(cursor)
################# UPDATES ENABLED #################
self._line.blockSignals(False)
# done
return
#--------------------------------------------------------------------------
# General Highlighting
#--------------------------------------------------------------------------
def _color_clear(self):
self._color_text()
def _color_text(self, color=None, start=0, end=0):
# if no end was specified, apply the style till the end of input
if end == 0:
end = len(self.text)
# alias the user cursor, and save its original (current) position
cursor = self._line.textCursor()
cursor_position = cursor.position()
# setup a simple font coloring (or clearing) text format
simple = QtGui.QTextCharFormat()
if color:
simple.setForeground(QtGui.QBrush(QtGui.QColor(color)))
self._line.blockSignals(True)
################# UPDATES DISABLED #################
# select the entire line
cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
# set all the text to the simple format
cursor.setCharFormat(simple)
# reset the cursor position & style
cursor.setPosition(cursor_position)
self._line.setTextCursor(cursor)
################# UPDATES ENABLED #################
self._line.blockSignals(False)
# done
return
#------------------------------------------------------------------------------
# Composing Line
#------------------------------------------------------------------------------
class ComposingLine(QtWidgets.QPlainTextEdit):
#
# QLineEdit has a signal called 'returnPressed' which fires when the
# user hits 'return' or 'enter'. This is a convenient signal, but
# QPlainTextEdit does *not* have an equivalent.
#
# We define and fire this signal ourself for consistency and the same
# conveniences as the one QLineEdit offers.
#
returnPressed = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(ComposingLine, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
# configure the widget for use
self._ui_init()
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
# initialize a monospace font to use with our widget(s)
self._font = MonospaceFont()
self._font_metrics = QtGui.QFontMetricsF(self._font)
self.setFont(self._font)
# configure the QPlainTextEdit to appear and act as much like a
# QLineEdit as possible (a single line text box)
self.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setTabChangesFocus(True)
self.setMaximumBlockCount(1)
# set the height of the textbox based on some arbitrary math :D
LINE_PADDING = self.document().documentMargin()*2
line_height = self._font_metrics.height() + LINE_PADDING - 2
self.setFixedHeight(line_height)
#--------------------------------------------------------------------------
# QPlainTextEdit Overloads
#--------------------------------------------------------------------------
def keyPressEvent(self, e):
# trap the return/enter key event
if e.key() == QtCore.Qt.Key_Return or \
e.key() == QtCore.Qt.Key_Enter:
#
# fire our convenience signal notifying listerns that the user
# pressed enter. this signal firing indicates the user is
# probably trying to complete their query / input.
#
self.returnPressed.emit()
#
# now we must consume the keypress so it doesn't get passed on
e.accept()
else:
super(ComposingLine, self).keyPressEvent(e)
def timerEvent(self, e):
return
| true | true |
f72fa5c26a43311b6bb327f0a6c60d08da6bf7f5 | 122 | py | Python | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# flake8: noqa
from __future__ import absolute_import
from jobbing.models_remote.zip_code import ZipCode | 20.333333 | 50 | 0.819672 |
from __future__ import absolute_import
from jobbing.models_remote.zip_code import ZipCode | true | true |
f72fa5d84e135d78f70840c368783f3034e3d49b | 2,946 | py | Python | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AOIinfo(object):
    """
    Value object for an AOI (area of interest) record exchanged with the
    Alipay open API.

    All six fields are optional. ``to_alipay_dict`` emits only fields
    holding truthy values; ``from_alipay_dict`` rebuilds an instance
    from a (possibly partial) response dict.
    """

    # serialized field names, shared by both dict-conversion helpers;
    # keeping them in one place removes the six near-identical
    # copy/paste branches the original conversion methods carried
    _FIELDS = ('adcode', 'area', 'distance', 'id', 'location', 'name')

    def __init__(self):
        self._adcode = None    # administrative district code
        self._area = None      # area of the AOI
        self._distance = None  # distance from the query point
        self._id = None        # unique AOI identifier
        self._location = None  # center coordinate of the AOI
        self._name = None      # human readable AOI name

    @property
    def adcode(self):
        return self._adcode

    @adcode.setter
    def adcode(self, value):
        self._adcode = value

    @property
    def area(self):
        return self._area

    @area.setter
    def area(self, value):
        self._area = value

    @property
    def distance(self):
        return self._distance

    @distance.setter
    def distance(self, value):
        self._distance = value

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def location(self):
        return self._location

    @location.setter
    def location(self, value):
        self._location = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        """
        Serialize to a plain dict, skipping unset (falsy) fields.

        NOTE(review): matching the original, a falsy-but-meaningful
        value (eg, distance == 0) is dropped -- confirm against the API.
        """
        params = dict()
        for attr in self._FIELDS:
            value = getattr(self, attr)
            if not value:
                continue
            # nested API objects know how to serialize themselves
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[attr] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """
        Build an AOIinfo from a response dict.

        :param d: dict payload, or a falsy value
        :return: populated AOIinfo, or None when *d* is empty/missing
        """
        if not d:
            return None
        o = AOIinfo()
        for attr in AOIinfo._FIELDS:
            if attr in d:
                setattr(o, attr, d[attr])
        return o
| 25.396552 | 67 | 0.5241 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AOIinfo(object):
def __init__(self):
self._adcode = None
self._area = None
self._distance = None
self._id = None
self._location = None
self._name = None
@property
def adcode(self):
return self._adcode
@adcode.setter
def adcode(self, value):
self._adcode = value
@property
def area(self):
return self._area
@area.setter
def area(self, value):
self._area = value
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, value):
self._distance = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def location(self):
return self._location
@location.setter
def location(self, value):
self._location = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def to_alipay_dict(self):
params = dict()
if self.adcode:
if hasattr(self.adcode, 'to_alipay_dict'):
params['adcode'] = self.adcode.to_alipay_dict()
else:
params['adcode'] = self.adcode
if self.area:
if hasattr(self.area, 'to_alipay_dict'):
params['area'] = self.area.to_alipay_dict()
else:
params['area'] = self.area
if self.distance:
if hasattr(self.distance, 'to_alipay_dict'):
params['distance'] = self.distance.to_alipay_dict()
else:
params['distance'] = self.distance
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.location:
if hasattr(self.location, 'to_alipay_dict'):
params['location'] = self.location.to_alipay_dict()
else:
params['location'] = self.location
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AOIinfo()
if 'adcode' in d:
o.adcode = d['adcode']
if 'area' in d:
o.area = d['area']
if 'distance' in d:
o.distance = d['distance']
if 'id' in d:
o.id = d['id']
if 'location' in d:
o.location = d['location']
if 'name' in d:
o.name = d['name']
return o
| true | true |
f72fa5dd5997ddce41d5321eaba142024cd484ab | 21,887 | py | Python | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | 1 | 2019-04-22T16:49:34.000Z | 2019-04-22T16:49:34.000Z | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | null | null | null | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import sys
import fnmatch
import subprocess
import tarfile
import shutil
import stat
import re
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from setuptools import setup
from distutils.core import Extension
from distutils.sysconfig import get_config_var as get_python_config
from distutils.sysconfig import get_python_lib
# Before anything else, this setup.py uses some tricks to potentially
# install Apache. This can be from a local tarball, or from precompiled
# Apache binaries for Heroku and OpenShift environments downloaded from
# Amazon S3. Once they are installed, then the installation of the
# mod_wsgi package itself will be triggered, ensuring that it can be
# built against the precompiled Apache binaries which were installed.
#
# First work out whether we are actually running on either Heroku or
# OpenShift. If we are, then we identify the set of precompiled binaries
# we are to use and copy it into the Python installation.
PREFIX = 'https://s3.amazonaws.com'
BUCKET = os.environ.get('MOD_WSGI_REMOTE_S3_BUCKET_NAME', 'modwsgi.org')
REMOTE_TARBALL_NAME = os.environ.get('MOD_WSGI_REMOTE_PACKAGES_NAME')
LOCAL_TARBALL_FILE = os.environ.get('MOD_WSGI_LOCAL_PACKAGES_FILE')
TGZ_OPENSHIFT='mod_wsgi-packages-openshift-centos6-apache-2.4.12-1.tar.gz'
TGZ_HEROKU='mod_wsgi-packages-heroku-cedar14-apache-2.4.12-1.tar.gz'
if not REMOTE_TARBALL_NAME and not LOCAL_TARBALL_FILE:
if os.environ.get('OPENSHIFT_HOMEDIR'):
REMOTE_TARBALL_NAME = TGZ_OPENSHIFT
elif os.path.isdir('/app/.heroku'):
REMOTE_TARBALL_NAME = TGZ_HEROKU
REMOTE_TARBALL_URL = None
if LOCAL_TARBALL_FILE is None and REMOTE_TARBALL_NAME:
REMOTE_TARBALL_URL = '%s/%s/%s' % (PREFIX, BUCKET, REMOTE_TARBALL_NAME)
WITH_TARBALL_PACKAGE = False
if REMOTE_TARBALL_URL or LOCAL_TARBALL_FILE:
WITH_TARBALL_PACKAGE = True
# If we are doing an install, download the tarball and unpack it into
# the 'packages' subdirectory. We will then add everything in that
# directory as package data so that it will be installed into the Python
# installation.
if WITH_TARBALL_PACKAGE:
if REMOTE_TARBALL_URL:
if not os.path.isfile(REMOTE_TARBALL_NAME):
print('Downloading', REMOTE_TARBALL_URL)
urlretrieve(REMOTE_TARBALL_URL, REMOTE_TARBALL_NAME+'.download')
os.rename(REMOTE_TARBALL_NAME+'.download', REMOTE_TARBALL_NAME)
LOCAL_TARBALL_FILE = REMOTE_TARBALL_NAME
if LOCAL_TARBALL_FILE:
shutil.rmtree('src/packages', ignore_errors=True)
tar = tarfile.open(LOCAL_TARBALL_FILE)
tar.extractall('src/packages')
tar.close()
open('src/packages/__init__.py', 'a').close()
package_files = []
for root, dirs, files in os.walk('src/packages', topdown=False):
for name in files:
path = os.path.join(root, name).split('/', 1)[1]
package_files.append(path)
print('adding ', path)
print('Running setup for Apache')
setup(name = 'mod_wsgi-packages',
version = '1.0.0',
packages = ['mod_wsgi', 'mod_wsgi.packages'],
package_dir = {'mod_wsgi': 'src'},
package_data = {'mod_wsgi': package_files},
)
# From this point on we will now actually install mod_wsgi. First we need
# to work out what all the available source code files are that should be
# compiled.
source_files = [os.path.join('src/server', name) for name in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'src/server')) if fnmatch.fnmatch(name, '*.c')]
# Work out all the Apache specific compilation flags. This is done using
# the standard Apache apxs command unless we are installing our own build
# of Apache. In that case we use Python code to do the equivalent of apxs
# as apxs will not work due to paths not matching where it was installed.
def find_program(names, default=None, paths=None):
    """Return the full path of the first program found from *names*.

    Each candidate name is looked up in every directory on ``$PATH``
    followed by the extra *paths*; *default* is returned when no candidate
    exists on disk.

    Fixes over the original: the mutable default argument (``paths=[]``)
    is replaced with the ``None`` sentinel idiom, and a missing ``PATH``
    environment variable no longer raises ``KeyError``. Behaviour is
    otherwise unchanged (including the ':' separator the original used).
    """
    if paths is None:
        paths = []
    # Hoist the loop-invariant search-directory list out of the loops.
    search_dirs = os.environ.get('PATH', '').split(':') + list(paths)
    for name in names:
        for directory in search_dirs:
            program = os.path.join(directory, name)
            if os.path.exists(program):
                return program
    return default
# Locate the Apache 'apxs' tool. Resolution order: explicit APXS environment
# variable, a 'mod_wsgi-apxs' shim next to the Python executable (implies the
# pip-installable httpd package is present), then well known names/locations.
APXS = os.environ.get('APXS')
WITH_HTTPD_PACKAGE = False
if APXS is None:
APXS = find_program(['mod_wsgi-apxs'],
paths=[os.path.dirname(sys.executable)])
if APXS is not None:
WITH_HTTPD_PACKAGE = True
if APXS is None:
APXS = find_program(['mod_wsgi-apxs', 'apxs2', 'apxs'],
'apxs', ['/usr/sbin', os.getcwd()])
elif not os.path.isabs(APXS):
APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])
WITHOUT_APXS = False
WITH_WINDOWS_APACHE = None
WITH_MACOSX_APACHE = None
# When not using our own tarball build, a usable (absolute, executable) apxs
# is required, unless a platform fallback below can supply the paths instead.
if not WITH_TARBALL_PACKAGE:
if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
WITHOUT_APXS = True
if WITHOUT_APXS and os.name == 'nt':
# Windows has no apxs: locate an Apache installation root directly.
APACHE_ROOTDIR = os.environ.get('MOD_WSGI_APACHE_ROOTDIR')
if APACHE_ROOTDIR:
if os.path.exists(APACHE_ROOTDIR):
WITH_WINDOWS_APACHE = APACHE_ROOTDIR
else:
raise RuntimeError('The Apache directory %r does not exist.' %
APACHE_ROOTDIR)
else:
if os.path.exists('c:\\Apache24'):
WITH_WINDOWS_APACHE = 'c:\\Apache24'
elif os.path.exists('c:\\Apache22'):
WITH_WINDOWS_APACHE = 'c:\\Apache22'
elif os.path.exists('c:\\Apache2'):
WITH_WINDOWS_APACHE = 'c:\\Apache2'
else:
raise RuntimeError('No Apache installation can be found. Set the '
'MOD_WSGI_APACHE_ROOTDIR environment to its location.')
elif WITHOUT_APXS and sys.platform == 'darwin':
# On MacOS X fall back to the system Apache shipped with Xcode.
WITH_MACOSX_APACHE = '/Applications/Xcode.app'
if WITHOUT_APXS and not WITH_WINDOWS_APACHE and not WITH_MACOSX_APACHE:
raise RuntimeError('The %r command appears not to be installed or '
'is not executable. Please check the list of prerequisites '
'in the documentation for this package and install any '
'missing Apache httpd server packages.' % APXS)
# Define get_apxs_config()/get_apr_includes()/get_apu_includes() in one of
# four flavours depending on how Apache was located above. All later code
# queries Apache build configuration only through these three accessors.
if WITH_WINDOWS_APACHE:
# Prebuilt Windows Apache: only the include and module directories are
# derivable from the installation root; everything else is unknown.
def get_apxs_config(name):
if name == 'INCLUDEDIR':
return WITH_WINDOWS_APACHE + '/include'
elif name == 'LIBEXECDIR':
return WITH_WINDOWS_APACHE + '/lib'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_MACOSX_APACHE:
# System Apache on MacOS X without a usable apxs: hard-code the well
# known system paths.
def get_apxs_config(name):
if name == 'BINDIR':
return '/usr/bin'
elif name == 'SBINDIR':
return '/usr/sbin'
elif name == 'LIBEXECDIR':
return '/usr/libexec/apache2'
elif name == 'PROGNAME':
return 'httpd'
elif name == 'SHLIBPATH_VAR':
return 'DYLD_LIBRARY_PATH'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_TARBALL_PACKAGE:
# Our own unpacked Apache build: parse its config_vars.mk makefile
# fragment and emulate what apxs would have reported.
SCRIPT_DIR = os.path.join(os.path.dirname(__file__), 'src', 'packages')
CONFIG_FILE = os.path.join(SCRIPT_DIR, 'apache/build/config_vars.mk')
CONFIG = {}
with open(CONFIG_FILE) as fp:
for line in fp.readlines():
name, value = line.split('=', 1)
name = name.strip()
value = value.strip()
CONFIG[name] = value
# Matches make-style variable references: $NAME, ${NAME} or $(NAME).
_varprog = re.compile(r'\$(\w+|(?:\{[^}]*\}|\([^)]*\)))')
def expand_vars(value):
# One pass of $VAR substitution against CONFIG; unknown variables
# are left in place so expansion can terminate.
if '$' not in value:
return value
i = 0
while True:
m = _varprog.search(value, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith('{') and name.endswith('}'):
name = name[1:-1]
elif name.startswith('(') and name.endswith(')'):
name = name[1:-1]
if name in CONFIG:
tail = value[j:]
value = value[:i] + CONFIG.get(name, '')
i = len(value)
value += tail
else:
i = j
return value
def get_apxs_config(name):
# Expand repeatedly until the value stops changing, then rewrite the
# build-time install prefix to point at the unpacked location.
value = CONFIG.get(name, '')
sub_value = expand_vars(value)
while value != sub_value:
value = sub_value
sub_value = expand_vars(value)
return sub_value.replace('/mod_wsgi-packages/', SCRIPT_DIR+'/')
def get_apr_includes():
return ''
def get_apu_includes():
return ''
# Pre-expand the lower-case makefile names once up front; presumably so
# that later expansions resolve against fully expanded values -- the
# makefile values themselves come from the Apache build.
CONFIG['PREFIX'] = get_apxs_config('prefix')
CONFIG['TARGET'] = get_apxs_config('target')
CONFIG['SYSCONFDIR'] = get_apxs_config('sysconfdir')
CONFIG['INCLUDEDIR'] = get_apxs_config('includedir')
CONFIG['LIBEXECDIR'] = get_apxs_config('libexecdir')
CONFIG['BINDIR'] = get_apxs_config('bindir')
CONFIG['SBINDIR'] = get_apxs_config('sbindir')
CONFIG['PROGNAME'] = get_apxs_config('progname')
else:
# Normal case: shell out to the real apxs and the configured APR/APU
# config scripts, returning their stripped stdout as text.
def get_apxs_config(query):
p = subprocess.Popen([APXS, '-q', query],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
def get_apr_includes():
if not APR_CONFIG:
return ''
p = subprocess.Popen([APR_CONFIG, '--includes'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
def get_apu_includes():
if not APU_CONFIG:
return ''
p = subprocess.Popen([APU_CONFIG, '--includes'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
# Query the base set of compiler flags and the locations of the APR/APU
# config scripts through whichever accessor flavour was defined above.
INCLUDEDIR = get_apxs_config('INCLUDEDIR')
CPPFLAGS = get_apxs_config('CPPFLAGS').split()
CFLAGS = get_apxs_config('CFLAGS').split()
EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()
APR_CONFIG = get_apxs_config('APR_CONFIG')
APU_CONFIG = get_apxs_config('APU_CONFIG')
# Make sure that 'apr-1-config' exists. If it doesn't we may be running
# on MacOS X Sierra, which has decided to not provide either it or the
# 'apu-1-config' script and otherwise completely broken 'apxs'. In that
# case we manually set the locations of the Apache and APR header files.
if (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
elif (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
else:
APR_INCLUDES = get_apr_includes().split()
APU_INCLUDES = get_apu_includes().split()
if not os.path.exists(APR_CONFIG) and not INCLUDEDIR:
if sys.platform == 'darwin':
# Likely no Xcode application installed or location of SDK in
# Xcode has changed with a new release of Xcode application.
raise RuntimeError('No Apache installation can be found, do you '
'have the full Apple Xcode installed. It is not enough to '
'have just the xcode command line tools installed.')
else:
# Set INCLUDEDIR just to avoid having an empty path. Probably
# should raise an exception here.
INCLUDEDIR = '/usr/include'
# Write out apxs_config.py which caches various configuration related to
# Apache. For the case of using our own Apache build, this needs to
# calculate values dynamically based on where binaries were installed.
# This is necessary as on OpenShift the virtual environment gets copied
# for each gear to a different path. We can't therefore rely on a hard
# coded path.
BINDIR = get_apxs_config('BINDIR')
SBINDIR = get_apxs_config('SBINDIR')
PROGNAME = get_apxs_config('PROGNAME')
MPM_NAME = get_apxs_config('MPM_NAME')
LIBEXECDIR = get_apxs_config('LIBEXECDIR')
SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')
# Template for the generated src/server/apxs_config.py module. The %(...)s
# placeholders are filled from the values queried above; the WITH_* flags
# select at import time how the generated module computes its paths. The
# string body is program data and must be reproduced exactly.
APXS_CONFIG_TEMPLATE = """
import os
WITH_TARBALL_PACKAGE = %(WITH_TARBALL_PACKAGE)r
WITH_HTTPD_PACKAGE = %(WITH_HTTPD_PACKAGE)r
if WITH_HTTPD_PACKAGE:
from mod_wsgi_packages.httpd import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'modules')
SHLIBPATH = os.path.join(PACKAGES_ROOTDIR, 'lib')
elif WITH_TARBALL_PACKAGE:
from mod_wsgi.packages import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'modules')
SHLIBPATH = []
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr-util', 'lib'))
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr', 'lib'))
SHLIBPATH = ':'.join(SHLIBPATH)
else:
BINDIR = '%(BINDIR)s'
SBINDIR = '%(SBINDIR)s'
LIBEXECDIR = '%(LIBEXECDIR)s'
SHLIBPATH = ''
MPM_NAME = '%(MPM_NAME)s'
PROGNAME = '%(PROGNAME)s'
SHLIBPATH_VAR = '%(SHLIBPATH_VAR)s'
if os.path.exists(os.path.join(SBINDIR, PROGNAME)):
HTTPD = os.path.join(SBINDIR, PROGNAME)
elif os.path.exists(os.path.join(BINDIR, PROGNAME)):
HTTPD = os.path.join(BINDIR, PROGNAME)
else:
HTTPD = PROGNAME
if os.path.exists(os.path.join(SBINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(SBINDIR, 'rotatelogs')
elif os.path.exists(os.path.join(BINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(BINDIR, 'rotatelogs')
else:
ROTATELOGS = 'rotatelogs'
"""
# Render the template and write it into the source tree so it is bundled
# alongside the server code.
with open(os.path.join(os.path.dirname(__file__),
'src/server/apxs_config.py'), 'w') as fp:
print(APXS_CONFIG_TEMPLATE % dict(
WITH_TARBALL_PACKAGE=WITH_TARBALL_PACKAGE,
WITH_HTTPD_PACKAGE=WITH_HTTPD_PACKAGE,
BINDIR=BINDIR, SBINDIR=SBINDIR, LIBEXECDIR=LIBEXECDIR,
MPM_NAME=MPM_NAME, PROGNAME=PROGNAME,
SHLIBPATH_VAR=SHLIBPATH_VAR), file=fp)
# Work out location of Python library and how to link it.
PYTHON_VERSION = get_python_config('VERSION')
if os.name == 'nt':
# sys.real_prefix is set when running inside a (legacy) virtualenv --
# NOTE(review): confirm; in that case link against the base install.
if hasattr(sys, 'real_prefix'):
PYTHON_LIBDIR = sys.real_prefix
else:
PYTHON_LIBDIR = get_python_config('BINDIR')
PYTHON_LDFLAGS = []
# On Windows link directly against the Python and Apache import libraries.
PYTHON_LDLIBS = ['%s/libs/python%s.lib' % (PYTHON_LIBDIR, PYTHON_VERSION),
'%s/lib/libhttpd.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapr-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libaprutil-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapriconv-1.lib' % WITH_WINDOWS_APACHE]
else:
# LDVERSION may carry ABI suffixes (falls back to plain VERSION).
PYTHON_LDVERSION = get_python_config('LDVERSION') or PYTHON_VERSION
PYTHON_LIBDIR = get_python_config('LIBDIR')
PYTHON_CFGDIR = get_python_lib(plat_specific=1, standard_lib=1) + '/config'
if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)
if not os.path.exists(PYTHON_CFGDIR):
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, sys.platform)
PYTHON_LDFLAGS = ['-L%s' % PYTHON_LIBDIR, '-L%s' % PYTHON_CFGDIR]
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]
# Prefer the un-suffixed library name when a static archive with the
# plain version number exists in either candidate directory.
if os.path.exists(os.path.join(PYTHON_LIBDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
if os.path.exists(os.path.join(PYTHON_CFGDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
# Create the final set of compilation flags to be used.
INCLUDE_DIRS = [INCLUDEDIR]
EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
CFLAGS + EXTRA_CFLAGS + APR_INCLUDES + APU_INCLUDES)
EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS
# Force adding of LD_RUN_PATH for platforms that may need it.
if os.name != 'nt':
# Append the Python library directories, preserving any value already
# set in the environment.
LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
LD_RUN_PATH += ':%s:%s' % (PYTHON_LIBDIR, PYTHON_CFGDIR)
LD_RUN_PATH = LD_RUN_PATH.lstrip(':')
os.environ['LD_RUN_PATH'] = LD_RUN_PATH
# On MacOS X, recent versions of Apple's Apache do not support compiling
# Apache modules with a target older than 10.8. This is because it
# screws up Apache APR % formats for apr_time_t, which breaks daemon
# mode queue time. For the target to be 10.8 or newer for now if Python
# installation supports older versions. This means that things will not
# build for older MacOS X versions. Deal with these when they occur.
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if target is None:
target = get_python_config('MACOSX_DEPLOYMENT_TARGET')
if target:
target_version = tuple(map(int, target.split('.')))
#assert target_version >= (10, 8), \
# 'Minimum of 10.8 for MACOSX_DEPLOYMENT_TARGET'
if target_version < (10, 8):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.8'
# Now add the definitions to build everything.
if os.name == 'nt':
extension_name = 'mod_wsgi.server.mod_wsgi'
else:
# The extension name embeds the Python major/minor version on non
# Windows platforms.
extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]
extension = Extension(extension_name, source_files,
include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
extra_link_args=EXTRA_LINK_ARGS)
def _documentation():
result = []
prefix = 'docs/_build/html'
for root, dirs, files in os.walk(prefix, topdown=False):
for name in files:
if root == prefix:
result.append(os.path.join(root[len(prefix):], name))
else:
result.append(os.path.join(root[len(prefix)+1:], name))
return result
def _version():
path = 'src/server/wsgi_version.h'
pattern = r'#define MOD_WSGI_VERSION_STRING "(?P<version>[^"]*)"'
with open(path, 'r') as fp:
match = re.search(pattern, fp.read(), flags=re.MULTILINE)
return match.group('version')
# Final check to make sure a shared library for Python does actually
# exist. Warn if one doesn't as we really want a shared library.
SHARED_LIBRARY_WARNING = """
WARNING: The Python installation you are using does not appear to have
been installed with a shared library, or in the case of MacOS X, as a
framework. Where these are not present, the compilation of mod_wsgi may
fail, or if it does succeed, will result in extra memory being used by
all processes at run time as a result of the static library needing to
be loaded in its entirety to every process. It is highly recommended
that you reinstall the Python installation being used from source code,
supplying the '--enable-shared' option to the 'configure' script when
configuring the source code prior to building and installing it.
"""
if os.name != 'nt':
# Py_ENABLE_SHARED / PYTHONFRAMEWORK come from the interpreter's build
# configuration; this is a warning only and does not abort the install.
if (not get_python_config('Py_ENABLE_SHARED') and
not get_python_config('PYTHONFRAMEWORK')):
print(SHARED_LIBRARY_WARNING)
# Now finally run distutils.
# The README handle below is left to the garbage collector, which is fine
# for a one-shot setup script. NOTE(review): 'bugtrack_url' is presumably a
# non-standard setup() keyword ignored by modern setuptools -- confirm.
long_description = open('README.rst').read()
setup(name = 'mod_wsgi',
version = _version(),
description = 'Installer for Apache/mod_wsgi.',
long_description = long_description,
author = 'Graham Dumpleton',
author_email = 'Graham.Dumpleton@gmail.com',
maintainer = 'Graham Dumpleton',
maintainer_email = 'Graham.Dumpleton@gmail.com',
url = 'http://www.modwsgi.org/',
bugtrack_url = 'https://github.com/GrahamDumpleton/mod_wsgi/issues',
license = 'Apache License, Version 2.0',
platforms = [],
download_url = None,
classifiers = [
'Development Status :: 6 - Mature',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server'
],
keywords = 'mod_wsgi wsgi apache',
packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management',
'mod_wsgi.server.management.commands', 'mod_wsgi.docs',
'mod_wsgi.images'],
package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html',
'mod_wsgi.images': 'images'},
package_data = {'mod_wsgi.docs': _documentation(),
'mod_wsgi.images': ['snake-whiskey.jpg']},
ext_modules = [extension],
entry_points = { 'console_scripts':
['mod_wsgi-express = mod_wsgi.server:main'],},
zip_safe = False,
)
| 36.971284 | 160 | 0.670489 | from __future__ import print_function
import os
import sys
import fnmatch
import subprocess
import tarfile
import shutil
import stat
import re
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from setuptools import setup
from distutils.core import Extension
from distutils.sysconfig import get_config_var as get_python_config
from distutils.sysconfig import get_python_lib
PREFIX = 'https://s3.amazonaws.com'
BUCKET = os.environ.get('MOD_WSGI_REMOTE_S3_BUCKET_NAME', 'modwsgi.org')
REMOTE_TARBALL_NAME = os.environ.get('MOD_WSGI_REMOTE_PACKAGES_NAME')
LOCAL_TARBALL_FILE = os.environ.get('MOD_WSGI_LOCAL_PACKAGES_FILE')
TGZ_OPENSHIFT='mod_wsgi-packages-openshift-centos6-apache-2.4.12-1.tar.gz'
TGZ_HEROKU='mod_wsgi-packages-heroku-cedar14-apache-2.4.12-1.tar.gz'
if not REMOTE_TARBALL_NAME and not LOCAL_TARBALL_FILE:
if os.environ.get('OPENSHIFT_HOMEDIR'):
REMOTE_TARBALL_NAME = TGZ_OPENSHIFT
elif os.path.isdir('/app/.heroku'):
REMOTE_TARBALL_NAME = TGZ_HEROKU
REMOTE_TARBALL_URL = None
if LOCAL_TARBALL_FILE is None and REMOTE_TARBALL_NAME:
REMOTE_TARBALL_URL = '%s/%s/%s' % (PREFIX, BUCKET, REMOTE_TARBALL_NAME)
WITH_TARBALL_PACKAGE = False
if REMOTE_TARBALL_URL or LOCAL_TARBALL_FILE:
WITH_TARBALL_PACKAGE = True
if WITH_TARBALL_PACKAGE:
if REMOTE_TARBALL_URL:
if not os.path.isfile(REMOTE_TARBALL_NAME):
print('Downloading', REMOTE_TARBALL_URL)
urlretrieve(REMOTE_TARBALL_URL, REMOTE_TARBALL_NAME+'.download')
os.rename(REMOTE_TARBALL_NAME+'.download', REMOTE_TARBALL_NAME)
LOCAL_TARBALL_FILE = REMOTE_TARBALL_NAME
if LOCAL_TARBALL_FILE:
shutil.rmtree('src/packages', ignore_errors=True)
tar = tarfile.open(LOCAL_TARBALL_FILE)
tar.extractall('src/packages')
tar.close()
open('src/packages/__init__.py', 'a').close()
package_files = []
for root, dirs, files in os.walk('src/packages', topdown=False):
for name in files:
path = os.path.join(root, name).split('/', 1)[1]
package_files.append(path)
print('adding ', path)
print('Running setup for Apache')
setup(name = 'mod_wsgi-packages',
version = '1.0.0',
packages = ['mod_wsgi', 'mod_wsgi.packages'],
package_dir = {'mod_wsgi': 'src'},
package_data = {'mod_wsgi': package_files},
)
source_files = [os.path.join('src/server', name) for name in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'src/server')) if fnmatch.fnmatch(name, '*.c')]
def find_program(names, default=None, paths=[]):
for name in names:
for path in os.environ['PATH'].split(':') + paths:
program = os.path.join(path, name)
if os.path.exists(program):
return program
return default
APXS = os.environ.get('APXS')
WITH_HTTPD_PACKAGE = False
if APXS is None:
APXS = find_program(['mod_wsgi-apxs'],
paths=[os.path.dirname(sys.executable)])
if APXS is not None:
WITH_HTTPD_PACKAGE = True
if APXS is None:
APXS = find_program(['mod_wsgi-apxs', 'apxs2', 'apxs'],
'apxs', ['/usr/sbin', os.getcwd()])
elif not os.path.isabs(APXS):
APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])
WITHOUT_APXS = False
WITH_WINDOWS_APACHE = None
WITH_MACOSX_APACHE = None
if not WITH_TARBALL_PACKAGE:
if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
WITHOUT_APXS = True
if WITHOUT_APXS and os.name == 'nt':
APACHE_ROOTDIR = os.environ.get('MOD_WSGI_APACHE_ROOTDIR')
if APACHE_ROOTDIR:
if os.path.exists(APACHE_ROOTDIR):
WITH_WINDOWS_APACHE = APACHE_ROOTDIR
else:
raise RuntimeError('The Apache directory %r does not exist.' %
APACHE_ROOTDIR)
else:
if os.path.exists('c:\\Apache24'):
WITH_WINDOWS_APACHE = 'c:\\Apache24'
elif os.path.exists('c:\\Apache22'):
WITH_WINDOWS_APACHE = 'c:\\Apache22'
elif os.path.exists('c:\\Apache2'):
WITH_WINDOWS_APACHE = 'c:\\Apache2'
else:
raise RuntimeError('No Apache installation can be found. Set the '
'MOD_WSGI_APACHE_ROOTDIR environment to its location.')
elif WITHOUT_APXS and sys.platform == 'darwin':
WITH_MACOSX_APACHE = '/Applications/Xcode.app'
if WITHOUT_APXS and not WITH_WINDOWS_APACHE and not WITH_MACOSX_APACHE:
raise RuntimeError('The %r command appears not to be installed or '
'is not executable. Please check the list of prerequisites '
'in the documentation for this package and install any '
'missing Apache httpd server packages.' % APXS)
if WITH_WINDOWS_APACHE:
def get_apxs_config(name):
if name == 'INCLUDEDIR':
return WITH_WINDOWS_APACHE + '/include'
elif name == 'LIBEXECDIR':
return WITH_WINDOWS_APACHE + '/lib'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_MACOSX_APACHE:
def get_apxs_config(name):
if name == 'BINDIR':
return '/usr/bin'
elif name == 'SBINDIR':
return '/usr/sbin'
elif name == 'LIBEXECDIR':
return '/usr/libexec/apache2'
elif name == 'PROGNAME':
return 'httpd'
elif name == 'SHLIBPATH_VAR':
return 'DYLD_LIBRARY_PATH'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_TARBALL_PACKAGE:
SCRIPT_DIR = os.path.join(os.path.dirname(__file__), 'src', 'packages')
CONFIG_FILE = os.path.join(SCRIPT_DIR, 'apache/build/config_vars.mk')
CONFIG = {}
with open(CONFIG_FILE) as fp:
for line in fp.readlines():
name, value = line.split('=', 1)
name = name.strip()
value = value.strip()
CONFIG[name] = value
_varprog = re.compile(r'\$(\w+|(?:\{[^}]*\}|\([^)]*\)))')
def expand_vars(value):
if '$' not in value:
return value
i = 0
while True:
m = _varprog.search(value, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith('{') and name.endswith('}'):
name = name[1:-1]
elif name.startswith('(') and name.endswith(')'):
name = name[1:-1]
if name in CONFIG:
tail = value[j:]
value = value[:i] + CONFIG.get(name, '')
i = len(value)
value += tail
else:
i = j
return value
def get_apxs_config(name):
value = CONFIG.get(name, '')
sub_value = expand_vars(value)
while value != sub_value:
value = sub_value
sub_value = expand_vars(value)
return sub_value.replace('/mod_wsgi-packages/', SCRIPT_DIR+'/')
def get_apr_includes():
return ''
def get_apu_includes():
return ''
CONFIG['PREFIX'] = get_apxs_config('prefix')
CONFIG['TARGET'] = get_apxs_config('target')
CONFIG['SYSCONFDIR'] = get_apxs_config('sysconfdir')
CONFIG['INCLUDEDIR'] = get_apxs_config('includedir')
CONFIG['LIBEXECDIR'] = get_apxs_config('libexecdir')
CONFIG['BINDIR'] = get_apxs_config('bindir')
CONFIG['SBINDIR'] = get_apxs_config('sbindir')
CONFIG['PROGNAME'] = get_apxs_config('progname')
else:
def get_apxs_config(query):
p = subprocess.Popen([APXS, '-q', query],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
def get_apr_includes():
if not APR_CONFIG:
return ''
p = subprocess.Popen([APR_CONFIG, '--includes'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
def get_apu_includes():
if not APU_CONFIG:
return ''
p = subprocess.Popen([APU_CONFIG, '--includes'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if isinstance(out, bytes):
out = out.decode('UTF-8')
return out.strip()
INCLUDEDIR = get_apxs_config('INCLUDEDIR')
CPPFLAGS = get_apxs_config('CPPFLAGS').split()
CFLAGS = get_apxs_config('CFLAGS').split()
EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()
APR_CONFIG = get_apxs_config('APR_CONFIG')
APU_CONFIG = get_apxs_config('APU_CONFIG')
# on MacOS X Sierra, which has decided to not provide either it or the
# 'apu-1-config' script and otherwise completely broken 'apxs'. In that
# case we manually set the locations of the Apache and APR header files.
if (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
elif (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
else:
APR_INCLUDES = get_apr_includes().split()
APU_INCLUDES = get_apu_includes().split()
if not os.path.exists(APR_CONFIG) and not INCLUDEDIR:
if sys.platform == 'darwin':
# Likely no Xcode application installed or location of SDK in
# Xcode has changed with a new release of Xcode application.
raise RuntimeError('No Apache installation can be found, do you '
'have the full Apple Xcode installed. It is not enough to '
'have just the xcode command line tools installed.')
else:
# Set INCLUDEDIR just to avoid having an empty path. Probably
# should raise an exception here.
INCLUDEDIR = '/usr/include'
# Write out apxs_config.py which caches various configuration related to
# Apache. For the case of using our own Apache build, this needs to
# calculate values dynamically based on where binaries were installed.
# This is necessary as on OpenShift the virtual environment gets copied
# for each gear to a different path. We can't therefore rely on a hard
BINDIR = get_apxs_config('BINDIR')
SBINDIR = get_apxs_config('SBINDIR')
PROGNAME = get_apxs_config('PROGNAME')
MPM_NAME = get_apxs_config('MPM_NAME')
LIBEXECDIR = get_apxs_config('LIBEXECDIR')
SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')
APXS_CONFIG_TEMPLATE = """
import os
WITH_TARBALL_PACKAGE = %(WITH_TARBALL_PACKAGE)r
WITH_HTTPD_PACKAGE = %(WITH_HTTPD_PACKAGE)r
if WITH_HTTPD_PACKAGE:
from mod_wsgi_packages.httpd import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'modules')
SHLIBPATH = os.path.join(PACKAGES_ROOTDIR, 'lib')
elif WITH_TARBALL_PACKAGE:
from mod_wsgi.packages import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'modules')
SHLIBPATH = []
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr-util', 'lib'))
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr', 'lib'))
SHLIBPATH = ':'.join(SHLIBPATH)
else:
BINDIR = '%(BINDIR)s'
SBINDIR = '%(SBINDIR)s'
LIBEXECDIR = '%(LIBEXECDIR)s'
SHLIBPATH = ''
MPM_NAME = '%(MPM_NAME)s'
PROGNAME = '%(PROGNAME)s'
SHLIBPATH_VAR = '%(SHLIBPATH_VAR)s'
if os.path.exists(os.path.join(SBINDIR, PROGNAME)):
HTTPD = os.path.join(SBINDIR, PROGNAME)
elif os.path.exists(os.path.join(BINDIR, PROGNAME)):
HTTPD = os.path.join(BINDIR, PROGNAME)
else:
HTTPD = PROGNAME
if os.path.exists(os.path.join(SBINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(SBINDIR, 'rotatelogs')
elif os.path.exists(os.path.join(BINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(BINDIR, 'rotatelogs')
else:
ROTATELOGS = 'rotatelogs'
"""
with open(os.path.join(os.path.dirname(__file__),
'src/server/apxs_config.py'), 'w') as fp:
print(APXS_CONFIG_TEMPLATE % dict(
WITH_TARBALL_PACKAGE=WITH_TARBALL_PACKAGE,
WITH_HTTPD_PACKAGE=WITH_HTTPD_PACKAGE,
BINDIR=BINDIR, SBINDIR=SBINDIR, LIBEXECDIR=LIBEXECDIR,
MPM_NAME=MPM_NAME, PROGNAME=PROGNAME,
SHLIBPATH_VAR=SHLIBPATH_VAR), file=fp)
PYTHON_VERSION = get_python_config('VERSION')
if os.name == 'nt':
if hasattr(sys, 'real_prefix'):
PYTHON_LIBDIR = sys.real_prefix
else:
PYTHON_LIBDIR = get_python_config('BINDIR')
PYTHON_LDFLAGS = []
PYTHON_LDLIBS = ['%s/libs/python%s.lib' % (PYTHON_LIBDIR, PYTHON_VERSION),
'%s/lib/libhttpd.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapr-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libaprutil-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapriconv-1.lib' % WITH_WINDOWS_APACHE]
else:
PYTHON_LDVERSION = get_python_config('LDVERSION') or PYTHON_VERSION
PYTHON_LIBDIR = get_python_config('LIBDIR')
PYTHON_CFGDIR = get_python_lib(plat_specific=1, standard_lib=1) + '/config'
if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)
if not os.path.exists(PYTHON_CFGDIR):
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, sys.platform)
PYTHON_LDFLAGS = ['-L%s' % PYTHON_LIBDIR, '-L%s' % PYTHON_CFGDIR]
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]
if os.path.exists(os.path.join(PYTHON_LIBDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
if os.path.exists(os.path.join(PYTHON_CFGDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
INCLUDE_DIRS = [INCLUDEDIR]
EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
CFLAGS + EXTRA_CFLAGS + APR_INCLUDES + APU_INCLUDES)
EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS
if os.name != 'nt':
LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
LD_RUN_PATH += ':%s:%s' % (PYTHON_LIBDIR, PYTHON_CFGDIR)
LD_RUN_PATH = LD_RUN_PATH.lstrip(':')
os.environ['LD_RUN_PATH'] = LD_RUN_PATH
# Apache modules with a target older than 10.8. This is because it
# screws up Apache APR % formats for apr_time_t, which breaks daemon
# mode queue time. For the target to be 10.8 or newer for now if Python
# installation supports older versions. This means that things will not
# build for older MacOS X versions. Deal with these when they occur.
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if target is None:
target = get_python_config('MACOSX_DEPLOYMENT_TARGET')
if target:
target_version = tuple(map(int, target.split('.')))
#assert target_version >= (10, 8), \
# 'Minimum of 10.8 for MACOSX_DEPLOYMENT_TARGET'
if target_version < (10, 8):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.8'
# Now add the definitions to build everything.
if os.name == 'nt':
extension_name = 'mod_wsgi.server.mod_wsgi'
else:
extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]
extension = Extension(extension_name, source_files,
include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
extra_link_args=EXTRA_LINK_ARGS)
def _documentation():
result = []
prefix = 'docs/_build/html'
for root, dirs, files in os.walk(prefix, topdown=False):
for name in files:
if root == prefix:
result.append(os.path.join(root[len(prefix):], name))
else:
result.append(os.path.join(root[len(prefix)+1:], name))
return result
def _version():
path = 'src/server/wsgi_version.h'
pattern = r'
with open(path, 'r') as fp:
match = re.search(pattern, fp.read(), flags=re.MULTILINE)
return match.group('version')
# Final check to make sure a shared library for Python does actually
# exist. Warn if one doesn't as we really want a shared library.
SHARED_LIBRARY_WARNING = """
WARNING: The Python installation you are using does not appear to have
been installed with a shared library, or in the case of MacOS X, as a
framework. Where these are not present, the compilation of mod_wsgi may
fail, or if it does succeed, will result in extra memory being used by
all processes at run time as a result of the static library needing to
be loaded in its entirety to every process. It is highly recommended
that you reinstall the Python installation being used from source code,
supplying the '--enable-shared' option to the 'configure' script when
configuring the source code prior to building and installing it.
"""
if os.name != 'nt':
if (not get_python_config('Py_ENABLE_SHARED') and
not get_python_config('PYTHONFRAMEWORK')):
print(SHARED_LIBRARY_WARNING)
# Now finally run distutils.
long_description = open('README.rst').read()
setup(name = 'mod_wsgi',
version = _version(),
description = 'Installer for Apache/mod_wsgi.',
long_description = long_description,
author = 'Graham Dumpleton',
author_email = 'Graham.Dumpleton@gmail.com',
maintainer = 'Graham Dumpleton',
maintainer_email = 'Graham.Dumpleton@gmail.com',
url = 'http://www.modwsgi.org/',
bugtrack_url = 'https://github.com/GrahamDumpleton/mod_wsgi/issues',
license = 'Apache License, Version 2.0',
platforms = [],
download_url = None,
classifiers = [
'Development Status :: 6 - Mature',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server'
],
keywords = 'mod_wsgi wsgi apache',
packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management',
'mod_wsgi.server.management.commands', 'mod_wsgi.docs',
'mod_wsgi.images'],
package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html',
'mod_wsgi.images': 'images'},
package_data = {'mod_wsgi.docs': _documentation(),
'mod_wsgi.images': ['snake-whiskey.jpg']},
ext_modules = [extension],
entry_points = { 'console_scripts':
['mod_wsgi-express = mod_wsgi.server:main'],},
zip_safe = False,
)
| true | true |
f72fa655c2b497532a44793ccd4680c179b00048 | 4,942 | py | Python | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import super
# coding: utf-8
# imports
import os
import datetime
# django imports
from django.db import models
from django import forms
from django.core.files.storage import default_storage
from django.forms.widgets import Input
from django.db.models.fields import Field
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
# filebrowser imports
from .settings import *
from .base import FileObject
from .functions import url_to_path, get_directory
from future.utils import with_metaclass
class FileBrowseWidget(Input):
    """Admin text input augmented with a FileBrowser picker popup."""
    input_type = 'text'

    class Media:
        # JS that wires the browse icon to the FileBrowser popup window.
        js = (os.path.join(URL_FILEBROWSER_MEDIA, 'js/AddFileBrowser.js'), )

    def __init__(self, attrs=None):
        """Keyword argument:
        attrs -- optional dict with 'directory', 'extensions' and 'format'
                 configuration plus regular HTML attributes."""
        # Fix: the original called attrs.get() before checking for None,
        # so the documented default (attrs=None) raised AttributeError.
        attrs = attrs if attrs is not None else {}
        self.directory = attrs.get('directory', '')
        self.extensions = attrs.get('extensions', '')
        self.format = attrs.get('format', '')
        self.attrs = attrs.copy()

    def render(self, name, value, attrs=None):
        """Render the widget, resolving strftime-style directory patterns
        and making sure the target directory exists in storage."""
        if value is None:
            value = ""
        directory = self.directory
        if self.directory:
            if callable(self.directory):
                directory = self.directory()
            # The directory may embed strftime placeholders (e.g. "up/%Y/%m").
            directory = os.path.normpath(datetime.datetime.now().strftime(directory))
            fullpath = os.path.join(get_directory(), directory)
            if not default_storage.isdir(fullpath):
                default_storage.makedirs(fullpath)
        # NOTE(review): positional build_attrs(attrs, ...) matches the
        # pre-Django-1.11 Widget API -- confirm against the pinned Django.
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        final_attrs['search_icon'] = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
        final_attrs['directory'] = directory
        final_attrs['extensions'] = self.extensions
        final_attrs['format'] = self.format
        final_attrs['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
        final_attrs['DEBUG'] = DEBUG
        if value != "":
            try:
                final_attrs['directory'] = os.path.split(value.path_relative_directory)[0]
            except AttributeError:
                # value may be a plain string without FileObject attributes;
                # keep the configured directory in that case.
                pass
        return render_to_string("filebrowser/custom_field.html", dict(locals(), MEDIA_URL=MEDIA_URL))
class FileBrowseFormField(forms.CharField):
    """Form field that validates the selected file's extension."""
    widget = FileBrowseWidget
    default_error_messages = {
        'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
    }

    def __init__(self, max_length=None, min_length=None,
                 directory=None, extensions=None, format=None,
                 *args, **kwargs):
        """Keyword arguments:
        directory  -- upload directory forwarded to the widget
        extensions -- iterable of allowed extensions (e.g. ['.jpg'])
        format     -- named format whose extensions come from EXTENSIONS"""
        self.max_length, self.min_length = max_length, min_length
        self.directory = directory
        self.extensions = extensions
        # Fix: always define self.format; previously it was only assigned
        # inside `if format:`, leaving the attribute missing otherwise.
        self.format = format or ''
        if format:
            self.extensions = extensions or EXTENSIONS.get(format)
        super(FileBrowseFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Run CharField cleaning, then reject disallowed extensions."""
        value = super(FileBrowseFormField, self).clean(value)
        if value == '':
            return value
        # Strip a trailing querystring before inspecting the extension.
        file_extension = os.path.splitext(value)[1].lower().split("?")[0]
        if self.extensions and file_extension not in self.extensions:
            raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions)})
        return value
class FileBrowseField(with_metaclass(models.SubfieldBase, Field)):
    """Model field that stores a file path and exposes it as a FileObject."""
    def __init__(self, *args, **kwargs):
        # Widget/validation configuration, forwarded later via formfield().
        self.directory = kwargs.pop('directory', '')
        self.extensions = kwargs.pop('extensions', '')
        self.format = kwargs.pop('format', '')
        return super(FileBrowseField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        # Convert raw DB values/URLs into FileObject instances; idempotent
        # when the value is already a FileObject (or falsy).
        if not value or isinstance(value, FileObject):
            return value
        return FileObject(url_to_path(value))
    def get_db_prep_value(self, value, connection, prepared=False):
        # Persist the FileObject (or string) as its string representation.
        if value is None:
            return None
        return smart_str(value)
    def get_manipulator_field_objs(self):
        # NOTE(review): `oldforms` is undefined in this module (pre-1.0
        # Django API); calling this would raise NameError -- presumably
        # dead legacy code. Confirm before removing.
        return [oldforms.TextField]
    def get_internal_type(self):
        # Stored as a plain CharField at the database level.
        return "CharField"
    def formfield(self, **kwargs):
        attrs = {}
        attrs["directory"] = self.directory
        attrs["extensions"] = self.extensions
        attrs["format"] = self.format
        defaults = {
            'form_class': FileBrowseFormField,
            'widget': FileBrowseWidget(attrs=attrs),
            'directory': self.directory,
            'extensions': self.extensions,
            'format': self.format
        }
        # Caller-supplied kwargs override the defaults above.
        defaults.update(kwargs)
        return super(FileBrowseField, self).formfield(**defaults)
# Optional South (legacy migrations) support: register introspection rules
# so FileBrowseField can be frozen in migrations when South is installed.
try:
    from south.modelsinspector import add_introspection_rules
    # Raw string avoids the invalid-escape-sequence warning for "\.".
    add_introspection_rules([], [r"^filebrowsersafe\.fields\.FileBrowseField"])
except ImportError:
    pass
| 35.3 | 138 | 0.653784 | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import super
import os
import datetime
from django.db import models
from django import forms
from django.core.files.storage import default_storage
from django.forms.widgets import Input
from django.db.models.fields import Field
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from .settings import *
from .base import FileObject
from .functions import url_to_path, get_directory
from future.utils import with_metaclass
class FileBrowseWidget(Input):
input_type = 'text'
class Media:
js = (os.path.join(URL_FILEBROWSER_MEDIA, 'js/AddFileBrowser.js'), )
def __init__(self, attrs=None):
self.directory = attrs.get('directory', '')
self.extensions = attrs.get('extensions', '')
self.format = attrs.get('format', '')
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs=None):
if value is None:
value = ""
directory = self.directory
if self.directory:
if callable(self.directory):
directory = self.directory()
directory = os.path.normpath(datetime.datetime.now().strftime(directory))
fullpath = os.path.join(get_directory(), directory)
if not default_storage.isdir(fullpath):
default_storage.makedirs(fullpath)
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
final_attrs['search_icon'] = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
final_attrs['directory'] = directory
final_attrs['extensions'] = self.extensions
final_attrs['format'] = self.format
final_attrs['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
final_attrs['DEBUG'] = DEBUG
if value != "":
try:
final_attrs['directory'] = os.path.split(value.path_relative_directory)[0]
except:
pass
return render_to_string("filebrowser/custom_field.html", dict(locals(), MEDIA_URL=MEDIA_URL))
class FileBrowseFormField(forms.CharField):
widget = FileBrowseWidget
default_error_messages = {
'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
}
def __init__(self, max_length=None, min_length=None,
directory=None, extensions=None, format=None,
*args, **kwargs):
self.max_length, self.min_length = max_length, min_length
self.directory = directory
self.extensions = extensions
if format:
self.format = format or ''
self.extensions = extensions or EXTENSIONS.get(format)
super(FileBrowseFormField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(FileBrowseFormField, self).clean(value)
if value == '':
return value
file_extension = os.path.splitext(value)[1].lower().split("?")[0]
if self.extensions and not file_extension in self.extensions:
raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions)})
return value
class FileBrowseField(with_metaclass(models.SubfieldBase, Field)):
def __init__(self, *args, **kwargs):
self.directory = kwargs.pop('directory', '')
self.extensions = kwargs.pop('extensions', '')
self.format = kwargs.pop('format', '')
return super(FileBrowseField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value or isinstance(value, FileObject):
return value
return FileObject(url_to_path(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return smart_str(value)
def get_manipulator_field_objs(self):
return [oldforms.TextField]
def get_internal_type(self):
return "CharField"
def formfield(self, **kwargs):
attrs = {}
attrs["directory"] = self.directory
attrs["extensions"] = self.extensions
attrs["format"] = self.format
defaults = {
'form_class': FileBrowseFormField,
'widget': FileBrowseWidget(attrs=attrs),
'directory': self.directory,
'extensions': self.extensions,
'format': self.format
}
defaults.update(kwargs)
return super(FileBrowseField, self).formfield(**defaults)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^filebrowsersafe\.fields\.FileBrowseField"])
except ImportError:
pass
| true | true |
f72fa985b72bc3ee85306c46c1ed7734c3a40686 | 2,607 | py | Python | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | 2 | 2022-01-10T06:53:11.000Z | 2022-01-10T07:41:51.000Z | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | null | null | null | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | null | null | null | import re
import html
import json
import requests
from bs4 import BeautifulSoup
class BamahutExporterService:
    """Scrapes a thread on forum.gamer.com.tw into a list of floor dicts."""

    def __init__(self):
        # One shared HTTP session with a browser-like user agent.
        self.session = requests.Session()
        self.session.headers.update({'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'})

    def is_last_page(self, page, response):
        """Return True once `page` exceeds the thread's maximum page number,
        which the page source embeds in a "var args =" script tag."""
        max_page = re.search('var args =.*page=([0-9]+)', response.text).group(1)
        return page > int(max_page)

    def parse_replies(self, bsn, snB):
        """Fetch and normalise the reply list of a single floor, oldest first."""
        payload = self.session.get(
            'https://forum.gamer.com.tw/ajax/moreCommend.php',
            params={'bsn': bsn, 'snB': snB},
        ).json()
        # Drop the pagination cursor; every remaining value is a reply.
        payload.pop('next_snC')
        replies = [
            {
                'username': entry['userid'],
                'nickname': entry['nick'],
                'datetime': entry['wtime'],
                'content': entry['content'],
                'comment': html.escape('{"content":"%s"}' % entry['content']),
            }
            for entry in payload.values()
        ]
        replies.reverse()
        return replies

    def parse_floor(self, bsn, floor):
        """Turn one floor <section> into a dict; removed floors carry a hint."""
        hint = floor.find('div', {'class': 'hint'})
        if hint is not None:
            return {
                'floor': floor.find('div', {'class': 'floor'}).text,
                'hint': hint.text,
            }
        post_id = floor.get('id').replace('post_', '')
        return {
            'floor': floor.find('a', {'class': 'floor tippy-gpbp'}).text,
            'username': floor.find('a', {'class': 'userid'}).text,
            'nickname': floor.find('a', {'class': 'username'}).text,
            'datetime': floor.find('a', {'class': 'edittime tippy-post-info'}).get('data-mtime'),
            'content': floor.find('div', {'class': 'c-article__content'}),
            'replies': self.parse_replies(bsn, post_id),
        }

    def export(self, bsn, snA):
        """Walk the thread page by page and collect every floor."""
        floors = []
        page = 1
        while True:
            response = self.session.get(
                'https://forum.gamer.com.tw/C.php',
                params={'bsn': bsn, 'snA': snA, 'page': page},
            )
            # Stop once we have walked past the final page.
            if self.is_last_page(page, response):
                return floors
            soup = BeautifulSoup(response.text, 'html.parser')
            for section in soup.find_all('section', {'class': 'c-section', 'id': re.compile('.*')}):
                floors.append(self.parse_floor(bsn, section))
            page += 1
| 35.22973 | 159 | 0.513617 | import re
import html
import json
import requests
from bs4 import BeautifulSoup
class BamahutExporterService:
def __init__(self):
self.session = requests.Session()
self.session.headers.update({'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'})
def is_last_page(self, page, response):
return page > int(re.search('var args =.*page=([0-9]+)', response.text).group(1))
def parse_replies(self, bsn, snB):
replies = []
response = self.session.get('https://forum.gamer.com.tw/ajax/moreCommend.php', params = {'bsn': bsn, 'snB': snB}).json()
response.pop('next_snC')
for reply in response.values():
replies.append(
{
'username' : reply['userid'],
'nickname' : reply['nick'],
'datetime' : reply['wtime'],
'content' : reply['content'],
'comment' : html.escape('{"content":"%s"}' % reply['content']),
}
)
replies.reverse()
return replies
def parse_floor(self, bsn, floor):
if (hint := floor.find('div', {'class': 'hint'})) is not None:
return {
'floor' : floor.find('div', {'class': 'floor'}).text,
'hint' : hint.text,
}
else:
return {
'floor' : floor.find('a', {'class': 'floor tippy-gpbp'}).text,
'username' : floor.find('a', {'class': 'userid'}).text,
'nickname' : floor.find('a', {'class': 'username'}).text,
'datetime' : floor.find('a', {'class': 'edittime tippy-post-info'}).get('data-mtime'),
'content' : floor.find('div', {'class': 'c-article__content'}),
'replies' : self.parse_replies(bsn, floor.get('id').replace('post_', '')),
}
def export(self, bsn, snA):
page = 0
floors = []
while True:
page += 1
response = self.session.get('https://forum.gamer.com.tw/C.php', params = {'bsn': bsn, 'snA': snA, 'page': page})
soup = BeautifulSoup(response.text, 'html.parser')
if self.is_last_page(page, response):
return floors
for floor in soup.find_all('section', {'class': 'c-section', 'id': re.compile('.*')}):
floors.append(self.parse_floor(bsn, floor))
| true | true |
f72faa1d19c3225e9a6701fb72dfb2c626ceba53 | 3,099 | py | Python | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
StageToRedshiftOperator,
LoadFactOperator,
LoadDimensionOperator,
DataQualityOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from helpers import SqlQueries
# Connection ids and S3 source bucket for the pipeline.
REDSHIFT_CONN_ID = 'redshift'
AWS_CREDENTIALS_ID = 'aws_credentials'
INPUT_BUCKET = 'udacity-dend'

# Defaults applied to every task instance in the DAG.
default_args = {
    'owner': 'udacity',
    'start_date': datetime(2018, 11, 1),
    'depends_on_past': False,
    'retries': 3,
    'retry_delay': timedelta(seconds=300),
}

# (table name, insert SQL) for the fact table and each dimension table.
fact_table_name_and_query = ('songplays', SqlQueries.songplay_table_insert)
dim_tables_name_to_query = {
    'users': SqlQueries.user_table_insert,
    'songs': SqlQueries.song_table_insert,
    'artists': SqlQueries.artist_table_insert,
    'time': SqlQueries.time_table_insert,
}

dag = DAG(
    'udac_example_dag',
    default_args=default_args,
    description='Load and transform data in Redshift with Airflow',
    schedule_interval='@hourly',
    # Fix: `catchup` is a DAG-level argument. Placing it in default_args
    # (as before) is silently ignored, so Airflow would backfill every
    # interval since start_date.
    catchup=False,
)

start_operator = DummyOperator(task_id='Begin_execution', dag=dag)

# Create all staging/fact/dimension tables if they do not exist yet.
create_tables = PostgresOperator(
    task_id='Create_tables',
    dag=dag,
    postgres_conn_id=REDSHIFT_CONN_ID,
    sql='/create_tables.sql',
)

# Copy event (log) JSON files from S3 into the staging_events table.
stage_events_to_redshift = StageToRedshiftOperator(
    task_id='Stage_events',
    dag=dag,
    conn_id=REDSHIFT_CONN_ID,
    aws_credentials_id=AWS_CREDENTIALS_ID,
    s3_bucket=INPUT_BUCKET,
    s3_key='log_data/{execution_date.year}/{execution_date.month}/',
    table='staging_events',
    file_format="JSON 's3://udacity-dend/log_json_path.json'",
    provide_context=True,
)

# Copy song metadata JSON files from S3 into the staging_songs table.
stage_songs_to_redshift = StageToRedshiftOperator(
    task_id='Stage_songs',
    dag=dag,
    conn_id=REDSHIFT_CONN_ID,
    aws_credentials_id=AWS_CREDENTIALS_ID,
    s3_bucket=INPUT_BUCKET,
    s3_key='song_data',
    table='staging_songs',
    file_format="JSON 'auto'",
    provide_context=True,
)

# Populate the songplays fact table from the staging tables.
load_songplays_table = LoadFactOperator(
    task_id=f'Load_{fact_table_name_and_query[0]}_fact_table',
    dag=dag,
    table=fact_table_name_and_query[0],
    conn_id=REDSHIFT_CONN_ID,
    sql=fact_table_name_and_query[1],
)

# One load task per dimension table.
dim_operators = [
    LoadDimensionOperator(
        task_id=f'Load_{dim_table_name}_dim_table',
        dag=dag,
        table=dim_table_name,
        conn_id=REDSHIFT_CONN_ID,
        sql=dim_query,
    )
    for dim_table_name, dim_query in dim_tables_name_to_query.items()
]

# Verify row counts / non-emptiness on every loaded table.
run_quality_checks = DataQualityOperator(
    task_id='Run_data_quality_checks',
    dag=dag,
    conn_id=REDSHIFT_CONN_ID,
    tables=list(dim_tables_name_to_query) + [fact_table_name_and_query[0]],
)

end_operator = DummyOperator(task_id='Stop_execution', dag=dag)

# Task dependency graph:
# start -> create tables -> stage (events, songs) -> fact -> dims -> QA -> end
start_operator >> create_tables
create_tables >> [stage_events_to_redshift, stage_songs_to_redshift]
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
load_songplays_table >> dim_operators
dim_operators + [load_songplays_table] >> run_quality_checks
run_quality_checks >> end_operator
| 28.431193 | 75 | 0.757018 | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
StageToRedshiftOperator,
LoadFactOperator,
LoadDimensionOperator,
DataQualityOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from helpers import SqlQueries
REDSHIFT_CONN_ID = 'redshift'
AWS_CREDENTIALS_ID = 'aws_credentials'
INPUT_BUCKET = 'udacity-dend'
default_args = {
'owner': 'udacity',
'start_date': datetime(2018, 11, 1),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(seconds=300),
'catchup': False,
}
fact_table_name_and_query = ('songplays', SqlQueries.songplay_table_insert)
dim_tables_name_to_query = {
'users': SqlQueries.user_table_insert,
'songs': SqlQueries.song_table_insert,
'artists': SqlQueries.artist_table_insert,
'time': SqlQueries.time_table_insert,
}
dag = DAG(
'udac_example_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='@hourly',
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables = PostgresOperator(
task_id='Create_tables',
dag=dag,
postgres_conn_id=REDSHIFT_CONN_ID,
sql='/create_tables.sql',
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='log_data/{execution_date.year}/{execution_date.month}/',
table='staging_events',
file_format="JSON 's3://udacity-dend/log_json_path.json'",
provide_context=True,
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='song_data',
table='staging_songs',
file_format="JSON 'auto'",
provide_context=True,
)
load_songplays_table = LoadFactOperator(
task_id=f'Load_{fact_table_name_and_query[0]}_fact_table',
dag=dag,
table=fact_table_name_and_query[0],
conn_id=REDSHIFT_CONN_ID,
sql=fact_table_name_and_query[1],
)
dim_operators = [
LoadDimensionOperator(
task_id=f'Load_{dim_table_name}_dim_table',
dag=dag,
table=dim_table_name,
conn_id=REDSHIFT_CONN_ID,
sql=dim_query,
)
for dim_table_name, dim_query in dim_tables_name_to_query.items()
]
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
tables=list(dim_tables_name_to_query) + [fact_table_name_and_query[0]],
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables
create_tables >> [stage_events_to_redshift, stage_songs_to_redshift]
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
load_songplays_table >> dim_operators
dim_operators + [load_songplays_table] >> run_quality_checks
run_quality_checks >> end_operator
| true | true |
f72faaed66845c4f79aa0c5322e4ebcb367d7bcc | 766 | py | Python | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | '''
Mahnoor Anjum
Python:
Trivariate Analysis
'''
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import random
from mpl_toolkits.mplot3d import Axes3D
# sns.set()
path = 'data/private/savepath/'
filename = 'v3_1'
genpath = 'data/private/gen/'
genname = 'g3_1'
data = pd.read_csv(path + filename+'.csv')
gen = pd.read_csv(genpath + genname + '.csv')
k = 50
data = data.sample(k)
x = data['x1']
y = data['x2']
z = data['x3']
fig = plt.figure(figsize=(20,20))
data = pd.DataFrame({'X': x, 'Y': y, 'Z': z})
data_pivoted = data.pivot("X", "Y", "Z")
ax = sns.heatmap(data_pivoted)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(str(k)+"_samples")
| 18.238095 | 45 | 0.66188 | import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import random
from mpl_toolkits.mplot3d import Axes3D
path = 'data/private/savepath/'
filename = 'v3_1'
genpath = 'data/private/gen/'
genname = 'g3_1'
data = pd.read_csv(path + filename+'.csv')
gen = pd.read_csv(genpath + genname + '.csv')
k = 50
data = data.sample(k)
x = data['x1']
y = data['x2']
z = data['x3']
fig = plt.figure(figsize=(20,20))
data = pd.DataFrame({'X': x, 'Y': y, 'Z': z})
data_pivoted = data.pivot("X", "Y", "Z")
ax = sns.heatmap(data_pivoted)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(str(k)+"_samples")
| true | true |
f72fab45a2f4c772cc4437cfc0c167040cf1b405 | 1,591 | py | Python | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 1 | 2017-02-22T10:57:10.000Z | 2017-02-22T10:57:10.000Z | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 1 | 2018-12-14T14:30:57.000Z | 2018-12-14T14:30:57.000Z | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 2 | 2018-12-13T23:08:11.000Z | 2018-12-30T19:52:31.000Z | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
import json
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class InvoiceRisk(object):
    """Risk information attached to a ProcessOut invoice."""

    def __init__(self, client, prefill=None):
        """Keyword argument:
        prefill -- Optional dict of initial attribute values"""
        self._client = client
        self._score = None
        self._is_legit = None
        # `is not None` is the correct identity test; the previous
        # `prefill != None` invoked __ne__ and is non-idiomatic (PEP 8).
        if prefill is not None:
            self.fill_with_data(prefill)

    @property
    def score(self):
        """Get score"""
        return self._score

    @score.setter
    def score(self, val):
        """Set score
        Keyword argument:
        val -- New score value"""
        self._score = val
        return self

    @property
    def is_legit(self):
        """Get is_legit"""
        return self._is_legit

    @is_legit.setter
    def is_legit(self, val):
        """Set is_legit
        Keyword argument:
        val -- New is_legit value"""
        self._is_legit = val
        return self

    def fill_with_data(self, data):
        """Fill the current object with the new values pulled from data
        Keyword argument:
        data -- The data from which to pull the new values"""
        if "score" in data:
            self.score = data["score"]
        if "is_legit" in data:
            self.is_legit = data["is_legit"]
        return self

    def to_json(self):
        """Return the wire-format dict representation."""
        return {
            "score": self.score,
            "is_legit": self.is_legit,
        }
| 23.057971 | 71 | 0.588938 | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
import json
from processout.networking.request import Request
from processout.networking.response import Response
class InvoiceRisk(object):
def __init__(self, client, prefill = None):
self._client = client
self._score = None
self._is_legit = None
if prefill != None:
self.fill_with_data(prefill)
@property
def score(self):
return self._score
@score.setter
def score(self, val):
self._score = val
return self
@property
def is_legit(self):
return self._is_legit
@is_legit.setter
def is_legit(self, val):
self._is_legit = val
return self
def fill_with_data(self, data):
if "score" in data.keys():
self.score = data["score"]
if "is_legit" in data.keys():
self.is_legit = data["is_legit"]
return self
def to_json(self):
return {
"score": self.score,
"is_legit": self.is_legit,
}
| true | true |
f72fac17ae338185f2e1470376f1a6802595e44a | 1,514 | py | Python | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | null | null | null | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | # The MIT License (MIT)
#
# Copyright (c) 2015-present, Xiaoyou Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from vnpy.trader.app import BaseApp
from .engine import WebEngine, APP_NAME
class WebTraderApp(BaseApp):
    """vn.py application descriptor for the web trading service.

    Pure metadata: tells the trader framework which engine to start and
    which UI widget/icon to show for this app.
    """
    app_name = APP_NAME  # app identifier (imported from .engine)
    app_module = __module__  # dotted import path of this app package
    app_path = Path(__file__).parent  # on-disk location of app resources
    display_name = "Web服务"  # human-readable name (Chinese: "Web service")
    engine_class = WebEngine  # engine class instantiated for this app
    widget_name = "WebManager"  # name of the UI widget class for this app
    icon_name = "web.ico"  # icon file shipped alongside the app
| 37.85 | 81 | 0.738441 |
from pathlib import Path
from vnpy.trader.app import BaseApp
from .engine import WebEngine, APP_NAME
class WebTraderApp(BaseApp):
app_name = APP_NAME
app_module = __module__
app_path = Path(__file__).parent
display_name = "Web服务"
engine_class = WebEngine
widget_name = "WebManager"
icon_name = "web.ico"
| true | true |
f72facb0930186c23043da299ca86f58450425a0 | 3,448 | py | Python | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | 4 | 2021-12-25T10:17:13.000Z | 2022-03-03T03:29:07.000Z | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | null | null | null | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | 1 | 2021-12-25T10:17:16.000Z | 2021-12-25T10:17:16.000Z | from . import core
from .util import cipher
from nonebot import logger
import json
class BilibiliUploader():
    """Stateful wrapper around `core` for logging in to bilibili and
    uploading/editing video submissions."""

    def __init__(self):
        self.access_token = None
        self.refresh_token = None
        self.sid = None
        self.mid = None

    def login(self, username, password):
        """Password login; stores tokens and session ids on success."""
        code, self.access_token, self.refresh_token, self.sid, self.mid, _ = core.login(username, password)
        if code != 0:  # non-zero code means failure (old comment said "success")
            logger.error("login fail, error code = {}".format(code))

    def login_by_access_token(self, access_token, refresh_token=None):
        """Resume a session from a previously obtained access token."""
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.sid, self.mid, _ = core.login_by_access_token(access_token)

    def login_by_access_token_file(self, file_name):
        """Load tokens from a JSON file written by save_login_data()."""
        with open(file_name, "r") as f:
            login_data = json.loads(f.read())
        self.access_token = login_data["access_token"]
        self.refresh_token = login_data["refresh_token"]
        self.sid, self.mid, _ = core.login_by_access_token(self.access_token)

    def save_login_data(self, file_name=None):
        """Serialize tokens to a JSON string; optionally persist to file_name.

        Returns the JSON string in all cases. Fix: the previous
        `try/finally: return` silently swallowed every exception (including
        the TypeError from the default file_name=None), so write failures
        were lost; now writing is skipped when no file name is given and
        real I/O errors propagate.
        """
        login_data = json.dumps(
            {
                "access_token": self.access_token,
                "refresh_token": self.refresh_token
            }
        )
        if file_name is not None:
            with open(file_name, "w+") as f:
                f.write(login_data)
        return login_data

    def upload(self,
               parts,
               copyright: int,
               title: str,
               tid: int,
               tag: str,
               desc: str,
               source: str = '',
               cover: str = '',
               no_reprint: int = 0,
               open_elec: int = 1,
               max_retry: int = 5,
               thread_pool_workers: int = 1):
        """Upload `parts` as a new submission; thin wrapper over core.upload."""
        return core.upload(self.access_token,
                           self.sid,
                           self.mid,
                           parts,
                           copyright,
                           title,
                           tid,
                           tag,
                           desc,
                           source,
                           cover,
                           no_reprint,
                           open_elec,
                           max_retry,
                           thread_pool_workers)

    def edit(self,
             avid=None,
             bvid=None,
             parts=None,
             insert_index=None,
             copyright=None,
             title=None,
             tid=None,
             tag=None,
             desc=None,
             source=None,
             cover=None,
             no_reprint=None,
             open_elec=None,
             max_retry: int = 5,
             thread_pool_workers: int = 1):
        """Edit an existing submission identified by avid or bvid.

        Returns (None, None) when neither id is given (matching the
        original behaviour); otherwise delegates to core.edit_videos.
        """
        if not avid and not bvid:
            logger.warning("please provide avid or bvid")
            return None, None
        if not avid:
            # Derive the numeric avid from the bvid string.
            avid = cipher.bv2av(bvid)
        if not isinstance(parts, list):
            parts = [parts]
        if type(avid) is str:
            avid = int(avid)
        core.edit_videos(
            self.access_token,
            self.sid,
            self.mid,
            avid,
            bvid,
            parts,
            insert_index,
            copyright,
            title,
            tid,
            tag,
            desc,
            source,
            cover,
            no_reprint,
            open_elec,
            max_retry,
            thread_pool_workers
        )
| 28.733333 | 107 | 0.476798 | from . import core
from .util import cipher
from nonebot import logger
import json
class BilibiliUploader():
def __init__(self):
self.access_token = None
self.refresh_token = None
self.sid = None
self.mid = None
def login(self, username, password):
code, self.access_token, self.refresh_token, self.sid, self.mid, _ = core.login(username, password)
if code != 0:
logger.error("login fail, error code = {}".format(code))
def login_by_access_token(self, access_token, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
self.sid, self.mid, _ = core.login_by_access_token(access_token)
def login_by_access_token_file(self, file_name):
with open(file_name, "r") as f:
login_data = json.loads(f.read())
self.access_token = login_data["access_token"]
self.refresh_token = login_data["refresh_token"]
self.sid, self.mid, _ = core.login_by_access_token(self.access_token)
def save_login_data(self, file_name=None):
login_data = json.dumps(
{
"access_token": self.access_token,
"refresh_token": self.refresh_token
}
)
try:
with open(file_name, "w+") as f:
f.write(login_data)
finally:
return login_data
def upload(self,
parts,
copyright: int,
title: str,
tid: int,
tag: str,
desc: str,
source: str = '',
cover: str = '',
no_reprint: int = 0,
open_elec: int = 1,
max_retry: int = 5,
thread_pool_workers: int = 1):
return core.upload(self.access_token,
self.sid,
self.mid,
parts,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers)
def edit(self,
avid=None,
bvid=None,
parts=None,
insert_index=None,
copyright=None,
title=None,
tid=None,
tag=None,
desc=None,
source=None,
cover=None,
no_reprint=None,
open_elec=None,
max_retry: int = 5,
thread_pool_workers: int = 1):
if not avid and not bvid:
logger.warning("please provide avid or bvid")
return None, None
if not avid:
avid = cipher.bv2av(bvid)
if not isinstance(parts, list):
parts = [parts]
if type(avid) is str:
avid = int(avid)
core.edit_videos(
self.access_token,
self.sid,
self.mid,
avid,
bvid,
parts,
insert_index,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers
)
| true | true |
f72fad071798fd320fcd248a5514555dad9f2802 | 1,006 | py | Python | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | """
netvisor.requests.product
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by Fast Monkeys Oy | 2019- by Heltti Oy
:license: MIT, see LICENSE for more details.
"""
from .base import Request
from ..exc import InvalidData
from ..responses.products import GetProductResponse, ProductListResponse
class GetProductRequest(Request):
    """GetProduct.nv: fetch a single product by its Netvisor identifier."""
    method = 'GET'
    uri = 'GetProduct.nv'
    response_cls = GetProductResponse

    def parse_response(self, response):
        """Parse the API response, raising InvalidData if it is empty."""
        data = super(GetProductRequest, self).parse_response(response)
        self.ensure_not_empty(data)
        return data

    def ensure_not_empty(self, data):
        """Raise InvalidData when the API returned no product payload."""
        if data is None:
            raise InvalidData(
                # Fix: message previously read "Data form incorrect:. "
                'Data form incorrect. '
                'Product not found with Netvisor identifier: {0}'.format(
                    self.params['id']
                )
            )
class ProductListRequest(Request):
method = 'GET'
uri = 'ProductList.nv'
response_cls = ProductListResponse
| 27.189189 | 73 | 0.621272 | from .base import Request
from ..exc import InvalidData
from ..responses.products import GetProductResponse, ProductListResponse
class GetProductRequest(Request):
method = 'GET'
uri = 'GetProduct.nv'
response_cls = GetProductResponse
def parse_response(self, response):
data = super(GetProductRequest, self).parse_response(response)
self.ensure_not_empty(data)
return data
def ensure_not_empty(self, data):
if data is None:
raise InvalidData(
'Data form incorrect:. '
'Product not found with Netvisor identifier: {0}'.format(
self.params['id']
)
)
class ProductListRequest(Request):
method = 'GET'
uri = 'ProductList.nv'
response_cls = ProductListResponse
| true | true |
f72fad7595d798f28e7a2ab25421f74e4783d2ed | 4,409 | py | Python | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests of utility functions."""
import os
import json
import tempfile
import python_pachyderm
from python_pachyderm.experimental.service import pps_proto
from tests import util
# bp_to_pb: PfsInput -> PFSInput
# script that copies a file using just stdlibs
TEST_STDLIB_SOURCE = """
from shutil import copyfile
print("copying")
copyfile("/pfs/{}/file.dat", "/pfs/out/file.dat")
"""
# script that copies a file with padding and colorized output, using
# third-party libraries (defined in `TEST_REQUIREMENTS_SOURCE`.)
TEST_LIB_SOURCE = """
from termcolor import cprint
from leftpad import left_pad
cprint('copying', 'green')
with open('/pfs/{}/file.dat', 'r') as f:
contents = f.read()
with open('/pfs/out/file.dat', 'w') as f:
f.write(left_pad(contents, 5))
"""
TEST_REQUIREMENTS_SOURCE = """
# WCGW?
leftpad==0.1.2
termcolor==1.1.0
"""
TEST_PIPELINE_SPEC = """
{
"pipeline": {
"name": "foobar"
},
"description": "A pipeline that performs image edge detection by using the OpenCV library.",
"input": {
"pfs": {
"glob": "/*",
"repo": "images"
}
},
"transform": {
"cmd": [ "python3", "/edges.py" ],
"image": "pachyderm/opencv"
}
}
"""
def check_expected_files(client: python_pachyderm.Client, commit, expected):
for fi in client.walk_file(commit, "/"):
path = fi.file.path
assert path in expected, "unexpected path: {}".format(path)
expected.remove(path)
for path in expected:
assert False, "expected path not found: {}".format(path)
def test_put_files():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files")
with tempfile.TemporaryDirectory(suffix="python_pachyderm") as d:
# create a temporary directory with these files:
# 0.txt 1.txt 2.txt 3.txt 4.txt 0/0.txt 1/1.txt 2/2.txt
# 3/3.txt 4/4.txt
for i in range(5):
os.makedirs(os.path.join(d, str(i)))
for j in range(5):
with open(os.path.join(d, "{}.txt".format(j)), "w") as f:
f.write(str(j))
with open(os.path.join(d, str(j), "{}.txt".format(j)), "w") as f:
f.write(str(j))
# add the files under both `/` and `/sub` (the latter redundantly to
# test both for correct path handling and the ability to put files
# that already exist)
commit = (repo_name, "master")
python_pachyderm.put_files(client, d, commit, "/")
python_pachyderm.put_files(client, d, commit, "/sub")
python_pachyderm.put_files(client, d, commit, "/sub/")
expected = set(["/", "/sub/"])
for i in range(5):
expected.add("/{}/".format(i))
expected.add("/{}.txt".format(i))
expected.add("/{}/{}.txt".format(i, i))
expected.add("/sub/{}/".format(i))
expected.add("/sub/{}.txt".format(i))
expected.add("/sub/{}/{}.txt".format(i, i))
check_expected_files(client, commit, expected)
def test_put_files_single_file():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files_single_file")
with tempfile.NamedTemporaryFile() as f:
f.write(b"abcd")
f.flush()
commit = (repo_name, "master")
python_pachyderm.put_files(client, f.name, commit, "/f1.txt")
python_pachyderm.put_files(client, f.name, commit, "/f/f1")
expected = set(["/", "/f1.txt", "/f/", "/f/f1"])
check_expected_files(client, commit, expected)
def test_parse_json_pipeline_spec():
req = python_pachyderm.experimental.parse_json_pipeline_spec(TEST_PIPELINE_SPEC)
check_pipeline_spec(req)
def test_parse_dict_pipeline_spec():
req = python_pachyderm.experimental.parse_dict_pipeline_spec(
json.loads(TEST_PIPELINE_SPEC)
)
check_pipeline_spec(req)
def check_pipeline_spec(req):
assert req == pps_proto.CreatePipelineRequest(
pipeline=pps_proto.Pipeline(name="foobar"),
description="A pipeline that performs image edge detection by using the OpenCV library.",
input=pps_proto.Input(
pfs=pps_proto.PfsInput(glob="/*", repo="images"),
),
transform=pps_proto.Transform(
cmd=["python3", "/edges.py"],
image="pachyderm/opencv",
),
)
| 29.198675 | 97 | 0.635972 |
import os
import json
import tempfile
import python_pachyderm
from python_pachyderm.experimental.service import pps_proto
from tests import util
TEST_STDLIB_SOURCE = """
from shutil import copyfile
print("copying")
copyfile("/pfs/{}/file.dat", "/pfs/out/file.dat")
"""
TEST_LIB_SOURCE = """
from termcolor import cprint
from leftpad import left_pad
cprint('copying', 'green')
with open('/pfs/{}/file.dat', 'r') as f:
contents = f.read()
with open('/pfs/out/file.dat', 'w') as f:
f.write(left_pad(contents, 5))
"""
TEST_REQUIREMENTS_SOURCE = """
# WCGW?
leftpad==0.1.2
termcolor==1.1.0
"""
TEST_PIPELINE_SPEC = """
{
"pipeline": {
"name": "foobar"
},
"description": "A pipeline that performs image edge detection by using the OpenCV library.",
"input": {
"pfs": {
"glob": "/*",
"repo": "images"
}
},
"transform": {
"cmd": [ "python3", "/edges.py" ],
"image": "pachyderm/opencv"
}
}
"""
def check_expected_files(client: python_pachyderm.Client, commit, expected):
for fi in client.walk_file(commit, "/"):
path = fi.file.path
assert path in expected, "unexpected path: {}".format(path)
expected.remove(path)
for path in expected:
assert False, "expected path not found: {}".format(path)
def test_put_files():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files")
with tempfile.TemporaryDirectory(suffix="python_pachyderm") as d:
for i in range(5):
os.makedirs(os.path.join(d, str(i)))
for j in range(5):
with open(os.path.join(d, "{}.txt".format(j)), "w") as f:
f.write(str(j))
with open(os.path.join(d, str(j), "{}.txt".format(j)), "w") as f:
f.write(str(j))
commit = (repo_name, "master")
python_pachyderm.put_files(client, d, commit, "/")
python_pachyderm.put_files(client, d, commit, "/sub")
python_pachyderm.put_files(client, d, commit, "/sub/")
expected = set(["/", "/sub/"])
for i in range(5):
expected.add("/{}/".format(i))
expected.add("/{}.txt".format(i))
expected.add("/{}/{}.txt".format(i, i))
expected.add("/sub/{}/".format(i))
expected.add("/sub/{}.txt".format(i))
expected.add("/sub/{}/{}.txt".format(i, i))
check_expected_files(client, commit, expected)
def test_put_files_single_file():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files_single_file")
with tempfile.NamedTemporaryFile() as f:
f.write(b"abcd")
f.flush()
commit = (repo_name, "master")
python_pachyderm.put_files(client, f.name, commit, "/f1.txt")
python_pachyderm.put_files(client, f.name, commit, "/f/f1")
expected = set(["/", "/f1.txt", "/f/", "/f/f1"])
check_expected_files(client, commit, expected)
def test_parse_json_pipeline_spec():
req = python_pachyderm.experimental.parse_json_pipeline_spec(TEST_PIPELINE_SPEC)
check_pipeline_spec(req)
def test_parse_dict_pipeline_spec():
req = python_pachyderm.experimental.parse_dict_pipeline_spec(
json.loads(TEST_PIPELINE_SPEC)
)
check_pipeline_spec(req)
def check_pipeline_spec(req):
assert req == pps_proto.CreatePipelineRequest(
pipeline=pps_proto.Pipeline(name="foobar"),
description="A pipeline that performs image edge detection by using the OpenCV library.",
input=pps_proto.Input(
pfs=pps_proto.PfsInput(glob="/*", repo="images"),
),
transform=pps_proto.Transform(
cmd=["python3", "/edges.py"],
image="pachyderm/opencv",
),
)
| true | true |
f72fad937cc7df0f5730a5937036f84b9f33c80d | 1,322 | py | Python | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | 2 | 2019-05-14T02:04:02.000Z | 2020-04-02T00:19:01.000Z | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | null | null | null | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import math
from individual import Individual
class Evaluator(metaclass=ABCMeta):
def __init__(self):
Individual.set_evaluator(self)
def evaluate(self, individual):
"""個体を評価する
Args:
individual (individual): 評価する個体
Returns:
float: 評価値
"""
return self._evaluate_function(individual.gene)
@abstractmethod
def _evaluate_function(self, gene):
"""実際に個体を評価する実態
Args:
gene (np.array): 評価する遺伝子
"""
pass
class Sphere(Evaluator):
def _evaluate_function(self, gene):
"""Sphere関数として評価する
f(x_1...x_n) = x_1**2 + x_2**2 + ... x_n**2
Args:
gene (np.array): 評価する遺伝子
Returns:
float: 評価値
"""
return (gene**2).sum()
class Rosenbrock(Evaluator):
def _evaluate_function(self, gene):
"""Rosenbrock関数として評価する
f(x_1...x_n = 100(x_2 - x_1**2)**2 + (x_1 - 1)**2 + ...
Args:
gene (np.array): 評価する遺伝子
Returns:
float: 評価値
"""
result = 0.0
for gene_1, gene_2 in zip(gene, gene[1:]):
result += 100.0 * (gene_2 - gene_1**2) ** 2 + (gene_1 - 1)**2
return result
| 20.338462 | 73 | 0.53177 |
from abc import ABCMeta, abstractmethod
import math
from individual import Individual
class Evaluator(metaclass=ABCMeta):
def __init__(self):
Individual.set_evaluator(self)
def evaluate(self, individual):
return self._evaluate_function(individual.gene)
@abstractmethod
def _evaluate_function(self, gene):
pass
class Sphere(Evaluator):
def _evaluate_function(self, gene):
return (gene**2).sum()
class Rosenbrock(Evaluator):
def _evaluate_function(self, gene):
result = 0.0
for gene_1, gene_2 in zip(gene, gene[1:]):
result += 100.0 * (gene_2 - gene_1**2) ** 2 + (gene_1 - 1)**2
return result
| true | true |
f72fae73b6d958f9d94b097c0f0e32534effcad9 | 21,412 | py | Python | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | 15 | 2020-12-28T12:07:09.000Z | 2021-08-21T15:35:53.000Z | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | null | null | null | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | 8 | 2020-12-29T04:24:12.000Z | 2021-04-04T23:29:13.000Z | import discord
from discord.ext import commands, tasks
import datetime
import random
from prettytable import PrettyTable
import random
from random import randint
data = ['Water', 'Air', 'Earth', 'Fire', 'Destruction',
'Illusion', 'Time', 'Space', 'Karma', 'Chaos']
paths = random.choice(data)
luck = random.randint(1, 100)
data1 = ['Demon', 'Human', 'Dragon', 'Beast',
'Phoenix', 'Spirit', 'Giant', 'Fey']
color = 0xa100f2
guild = 757098499836739594
class vein8(commands.Cog, name='leveling'):
def __init__(self, Bot):
self.Bot = Bot
self.Bot.scholar_chat = self.Bot.get_channel(757108786497585172)
async def ModLog(self,ctx,commandname =None ,mod= None, target = None, amount :3 =None, Reason =None,
channel=None, content = None, jump = None):
guild = self.Bot.get_guild(self.Bot.guild_id)
log_channel= self.Bot.get_channel(759583119396700180)
embed = discord.Embed(color = random.choice(self.Bot.color_list),timestamp = datetime.datetime.utcnow())
embed.set_author(name=f"{commandname}",icon_url=ctx.author.avatar_url)
if mod !=None:
embed.add_field(name = "Mod", value = f"{mod.display_name} | {mod.mention}")
if target != None:
embed.add_field(name = "Target", value = f"{target.display_name} | {target.mention}")
if amount != None:
embed.add_field(name= "Amount", value= f'``{amount}``', inline=False)
if channel!= None:
embed.add_field(name = "On channel", value=f"{channel}")
if content!= None:
embed.add_field(name = "Content", value= f"```css\n{content}```", inline=False)
if jump != None:
embed.add_field(name = "Jump", value = f"[Here]({jump})")
if Reason !=None:
embed.add_field(name= "Reason ", value= f"```css\n{Reason}```", inline=False)
embed.set_thumbnail(url = guild.icon_url)
embed.set_footer(icon_url = mod.avatar_url)
await log_channel.send(embed=embed)
return self.ModLog
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message):
# remove the unnecessay things
if isinstance(message.channel, discord.channel.DMChannel):
return
if message.guild.id != 757098499836739594:
return
if message.author.id == 759784064361299989:
return
if message.author.bot:
return
if self.Bot.DEFAULT_PREFIX == '&':
return
race = random.choice(data1)
strength = random.randint(1, 10)
speed = random.randint(1, 10)
defense = random.randint(1, 10)
soul = random.randint(1, 10)
Hp = random.randint(50, 350)
#My server memers ain't lower than 50, that's for sure :)
wisdom = random.randint(50, 100)
bot1 = message.guild.get_channel(781535649843904562)
bot2 = message.guild.get_channel(757136943149613076)
music = message.guild.get_channel(768684108770574366)
testing = message.guild.get_channel(757941959796195484)
if message.channel.id == bot1.id:
return
if message.channel.id == (music.id) or message.channel.id == (testing.id):
return
author_id = str(message.author.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": author_id}
# checks if user is in the db or not
if (collection.find_one({"_id": author_id}) == None):
leauge = "Novice scholar"
Realm = "Mortal"
Path = paths
lol = "No aliases"
user_data = {"_id": author_id, "points": 1, "Leauge": leauge, "Qi": 0, "Daos": 0, "Path": Path, "Realm": Realm, "Luck": luck,
"Species": race, "Strength": strength, "Speed": speed, "Defense": defense, "Soul": soul, "Health": Hp, "Name": lol
, "Wisdom": wisdom}
collection.insert_one(user_data)
else:
query = {"_id": author_id}
level = collection.find(query)
for lvl in level:
cur_p = lvl['points']
new_p = cur_p + 1
# this is a mess
cur_q = lvl['Qi']
new_q = cur_q + 0.25
Leauge = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
if (new_q % 200) == 0:
await message.channel.send(f'<:Cuppedfist:757112296094040104> Congragulations! {message.author.mention}, your Qi just reached **{new_q}**.')
elif (new_q % 600) == 0:
await message.channel.send(f'{message.author}, you now have comprehendded ``{dao}`` heavenly dao(s).')
collection.update_one({"_id": author_id}, {
"$set": {"Daos": +1}})
if (new_q == 500):
ok = 'Star'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Star realm`` expert.\nAnd also, you earned the ``intermediate scholar`` medal.')
new_medal1 = 'Intermediate scholar'
collection.update_one({"_id": author_id}, {
"$set": {"Leauge": new_medal1}})
elif (new_q == 1500):
await message.channel.send(f'<:Cuppedfist:757112296094040104> {member.author.mention}, Congragulations you earned the ``Expert scholar`` medal.')
new_medal2 = 'Expert scholar'
collection.upate_one({"_id": author_id}, {
"$set": {"Leauge": new_medal2}})
elif (new_q % 10) == 0:
strength1 = random.randint(1, 15)
speed1 = random.randint(1, 10)
defense1 = random.randint(1, 25)
soul1 = random.randint(1, 5)
Hp1 = random.randint(1, 20)
collection.update_one({"_id": author_id}, {
"$set": {"Strength": stre + strength1}})
collection.update_one({"_id": author_id}, {
"$set": {"Speed": sped + speed1}})
collection.update_one({"_id": author_id}, {
"$set": {"Defense": defen + defense1}})
collection.update_one({"_id": author_id}, {
"$set": {"Soul": sol + soul1}})
collection.update_one({"_id": author_id}, {
"$set": {"Health": health + Hp1}})
if (new_q == 1100):
ok = 'Transcendent'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'{message.author.mention},<:Cuppedfist:757112296094040104> Congragulations! you just brokethrough to become a ``Transcendent realm`` expert.')
if (new_q == 2500):
ok = 'Saint'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Saint realm`` expert.')
if (new_q == 5100):
ok = 'God'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``God realm``expert.')
if (new_q == 10001):
ok = 'Chaotic'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Chaotic realm`` expert.')
collection.update_one({"_id": author_id}, {
"$set": {'points': new_p}})
collection.update_one({"_id": author_id}, {
"$set": {"Qi": new_q}})
@commands.command(aliases=['apoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def addpoints(self, ctx, member: discord.Member, amount, *, reason=None):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != (guild):
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) + int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully added ``{amount}`` points to {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Given!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
elif int(amount) >= 2000:
await ctx.send(f"<:WeirdChamp:757112297096216627> {ctx.author.name}, 2000 is the limit for now.")
@commands.command(aliases=['rpoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def removepoints(self, ctx, member: discord.Member, amount, *, Reason):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != 757098499836739594:
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if ctx.author.top_role < member.top_role:
return await ctx.send("You can't remove points of someone higher than you.")
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) - int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully removed ``{amount}`` points from {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Removed!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
else:
await ctx.send(f"{ctx.author.name}, you can't remove more than 2000 points. <:WeirdChamp:757112297096216627>")
@commands.command(aliases=["points", "qi", "p", 'stats'], description=f'Show your stats and general info.')
@commands.guild_only()
async def point(self, ctx):
if ctx.message.channel.id == 757108786497585172:
return
try:
member = ctx.author
member_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
nme = lvl['Name']
try:
wisdom = lvl['Wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__#{int(a) +1}/{total}__', value=f'**Aliases** :{nme} \n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}'
)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom : ** {str(wisdom)}')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
except:
await ctx.send(f'Your data probably isn\'nt saved on the database.')
@commands.command(aliases=["puser", "statsu"], description=f'Shows shats on another user, be sure to use the user id.')
@commands.guild_only()
async def pu(self, ctx, member_id: int):
if ctx.guild.id != (guild):
return
member = ctx.guild.get_member(member_id)
member_id = str(member_id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
try:
wisdom = lvl['wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__Main__', value=f'**Rank** : #{int(a) +1}/{total}\n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}', inline=False)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom :** {str(wisdom)} ')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
@commands.command(aliases=['aliases', 'cname'], description=f'Add your cultivator name.')
@commands.guild_only()
async def nickname(self, ctx, *, arg):
if len(arg) > 10:
return await ctx.send('Bruh you can\'t go over 10 characthers.')
if ctx.guild.id != (guild):
return
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = str(ctx.author.id)
name = str(arg)
name = str(arg)
collection.update_one({"_id": user_id}, {"$set": {"Name": name}})
await ctx.send(f'{ctx.author.mention} Your cultivator name was sucessfully set to {arg}.')
@commands.command(aliases=["lb"], description='Shows the top 10 cultivators on the server.')
@commands.guild_only()
async def leaderboard(self, ctx):
if ctx.channel.id == self.Bot.scholar_chat:
return
member = discord.Member or ctx.author
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
collection2 = db['Levels1']
users = collection.find().sort("Qi", -1).limit(10)
names = collection2.find().sort("Name", 1)
a2 = []
nme1 = []
name2 = []
pts1 = []
pth1 = []
table = PrettyTable()
table1 = PrettyTable()
a = 0
table.field_names = ["Rank", "Aliases", "Qi", "Points", "Path"]
table1.field_names = ["Rank", "Aliases", "Qi", "Points"]
table.align = "c"
for u in users:
user_id = u['_id']
qi = u['Qi']
pts = u['points']
pth = u['Path']
nme = u['Name']
a += 1
hm = str(pts)
hm1 = str(qi)
pts1.append(hm)
nme1.append(nme)
name2.append(hm1)
pth1.append(pth)
'''embed.add_field(name='Aliases', value=f"\n\n".join(nme1))
embed.add_field(name='Qi', value="\n\n".join(name2))
embed.add_field(name="Points", value=" \n\n ".join(pts1))
#embed.add_field(name=f"{a}", value=f'**Aliases : {nme}** \n**Qi : ** {qi}\n**Points : ** {pts} \n**Path : **{pth}')
embed.set_footer(text=f'To remove the \'None\' from your name, add your Cultivator name through .aliases')
await ctx.send(embed=embed)'''
table.add_row([a, f'{nme}', qi, pts, f'{pth}'])
table1.add_row([a, f'{nme}', qi, pts])
if ctx.author.is_on_mobile():
await ctx.send(f'```prolog\n{table}```')
else:
embed = discord.Embed(
title="Leaderboard \n``You can add your aliases by [.aliases <yourname>]``", color=color, description=f'```prolog\n{table1}```')
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_footer(text=f'Requested by {ctx.author.name}')
await ctx.send(embed=embed)
def setup(Bot):
Bot.add_cog(vein8(Bot))
print("Leveling cog is working.")
| 42.066798 | 240 | 0.500841 | import discord
from discord.ext import commands, tasks
import datetime
import random
from prettytable import PrettyTable
import random
from random import randint
data = ['Water', 'Air', 'Earth', 'Fire', 'Destruction',
'Illusion', 'Time', 'Space', 'Karma', 'Chaos']
paths = random.choice(data)
luck = random.randint(1, 100)
data1 = ['Demon', 'Human', 'Dragon', 'Beast',
'Phoenix', 'Spirit', 'Giant', 'Fey']
color = 0xa100f2
guild = 757098499836739594
class vein8(commands.Cog, name='leveling'):
def __init__(self, Bot):
self.Bot = Bot
self.Bot.scholar_chat = self.Bot.get_channel(757108786497585172)
async def ModLog(self,ctx,commandname =None ,mod= None, target = None, amount :3 =None, Reason =None,
channel=None, content = None, jump = None):
guild = self.Bot.get_guild(self.Bot.guild_id)
log_channel= self.Bot.get_channel(759583119396700180)
embed = discord.Embed(color = random.choice(self.Bot.color_list),timestamp = datetime.datetime.utcnow())
embed.set_author(name=f"{commandname}",icon_url=ctx.author.avatar_url)
if mod !=None:
embed.add_field(name = "Mod", value = f"{mod.display_name} | {mod.mention}")
if target != None:
embed.add_field(name = "Target", value = f"{target.display_name} | {target.mention}")
if amount != None:
embed.add_field(name= "Amount", value= f'``{amount}``', inline=False)
if channel!= None:
embed.add_field(name = "On channel", value=f"{channel}")
if content!= None:
embed.add_field(name = "Content", value= f"```css\n{content}```", inline=False)
if jump != None:
embed.add_field(name = "Jump", value = f"[Here]({jump})")
if Reason !=None:
embed.add_field(name= "Reason ", value= f"```css\n{Reason}```", inline=False)
embed.set_thumbnail(url = guild.icon_url)
embed.set_footer(icon_url = mod.avatar_url)
await log_channel.send(embed=embed)
return self.ModLog
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message):
if isinstance(message.channel, discord.channel.DMChannel):
return
if message.guild.id != 757098499836739594:
return
if message.author.id == 759784064361299989:
return
if message.author.bot:
return
if self.Bot.DEFAULT_PREFIX == '&':
return
race = random.choice(data1)
strength = random.randint(1, 10)
speed = random.randint(1, 10)
defense = random.randint(1, 10)
soul = random.randint(1, 10)
Hp = random.randint(50, 350)
wisdom = random.randint(50, 100)
bot1 = message.guild.get_channel(781535649843904562)
bot2 = message.guild.get_channel(757136943149613076)
music = message.guild.get_channel(768684108770574366)
testing = message.guild.get_channel(757941959796195484)
if message.channel.id == bot1.id:
return
if message.channel.id == (music.id) or message.channel.id == (testing.id):
return
author_id = str(message.author.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": author_id}
if (collection.find_one({"_id": author_id}) == None):
leauge = "Novice scholar"
Realm = "Mortal"
Path = paths
lol = "No aliases"
user_data = {"_id": author_id, "points": 1, "Leauge": leauge, "Qi": 0, "Daos": 0, "Path": Path, "Realm": Realm, "Luck": luck,
"Species": race, "Strength": strength, "Speed": speed, "Defense": defense, "Soul": soul, "Health": Hp, "Name": lol
, "Wisdom": wisdom}
collection.insert_one(user_data)
else:
query = {"_id": author_id}
level = collection.find(query)
for lvl in level:
cur_p = lvl['points']
new_p = cur_p + 1
cur_q = lvl['Qi']
new_q = cur_q + 0.25
Leauge = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
if (new_q % 200) == 0:
await message.channel.send(f'<:Cuppedfist:757112296094040104> Congragulations! {message.author.mention}, your Qi just reached **{new_q}**.')
elif (new_q % 600) == 0:
await message.channel.send(f'{message.author}, you now have comprehendded ``{dao}`` heavenly dao(s).')
collection.update_one({"_id": author_id}, {
"$set": {"Daos": +1}})
if (new_q == 500):
ok = 'Star'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Star realm`` expert.\nAnd also, you earned the ``intermediate scholar`` medal.')
new_medal1 = 'Intermediate scholar'
collection.update_one({"_id": author_id}, {
"$set": {"Leauge": new_medal1}})
elif (new_q == 1500):
await message.channel.send(f'<:Cuppedfist:757112296094040104> {member.author.mention}, Congragulations you earned the ``Expert scholar`` medal.')
new_medal2 = 'Expert scholar'
collection.upate_one({"_id": author_id}, {
"$set": {"Leauge": new_medal2}})
elif (new_q % 10) == 0:
strength1 = random.randint(1, 15)
speed1 = random.randint(1, 10)
defense1 = random.randint(1, 25)
soul1 = random.randint(1, 5)
Hp1 = random.randint(1, 20)
collection.update_one({"_id": author_id}, {
"$set": {"Strength": stre + strength1}})
collection.update_one({"_id": author_id}, {
"$set": {"Speed": sped + speed1}})
collection.update_one({"_id": author_id}, {
"$set": {"Defense": defen + defense1}})
collection.update_one({"_id": author_id}, {
"$set": {"Soul": sol + soul1}})
collection.update_one({"_id": author_id}, {
"$set": {"Health": health + Hp1}})
if (new_q == 1100):
ok = 'Transcendent'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'{message.author.mention},<:Cuppedfist:757112296094040104> Congragulations! you just brokethrough to become a ``Transcendent realm`` expert.')
if (new_q == 2500):
ok = 'Saint'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Saint realm`` expert.')
if (new_q == 5100):
ok = 'God'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``God realm``expert.')
if (new_q == 10001):
ok = 'Chaotic'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Chaotic realm`` expert.')
collection.update_one({"_id": author_id}, {
"$set": {'points': new_p}})
collection.update_one({"_id": author_id}, {
"$set": {"Qi": new_q}})
@commands.command(aliases=['apoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def addpoints(self, ctx, member: discord.Member, amount, *, reason=None):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != (guild):
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) + int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully added ``{amount}`` points to {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Given!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
elif int(amount) >= 2000:
await ctx.send(f"<:WeirdChamp:757112297096216627> {ctx.author.name}, 2000 is the limit for now.")
@commands.command(aliases=['rpoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def removepoints(self, ctx, member: discord.Member, amount, *, Reason):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != 757098499836739594:
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if ctx.author.top_role < member.top_role:
return await ctx.send("You can't remove points of someone higher than you.")
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) - int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully removed ``{amount}`` points from {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Removed!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
else:
await ctx.send(f"{ctx.author.name}, you can't remove more than 2000 points. <:WeirdChamp:757112297096216627>")
@commands.command(aliases=["points", "qi", "p", 'stats'], description=f'Show your stats and general info.')
@commands.guild_only()
async def point(self, ctx):
if ctx.message.channel.id == 757108786497585172:
return
try:
member = ctx.author
member_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
nme = lvl['Name']
try:
wisdom = lvl['Wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__#{int(a) +1}/{total}__', value=f'**Aliases** :{nme} \n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}'
)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom : ** {str(wisdom)}')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
except:
await ctx.send(f'Your data probably isn\'nt saved on the database.')
@commands.command(aliases=["puser", "statsu"], description=f'Shows shats on another user, be sure to use the user id.')
@commands.guild_only()
async def pu(self, ctx, member_id: int):
if ctx.guild.id != (guild):
return
member = ctx.guild.get_member(member_id)
member_id = str(member_id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
try:
wisdom = lvl['wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__Main__', value=f'**Rank** :
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}', inline=False)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom :** {str(wisdom)} ')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
@commands.command(aliases=['aliases', 'cname'], description=f'Add your cultivator name.')
@commands.guild_only()
async def nickname(self, ctx, *, arg):
if len(arg) > 10:
return await ctx.send('Bruh you can\'t go over 10 characthers.')
if ctx.guild.id != (guild):
return
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = str(ctx.author.id)
name = str(arg)
name = str(arg)
collection.update_one({"_id": user_id}, {"$set": {"Name": name}})
await ctx.send(f'{ctx.author.mention} Your cultivator name was sucessfully set to {arg}.')
@commands.command(aliases=["lb"], description='Shows the top 10 cultivators on the server.')
@commands.guild_only()
async def leaderboard(self, ctx):
if ctx.channel.id == self.Bot.scholar_chat:
return
member = discord.Member or ctx.author
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
collection2 = db['Levels1']
users = collection.find().sort("Qi", -1).limit(10)
names = collection2.find().sort("Name", 1)
a2 = []
nme1 = []
name2 = []
pts1 = []
pth1 = []
table = PrettyTable()
table1 = PrettyTable()
a = 0
table.field_names = ["Rank", "Aliases", "Qi", "Points", "Path"]
table1.field_names = ["Rank", "Aliases", "Qi", "Points"]
table.align = "c"
for u in users:
user_id = u['_id']
qi = u['Qi']
pts = u['points']
pth = u['Path']
nme = u['Name']
a += 1
hm = str(pts)
hm1 = str(qi)
pts1.append(hm)
nme1.append(nme)
name2.append(hm1)
pth1.append(pth)
table.add_row([a, f'{nme}', qi, pts, f'{pth}'])
table1.add_row([a, f'{nme}', qi, pts])
if ctx.author.is_on_mobile():
await ctx.send(f'```prolog\n{table}```')
else:
embed = discord.Embed(
title="Leaderboard \n``You can add your aliases by [.aliases <yourname>]``", color=color, description=f'```prolog\n{table1}```')
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_footer(text=f'Requested by {ctx.author.name}')
await ctx.send(embed=embed)
def setup(Bot):
    """discord.py extension hook: register the vein8 (leveling) cog on *Bot*."""
    Bot.add_cog(vein8(Bot))
    print("Leveling cog is working.")
| true | true |
f72fb037fc55f7ca2cf905ff81db0de58b739c7f | 363 | py | Python | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-12 18:21
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Reservation.user`` to ``Reservation.owner`` (column rename only)."""
    dependencies = [
        ('tours', '0002_auto_20210308_2016'),
    ]
    operations = [
        # Pure rename; existing row data is preserved.
        migrations.RenameField(
            model_name='reservation',
            old_name='user',
            new_name='owner',
        ),
    ]
| 19.105263 | 47 | 0.584022 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tours', '0002_auto_20210308_2016'),
]
operations = [
migrations.RenameField(
model_name='reservation',
old_name='user',
new_name='owner',
),
]
| true | true |
f72fb07aed75906e4fd55adb3081b3b4d85b4c83 | 108 | py | Python | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | from plotgen_functions import DD_mass_constraints
import sys
q = float(sys.argv[1])
DD_mass_constraints(q) | 18 | 49 | 0.824074 | from plotgen_functions import DD_mass_constraints
import sys
q = float(sys.argv[1])
DD_mass_constraints(q) | true | true |
f72fb1455f5a6d221b2e71016d342136e0b43efe | 127 | py | Python | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | 3 | 2017-02-27T02:13:52.000Z | 2017-03-05T03:54:25.000Z | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | # instance/config.py
# NOTE(review): a real secret key is committed to source control — rotate it
# and load from an environment variable / untracked instance config instead.
SECRET_KEY = 'p9Bv<3Eid9%$i01'
# NOTE(review): the MySQL DSN embeds username and password in plain text;
# same remediation applies.
SQLALCHEMY_DATABASE_URI = 'mysql://esss_admin:esss2017@localhost/esss_db'
| 25.4 | 73 | 0.787402 |
SECRET_KEY = 'p9Bv<3Eid9%$i01'
SQLALCHEMY_DATABASE_URI = 'mysql://esss_admin:esss2017@localhost/esss_db'
| true | true |
f72fb15cca4227d9405d5ece1245eb1451ce43c4 | 134,835 | py | Python | Lib/test/test_ssl.py | harveyqing/read_cPython_source | 3ff91638c8c9df6e5ac8dd5235447b5571781535 | [
"PSF-2.0"
] | 36 | 2015-02-04T10:43:31.000Z | 2022-03-30T13:01:12.000Z | Lib/test/test_ssl.py | harveyqing/read_cPython_source | 3ff91638c8c9df6e5ac8dd5235447b5571781535 | [
"PSF-2.0"
] | 9 | 2015-03-17T05:56:16.000Z | 2021-11-17T09:31:50.000Z | src/test/python/cpython-3f944f44ee41/Lib/test/test_ssl.py | bkiers/python3-parser | 38496e94b3935e5abca6ab86fd98136c51f92fd7 | [
"MIT"
] | 22 | 2015-05-13T17:37:35.000Z | 2022-01-25T06:24:42.000Z | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
    """Return the path of a test-support file located next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """Write the pending exception's traceback to stdout, prefixed with
    *prefix*, but only when the test suite runs in verbose mode."""
    exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + exc_format)
def can_clear_options():
    """Return True when SSL options can be cleared (OpenSSL 0.9.8m or newer)."""
    minimum = (0, 9, 8, 13, 15)  # 0.9.8m
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """Return True on OpenSSL 0.9.7h or newer."""
    minimum = (0, 9, 7, 8, 15)  # 0.9.7h
    return ssl.OPENSSL_VERSION_INFO >= minimum
def have_verify_flags():
    """Return True on OpenSSL 0.9.8 or newer."""
    minimum = (0, 9, 8, 0, 15)  # 0.9.8
    return ssl.OPENSSL_VERSION_INFO >= minimum
def utc_offset():  # NOTE: ignore issues like #1647654
    """Return the local UTC offset in seconds (local time = UTC + offset)."""
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
def asn1time(cert_time):
    """Normalize an ASN1 time string for comparison across OpenSSL builds.

    OpenSSL 0.9.8i ignores seconds and space-pads the day (issue #18207),
    so on that exact build the expected string is rewritten to match;
    every other build returns *cert_time* unchanged.
    """
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        parsed = datetime.datetime.strptime(cert_time, fmt)
        cert_time = parsed.replace(second=0).strftime(fmt)
        # %d zero-pads the day while ASN1_TIME_print() space-pads it
        if cert_time[4] == "0":
            cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator (issue #9415): skip *func* on Debian/Ubuntu builds whose
    patched OpenSSL forcefully disables SSLv2, breaking expected behaviour.

    NOTE(review): ``platform.linux_distribution()`` was removed in
    Python 3.8; this decorator only runs its inner check on builds that
    still expose ``ssl.PROTOCOL_SSLv2``.
    """
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                # Creating an SSLv2 context fails on the patched library.
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_SSLv3
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv3')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 2.0
self.assertLess(n, 0x20000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 2)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by OpenSSL, the format might change
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatement for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
    """Tests for SSLContext configuration that need no network access."""

    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        # Every supported protocol constant is accepted; the protocol
        # argument is mandatory and must be a known protocol number.
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)

    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        # The protocol passed to the constructor is exposed unchanged.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)

    def test_ciphers(self):
        # Valid cipher-list strings are accepted; one that matches no
        # cipher raises SSLError.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")

    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            # Clearing previously set option bits is only possible on some
            # OpenSSL builds — guarded by can_clear_options().
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0

    def test_verify_mode(self):
        # verify_mode round-trips through all CERT_* constants and rejects
        # non-constant values.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42

    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        # verify_flags round-trips through the VERIFY_* constants and
        # accepts arbitrary OR-combinations, but not None.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value by OpenSSL
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None

    def test_load_cert_chain(self):
        # Exercises combined/separate key+cert files, password-protected
        # keys (str, bytes, bytearray, and callables), and error cases.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        # Giving only one half of the pair must fail.
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)

    def test_load_verify_locations(self):
        # cafile/capath accept both str and bytes paths; missing or broken
        # files raise OSError/SSLError respectively.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(OSError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)

        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)

    def test_load_verify_cadata(self):
        # The cadata argument accepts PEM text, DER bytes, and concatenated
        # blobs; duplicates are not double-counted in the cert store.
        # test cadata
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read()
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)

        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read()
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)

        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)

        with self.assertRaisesRegex(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata="broken")
        with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")

    def test_load_dh_params(self):
        # DH parameter files load from str paths (and bytes paths outside
        # Windows); wrong types and non-DH files are rejected.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_dh_params(DHFILE)
        if os.name != 'nt':
            ctx.load_dh_params(BYTES_DHFILE)
        self.assertRaises(TypeError, ctx.load_dh_params)
        self.assertRaises(TypeError, ctx.load_dh_params, None)
        with self.assertRaises(FileNotFoundError) as cm:
            ctx.load_dh_params(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(ssl.SSLError) as cm:
            # A certificate file contains no DH parameters.
            ctx.load_dh_params(CERTFILE)

    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        # A freshly created context has all session counters at zero.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })

    def test_set_default_verify_paths(self):
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()

    @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
    def test_set_ecdh_curve(self):
        # Curve names are accepted as str or bytes; unknown names raise
        # ValueError and a missing argument TypeError.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ecdh_curve("prime256v1")
        ctx.set_ecdh_curve(b"prime256v1")
        self.assertRaises(TypeError, ctx.set_ecdh_curve)
        self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
        self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
        self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")

    @needs_sni
    def test_sni_callback(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

        # set_servername_callback expects a callable, or None
        self.assertRaises(TypeError, ctx.set_servername_callback)
        self.assertRaises(TypeError, ctx.set_servername_callback, 4)
        self.assertRaises(TypeError, ctx.set_servername_callback, "")
        self.assertRaises(TypeError, ctx.set_servername_callback, ctx)

        def dummycallback(sock, servername, ctx):
            pass
        ctx.set_servername_callback(None)
        ctx.set_servername_callback(dummycallback)

    @needs_sni
    def test_sni_callback_refcycle(self):
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        self.assertIs(wr(), None)

    def test_cert_store_stats(self):
        # Only load_verify_locations() populates the cert store;
        # load_cert_chain() does not.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 1, 'crl': 0, 'x509': 2})

    def test_get_ca_certs(self):
        # get_ca_certs() lists only certificates flagged as CAs, decoded
        # either as dicts (default) or DER bytes (binary_form=True).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.get_ca_certs(), [])
        # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.get_ca_certs(), [])
        # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.get_ca_certs(),
            [{'issuer': ((('organizationName', 'Root CA'),),
                         (('organizationalUnitName', 'http://www.cacert.org'),),
                         (('commonName', 'CA Cert Signing Authority'),),
                         (('emailAddress', 'support@cacert.org'),)),
              'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
              'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
              'serialNumber': '00',
              'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
              'subject': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
                          (('commonName', 'CA Cert Signing Authority'),),
                          (('emailAddress', 'support@cacert.org'),)),
              'version': 3}])

        with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        self.assertEqual(ctx.get_ca_certs(True), [der])

    def test_load_default_certs(self):
        # load_default_certs() accepts a Purpose member (or nothing);
        # anything else is a TypeError.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_default_certs, None)
        self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')

    def test_create_default_context(self):
        # create_default_context() picks secure defaults: SSLv23 protocol,
        # SSLv2 disabled, and (for SERVER_AUTH) required cert verification
        # with hostname checking.
        ctx = ssl.create_default_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

        with open(SIGNING_CA) as f:
            cadata = f.read()
        ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
            getattr(ssl, "OP_SINGLE_DH_USE", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
            getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        )

    def test__create_stdlib_context(self):
        # The private stdlib helper defaults to unverified contexts and
        # honors explicit protocol / cert_reqs / check_hostname arguments.
        ctx = ssl._create_stdlib_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertFalse(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    def test_check_hostname(self):
        # check_hostname may only be enabled together with a verify_mode
        # other than CERT_NONE, and blocks switching back to CERT_NONE.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertFalse(ctx.check_hostname)

        # Requires CERT_REQUIRED or CERT_OPTIONAL
        with self.assertRaises(ValueError):
            ctx.check_hostname = True
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertFalse(ctx.check_hostname)
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)

        ctx.verify_mode = ssl.CERT_OPTIONAL
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)

        # Cannot set CERT_NONE with check_hostname enabled
        with self.assertRaises(ValueError):
            ctx.verify_mode = ssl.CERT_NONE
        ctx.check_hostname = False
        self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    """Client-side tests that talk to real Internet hosts.

    Every test is wrapped in support.transient_internet() so transient
    network failures turn into skips rather than errors.
    """

    def test_connect(self):
        # Basic wrap_socket() connect: no verification yields an empty
        # peer-cert dict; CERT_REQUIRED without CAs fails; CERT_REQUIRED
        # with the right root cert succeeds.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        # connect_ex() to a closed port reports the error as a return
        # code instead of raising.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                rc = s.connect_ex(("svn.python.org", 444))
                # Issue #19919: Windows machines or VMs hosted on Windows
                # machines sometimes return EWOULDBLOCK.
                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                # Without SNI support, passing server_hostname must fail.
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # Verify server certificates from in-memory CA data, both as PEM
        # text and as DER bytes.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake manually on a non-blocking socket, retrying
        # on SSLWantRead/WriteError until it completes.
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        # ssl.get_server_certificate() returns PEM; verification against
        # the wrong CA file must fail, against the right one must succeed.
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    #should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        # Valid cipher strings connect fine; an unmatchable cipher string
        # raises at wrap time or connect time.
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
                s.connect(remote)
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            # Empty before the first handshake needs the CA ...
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # ... populated afterwards.
            self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet("svn.python.org"):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with ctx1.wrap_socket(s) as ss:
                ss.connect(("svn.python.org", 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
# Thread support is optional: remember whether the threading module is
# available so the threaded server tests below can be skipped without it.
_have_threads = True
try:
    import threading
except ImportError:
    _have_threads = False

from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """Threaded TCP server that lower-cases and echoes whatever it reads.

    Runs either SSL-wrapped from the first byte, or (with
    ``starttls_server=True``) in clear text until the client sends
    ``STARTTLS``.  Usable as a context manager; ``__enter__`` blocks until
    the accept loop is actually running.
    """

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            # Handlers always operate on a blocking socket.
            self.sock.setblocking(1)
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Perform the server-side handshake.  On failure, record the
            # error for the tests to inspect, stop the whole server and
            # return False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
            except (ssl.SSLError, ConnectionResetError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                            + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # Read from whichever channel is active: TLS if wrapped,
            # otherwise the plain socket.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Command loop.  Recognized commands: b'over' (client is done),
            # b'STARTTLS'/b'ENDTLS' (switch TLS on/off, STARTTLS mode only),
            # b'CB tls-unique' (reply with our channel-binding data);
            # anything else is echoed back lower-cased.
            self.running = True
            if not self.server.starttls_server:
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        # Drop back to the unwrapped socket.
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, ciphers=None, context=None):
        # Either take a ready-made SSLContext, or build one from the
        # individual certificate/protocol keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        self.selected_protocols = []  # NPN protocol selected per connection
        self.conn_errors = []         # handshake errors, for test inspection
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the thread and wait until the accept loop has signalled
        # that it is up.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop; the short timeout lets us re-check self.active so
        # that stop() takes effect promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Lower-casing echo server driven by asyncore instead of per-client
    threads; a single thread runs the asyncore event loop."""

    # this one's based on asyncore.dispatcher

    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler (asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap immediately, but run the handshake asynchronously
                # from the event-loop callbacks (do_handshake_on_connect
                # would block here).
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Data may already be buffered inside the SSL layer;
                    # drain it before returning to select().
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # Handshake not finished; retry on the next event.
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Drive the asyncore loop in one-second slices so that stop()
        # is noticed reasonably quickly.
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    *certfile* may name an empty, malformed or non-existent file: in every
    case wrap_socket()/connect() must raise, and an AssertionError is
    raised only if the connection unexpectedly succeeds.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            # Covers both connection-level failures and a missing or
            # unreadable certificate file (e.g. ENOENT).  The original code
            # carried a second, unreachable ``except OSError`` clause here
            # (a leftover from the pre-3.3 IOError/OSError split) with an
            # invalid "\OSError" escape in its message; it has been removed
            # because the first matching handler always wins.
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % x.args[1])
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a dict of handshake results ('compression', 'cipher',
    'peercert', 'client_npn_protocol', 'server_npn_protocols') for the
    caller to inspect.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # Exercise every bytes-like argument type accepted by write().
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                # The echo server lower-cases what it receives.
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            # Collect handshake results before the socket is torn down.
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
        stats['server_npn_protocols'] = server.selected_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """Run one client/server handshake with the given protocol pair and
    verify that it succeeds or fails as *expect_success* predicts."""
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        template = " %s->%s %s\n" if expect_success else " {%s->%s} %s\n"
        sys.stdout.write(template %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))

    def _make_context(protocol, extra_options):
        # Build one fully-configured context for either endpoint.
        context = ssl.SSLContext(protocol)
        context.options |= extra_options
        context.verify_mode = certsreqs
        context.load_cert_chain(CERTFILE)
        context.load_verify_locations(CERTFILE)
        return context

    client_context = _make_context(client_protocol, client_options)
    server_context = _make_context(server_protocol, server_options)

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")

    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        # getpeercert() must raise ValueError before the handshake and
        # return the full, validated peer certificate afterwards.
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            s = context.wrap_socket(socket.socket(),
                                    do_handshake_on_connect=False)
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            # The validity interval must be non-empty.
            self.assertLess(before, after)
            s.close()
    @unittest.skipUnless(have_verify_flags(),
                        "verify_flags need OpenSSL > 0.9.8")
    def test_crl_check(self):
        # Revocation checking: with VERIFY_CRL_CHECK_LEAF set but no CRL
        # loaded the handshake must fail; once the CA's CRL file is loaded
        # it must succeed again.
        if support.verbose:
            sys.stdout.write("\n")

        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)

        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(SIGNING_CA)
        self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)

        # VERIFY_DEFAULT should pass
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")

        # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
        context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF

        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaisesRegex(ssl.SSLError,
                                            "certificate verify failed"):
                    s.connect((HOST, server.port))

        # now load a CRL file. The CRL file is signed by the CA.
        context.load_verify_locations(CRLFILE)

        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
    @needs_sni
    def test_check_hostname(self):
        # check_hostname=True must verify the server hostname against the
        # certificate: a matching name succeeds, a mismatch raises
        # CertificateError, and wrapping without server_hostname at all is
        # a ValueError.
        if support.verbose:
            sys.stdout.write("\n")

        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)

        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)

        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="localhost") as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")

        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="invalid") as s:
                with self.assertRaisesRegex(ssl.CertificateError,
                                            "hostname 'invalid' doesn't match 'localhost'"):
                    s.connect((HOST, server.port))

        # missing server_hostname arg should cause an exception, too
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with socket.socket() as s:
                with self.assertRaisesRegex(ValueError,
                                            "check_hostname requires server_hostname"):
                    context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()

        s = socket.socket()
        port = support.bind_port(s, HOST)

        # `listener` runs in a thread. It sits in an accept() until
        # the main thread connects. Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()

        def connector():
            listener_ready.wait()
            with socket.socket() as c:
                c.connect((HOST, port))
                # Wait until the listener has definitely torn its side down
                # before attempting the handshake.
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except OSError:
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')

        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            # Always reap the listener thread, even if connector() fails.
            t.join()
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                         "OpenSSL is compiled without SSLv2 support")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # SSLv2 server only interoperates with SSLv2 clients.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv23(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
            except OSError as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        # An SSLv23 (auto-negotiating) server accepts every client protocol.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)

        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)

        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)

        # Server with specific SSL options
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # An SSLv3-only server accepts only SSLv3 clients.
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
                               client_options=ssl.OP_NO_SSLv2)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_tlsv1(self):
        """Connecting to a TLSv1 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # A TLSv1-only server accepts only TLSv1 clients.
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
                         "TLS version 1.1 not supported.")
    def test_protocol_tlsv1_1(self):
        """Connecting to a TLSv1.1 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_1)
        # An auto-negotiating client reaches a TLSv1.1 server; exact
        # version pinning on both sides must otherwise match.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
                         "TLS version 1.2 not supported.")
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        # Auto-negotiation reaches TLSv1.2; any explicit version mismatch
        # between the endpoints must fail.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")

        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # `wrapped` tracks which channel is live: the TLS socket `conn`
        # (bound on the first STARTTLS) or the plain socket `s`.
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Tell the server we're done over whichever channel is active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
    def test_socketserver(self):
        """Using a SocketServer to create and manage SSL connections."""
        server = make_https_server(self, certfile=CERTFILE)
        # try to connect
        if support.verbose:
            sys.stdout.write('\n')
        with open(CERTFILE, 'rb') as f:
            d1 = f.read()
        # NOTE(review): d2 starts out as str while d1 is bytes — if the
        # server ever omits Content-Length, the final assertEqual compares
        # bytes to '' and fails; presumably that is the intended failure
        # path, but confirm.
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = 'https://%s:%d/%s' % (
            HOST, server.port, os.path.split(CERTFILE)[1])
        f = urllib.request.urlopen(url)
        try:
            dlen = f.info().get("content-length")
            if dlen and (int(dlen) > 0):
                d2 = f.read(int(dlen))
                if support.verbose:
                    sys.stdout.write(
                        " client: read %d bytes from remote server '%s'\n"
                        % (len(d2), server))
        finally:
            f.close()
        # The HTTPS server must have served the certificate file verbatim.
        self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")

        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]

            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]

            # (name, method, whether to expect success, *args)
            # Address-taking variants (sendto/recvfrom*) must fail on an
            # SSL socket.
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"

            for meth_name, send_meth, expect_success, args in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    # Unsupported methods raise ValueError mentioning their
                    # own name.
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )

            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()

            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, bytearray(100))

            s.write(b"over\n")
            s.close()
    def test_nonblocking_send(self):
        # On a non-blocking SSL socket, send() must eventually raise
        # SSLWantWriteError/SSLWantReadError once the buffers fill up,
        # instead of blocking.
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            s.setblocking(False)

            # If we keep sending data, at some point the buffers
            # will be full and the call will block
            buf = bytearray(8192)
            def fill_buffer():
                while True:
                    s.send(buf)
            self.assertRaises((ssl.SSLWantWriteError,
                               ssl.SSLWantReadError), fill_buffer)

            # Now read all the output and discard it
            s.setblocking(True)
            s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        finish = False

        def serve():
            # Accept connections but never complete a handshake, so the
            # client's handshake is guaranteed to time out.
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()

        t = threading.Thread(target=serve)
        t.start()
        started.wait()

        try:
            try:
                # Timeout set on the plain socket before wrapping.
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                # Timeout set on the already-wrapped socket.
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            # `serve` reads the rebound `finish` from the enclosing scope.
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        server = context.wrap_socket(server, server_side=True)

        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            # Hand the accepted connection back to the main thread via the
            # nonlocal `remote`/`peer` bindings.
            nonlocal remote, peer
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.recv(1)

        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
    def test_getpeercert_enotconn(self):
        """getpeercert() on a never-connected SSLSocket raises
        OSError with errno ENOTCONN."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with context.wrap_socket(socket.socket()) as sock:
            with self.assertRaises(OSError) as cm:
                sock.getpeercert()
            self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_do_handshake_enotconn(self):
        """do_handshake() on a never-connected SSLSocket raises
        OSError with errno ENOTCONN."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with context.wrap_socket(socket.socket()) as sock:
            with self.assertRaises(OSError) as cm:
                sock.do_handshake()
            self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_default_ciphers(self):
        """A client restricted to weak (DES) ciphers cannot handshake with a
        server using the default cipher list; the server records a
        'no shared cipher' error."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        try:
            # Force a set of weak ciphers on our client context
            context.set_ciphers("DES")
        except ssl.SSLError:
            self.skipTest("no DES cipher available")
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                chatty=False) as server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaises(OSError):
                    s.connect((HOST, server.port))
        self.assertIn("no shared cipher", str(server.conn_errors[0]))
    @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
    def test_default_ecdh_curve(self):
        """An ECDH cipher is negotiated by default (issue #21015)."""
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(CERTFILE)
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias.  Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
            context.set_ciphers("ECCdraft:ECDH")
        with ThreadedEchoServer(context=context) as server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
    def test_compression(self):
        """A handshake reports one of the known compression states
        (None, ZLIB, or RLE)."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        if support.verbose:
            sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
        self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        """Setting OP_NO_COMPRESSION results in a connection with no
        compression negotiated."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
    def test_selected_npn_protocol(self):
        """Without NPN configured, the client's selected NPN protocol
        is None."""
        # selected_npn_protocol() is None unless NPN is used
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['client_npn_protocol'], None)
    @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
    def test_npn_protocols(self):
        """NPN negotiation selects the expected protocol for several
        client/server advertisement combinations, on both sides."""
        server_protocols = ['http/1.1', 'spdy/2']
        protocol_tests = [
            # (client protocol list, expected negotiated protocol)
            (['http/1.1', 'spdy/2'], 'http/1.1'),
            (['spdy/2', 'http/1.1'], 'http/1.1'),
            (['spdy/2', 'test'], 'spdy/2'),
            (['abc', 'def'], 'abc')
        ]
        for client_protocols, expected in protocol_tests:
            server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            server_context.load_cert_chain(CERTFILE)
            server_context.set_npn_protocols(server_protocols)
            client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            client_context.load_cert_chain(CERTFILE)
            client_context.set_npn_protocols(client_protocols)
            stats = server_params_test(client_context, server_context,
                                       chatty=True, connectionchatty=True)
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                         str(expected))
            client_result = stats['client_npn_protocol']
            self.assertEqual(client_result, expected, msg % (client_result, "client"))
            server_result = stats['server_npn_protocols'][-1] \
                if len(stats['server_npn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected, msg % (server_result, "server"))
    def sni_contexts(self):
        """Build the three contexts used by the SNI tests.

        Returns (server_context, other_context, client_context): two server
        contexts with different signed certificates, and a client context
        that verifies against the signing CA.
        """
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        other_context.load_cert_chain(SIGNED_CERTFILE2)
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        client_context.verify_mode = ssl.CERT_REQUIRED
        client_context.load_verify_locations(SIGNING_CA)
        return server_context, other_context, client_context
    def check_common_name(self, stats, name):
        """Assert that the peer certificate recorded in *stats* has the
        given commonName in its subject."""
        cert = stats['peercert']
        self.assertIn((('commonName', name),), cert['subject'])
    @needs_sni
    def test_sni_callback(self):
        """The servername callback receives the SNI hostname (or None) and
        can switch the connection to a different context."""
        calls = []
        server_context, other_context, client_context = self.sni_contexts()

        def servername_cb(ssl_sock, server_name, initial_context):
            # Record the callback arguments and swap in the other context
            # when a server name was actually sent.
            calls.append((server_name, initial_context))
            if server_name is not None:
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)

        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # other_context's certificate (CN=fakehostname) was selected
        self.check_common_name(stats, 'fakehostname')

        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, 'localhost')

        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)

        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, 'localhost')
        self.assertEqual(calls, [])
    @needs_sni
    def test_sni_callback_alert(self):
        """A TLS alert code returned from the servername callback is
        reflected back to the connecting client as an SSLError."""
        # Returning a TLS alert is reflected to the connecting client
        server_context, other_context, client_context = self.sni_contexts()

        def cb_returning_alert(ssl_sock, server_name, initial_context):
            return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
        server_context.set_servername_callback(cb_returning_alert)

        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
    @needs_sni
    def test_sni_callback_raising(self):
        """An exception raised in the servername callback fails the
        connection with a handshake-failure alert and the traceback is
        printed to stderr."""
        # Raising fails the connection with a TLS handshake failure alert.
        server_context, other_context, client_context = self.sni_contexts()

        def cb_raising(ssl_sock, server_name, initial_context):
            1/0
        server_context.set_servername_callback(cb_raising)

        with self.assertRaises(ssl.SSLError) as cm, \
             support.captured_stderr() as stderr:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
        self.assertIn("ZeroDivisionError", stderr.getvalue())
    @needs_sni
    def test_sni_callback_wrong_return_type(self):
        """A non-integer return value from the servername callback
        terminates the connection with an internal-error alert and logs a
        TypeError to stderr."""
        # Returning the wrong return type terminates the TLS connection
        # with an internal error alert.
        server_context, other_context, client_context = self.sni_contexts()

        def cb_wrong_return_type(ssl_sock, server_name, initial_context):
            return "foo"
        server_context.set_servername_callback(cb_wrong_return_type)

        with self.assertRaises(ssl.SSLError) as cm, \
             support.captured_stderr() as stderr:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
        self.assertIn("TypeError", stderr.getvalue())
    def test_read_write_after_close_raises_valuerror(self):
        """read() and write() on a closed SSLSocket raise ValueError."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)

        with server:
            s = context.wrap_socket(socket.socket())
            s.connect((HOST, server.port))
            s.close()

            self.assertRaises(ValueError, s.read, 1024)
            self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
    """Entry point: print environment info in verbose mode, verify the
    certificate fixtures exist, then run the selected test classes.

    Networked and threaded test classes are only added when the 'network'
    resource is enabled and threading is available, respectively.
    """
    if support.verbose:
        # Report the platform and OpenSSL build being tested against.
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
            (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    # All certificate fixtures must be present before running anything.
    for filename in [
        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [ContextTests, BasicSocketTests, SSLErrorTests]

    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)

    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| 44.780804 | 117 | 0.553135 |
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
# Skip the whole test module when the ssl extension is not available.
ssl = support.import_module("ssl")

# All protocol names known to this ssl build, in sorted order.
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
    """Return the path of a test data file located next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# Certificate/key fixtures used throughout the tests.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
# Password-protected variants of the key/cert material.
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
# CA directory fixtures (OpenSSL hashed-directory layout).
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# Revocation list and CA-signed certificate fixtures.
CRLFILE = data_file("revocation.crl")
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
# Deliberately broken or unusual fixtures for negative tests.
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")  # intentionally nonexistent path
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
# Diffie-Hellman parameter fixture.
DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """In verbose mode, write the current exception's traceback to stdout
    preceded by *prefix*; otherwise do nothing."""
    formatted = " ".join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + formatted)
def can_clear_options():
    """Return True when this OpenSSL build allows clearing context options."""
    # Requires OpenSSL 0.9.8m or newer.
    minimum = (0, 9, 8, 13, 15)
    return not ssl._OPENSSL_API_VERSION < minimum
def no_sslv2_implies_sslv3_hello():
    """Return True when disabling SSLv2 makes OpenSSL send an SSLv3 hello."""
    # Requires OpenSSL 0.9.7h or newer.
    threshold = (0, 9, 7, 8, 15)
    return not ssl.OPENSSL_VERSION_INFO < threshold
def have_verify_flags():
    """Return True when SSLContext.verify_flags is usable on this build."""
    # Requires OpenSSL 0.9.8 or newer.
    needed = (0, 9, 8, 0, 15)
    return not ssl.OPENSSL_VERSION_INFO < needed
def utc_offset():
    """Return the local UTC offset in seconds; 0 means local time is UTC.

    Bug fix: the original had the ``def`` header and the ``if`` statement
    fused onto a single line, which is a SyntaxError.
    """
    if time.daylight and time.localtime().tm_isdst > 0:
        # Daylight saving time is in effect: use the DST offset.
        return -time.altzone
    return -time.timezone
def asn1time(cert_time):
    """Normalize a certificate time string for the OpenSSL 0.9.8i quirk.

    That specific release zeroes the seconds field and uses a space
    instead of a leading zero for single-digit days; every other release
    returns *cert_time* unchanged.
    """
    if ssl._OPENSSL_API_VERSION != (0, 9, 8, 9, 15):
        return cert_time
    fmt = "%b %d %H:%M:%S %Y GMT"
    parsed = datetime.datetime.strptime(cert_time, fmt)
    rendered = parsed.replace(second=0).strftime(fmt)
    # strftime zero-pads the day of month; 0.9.8i emits a space instead.
    if rendered[4] == "0":
        rendered = rendered[:4] + " " + rendered[5:]
    return rendered
def skip_if_broken_ubuntu_ssl(func):
    """Decorator: skip *func* on Debian/Ubuntu builds whose patched OpenSSL
    raises from ``SSLContext(PROTOCOL_SSLv2)``; a no-op where SSLv2 is
    simply unavailable.

    Bug fix: the original's ``def`` line and the ``hasattr`` check were
    truncated by a bad extraction (the block started with the stray text
    ``OL_SSLv2'):``), leaving a SyntaxError; both lines are restored here.
    """
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
proto = ssl.PROTOCOL_SSLv3
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv3')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 2.0
self.assertLess(n, 0x20000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 2)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by OpenSSL, the format might change
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
fail(cert, '<unsupported>')
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
0:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT")
self.cert_time_fail("Jan 5 09:34:43 2018")
self.cert_time_fail("Jan 5 09:34:43 2018 UTC")
self.cert_time_fail("Jan 35 09:34:43 2018 GMT")
self.cert_time_fail("Jon 5 09:34:43 2018 GMT")
self.cert_time_fail("Jan 5 24:00:00 2018 GMT")
self.cert_time_fail("Jan 5 09:60:43 2018 GMT")
newyear_ts = 1230768000.0
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT")
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        # cert_time_to_seconds() must parse the fixed English month
        # abbreviations regardless of the active locale.
        def local_february_name():
            # Month name for February in the current locale.
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")
        # the English abbreviation always works...
        self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
        # ...while the localized name must be rejected
        self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        # SSLContext.options: default value, OR-ing in new flags, and
        # clearing flags (only on OpenSSL builds where can_clear_options()
        # is true).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            # swap OP_NO_SSLv2 for OP_NO_TLSv1: previously set options can
            # be cleared again
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            # on other builds options can only be added, never removed
            with self.assertRaises(ValueError):
                ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value by OpenSSL
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
    def test_load_cert_chain(self):
        # load_cert_chain(): combined cert+key files, separate files,
        # bytes paths, error cases, password-protected keys, and password
        # callbacks (str/bytes/bytearray/callable return values).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        # providing only one half of the pair must fail
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        # bad callback results propagate as SSLError / ValueError / TypeError
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)
    def test_load_verify_locations(self):
        # load_verify_locations(): cafile as str or bytes, keyword form,
        # and the error cases (missing args, nonexistent file, bad PEM).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(OSError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        # cafile and capath may be combined
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
    def test_load_verify_cadata(self):
        # load_verify_locations(cadata=...): PEM text, DER bytes,
        # concatenated certs, junk between PEM blocks, duplicate handling,
        # and error cases.  cert_store_stats()["x509_ca"] counts loaded CAs.
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read()
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read()
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
        with self.assertRaisesRegex(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata="broken")
        with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")
    def test_load_dh_params(self):
        # load_dh_params(): str path, bytes path (POSIX only), and error
        # cases (missing/None argument, nonexistent file, non-DH PEM file).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_dh_params(DHFILE)
        if os.name != 'nt':
            # bytes paths are not supported on Windows
            ctx.load_dh_params(BYTES_DHFILE)
        self.assertRaises(TypeError, ctx.load_dh_params)
        self.assertRaises(TypeError, ctx.load_dh_params, None)
        with self.assertRaises(FileNotFoundError) as cm:
            ctx.load_dh_params(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        # a certificate file contains no DH parameters
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
    @needs_sni
    def test_sni_callback_refcycle(self):
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # the callback's default argument closes the cycle back to ctx
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        # the cycle collector must have freed the context
        self.assertIs(wr(), None)
    def test_cert_store_stats(self):
        # cert_store_stats() only counts certificates added to the verify
        # store: load_cert_chain() must not affect it, while each
        # load_verify_locations() call does.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 1, 'crl': 0, 'x509': 2})
    def test_get_ca_certs(self):
        # get_ca_certs() returns only CA certificates from the verify
        # store, decoded to a dict (or DER bytes with binary_form=True).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.get_ca_certs(), [])
        # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.get_ca_certs(), [])
        # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.get_ca_certs(),
            [{'issuer': ((('organizationName', 'Root CA'),),
                         (('organizationalUnitName', 'http://www.cacert.org'),),
                         (('commonName', 'CA Cert Signing Authority'),),
                         (('emailAddress', 'support@cacert.org'),)),
              'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
              'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
              'serialNumber': '00',
              'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
              'subject': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
                          (('commonName', 'CA Cert Signing Authority'),),
                          (('emailAddress', 'support@cacert.org'),)),
              'version': 3}])
        # binary_form=True returns the raw DER encoding
        with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
    def test_create_default_context(self):
        # create_default_context(): SERVER_AUTH is the default purpose and
        # enables certificate + hostname verification.
        ctx = ssl.create_default_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        # cafile/capath/cadata are forwarded to load_verify_locations()
        with open(SIGNING_CA) as f:
            cadata = f.read()
        ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        # CLIENT_AUTH (server-side sockets): no verification, and single-use
        # DH/ECDH keys where the OpenSSL build defines those options
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
            getattr(ssl, "OP_SINGLE_DH_USE", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
            getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        )
    def test__create_stdlib_context(self):
        # _create_stdlib_context(): the permissive context used internally
        # by the stdlib — defaults to no verification and no hostname check.
        ctx = ssl._create_stdlib_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertFalse(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        # explicit protocol is honored
        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        # cert_reqs and check_hostname can be switched on
        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        # purpose=CLIENT_AUTH selects the server-side defaults
        ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    def test_check_hostname(self):
        # check_hostname interacts with verify_mode: it can only be enabled
        # when certificates are actually requested, and enabling it forbids
        # switching back to CERT_NONE.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertFalse(ctx.check_hostname)
        # Requires CERT_REQUIRED or CERT_OPTIONAL
        with self.assertRaises(ValueError):
            ctx.check_hostname = True
        ctx.verify_mode = ssl.CERT_REQUIRED
        # changing verify_mode does not flip check_hostname implicitly
        self.assertFalse(ctx.check_hostname)
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)
        # Cannot set CERT_NONE with check_hostname enabled
        with self.assertRaises(ValueError):
            ctx.verify_mode = ssl.CERT_NONE
        ctx.check_hostname = False
        self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
    """Tests for the SSLError hierarchy: str() formatting, errno,
    library/reason attributes, and the SSLWantReadError subclass."""
    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # same for a subclass
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
    def test_lib_reason(self):
        # loading a certificate file as DH params fails inside OpenSSL's
        # PEM library, so library/reason are populated
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
    def test_subclass(self):
        # a non-blocking handshake against a listening socket that never
        # answers raises the SSLWantReadError subclass
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with socket.socket() as s:
            s.bind(("127.0.0.1", 0))
            s.listen(5)
            c = socket.socket()
            c.connect(s.getsockname())
            c.setblocking(False)
            with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # errno is kept for backwards compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    def test_connect(self):
        # Basic live connects: CERT_NONE yields an empty peer cert,
        # CERT_REQUIRED fails without a CA and succeeds with the root cert.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # no validation requested -> getpeercert() returns {}
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()
def test_connect_ex(self):
"svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
ython.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex(('svn.python.org', 443))
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
select.select([], [s], [], 5.0)
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
if rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
rc = s.connect_ex(("svn.python.org", 444))
c, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
finally:
s.close()
    def test_connect_with_context(self):
        # Same as test_connect, but with a separately created SSLContext,
        # plus SNI and CERT_REQUIRED handling.
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname: only valid when SNI is available
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
    def test_connect_capath(self):
        # Verification against a hashed-directory CA store (capath), with
        # both str and bytes paths.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # same with a bytes capath
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
    def test_connect_cadata(self):
        # Verification against in-memory CA data, in both PEM and DER form.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet("svn.python.org"):
            # PEM text
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)
            # same with DER bytes
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)
    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # keep the underlying file descriptor alive: once both the file
        # object and the SSL socket are closed, the fd must be invalid.
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open: closing the makefile() object alone
            # must not close the socket
            os.read(fd, 0)
            # Closing the SSL crashed before this was fixed -- now the fd
            # is released and reads on it fail with EBADF
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)
    def test_non_blocking_handshake(self):
        # Drive a handshake manually on a non-blocking socket, retrying on
        # SSLWantRead/WriteError until it completes.
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
    def test_get_server_certificate(self):
        # ssl.get_server_certificate(): returns PEM without validation,
        # fails against the wrong CA, succeeds against the right one.
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                # validating against an unrelated cert must fail
                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    # should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
                # validating against the matching root must succeed
                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)
    def test_ciphers(self):
        # Live connects with explicit cipher specs; a nonsense spec fails
        # already at wrap time.
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
                s.connect(remote)
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)
    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            # nothing loaded yet before a connection is made
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # the handshake pulled the CA cert out of the capath directory
            self.assertEqual(len(ctx.get_ca_certs()), 1)
    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet("svn.python.org"):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with ctx1.wrap_socket(s) as ss:
                ss.connect(("svn.python.org", 443))
                # both the wrapper and the underlying _sslobj track ctx1
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                # reassigning propagates down to the _sslobj
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
self.server.stop()
    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, ciphers=None, context=None):
        """Create (but do not start) an echoing TLS test server.

        Either pass a ready-made SSLContext via *context*, or let the
        server build one from the individual certificate/protocol
        arguments.  A listening port is bound immediately and exposed
        as ``self.port``.
        """
        if context:
            self.context = context
        else:
            # Build a context from the legacy per-argument configuration.
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        # Bind now so tests can learn the port before run() starts accepting.
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        self.selected_protocols = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
    def __exit__(self, *args):
        # Shut down the accept loop and wait for the server thread to exit.
        self.stop()
        self.join()
    def start(self, flag=None):
        # *flag* is an optional threading.Event that run() sets once the
        # accept loop is live; __enter__ uses it to wait for readiness.
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        """Accept loop: spawn a ConnectionHandler per client until stopped."""
        # Short timeout so the loop can notice self.active going False.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # Signal start()/__enter__ that we are now accepting.
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                # Handle one connection at a time: join before accepting more.
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()
    def stop(self):
        # The accept loop in run() polls this flag every 50 ms.
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Echo server driven by asyncore.loop() in a background thread."""
    class EchoServer (asyncore.dispatcher):
        """Listening dispatcher; hands accepted sockets to ConnectionHandler."""
        class ConnectionHandler (asyncore.dispatcher_with_send):
            """Per-connection dispatcher doing a non-blocking SSL handshake."""
            def __init__(self, conn, certfile):
                # Wrap without handshaking: the handshake is driven
                # incrementally by the event loop via _do_ssl_handshake().
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()
            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Data may already be buffered inside OpenSSL; drain it
                    # before relying on the underlying socket's readability.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True
            def _do_ssl_handshake(self):
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # Handshake not finished yet; retry on the next event.
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False
            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # Echo back lowercased, like ThreadedEchoServer.
                        self.send(data.lower())
            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)
            def handle_error(self):
                raise
        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)
        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)
        def handle_error(self):
            raise
    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        # Expose the bound port so clients can connect before run() starts.
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True
    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)
    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self
    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")
    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                # 1-second loop timeout so stop() is noticed promptly.
                asyncore.loop(1)
            except:
                pass
    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    Fix: the original had two identical ``except OSError`` clauses; the
    second (with its malformed "\\OSError" escape) was unreachable and
    has been removed.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            # Expected: the TLS layer rejects the bad client certificate.
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            # Also acceptable: e.g. connection reset by the server, or a
            # missing/unreadable certificate file.
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % x.args[1])
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.  Returns a dict of statistics about the connection
    (compression, cipher, peer cert, negotiated NPN protocols).
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # Exercise all three buffer-like argument types.
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                # The echo server lowercases everything it receives.
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
        stats['server_npn_protocols'] = server.selected_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """Run a client/server handshake with the given protocols and options
    and check that it succeeds or fails as *expect_success* predicts."""
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the output mark combinations expected to fail.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        for protocol in PROTOCOLS:
            with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
                # Same context on both sides: the echo round trip must
                # work for every supported protocol.
                context = ssl.SSLContext(protocol)
                context.load_cert_chain(CERTFILE)
                server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        """Check the dict returned by getpeercert() after a handshake."""
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            s = context.wrap_socket(socket.socket(),
                                    do_handshake_on_connect=False)
            s.connect((HOST, server.port))
            # getpeercert() raises ValueError while the handshake isn't
            # complete yet.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            # Sanity check: the validity period must be non-empty.
            self.assertLess(before, after)
            s.close()
    @unittest.skipUnless(have_verify_flags(),
                        "verify_flags need OpenSSL > 0.9.8")
    def test_crl_check(self):
        """Certificate revocation checking via context.verify_flags."""
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(SIGNING_CA)
        self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)
        # VERIFY_DEFAULT should pass
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # With leaf CRL checking enabled but no CRL loaded, verify fails.
        context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaisesRegex(ssl.SSLError,
                                            "certificate verify failed"):
                    s.connect((HOST, server.port))
        # After loading the CRL, verification succeeds again.
        context.load_verify_locations(CRLFILE)
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
    @needs_sni
    def test_check_hostname(self):
        """Hostname verification with context.check_hostname enabled."""
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)
        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="localhost") as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="invalid") as s:
                with self.assertRaisesRegex(ssl.CertificateError,
                                            "hostname 'invalid' doesn't match 'localhost'"):
                    s.connect((HOST, server.port))
        # missing server_hostname arg should cause an exception, too
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with socket.socket() as s:
                with self.assertRaisesRegex(ValueError,
                                            "check_hostname requires server_hostname"):
                    context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()
        s = socket.socket()
        port = support.bind_port(s, HOST)
        # `listener` runs in a thread. It sits in an accept() until
        # the main thread connects. Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()
        def connector():
            listener_ready.wait()
            with socket.socket() as c:
                c.connect((HOST, port))
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except OSError:
                    # Expected: the peer is already gone.
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')
        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            t.join()
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                         "OpenSSL is compiled without SSLv2 support")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # SSLv2 client to SSLv2 server works with any cert requirement.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv23(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
            except OSError as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        # An SSLv23 server must interoperate with every modern client.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        # Server with specific SSL options
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        # An SSLv23 client with SSLv3 disabled cannot talk to an SSLv3 server.
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
                               client_options=ssl.OP_NO_SSLv2)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_tlsv1(self):
        """Connecting to a TLSv1 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
        # Older protocols and an OP_NO_TLSv1 client must be rejected.
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
                         "TLS version 1.1 not supported.")
    def test_protocol_tlsv1_1(self):
        """Connecting to a TLSv1.1 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_1)
        # SSLv23 negotiates up to TLSv1.1; exact-version mismatches fail.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
                         "TLS version 1.2 not supported.")
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        # SSLv23 negotiates up to TLSv1.2; exact-version mismatches fail.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # Tracks whether the connection is currently TLS-wrapped.
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Tell the server we're done, over whichever channel is active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://%s:%d/%s' % (
HOST, server.port, os.path.split(CERTFILE)[1])
f = urllib.request.urlopen(url)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, whether to expect success, *args)
            # Methods marked False must raise ValueError on an SSL socket.
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"
            for meth_name, send_meth, expect_success, args in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, bytearray(100))
            s.write(b"over\n")
            s.close()
    def test_nonblocking_send(self):
        """send() on a non-blocking SSL socket must eventually raise
        SSLWantWriteError/SSLWantReadError once buffers fill up."""
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            s.setblocking(False)
            # If we keep sending data, at some point the buffers
            # will be full and the call will block
            buf = bytearray(8192)
            def fill_buffer():
                while True:
                    s.send(buf)
            self.assertRaises((ssl.SSLWantWriteError,
                               ssl.SSLWantReadError), fill_buffer)
            # Now read all the output and discard it
            s.setblocking(True)
            s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        finish = False
        def serve():
            # Accept connections but never handshake, so clients time out.
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                # Wrap an already-connected socket: the handshake itself
                # must honour the 0.2 s timeout.
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                # Same check, but wrapping before connect().
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        server = context.wrap_socket(server, server_side=True)
        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            nonlocal remote, peer
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.recv(1)
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_default_ciphers(self):
        """A handshake with no cipher overlap must fail cleanly."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        try:
            # Force a set of weak ciphers on our client context
            context.set_ciphers("DES")
        except ssl.SSLError:
            self.skipTest("no DES cipher available")
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                chatty=False) as server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaises(OSError):
                    s.connect((HOST, server.port))
        # The server side must have recorded the handshake failure reason.
        self.assertIn("no shared cipher", str(server.conn_errors[0]))
    @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
    def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(CERTFILE)
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias. Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
            context.set_ciphers("ECCdraft:ECDH")
        with ThreadedEchoServer(context=context) as server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                # The negotiated cipher name must mention ECDH.
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # get the data
            cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got channel binding data: {0!r}\n"
                                 .format(cb_data))
            # check if it is sane
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peers version
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(cb_data).encode("us-ascii"))
            s.close()
            # now, again — a fresh handshake must yield fresh binding data
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            new_cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got another channel binding data: {0!r}\n"
                                 .format(new_cb_data))
            # is it really unique
            self.assertNotEqual(cb_data, new_cb_data)
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(new_cb_data).encode("us-ascii"))
            s.close()
    def test_compression(self):
        """Report which TLS compression method (if any) was negotiated."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        if support.verbose:
            sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
        # Depending on the OpenSSL build this is None, ZLIB or RLE.
        self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        # With OP_NO_COMPRESSION set, no compression method may be chosen.
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['compression'], None)
def test_dh_params(self):
    """Check we can get a connection with ephemeral Diffie-Hellman.

    Loads DH parameters, restricts the cipher list to ephemeral-DH key
    exchange ("kEDH") and verifies the negotiated cipher really is a
    DH one.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_cert_chain(CERTFILE)
    context.load_dh_params(DHFILE)
    context.set_ciphers("kEDH")
    stats = server_params_test(context, context,
                               chatty=True, connectionchatty=True)
    cipher = stats["cipher"][0]
    parts = cipher.split("-")
    if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
        # Bug fix: 'cipher' is already the cipher-name string, so the
        # original "cipher[0]" printed only its first character.
        self.fail("Non-DH cipher: " + cipher)
def test_selected_npn_protocol(self):
    # selected_npn_protocol() is None unless NPN is used
    """Without advertising NPN protocols, none should be selected."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_cert_chain(CERTFILE)
    stats = server_params_test(context, context,
                               chatty=True, connectionchatty=True)
    self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
    """Exercise NPN protocol negotiation for several client lists.

    Each tuple is (protocols advertised by the client, protocol the
    negotiation is expected to settle on given the fixed server list).
    """
    server_protocols = ['http/1.1', 'spdy/2']
    protocol_tests = [
        (['http/1.1', 'spdy/2'], 'http/1.1'),
        (['spdy/2', 'http/1.1'], 'http/1.1'),
        (['spdy/2', 'test'], 'spdy/2'),
        (['abc', 'def'], 'abc')
    ]
    for client_protocols, expected in protocol_tests:
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(CERTFILE)
        server_context.set_npn_protocols(server_protocols)
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        client_context.load_cert_chain(CERTFILE)
        client_context.set_npn_protocols(client_protocols)
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True)
        # Both sides must agree on the selected protocol.
        msg = "failed trying %s (s) and %s (c).\n" \
              "was expecting %s, but got %%s from the %%s" \
              % (str(server_protocols), str(client_protocols),
                 str(expected))
        client_result = stats['client_npn_protocol']
        self.assertEqual(client_result, expected, msg % (client_result, "client"))
        server_result = stats['server_npn_protocols'][-1] \
            if len(stats['server_npn_protocols']) else 'nothing'
        self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
    """Build the three contexts used by the SNI tests.

    Returns (server_context, other_context, client_context): the server
    default, an alternate context with a different certificate that the
    servername callback can switch to, and a verifying client.
    """
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    other_context.load_cert_chain(SIGNED_CERTFILE2)
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    client_context.verify_mode = ssl.CERT_REQUIRED
    client_context.load_verify_locations(SIGNING_CA)
    return server_context, other_context, client_context
def check_common_name(self, stats, name):
    """Assert that *name* is the commonName of the peer certificate in *stats*."""
    subject = stats['peercert']['subject']
    self.assertIn((('commonName', name),), subject)
@needs_sni
def test_sni_callback(self):
    """The servername callback sees the SNI name and can swap contexts."""
    calls = []
    server_context, other_context, client_context = self.sni_contexts()

    def servername_cb(ssl_sock, server_name, initial_context):
        # Record every invocation; switch to the alternate certificate
        # whenever a server name was actually sent.
        calls.append((server_name, initial_context))
        if server_name is not None:
            ssl_sock.context = other_context
    server_context.set_servername_callback(servername_cb)

    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='supermessage')
    # The hostname was fetched properly, and the certificate was
    # changed for the connection.
    self.assertEqual(calls, [("supermessage", server_context)])
    # CERTFILE4 was selected
    self.check_common_name(stats, 'fakehostname')

    calls = []
    # The callback is called with server_name=None
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name=None)
    self.assertEqual(calls, [(None, server_context)])
    self.check_common_name(stats, 'localhost')

    # Check disabling the callback
    calls = []
    server_context.set_servername_callback(None)

    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='notfunny')
    # Certificate didn't change
    self.check_common_name(stats, 'localhost')
    self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
    """Returning an alert code from the callback aborts the handshake."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_returning_alert(ssl_sock, server_name, initial_context):
        return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
    server_context.set_servername_callback(cb_returning_alert)

    with self.assertRaises(ssl.SSLError) as cm:
        stats = server_params_test(client_context, server_context,
                                   chatty=False,
                                   sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
    """An exception in the callback fails the handshake and is reported."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_raising(ssl_sock, server_name, initial_context):
        1/0  # deliberate ZeroDivisionError inside the callback
    server_context.set_servername_callback(cb_raising)

    with self.assertRaises(ssl.SSLError) as cm, \
         support.captured_stderr() as stderr:
        stats = server_params_test(client_context, server_context,
                                   chatty=False,
                                   sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
    # The original traceback must have been printed server-side.
    self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
    """A non-int, non-None return value triggers an internal error alert."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_wrong_return_type(ssl_sock, server_name, initial_context):
        return "foo"
    server_context.set_servername_callback(cb_wrong_return_type)

    with self.assertRaises(ssl.SSLError) as cm, \
         support.captured_stderr() as stderr:
        stats = server_params_test(client_context, server_context,
                                   chatty=False,
                                   sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
    # The TypeError raised by the ssl machinery must be reported.
    self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
    """read()/write() on a closed SSL socket must raise ValueError."""
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)

    with server:
        s = context.wrap_socket(socket.socket())
        s.connect((HOST, server.port))
        s.close()

        self.assertRaises(ValueError, s.read, 1024)
        self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
    """Entry point: print build info, check fixtures, run the suites.

    Network-dependent tests run only when the 'network' resource is
    enabled; threaded tests run only when threading is available.
    """
    if support.verbose:
        # Best-effort platform description for the verbose banner.
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    # All certificate fixtures must exist before any suite starts.
    for filename in [
        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [ContextTests, BasicSocketTests, SSLErrorTests]

    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)

    try:
        support.run_unittest(*tests)
    finally:
        # Ensure worker threads are cleaned up even if a suite failed.
        if _have_threads:
            support.threading_cleanup(*thread_info)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_main()
| true | true |
f72fb180675bc7af5ad554ef5a79f7262a0c0838 | 5,272 | py | Python | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from learning_topic/Person.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Person(genpy.Message):
    """Autogenerated genpy message class for learning_topic/Person.

    Wire layout: a length-prefixed UTF-8 'name' string followed by a
    packed little-endian block '<2BHd' (sex: uint8, age: uint8,
    age1: uint16, height: float64).
    """
    _md5sum = "8cf74e85a44e7a35ab62353a46e326a3"
    _type = "learning_topic/Person"
    _has_header = False # flag to mark the presence of a Header object
    _full_text = """string name
uint8 sex
uint8 age
uint8 unknown =0
uint8 male=1
uint8 female=2
uint16 age1
float64 height"""
    # Pseudo-constants (from the .msg constant declarations)
    unknown = 0
    male = 1
    female = 2

    __slots__ = ['name','sex','age','age1','height']
    _slot_types = ['string','uint8','uint8','uint16','float64']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
            name,sex,age,age1,height

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(Person, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.name is None:
                self.name = ''
            if self.sex is None:
                self.sex = 0
            if self.age is None:
                self.age = 0
            if self.age1 is None:
                self.age1 = 0
            if self.height is None:
                self.height = 0.
        else:
            self.name = ''
            self.sex = 0
            self.age = 0
            self.age1 = 0
            self.height = 0.

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self.name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            # length-prefixed string, then the fixed-size numeric tail
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self
            buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        if python3:
            codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.name = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.name = str[start:end]
            _x = self
            start = end
            end += 12
            (_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self.name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self
            buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        if python3:
            codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.name = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.name = str[start:end]
            _x = self
            start = end
            end += 12
            (_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared Struct for the 4-byte little-endian length prefix.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Accessor kept for genpy generated-code conventions."""
    global _struct_I
    return _struct_I
_struct_2BHd = None
def _get_struct_2BHd():
global _struct_2BHd
if _struct_2BHd is None:
_struct_2BHd = struct.Struct("<2BHd")
return _struct_2BHd
| 30.830409 | 145 | 0.628604 |
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Person(genpy.Message):
_md5sum = "8cf74e85a44e7a35ab62353a46e326a3"
_type = "learning_topic/Person"
_has_header = False
_full_text = """string name
uint8 sex
uint8 age
uint8 unknown =0
uint8 male=1
uint8 female=2
uint16 age1
float64 height"""
unknown = 0
male = 1
female = 2
__slots__ = ['name','sex','age','age1','height']
_slot_types = ['string','uint8','uint8','uint16','float64']
def __init__(self, *args, **kwds):
if args or kwds:
super(Person, self).__init__(*args, **kwds)
if self.name is None:
self.name = ''
if self.sex is None:
self.sex = 0
if self.age is None:
self.age = 0
if self.age1 is None:
self.age1 = 0
if self.height is None:
self.height = 0.
else:
self.name = ''
self.sex = 0
self.age = 0
self.age1 = 0
self.height = 0.
def _get_types(self):
return self._slot_types
def serialize(self, buff):
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
_x = self
start = end
end += 12
(_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)
def serialize_numpy(self, buff, numpy):
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
_x = self
start = end
end += 12
(_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2BHd = None
def _get_struct_2BHd():
global _struct_2BHd
if _struct_2BHd is None:
_struct_2BHd = struct.Struct("<2BHd")
return _struct_2BHd
| true | true |
f72fb182d47331d80baf9b9fb6534806b6d38c8b | 1,856 | py | Python | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | from bottle import SimpleTemplate
from bottle import request
from .game import Game
from .player import AIPlayer
from .recorder import save_game, get_stats, get_last_training
# Display names for the two player codes: 'm' = machine (Bob), 'h' = human.
player_names = {
    'm': 'Bob',
    'h': 'You'
}
def render_field(idx, game):
    """Map board cell *idx* of the latest game state to its display code.

    Returns -1 for an empty cell, 0 for player 0's mark, and 1 for any
    other mark (player 1).
    """
    cell = game.states[-1][idx]
    return cell if cell in (-1, 0) else 1
def index():
    """Handle one tic-tac-toe web request and render the board page.

    Restores the game from the posted token (or starts a new one),
    applies the human move if any, lets the AI respond, records
    finished games, and renders the template with board state and
    win/loss statistics.
    """
    token = request.forms.get('game_token')
    game = Game.from_token(token)
    winner = -1
    move = request.forms.get('move')
    if move is not None:
        game.play(int(move))
        winner = game.get_winner()
        if winner == -1:
            # Game still open after the human move: AI plays.
            game.play(AIPlayer().get_move(game))
            winner = game.get_winner()
    if token is None and game.players[0] == 'm':
        # first round and bob has the first move
        game.play(AIPlayer().get_move(game))
    if winner != -1:
        save_game(game)
    #load stats
    stats_h, stats_m, stats_t = get_stats()
    tpl = SimpleTemplate(name="index.tpl", lookup=['./static/web/'])
    return tpl.render(
        content='tictactoe',
        token=game.to_token(),
        winner=winner,
        player0=player_names[game.players[0]],
        player1=player_names[game.players[1]],
        # winner == 2 denotes a draw, so no name is shown then.
        winner_name=player_names[game.players[winner]] if winner != -1 and winner != 2 else "",
        stats_h=stats_h,
        stats_m=stats_m,
        stats_t=stats_t,
        last_train=get_last_training(),
        field0=render_field(0, game),
        field1=render_field(1, game),
        field2=render_field(2, game),
        field3=render_field(3, game),
        field4=render_field(4, game),
        field5=render_field(5, game),
        field6=render_field(6, game),
        field7=render_field(7, game),
        field8=render_field(8, game)
    )
| 25.081081 | 95 | 0.609375 | from bottle import SimpleTemplate
from bottle import request
from .game import Game
from .player import AIPlayer
from .recorder import save_game, get_stats, get_last_training
player_names = {
'm': 'Bob',
'h': 'You'
}
def render_field(idx, game):
current_state = game.states[-1]
if current_state[idx] == -1:
return -1
elif current_state[idx] == 0:
return 0
else:
return 1
def index():
token = request.forms.get('game_token')
game = Game.from_token(token)
winner = -1
move = request.forms.get('move')
if move is not None:
game.play(int(move))
winner = game.get_winner()
if winner == -1:
game.play(AIPlayer().get_move(game))
winner = game.get_winner()
if token is None and game.players[0] == 'm':
game.play(AIPlayer().get_move(game))
if winner != -1:
save_game(game)
stats_h, stats_m, stats_t = get_stats()
tpl = SimpleTemplate(name="index.tpl", lookup=['./static/web/'])
return tpl.render(
content='tictactoe',
token=game.to_token(),
winner=winner,
player0=player_names[game.players[0]],
player1=player_names[game.players[1]],
winner_name=player_names[game.players[winner]] if winner != -1 and winner != 2 else "",
stats_h=stats_h,
stats_m=stats_m,
stats_t=stats_t,
last_train=get_last_training(),
field0=render_field(0, game),
field1=render_field(1, game),
field2=render_field(2, game),
field3=render_field(3, game),
field4=render_field(4, game),
field5=render_field(5, game),
field6=render_field(6, game),
field7=render_field(7, game),
field8=render_field(8, game)
)
| true | true |
f72fb19d379b24571a1791dda2d07bd1524c5a49 | 1,313 | py | Python | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | 3 | 2022-01-07T19:37:03.000Z | 2022-03-15T08:50:28.000Z | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 12:59:37 2021
@author: vxr131730
Author: Venkatraman Renganathan
Email: vrengana@utdallas.edu
Github: https://github.com/venkatramanrenganathan
- Create a configuration file for RRT*. Functions that use RRT* outputs
will use some of these configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import numpy as np
import pickle

# RRT* iteration budget and Monte-Carlo trial bookkeeping.
maxIter = 100
Trial_Num = 1
Trial_Total = 100

# Model dimensions: states, outputs, and their combined size.
num_states = 6
num_outputs = 4
total_dim = num_states + num_outputs

# Estimator choice: 1 -> UKF, 0 -> EKF.
estimatorSelector = 1

# True -> distributionally-robust (DR) risk constraints,
# False -> chance constraints.
DRFlag = True

# Tracking horizon for the car controller.
carTrackHorizon = 4

# True -> dynamic-obstacle simulation, False -> static obstacle.
dynamicObs = True

# Candidate obstacle speeds; velocitySelector indexes into this list.
obsVelocities = [0.10, 0.20]
velocitySelector = 0

# Obstacle speed actually used, derived from the dynamic/static flag.
constantObsVelocity = obsVelocities[velocitySelector] if dynamicObs else 0
| 24.314815 | 80 | 0.677837 |
import numpy as np
import pickle
maxIter = 100
Trial_Num = 1
Trial_Total = 100
num_states = 6
num_outputs = 4
total_dim = num_states + num_outputs
estimatorSelector = 1
DRFlag = True
carTrackHorizon = 4
dynamicObs = True
obsVelocities = [0.10, 0.20]
velocitySelector = 0
if dynamicObs:
constantObsVelocity = obsVelocities[velocitySelector]
else:
constantObsVelocity = 0
| true | true |
f72fb1b068a952dc4984202737257c15d8acf40e | 2,692 | py | Python | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
    """Introductory koan: the assert-based testing style used throughout."""

    def test_assert_truth(self):
        """
        We shall contemplate truth by testing reality, via asserts.
        """
        # Confused? This video should help:
        #
        # http://bit.ly/about_asserts

        # self.assertTrue(False) # This should be True
        self.assertTrue(True) # This should be True

    def test_assert_with_message(self):
        """
        Enlightenment may be more easily achieved with appropriate messages.
        """
        # self.assertTrue(False, "This should be True -- Please fix this")
        self.assertTrue(True, "This should be True -- Please fix this")

    def test_fill_in_values(self):
        """
        Sometimes we will ask you to fill in the values
        """
        # self.assertEqual(__, 1 + 1)
        # https://www.geeksforgeeks.org/python-unittest-assertequal-function/
        self.assertEqual(2, 1 + 1) #asserting that the first argument equals the second one

    def test_assert_equality(self):
        """
        To understand reality, we must compare our expectations against reality.
        """
        expected_value = 2
        actual_value = 1 + 1
        self.assertTrue(expected_value == actual_value)

    def test_a_better_way_of_asserting_equality(self):
        """
        Some ways of asserting equality are better than others.
        """
        # assertEqual reports both values on failure, unlike assertTrue.
        expected_value = 2
        actual_value = 1 + 1
        self.assertEqual(expected_value, actual_value)

    def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
        """
        Understand what lies within.
        """
        # This throws an AssertionError exception
        # assert False

        # This does not throw: a truthy assert passes silently
        assert True

    def test_that_sometimes_we_need_to_know_the_class_type(self):
        """
        What is in a class name?
        """
        # Sometimes we will ask you what the class type of an object is.
        #
        # For example, contemplate the text string "navel". What is its class type?
        # The koans runner will include this feedback for this koan:
        #
        # AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
        #
        # So "navel".__class__ is equal to <type 'str'>? No not quite. This
        # is just what it displays. The answer is simply str.
        #
        # See for yourself:

        self.assertEqual(str, "navel".__class__) # It's str, not <type 'str'>

        # Need an illustration? More reading can be found here:
        #
        # https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
| 31.302326 | 91 | 0.614413 |
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
rue(True)
def test_assert_with_message(self):
self.assertTrue(True, "This should be True -- Please fix this")
def test_fill_in_values(self):
self.assertEqual(2, 1 + 1)
def test_assert_equality(self):
expected_value = 2
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
expected_value = 2
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
assert True
def test_that_sometimes_we_need_to_know_the_class_type(self):
self.assertEqual(str, "navel".__class__)
# Need an illustration? More reading can be found here:
#
# https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
| true | true |
f72fb25142c4d22ce4847d1c4623c584a3ad02c2 | 1,326 | py | Python | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 1 | 2022-03-28T13:11:56.000Z | 2022-03-28T13:11:56.000Z | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 11 | 2021-03-17T17:53:58.000Z | 2021-07-17T17:59:25.000Z | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 2 | 2021-04-05T08:36:35.000Z | 2021-05-20T22:12:12.000Z | import numpy as np
from .bundle import Bundle
def compute_pes(
bundle: Bundle,
carrier_frequency: float,
alpha: float,
eKT: np.ndarray,
) -> Bundle:
"""Compute the simple photoelectron spectroscopy, with Guassian blurring
User is responsible for calculating and assigning properties to the bundle frames:
Dyson Orbitals
Ionization Potential (IP)
Params:
bundle: the Bundle object to compute the property for (modified in
place)
carrier_frequency: experimental probe pulse carrier frequency (hbar*omega)
alpha: the Guassian blurring exponent
eKT: electron energies
Return:
bundle: reference to the input Bundle object. The property
key "pes" is set to computed PES property.
"""
for frame in bundle.frames:
IPs = frame.properties["IP"]
dyson_norms = frame.properties["dyson_norms"]
pes = np.zeros_like(eKT)
for ind, (state, IP) in enumerate(IPs):
dyson_norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]
pes += (
dyson_norm
* np.sqrt(alpha / np.pi)
* np.exp(-alpha * (carrier_frequency - IP - eKT) ** 2)
)
frame.properties["pes"] = pes
return bundle
| 29.466667 | 86 | 0.610106 | import numpy as np
from .bundle import Bundle
def compute_pes(
bundle: Bundle,
carrier_frequency: float,
alpha: float,
eKT: np.ndarray,
) -> Bundle:
for frame in bundle.frames:
IPs = frame.properties["IP"]
dyson_norms = frame.properties["dyson_norms"]
pes = np.zeros_like(eKT)
for ind, (state, IP) in enumerate(IPs):
dyson_norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]
pes += (
dyson_norm
* np.sqrt(alpha / np.pi)
* np.exp(-alpha * (carrier_frequency - IP - eKT) ** 2)
)
frame.properties["pes"] = pes
return bundle
| true | true |
f72fb25f8ffe85e2f2a3357c66821d11c44d4856 | 17,524 | py | Python | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 29 | 2016-12-15T07:14:44.000Z | 2022-03-17T17:24:08.000Z | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 5 | 2017-02-14T02:04:44.000Z | 2019-02-19T19:25:22.000Z | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 12 | 2016-12-16T08:02:45.000Z | 2020-02-20T00:00:58.000Z | #!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
## https://github.com/martinblech/xmltodict
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised to abort streaming parsing when the item callback returns a falsy value."""
class _DictSAXHandler(object):
    """SAX-style handler that folds expat events into (ordered) dicts.

    Maintains a stack of (item, cdata) pairs mirroring element nesting:
    `path` records (name, attrs) from the root down, `item` is the dict
    being built for the current element and `data` collects its text
    chunks. In streaming mode (item_depth > 0) completed items at that
    depth are handed to `item_callback` instead of being accumulated.
    """

    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True,
                 namespace_separator=':',
                 namespaces=None,
                 force_list=None):
        self.path = []
        self.stack = []
        self.data = []
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        self.namespace_declarations = OrderedDict()
        self.force_list = force_list

    def _build_name(self, full_name):
        # Collapse an expat-expanded "uri<sep>local" name using the
        # user-provided namespace shorthand map, if any.
        if not self.namespaces:
            return full_name
        i = full_name.rfind(self.namespace_separator)
        if i == -1:
            return full_name
        namespace, name = full_name[:i], full_name[i+1:]
        short_namespace = self.namespaces.get(namespace, namespace)
        if not short_namespace:
            return name
        else:
            return self.namespace_separator.join((short_namespace, name))

    def _attrs_to_dict(self, attrs):
        # expat may deliver attributes as a flat [k1, v1, k2, v2, ...] list.
        if isinstance(attrs, dict):
            return attrs
        return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))

    def startNamespaceDecl(self, prefix, uri):
        # Buffered until the next startElement attaches them as 'xmlns'.
        self.namespace_declarations[prefix or ''] = uri

    def startElement(self, full_name, attrs):
        name = self._build_name(full_name)
        attrs = self._attrs_to_dict(attrs)
        if attrs and self.namespace_declarations:
            attrs['xmlns'] = self.namespace_declarations
            self.namespace_declarations = OrderedDict()
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            # Below the streaming depth: start building a dict for this
            # element, saving the parent's state on the stack.
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                attr_entries = []
                for key, value in attrs.items():
                    key = self.attr_prefix+self._build_name(key)
                    if self.postprocessor:
                        entry = self.postprocessor(self.path, key, value)
                    else:
                        entry = (key, value)
                    if entry:
                        attr_entries.append(entry)
                attrs = self.dict_constructor(attr_entries)
            else:
                attrs = None
            self.item = attrs or None
            self.data = []

    def endElement(self, full_name):
        name = self._build_name(full_name)
        if len(self.path) == self.item_depth:
            # Streaming mode: hand the completed item to the callback.
            item = self.item
            if item is None:
                item = (None if not self.data
                        else self.cdata_separator.join(self.data))

            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            # Fold the finished element into its parent.
            data = (None if not self.data
                    else self.cdata_separator.join(self.data))
            item = self.item
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            self.item = None
            self.data = []
        self.path.pop()

    def characters(self, data):
        # Text may arrive in multiple chunks; joined in endElement.
        if not self.data:
            self.data = [data]
        else:
            self.data.append(data)

    def push_data(self, item, key, data):
        """Insert (key, data) into item, promoting repeats to a list."""
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            if isinstance(value, list):
                value.append(data)
            else:
                item[key] = [value, data]
        except KeyError:
            if self._should_force_list(key, data):
                item[key] = [data]
            else:
                item[key] = data
        return item

    def _should_force_list(self, key, value):
        # force_list may be a container of keys or a callable predicate.
        if not self.force_list:
            return False
        try:
            return key in self.force_list
        except TypeError:
            return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
namespace_separator=':', disable_entities=True, **kwargs):
"""Parse the given XML input and convert it into a dictionary.
`xml_input` can either be a `string` or a file-like object.
If `xml_attribs` is `True`, element attributes are put in the dictionary
among regular child elements, using `@` as a prefix to avoid collisions. If
set to `False`, they are just ignored.
Simple example::
>>> import xmltodict
>>> doc = xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>
... \"\"\")
>>> doc['a']['@prop']
u'x'
>>> doc['a']['b']
[u'1', u'2']
If `item_depth` is `0`, the function returns a dictionary for the root
element (default behavior). Otherwise, it calls `item_callback` every time
an item at the specified depth is found and returns `None` in the end
(streaming mode).
The callback function receives two parameters: the `path` from the document
root to the item (name-attribs pairs), and the `item` (dict). If the
callback's return value is false-ish, parsing will be stopped with the
:class:`ParsingInterrupted` exception.
Streaming example::
>>> def handle(path, item):
... print('path:%s item:%s' % (path, item))
... return True
...
>>> xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>\"\"\", item_depth=2, item_callback=handle)
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
The optional argument `postprocessor` is a function that takes `path`,
`key` and `value` as positional arguments and returns a new `(key, value)`
pair where both `key` and `value` may have changed. Usage example::
>>> def postprocessor(path, key, value):
... try:
... return key + ':int', int(value)
... except (ValueError, TypeError):
... return key, value
>>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
... postprocessor=postprocessor)
OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
You can pass an alternate version of `expat` (such as `defusedexpat`) by
using the `expat` parameter. E.g:
>>> import defusedexpat
>>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
OrderedDict([(u'a', u'hello')])
You can use the force_list argument to force lists to be created even
when there is only a single child of a given level of hierarchy. The
force_list argument is a tuple of keys. If the key for a given level
of hierarchy is in the force_list argument, that level of hierarchy
will have a list as a child (even if there is only one sub-element).
The index_keys operation takes precendence over this. This is applied
after any user-supplied postprocessor has already run.
For example, given this input:
<servers>
<server>
<name>host1</name>
<os>Linux</os>
<interfaces>
<interface>
<name>em0</name>
<ip_address>10.0.0.1</ip_address>
</interface>
</interfaces>
</server>
</servers>
If called with force_list=('interface',), it will produce
this dictionary:
{'servers':
{'server':
{'name': 'host1',
'os': 'Linux'},
'interfaces':
{'interface':
[ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }
`force_list` can also be a callable that receives `path`, `key` and
`value`. This is helpful in cases where the logic that decides whether
a list should be forced is more complex.
"""
handler = _DictSAXHandler(namespace_separator=namespace_separator,
**kwargs)
if isinstance(xml_input, _unicode):
if not encoding:
encoding = 'utf-8'
xml_input = xml_input.encode(encoding)
if not process_namespaces:
namespace_separator = None
parser = expat.ParserCreate(
encoding,
namespace_separator
)
try:
parser.ordered_attributes = True
except AttributeError:
# Jython's expat does not support ordered_attributes
pass
parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
parser.buffer_text = True
if disable_entities:
try:
# Attempt to disable DTD in Jython's expat parser (Xerces-J).
feature = "http://apache.org/xml/features/disallow-doctype-decl"
parser._reader.setFeature(feature, True)
except AttributeError:
# For CPython / expat parser.
# Anything not handled ends up here and entities aren't expanded.
parser.DefaultHandler = lambda x: None
# Expects an integer return; zero means failure -> expat.ExpatError.
parser.ExternalEntityRefHandler = lambda *x: 1
if hasattr(xml_input, 'read'):
parser.ParseFile(xml_input)
else:
parser.Parse(xml_input, True)
return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
depth=0,
preprocessor=None,
pretty=False,
newl='\n',
indent='\t',
namespace_separator=':',
namespaces=None,
full_document=True):
key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if (not hasattr(value, '__iter__')
or isinstance(value, _basestring)
or isinstance(value, dict)):
value = [value]
for index, v in enumerate(value):
if full_document and depth == 0 and index > 0:
raise ValueError('document with multiple roots')
if v is None:
v = OrderedDict()
elif isinstance(v, bool):
if v:
v = _unicode('true')
else:
v = _unicode('false')
elif not isinstance(v, dict):
v = _unicode(v)
if isinstance(v, _basestring):
v = OrderedDict(((cdata_key, v),))
cdata = None
attrs = OrderedDict()
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
ik = _process_namespace(ik, namespaces, namespace_separator,
attr_prefix)
if ik == '@xmlns' and isinstance(iv, dict):
for k, v in iv.items():
attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
attrs[attr] = _unicode(v)
continue
if not isinstance(iv, _unicode):
iv = _unicode(iv)
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.startElement(key, AttributesImpl(attrs))
if pretty and children:
content_handler.ignorableWhitespace(newl)
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, depth+1, preprocessor,
pretty, newl, indent, namespaces=namespaces,
namespace_separator=namespace_separator)
if cdata is not None:
content_handler.characters(cdata)
if pretty and children:
content_handler.ignorableWhitespace(depth * indent)
content_handler.endElement(key)
if pretty and depth:
content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
short_empty_elements=False,
**kwargs):
"""Emit an XML document for the given `input_dict` (reverse of `parse`).
The resulting XML document is returned as a string, but if `output` (a
file-like object) is specified, it is written there instead.
Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
as XML node attributes, whereas keys equal to `cdata_key`
(default=`'#text'`) are treated as character data.
The `pretty` parameter (default=`False`) enables pretty-printing. In this
mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
can be customized with the `newl` and `indent` parameters.
"""
if full_document and len(input_dict) != 1:
raise ValueError('Document must have exactly one root.')
must_return = False
if output is None:
output = StringIO()
must_return = True
if short_empty_elements:
content_handler = XMLGenerator(output, encoding, True)
else:
content_handler = XMLGenerator(output, encoding)
if full_document:
content_handler.startDocument()
for key, value in input_dict.items():
_emit(key, value, content_handler, full_document=full_document,
**kwargs)
if full_document:
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
if __name__ == '__main__': # pragma: no cover
import sys
import marshal
try:
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
except AttributeError:
stdin = sys.stdin
stdout = sys.stdout
(item_depth,) = sys.argv[1:]
item_depth = int(item_depth)
def handle_item(path, item):
marshal.dump((path, item), stdout)
return True
try:
root = parse(stdin,
item_depth=item_depth,
item_callback=handle_item,
dict_constructor=dict)
if item_depth == 0:
handle_item([], root)
except KeyboardInterrupt:
pass | 37.127119 | 80 | 0.579605 |
t as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_unicode = unicode
except NameError:
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
pass
class _DictSAXHandler(object):
def __init__(self,
item_depth=0,
item_callback=lambda *args: True,
xml_attribs=True,
attr_prefix='@',
cdata_key='#text',
force_cdata=False,
cdata_separator='',
postprocessor=None,
dict_constructor=OrderedDict,
strip_whitespace=True,
namespace_separator=':',
namespaces=None,
force_list=None):
self.path = []
self.stack = []
self.data = []
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
self.namespace_separator = namespace_separator
self.namespaces = namespaces
self.namespace_declarations = OrderedDict()
self.force_list = force_list
def _build_name(self, full_name):
if not self.namespaces:
return full_name
i = full_name.rfind(self.namespace_separator)
if i == -1:
return full_name
namespace, name = full_name[:i], full_name[i+1:]
short_namespace = self.namespaces.get(namespace, namespace)
if not short_namespace:
return name
else:
return self.namespace_separator.join((short_namespace, name))
def _attrs_to_dict(self, attrs):
if isinstance(attrs, dict):
return attrs
return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
def startNamespaceDecl(self, prefix, uri):
self.namespace_declarations[prefix or ''] = uri
def startElement(self, full_name, attrs):
name = self._build_name(full_name)
attrs = self._attrs_to_dict(attrs)
if attrs and self.namespace_declarations:
attrs['xmlns'] = self.namespace_declarations
self.namespace_declarations = OrderedDict()
self.path.append((name, attrs or None))
if len(self.path) > self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attr_entries = []
for key, value in attrs.items():
key = self.attr_prefix+self._build_name(key)
if self.postprocessor:
entry = self.postprocessor(self.path, key, value)
else:
entry = (key, value)
if entry:
attr_entries.append(entry)
attrs = self.dict_constructor(attr_entries)
else:
attrs = None
self.item = attrs or None
self.data = []
def endElement(self, full_name):
name = self._build_name(full_name)
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = (None if not self.data
else self.cdata_separator.join(self.data))
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if len(self.stack):
data = (None if not self.data
else self.cdata_separator.join(self.data))
item = self.item
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = None
self.data = []
self.path.pop()
def characters(self, data):
if not self.data:
self.data = [data]
else:
self.data.append(data)
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
if self._should_force_list(key, data):
item[key] = [data]
else:
item[key] = data
return item
def _should_force_list(self, key, value):
if not self.force_list:
return False
try:
return key in self.force_list
except TypeError:
return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
namespace_separator=':', disable_entities=True, **kwargs):
handler = _DictSAXHandler(namespace_separator=namespace_separator,
**kwargs)
if isinstance(xml_input, _unicode):
if not encoding:
encoding = 'utf-8'
xml_input = xml_input.encode(encoding)
if not process_namespaces:
namespace_separator = None
parser = expat.ParserCreate(
encoding,
namespace_separator
)
try:
parser.ordered_attributes = True
except AttributeError:
pass
parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
parser.buffer_text = True
if disable_entities:
try:
# Attempt to disable DTD in Jython's expat parser (Xerces-J).
feature = "http://apache.org/xml/features/disallow-doctype-decl"
parser._reader.setFeature(feature, True)
except AttributeError:
parser.DefaultHandler = lambda x: None
# Expects an integer return; zero means failure -> expat.ExpatError.
parser.ExternalEntityRefHandler = lambda *x: 1
if hasattr(xml_input, 'read'):
parser.ParseFile(xml_input)
else:
parser.Parse(xml_input, True)
return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='
depth=0,
preprocessor=None,
pretty=False,
newl='\n',
indent='\t',
namespace_separator=':',
namespaces=None,
full_document=True):
key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if (not hasattr(value, '__iter__')
or isinstance(value, _basestring)
or isinstance(value, dict)):
value = [value]
for index, v in enumerate(value):
if full_document and depth == 0 and index > 0:
raise ValueError('document with multiple roots')
if v is None:
v = OrderedDict()
elif isinstance(v, bool):
if v:
v = _unicode('true')
else:
v = _unicode('false')
elif not isinstance(v, dict):
v = _unicode(v)
if isinstance(v, _basestring):
v = OrderedDict(((cdata_key, v),))
cdata = None
attrs = OrderedDict()
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
ik = _process_namespace(ik, namespaces, namespace_separator,
attr_prefix)
if ik == '@xmlns' and isinstance(iv, dict):
for k, v in iv.items():
attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
attrs[attr] = _unicode(v)
continue
if not isinstance(iv, _unicode):
iv = _unicode(iv)
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.startElement(key, AttributesImpl(attrs))
if pretty and children:
content_handler.ignorableWhitespace(newl)
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, depth+1, preprocessor,
pretty, newl, indent, namespaces=namespaces,
namespace_separator=namespace_separator)
if cdata is not None:
content_handler.characters(cdata)
if pretty and children:
content_handler.ignorableWhitespace(depth * indent)
content_handler.endElement(key)
if pretty and depth:
content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
short_empty_elements=False,
**kwargs):
if full_document and len(input_dict) != 1:
raise ValueError('Document must have exactly one root.')
must_return = False
if output is None:
output = StringIO()
must_return = True
if short_empty_elements:
content_handler = XMLGenerator(output, encoding, True)
else:
content_handler = XMLGenerator(output, encoding)
if full_document:
content_handler.startDocument()
for key, value in input_dict.items():
_emit(key, value, content_handler, full_document=full_document,
**kwargs)
if full_document:
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
if __name__ == '__main__': # pragma: no cover
import sys
import marshal
try:
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
except AttributeError:
stdin = sys.stdin
stdout = sys.stdout
(item_depth,) = sys.argv[1:]
item_depth = int(item_depth)
def handle_item(path, item):
marshal.dump((path, item), stdout)
return True
try:
root = parse(stdin,
item_depth=item_depth,
item_callback=handle_item,
dict_constructor=dict)
if item_depth == 0:
handle_item([], root)
except KeyboardInterrupt:
pass | true | true |
f72fb28880a0595528ca930a65ee989250ef37de | 410 | py | Python | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | 1 | 2019-12-19T10:37:50.000Z | 2019-12-19T10:37:50.000Z | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | null | null | null | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | null | null | null | import base64
import json
import time
def lambda_handler(event, context):
print ('start handler')
count = len(event['Records'])
print ('Get record count:')
print (count)
for record in event['Records']:
payload = base64.b64decode(record['kinesis']['data']).decode("utf-8")
print("Payload: " + payload)
time.sleep(1)
return 'Successfully {} records.'.format(count)
| 24.117647 | 77 | 0.643902 | import base64
import json
import time
def lambda_handler(event, context):
print ('start handler')
count = len(event['Records'])
print ('Get record count:')
print (count)
for record in event['Records']:
payload = base64.b64decode(record['kinesis']['data']).decode("utf-8")
print("Payload: " + payload)
time.sleep(1)
return 'Successfully {} records.'.format(count)
| true | true |
f72fb295397c8e42dff827498e5277948595c2fd | 926 | py | Python | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/Stories | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | 1 | 2020-12-29T09:06:37.000Z | 2020-12-29T09:06:37.000Z | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/blog | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | null | null | null | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/blog | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | null | null | null | """modify message table
Revision ID: d2f3d6010615
Revises: fbb3ebcf5f90
Create Date: 2020-12-24 11:56:01.558233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd2f3d6010615'
down_revision = 'fbb3ebcf5f90'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('message', schema=None) as batch_op:
batch_op.add_column(sa.Column('res_id', sa.Integer(), nullable=True))
batch_op.add_column(sa.Column('res_model', sa.String(length=16), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('message', schema=None) as batch_op:
batch_op.drop_column('res_model')
batch_op.drop_column('res_id')
# ### end Alembic commands ###
| 26.457143 | 88 | 0.698704 | from alembic import op
import sqlalchemy as sa
revision = 'd2f3d6010615'
down_revision = 'fbb3ebcf5f90'
branch_labels = None
depends_on = None
def upgrade():
umn('res_model', sa.String(length=16), nullable=True))
| true | true |
f72fb2fc1f4ca1f1629f405f76b905ba489cd3be | 1,136 | py | Python | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successfully(self):
email = 'test@test.com'
password = '12345'
user = get_user_model().objects.create_user(
email=email,
password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '1234')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='1234')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(email='test@test.com',
password='123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 32.457143 | 79 | 0.649648 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successfully(self):
email = 'test@test.com'
password = '12345'
user = get_user_model().objects.create_user(
email=email,
password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '1234')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='1234')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(email='test@test.com',
password='123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| true | true |
f72fb3a7fe0c4c14b9ceafe1c2d3cdb20fb8fa98 | 1,358 | py | Python | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | # coding:utf-8
# author Abdulshaheed Alqunber
# version : 1.0.0
from google_trans_new import google_translator
import markovify as mk
def back_translate(text, language_src="ar", language_dst="zh"):
"""Translate text to a foreign language then translate back to original language to augment data
Parameters:
text (string): non-empty string
original: language of input text, must match the format in this link
https://github.com/lushan88a/google_trans_new/blob/main/constant.py
language: language in which the text is going to be translated to
Returns:
string: the back translated text.
"""
try:
t = google_translator()
# translate to target language
translated_text = t.translate(text.strip(), language_dst)
# translate to orignal language
translated_back = t.translate(translated_text, language_src)
return translated_back
# failed to translate, return original
except:
return text
def markov(document, n):
"""This method uses Markov chains to string together n new sequences of words based on previous sequences.
Parameters:
document (list): list of sentences
Returns:
list: list of new generated sentences
"""
text_model = mk.Text(document)
return [text_model.make_sentence() for i in range(n)]
| 31.581395 | 110 | 0.700295 |
from google_trans_new import google_translator
import markovify as mk
def back_translate(text, language_src="ar", language_dst="zh"):
try:
t = google_translator()
translated_text = t.translate(text.strip(), language_dst)
translated_back = t.translate(translated_text, language_src)
return translated_back
except:
return text
def markov(document, n):
text_model = mk.Text(document)
return [text_model.make_sentence() for i in range(n)]
| true | true |
f72fb5cbe866769c53578f3efd0203eed8351817 | 11,982 | py | Python | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 24 | 2021-10-14T03:28:28.000Z | 2022-03-29T09:30:04.000Z | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-12-14T15:04:49.000Z | 2022-02-19T09:54:42.000Z | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-10-31T11:23:06.000Z | 2021-12-17T06:38:50.000Z | # dataset settings
dataset_type = 'VisualGenomeKRDataset'
data_root = 'data/visualgenomekr/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Training pipeline: load image plus box/relation annotations, apply
# augmentation, normalize, and collect relation-aware ground truth.
train_pipeline = [
    {'type': 'LoadImageFromFile'},
    # Relation triplets (gt_rels) are required for scene-graph supervision.
    {'type': 'LoadAnnotations', 'with_bbox': True, 'with_rel': True},
    {'type': 'Resize', 'img_scale': (1333, 800), 'keep_ratio': True},
    {'type': 'RandomFlip', 'flip_ratio': 0.5},
    {'type': 'Normalize', **img_norm_cfg},
    {'type': 'Pad', 'size_divisor': 32},
    {'type': 'DefaultFormatBundle'},
    {'type': 'Collect',
     'keys': ['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']},
]
# Test pipeline. Ground-truth boxes/labels are still loaded and collected
# because the forward pass may need GT info at test time (e.g. PredCls,
# where relations are predicted on ground-truth objects).
test_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadAnnotations', 'with_bbox': True, 'with_rel': True},
    {
        'type': 'MultiScaleFlipAug',
        'img_scale': (1333, 800),
        'flip': False,
        'transforms': [
            {'type': 'Resize', 'keep_ratio': True},
            {'type': 'RandomFlip'},
            {'type': 'Normalize', **img_norm_cfg},
            {'type': 'Pad', 'size_divisor': 32},
            # NOTE: keep the image as a plain tensor, not a DataContainer.
            {'type': 'ImageToTensor', 'keys': ['img']},
            {'type': 'ToTensor', 'keys': ['gt_bboxes', 'gt_labels']},
            {'type': 'ToDataContainer',
             'fields': ({'key': 'gt_bboxes'}, {'key': 'gt_labels'})},
            {'type': 'Collect', 'keys': ['img', 'gt_bboxes', 'gt_labels']},
        ],
    },
]
# Key-relation test pipeline: identical to `test_pipeline` except that key
# relations are additionally loaded (with_keyrel=True) for KR evaluation.
test_kr_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadAnnotations',
     'with_bbox': True, 'with_rel': True, 'with_keyrel': True},
    {
        'type': 'MultiScaleFlipAug',
        'img_scale': (1333, 800),
        'flip': False,
        'transforms': [
            {'type': 'Resize', 'keep_ratio': True},
            {'type': 'RandomFlip'},
            {'type': 'Normalize', **img_norm_cfg},
            {'type': 'Pad', 'size_divisor': 32},
            # NOTE: keep the image as a plain tensor, not a DataContainer.
            {'type': 'ImageToTensor', 'keys': ['img']},
            {'type': 'ToTensor', 'keys': ['gt_bboxes', 'gt_labels']},
            {'type': 'ToDataContainer',
             'fields': ({'key': 'gt_bboxes'}, {'key': 'gt_labels'})},
            {'type': 'Collect', 'keys': ['img', 'gt_bboxes', 'gt_labels']},
        ],
    },
]
# Data loading configuration. All splits share the same HDF5 roidb, label
# dictionary, and image metadata; they differ in the `split` selector and
# the pipeline applied.
data = dict(
    imgs_per_gpu=8,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=train_pipeline,
        num_im=-1,  # -1 presumably means "use every image" -- confirm in the dataset class
        num_val_im=1000,
        split='train',
        img_prefix=data_root + 'Images/'),
    val=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_pipeline,
        num_im=-1,
        num_val_im=1000,  # same value as train; presumably selects the held-out images
        split='val',
        img_prefix=data_root + 'Images/'),
    test=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_pipeline,
        num_im=-1,
        split='test',
        img_prefix=data_root + 'Images/'),
    # Key-relation test split: same images but restricted to the subset that
    # carries key-relation annotations (split_type='withkey') and run through
    # the keyrel-loading pipeline.
    test_kr=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_kr_pipeline,
        num_im=-1,
        split='test',
        split_type='withkey',
        img_prefix=data_root + 'Images/'))
# model settings
# Shallow copy of the training-set config extended with the path to the
# cached Visual Genome statistics; this is handed to the relation head below.
dataset_config = {**data['train'], 'cache': data_root + 'VG_statistics.cache'}
# Model: Faster R-CNN (ResNeXt-101 64x4d + FPN) detector extended with a
# frozen saliency detector and a Motif relation head for scene-graph
# predicate classification (PredCls: GT boxes and labels are given).
model = dict(
    type='FasterRCNN',
    pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        # 201 classes; presumably 200 VG object categories + background --
        # confirm against VGKR-SGG-dicts.json.
        num_classes=201,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    # Embedded saliency detector, loaded from a finished experiment and kept
    # in eval mode (eval_mode=True), i.e. it is not trained here.
    saliency_detector=dict(
        type='SCRNSaliencyDetector',
        pretrained='./saliency_experiments/SOC_SCRN/latest.pth',
        eval_mode=True,
        backbone=dict(
            type='ResNeXt',
            depth=101,
            groups=64,
            base_width=4,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            style='pytorch')),
    # Scene-graph relation head (Neural Motifs style).
    relation_head=dict(
        type='MotifHead',
        dataset_config=dataset_config,
        num_classes=201,
        # 81 predicates; presumably 80 relation categories + background --
        # confirm against VGKR-SGG-dicts.json.
        num_predicates=81,
        use_bias=True,
        # use_gt_box/use_gt_label=True selects the PredCls protocol: relation
        # prediction on ground-truth object boxes and labels.
        head_config=dict(
            use_gt_box=True,
            use_gt_label=True,
            use_vision=True,
            embed_dim=200,
            hidden_dim=512,
            roi_dim=1024,
            context_pooling_dim=4096,
            dropout_rate=0.2,
            context_object_layer=1,
            context_edge_layer=1,
            glove_dir='data/glove/',
            causal_effect_analysis=False),
        # Per-object feature extractor (visual features only, no spatial).
        bbox_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
            # mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32]),
        # Per-pair (union region) feature extractor; adds spatial features.
        relation_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
            # mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=True,
            separate_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32]),
        relation_sampler=dict(
            type='Motif',
            pos_iou_thr=0.5,
            require_overlap=False,  # overlap not required for SGDet training
            num_sample_per_gt_rel=4,
            num_rel_per_image=128,
            pos_fraction=0.25,
            test_overlap=True  # at test time only overlapping pairs are kept
        ),
        # Relation ranker: scores relation importance, blended with an area
        # term via comb_factor.
        relation_ranker=dict(
            type='LinearRanker',  # LinearRanker-KLDiv 10; LSTMRanker-KLdiv 1000; TransformerRanker 1000
            comb_factor=0.8,
            area_form='rect',
            loss=dict(
                type='KLDivLoss', loss_weight=10),
            input_dim=4096),
        loss_object=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_relation=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50, # Follow the setting in TDE, 80 Bboxes are selected.
mask_thr_binary=0.5,
rle_mask_encode=False, # do not transform the mask into rle.
crop_mask=True, # so that the mask shape is the same as bbox, instead of image shape
format_mask_result=False, # do not transform to the result format like bbox
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='predcls', relation_mode=True, classwise=True, key_first=False)
# optimizer
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'saliency_detector'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './new_experiments/VGKR_Detection_faster_rcnn_x101_64x4d_fpn_1x/latest.pth'
# load_mapping = dict(align_dict={'relation_head.bbox_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs',
# 'relation_head.relation_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs'})
resume_from = None
workflow = [('train', 1), ('val', 1)]
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| 36.867692 | 115 | 0.580204 |
# ---------------------------------------------------------------------------
# mmdetection-style config: Visual Genome key-relation dataset + pipelines.
# ---------------------------------------------------------------------------
dataset_type = 'VisualGenomeKRDataset'
data_root = 'data/visualgenomekr/'
# ImageNet mean/std normalisation, applied after BGR->RGB conversion.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Training pipeline: loads boxes plus relation annotations and collects them.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
# Test pipeline: single scale, no flip.  Ground-truth boxes/labels are kept in
# the collected keys — presumably because predcls evaluation (see the
# `evaluation` setting below) conditions on them.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ])
]
# Same as test_pipeline, but additionally loads key-relation annotations.
test_kr_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_rel=True, with_keyrel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ])
]
# Data loaders: all splits share the same VG-KR annotation files and differ
# only in split name and pipeline.  num_im=-1 appears to mean "use all
# images" — TODO confirm against VisualGenomeKRDataset.
data = dict(
    imgs_per_gpu=8,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=train_pipeline,
        num_im=-1,
        num_val_im=1000,
        split='train',
        img_prefix=data_root + 'Images/'),
    val=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_pipeline,
        num_im=-1,
        num_val_im=1000,
        split='val',
        img_prefix=data_root + 'Images/'),
    test=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_pipeline,
        num_im=-1,
        split='test',
        img_prefix=data_root + 'Images/'),
    # Key-relation test split: same images, key-relation annotations loaded.
    test_kr=dict(
        type=dataset_type,
        roidb_file=data_root + 'VGKR-SGG.h5',
        dict_file=data_root + 'VGKR-SGG-dicts.json',
        image_file=data_root + 'recsize_image_data.json',
        pipeline=test_kr_pipeline,
        num_im=-1,
        split='test',
        split_type='withkey',
        img_prefix=data_root + 'Images/'))
# The relation head needs dataset statistics: reuse the train-split settings
# plus a cached statistics file.
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VG_statistics.cache'))
# Faster R-CNN detector (ResNeXt-101 64x4d + FPN) extended with an embedded
# saliency detector and a Motif scene-graph relation head.
model = dict(
    type='FasterRCNN',
    pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    # num_classes=201: 200 object classes + background.
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=201,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    # Embedded saliency detector loaded from its own checkpoint and run in
    # eval mode (it is also listed in the optimizer's freeze_modules below).
    saliency_detector=dict(
        type='SCRNSaliencyDetector',
        pretrained='./saliency_experiments/SOC_SCRN/latest.pth',
        eval_mode=True,
        backbone=dict(
            type='ResNeXt',
            depth=101,
            groups=64,
            base_width=4,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            style='pytorch')),
    # Motif scene-graph head: use_gt_box/use_gt_label=True matches the
    # predcls evaluation protocol (ground-truth boxes and labels given).
    relation_head=dict(
        type='MotifHead',
        dataset_config=dataset_config,
        num_classes=201,
        num_predicates=81,
        use_bias=True,
        head_config=dict(
            use_gt_box=True,
            use_gt_label=True,
            use_vision=True,
            embed_dim=200,
            hidden_dim=512,
            roi_dim=1024,
            context_pooling_dim=4096,
            dropout_rate=0.2,
            context_object_layer=1,
            context_edge_layer=1,
            glove_dir='data/glove/',
            causal_effect_analysis=False),
        bbox_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32]),
        relation_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=True,
            separate_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32]),
        relation_sampler=dict(
            type='Motif',
            pos_iou_thr=0.5,
            require_overlap=False,
            num_sample_per_gt_rel=4,
            num_rel_per_image=128,
            pos_fraction=0.25,
            test_overlap=True
        ),
        # Ranker scoring relation importance; KLDiv loss on the ranking.
        relation_ranker=dict(
            type='LinearRanker',
            comb_factor=0.8,
            area_form='rect',
            loss=dict(
                type='KLDivLoss', loss_weight=10),
            input_dim=4096),
        loss_object=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_relation=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# Training-time assignment/sampling for the RPN and RCNN stages.
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        mask_size=28,
        pos_weight=-1,
        debug=False))
# Inference-time proposal/NMS settings.  max_per_img=50 caps detections kept
# per image for the relation stage.
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=50,
        mask_thr_binary=0.5,
        rle_mask_encode=False,
        crop_mask=True,
        format_mask_result=False,
        to_tensor=True))
# Needed for DDP because frozen sub-modules produce no gradients.
find_unused_parameters = True
# Evaluate predicate classification (predcls) after every epoch.
evaluation = dict(interval=1, metric='predcls', relation_mode=True, classwise=True, key_first=False)
# Only the relation head is trained: detector and saliency modules frozen.
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001,
                 freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'saliency_detector'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# Step LR schedule (decay at epochs 7 and 10) with linear warmup.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[7, 10])
checkpoint_config = dict(interval=1)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x'
# Warm-start the detector weights from the detection-only experiment.
load_from = './new_experiments/VGKR_Detection_faster_rcnn_x101_64x4d_fpn_1x/latest.pth'
resume_from = None
workflow = [('train', 1), ('val', 1)]
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
        dict(type='WandbLoggerHook',
             init_kwargs=dict(
                 project=work_dir.split('/')[-1],
                 name='train-1',
                 config=work_dir + '/cfg.yaml'))
    ])
| true | true |
f72fb606aa4f0da5563e1b53b0ee4a213e3fad46 | 777 | py | Python | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/library-mathematics | fdb4769a56a8fe35ffefb01a79c03cfca1f91958 | [
"Apache-2.0"
] | 5 | 2018-08-20T06:47:24.000Z | 2019-07-15T03:36:52.000Z | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 6 | 2020-01-05T20:18:18.000Z | 2021-10-14T09:36:44.000Z | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 2 | 2020-03-05T18:18:13.000Z | 2020-04-07T17:42:24.000Z | ################################################################################################################################################################
# @project Open Space Toolkit ▸ Mathematics
# @file bindings/python/tools/python/ostk/mathematics/__init__.py
# @author Lucas Brémond <lucas@loftorbital.com>
# @license Apache License 2.0
################################################################################################################################################################
from ostk.core import *
from .OpenSpaceToolkitMathematicsPy import *
################################################################################################################################################################
| 51.8 | 160 | 0.262548 | true | true | |
f72fb630e3bedcdefa0eb57e63dd067409e8bfcd | 440 | py | Python | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | # -- Imports --------------------------------------------------------------------------
from .core import (
VERSION, TOP_DIR, CONFIG_DIR, LOG_DIR, SRC_DIR, STATIC_DIR, STORAGE_DIR, SYSTEM_CONFIG, SANIC_CONFIG, SERVER_CONFIG,
COMMANDS_DIR, COMMANDS_CONFIG, IP_BLACKLIST_FILE, API_KEY_FILE, SCRIPTS_DIR, system_config, commands, ip_blacklist
)
# -------------------------------------------------------------------------- Imports --
| 48.888889 | 120 | 0.511364 |
from .core import (
VERSION, TOP_DIR, CONFIG_DIR, LOG_DIR, SRC_DIR, STATIC_DIR, STORAGE_DIR, SYSTEM_CONFIG, SANIC_CONFIG, SERVER_CONFIG,
COMMANDS_DIR, COMMANDS_CONFIG, IP_BLACKLIST_FILE, API_KEY_FILE, SCRIPTS_DIR, system_config, commands, ip_blacklist
)
| true | true |
f72fb6db38ad0943de7ba102448dbf868e181c11 | 18,487 | py | Python | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
    """Connection state for a single attached multiworld client."""

    def __init__(self, socket):
        self.socket = socket
        # Unauthenticated until a successful 'Connect' handshake fills in
        # name/team/slot.
        self.auth = False
        self.name = self.team = self.slot = None
        # How many of this player's received items were already pushed out.
        self.send_index = 0
class Context:
    """Shared server state: room configuration, connected clients, progress."""

    def __init__(self, host, port, password):
        # Multidata / save-file bookkeeping.
        self.data_filename = None
        self.save_filename = None
        self.disable_save = False
        # Static room data loaded from the multidata file.
        self.player_names = {}
        self.rom_names = {}
        self.remote_items = set()
        self.locations = {}
        # Network configuration.
        self.host, self.port, self.password = host, port, password
        self.server = None
        # Live, mutable state.
        self.countdown_timer = 0
        self.clients = []
        self.received_items = {}
async def send_msgs(websocket, msgs):
    """JSON-encode *msgs* and send them over *websocket*.

    Silently does nothing when the socket is missing, not open, or already
    closed, and swallows a ConnectionClosed raised mid-send.
    """
    usable = websocket and websocket.open and not websocket.closed
    if not usable:
        return
    payload = json.dumps(msgs)
    try:
        await websocket.send(payload)
    except websockets.ConnectionClosed:
        pass
def broadcast_all(ctx : Context, msgs):
    """Queue *msgs* for delivery to every authenticated client."""
    for target in (c for c in ctx.clients if c.auth):
        asyncio.create_task(send_msgs(target.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
    """Queue *msgs* for every authenticated client on *team*."""
    for target in ctx.clients:
        if not target.auth or target.team != team:
            continue
        asyncio.create_task(send_msgs(target.socket, msgs))
def notify_all(ctx : Context, text):
    """Log a server notice and print it on every connected client."""
    logging.info("Notice (all): %s" % text)
    packet = [['Print', text]]
    broadcast_all(ctx, packet)
def notify_team(ctx : Context, team : int, text : str):
    """Log a notice and print it on every client of one team only."""
    logging.info("Notice (Team #%d): %s" % (team+1, text))
    packet = [['Print', text]]
    broadcast_team(ctx, team, packet)
def notify_client(client : Client, text : str):
    """Print *text* on a single client; no-op for unauthenticated clients."""
    if not client.auth:
        return
    logging.info("Notice (Player %s in team %d): %s" % (client.name, client.team+1, text))
    packet = [['Print', text]]
    asyncio.create_task(send_msgs(client.socket, packet))
async def server(websocket, path, ctx : Context):
    """Per-connection handler: register the client and dispatch its messages.

    Each websocket frame is a JSON list of messages; a message is either
    [cmd] or [cmd, args].  The client is always unregistered on exit,
    whether the socket closed normally or an error occurred.
    """
    client = Client(websocket)
    ctx.clients.append(client)
    try:
        await on_client_connected(ctx, client)
        async for data in websocket:
            for msg in json.loads(data):
                if len(msg) == 1:
                    # BUGFIX: take the command out of the single-element
                    # message instead of passing the list itself, which
                    # process_client_cmd would always reject as InvalidCmd.
                    cmd = msg[0]
                    args = None
                else:
                    cmd = msg[0]
                    args = msg[1]
                await process_client_cmd(ctx, client, cmd, args)
    except Exception as e:
        # Normal socket teardown is silent; anything else is logged.
        if not isinstance(e, websockets.WebSocketException):
            logging.exception(e)
    finally:
        await on_client_disconnected(ctx, client)
        ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
    """Greet a new connection with the room description (RoomInfo)."""
    roster = [(c.team, c.slot, c.name) for c in ctx.clients if c.auth]
    room_info = {
        'password': ctx.password is not None,
        'players': roster
    }
    await send_msgs(client.socket, [['RoomInfo', room_info]])
async def on_client_disconnected(ctx : Context, client : Client):
    """Announce departure, but only for clients that finished joining."""
    if not client.auth:
        return
    await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
    """Broadcast a join announcement for a freshly authenticated client."""
    announcement = "%s (Team #%d) has joined the game" % (client.name, client.team + 1)
    notify_all(ctx, announcement)
async def on_client_left(ctx : Context, client : Client):
    """Broadcast a leave announcement for a departing client."""
    announcement = "%s (Team #%d) has left the game" % (client.name, client.team + 1)
    notify_all(ctx, announcement)
async def countdown(ctx : Context, timer):
    """Run (or re-time) a chat countdown of *timer* seconds."""
    notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
    already_running = bool(ctx.countdown_timer)
    ctx.countdown_timer = timer
    if already_running:
        # A countdown coroutine is already ticking; it will pick up the
        # new value on its next iteration.
        return
    while ctx.countdown_timer > 0:
        notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
        ctx.countdown_timer -= 1
        await asyncio.sleep(1)
    notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx: "Context"):
    """Return a one-line summary of connected players grouped by team.

    Teams are listed in ascending order with 1-based numbering; returns
    'No player connected' when nobody has authenticated.
    """
    auth_clients = [c for c in ctx.clients if c.auth]
    if not auth_clients:
        return 'No player connected'
    auth_clients.sort(key=lambda c: (c.team, c.slot))
    parts = []
    current_team = None
    for c in auth_clients:
        if c.team != current_team:
            # BUGFIX: label each team as it first appears instead of always
            # starting with 'Team #1:' (which printed an empty team when no
            # team-0 player was connected).
            parts.append(f':: Team #{c.team + 1}: ' if parts else f'Team #{c.team + 1}: ')
            current_team = c.team
        parts.append(f'{c.name} ')
    # [:-1] strips the trailing space after the last name.
    return 'Connected players: ' + ''.join(parts)[:-1]
def get_received_items(ctx : Context, team, player):
    """Return (creating if needed) the received-item list for a player slot."""
    key = (team, player)
    return ctx.received_items.setdefault(key, [])
def tuplize_received_items(items):
    """Convert ReceivedItem objects into JSON-friendly (item, location, player) tuples."""
    return [(received.item, received.location, received.player) for received in items]
def send_new_items(ctx : Context):
    """Push any not-yet-delivered items to every authenticated client."""
    for client in ctx.clients:
        if not client.auth:
            continue
        items = get_received_items(ctx, client.team, client.slot)
        total = len(items)
        if total <= client.send_index:
            continue
        # Only the slice the client has not seen yet is transmitted.
        fresh = tuplize_received_items(items)[client.send_index:]
        packet = [['ReceivedItems', (client.send_index, fresh)]]
        asyncio.create_task(send_msgs(client.socket, packet))
        client.send_index = total
def forfeit_player(ctx : Context, team, slot):
    """Mark every location as checked for a player, releasing all their items."""
    all_locations = []
    for values in Regions.location_table.values():
        # Only entries with a numeric address are real, checkable locations.
        if type(values[0]) is int:
            all_locations.append(values[0])
    notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
    register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
    """Record location checks for (team, slot) and distribute found items.

    For every checked location whose item belongs to another player (or to
    this player when the slot uses remote items), a ReceivedItem is appended
    to the target player's queue exactly once; duplicates are skipped.  New
    items are pushed to clients and the multisave is rewritten.
    """
    found_items = False
    for location in locations:
        if (location, slot) in ctx.locations:
            target_item, target_player = ctx.locations[(location, slot)]
            if target_player != slot or slot in ctx.remote_items:
                # Deduplicate: skip if this exact send was already recorded.
                found = False
                recvd_items = get_received_items(ctx, team, target_player)
                for recvd_item in recvd_items:
                    if recvd_item.location == location and recvd_item.player == slot:
                        found = True
                        break
                if not found:
                    new_item = ReceivedItem(target_item, location, slot)
                    recvd_items.append(new_item)
                    if slot != target_player:
                        broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
                    logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
                    found_items = True
    send_new_items(ctx)
    if found_items and not ctx.disable_save:
        # Persist progress: (rom->slot mapping, received items) compressed
        # with zlib, mirroring the load path in main().
        try:
            with open(ctx.save_filename, "wb") as f:
                jsonstr = json.dumps((list(ctx.rom_names.items()),
                                      [(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
                f.write(zlib.compress(jsonstr.encode("utf-8")))
        except Exception as e:
            logging.exception(e)
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
    """Dispatch one protocol command from a client.

    Handles 'Connect' for any client; 'Sync', 'LocationChecks',
    'LocationScouts' and 'Say' only after authentication.  Malformed
    commands/arguments are answered with InvalidCmd/InvalidArguments.
    """
    if type(cmd) is not str:
        await send_msgs(client.socket, [['InvalidCmd']])
        return
    if cmd == 'Connect':
        if not args or type(args) is not dict or \
           'password' not in args or type(args['password']) not in [str, type(None)] or \
           'rom' not in args or type(args['rom']) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
            return
        errors = set()
        if ctx.password is not None and args['password'] != ctx.password:
            errors.add('InvalidPassword')
        if tuple(args['rom']) not in ctx.rom_names:
            errors.add('InvalidRom')
        else:
            team, slot = ctx.rom_names[tuple(args['rom'])]
            # Each (team, slot) can only be held by one live connection.
            if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
                errors.add('SlotAlreadyTaken')
            else:
                client.name = ctx.player_names[(team, slot)]
                client.team = team
                client.slot = slot
        if errors:
            await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
        else:
            client.auth = True
            reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
            # Replay the full received-item backlog on (re)connect.
            items = get_received_items(ctx, client.team, client.slot)
            if items:
                reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
                client.send_index = len(items)
            await send_msgs(client.socket, reply)
            await on_client_joined(ctx, client)
    if not client.auth:
        return
    if cmd == 'Sync':
        items = get_received_items(ctx, client.team, client.slot)
        if items:
            client.send_index = len(items)
            await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])
    if cmd == 'LocationChecks':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
            return
        register_location_checks(ctx, client.team, client.slot, args)
    if cmd == 'LocationScouts':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
            return
        locs = []
        for location in args:
            # BUGFIX: the original chained comparison `0 >= location >
            # len(...)` can never be true, so out-of-range ids slipped
            # through validation and crashed the lookups below.
            if type(location) is not int or not 0 < location <= len(Regions.location_table):
                await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
                return
            loc_name = list(Regions.location_table.keys())[location - 1]
            target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]
            # Dungeon-specific items are reported as their generic sprite id.
            replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
            item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
            if item_type:
                target_item = replacements.get(item_type[0], target_item)
            locs.append([loc_name, location, target_item, target_player])
        logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
        await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])
    if cmd == 'Say':
        if type(args) is not str or not args.isprintable():
            await send_msgs(client.socket, [['InvalidArguments', 'Say']])
            return
        notify_all(ctx, client.name + ': ' + args)
        # Chat-triggered commands available to every player.
        if args.startswith('!players'):
            notify_all(ctx, get_connected_players_string(ctx))
        if args.startswith('!forfeit'):
            forfeit_player(ctx, client.team, client.slot)
        if args.startswith('!countdown'):
            try:
                timer = int(args.split()[1])
            except (IndexError, ValueError):
                timer = 10
            asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
    """Set (or clear, when *password* is None) the room password and log it."""
    ctx.password = password
    if password is not None:
        logging.warning('Password set to ' + password)
    else:
        logging.warning('Password disabled')
async def console(ctx : Context):
    """Interactive admin console: read commands from stdin until /exit.

    Recognised commands: /exit, /players, /password, /kick, /forfeitslot,
    /forfeitplayer, /senditem, /hint.  Any non-command line is broadcast to
    all clients as a server message.
    """
    while True:
        # Avoid shadowing the builtin input(); keep the raw line for
        # commands that need the unsplit text.
        raw = await aioconsole.ainput()
        try:
            command = shlex.split(raw)
            if not command:
                continue
            if command[0] == '/exit':
                ctx.server.ws_server.close()
                break
            if command[0] == '/players':
                logging.info(get_connected_players_string(ctx))
            if command[0] == '/password':
                set_password(ctx, command[1] if len(command) > 1 else None)
            if command[0] == '/kick' and len(command) > 1:
                # Optional third argument restricts the kick to one team.
                team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
                for client in ctx.clients:
                    if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
                        if client.socket and not client.socket.closed:
                            await client.socket.close()
            if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
                if len(command) > 2 and command[2].isdigit():
                    team = int(command[1]) - 1
                    slot = int(command[2])
                else:
                    team = 0
                    slot = int(command[1])
                forfeit_player(ctx, team, slot)
            if command[0] == '/forfeitplayer' and len(command) > 1:
                seeked_player = command[1].lower()
                for (team, slot), name in ctx.player_names.items():
                    if name.lower() == seeked_player:
                        forfeit_player(ctx, team, slot)
            if command[0] == '/senditem' and len(command) > 2:
                # Item names may contain spaces, so parse from the raw line.
                [(player, item)] = re.findall(r'\S* (\S*) (.*)', raw)
                if item in Items.item_table:
                    for client in ctx.clients:
                        if client.auth and client.name.lower() == player.lower():
                            new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
                            get_received_items(ctx, client.team, client.slot).append(new_item)
                            notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
                    send_new_items(ctx)
                else:
                    logging.warning("Unknown item: " + item)
            if command[0] == '/hint':
                if len(command) == 1:
                    # BUGFIX: print the usage text once, not once per player.
                    print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
                else:
                    for (team, slot), name in ctx.player_names.items():
                        if name.lower() == command[1].lower():
                            item = " ".join(command[2:])
                            if item in Items.item_table:
                                seeked_item_id = Items.item_table[item][3]
                                for check, result in ctx.locations.items():
                                    item_id, receiving_player = result
                                    if receiving_player == slot and item_id == seeked_item_id:
                                        location_id, finding_player = check
                                        name_finder = ctx.player_names[team, finding_player]
                                        hint = f"[Hint]: {name}'s {item} can be found at " \
                                               f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
                                        notify_team(ctx, team, hint)
                            else:
                                logging.warning("Unknown item: " + item)
            if command[0][0] != '/':
                notify_all(ctx, '[Server]: ' + raw)
        except Exception:
            # BUGFIX: the bare 'except:' here also swallowed KeyboardInterrupt
            # and SystemExit; keep the console alive only for ordinary errors.
            import traceback
            traceback.print_exc()
async def main():
    """Parse arguments, load multidata (and any save), then host the room."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default=None)
    parser.add_argument('--port', default=38281, type=int)
    parser.add_argument('--password', default=None)
    parser.add_argument('--multidata', default=None)
    parser.add_argument('--savefile', default=None)
    parser.add_argument('--disable_save', default=False, action='store_true')
    parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
    args = parser.parse_args()
    logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
    ctx = Context(args.host, args.port, args.password)
    ctx.data_filename = args.multidata
    try:
        if not ctx.data_filename:
            # No --multidata given: fall back to a GUI file picker.
            import tkinter
            import tkinter.filedialog
            root = tkinter.Tk()
            root.withdraw()
            ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))
        # Multidata is a zlib-compressed JSON blob produced by the generator.
        with open(ctx.data_filename, 'rb') as f:
            jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
            for team, names in enumerate(jsonobj['names']):
                for player, name in enumerate(names, 1):
                    ctx.player_names[(team, player)] = name
            ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
            ctx.remote_items = set(jsonobj['remote_items'])
            ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
    except Exception as e:
        logging.error('Failed to read multiworld data (%s)' % e)
        return
    ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
    logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))
    ctx.disable_save = args.disable_save
    if not ctx.disable_save:
        # BUGFIX: --savefile was parsed but never used; honour it before
        # falling back to the name derived from the multidata filename.
        ctx.save_filename = args.savefile
        if not ctx.save_filename:
            ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
        try:
            with open(ctx.save_filename, 'rb') as f:
                jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
                rom_names = jsonobj[0]
                received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
                # Refuse a save produced for a different set of roms/slots.
                if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
                    raise Exception('Save file mismatch, will start a new game')
                ctx.received_items = received_items
                logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
        except FileNotFoundError:
            logging.error('No save data found, starting a new game')
        except Exception as e:
            logging.info(e)
    ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
    await ctx.server
    await console(ctx)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    # Drain any tasks still pending (countdowns, queued sends) before closing.
    # BUGFIX: asyncio.Task.all_tasks() was deprecated in 3.7 and removed in
    # Python 3.9; asyncio.all_tasks(loop) is the supported replacement.
    loop.run_until_complete(asyncio.gather(*asyncio.all_tasks(loop)))
    loop.close()
| 42.793981 | 230 | 0.584411 | import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
    """Per-connection state: socket, auth status, identity and send cursor."""
    def __init__(self, socket):
        self.socket = socket
        self.auth = False           # True after a successful 'Connect'
        self.name = None
        self.team = None
        self.slot = None
        self.send_index = 0         # count of items already pushed out
class Context:
    """Shared server state: room configuration, connected clients, progress."""
    def __init__(self, host, port, password):
        # Multidata / save-file bookkeeping.
        self.data_filename = None
        self.save_filename = None
        self.disable_save = False
        # Static room data loaded from the multidata file.
        self.player_names = {}
        self.rom_names = {}
        self.remote_items = set()
        self.locations = {}
        # Network configuration.
        self.host = host
        self.port = port
        self.password = password
        self.server = None
        # Live, mutable state.
        self.countdown_timer = 0
        self.clients = []
        self.received_items = {}
async def send_msgs(websocket, msgs):
    """JSON-encode and send *msgs*; no-op on missing/closed sockets."""
    if not websocket or not websocket.open or websocket.closed:
        return
    try:
        await websocket.send(json.dumps(msgs))
    except websockets.ConnectionClosed:
        pass
def broadcast_all(ctx : Context, msgs):
    """Queue *msgs* for delivery to every authenticated client."""
    for client in ctx.clients:
        if client.auth:
            asyncio.create_task(send_msgs(client.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
    """Queue *msgs* for every authenticated client on *team*."""
    for client in ctx.clients:
        if client.auth and client.team == team:
            asyncio.create_task(send_msgs(client.socket, msgs))
def notify_all(ctx : Context, text):
    """Log a server notice and print it on every connected client."""
    logging.info("Notice (all): %s" % text)
    broadcast_all(ctx, [['Print', text]])
def notify_team(ctx : Context, team : int, text : str):
    """Log a notice and print it on one team only."""
    logging.info("Notice (Team #%d): %s" % (team+1, text))
    broadcast_team(ctx, team, [['Print', text]])
def notify_client(client : Client, text : str):
    """Print *text* on a single client; no-op for unauthenticated clients."""
    if not client.auth:
        return
    logging.info("Notice (Player %s in team %d): %s" % (client.name, client.team+1, text))
    asyncio.create_task(send_msgs(client.socket, [['Print', text]]))
async def server(websocket, path, ctx : Context):
client = Client(websocket)
ctx.clients.append(client)
try:
await on_client_connected(ctx, client)
async for data in websocket:
for msg in json.loads(data):
if len(msg) == 1:
cmd = msg
args = None
else:
cmd = msg[0]
args = msg[1]
await process_client_cmd(ctx, client, cmd, args)
except Exception as e:
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
finally:
await on_client_disconnected(ctx, client)
ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
    """Greet a newly connected socket with the room information."""
    # Roster of everyone who has already authenticated. A distinct loop
    # variable avoids shadowing the `client` parameter.
    roster = [(c.team, c.slot, c.name) for c in ctx.clients if c.auth]
    room_info = {
        'password': ctx.password is not None,
        'players': roster
    }
    await send_msgs(client.socket, [['RoomInfo', room_info]])
async def on_client_disconnected(ctx : Context, client : Client):
    # Only announce departures of clients that actually joined the game.
    if client.auth:
        await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
    """Announce a successful join to everyone in the room."""
    notify_all(ctx, f"{client.name} (Team #{client.team + 1}) has joined the game")
async def on_client_left(ctx : Context, client : Client):
    """Announce a departure to everyone in the room."""
    notify_all(ctx, f"{client.name} (Team #{client.team + 1}) has left the game")
async def countdown(ctx : Context, timer):
    """Run a shared countdown, announcing each remaining second to all players."""
    notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
    if ctx.countdown_timer:
        # A countdown is already ticking; just reset its remaining time and
        # let the running loop below continue announcing.
        ctx.countdown_timer = timer
        return
    ctx.countdown_timer = timer
    while ctx.countdown_timer > 0:
        notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
        ctx.countdown_timer -= 1
        await asyncio.sleep(1)
    notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx : Context):
    """Return a human-readable roster of authenticated players, grouped by team."""
    auth_clients = [c for c in ctx.clients if c.auth]
    if not auth_clients:
        return 'No player connected'
    auth_clients.sort(key=lambda c: (c.team, c.slot))
    current_team = 0
    # NOTE(review): the 'Team #1: ' header is emitted even when the first
    # listed team is not team 0 — confirm this is intended.
    text = 'Team #1: '
    for c in auth_clients:
        if c.team != current_team:
            text += f':: Team #{c.team + 1}: '
            current_team = c.team
        text += f'{c.name} '
    # Strip the trailing space added after the last name.
    return 'Connected players: ' + text[:-1]
def get_received_items(ctx : Context, team, player):
    # Lazily create the per-(team, player) received-item list on first access.
    return ctx.received_items.setdefault((team, player), [])
def tuplize_received_items(items):
    """Flatten ReceivedItem objects into JSON-serializable (item, location, player) tuples."""
    return [(entry.item, entry.location, entry.player) for entry in items]
def send_new_items(ctx : Context):
    """Push any not-yet-delivered items to every authenticated client."""
    for client in ctx.clients:
        if not client.auth:
            continue
        items = get_received_items(ctx, client.team, client.slot)
        if len(items) > client.send_index:
            # Send only the tail the client has not seen, tagged with its offset.
            asyncio.create_task(send_msgs(client.socket, [['ReceivedItems', (client.send_index, tuplize_received_items(items)[client.send_index:])]]))
            client.send_index = len(items)
def forfeit_player(ctx : Context, team, slot):
    """Release a player's remaining items by registering every location as checked."""
    # Integer first entries in Regions.location_table are real location addresses.
    all_locations = [values[0] for values in Regions.location_table.values() if type(values[0]) is int]
    notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
    register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
    """Record newly checked locations for (team, slot), queue the found items
    for their owners, and persist progress to the save file."""
    found_items = False
    for location in locations:
        if (location, slot) in ctx.locations:
            target_item, target_player = ctx.locations[(location, slot)]
            # Items destined for other players — and "remote" own items —
            # are delivered through the server.
            if target_player != slot or slot in ctx.remote_items:
                found = False
                recvd_items = get_received_items(ctx, team, target_player)
                # Skip items already delivered from this location by this finder.
                for recvd_item in recvd_items:
                    if recvd_item.location == location and recvd_item.player == slot:
                        found = True
                        break
                if not found:
                    new_item = ReceivedItem(target_item, location, slot)
                    recvd_items.append(new_item)
                    if slot != target_player:
                        broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
                    logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
                    found_items = True
    send_new_items(ctx)
    if found_items and not ctx.disable_save:
        # Persist rom identities plus all received items, zlib-compressed JSON.
        try:
            with open(ctx.save_filename, "wb") as f:
                jsonstr = json.dumps((list(ctx.rom_names.items()),
                                      [(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
                f.write(zlib.compress(jsonstr.encode("utf-8")))
        except Exception as e:
            logging.exception(e)
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
    """Dispatch one protocol command from a client.

    'Connect' is the only command honored before authentication; everything
    else is ignored until the client has successfully connected.
    """
    if type(cmd) is not str:
        await send_msgs(client.socket, [['InvalidCmd']])
        return

    if cmd == 'Connect':
        # Validate the payload shape before touching any server state.
        if not args or type(args) is not dict or \
           'password' not in args or type(args['password']) not in [str, type(None)] or \
           'rom' not in args or type(args['rom']) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
            return
        errors = set()
        if ctx.password is not None and args['password'] != ctx.password:
            errors.add('InvalidPassword')
        if tuple(args['rom']) not in ctx.rom_names:
            errors.add('InvalidRom')
        else:
            team, slot = ctx.rom_names[tuple(args['rom'])]
            # Each (team, slot) may be occupied by at most one client.
            if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
                errors.add('SlotAlreadyTaken')
            else:
                client.name = ctx.player_names[(team, slot)]
                client.team = team
                client.slot = slot
        if errors:
            await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
        else:
            client.auth = True
            reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
            items = get_received_items(ctx, client.team, client.slot)
            if items:
                reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
                client.send_index = len(items)
            await send_msgs(client.socket, reply)
            await on_client_joined(ctx, client)

    if not client.auth:
        return

    if cmd == 'Sync':
        items = get_received_items(ctx, client.team, client.slot)
        if items:
            client.send_index = len(items)
            await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])

    if cmd == 'LocationChecks':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
            return
        register_location_checks(ctx, client.team, client.slot, args)

    if cmd == 'LocationScouts':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
            return
        locs = []
        for location in args:
            # Bug fix: the original range test `0 >= location > len(...)` can
            # never be true, so out-of-range ids slipped through to an
            # IndexError below. Valid ids are 1..len(location_table).
            if type(location) is not int or location < 1 or location > len(Regions.location_table):
                await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
                return
            loc_name = list(Regions.location_table.keys())[location - 1]
            target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]
            # Dungeon-specific items are reported as their generic sprite ids.
            replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
            item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
            if item_type:
                target_item = replacements.get(item_type[0], target_item)
            locs.append([loc_name, location, target_item, target_player])
        logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
        await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])

    if cmd == 'Say':
        if type(args) is not str or not args.isprintable():
            await send_msgs(client.socket, [['InvalidArguments', 'Say']])
            return
        notify_all(ctx, client.name + ': ' + args)
        # In-chat commands available to any player.
        if args.startswith('!players'):
            notify_all(ctx, get_connected_players_string(ctx))
        if args.startswith('!forfeit'):
            forfeit_player(ctx, client.team, client.slot)
        if args.startswith('!countdown'):
            try:
                timer = int(args.split()[1])
            except (IndexError, ValueError):
                timer = 10
            asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
    """Set (or clear, with None) the room password and log the change."""
    ctx.password = password
    if password is not None:
        logging.warning('Password set to ' + password)
    else:
        logging.warning('Password disabled')
async def console(ctx : Context):
    """Interactive admin console: read commands from stdin until /exit.

    Lines not starting with '/' are broadcast as server chat.
    """
    while True:
        # Renamed from `input`, which shadowed the builtin.
        user_input = await aioconsole.ainput()
        try:
            command = shlex.split(user_input)
            if not command:
                continue
            if command[0] == '/exit':
                ctx.server.ws_server.close()
                break
            if command[0] == '/players':
                logging.info(get_connected_players_string(ctx))
            if command[0] == '/password':
                set_password(ctx, command[1] if len(command) > 1 else None)
            if command[0] == '/kick' and len(command) > 1:
                # Optional third argument selects a team (1-based on the CLI).
                team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
                for client in ctx.clients:
                    if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
                        if client.socket and not client.socket.closed:
                            await client.socket.close()
            if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
                if len(command) > 2 and command[2].isdigit():
                    team = int(command[1]) - 1
                    slot = int(command[2])
                else:
                    # Single argument: slot on team 0.
                    team = 0
                    slot = int(command[1])
                forfeit_player(ctx, team, slot)
            if command[0] == '/forfeitplayer' and len(command) > 1:
                seeked_player = command[1].lower()
                for (team, slot), name in ctx.player_names.items():
                    if name.lower() == seeked_player:
                        forfeit_player(ctx, team, slot)
            if command[0] == '/senditem' and len(command) > 2:
                # Re-parse the raw line so multi-word item names survive.
                [(player, item)] = re.findall(r'\S* (\S*) (.*)', user_input)
                if item in Items.item_table:
                    for client in ctx.clients:
                        if client.auth and client.name.lower() == player.lower():
                            new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
                            get_received_items(ctx, client.team, client.slot).append(new_item)
                            notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
                    send_new_items(ctx)
                else:
                    logging.warning("Unknown item: " + item)
            if command[0] == '/hint':
                if len(command) == 1:
                    # Bug fix: the usage text used to be printed once per known
                    # player; show it a single time instead.
                    print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
                else:
                    for (team, slot), name in ctx.player_names.items():
                        if name.lower() == command[1].lower():
                            item = " ".join(command[2:])
                            if item in Items.item_table:
                                seeked_item_id = Items.item_table[item][3]
                                for check, result in ctx.locations.items():
                                    item_id, receiving_player = result
                                    if receiving_player == slot and item_id == seeked_item_id:
                                        location_id, finding_player = check
                                        name_finder = ctx.player_names[team, finding_player]
                                        hint = f"[Hint]: {name}'s {item} can be found at " \
                                               f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
                                        notify_team(ctx, team, hint)
                            else:
                                logging.warning("Unknown item: " + item)
            if command[0][0] != '/':
                notify_all(ctx, '[Server]: ' + user_input)
        except Exception:
            # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; catch only real errors and keep the console alive.
            import traceback
            traceback.print_exc()
async def main():
    """Parse CLI options, load multiworld data and any save file, then run
    the websocket server and the admin console until shutdown."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default=None)
    parser.add_argument('--port', default=38281, type=int)
    parser.add_argument('--password', default=None)
    parser.add_argument('--multidata', default=None)
    parser.add_argument('--savefile', default=None)
    parser.add_argument('--disable_save', default=False, action='store_true')
    parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
    args = parser.parse_args()

    logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))

    ctx = Context(args.host, args.port, args.password)
    ctx.data_filename = args.multidata
    # Bug fix: --savefile was parsed but never applied, so a custom save path
    # was silently ignored and the default derived from the multidata file
    # name was always used.
    ctx.save_filename = args.savefile

    try:
        if not ctx.data_filename:
            # No --multidata given: ask for the file with a GUI picker.
            import tkinter
            import tkinter.filedialog
            root = tkinter.Tk()
            root.withdraw()
            ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))

        with open(ctx.data_filename, 'rb') as f:
            jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
            for team, names in enumerate(jsonobj['names']):
                for player, name in enumerate(names, 1):
                    ctx.player_names[(team, player)] = name
            ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
            ctx.remote_items = set(jsonobj['remote_items'])
            ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
    except Exception as e:
        logging.error('Failed to read multiworld data (%s)' % e)
        return

    # Resolve the public address; fall back to an external echo service when
    # no --host was given.
    ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
    logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))

    ctx.disable_save = args.disable_save
    if not ctx.disable_save:
        if not ctx.save_filename:
            # Default save path: swap the 'multidata' suffix for 'multisave'.
            ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
        try:
            with open(ctx.save_filename, 'rb') as f:
                jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
                rom_names = jsonobj[0]
                received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
                # Refuse a save that belongs to a different multiworld.
                if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
                    raise Exception('Save file mismatch, will start a new game')
                ctx.received_items = received_items
                logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
        except FileNotFoundError:
            logging.error('No save data found, starting a new game')
        except Exception as e:
            logging.info(e)

    ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
    await ctx.server
    await console(ctx)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    # Let still-pending tasks (broadcast sends, countdowns) finish before exit.
    # NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9; this
    # requires an older runtime or a switch to asyncio.all_tasks().
    loop.run_until_complete(asyncio.gather(*asyncio.Task.all_tasks()))
    loop.close()
| true | true |
f72fb70520827d455a85f96859c23ec6801cf6f3 | 5,184 | py | Python | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 1 | 2021-11-16T00:58:43.000Z | 2021-11-16T00:58:43.000Z | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 16 | 2021-05-17T19:24:47.000Z | 2021-12-15T13:57:34.000Z | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 1 | 2021-05-12T19:11:52.000Z | 2021-05-12T19:11:52.000Z | import json
import pytest
from ..entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
MediumEntryPoint,
)
from ..external_search import Filter
from ..model import Edition, Work
from ..testing import DatabaseTest
class TestEntryPoint(DatabaseTest):
    """Tests for the EntryPoint registry and the built-in entry points."""
    def test_defaults(self):
        # The three built-in entry points are registered in a fixed order.
        everything, ebooks, audiobooks = EntryPoint.ENTRY_POINTS
        assert EverythingEntryPoint == everything
        assert EbooksEntryPoint == ebooks
        assert AudiobooksEntryPoint == audiobooks
        display = EntryPoint.DISPLAY_TITLES
        assert "eBooks" == display[ebooks]
        assert "Audiobooks" == display[audiobooks]
        assert Edition.BOOK_MEDIUM == EbooksEntryPoint.INTERNAL_NAME
        assert Edition.AUDIO_MEDIUM == AudiobooksEntryPoint.INTERNAL_NAME
        assert "http://schema.org/CreativeWork" == everything.URI
        # Each medium-specific entry point advertises its medium's schema.org type.
        for ep in (EbooksEntryPoint, AudiobooksEntryPoint):
            assert ep.URI == Edition.medium_to_additional_type[ep.INTERNAL_NAME]
    def test_no_changes(self):
        # EntryPoint doesn't modify queries or search filters.
        qu = self._db.query(Edition)
        assert qu == EntryPoint.modify_database_query(self._db, qu)
        args = dict(arg="value")
        filter = object()
        assert filter == EverythingEntryPoint.modify_search_filter(filter)
    def test_register(self):
        class Mock(object):
            pass
        args = [Mock, "Mock!"]
        # Registration fails without an INTERNAL_NAME on the class.
        with pytest.raises(ValueError) as excinfo:
            EntryPoint.register(*args)
        assert "must define INTERNAL_NAME" in str(excinfo.value)
        # Test successful registration.
        Mock.INTERNAL_NAME = "a name"
        EntryPoint.register(*args)
        assert Mock in EntryPoint.ENTRY_POINTS
        assert "Mock!" == EntryPoint.DISPLAY_TITLES[Mock]
        assert Mock not in EntryPoint.DEFAULT_ENABLED
        # Can't register twice.
        with pytest.raises(ValueError) as excinfo:
            EntryPoint.register(*args)
        assert "Duplicate entry point internal name: a name" in str(excinfo.value)
        EntryPoint.unregister(Mock)
        # Test successful registration as a default-enabled entry point.
        EntryPoint.register(*args, default_enabled=True)
        assert Mock in EntryPoint.DEFAULT_ENABLED
        # Can't register two different entry points with the same
        # display name.
        class Mock2(object):
            INTERNAL_NAME = "mock2"
        with pytest.raises(ValueError) as excinfo:
            EntryPoint.register(Mock2, "Mock!")
        assert "Duplicate entry point display name: Mock!" in str(excinfo.value)
        # Clean up so other tests see the default registry.
        EntryPoint.unregister(Mock)
        assert Mock not in EntryPoint.DEFAULT_ENABLED
class TestEverythingEntryPoint(DatabaseTest):
    """Tests specific to the catch-all EverythingEntryPoint."""
    def test_no_changes(self):
        # EverythingEntryPoint doesn't modify queries or searches
        # beyond the default behavior for any entry point.
        assert "All" == EverythingEntryPoint.INTERNAL_NAME
        qu = self._db.query(Edition)
        assert qu == EntryPoint.modify_database_query(self._db, qu)
        # NOTE(review): `args` is unused below — likely a leftover; confirm
        # before removing.
        args = dict(arg="value")
        filter = object()
        assert filter == EverythingEntryPoint.modify_search_filter(filter)
class TestMediumEntryPoint(DatabaseTest):
    """Tests for medium-restricted entry points."""
    def test_modify_database_query(self):
        # Create a video, and a entry point that contains videos.
        work = self._work(with_license_pool=True)
        work.license_pools[0].presentation_edition.medium = Edition.VIDEO_MEDIUM
        class Videos(MediumEntryPoint):
            INTERNAL_NAME = Edition.VIDEO_MEDIUM
        qu = self._db.query(Work)
        # The default entry points filter out the video.
        for entrypoint in EbooksEntryPoint, AudiobooksEntryPoint:
            modified = entrypoint.modify_database_query(self._db, qu)
            assert [] == modified.all()
        # But the video entry point includes it.
        videos = Videos.modify_database_query(self._db, qu)
        assert [work.id] == [x.id for x in videos]
    def test_modify_search_filter(self):
        class Mock(MediumEntryPoint):
            INTERNAL_NAME = object()
        filter = Filter(media=object())
        Mock.modify_search_filter(filter)
        # The entry point's medium replaces whatever was set on the filter.
        assert [Mock.INTERNAL_NAME] == filter.media
class TestLibrary(DatabaseTest):
    """Test a Library's interaction with EntryPoints."""
    def test_enabled_entrypoints(self):
        l = self._default_library
        setting = l.setting(EntryPoint.ENABLED_SETTING)
        # When the value is not set, the default is used.
        assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
        setting.value = None
        assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
        # Names that don't correspond to registered entry points are
        # ignored. Names that do are looked up.
        setting.value = json.dumps(
            ["no such entry point", AudiobooksEntryPoint.INTERNAL_NAME]
        )
        assert [AudiobooksEntryPoint] == list(l.entrypoints)
        # An empty list is a valid value.
        setting.value = json.dumps([])
        assert [] == list(l.entrypoints)
| 34.331126 | 82 | 0.673032 | import json
import pytest
from ..entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
MediumEntryPoint,
)
from ..external_search import Filter
from ..model import Edition, Work
from ..testing import DatabaseTest
class TestEntryPoint(DatabaseTest):
def test_defaults(self):
everything, ebooks, audiobooks = EntryPoint.ENTRY_POINTS
assert EverythingEntryPoint == everything
assert EbooksEntryPoint == ebooks
assert AudiobooksEntryPoint == audiobooks
display = EntryPoint.DISPLAY_TITLES
assert "eBooks" == display[ebooks]
assert "Audiobooks" == display[audiobooks]
assert Edition.BOOK_MEDIUM == EbooksEntryPoint.INTERNAL_NAME
assert Edition.AUDIO_MEDIUM == AudiobooksEntryPoint.INTERNAL_NAME
assert "http://schema.org/CreativeWork" == everything.URI
for ep in (EbooksEntryPoint, AudiobooksEntryPoint):
assert ep.URI == Edition.medium_to_additional_type[ep.INTERNAL_NAME]
def test_no_changes(self):
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
def test_register(self):
class Mock(object):
pass
args = [Mock, "Mock!"]
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "must define INTERNAL_NAME" in str(excinfo.value)
# Test successful registration.
Mock.INTERNAL_NAME = "a name"
EntryPoint.register(*args)
assert Mock in EntryPoint.ENTRY_POINTS
assert "Mock!" == EntryPoint.DISPLAY_TITLES[Mock]
assert Mock not in EntryPoint.DEFAULT_ENABLED
# Can't register twice.
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "Duplicate entry point internal name: a name" in str(excinfo.value)
EntryPoint.unregister(Mock)
EntryPoint.register(*args, default_enabled=True)
assert Mock in EntryPoint.DEFAULT_ENABLED
# display name.
class Mock2(object):
INTERNAL_NAME = "mock2"
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(Mock2, "Mock!")
assert "Duplicate entry point display name: Mock!" in str(excinfo.value)
EntryPoint.unregister(Mock)
assert Mock not in EntryPoint.DEFAULT_ENABLED
class TestEverythingEntryPoint(DatabaseTest):
def test_no_changes(self):
# EverythingEntryPoint doesn't modify queries or searches
assert "All" == EverythingEntryPoint.INTERNAL_NAME
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
class TestMediumEntryPoint(DatabaseTest):
def test_modify_database_query(self):
work = self._work(with_license_pool=True)
work.license_pools[0].presentation_edition.medium = Edition.VIDEO_MEDIUM
class Videos(MediumEntryPoint):
INTERNAL_NAME = Edition.VIDEO_MEDIUM
qu = self._db.query(Work)
for entrypoint in EbooksEntryPoint, AudiobooksEntryPoint:
modified = entrypoint.modify_database_query(self._db, qu)
assert [] == modified.all()
videos = Videos.modify_database_query(self._db, qu)
assert [work.id] == [x.id for x in videos]
def test_modify_search_filter(self):
class Mock(MediumEntryPoint):
INTERNAL_NAME = object()
filter = Filter(media=object())
Mock.modify_search_filter(filter)
assert [Mock.INTERNAL_NAME] == filter.media
class TestLibrary(DatabaseTest):
def test_enabled_entrypoints(self):
l = self._default_library
setting = l.setting(EntryPoint.ENABLED_SETTING)
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
setting.value = None
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
# ignored. Names that do are looked up.
setting.value = json.dumps(
["no such entry point", AudiobooksEntryPoint.INTERNAL_NAME]
)
assert [AudiobooksEntryPoint] == list(l.entrypoints)
# An empty list is a valid value.
setting.value = json.dumps([])
assert [] == list(l.entrypoints)
| true | true |
f72fb746949f5509b0ded3a7b05fc061d6d0201a | 879 | bzl | Python | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 6 | 2021-03-30T07:42:04.000Z | 2022-03-23T02:42:36.000Z | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | """Provides the repository macro to import farmhash."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports farmhash."""
    # Attention: tools parse and update these lines.
    FARMHASH_COMMIT = "816a4ae622e964763ca0862d9dbd19324a1eaf45"
    FARMHASH_SHA256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0"
    tf_http_archive(
        name = "farmhash_archive",
        build_file = "//third_party/farmhash:farmhash.BUILD",
        sha256 = FARMHASH_SHA256,
        strip_prefix = "farmhash-{commit}".format(commit = FARMHASH_COMMIT),
        # The TensorFlow mirror is tried first; GitHub is the fallback.
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
            "https://github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
        ],
    )
| 39.954545 | 151 | 0.697383 |
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
FARMHASH_COMMIT = "816a4ae622e964763ca0862d9dbd19324a1eaf45"
FARMHASH_SHA256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0"
tf_http_archive(
name = "farmhash_archive",
build_file = "//third_party/farmhash:farmhash.BUILD",
sha256 = FARMHASH_SHA256,
strip_prefix = "farmhash-{commit}".format(commit = FARMHASH_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
"https://github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
],
)
| true | true |
f72fb7689d8d911e8d95780e9541175223f17521 | 2,251 | py | Python | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Michael Kemna.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask_restplus import Namespace, Resource
from powderbooking.models import model_forecast
from sqlalchemy import MetaData
from database import db
from database.query import Query
from utils.convert_models import convert_sqlalchemy_to_restplus_model
api = Namespace('forecast', description='Weather reports of a overview')
forecast = convert_sqlalchemy_to_restplus_model(table=model_forecast(metadata=MetaData()))
api.add_model(name=forecast.name, definition=forecast)
@api.route('/current/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No current forecast report for given overview identifier found')
class ForecastCurrent(Resource):
    @api.doc('get_current_forecast_report')
    @api.marshal_list_with(fields=forecast)
    def get(self, resort_id: int):
        """Get the current forecast report from today for the given overview identifier"""
        result = db.execute_query(Query.select_forecast_current, resort_id=resort_id)
        if result.rowcount > 0:
            return result.fetchall()
        # No matching rows: respond 404 via flask-restplus.
        api.abort(404)
@api.route('/past/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No past forecast report for given overview identifier found')
class ForecastPast(Resource):
    @api.doc('get_past_forecast_report')
    @api.marshal_list_with(fields=forecast)
    def get(self, resort_id: int):
        """Get the past forecast reports of today for the given overview identifier"""
        result = db.execute_query(Query.select_forecast_past, resort_id=resort_id)
        if result.rowcount > 0:
            return result.fetchall()
        # No matching rows: respond 404 via flask-restplus.
        api.abort(404)
| 38.810345 | 90 | 0.749889 |
from flask_restplus import Namespace, Resource
from powderbooking.models import model_forecast
from sqlalchemy import MetaData
from database import db
from database.query import Query
from utils.convert_models import convert_sqlalchemy_to_restplus_model
api = Namespace('forecast', description='Weather reports of a overview')
forecast = convert_sqlalchemy_to_restplus_model(table=model_forecast(metadata=MetaData()))
api.add_model(name=forecast.name, definition=forecast)
@api.route('/current/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No current forecast report for given overview identifier found')
class ForecastCurrent(Resource):
@api.doc('get_current_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
result = db.execute_query(Query.select_forecast_current, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
@api.route('/past/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No past forecast report for given overview identifier found')
class ForecastPast(Resource):
@api.doc('get_past_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
result = db.execute_query(Query.select_forecast_past, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
| true | true |
f72fb77842fa2f02ed07d03d38852d1b6dade3ed | 1,296 | py | Python | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 7 | 2019-10-04T07:27:41.000Z | 2021-06-07T04:39:18.000Z | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 64 | 2019-10-07T12:40:56.000Z | 2022-02-17T18:44:37.000Z | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 16 | 2019-10-04T08:48:12.000Z | 2021-06-11T01:22:56.000Z | from fetchcode.vcs.pip._vendor.pkg_resources import yield_lines
from fetchcode.vcs.pip._vendor.six import ensure_str
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List
class DictMetadata(object):
    """IMetadataProvider that reads metadata files from a dictionary.
    """
    def __init__(self, metadata):
        # type: (Dict[str, bytes]) -> None
        self._metadata = metadata
    def has_metadata(self, name):
        # type: (str) -> bool
        return name in self._metadata
    def get_metadata(self, name):
        # type: (str) -> str
        try:
            return ensure_str(self._metadata[name])
        except UnicodeDecodeError as e:
            # Mirrors handling done in pkg_resources.NullProvider: annotate
            # the error with the offending file name, then re-raise.
            e.reason += " in {} file".format(name)
            raise
    def get_metadata_lines(self, name):
        # type: (str) -> Iterable[str]
        return yield_lines(self.get_metadata(name))
    def metadata_isdir(self, name):
        # type: (str) -> bool
        # The backing dict is flat, so there are never metadata directories.
        return False
    def metadata_listdir(self, name):
        # type: (str) -> List[str]
        return []
    def run_script(self, script_name, namespace):
        # type: (str, str) -> None
        # Scripts are not supported by this in-memory provider.
        pass
| 28.8 | 71 | 0.631944 | from fetchcode.vcs.pip._vendor.pkg_resources import yield_lines
from fetchcode.vcs.pip._vendor.six import ensure_str
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List
class DictMetadata(object):
def __init__(self, metadata):
self._metadata = metadata
def has_metadata(self, name):
return name in self._metadata
def get_metadata(self, name):
try:
return ensure_str(self._metadata[name])
except UnicodeDecodeError as e:
e.reason += " in {} file".format(name)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def metadata_isdir(self, name):
return False
def metadata_listdir(self, name):
return []
def run_script(self, script_name, namespace):
pass
| true | true |
f72fb8bd9479fd40d29757e53cc63e0a56a9ebf3 | 757 | py | Python | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 1 | 2021-02-08T21:22:38.000Z | 2021-02-08T21:22:38.000Z | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | null | null | null | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:edream.110203113231.734: * @file ../plugins/quit_leo.py
""" Shows how to force Leo to quit."""
#@@language python
#@@tabwidth -4
from leo.core import leoGlobals as g
def init():
    '''Return True if the plugin has loaded successfully.'''
    ok = not g.app.unitTesting # Not for unit testing.
    if ok:
        def forceLeoToQuit(tag, keywords):
            # Idle hook: quit as soon as startup has finished.
            if not g.app.initing:
                g.pr("forceLeoToQuit", tag)
                g.app.forceShutdown()
                    # Force a shutdown at any other time, even "idle" time.
                    # Exception: do not call g.app.forceShutdown in a "start2" hook.
        g.pr(__doc__)
        g.registerHandler("idle", forceLeoToQuit)
        g.plugin_signon(__name__)
    return ok
#@-leo
| 30.28 | 72 | 0.620872 |
from leo.core import leoGlobals as g
def init():
ok = not g.app.unitTesting
if ok:
def forceLeoToQuit(tag, keywords):
if not g.app.initing:
g.pr("forceLeoToQuit", tag)
g.app.forceShutdown()
g.pr(__doc__)
g.registerHandler("idle", forceLeoToQuit)
g.plugin_signon(__name__)
return ok
| true | true |
f72fb9520e3654537387e3873817fd1158160606 | 1,721 | py | Python | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 27 | 2018-11-23T21:37:14.000Z | 2021-11-22T08:44:35.000Z | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 6 | 2019-07-09T16:26:56.000Z | 2021-05-17T17:29:42.000Z | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 4 | 2019-06-11T06:44:30.000Z | 2021-02-27T14:49:02.000Z | import numpy as np
import albumentations.augmentations.functional as af
from albumentations.core.transforms_interface import DualTransform
from allencv.data.transforms import _ImageTransformWrapper, ImageTransform
class CourtKeypointFlip(DualTransform):
"""Flip the input horizontally around the y-axis.
Args:
p (float): probability of applying the transform. Default: 0.5.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def apply(self, img, **params):
if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
# Opencv is faster than numpy only in case of
# non-gray scale 8bits images
return af.hflip_cv2(img)
else:
return af.hflip(img)
def apply_to_bbox(self, bbox, **params):
return af.bbox_hflip(bbox, **params)
def apply_to_keypoint(self, keypoint, **params):
return af.keypoint_hflip(keypoint, **params)
def apply_to_keypoints(self, keypoints, **params):
keypoints = [list(keypoint) for keypoint in keypoints]
kps = [self.apply_to_keypoint(keypoint[:4], **params) + keypoint[4:] for keypoint in keypoints]
# print(kps)
# sorted_x = sorted(kps, key=lambda x: x[0])
# bottom_left = max(sorted_x[:2], key=lambda x: x[1])
# top_left = min(sorted_x[:2], key=lambda x: x[1])
# bottom_right = max(sorted_x[2:], key=lambda x: x[1])
# top_right = min(sorted_x[2:], key=lambda x: x[1])
# tmp = [bottom_left, bottom_right, top_right, top_left]
# print(tmp)
return kps
ImageTransform.register("keypoint_hflip")(_ImageTransformWrapper(CourtKeypointFlip)) | 34.42 | 103 | 0.651365 | import numpy as np
import albumentations.augmentations.functional as af
from albumentations.core.transforms_interface import DualTransform
from allencv.data.transforms import _ImageTransformWrapper, ImageTransform
class CourtKeypointFlip(DualTransform):
def apply(self, img, **params):
if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
return af.hflip_cv2(img)
else:
return af.hflip(img)
def apply_to_bbox(self, bbox, **params):
return af.bbox_hflip(bbox, **params)
def apply_to_keypoint(self, keypoint, **params):
return af.keypoint_hflip(keypoint, **params)
def apply_to_keypoints(self, keypoints, **params):
keypoints = [list(keypoint) for keypoint in keypoints]
kps = [self.apply_to_keypoint(keypoint[:4], **params) + keypoint[4:] for keypoint in keypoints]
return kps
ImageTransform.register("keypoint_hflip")(_ImageTransformWrapper(CourtKeypointFlip)) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.