| column | type | length / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
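If this table comes from a Hugging Face dataset, as the column typing above suggests, a row can be inspected programmatically. A hedged sketch follows; the dataset identifier "user/dataset-name" is a placeholder, not this dataset's actual name.

from datasets import load_dataset

# "user/dataset-name" is a placeholder identifier; substitute the real one.
ds = load_dataset("user/dataset-name", split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])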
Row 1: hexsha 1c4075292667381a58b69acdd7965c603d6dc88e, size 6,850, ext py, lang Python
path: src/python3/request/calendar_groups_collection.py, repo: microsoftarchive/msgraph-sdk-python, head: 1320ba9116be0d00a1d7fce3484ea979e24ee82d, licenses: ["MIT"] (identical path, repo, head, and licenses for the stars, issues, and forks fields)
stars: 7 (2019-07-17T06:59:53.000Z to 2021-05-13T15:23:37.000Z), issues: null, forks: 2 (2020-06-30T13:06:59.000Z to 2021-06-03T09:47:35.000Z)
content:
# -*- coding: utf-8 -*-
"""
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
"""
from __future__ import unicode_literals
from ..collection_base import CollectionRequestBase, CollectionResponseBase, CollectionPageBase
from ..request_builder_base import RequestBuilderBase
from ..request import calendar_group_request_builder
from ..model.calendar_group import CalendarGroup
import json
import asyncio
class CalendarGroupsCollectionRequest(CollectionRequestBase):
def __init__(self, request_url, client, options):
"""Initialize the CalendarGroupsCollectionRequest
Args:
request_url (str): The url to perform the CalendarGroupsCollectionRequest
on
client (:class:`GraphClient<msgraph.request.graph_client.GraphClient>`):
The client which will be used for the request
options (list of :class:`Option<msgraph.options.Option>`):
A list of options to pass into the request
"""
super(CalendarGroupsCollectionRequest, self).__init__(request_url, client, options)
def get(self):
"""Gets the CalendarGroupsCollectionPage
Returns:
:class:`CalendarGroupsCollectionPage<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionPage>`:
The CalendarGroupsCollectionPage
"""
self.method = "GET"
collection_response = CalendarGroupsCollectionResponse(json.loads(self.send().content))
return self._page_from_response(collection_response)
@asyncio.coroutine
def get_async(self):
"""Gets the CalendarGroupsCollectionPage in async
Yields:
:class:`CalendarGroupsCollectionPage<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionPage>`:
The CalendarGroupsCollectionPage
"""
future = self._client._loop.run_in_executor(None,
self.get)
collection_page = yield from future
return collection_page
class CalendarGroupsCollectionRequestBuilder(RequestBuilderBase):
def __getitem__(self, key):
"""Get the CalendarGroupRequestBuilder with the specified key
Args:
key (str): The key to get a CalendarGroupRequestBuilder for
Returns:
:class:`CalendarGroupRequestBuilder<msgraph.request.calendar_group_request_builder.CalendarGroupRequestBuilder>`:
A CalendarGroupRequestBuilder for that key
"""
return calendar_group_request_builder.CalendarGroupRequestBuilder(self.append_to_request_url(str(key)), self._client)
def request(self, select=None, filter=None, top=None, skip=None, order_by=None, options=None):
"""Builds the CalendarGroupsCollectionRequest
Args:
select (str): Default None, comma-separated list of properties to
include in the response.
filter (str): Default None, an OData filter expression to apply
to the request.
top (int): Default None, the number of items to return in a result.
skip (int): Default None, the number of items to skip before
returning results.
order_by (str): Default None, comma-separated list of properties
that are used to sort the order of items in the response.
options (list of :class:`Option<msgraph.options.Option>`):
A list of options to pass into the request. Defaults to None.
Returns:
:class:`CalendarGroupsCollectionRequest<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionRequest>`:
The CalendarGroupsCollectionRequest
"""
req = CalendarGroupsCollectionRequest(self._request_url, self._client, options)
req._set_query_options(select=select, filter=filter, top=top, skip=skip, order_by=order_by, )
return req
def get(self):
"""Gets the CalendarGroupsCollectionPage
Returns:
:class:`CalendarGroupsCollectionPage<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionPage>`:
The CalendarGroupsCollectionPage
"""
return self.request().get()
@asyncio.coroutine
def get_async(self):
"""Gets the CalendarGroupsCollectionPage in async
Yields:
:class:`CalendarGroupsCollectionPage<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionPage>`:
The CalendarGroupsCollectionPage
"""
collection_page = yield from self.request().get_async()
return collection_page
class CalendarGroupsCollectionResponse(CollectionResponseBase):
@property
def collection_page(self):
"""The collection page stored in the response JSON
Returns:
:class:`CalendarGroupsCollectionPage<msgraph.request.calendar_groups_collection.CalendarGroupsCollectionPage>`:
The collection page
"""
if self._collection_page:
self._collection_page._prop_list = self._prop_dict["value"]
else:
self._collection_page = CalendarGroupsCollectionPage(self._prop_dict["value"])
return self._collection_page
class CalendarGroupsCollectionPage(CollectionPageBase):
def __getitem__(self, index):
"""Get the CalendarGroup at the index specified
Args:
index (int): The index of the item to get from the CalendarGroupsCollectionPage
Returns:
:class:`CalendarGroup<msgraph.model.calendar_group.CalendarGroup>`:
The CalendarGroup at the index
"""
return CalendarGroup(self._prop_list[index])
def calendar_groups(self):
"""Get a generator of CalendarGroup within the CalendarGroupsCollectionPage
Yields:
:class:`CalendarGroup<msgraph.model.calendar_group.CalendarGroup>`:
The next CalendarGroup in the collection
"""
for item in self._prop_list:
yield CalendarGroup(item)
def _init_next_page_request(self, next_page_link, client, options):
"""Initialize the next page request for the CalendarGroupsCollectionPage
Args:
next_page_link (str): The URL for the next page request
to be sent to
client (:class:`GraphClient<msgraph.model.graph_client.GraphClient>`:
The client to be used for the request
options (list of :class:`Option<msgraph.options.Option>`:
A list of options
"""
self._next_page_request = CalendarGroupsCollectionRequest(next_page_link, client, options)
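The get_async methods above use the legacy @asyncio.coroutine / yield from style to run the blocking get() on the event loop's default thread-pool executor. A minimal, self-contained sketch of the same pattern in the modern async/await form (the names blocking_get and get_async here are stand-ins, not part of the SDK):

import asyncio

def blocking_get():
    # Stand-in for CalendarGroupsCollectionRequest.get(); any blocking call works here.
    return {"value": ["calendarGroup1", "calendarGroup2"]}

async def get_async():
    loop = asyncio.get_running_loop()
    # Run the blocking call in the default thread-pool executor, as the SDK's get_async does.
    return await loop.run_in_executor(None, blocking_get)

print(asyncio.run(get_async()))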
avg_line_length: 41.26506, max_line_length: 151, alphanum_fraction: 0.676204
content_no_comment:
from __future__ import unicode_literals
from ..collection_base import CollectionRequestBase, CollectionResponseBase, CollectionPageBase
from ..request_builder_base import RequestBuilderBase
from ..request import calendar_group_request_builder
from ..model.calendar_group import CalendarGroup
import json
import asyncio
class CalendarGroupsCollectionRequest(CollectionRequestBase):
def __init__(self, request_url, client, options):
super(CalendarGroupsCollectionRequest, self).__init__(request_url, client, options)
def get(self):
self.method = "GET"
collection_response = CalendarGroupsCollectionResponse(json.loads(self.send().content))
return self._page_from_response(collection_response)
@asyncio.coroutine
def get_async(self):
future = self._client._loop.run_in_executor(None,
self.get)
collection_page = yield from future
return collection_page
class CalendarGroupsCollectionRequestBuilder(RequestBuilderBase):
def __getitem__(self, key):
return calendar_group_request_builder.CalendarGroupRequestBuilder(self.append_to_request_url(str(key)), self._client)
def request(self,select=None, filter=None, top=None, skip=None, order_by=None, options=None):
req = CalendarGroupsCollectionRequest(self._request_url, self._client, options)
req._set_query_options(select=select, filter=filter, top=top, skip=skip, order_by=order_by, )
return req
def get(self):
return self.request().get()
@asyncio.coroutine
def get_async(self):
collection_page = yield from self.request().get_async()
return collection_page
class CalendarGroupsCollectionResponse(CollectionResponseBase):
@property
def collection_page(self):
if self._collection_page:
self._collection_page._prop_list = self._prop_dict["value"]
else:
self._collection_page = CalendarGroupsCollectionPage(self._prop_dict["value"])
return self._collection_page
class CalendarGroupsCollectionPage(CollectionPageBase):
def __getitem__(self, index):
return CalendarGroup(self._prop_list[index])
def calendar_groups(self):
for item in self._prop_list:
yield CalendarGroup(item)
def _init_next_page_request(self, next_page_link, client, options):
self._next_page_request = CalendarGroupsCollectionRequest(next_page_link, client, options)
is_comment_constant_removed: true, is_sharp_comment_removed: true
Row 2: hexsha 1c4076133d2531bdb57097d627ff8fbc5b0dfd03, size 5,555, ext py, lang Python
path: anyway/widgets/suburban_widgets/motorcycle_accidents_vs_all_accidents_widget.py, repo: shaysw/anyway, head: 35dec531fd4ac79c99d09e684027df017e989ddc, licenses: ["MIT"] (identical for the stars, issues, and forks fields)
stars: null, issues: null, forks: null
content:
import datetime
from typing import List
import pandas as pd
from sqlalchemy import case, literal_column, func, distinct, desc
from anyway.request_params import RequestParams
from anyway.backend_constants import BE_CONST, AccidentSeverity
from anyway.widgets.widget_utils import get_query
from anyway.models import InvolvedMarkerView
from anyway.vehicle_type import VehicleCategory
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from typing import Dict
from flask_babel import _
# TODO: register?
class MotorcycleAccidentsVsAllAccidentsWidget(SubUrbanWidget):
name: str = "motorcycle_accidents_vs_all_accidents"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 20
self.road_number: str = request_params.location_info["road1"]
def generate_items(self) -> None:
self.items = MotorcycleAccidentsVsAllAccidentsWidget.motorcycle_accidents_vs_all_accidents(
self.request_params.start_time, self.request_params.end_time, self.road_number
)
@staticmethod
def motorcycle_accidents_vs_all_accidents(
start_time: datetime.date, end_time: datetime.date, road_number: str
) -> List:
location_label = "location"
location_other = "שאר הארץ"
location_road = f"כביש {int(road_number)}"
case_location = case(
[
(
(InvolvedMarkerView.road1 == road_number)
| (InvolvedMarkerView.road2 == road_number),
location_road,
)
],
else_=literal_column(f"'{location_other}'"),
).label(location_label)
vehicle_label = "vehicle"
vehicle_other = "אחר"
vehicle_motorcycle = "אופנוע"
case_vehicle = case(
[
(
InvolvedMarkerView.involve_vehicle_type.in_(
VehicleCategory.MOTORCYCLE.get_codes()
),
literal_column(f"'{vehicle_motorcycle}'"),
)
],
else_=literal_column(f"'{vehicle_other}'"),
).label(vehicle_label)
query = get_query(
table_obj=InvolvedMarkerView, filters={}, start_time=start_time, end_time=end_time
)
num_accidents_label = "num_of_accidents"
query = (
query.with_entities(
case_location,
case_vehicle,
func.count(distinct(InvolvedMarkerView.provider_and_id)).label(num_accidents_label),
)
.filter(InvolvedMarkerView.road_type.in_(BE_CONST.NON_CITY_ROAD_TYPES))
.filter(
InvolvedMarkerView.accident_severity.in_(
# pylint: disable=no-member
[AccidentSeverity.FATAL.value, AccidentSeverity.SEVERE.value]
)
)
.group_by(location_label, vehicle_label)
.order_by(desc(num_accidents_label))
)
# pylint: disable=no-member
results = pd.read_sql_query(query.statement, query.session.bind).to_dict(
orient="records"
) # pylint: disable=no-member
counter_road_motorcycle = 0
counter_other_motorcycle = 0
counter_road_other = 0
counter_other_other = 0
for record in results:
if record[location_label] == location_other:
if record[vehicle_label] == vehicle_other:
counter_other_other = record[num_accidents_label]
else:
counter_other_motorcycle = record[num_accidents_label]
else:
if record[vehicle_label] == vehicle_other:
counter_road_other = record[num_accidents_label]
else:
counter_road_motorcycle = record[num_accidents_label]
sum_road = counter_road_other + counter_road_motorcycle
if sum_road == 0:
sum_road = 1 # prevent division by zero
sum_all = counter_other_other + counter_other_motorcycle + sum_road
percentage_label = "percentage"
location_all_label = "כל הארץ"
return [
{
location_label: location_road,
vehicle_label: vehicle_motorcycle,
percentage_label: counter_road_motorcycle / sum_road,
},
{
location_label: location_road,
vehicle_label: vehicle_other,
percentage_label: counter_road_other / sum_road,
},
{
location_label: location_all_label,
vehicle_label: vehicle_motorcycle,
percentage_label: (counter_other_motorcycle + counter_road_motorcycle) / sum_all,
},
{
location_label: location_all_label,
vehicle_label: vehicle_other,
percentage_label: (counter_other_other + counter_road_other) / sum_all,
},
]
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _('Number of fatal and severe motorcycle accidents') +f" - {request_params.location_info['road1']} " +_('compared to rest of country')
}
return items
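The static method above reduces the grouped query rows to four counters and converts them into motorcycle/other shares for the selected road and for the whole country, guarding the road bucket against division by zero. A small self-contained sketch of that arithmetic with made-up counts (not real query output):

# Hypothetical counts standing in for the grouped query results above.
counter_road_motorcycle, counter_road_other = 3, 9
counter_other_motorcycle, counter_other_other = 40, 948

sum_road = counter_road_motorcycle + counter_road_other or 1  # avoid division by zero
sum_all = counter_other_motorcycle + counter_other_other + sum_road

print(counter_road_motorcycle / sum_road)                              # motorcycle share on the road: 0.25
print((counter_other_motorcycle + counter_road_motorcycle) / sum_all)  # motorcycle share countrywide: 0.043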
avg_line_length: 39.678571, max_line_length: 156, alphanum_fraction: 0.59586
content_no_comment:
import datetime
from typing import List
import pandas as pd
from sqlalchemy import case, literal_column, func, distinct, desc
from anyway.request_params import RequestParams
from anyway.backend_constants import BE_CONST, AccidentSeverity
from anyway.widgets.widget_utils import get_query
from anyway.models import InvolvedMarkerView
from anyway.vehicle_type import VehicleCategory
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from typing import Dict
from flask_babel import _
class MotorcycleAccidentsVsAllAccidentsWidget(SubUrbanWidget):
name: str = "motorcycle_accidents_vs_all_accidents"
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.rank = 20
self.road_number: str = request_params.location_info["road1"]
def generate_items(self) -> None:
self.items = MotorcycleAccidentsVsAllAccidentsWidget.motorcycle_accidents_vs_all_accidents(
self.request_params.start_time, self.request_params.end_time, self.road_number
)
@staticmethod
def motorcycle_accidents_vs_all_accidents(
start_time: datetime.date, end_time: datetime.date, road_number: str
) -> List:
location_label = "location"
location_other = "שאר הארץ"
location_road = f"כביש {int(road_number)}"
case_location = case(
[
(
(InvolvedMarkerView.road1 == road_number)
| (InvolvedMarkerView.road2 == road_number),
location_road,
)
],
else_=literal_column(f"'{location_other}'"),
).label(location_label)
vehicle_label = "vehicle"
vehicle_other = "אחר"
vehicle_motorcycle = "אופנוע"
case_vehicle = case(
[
(
InvolvedMarkerView.involve_vehicle_type.in_(
VehicleCategory.MOTORCYCLE.get_codes()
),
literal_column(f"'{vehicle_motorcycle}'"),
)
],
else_=literal_column(f"'{vehicle_other}'"),
).label(vehicle_label)
query = get_query(
table_obj=InvolvedMarkerView, filters={}, start_time=start_time, end_time=end_time
)
num_accidents_label = "num_of_accidents"
query = (
query.with_entities(
case_location,
case_vehicle,
func.count(distinct(InvolvedMarkerView.provider_and_id)).label(num_accidents_label),
)
.filter(InvolvedMarkerView.road_type.in_(BE_CONST.NON_CITY_ROAD_TYPES))
.filter(
InvolvedMarkerView.accident_severity.in_(
[AccidentSeverity.FATAL.value, AccidentSeverity.SEVERE.value]
)
)
.group_by(location_label, vehicle_label)
.order_by(desc(num_accidents_label))
)
results = pd.read_sql_query(query.statement, query.session.bind).to_dict(
orient="records"
)
counter_road_motorcycle = 0
counter_other_motorcycle = 0
counter_road_other = 0
counter_other_other = 0
for record in results:
if record[location_label] == location_other:
if record[vehicle_label] == vehicle_other:
counter_other_other = record[num_accidents_label]
else:
counter_other_motorcycle = record[num_accidents_label]
else:
if record[vehicle_label] == vehicle_other:
counter_road_other = record[num_accidents_label]
else:
counter_road_motorcycle = record[num_accidents_label]
sum_road = counter_road_other + counter_road_motorcycle
if sum_road == 0:
sum_road = 1
sum_all = counter_other_other + counter_other_motorcycle + sum_road
percentage_label = "percentage"
location_all_label = "כל הארץ"
return [
{
location_label: location_road,
vehicle_label: vehicle_motorcycle,
percentage_label: counter_road_motorcycle / sum_road,
},
{
location_label: location_road,
vehicle_label: vehicle_other,
percentage_label: counter_road_other / sum_road,
},
{
location_label: location_all_label,
vehicle_label: vehicle_motorcycle,
percentage_label: (counter_other_motorcycle + counter_road_motorcycle) / sum_all,
},
{
location_label: location_all_label,
vehicle_label: vehicle_other,
percentage_label: (counter_other_other + counter_road_other) / sum_all,
},
]
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
items["data"]["text"] = {
"title": _('Number of fatal and severe motorcycle accidents') +f" - {request_params.location_info['road1']} " +_('compared to rest of country')
}
return items
is_comment_constant_removed: true, is_sharp_comment_removed: true
Row 3: hexsha 1c40763425a3dd4fa423267ce2489d4f3864d171, size 10,271, ext py, lang Python
path: recipes/lanso/eval.py, repo: wangwei2009/speechbrain, head: ebbac4561a9c9101786e0ab0b1105017eb655fc8, licenses: ["Apache-2.0"] (identical for the stars, issues, and forks fields)
stars: null, issues: null, forks: null
content:
#!/usr/bin/python3
"""Recipe for training a classifier using the
mobvoihotwords Dataset.
To run this recipe, use the following command:
> python train.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hyperparams/xvect.yaml (xvector system)
"""
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from sklearn.metrics import confusion_matrix
import numpy as np
label_list = []
output_list = []
global output_array
global label_array
output_array=np.array([])
label_array = np.array([])
global batch_count
batch_count = 0
class SpeakerBrain(sb.core.Brain):
"""Class for GSC training"
"""
def compute_forward(self, batch, stage):
"""Computation pipeline based on a encoder + command classifier.
Data augmentation and environmental corruption are applied to the
input speech.
"""
batch = batch.to(self.device)
wavs, lens = batch.sig
if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
# Applying the augmentation pipeline
wavs_aug_tot = []
wavs_aug_tot.append(wavs)
for count, augment in enumerate(self.hparams.augment_pipeline):
# Apply augment
wavs_aug = augment(wavs, lens)
# Managing speed change
if wavs_aug.shape[1] > wavs.shape[1]:
wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
else:
zero_sig = torch.zeros_like(wavs)
zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
wavs_aug = zero_sig
if self.hparams.concat_augment:
wavs_aug_tot.append(wavs_aug)
else:
wavs = wavs_aug
wavs_aug_tot[0] = wavs
wavs = torch.cat(wavs_aug_tot, dim=0)
self.n_augment = len(wavs_aug_tot)
lens = torch.cat([lens] * self.n_augment)
# print("wavs.size():{}".format(wavs.size()))
# print("lens.size():{}".format(lens.size()))
# Feature extraction and normalization
feats = self.modules.compute_features(wavs)
if self.hparams.use_log1p:
# Log1p reduces the emphasis on small differences
feats = torch.log1p(feats)
feats = self.modules.mean_var_norm(feats, lens)
# print("feats.size():{}".format(feats.size()))
# Embeddings + classifier
outputs = self.modules.embedding_model(feats)
if "classifier" in self.modules.keys():
outputs = self.modules.classifier(outputs)
# print("outputs.size():{}".format(outputs.size()))
# Ecapa model uses softmax outside of its classifier
if "softmax" in self.modules.keys():
outputs = self.modules.softmax(outputs)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss using command-id as label.
"""
predictions, lens = predictions
uttid = batch.id
command, _ = batch.command_encoded #[batch, 1]
global label_array
global output_array
output_label = torch.argmax(predictions[:, 0, :], dim=1).cpu().numpy()
# label_list.append(command.cpu().numpy())
label_array = np.concatenate((label_array, command.cpu().numpy()[:, 0]))
output_array = np.concatenate((output_array, output_label))
# output_list.append(output_label)
# Concatenate labels (due to data augmentation)
if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
command = torch.cat([command] * self.n_augment, dim=0)
# print("command.size():{}".format(command.size()))
# compute the cost function
loss = self.hparams.compute_cost(predictions, command, lens)
# loss = sb.nnet.losses.nll_loss(predictions, command, lens)
if hasattr(self.hparams.lr_annealing, "on_batch_end"):
self.hparams.lr_annealing.on_batch_end(self.optimizer)
if stage != sb.Stage.TRAIN:
self.error_metrics.append(uttid, predictions, command, lens)
return loss
def on_stage_start(self, stage, epoch=None):
"""Gets called at the beginning of an epoch."""
if stage != sb.Stage.TRAIN:
self.error_metrics = self.hparams.error_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["ErrorRate"] = self.error_metrics.summarize("average")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"ErrorRate": stage_stats["ErrorRate"]},
min_keys=["ErrorRate"],
)
if self.hparams.use_tensorboard:
valid_stats = {
"loss": stage_stats['loss'],
"ErrorRate": stage_stats["ErrorRate"],
}
self.hparams.tensorboard_train_logger.log_stats(
{"Epoch": epoch}, self.train_stats, valid_stats
)
# We also write statistics about test data to stdout and to the logfile.
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
def dataio_prep(hparams):
"Creates the datasets and their data processing pipelines."
data_folder = hparams["data_folder"]
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_annotation"],
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_annotation"],
replacements={"data_root": data_folder},
)
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["test_annotation"],
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data, test_data]
label_encoder = sb.dataio.encoder.CategoricalEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop, duration):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("command")
@sb.utils.data_pipeline.provides("command", "command_encoded")
def label_pipeline(command):
yield command
command_encoded = label_encoder.encode_sequence_torch([command])
yield command_encoded
sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
# 3. Fit encoder:
# Load or compute the label encoder (with multi-GPU DDP support)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
label_encoder.load_or_create(
path=lab_enc_file, from_didatasets=[train_data], output_key="command",
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "command_encoded"]
)
return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
# This flag enables the inbuilt cudnn auto-tuner
torch.backends.cudnn.benchmark = True
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# Initialize ddp (useful only for multi-GPU DDP training)
sb.utils.distributed.ddp_init_group(run_opts)
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing GSC and annotation into csv files)
from prepare_kws import prepare_kws
# Data preparation
run_on_main(
prepare_kws,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"skip_prep": hparams["skip_prep"],
},
)
# Dataset IO prep: creating Dataset objects and proper encodings for phones
train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
# Brain class initialization
speaker_brain = SpeakerBrain(
modules=hparams["modules"],
opt_class=hparams["opt_class"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# # Training
# speaker_brain.fit(
# speaker_brain.hparams.epoch_counter,
# train_data,
# valid_data,
# train_loader_kwargs=hparams["dataloader_options"],
# valid_loader_kwargs=hparams["dataloader_options"],
# )
# Load the best checkpoint for evaluation
test_stats = speaker_brain.evaluate(
test_set=valid_data,
min_key="ErrorRate",
test_loader_kwargs=hparams["dataloader_options"],
)
cm = confusion_matrix(label_array, output_array)
print(cm)
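compute_objectives above accumulates the true command indices and the argmax predictions into two flat arrays, and the script prints their confusion matrix after evaluation. A minimal sketch with toy arrays (assumes scikit-learn is installed; the class labels are illustrative, not real evaluation output):

import numpy as np
from sklearn.metrics import confusion_matrix

# Toy stand-ins for the label_array / output_array accumulated during evaluation.
label_array = np.array([0, 0, 1, 1, 2, 2])
output_array = np.array([0, 1, 1, 1, 2, 0])

# Rows are true classes, columns are predicted classes.
print(confusion_matrix(label_array, output_array))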
avg_line_length: 33.565359, max_line_length: 80, alphanum_fraction: 0.634115
content_no_comment:
import os
import sys
import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from sklearn.metrics import confusion_matrix
import numpy as np
label_list = []
output_list = []
global output_array
global label_array
output_array=np.array([])
label_array = np.array([])
global batch_count
batch_count = 0
class SpeakerBrain(sb.core.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
wavs, lens = batch.sig
if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
wavs_aug_tot = []
wavs_aug_tot.append(wavs)
for count, augment in enumerate(self.hparams.augment_pipeline):
wavs_aug = augment(wavs, lens)
if wavs_aug.shape[1] > wavs.shape[1]:
wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
else:
zero_sig = torch.zeros_like(wavs)
zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
wavs_aug = zero_sig
if self.hparams.concat_augment:
wavs_aug_tot.append(wavs_aug)
else:
wavs = wavs_aug
wavs_aug_tot[0] = wavs
wavs = torch.cat(wavs_aug_tot, dim=0)
self.n_augment = len(wavs_aug_tot)
lens = torch.cat([lens] * self.n_augment)
feats = self.modules.compute_features(wavs)
if self.hparams.use_log1p:
feats = torch.log1p(feats)
feats = self.modules.mean_var_norm(feats, lens)
outputs = self.modules.embedding_model(feats)
if "classifier" in self.modules.keys():
outputs = self.modules.classifier(outputs)
if "softmax" in self.modules.keys():
outputs = self.modules.softmax(outputs)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
predictions, lens = predictions
uttid = batch.id
command, _ = batch.command_encoded
global label_array
global output_array
output_label = torch.argmax(predictions[:, 0, :], dim=1).cpu().numpy()
label_array = np.concatenate((label_array, command.cpu().numpy()[:, 0]))
output_array = np.concatenate((output_array, output_label))
if stage == sb.Stage.TRAIN and self.hparams.apply_data_augmentation:
command = torch.cat([command] * self.n_augment, dim=0)
loss = self.hparams.compute_cost(predictions, command, lens)
if hasattr(self.hparams.lr_annealing, "on_batch_end"):
self.hparams.lr_annealing.on_batch_end(self.optimizer)
if stage != sb.Stage.TRAIN:
self.error_metrics.append(uttid, predictions, command, lens)
return loss
def on_stage_start(self, stage, epoch=None):
if stage != sb.Stage.TRAIN:
self.error_metrics = self.hparams.error_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["ErrorRate"] = self.error_metrics.summarize("average")
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"ErrorRate": stage_stats["ErrorRate"]},
min_keys=["ErrorRate"],
)
if self.hparams.use_tensorboard:
valid_stats = {
"loss": stage_stats['loss'],
"ErrorRate": stage_stats["ErrorRate"],
}
self.hparams.tensorboard_train_logger.log_stats(
{"Epoch": epoch}, self.train_stats, valid_stats
)
if stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
{"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
def dataio_prep(hparams):
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_annotation"],
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_annotation"],
replacements={"data_root": data_folder},
)
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["test_annotation"],
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data, test_data]
label_encoder = sb.dataio.encoder.CategoricalEncoder()
@sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop, duration):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
@sb.utils.data_pipeline.takes("command")
@sb.utils.data_pipeline.provides("command", "command_encoded")
def label_pipeline(command):
yield command
command_encoded = label_encoder.encode_sequence_torch([command])
yield command_encoded
sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
label_encoder.load_or_create(
path=lab_enc_file, from_didatasets=[train_data], output_key="command",
)
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "command_encoded"]
)
return train_data, valid_data, test_data, label_encoder
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
sb.core.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
from prepare_kws import prepare_kws
run_on_main(
prepare_kws,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"skip_prep": hparams["skip_prep"],
},
)
train_data, valid_data, test_data, label_encoder = dataio_prep(hparams)
speaker_brain = SpeakerBrain(
modules=hparams["modules"],
opt_class=hparams["opt_class"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
test_stats = speaker_brain.evaluate(
test_set=valid_data,
min_key="ErrorRate",
test_loader_kwargs=hparams["dataloader_options"],
)
cm = confusion_matrix(label_array, output_array)
print(cm)
is_comment_constant_removed: true, is_sharp_comment_removed: true
Row 4: hexsha 1c407682bc1aa17b328c6916ee6337b8b41d7e77, size 40,315, ext py, lang Python
path: tests/test_date_parser.py, repo: ASOdesk/dateparser, head: d8050511772c30199d14cd8506d46f9c587c61a8, licenses: ["BSD-3-Clause"] (identical for the stars, issues, and forks fields)
stars: null, issues: null, forks: null
content:
# coding: utf-8
from __future__ import unicode_literals
import unittest
from datetime import datetime, timedelta
from functools import wraps
from operator import attrgetter
import six
from mock import patch, Mock
from nose_parameterized import parameterized, param
import dateparser.timezone_parser
from dateparser.date import DateDataParser, date_parser
from dateparser.date_parser import DateParser
from dateparser.languages import default_language_loader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import settings
from dateparser.utils import normalize_unicode
from tests import BaseTestCase
class AutoDetectLanguageTest(BaseTestCase):
def setUp(self):
super(AutoDetectLanguageTest, self).setUp()
# Just a known subset so we can rely on test outcomes. Feel free to add, but do not exclude or change the order.
self.known_languages = ['en', 'fr', 'es', 'pt', 'ru', 'tr', 'cs']
self.parser = NotImplemented
self.detected_languages = NotImplemented
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_languages=['es', 'pt']),
param(date_strings=["11 junio 2010"], expected_languages=['es']),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_languages=['es']),
])
def test_detect_languages(self, date_strings, expected_languages):
self.given_parser(languages=self.known_languages)
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are(expected_languages)
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_language='es'),
param(date_strings=["11 junio 2010"], expected_language='es'),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_language='es'),
])
def test_exclude_ineligible_languages_with_modify(self, date_strings, expected_language):
self.given_parser(languages=self.known_languages)
self.when_one_language_is_detected(date_strings, modify=True)
self.then_detected_languages_are([expected_language])
self.then_parser_languages_are(self.known_languages[self.known_languages.index(expected_language):])
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_language='es'),
param(date_strings=["11 junio 2010"], expected_language='es'),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_language='es'),
])
def test_do_not_exclude_ineligible_languages_without_modify(self, date_strings, expected_language):
self.given_parser(languages=self.known_languages)
self.when_one_language_is_detected(date_strings, modify=False)
self.then_detected_languages_are([expected_language])
self.then_parser_languages_are(self.known_languages)
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_languages=['es', 'pt']),
param(date_strings=["11 junio 2010"], expected_languages=['es']),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_languages=['es']),
param(date_strings=["13 Srpen, 2014"], expected_languages=['cs']),
])
def test_do_not_exclude_ineligible_languages_when_all_ineligible(self, date_strings, expected_languages):
self.given_parser(languages=self.known_languages)
self.when_all_languages_are_detected(date_strings, modify=True)
self.then_detected_languages_are(expected_languages)
self.then_parser_languages_are(self.known_languages)
@parameterized.expand([
param(language='es', date_strings=["13 Setembro, 2014"]),
param(language='cs', date_strings=["'11 Ağustos, 2014'"]),
])
def test_reject_dates_in_other_languages_without_redetection(self, language, date_strings):
self.given_parser(languages=self.known_languages)
self.given_parser_languages_are([language])
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are([])
@parameterized.expand([
param(detected_languages=['es'], date_strings=['13 Juillet, 2014'], expected_languages=['fr']),
param(detected_languages=['es'], date_strings=['11 Ağustos, 2014'], expected_languages=['tr']),
])
def test_accept_dates_in_other_languages_with_redetection_enabled(
self, detected_languages, date_strings, expected_languages
):
self.given_parser(languages=self.known_languages, allow_redetection=True)
self.given_parser_languages_are(detected_languages)
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are(expected_languages)
def test_accept_numeric_dates_without_redetection(self,):
self.given_parser(languages=self.known_languages)
self.given_parser_languages_are(['es'])
self.when_all_languages_are_detected(['13/08/2014'])
self.then_detected_languages_are(['es'])
def given_parser(self, languages=None, allow_redetection=False):
if languages is not None:
language_map = default_language_loader.get_language_map()
languages = [language_map[language]
for language in languages]
self.parser = AutoDetectLanguage(languages, allow_redetection=allow_redetection)
def given_parser_languages_are(self, languages):
language_map = default_language_loader.get_language_map()
self.parser.languages = [language_map[language]
for language in languages]
def when_all_languages_are_detected(self, date_strings, modify=False):
assert not isinstance(date_strings, six.string_types)
for date_string in date_strings:
if settings.NORMALIZE:
date_string = normalize_unicode(date_string)
detected_languages = list(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = detected_languages
def when_one_language_is_detected(self, date_strings, modify=False):
for date_string in date_strings:
detected_language = next(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = [detected_language]
def then_detected_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.detected_languages)
six.assertCountEqual(self, expected_languages, shortnames)
def then_parser_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.parser.languages)
six.assertCountEqual(self, expected_languages, shortnames)
class ExactLanguagesTest(BaseTestCase):
def setUp(self):
super(ExactLanguagesTest, self).setUp()
self.parser = NotImplemented
self.detected_languages = NotImplemented
def test_languages_passed_in_constructor_should_not_be_none(self):
self.when_parser_is_constructed(languages=None)
self.then_error_was_raised(ValueError, ['language cannot be None for ExactLanguages'])
@parameterized.expand([
param(languages=['fr'], date_strings=["04-decembre-2015", "13 aou, 2014"]),
])
def test_missing_diacritical_marks(self, languages, date_strings):
settings.NORMALIZE = True
self.given_parser(languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are(languages)
@parameterized.expand([
param(languages=['es'], date_strings=["13 Ago, 2014"]),
param(languages=['es'], date_strings=["13 Septiembre, 2014"]),
param(languages=['es'], date_strings=["13/03/2014"]),
param(languages=['es'], date_strings=["11/03/2014"]),
])
def test_parse_date_in_exact_language(self, languages, date_strings):
self.given_parser(languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are(languages)
@parameterized.expand([
param(languages=['es'], date_strings=["13 Setembro, 2014"]),
])
def test_reject_dates_in_other_languages(self, languages, date_strings):
self.given_parser(languages=languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are([])
def given_parser(self, languages):
language_map = default_language_loader.get_language_map()
languages = [language_map[language]
for language in languages]
self.parser = ExactLanguages(languages)
def when_languages_are_detected(self, date_strings, modify=False):
assert not isinstance(date_strings, six.string_types)
for date_string in date_strings:
detected_languages = list(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = detected_languages
def when_parser_is_constructed(self, languages):
try:
ExactLanguages(languages)
except Exception as error:
self.error = error
def then_detected_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.detected_languages)
six.assertCountEqual(self, expected_languages, shortnames)
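# Editor's note, a hypothetical example that is not part of the dateparser suite: the test
# classes in this file run one test body over many inputs via nose_parameterized's
# expand/param, as in this minimal sketch (it reuses the unittest, parameterized, and
# param imports already made at the top of the file):
class ParameterizedPatternExample(unittest.TestCase):
    @parameterized.expand([
        param('es', expected='ES'),
        param('fr', expected='FR'),
    ])
    def test_upper(self, text, expected):
        self.assertEqual(text.upper(), expected)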
class TestDateParser(BaseTestCase):
def setUp(self):
super(TestDateParser, self).setUp()
self.parser = NotImplemented
self.result = NotImplemented
self.date_parser = NotImplemented
self.date_result = NotImplemented
@parameterized.expand([
# English dates
param('[Sept] 04, 2014.', datetime(2014, 9, 4)),
param('Tuesday Jul 22, 2014', datetime(2014, 7, 22)),
param('10:04am EDT', datetime(2012, 11, 13, 14, 4)),
param('Friday', datetime(2012, 11, 9)),
param('November 19, 2014 at noon', datetime(2014, 11, 19, 12, 0)),
param('December 13, 2014 at midnight', datetime(2014, 12, 13, 0, 0)),
param('Nov 25 2014 10:17 pm EST', datetime(2014, 11, 26, 3, 17)),
param('Wed Aug 05 12:00:00 EDT 2015', datetime(2015, 8, 5, 16, 0)),
param('April 9, 2013 at 6:11 a.m.', datetime(2013, 4, 9, 6, 11)),
param('Aug. 9, 2012 at 2:57 p.m.', datetime(2012, 8, 9, 14, 57)),
param('December 10, 2014, 11:02:21 pm', datetime(2014, 12, 10, 23, 2, 21)),
param('8:25 a.m. Dec. 12, 2014', datetime(2014, 12, 12, 8, 25)),
param('2:21 p.m., December 11, 2014', datetime(2014, 12, 11, 14, 21)),
param('Fri, 12 Dec 2014 10:55:50', datetime(2014, 12, 12, 10, 55, 50)),
param('20 Mar 2013 10h11', datetime(2013, 3, 20, 10, 11)),
param('10:06am Dec 11, 2014', datetime(2014, 12, 11, 10, 6)),
param('19 February 2013 year 09:10', datetime(2013, 2, 19, 9, 10)),
# French dates
param('11 Mai 2014', datetime(2014, 5, 11)),
param('dimanche, 11 Mai 2014', datetime(2014, 5, 11)),
param('22 janvier 2015 à 14h40', datetime(2015, 1, 22, 14, 40)),
param('Dimanche 1er Février à 21:24', datetime(2012, 2, 1, 21, 24)),
param('vendredi, décembre 5 2014.', datetime(2014, 12, 5, 0, 0)),
param('le 08 Déc 2014 15:11', datetime(2014, 12, 8, 15, 11)),
param('Le 11 Décembre 2014 à 09:00', datetime(2014, 12, 11, 9, 0)),
param('fév 15, 2013', datetime(2013, 2, 15, 0, 0)),
param('Jeu 15:12', datetime(2012, 11, 8, 15, 12)),
# Spanish dates
param('Martes 21 de Octubre de 2014', datetime(2014, 10, 21)),
param('Miércoles 20 de Noviembre de 2013', datetime(2013, 11, 20)),
param('12 de junio del 2012', datetime(2012, 6, 12)),
param('13 Ago, 2014', datetime(2014, 8, 13)),
param('13 Septiembre, 2014', datetime(2014, 9, 13)),
param('11 Marzo, 2014', datetime(2014, 3, 11)),
param('julio 5, 2015 en 1:04 pm', datetime(2015, 7, 5, 13, 4)),
param('Vi 17:15', datetime(2012, 11, 9, 17, 15)),
# Dutch dates
param('11 augustus 2014', datetime(2014, 8, 11)),
param('14 januari 2014', datetime(2014, 1, 14)),
param('vr jan 24, 2014 12:49', datetime(2014, 1, 24, 12, 49)),
# Italian dates
param('16 giu 2014', datetime(2014, 6, 16)),
param('26 gennaio 2014', datetime(2014, 1, 26)),
param('Ven 18:23', datetime(2012, 11, 9, 18, 23)),
# Portuguese dates
param('sexta-feira, 10 de junho de 2014 14:52', datetime(2014, 6, 10, 14, 52)),
param('13 Setembro, 2014', datetime(2014, 9, 13)),
param('Sab 3:03', datetime(2012, 11, 10, 3, 3)),
# Russian dates
param('10 мая', datetime(2012, 5, 10)), # forum.codenet.ru
param('26 апреля', datetime(2012, 4, 26)),
param('20 ноября 2013', datetime(2013, 11, 20)),
param('28 октября 2014 в 07:54', datetime(2014, 10, 28, 7, 54)),
param('13 января 2015 г. в 13:34', datetime(2015, 1, 13, 13, 34)),
param('09 августа 2012', datetime(2012, 8, 9, 0, 0)),
param('Авг 26, 2015 15:12', datetime(2015, 8, 26, 15, 12)),
param('2 Декабрь 95 11:15', datetime(1995, 12, 2, 11, 15)),
param('13 янв. 2005 19:13', datetime(2005, 1, 13, 19, 13)),
param('13 авг. 2005 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005 г. 19:13', datetime(2005, 8, 13, 19, 13)),
# Turkish dates
param('11 Ağustos, 2014', datetime(2014, 8, 11)),
param('08.Haziran.2014, 11:07', datetime(2014, 6, 8, 11, 7)), # forum.andronova.net
param('17.Şubat.2014, 17:51', datetime(2014, 2, 17, 17, 51)),
param('14-Aralık-2012, 20:56', datetime(2012, 12, 14, 20, 56)), # forum.ceviz.net
# Romanian dates
param('13 iunie 2013', datetime(2013, 6, 13)),
param('14 aprilie 2014', datetime(2014, 4, 14)),
param('18 martie 2012', datetime(2012, 3, 18)),
param('S 14:14', datetime(2012, 11, 10, 14, 14)),
param('12-Iun-2013', datetime(2013, 6, 12)),
# German dates
param('21. Dezember 2013', datetime(2013, 12, 21)),
param('19. Februar 2012', datetime(2012, 2, 19)),
param('26. Juli 2014', datetime(2014, 7, 26)),
param('18.10.14 um 22:56 Uhr', datetime(2014, 10, 18, 22, 56)),
param('12-Mär-2014', datetime(2014, 3, 12)),
param('Mit 13:14', datetime(2012, 11, 7, 13, 14)),
# Czech dates
param('pon 16. čer 2014 10:07:43', datetime(2014, 6, 16, 10, 7, 43)),
param('13 Srpen, 2014', datetime(2014, 8, 13)),
param('čtv 14. lis 2013 12:38:43', datetime(2013, 11, 14, 12, 38, 43)),
# Thai dates
param('ธันวาคม 11, 2014, 08:55:08 PM', datetime(2014, 12, 11, 20, 55, 8)),
param('22 พฤษภาคม 2012, 22:12', datetime(2012, 5, 22, 22, 12)),
param('11 กุมภา 2020, 8:13 AM', datetime(2020, 2, 11, 8, 13)),
param('1 เดือนตุลาคม 2005, 1:00 AM', datetime(2005, 10, 1, 1, 0)),
param('11 ก.พ. 2020, 1:13 pm', datetime(2020, 2, 11, 13, 13)),
# Vietnamese dates
param('Thứ năm', datetime(2012, 11, 8)), # Thursday
param('Thứ sáu', datetime(2012, 11, 9)), # Friday
param('Tháng Mười Hai 29, 2013, 14:14', datetime(2013, 12, 29, 14, 14)), # bpsosrcs.wordpress.com
param('05 Tháng một 2015 - 03:54 AM', datetime(2015, 1, 5, 3, 54)),
# Belarusian dates
param('11 траўня', datetime(2012, 5, 11)),
param('4 мая', datetime(2012, 5, 4)),
param('Чацвер 06 жніўня 2015', datetime(2015, 8, 6)),
param('Нд 14 сакавіка 2015 у 7 гадзін 10 хвілін', datetime(2015, 3, 14, 7, 10)),
param('5 жніўня 2015 года у 13:34', datetime(2015, 8, 5, 13, 34)),
# Ukrainian dates
param('2015-кві-12', datetime(2015, 4, 12)),
param('21 чер 2013 3:13', datetime(2013, 6, 21, 3, 13)),
param('12 лютого 2012, 13:12:23', datetime(2012, 2, 12, 13, 12, 23)),
param('вів о 14:04', datetime(2012, 11, 6, 14, 4)),
# Tagalog dates
param('12 Hulyo 2003 13:01', datetime(2003, 7, 12, 13, 1)),
param('1978, 1 Peb, 7:05 PM', datetime(1978, 2, 1, 19, 5)),
param('2 hun', datetime(2012, 6, 2)),
param('Lin 16:16', datetime(2012, 11, 11, 16, 16)),
# Japanese dates
param('2016年3月20日(日) 21時40分', datetime(2016, 3, 20, 21, 40)),
param("2016年3月20日 21時40分", datetime(2016, 3, 20, 21, 40)),
# Numeric dates
param('06-17-2014', datetime(2014, 6, 17)),
param('13/03/2014', datetime(2014, 3, 13)),
param('11. 12. 2014, 08:45:39', datetime(2014, 11, 12, 8, 45, 39)),
# Miscellaneous dates
param('1 Ni 2015', datetime(2015, 4, 1, 0, 0)),
param('1 Mar 2015', datetime(2015, 3, 1, 0, 0)),
param('1 Paz 2015', datetime(2015, 10, 1, 0, 0)),
param('1 сер 2015', datetime(2015, 8, 1, 0, 0)),
# Chinese dates
param('2015年04月08日10:05', datetime(2015, 4, 8, 10, 5)),
param('2012年12月20日10:35', datetime(2012, 12, 20, 10, 35)),
param('2016年 2月 5日', datetime(2016, 2, 5, 0, 0)),
# Greek dates
param('19 Ιουνίου 2016', datetime(2016, 6, 19, 0, 0)),
param('8 Ιανουαρίου 2015', datetime(2015, 1, 8, 0, 0)),
param('4 Μαρτίου 2015', datetime(2015, 3, 4, 0, 0)),
param('29 Δεκεμβρίου 2015', datetime(2015, 12, 29, 0, 0)),
param('4 Απριλίου 2015', datetime(2015, 4, 4, 0, 0)),
param('19 Φεβρουαρίου 2015', datetime(2015, 2, 19, 0, 0)),
param('16 Μαΐου 2015', datetime(2015, 5, 16, 0, 0)),
param('21 Αυγούστου 2014', datetime(2014, 8, 21, 0, 0)),
param('30 Σεπτεμβρίου 2014', datetime(2014, 9, 30, 0, 0)),
param('24 Οκτωβρίου 2014', datetime(2014, 10, 24, 0, 0)),
param('1 Ιουλίου 2014', datetime(2014, 7, 1, 0, 0)),
param('27 Νοεμβρίου 2014', datetime(2014, 11, 27, 0, 0)),
# Arabic dates
param('١٦ أكتوبر، ٢٠١٥', datetime(2015, 10, 16, 0, 0)),
param('١٦ يونيو، ٢٠١٦', datetime(2016, 6, 16, 0, 0)),
# Korean
param('2016년 6월 18일', datetime(2016, 6, 18, 0, 0)),
# Hindi
param('27 अगस्त 2014', datetime(2014, 8, 27, 0, 0)),
param('8 दिसंबर 2014', datetime(2014, 12, 8, 0, 0)),
param('23 फ़रवरी 2014', datetime(2014, 2, 23, 0, 0)),
param('10 सितंबर 2014', datetime(2014, 9, 10, 0, 0)),
param('11 अक्तूबर 2014', datetime(2014, 10, 11, 0, 0)),
param('12 नवंबर 2014', datetime(2014, 11, 12, 0, 0)),
param('16 जनवरी 2014', datetime(2014, 1, 16, 0, 0)),
param('1 जून 2014', datetime(2014, 6, 1, 0, 0)),
param('25 अप्रैल 2014', datetime(2014, 4, 25, 0, 0)),
param('19 मई 2015', datetime(2015, 5, 19, 0, 0)),
param('2 मार्च 2015', datetime(2015, 3, 2, 0, 0)),
param('1 जुलाई 2015', datetime(2015, 7, 1, 0, 0)),
# Swedish
param('27 augusti 2014', datetime(2014, 8, 27, 0, 0)),
param('7 mars 2011', datetime(2011, 3, 7, 0, 0)),
param('30 januari 2015', datetime(2015, 1, 30, 0, 0)),
param('28 februari 2015', datetime(2015, 2, 28, 0, 0)),
# Norwegian
param('5. januar 2014', datetime(2014, 1, 5, 0, 0)),
param('12. februar 2014', datetime(2014, 2, 12, 0, 0)),
param('12. mars 2013', datetime(2013, 3, 12, 0, 0)),
param('4. april 2014', datetime(2014, 4, 4, 0, 0)),
param('8. mai 2016', datetime(2016, 5, 8, 0, 0)),
param('11. juni 2012', datetime(2012, 6, 11, 0, 0)),
param('29. juli 2012', datetime(2012, 7, 29, 0, 0)),
param('18. august 2012', datetime(2012, 8, 18, 0, 0)),
param('1. september 2012', datetime(2012, 9, 1, 0, 0)),
param('6. oktober 2014', datetime(2014, 10, 6, 0, 0)),
param('28. desember 2014', datetime(2014, 12, 28, 0, 0)),
])
def test_dates_parsing(self, date_string, expected):
self.given_utcnow(datetime(2012, 11, 13)) # Tuesday
self.given_local_tz_offset(0)
self.given_parser(settings={'NORMALIZE': False})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
# English dates
param('[Sept] 04, 2014.', datetime(2014, 9, 4)),
param('Tuesday Jul 22, 2014', datetime(2014, 7, 22)),
param('10:04am EDT', datetime(2012, 11, 13, 14, 4)),
param('Friday', datetime(2012, 11, 9)),
param('November 19, 2014 at noon', datetime(2014, 11, 19, 12, 0)),
param('December 13, 2014 at midnight', datetime(2014, 12, 13, 0, 0)),
param('Nov 25 2014 10:17 pm EST', datetime(2014, 11, 26, 3, 17)),
param('Wed Aug 05 12:00:00 EDT 2015', datetime(2015, 8, 5, 16, 0)),
param('April 9, 2013 at 6:11 a.m.', datetime(2013, 4, 9, 6, 11)),
param('Aug. 9, 2012 at 2:57 p.m.', datetime(2012, 8, 9, 14, 57)),
param('December 10, 2014, 11:02:21 pm', datetime(2014, 12, 10, 23, 2, 21)),
param('8:25 a.m. Dec. 12, 2014', datetime(2014, 12, 12, 8, 25)),
param('2:21 p.m., December 11, 2014', datetime(2014, 12, 11, 14, 21)),
param('Fri, 12 Dec 2014 10:55:50', datetime(2014, 12, 12, 10, 55, 50)),
param('20 Mar 2013 10h11', datetime(2013, 3, 20, 10, 11)),
param('10:06am Dec 11, 2014', datetime(2014, 12, 11, 10, 6)),
param('19 February 2013 year 09:10', datetime(2013, 2, 19, 9, 10)),
# French dates
param('11 Mai 2014', datetime(2014, 5, 11)),
param('dimanche, 11 Mai 2014', datetime(2014, 5, 11)),
param('22 janvier 2015 à 14h40', datetime(2015, 1, 22, 14, 40)), #wrong
param('Dimanche 1er Février à 21:24', datetime(2012, 2, 1, 21, 24)),
param('vendredi, décembre 5 2014.', datetime(2014, 12, 5, 0, 0)),
param('le 08 Déc 2014 15:11', datetime(2014, 12, 8, 15, 11)),
param('Le 11 Décembre 2014 à 09:00', datetime(2014, 12, 11, 9, 0)),
param('fév 15, 2013', datetime(2013, 2, 15, 0, 0)),
param('Jeu 15:12', datetime(2012, 11, 8, 15, 12)),
# Spanish dates
param('Martes 21 de Octubre de 2014', datetime(2014, 10, 21)),
param('Miércoles 20 de Noviembre de 2013', datetime(2013, 11, 20)),
param('12 de junio del 2012', datetime(2012, 6, 12)),
param('13 Ago, 2014', datetime(2014, 8, 13)),
param('13 Septiembre, 2014', datetime(2014, 9, 13)),
param('11 Marzo, 2014', datetime(2014, 3, 11)),
param('julio 5, 2015 en 1:04 pm', datetime(2015, 7, 5, 13, 4)),
param('Vi 17:15', datetime(2012, 11, 9, 17, 15)),
# Dutch dates
param('11 augustus 2014', datetime(2014, 8, 11)),
param('14 januari 2014', datetime(2014, 1, 14)),
param('vr jan 24, 2014 12:49', datetime(2014, 1, 24, 12, 49)),
# Italian dates
param('16 giu 2014', datetime(2014, 6, 16)),
param('26 gennaio 2014', datetime(2014, 1, 26)),
param('Ven 18:23', datetime(2012, 11, 9, 18, 23)),
# Portuguese dates
param('sexta-feira, 10 de junho de 2014 14:52', datetime(2014, 6, 10, 14, 52)),
param('13 Setembro, 2014', datetime(2014, 9, 13)),
param('Sab 3:03', datetime(2012, 11, 10, 3, 3)),
# Russian dates
param('10 мая', datetime(2012, 5, 10)), # forum.codenet.ru
param('26 апреля', datetime(2012, 4, 26)),
param('20 ноября 2013', datetime(2013, 11, 20)),
param('28 октября 2014 в 07:54', datetime(2014, 10, 28, 7, 54)),
param('13 января 2015 г. в 13:34', datetime(2015, 1, 13, 13, 34)),
param('09 августа 2012', datetime(2012, 8, 9, 0, 0)),
param('Авг 26, 2015 15:12', datetime(2015, 8, 26, 15, 12)),
param('2 Декабрь 95 11:15', datetime(1995, 12, 2, 11, 15)),
param('13 янв. 2005 19:13', datetime(2005, 1, 13, 19, 13)),
param('13 авг. 2005 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005 г. 19:13', datetime(2005, 8, 13, 19, 13)),
# Turkish dates
param('11 Ağustos, 2014', datetime(2014, 8, 11)),
param('08.Haziran.2014, 11:07', datetime(2014, 6, 8, 11, 7)), # forum.andronova.net
param('17.Şubat.2014, 17:51', datetime(2014, 2, 17, 17, 51)),
param('14-Aralık-2012, 20:56', datetime(2012, 12, 14, 20, 56)), # forum.ceviz.net
# Romanian dates
param('13 iunie 2013', datetime(2013, 6, 13)),
param('14 aprilie 2014', datetime(2014, 4, 14)),
param('18 martie 2012', datetime(2012, 3, 18)),
param('S 14:14', datetime(2012, 11, 10, 14, 14)),
param('12-Iun-2013', datetime(2013, 6, 12)),
# German dates
param('21. Dezember 2013', datetime(2013, 12, 21)),
param('19. Februar 2012', datetime(2012, 2, 19)),
param('26. Juli 2014', datetime(2014, 7, 26)),
param('18.10.14 um 22:56 Uhr', datetime(2014, 10, 18, 22, 56)),
param('12-Mär-2014', datetime(2014, 3, 12)),
param('Mit 13:14', datetime(2012, 11, 7, 13, 14)),
# Czech dates
param('pon 16. čer 2014 10:07:43', datetime(2014, 6, 16, 10, 7, 43)),
param('13 Srpen, 2014', datetime(2014, 8, 13)),
param('čtv 14. lis 2013 12:38:43', datetime(2013, 11, 14, 12, 38, 43)),
# Thai dates
param('ธันวาคม 11, 2014, 08:55:08 PM', datetime(2014, 12, 11, 20, 55, 8)),
param('22 พฤษภาคม 2012, 22:12', datetime(2012, 5, 22, 22, 12)),
param('11 กุมภา 2020, 8:13 AM', datetime(2020, 2, 11, 8, 13)),
param('1 เดือนตุลาคม 2005, 1:00 AM', datetime(2005, 10, 1, 1, 0)),
param('11 ก.พ. 2020, 1:13 pm', datetime(2020, 2, 11, 13, 13)),
# Vietnamese dates
param('Thứ năm', datetime(2012, 11, 8)), # Thursday
param('Thứ sáu', datetime(2012, 11, 9)), # Friday
param('Tháng Mười Hai 29, 2013, 14:14', datetime(2013, 12, 29, 14, 14)), # bpsosrcs.wordpress.com
param('05 Tháng một 2015 - 03:54 AM', datetime(2015, 1, 5, 3, 54)),
# Belarusian dates
param('11 траўня', datetime(2012, 5, 11)),
param('4 мая', datetime(2012, 5, 4)),
param('Чацвер 06 жніўня 2015', datetime(2015, 8, 6)),
param('Нд 14 сакавіка 2015 у 7 гадзін 10 хвілін', datetime(2015, 3, 14, 7, 10)),
param('5 жніўня 2015 года у 13:34', datetime(2015, 8, 5, 13, 34)),
# Ukrainian dates
param('2015-кві-12', datetime(2015, 4, 12)),
param('21 чер 2013 3:13', datetime(2013, 6, 21, 3, 13)),
param('12 лютого 2012, 13:12:23', datetime(2012, 2, 12, 13, 12, 23)),
param('вів о 14:04', datetime(2012, 11, 6, 14, 4)),
# Filipino dates
param('12 Hulyo 2003 13:01', datetime(2003, 7, 12, 13, 1)),
param('1978, 1 Peb, 7:05 PM', datetime(1978, 2, 1, 19, 5)),
param('2 hun', datetime(2012, 6, 2)),
param('Lin 16:16', datetime(2012, 11, 11, 16, 16)),
# Japanese dates
param('2016年3月20日(日) 21時40分', datetime(2016, 3, 20, 21, 40)),
param("2016年3月20日 21時40分", datetime(2016, 3, 20, 21, 40)),
# Numeric dates
param('06-17-2014', datetime(2014, 6, 17)),
param('13/03/2014', datetime(2014, 3, 13)),
param('11. 12. 2014, 08:45:39', datetime(2014, 11, 12, 8, 45, 39)),
# Miscellaneous dates
param('1 Ni 2015', datetime(2015, 4, 1, 0, 0)),
param('1 Mar 2015', datetime(2015, 3, 1, 0, 0)),
param('1 Paz 2015', datetime(2015, 10, 1, 0, 0)),
param('1 сер 2015', datetime(2015, 8, 1, 0, 0)),
])
def test_dates_parsing_with_normalization(self, date_string, expected):
self.given_utcnow(datetime(2012, 11, 13)) # Tuesday
self.given_local_tz_offset(0)
self.given_parser(settings={'NORMALIZE': True})
self.when_date_is_parsed(normalize_unicode(date_string))
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
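    # Strings that carry a named time zone (EDT, PDT) are expected back converted to UTC.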
@parameterized.expand([
param('Sep 03 2014 | 4:32 pm EDT', datetime(2014, 9, 3, 20, 32)),
param('17th October, 2034 @ 01:08 am PDT', datetime(2034, 10, 17, 8, 8)),
param('15 May 2004 23:24 EDT', datetime(2004, 5, 16, 3, 24)),
param('15 May 2004', datetime(2004, 5, 15, 0, 0)),
param('08/17/14 17:00 (PDT)', datetime(2014, 8, 18, 0, 0)),
])
def test_parsing_with_time_zones(self, date_string, expected):
self.given_local_tz_offset(+1)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('15 May 2004 16:10 -0400', datetime(2004, 5, 15, 20, 10)),
param('1999-12-31 19:00:00 -0500', datetime(2000, 1, 1, 0, 0)),
param('1999-12-31 19:00:00 +0500', datetime(1999, 12, 31, 14, 0)),
param('Fri, 09 Sep 2005 13:51:39 -0700', datetime(2005, 9, 9, 20, 51, 39)),
param('Fri, 09 Sep 2005 13:51:39 +0000', datetime(2005, 9, 9, 13, 51, 39)),
])
def test_parsing_with_utc_offsets(self, date_string, expected):
self.given_local_tz_offset(0)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
def test_empty_dates_string_is_not_parsed(self):
self.when_date_is_parsed_by_date_parser('')
self.then_error_was_raised(ValueError, ["Empty string"])
@parameterized.expand([
param('invalid date string'),
param('Aug 7, 2014Aug 7, 2014'),
param('24h ago'),
])
def test_dates_not_parsed(self, date_string):
self.when_date_is_parsed_by_date_parser(date_string)
self.then_error_was_raised(ValueError, ["unknown string format"])
@parameterized.expand([
param('10 December', datetime(2014, 12, 10)),
param('March', datetime(2014, 3, 15)),
param('Friday', datetime(2015, 2, 13)),
param('Monday', datetime(2015, 2, 9)),
param('10:00PM', datetime(2015, 2, 14, 22, 00)),
param('16:10', datetime(2015, 2, 14, 16, 10)),
param('14:05', datetime(2015, 2, 15, 14, 5)),
])
def test_preferably_past_dates(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30)) # Sunday
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'past'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', datetime(2015, 12, 10)),
param('March', datetime(2015, 3, 15)),
param('Friday', datetime(2015, 2, 20)),
param('Monday', datetime(2015, 2, 16)),
param('10:00PM', datetime(2015, 2, 15, 22, 00)),
param('16:10', datetime(2015, 2, 15, 16, 10)),
param('14:05', datetime(2015, 2, 16, 14, 5)),
])
def test_preferably_future_dates(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30)) # Sunday
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'future'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', datetime(2015, 12, 10)),
param('March', datetime(2015, 3, 15)),
param('Friday', datetime(2015, 2, 13)),
param('10:00PM', datetime(2015, 2, 15, 22, 00)),
param('16:10', datetime(2015, 2, 15, 16, 10)),
param('14:05', datetime(2015, 2, 15, 14, 5)),
])
def test_dates_without_preference(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30)) # Sunday
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'current_period'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 31), expected=datetime(2015, 2, 28)),
param('February 2012', today=datetime(2015, 1, 31), expected=datetime(2012, 2, 29)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 25)),
param('April 2015', today=datetime(2015, 1, 31), expected=datetime(2015, 4, 30)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 28)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 15)),
])
def test_dates_with_day_missing_prefering_current_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'current'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 1), expected=datetime(2015, 2, 28)),
param('February 2012', today=datetime(2015, 1, 1), expected=datetime(2012, 2, 29)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 31)),
param('April 2015', today=datetime(2015, 1, 15), expected=datetime(2015, 4, 30)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 30)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 31)),
])
def test_dates_with_day_missing_prefering_last_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'last'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 8), expected=datetime(2015, 2, 1)),
param('February 2012', today=datetime(2015, 1, 7), expected=datetime(2012, 2, 1)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 1)),
param('April 2015', today=datetime(2015, 1, 15), expected=datetime(2015, 4, 1)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 1)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 1)),
])
def test_dates_with_day_missing_prefering_first_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'first'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param(prefer_day_of_month='current'),
param(prefer_day_of_month='last'),
param(prefer_day_of_month='first'),
])
def test_that_day_preference_does_not_affect_dates_with_explicit_day(self, prefer_day_of_month=None):
self.given_utcnow(datetime(2015, 2, 12))
self.given_parser(settings={'PREFER_DAY_OF_MONTH': prefer_day_of_month})
self.when_date_is_parsed('24 April 2012')
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(datetime(2012, 4, 24))
def test_date_is_parsed_when_skip_tokens_are_supplied(self):
self.given_utcnow(datetime(2015, 2, 12))
self.given_parser(settings={'SKIP_TOKENS': ['de']})
self.when_date_is_parsed('24 April 2012 de')
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(datetime(2012, 4, 24))
@parameterized.expand([
param('29 February 2015'),
param('32 January 2015'),
param('31 April 2015'),
param('31 June 2015'),
param('31 September 2015'),
])
def test_error_should_be_raised_for_invalid_dates_with_too_large_day_number(self, date_string):
self.when_date_is_parsed_by_date_parser(date_string)
self.then_error_was_raised(ValueError, ['Day not in range for month'])
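    # ISO 8601 timestamps are expected to parse regardless of which languages are allowed.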
@parameterized.expand([
param('2015-05-02T10:20:19+0000', languages=['fr'], expected=datetime(2015, 5, 2, 10, 20, 19)),
param('2015-05-02T10:20:19+0000', languages=['en'], expected=datetime(2015, 5, 2, 10, 20, 19)),
param('2015-05-02T10:20:19+0000', languages=[], expected=datetime(2015, 5, 2, 10, 20, 19)),
])
def test_iso_datestamp_format_should_always_parse(self, date_string, languages, expected):
self.given_local_tz_offset(0)
self.given_parser(languages=languages)
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', expected=datetime(2015, 12, 10), period='day'),
param('March', expected=datetime(2015, 3, 15), period='month'),
param('April', expected=datetime(2015, 4, 15), period='month'),
param('December', expected=datetime(2015, 12, 15), period='month'),
param('Friday', expected=datetime(2015, 2, 13), period='day'),
param('Monday', expected=datetime(2015, 2, 9), period='day'),
param('10:00PM', expected=datetime(2015, 2, 15, 22, 00), period='day'),
param('16:10', expected=datetime(2015, 2, 15, 16, 10), period='day'),
param('2014', expected=datetime(2014, 2, 15), period='year'),
param('2008', expected=datetime(2008, 2, 15), period='year'),
])
def test_extracted_period(self, date_string, expected=None, period=None):
self.given_utcnow(datetime(2015, 2, 15, 15, 30)) # Sunday
self.given_local_tz_offset(0)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
self.then_period_is(period)
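    # --- scaffolding: the helpers below patch utcnow and the local timezone offset, and
    # wrap date_parser.parse so each test can inspect what the low-level parser returned ---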
def given_utcnow(self, now):
datetime_mock = Mock(wraps=datetime)
datetime_mock.utcnow = Mock(return_value=now)
self.add_patch(patch('dateparser.date_parser.datetime', new=datetime_mock))
def given_local_tz_offset(self, offset):
self.add_patch(
patch.object(dateparser.timezone_parser,
'local_tz_offset',
new=timedelta(seconds=3600 * offset))
)
def given_parser(self, *args, **kwds):
def collecting_get_date_data(parse):
@wraps(parse)
def wrapped(*args, **kwargs):
self.date_result = parse(*args, **kwargs)
return self.date_result
return wrapped
self.add_patch(patch.object(date_parser,
'parse',
collecting_get_date_data(date_parser.parse)))
self.date_parser = Mock(wraps=date_parser)
self.add_patch(patch('dateparser.date.date_parser', new=self.date_parser))
self.parser = DateDataParser(*args, **kwds)
def when_date_is_parsed(self, date_string):
self.result = self.parser.get_date_data(date_string)
def when_date_is_parsed_by_date_parser(self, date_string):
try:
self.result = DateParser().parse(date_string)
except Exception as error:
self.error = error
def then_period_is(self, period):
self.assertEqual(period, self.result['period'])
def then_date_obj_exactly_is(self, expected):
self.assertEqual(expected, self.result['date_obj'])
def then_date_was_parsed_by_date_parser(self):
self.assertNotEqual(NotImplemented, self.date_result, "Date was not parsed")
self.assertEqual(self.result['date_obj'], self.date_result[0])
if __name__ == '__main__':
unittest.main()
| 51.685897
| 126
| 0.627781
|
from __future__ import unicode_literals
import unittest
from datetime import datetime, timedelta
from functools import wraps
from operator import attrgetter
import six
from mock import patch, Mock
from nose_parameterized import parameterized, param
import dateparser.timezone_parser
from dateparser.date import DateDataParser, date_parser
from dateparser.date_parser import DateParser
from dateparser.languages import default_language_loader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import settings
from dateparser.utils import normalize_unicode
from tests import BaseTestCase
class AutoDetectLanguageTest(BaseTestCase):
def setUp(self):
super(AutoDetectLanguageTest, self).setUp()
self.known_languages = ['en', 'fr', 'es', 'pt', 'ru', 'tr', 'cs']
self.parser = NotImplemented
self.detected_languages = NotImplemented
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_languages=['es', 'pt']),
param(date_strings=["11 junio 2010"], expected_languages=['es']),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_languages=['es']),
])
def test_detect_languages(self, date_strings, expected_languages):
self.given_parser(languages=self.known_languages)
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are(expected_languages)
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_language='es'),
param(date_strings=["11 junio 2010"], expected_language='es'),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_language='es'),
])
def test_exclude_ineligible_languages_with_modify(self, date_strings, expected_language):
self.given_parser(languages=self.known_languages)
self.when_one_language_is_detected(date_strings, modify=True)
self.then_detected_languages_are([expected_language])
self.then_parser_languages_are(self.known_languages[self.known_languages.index(expected_language):])
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_language='es'),
param(date_strings=["11 junio 2010"], expected_language='es'),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_language='es'),
])
def test_do_not_exclude_ineligible_languages_without_modify(self, date_strings, expected_language):
self.given_parser(languages=self.known_languages)
self.when_one_language_is_detected(date_strings, modify=False)
self.then_detected_languages_are([expected_language])
self.then_parser_languages_are(self.known_languages)
@parameterized.expand([
param(date_strings=["11 abril 2010"], expected_languages=['es', 'pt']),
param(date_strings=["11 junio 2010"], expected_languages=['es']),
param(date_strings=["13 Ago, 2014", "13 Septiembre, 2014"], expected_languages=['es']),
param(date_strings=["13 Srpen, 2014"], expected_languages=['cs']),
])
def test_do_not_exclude_ineligible_languages_when_all_ineligible(self, date_strings, expected_languages):
self.given_parser(languages=self.known_languages)
self.when_all_languages_are_detected(date_strings, modify=True)
self.then_detected_languages_are(expected_languages)
self.then_parser_languages_are(self.known_languages)
@parameterized.expand([
param(language='es', date_strings=["13 Setembro, 2014"]),
param(language='cs', date_strings=["'11 Ağustos, 2014'"]),
])
def test_reject_dates_in_other_languages_without_redetection(self, language, date_strings):
self.given_parser(languages=self.known_languages)
self.given_parser_languages_are([language])
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are([])
@parameterized.expand([
param(detected_languages=['es'], date_strings=['13 Juillet, 2014'], expected_languages=['fr']),
param(detected_languages=['es'], date_strings=['11 Ağustos, 2014'], expected_languages=['tr']),
])
def test_accept_dates_in_other_languages_with_redetection_enabled(
self, detected_languages, date_strings, expected_languages
):
self.given_parser(languages=self.known_languages, allow_redetection=True)
self.given_parser_languages_are(detected_languages)
self.when_all_languages_are_detected(date_strings)
self.then_detected_languages_are(expected_languages)
def test_accept_numeric_dates_without_redetection(self,):
self.given_parser(languages=self.known_languages)
self.given_parser_languages_are(['es'])
self.when_all_languages_are_detected(['13/08/2014'])
self.then_detected_languages_are(['es'])
def given_parser(self, languages=None, allow_redetection=False):
if languages is not None:
language_map = default_language_loader.get_language_map()
languages = [language_map[language]
for language in languages]
self.parser = AutoDetectLanguage(languages, allow_redetection=allow_redetection)
def given_parser_languages_are(self, languages):
language_map = default_language_loader.get_language_map()
self.parser.languages = [language_map[language]
for language in languages]
def when_all_languages_are_detected(self, date_strings, modify=False):
assert not isinstance(date_strings, six.string_types)
for date_string in date_strings:
if settings.NORMALIZE:
date_string = normalize_unicode(date_string)
detected_languages = list(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = detected_languages
def when_one_language_is_detected(self, date_strings, modify=False):
for date_string in date_strings:
detected_language = next(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = [detected_language]
def then_detected_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.detected_languages)
six.assertCountEqual(self, expected_languages, shortnames)
def then_parser_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.parser.languages)
six.assertCountEqual(self, expected_languages, shortnames)
class ExactLanguagesTest(BaseTestCase):
def setUp(self):
super(ExactLanguagesTest, self).setUp()
self.parser = NotImplemented
self.detected_languages = NotImplemented
def test_languages_passed_in_constructor_should_not_be_none(self):
self.when_parser_is_constructed(languages=None)
self.then_error_was_raised(ValueError, ['language cannot be None for ExactLanguages'])
@parameterized.expand([
param(languages=['fr'], date_strings=["04-decembre-2015", "13 aou, 2014"]),
])
def test_missing_diacritical_marks(self, languages, date_strings):
settings.NORMALIZE = True
self.given_parser(languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are(languages)
@parameterized.expand([
param(languages=['es'], date_strings=["13 Ago, 2014"]),
param(languages=['es'], date_strings=["13 Septiembre, 2014"]),
param(languages=['es'], date_strings=["13/03/2014"]),
param(languages=['es'], date_strings=["11/03/2014"]),
])
def test_parse_date_in_exact_language(self, languages, date_strings):
self.given_parser(languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are(languages)
@parameterized.expand([
param(languages=['es'], date_strings=["13 Setembro, 2014"]),
])
def test_reject_dates_in_other_languages(self, languages, date_strings):
self.given_parser(languages=languages)
self.when_languages_are_detected(date_strings)
self.then_detected_languages_are([])
def given_parser(self, languages):
language_map = default_language_loader.get_language_map()
languages = [language_map[language]
for language in languages]
self.parser = ExactLanguages(languages)
def when_languages_are_detected(self, date_strings, modify=False):
assert not isinstance(date_strings, six.string_types)
for date_string in date_strings:
detected_languages = list(self.parser.iterate_applicable_languages(date_string, modify=modify, settings=settings))
self.detected_languages = detected_languages
def when_parser_is_constructed(self, languages):
try:
ExactLanguages(languages)
except Exception as error:
self.error = error
def then_detected_languages_are(self, expected_languages):
shortnames = map(attrgetter('shortname'), self.detected_languages)
six.assertCountEqual(self, expected_languages, shortnames)
class TestDateParser(BaseTestCase):
def setUp(self):
super(TestDateParser, self).setUp()
self.parser = NotImplemented
self.result = NotImplemented
self.date_parser = NotImplemented
self.date_result = NotImplemented
@parameterized.expand([
param('[Sept] 04, 2014.', datetime(2014, 9, 4)),
param('Tuesday Jul 22, 2014', datetime(2014, 7, 22)),
param('10:04am EDT', datetime(2012, 11, 13, 14, 4)),
param('Friday', datetime(2012, 11, 9)),
param('November 19, 2014 at noon', datetime(2014, 11, 19, 12, 0)),
param('December 13, 2014 at midnight', datetime(2014, 12, 13, 0, 0)),
param('Nov 25 2014 10:17 pm EST', datetime(2014, 11, 26, 3, 17)),
param('Wed Aug 05 12:00:00 EDT 2015', datetime(2015, 8, 5, 16, 0)),
param('April 9, 2013 at 6:11 a.m.', datetime(2013, 4, 9, 6, 11)),
param('Aug. 9, 2012 at 2:57 p.m.', datetime(2012, 8, 9, 14, 57)),
param('December 10, 2014, 11:02:21 pm', datetime(2014, 12, 10, 23, 2, 21)),
param('8:25 a.m. Dec. 12, 2014', datetime(2014, 12, 12, 8, 25)),
param('2:21 p.m., December 11, 2014', datetime(2014, 12, 11, 14, 21)),
param('Fri, 12 Dec 2014 10:55:50', datetime(2014, 12, 12, 10, 55, 50)),
param('20 Mar 2013 10h11', datetime(2013, 3, 20, 10, 11)),
param('10:06am Dec 11, 2014', datetime(2014, 12, 11, 10, 6)),
param('19 February 2013 year 09:10', datetime(2013, 2, 19, 9, 10)),
param('11 Mai 2014', datetime(2014, 5, 11)),
param('dimanche, 11 Mai 2014', datetime(2014, 5, 11)),
param('22 janvier 2015 à 14h40', datetime(2015, 1, 22, 14, 40)),
param('Dimanche 1er Février à 21:24', datetime(2012, 2, 1, 21, 24)),
param('vendredi, décembre 5 2014.', datetime(2014, 12, 5, 0, 0)),
param('le 08 Déc 2014 15:11', datetime(2014, 12, 8, 15, 11)),
param('Le 11 Décembre 2014 à 09:00', datetime(2014, 12, 11, 9, 0)),
param('fév 15, 2013', datetime(2013, 2, 15, 0, 0)),
param('Jeu 15:12', datetime(2012, 11, 8, 15, 12)),
param('Martes 21 de Octubre de 2014', datetime(2014, 10, 21)),
param('Miércoles 20 de Noviembre de 2013', datetime(2013, 11, 20)),
param('12 de junio del 2012', datetime(2012, 6, 12)),
param('13 Ago, 2014', datetime(2014, 8, 13)),
param('13 Septiembre, 2014', datetime(2014, 9, 13)),
param('11 Marzo, 2014', datetime(2014, 3, 11)),
param('julio 5, 2015 en 1:04 pm', datetime(2015, 7, 5, 13, 4)),
param('Vi 17:15', datetime(2012, 11, 9, 17, 15)),
param('11 augustus 2014', datetime(2014, 8, 11)),
param('14 januari 2014', datetime(2014, 1, 14)),
param('vr jan 24, 2014 12:49', datetime(2014, 1, 24, 12, 49)),
param('16 giu 2014', datetime(2014, 6, 16)),
param('26 gennaio 2014', datetime(2014, 1, 26)),
param('Ven 18:23', datetime(2012, 11, 9, 18, 23)),
param('sexta-feira, 10 de junho de 2014 14:52', datetime(2014, 6, 10, 14, 52)),
param('13 Setembro, 2014', datetime(2014, 9, 13)),
param('Sab 3:03', datetime(2012, 11, 10, 3, 3)),
param('10 мая', datetime(2012, 5, 10)),
param('26 апреля', datetime(2012, 4, 26)),
param('20 ноября 2013', datetime(2013, 11, 20)),
param('28 октября 2014 в 07:54', datetime(2014, 10, 28, 7, 54)),
param('13 января 2015 г. в 13:34', datetime(2015, 1, 13, 13, 34)),
param('09 августа 2012', datetime(2012, 8, 9, 0, 0)),
param('Авг 26, 2015 15:12', datetime(2015, 8, 26, 15, 12)),
param('2 Декабрь 95 11:15', datetime(1995, 12, 2, 11, 15)),
param('13 янв. 2005 19:13', datetime(2005, 1, 13, 19, 13)),
param('13 авг. 2005 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005 г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('11 Ağustos, 2014', datetime(2014, 8, 11)),
param('08.Haziran.2014, 11:07', datetime(2014, 6, 8, 11, 7)),
param('17.Şubat.2014, 17:51', datetime(2014, 2, 17, 17, 51)),
param('14-Aralık-2012, 20:56', datetime(2012, 12, 14, 20, 56)),
param('13 iunie 2013', datetime(2013, 6, 13)),
param('14 aprilie 2014', datetime(2014, 4, 14)),
param('18 martie 2012', datetime(2012, 3, 18)),
param('S 14:14', datetime(2012, 11, 10, 14, 14)),
param('12-Iun-2013', datetime(2013, 6, 12)),
param('21. Dezember 2013', datetime(2013, 12, 21)),
param('19. Februar 2012', datetime(2012, 2, 19)),
param('26. Juli 2014', datetime(2014, 7, 26)),
param('18.10.14 um 22:56 Uhr', datetime(2014, 10, 18, 22, 56)),
param('12-Mär-2014', datetime(2014, 3, 12)),
param('Mit 13:14', datetime(2012, 11, 7, 13, 14)),
param('pon 16. čer 2014 10:07:43', datetime(2014, 6, 16, 10, 7, 43)),
param('13 Srpen, 2014', datetime(2014, 8, 13)),
param('čtv 14. lis 2013 12:38:43', datetime(2013, 11, 14, 12, 38, 43)),
param('ธันวาคม 11, 2014, 08:55:08 PM', datetime(2014, 12, 11, 20, 55, 8)),
param('22 พฤษภาคม 2012, 22:12', datetime(2012, 5, 22, 22, 12)),
param('11 กุมภา 2020, 8:13 AM', datetime(2020, 2, 11, 8, 13)),
param('1 เดือนตุลาคม 2005, 1:00 AM', datetime(2005, 10, 1, 1, 0)),
param('11 ก.พ. 2020, 1:13 pm', datetime(2020, 2, 11, 13, 13)),
param('Thứ năm', datetime(2012, 11, 8)),
param('Thứ sáu', datetime(2012, 11, 9)),
param('Tháng Mười Hai 29, 2013, 14:14', datetime(2013, 12, 29, 14, 14)),
param('05 Tháng một 2015 - 03:54 AM', datetime(2015, 1, 5, 3, 54)),
param('11 траўня', datetime(2012, 5, 11)),
param('4 мая', datetime(2012, 5, 4)),
param('Чацвер 06 жніўня 2015', datetime(2015, 8, 6)),
param('Нд 14 сакавіка 2015 у 7 гадзін 10 хвілін', datetime(2015, 3, 14, 7, 10)),
param('5 жніўня 2015 года у 13:34', datetime(2015, 8, 5, 13, 34)),
param('2015-кві-12', datetime(2015, 4, 12)),
param('21 чер 2013 3:13', datetime(2013, 6, 21, 3, 13)),
param('12 лютого 2012, 13:12:23', datetime(2012, 2, 12, 13, 12, 23)),
param('вів о 14:04', datetime(2012, 11, 6, 14, 4)),
param('12 Hulyo 2003 13:01', datetime(2003, 7, 12, 13, 1)),
param('1978, 1 Peb, 7:05 PM', datetime(1978, 2, 1, 19, 5)),
param('2 hun', datetime(2012, 6, 2)),
param('Lin 16:16', datetime(2012, 11, 11, 16, 16)),
param('2016年3月20日(日) 21時40分', datetime(2016, 3, 20, 21, 40)),
param("2016年3月20日 21時40分", datetime(2016, 3, 20, 21, 40)),
param('06-17-2014', datetime(2014, 6, 17)),
param('13/03/2014', datetime(2014, 3, 13)),
param('11. 12. 2014, 08:45:39', datetime(2014, 11, 12, 8, 45, 39)),
param('1 Ni 2015', datetime(2015, 4, 1, 0, 0)),
param('1 Mar 2015', datetime(2015, 3, 1, 0, 0)),
param('1 Paz 2015', datetime(2015, 10, 1, 0, 0)),
param('1 сер 2015', datetime(2015, 8, 1, 0, 0)),
param('2015年04月08日10:05', datetime(2015, 4, 8, 10, 5)),
param('2012年12月20日10:35', datetime(2012, 12, 20, 10, 35)),
param('2016年 2月 5日', datetime(2016, 2, 5, 0, 0)),
param('19 Ιουνίου 2016', datetime(2016, 6, 19, 0, 0)),
param('8 Ιανουαρίου 2015', datetime(2015, 1, 8, 0, 0)),
param('4 Μαρτίου 2015', datetime(2015, 3, 4, 0, 0)),
param('29 Δεκεμβρίου 2015', datetime(2015, 12, 29, 0, 0)),
param('4 Απριλίου 2015', datetime(2015, 4, 4, 0, 0)),
param('19 Φεβρουαρίου 2015', datetime(2015, 2, 19, 0, 0)),
param('16 Μαΐου 2015', datetime(2015, 5, 16, 0, 0)),
param('21 Αυγούστου 2014', datetime(2014, 8, 21, 0, 0)),
param('30 Σεπτεμβρίου 2014', datetime(2014, 9, 30, 0, 0)),
param('24 Οκτωβρίου 2014', datetime(2014, 10, 24, 0, 0)),
param('1 Ιουλίου 2014', datetime(2014, 7, 1, 0, 0)),
param('27 Νοεμβρίου 2014', datetime(2014, 11, 27, 0, 0)),
param('١٦ أكتوبر، ٢٠١٥', datetime(2015, 10, 16, 0, 0)),
param('١٦ يونيو، ٢٠١٦', datetime(2016, 6, 16, 0, 0)),
param('2016년 6월 18일', datetime(2016, 6, 18, 0, 0)),
param('27 अगस्त 2014', datetime(2014, 8, 27, 0, 0)),
param('8 दिसंबर 2014', datetime(2014, 12, 8, 0, 0)),
param('23 फ़रवरी 2014', datetime(2014, 2, 23, 0, 0)),
param('10 सितंबर 2014', datetime(2014, 9, 10, 0, 0)),
param('11 अक्तूबर 2014', datetime(2014, 10, 11, 0, 0)),
param('12 नवंबर 2014', datetime(2014, 11, 12, 0, 0)),
param('16 जनवरी 2014', datetime(2014, 1, 16, 0, 0)),
param('1 जून 2014', datetime(2014, 6, 1, 0, 0)),
param('25 अप्रैल 2014', datetime(2014, 4, 25, 0, 0)),
param('19 मई 2015', datetime(2015, 5, 19, 0, 0)),
param('2 मार्च 2015', datetime(2015, 3, 2, 0, 0)),
param('1 जुलाई 2015', datetime(2015, 7, 1, 0, 0)),
param('27 augusti 2014', datetime(2014, 8, 27, 0, 0)),
param('7 mars 2011', datetime(2011, 3, 7, 0, 0)),
param('30 januari 2015', datetime(2015, 1, 30, 0, 0)),
param('28 februari 2015', datetime(2015, 2, 28, 0, 0)),
param('5. januar 2014', datetime(2014, 1, 5, 0, 0)),
param('12. februar 2014', datetime(2014, 2, 12, 0, 0)),
param('12. mars 2013', datetime(2013, 3, 12, 0, 0)),
param('4. april 2014', datetime(2014, 4, 4, 0, 0)),
param('8. mai 2016', datetime(2016, 5, 8, 0, 0)),
param('11. juni 2012', datetime(2012, 6, 11, 0, 0)),
param('29. juli 2012', datetime(2012, 7, 29, 0, 0)),
param('18. august 2012', datetime(2012, 8, 18, 0, 0)),
param('1. september 2012', datetime(2012, 9, 1, 0, 0)),
param('6. oktober 2014', datetime(2014, 10, 6, 0, 0)),
param('28. desember 2014', datetime(2014, 12, 28, 0, 0)),
])
def test_dates_parsing(self, date_string, expected):
self.given_utcnow(datetime(2012, 11, 13))
self.given_local_tz_offset(0)
self.given_parser(settings={'NORMALIZE': False})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('[Sept] 04, 2014.', datetime(2014, 9, 4)),
param('Tuesday Jul 22, 2014', datetime(2014, 7, 22)),
param('10:04am EDT', datetime(2012, 11, 13, 14, 4)),
param('Friday', datetime(2012, 11, 9)),
param('November 19, 2014 at noon', datetime(2014, 11, 19, 12, 0)),
param('December 13, 2014 at midnight', datetime(2014, 12, 13, 0, 0)),
param('Nov 25 2014 10:17 pm EST', datetime(2014, 11, 26, 3, 17)),
param('Wed Aug 05 12:00:00 EDT 2015', datetime(2015, 8, 5, 16, 0)),
param('April 9, 2013 at 6:11 a.m.', datetime(2013, 4, 9, 6, 11)),
param('Aug. 9, 2012 at 2:57 p.m.', datetime(2012, 8, 9, 14, 57)),
param('December 10, 2014, 11:02:21 pm', datetime(2014, 12, 10, 23, 2, 21)),
param('8:25 a.m. Dec. 12, 2014', datetime(2014, 12, 12, 8, 25)),
param('2:21 p.m., December 11, 2014', datetime(2014, 12, 11, 14, 21)),
param('Fri, 12 Dec 2014 10:55:50', datetime(2014, 12, 12, 10, 55, 50)),
param('20 Mar 2013 10h11', datetime(2013, 3, 20, 10, 11)),
param('10:06am Dec 11, 2014', datetime(2014, 12, 11, 10, 6)),
param('19 February 2013 year 09:10', datetime(2013, 2, 19, 9, 10)),
param('11 Mai 2014', datetime(2014, 5, 11)),
param('dimanche, 11 Mai 2014', datetime(2014, 5, 11)),
param('22 janvier 2015 à 14h40', datetime(2015, 1, 22, 14, 40)),
param('Dimanche 1er Février à 21:24', datetime(2012, 2, 1, 21, 24)),
param('vendredi, décembre 5 2014.', datetime(2014, 12, 5, 0, 0)),
param('le 08 Déc 2014 15:11', datetime(2014, 12, 8, 15, 11)),
param('Le 11 Décembre 2014 à 09:00', datetime(2014, 12, 11, 9, 0)),
param('fév 15, 2013', datetime(2013, 2, 15, 0, 0)),
param('Jeu 15:12', datetime(2012, 11, 8, 15, 12)),
param('Martes 21 de Octubre de 2014', datetime(2014, 10, 21)),
param('Miércoles 20 de Noviembre de 2013', datetime(2013, 11, 20)),
param('12 de junio del 2012', datetime(2012, 6, 12)),
param('13 Ago, 2014', datetime(2014, 8, 13)),
param('13 Septiembre, 2014', datetime(2014, 9, 13)),
param('11 Marzo, 2014', datetime(2014, 3, 11)),
param('julio 5, 2015 en 1:04 pm', datetime(2015, 7, 5, 13, 4)),
param('Vi 17:15', datetime(2012, 11, 9, 17, 15)),
param('11 augustus 2014', datetime(2014, 8, 11)),
param('14 januari 2014', datetime(2014, 1, 14)),
param('vr jan 24, 2014 12:49', datetime(2014, 1, 24, 12, 49)),
param('16 giu 2014', datetime(2014, 6, 16)),
param('26 gennaio 2014', datetime(2014, 1, 26)),
param('Ven 18:23', datetime(2012, 11, 9, 18, 23)),
param('sexta-feira, 10 de junho de 2014 14:52', datetime(2014, 6, 10, 14, 52)),
param('13 Setembro, 2014', datetime(2014, 9, 13)),
param('Sab 3:03', datetime(2012, 11, 10, 3, 3)),
param('10 мая', datetime(2012, 5, 10)),
param('26 апреля', datetime(2012, 4, 26)),
param('20 ноября 2013', datetime(2013, 11, 20)),
param('28 октября 2014 в 07:54', datetime(2014, 10, 28, 7, 54)),
param('13 января 2015 г. в 13:34', datetime(2015, 1, 13, 13, 34)),
param('09 августа 2012', datetime(2012, 8, 9, 0, 0)),
param('Авг 26, 2015 15:12', datetime(2015, 8, 26, 15, 12)),
param('2 Декабрь 95 11:15', datetime(1995, 12, 2, 11, 15)),
param('13 янв. 2005 19:13', datetime(2005, 1, 13, 19, 13)),
param('13 авг. 2005 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('13 авг. 2005 г. 19:13', datetime(2005, 8, 13, 19, 13)),
param('11 Ağustos, 2014', datetime(2014, 8, 11)),
param('08.Haziran.2014, 11:07', datetime(2014, 6, 8, 11, 7)),
param('17.Şubat.2014, 17:51', datetime(2014, 2, 17, 17, 51)),
param('14-Aralık-2012, 20:56', datetime(2012, 12, 14, 20, 56)),
param('13 iunie 2013', datetime(2013, 6, 13)),
param('14 aprilie 2014', datetime(2014, 4, 14)),
param('18 martie 2012', datetime(2012, 3, 18)),
param('S 14:14', datetime(2012, 11, 10, 14, 14)),
param('12-Iun-2013', datetime(2013, 6, 12)),
param('21. Dezember 2013', datetime(2013, 12, 21)),
param('19. Februar 2012', datetime(2012, 2, 19)),
param('26. Juli 2014', datetime(2014, 7, 26)),
param('18.10.14 um 22:56 Uhr', datetime(2014, 10, 18, 22, 56)),
param('12-Mär-2014', datetime(2014, 3, 12)),
param('Mit 13:14', datetime(2012, 11, 7, 13, 14)),
param('pon 16. čer 2014 10:07:43', datetime(2014, 6, 16, 10, 7, 43)),
param('13 Srpen, 2014', datetime(2014, 8, 13)),
param('čtv 14. lis 2013 12:38:43', datetime(2013, 11, 14, 12, 38, 43)),
param('ธันวาคม 11, 2014, 08:55:08 PM', datetime(2014, 12, 11, 20, 55, 8)),
param('22 พฤษภาคม 2012, 22:12', datetime(2012, 5, 22, 22, 12)),
param('11 กุมภา 2020, 8:13 AM', datetime(2020, 2, 11, 8, 13)),
param('1 เดือนตุลาคม 2005, 1:00 AM', datetime(2005, 10, 1, 1, 0)),
param('11 ก.พ. 2020, 1:13 pm', datetime(2020, 2, 11, 13, 13)),
param('Thứ năm', datetime(2012, 11, 8)),
param('Thứ sáu', datetime(2012, 11, 9)),
param('Tháng Mười Hai 29, 2013, 14:14', datetime(2013, 12, 29, 14, 14)),
param('05 Tháng một 2015 - 03:54 AM', datetime(2015, 1, 5, 3, 54)),
param('11 траўня', datetime(2012, 5, 11)),
param('4 мая', datetime(2012, 5, 4)),
param('Чацвер 06 жніўня 2015', datetime(2015, 8, 6)),
param('Нд 14 сакавіка 2015 у 7 гадзін 10 хвілін', datetime(2015, 3, 14, 7, 10)),
param('5 жніўня 2015 года у 13:34', datetime(2015, 8, 5, 13, 34)),
param('2015-кві-12', datetime(2015, 4, 12)),
param('21 чер 2013 3:13', datetime(2013, 6, 21, 3, 13)),
param('12 лютого 2012, 13:12:23', datetime(2012, 2, 12, 13, 12, 23)),
param('вів о 14:04', datetime(2012, 11, 6, 14, 4)),
param('12 Hulyo 2003 13:01', datetime(2003, 7, 12, 13, 1)),
param('1978, 1 Peb, 7:05 PM', datetime(1978, 2, 1, 19, 5)),
param('2 hun', datetime(2012, 6, 2)),
param('Lin 16:16', datetime(2012, 11, 11, 16, 16)),
param('2016年3月20日(日) 21時40分', datetime(2016, 3, 20, 21, 40)),
param("2016年3月20日 21時40分", datetime(2016, 3, 20, 21, 40)),
param('06-17-2014', datetime(2014, 6, 17)),
param('13/03/2014', datetime(2014, 3, 13)),
param('11. 12. 2014, 08:45:39', datetime(2014, 11, 12, 8, 45, 39)),
param('1 Ni 2015', datetime(2015, 4, 1, 0, 0)),
param('1 Mar 2015', datetime(2015, 3, 1, 0, 0)),
param('1 Paz 2015', datetime(2015, 10, 1, 0, 0)),
param('1 сер 2015', datetime(2015, 8, 1, 0, 0)),
])
def test_dates_parsing_with_normalization(self, date_string, expected):
self.given_utcnow(datetime(2012, 11, 13))
self.given_local_tz_offset(0)
self.given_parser(settings={'NORMALIZE': True})
self.when_date_is_parsed(normalize_unicode(date_string))
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('Sep 03 2014 | 4:32 pm EDT', datetime(2014, 9, 3, 20, 32)),
param('17th October, 2034 @ 01:08 am PDT', datetime(2034, 10, 17, 8, 8)),
param('15 May 2004 23:24 EDT', datetime(2004, 5, 16, 3, 24)),
param('15 May 2004', datetime(2004, 5, 15, 0, 0)),
param('08/17/14 17:00 (PDT)', datetime(2014, 8, 18, 0, 0)),
])
def test_parsing_with_time_zones(self, date_string, expected):
self.given_local_tz_offset(+1)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('15 May 2004 16:10 -0400', datetime(2004, 5, 15, 20, 10)),
param('1999-12-31 19:00:00 -0500', datetime(2000, 1, 1, 0, 0)),
param('1999-12-31 19:00:00 +0500', datetime(1999, 12, 31, 14, 0)),
param('Fri, 09 Sep 2005 13:51:39 -0700', datetime(2005, 9, 9, 20, 51, 39)),
param('Fri, 09 Sep 2005 13:51:39 +0000', datetime(2005, 9, 9, 13, 51, 39)),
])
def test_parsing_with_utc_offsets(self, date_string, expected):
self.given_local_tz_offset(0)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_period_is('day')
self.then_date_obj_exactly_is(expected)
def test_empty_dates_string_is_not_parsed(self):
self.when_date_is_parsed_by_date_parser('')
self.then_error_was_raised(ValueError, ["Empty string"])
@parameterized.expand([
param('invalid date string'),
param('Aug 7, 2014Aug 7, 2014'),
param('24h ago'),
])
def test_dates_not_parsed(self, date_string):
self.when_date_is_parsed_by_date_parser(date_string)
self.then_error_was_raised(ValueError, ["unknown string format"])
@parameterized.expand([
param('10 December', datetime(2014, 12, 10)),
param('March', datetime(2014, 3, 15)),
param('Friday', datetime(2015, 2, 13)),
param('Monday', datetime(2015, 2, 9)),
param('10:00PM', datetime(2015, 2, 14, 22, 00)),
param('16:10', datetime(2015, 2, 14, 16, 10)),
param('14:05', datetime(2015, 2, 15, 14, 5)),
])
def test_preferably_past_dates(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30))
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'past'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', datetime(2015, 12, 10)),
param('March', datetime(2015, 3, 15)),
param('Friday', datetime(2015, 2, 20)),
param('Monday', datetime(2015, 2, 16)),
param('10:00PM', datetime(2015, 2, 15, 22, 00)),
param('16:10', datetime(2015, 2, 15, 16, 10)),
param('14:05', datetime(2015, 2, 16, 14, 5)),
])
def test_preferably_future_dates(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30))
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'future'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', datetime(2015, 12, 10)),
param('March', datetime(2015, 3, 15)),
param('Friday', datetime(2015, 2, 13)),
param('10:00PM', datetime(2015, 2, 15, 22, 00)),
param('16:10', datetime(2015, 2, 15, 16, 10)),
param('14:05', datetime(2015, 2, 15, 14, 5)),
])
def test_dates_without_preference(self, date_string, expected):
self.given_utcnow(datetime(2015, 2, 15, 15, 30))
self.given_local_tz_offset(0)
self.given_parser(settings={'PREFER_DATES_FROM': 'current_period'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 31), expected=datetime(2015, 2, 28)),
param('February 2012', today=datetime(2015, 1, 31), expected=datetime(2012, 2, 29)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 25)),
param('April 2015', today=datetime(2015, 1, 31), expected=datetime(2015, 4, 30)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 28)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 15)),
])
def test_dates_with_day_missing_prefering_current_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'current'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 1), expected=datetime(2015, 2, 28)),
param('February 2012', today=datetime(2015, 1, 1), expected=datetime(2012, 2, 29)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 31)),
param('April 2015', today=datetime(2015, 1, 15), expected=datetime(2015, 4, 30)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 30)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 31)),
])
def test_dates_with_day_missing_prefering_last_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'last'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('February 2015', today=datetime(2015, 1, 8), expected=datetime(2015, 2, 1)),
param('February 2012', today=datetime(2015, 1, 7), expected=datetime(2012, 2, 1)),
param('March 2015', today=datetime(2015, 1, 25), expected=datetime(2015, 3, 1)),
param('April 2015', today=datetime(2015, 1, 15), expected=datetime(2015, 4, 1)),
param('April 2015', today=datetime(2015, 2, 28), expected=datetime(2015, 4, 1)),
param('December 2014', today=datetime(2015, 2, 15), expected=datetime(2014, 12, 1)),
])
def test_dates_with_day_missing_prefering_first_day_of_month(self, date_string, today=None, expected=None):
self.given_utcnow(today)
self.given_parser(settings={'PREFER_DAY_OF_MONTH': 'first'})
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param(prefer_day_of_month='current'),
param(prefer_day_of_month='last'),
param(prefer_day_of_month='first'),
])
def test_that_day_preference_does_not_affect_dates_with_explicit_day(self, prefer_day_of_month=None):
self.given_utcnow(datetime(2015, 2, 12))
self.given_parser(settings={'PREFER_DAY_OF_MONTH': prefer_day_of_month})
self.when_date_is_parsed('24 April 2012')
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(datetime(2012, 4, 24))
def test_date_is_parsed_when_skip_tokens_are_supplied(self):
self.given_utcnow(datetime(2015, 2, 12))
self.given_parser(settings={'SKIP_TOKENS': ['de']})
self.when_date_is_parsed('24 April 2012 de')
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(datetime(2012, 4, 24))
@parameterized.expand([
param('29 February 2015'),
param('32 January 2015'),
param('31 April 2015'),
param('31 June 2015'),
param('31 September 2015'),
])
def test_error_should_be_raised_for_invalid_dates_with_too_large_day_number(self, date_string):
self.when_date_is_parsed_by_date_parser(date_string)
self.then_error_was_raised(ValueError, ['Day not in range for month'])
@parameterized.expand([
param('2015-05-02T10:20:19+0000', languages=['fr'], expected=datetime(2015, 5, 2, 10, 20, 19)),
param('2015-05-02T10:20:19+0000', languages=['en'], expected=datetime(2015, 5, 2, 10, 20, 19)),
param('2015-05-02T10:20:19+0000', languages=[], expected=datetime(2015, 5, 2, 10, 20, 19)),
])
def test_iso_datestamp_format_should_always_parse(self, date_string, languages, expected):
self.given_local_tz_offset(0)
self.given_parser(languages=languages)
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
@parameterized.expand([
param('10 December', expected=datetime(2015, 12, 10), period='day'),
param('March', expected=datetime(2015, 3, 15), period='month'),
param('April', expected=datetime(2015, 4, 15), period='month'),
param('December', expected=datetime(2015, 12, 15), period='month'),
param('Friday', expected=datetime(2015, 2, 13), period='day'),
param('Monday', expected=datetime(2015, 2, 9), period='day'),
param('10:00PM', expected=datetime(2015, 2, 15, 22, 00), period='day'),
param('16:10', expected=datetime(2015, 2, 15, 16, 10), period='day'),
param('2014', expected=datetime(2014, 2, 15), period='year'),
param('2008', expected=datetime(2008, 2, 15), period='year'),
])
def test_extracted_period(self, date_string, expected=None, period=None):
self.given_utcnow(datetime(2015, 2, 15, 15, 30))
self.given_local_tz_offset(0)
self.given_parser()
self.when_date_is_parsed(date_string)
self.then_date_was_parsed_by_date_parser()
self.then_date_obj_exactly_is(expected)
self.then_period_is(period)
def given_utcnow(self, now):
datetime_mock = Mock(wraps=datetime)
datetime_mock.utcnow = Mock(return_value=now)
self.add_patch(patch('dateparser.date_parser.datetime', new=datetime_mock))
def given_local_tz_offset(self, offset):
self.add_patch(
patch.object(dateparser.timezone_parser,
'local_tz_offset',
new=timedelta(seconds=3600 * offset))
)
def given_parser(self, *args, **kwds):
def collecting_get_date_data(parse):
@wraps(parse)
def wrapped(*args, **kwargs):
self.date_result = parse(*args, **kwargs)
return self.date_result
return wrapped
self.add_patch(patch.object(date_parser,
'parse',
collecting_get_date_data(date_parser.parse)))
self.date_parser = Mock(wraps=date_parser)
self.add_patch(patch('dateparser.date.date_parser', new=self.date_parser))
self.parser = DateDataParser(*args, **kwds)
def when_date_is_parsed(self, date_string):
self.result = self.parser.get_date_data(date_string)
def when_date_is_parsed_by_date_parser(self, date_string):
try:
self.result = DateParser().parse(date_string)
except Exception as error:
self.error = error
def then_period_is(self, period):
self.assertEqual(period, self.result['period'])
def then_date_obj_exactly_is(self, expected):
self.assertEqual(expected, self.result['date_obj'])
def then_date_was_parsed_by_date_parser(self):
self.assertNotEqual(NotImplemented, self.date_result, "Date was not parsed")
self.assertEqual(self.result['date_obj'], self.date_result[0])
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c4076c85e14c5bd3849ea13543d080b510e08e1
| 12,264
|
py
|
Python
|
mars/services/task/tests/test_service.py
|
qinxuye/mars
|
3b10fd4b40fbaf1526c179709fdbcc3a1f899ab7
|
[
"Apache-2.0"
] | null | null | null |
mars/services/task/tests/test_service.py
|
qinxuye/mars
|
3b10fd4b40fbaf1526c179709fdbcc3a1f899ab7
|
[
"Apache-2.0"
] | null | null | null |
mars/services/task/tests/test_service.py
|
qinxuye/mars
|
3b10fd4b40fbaf1526c179709fdbcc3a1f899ab7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import time
import numpy as np
import pytest
import mars.oscar as mo
import mars.remote as mr
from mars.core import TileableGraph, TileableGraphBuilder
from mars.core.context import get_context
from mars.services import start_services, NodeRole
from mars.services.session import SessionAPI
from mars.services.storage import MockStorageAPI
from mars.services.subtask import SubtaskStatus
from mars.services.web import WebActor
from mars.services.meta import MetaAPI
from mars.services.task import TaskAPI, TaskStatus, WebTaskAPI
from mars.services.task.errors import TaskNotExist
from mars.utils import Timer
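# Fixture: one supervisor actor pool (no sub-processes) and one worker pool with a main
# process, two 'numa-0' workers and an 'io' process; both are stopped together on teardown.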
@pytest.fixture
async def actor_pools():
async def start_pool(is_worker: bool):
if is_worker:
kw = dict(
n_process=3,
labels=['main'] + ['numa-0'] * 2 + ['io'],
subprocess_start_method='spawn'
)
else:
kw = dict(n_process=0,
subprocess_start_method='spawn')
pool = await mo.create_actor_pool('127.0.0.1', **kw)
await pool.start()
return pool
sv_pool, worker_pool = await asyncio.gather(
start_pool(False), start_pool(True)
)
try:
yield sv_pool, worker_pool
finally:
await asyncio.gather(sv_pool.stop(), worker_pool.stop())
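# Parametrized fixture: start supervisor and worker services, create a test session and
# yield (supervisor address, task API, mock storage API). The boolean param switches
# between the in-process TaskAPI and the HTTP-based WebTaskAPI.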
@pytest.fixture(params=[False, True])
async def start_test_service(actor_pools, request):
sv_pool, worker_pool = actor_pools
config = {
"services": ["cluster", "session", "meta", "lifecycle",
"scheduling", "subtask", "task"],
"cluster": {
"backend": "fixed",
"lookup_address": sv_pool.external_address,
"resource": {"numa-0": 2}
},
"meta": {
"store": "dict"
},
"scheduling": {},
"task": {},
}
    if request.param:
config['services'].append('web')
await start_services(
NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
await start_services(
NodeRole.WORKER, config, address=worker_pool.external_address)
session_id = 'test_session'
session_api = await SessionAPI.create(sv_pool.external_address)
await session_api.create_session(session_id)
if not request.param:
task_api = await TaskAPI.create(session_id,
sv_pool.external_address)
else:
web_actor = await mo.actor_ref(WebActor.default_uid(),
address=sv_pool.external_address)
web_address = await web_actor.get_web_address()
task_api = WebTaskAPI(session_id, web_address)
assert await task_api.get_task_results() == []
# create mock meta and storage APIs
_ = await MetaAPI.create(session_id,
sv_pool.external_address)
storage_api = await MockStorageAPI.create(session_id,
worker_pool.external_address)
try:
yield sv_pool.external_address, task_api, storage_api
finally:
await MockStorageAPI.cleanup(worker_pool.external_address)
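# Submit a small three-node remote graph without fusion and check that the
# concatenated-sum result (45) lands in storage.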
@pytest.mark.asyncio
async def test_task_execution(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
return np.arange(5)
def f2():
return np.arange(5, 10)
def f3(f1r, f2r):
return np.concatenate([f1r, f2r]).sum()
r1 = mr.spawn(f1)
r2 = mr.spawn(f2)
r3 = mr.spawn(f3, args=(r1, r2))
graph = TileableGraph([r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
assert await task_api.get_last_idle_time() is None
assert isinstance(task_id, str)
await task_api.wait_task(task_id)
task_result = await task_api.get_task_result(task_id)
assert task_result.status == TaskStatus.terminated
assert await task_api.get_last_idle_time() is not None
if task_result.error is not None:
raise task_result.error.with_traceback(task_result.traceback)
result_tileable = (await task_api.get_fetch_tileables(task_id))[0]
data_key = result_tileable.chunks[0].key
assert await storage_api.get(data_key) == 45
@pytest.mark.asyncio
async def test_task_error(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
    # test job error
def f1():
raise SystemError
rs = [mr.spawn(f1) for _ in range(10)]
graph = TileableGraph([r.data for r in rs])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await task_api.wait_task(task_id, timeout=10)
results = await task_api.get_task_results(progress=True)
assert type(results[0].error) is SystemError
@pytest.mark.asyncio
async def test_task_cancel(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
# test job cancel
def f1():
time.sleep(100)
rs = [mr.spawn(f1) for _ in range(10)]
graph = TileableGraph([r.data for r in rs])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await asyncio.sleep(.5)
with Timer() as timer:
await task_api.cancel_task(task_id)
result = await task_api.get_task_result(task_id)
assert result.status == TaskStatus.terminated
assert timer.duration < 20
await asyncio.sleep(.1)
assert await task_api.get_last_idle_time() is not None
results = await task_api.get_task_results(progress=True)
assert all(result.status == TaskStatus.terminated for result in results)
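# Helper remote object: tests step remote functions forward one increment at a time
# by setting this event from the test body.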
class _ProgressController:
def __init__(self):
self._step_event = asyncio.Event()
async def wait(self):
await self._step_event.wait()
self._step_event.clear()
def set(self):
self._step_event.set()
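# Progress should advance 0.0 -> 0.5 -> 1.0 as the remote function is stepped twice.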
@pytest.mark.asyncio
async def test_task_progress(start_test_service):
sv_pool_address, task_api, storage_api = start_test_service
session_api = await SessionAPI.create(address=sv_pool_address)
ref = await session_api.create_remote_object(
task_api._session_id, 'progress_controller', _ProgressController)
def f1(count: int):
progress_controller = get_context().get_remote_object('progress_controller')
for idx in range(count):
progress_controller.wait()
get_context().set_progress((1 + idx) * 1.0 / count)
r = mr.spawn(f1, args=(2,))
graph = TileableGraph([r.data])
next(TileableGraphBuilder(graph).build())
await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await asyncio.sleep(0.2)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 0.0
await ref.set()
await asyncio.sleep(1)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 0.5
await ref.set()
await asyncio.sleep(1)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 1.0
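# The tileable graph serialized as JSON should mirror the submitted graph's nodes
# and dependencies.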
@pytest.mark.asyncio
async def test_get_tileable_graph(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
return np.arange(5)
def f2():
return np.arange(5, 10)
def f3(f1r, f2r):
return np.concatenate([f1r, f2r]).sum()
r1 = mr.spawn(f1)
r2 = mr.spawn(f2)
r3 = mr.spawn(f3, args=(r1, r2))
graph = TileableGraph([r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
with pytest.raises(TaskNotExist):
await task_api.get_tileable_graph_as_json('non_exist')
tileable_detail = await task_api.get_tileable_graph_as_json(task_id)
num_tileable = len(tileable_detail.get('tileables'))
num_dependencies = len(tileable_detail.get('dependencies'))
assert num_tileable > 0
assert num_dependencies <= (num_tileable / 2) * (num_tileable / 2)
assert (num_tileable == 1 and num_dependencies == 0) or (num_tileable > 1 and num_dependencies > 0)
graph_nodes = []
graph_dependencies = []
for node in graph.iter_nodes():
graph_nodes.append(node.key)
for node_successor in graph.iter_successors(node):
graph_dependencies.append({
'fromTileableId': node.key,
'toTileableId': node_successor.key,
'linkType': 0,
})
for tileable in tileable_detail.get('tileables'):
graph_nodes.remove(tileable.get('tileableId'))
assert len(graph_nodes) == 0
for i in range(num_dependencies):
dependency = tileable_detail.get('dependencies')[i]
assert graph_dependencies[i] == dependency
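# Per-tileable details: progress and subtask status for non-fused and fused graphs,
# plus the error case.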
@pytest.mark.asyncio
async def test_get_tileable_details(start_test_service):
sv_pool_address, task_api, storage_api = start_test_service
session_api = await SessionAPI.create(address=sv_pool_address)
ref = await session_api.create_remote_object(
task_api._session_id, 'progress_controller', _ProgressController)
with pytest.raises(TaskNotExist):
await task_api.get_tileable_details('non_exist')
def f(*_args, raises=False):
get_context().set_progress(0.5)
if raises:
raise ValueError
progress_controller = get_context().get_remote_object('progress_controller')
progress_controller.wait()
get_context().set_progress(1.0)
# test non-fused DAGs
r1 = mr.spawn(f)
r2 = mr.spawn(f, args=(r1, 0))
r3 = mr.spawn(f, args=(r1, 1))
graph = TileableGraph([r2.data, r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
def _get_fields(details, field, wrapper=None):
rs = [r1, r2, r3]
ret = [details[r.key][field] for r in rs]
if wrapper:
ret = [wrapper(v) for v in ret]
return ret
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert _get_fields(details, 'progress') == [0.5, 0.0, 0.0]
assert _get_fields(details, 'status', SubtaskStatus) \
== [SubtaskStatus.running] + [SubtaskStatus.pending] * 2
await ref.set()
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert _get_fields(details, 'progress') == [1.0, 0.5, 0.5]
assert _get_fields(details, 'status', SubtaskStatus) \
== [SubtaskStatus.succeeded] + [SubtaskStatus.running] * 2
await ref.set()
await task_api.wait_task(task_id)
# test fused DAGs
r5 = mr.spawn(f, args=(0,))
r6 = mr.spawn(f, args=(r5,))
graph = TileableGraph([r6.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=True)
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert details[r5.key]['progress'] == details[r6.key]['progress'] == 0.25
await ref.set()
await asyncio.sleep(0.1)
await ref.set()
await task_api.wait_task(task_id)
# test raises
r7 = mr.spawn(f, kwargs={'raises': 1})
graph = TileableGraph([r7.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=True)
await task_api.wait_task(task_id)
details = await task_api.get_tileable_details(task_id)
assert details[r7.key]['status'] == SubtaskStatus.errored.value
| 32.020888
| 103
| 0.679713
|
import asyncio
import time
import numpy as np
import pytest
import mars.oscar as mo
import mars.remote as mr
from mars.core import TileableGraph, TileableGraphBuilder
from mars.core.context import get_context
from mars.services import start_services, NodeRole
from mars.services.session import SessionAPI
from mars.services.storage import MockStorageAPI
from mars.services.subtask import SubtaskStatus
from mars.services.web import WebActor
from mars.services.meta import MetaAPI
from mars.services.task import TaskAPI, TaskStatus, WebTaskAPI
from mars.services.task.errors import TaskNotExist
from mars.utils import Timer
@pytest.fixture
async def actor_pools():
async def start_pool(is_worker: bool):
if is_worker:
kw = dict(
n_process=3,
labels=['main'] + ['numa-0'] * 2 + ['io'],
subprocess_start_method='spawn'
)
else:
kw = dict(n_process=0,
subprocess_start_method='spawn')
pool = await mo.create_actor_pool('127.0.0.1', **kw)
await pool.start()
return pool
sv_pool, worker_pool = await asyncio.gather(
start_pool(False), start_pool(True)
)
try:
yield sv_pool, worker_pool
finally:
await asyncio.gather(sv_pool.stop(), worker_pool.stop())
@pytest.mark.parametrize(indirect=True)
@pytest.fixture(params=[False, True])
async def start_test_service(actor_pools, request):
sv_pool, worker_pool = actor_pools
config = {
"services": ["cluster", "session", "meta", "lifecycle",
"scheduling", "subtask", "task"],
"cluster": {
"backend": "fixed",
"lookup_address": sv_pool.external_address,
"resource": {"numa-0": 2}
},
"meta": {
"store": "dict"
},
"scheduling": {},
"task": {},
}
    if request.param:
config['services'].append('web')
await start_services(
NodeRole.SUPERVISOR, config, address=sv_pool.external_address)
await start_services(
NodeRole.WORKER, config, address=worker_pool.external_address)
session_id = 'test_session'
session_api = await SessionAPI.create(sv_pool.external_address)
await session_api.create_session(session_id)
if not request.param:
task_api = await TaskAPI.create(session_id,
sv_pool.external_address)
else:
web_actor = await mo.actor_ref(WebActor.default_uid(),
address=sv_pool.external_address)
web_address = await web_actor.get_web_address()
task_api = WebTaskAPI(session_id, web_address)
assert await task_api.get_task_results() == []
_ = await MetaAPI.create(session_id,
sv_pool.external_address)
storage_api = await MockStorageAPI.create(session_id,
worker_pool.external_address)
try:
yield sv_pool.external_address, task_api, storage_api
finally:
await MockStorageAPI.cleanup(worker_pool.external_address)
@pytest.mark.asyncio
async def test_task_execution(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
return np.arange(5)
def f2():
return np.arange(5, 10)
def f3(f1r, f2r):
return np.concatenate([f1r, f2r]).sum()
r1 = mr.spawn(f1)
r2 = mr.spawn(f2)
r3 = mr.spawn(f3, args=(r1, r2))
graph = TileableGraph([r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
assert await task_api.get_last_idle_time() is None
assert isinstance(task_id, str)
await task_api.wait_task(task_id)
task_result = await task_api.get_task_result(task_id)
assert task_result.status == TaskStatus.terminated
assert await task_api.get_last_idle_time() is not None
if task_result.error is not None:
raise task_result.error.with_traceback(task_result.traceback)
result_tileable = (await task_api.get_fetch_tileables(task_id))[0]
data_key = result_tileable.chunks[0].key
assert await storage_api.get(data_key) == 45
@pytest.mark.asyncio
async def test_task_error(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
raise SystemError
rs = [mr.spawn(f1) for _ in range(10)]
graph = TileableGraph([r.data for r in rs])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await task_api.wait_task(task_id, timeout=10)
results = await task_api.get_task_results(progress=True)
assert type(results[0].error) is SystemError
@pytest.mark.asyncio
async def test_task_cancel(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
time.sleep(100)
rs = [mr.spawn(f1) for _ in range(10)]
graph = TileableGraph([r.data for r in rs])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await asyncio.sleep(.5)
with Timer() as timer:
await task_api.cancel_task(task_id)
result = await task_api.get_task_result(task_id)
assert result.status == TaskStatus.terminated
assert timer.duration < 20
await asyncio.sleep(.1)
assert await task_api.get_last_idle_time() is not None
results = await task_api.get_task_results(progress=True)
assert all(result.status == TaskStatus.terminated for result in results)
class _ProgressController:
def __init__(self):
self._step_event = asyncio.Event()
async def wait(self):
await self._step_event.wait()
self._step_event.clear()
def set(self):
self._step_event.set()
@pytest.mark.asyncio
async def test_task_progress(start_test_service):
sv_pool_address, task_api, storage_api = start_test_service
session_api = await SessionAPI.create(address=sv_pool_address)
ref = await session_api.create_remote_object(
task_api._session_id, 'progress_controller', _ProgressController)
def f1(count: int):
progress_controller = get_context().get_remote_object('progress_controller')
for idx in range(count):
progress_controller.wait()
get_context().set_progress((1 + idx) * 1.0 / count)
r = mr.spawn(f1, args=(2,))
graph = TileableGraph([r.data])
next(TileableGraphBuilder(graph).build())
await task_api.submit_tileable_graph(graph, fuse_enabled=False)
await asyncio.sleep(0.2)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 0.0
await ref.set()
await asyncio.sleep(1)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 0.5
await ref.set()
await asyncio.sleep(1)
results = await task_api.get_task_results(progress=True)
assert results[0].progress == 1.0
@pytest.mark.asyncio
async def test_get_tileable_graph(start_test_service):
_sv_pool_address, task_api, storage_api = start_test_service
def f1():
return np.arange(5)
def f2():
return np.arange(5, 10)
def f3(f1r, f2r):
return np.concatenate([f1r, f2r]).sum()
r1 = mr.spawn(f1)
r2 = mr.spawn(f2)
r3 = mr.spawn(f3, args=(r1, r2))
graph = TileableGraph([r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
with pytest.raises(TaskNotExist):
await task_api.get_tileable_graph_as_json('non_exist')
tileable_detail = await task_api.get_tileable_graph_as_json(task_id)
num_tileable = len(tileable_detail.get('tileables'))
num_dependencies = len(tileable_detail.get('dependencies'))
assert num_tileable > 0
assert num_dependencies <= (num_tileable / 2) * (num_tileable / 2)
assert (num_tileable == 1 and num_dependencies == 0) or (num_tileable > 1 and num_dependencies > 0)
graph_nodes = []
graph_dependencies = []
for node in graph.iter_nodes():
graph_nodes.append(node.key)
for node_successor in graph.iter_successors(node):
graph_dependencies.append({
'fromTileableId': node.key,
'toTileableId': node_successor.key,
'linkType': 0,
})
for tileable in tileable_detail.get('tileables'):
graph_nodes.remove(tileable.get('tileableId'))
assert len(graph_nodes) == 0
for i in range(num_dependencies):
dependency = tileable_detail.get('dependencies')[i]
assert graph_dependencies[i] == dependency
@pytest.mark.asyncio
async def test_get_tileable_details(start_test_service):
sv_pool_address, task_api, storage_api = start_test_service
session_api = await SessionAPI.create(address=sv_pool_address)
ref = await session_api.create_remote_object(
task_api._session_id, 'progress_controller', _ProgressController)
with pytest.raises(TaskNotExist):
await task_api.get_tileable_details('non_exist')
def f(*_args, raises=False):
get_context().set_progress(0.5)
if raises:
raise ValueError
progress_controller = get_context().get_remote_object('progress_controller')
progress_controller.wait()
get_context().set_progress(1.0)
r1 = mr.spawn(f)
r2 = mr.spawn(f, args=(r1, 0))
r3 = mr.spawn(f, args=(r1, 1))
graph = TileableGraph([r2.data, r3.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=False)
def _get_fields(details, field, wrapper=None):
rs = [r1, r2, r3]
ret = [details[r.key][field] for r in rs]
if wrapper:
ret = [wrapper(v) for v in ret]
return ret
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert _get_fields(details, 'progress') == [0.5, 0.0, 0.0]
assert _get_fields(details, 'status', SubtaskStatus) \
== [SubtaskStatus.running] + [SubtaskStatus.pending] * 2
await ref.set()
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert _get_fields(details, 'progress') == [1.0, 0.5, 0.5]
assert _get_fields(details, 'status', SubtaskStatus) \
== [SubtaskStatus.succeeded] + [SubtaskStatus.running] * 2
await ref.set()
await task_api.wait_task(task_id)
r5 = mr.spawn(f, args=(0,))
r6 = mr.spawn(f, args=(r5,))
graph = TileableGraph([r6.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=True)
await asyncio.sleep(1)
details = await task_api.get_tileable_details(task_id)
assert details[r5.key]['progress'] == details[r6.key]['progress'] == 0.25
await ref.set()
await asyncio.sleep(0.1)
await ref.set()
await task_api.wait_task(task_id)
r7 = mr.spawn(f, kwargs={'raises': 1})
graph = TileableGraph([r7.data])
next(TileableGraphBuilder(graph).build())
task_id = await task_api.submit_tileable_graph(graph, fuse_enabled=True)
await task_api.wait_task(task_id)
details = await task_api.get_tileable_details(task_id)
assert details[r7.key]['status'] == SubtaskStatus.errored.value
| true
| true
|
1c4076e540d68a9547420103d4ad6383ba1ec3cc
| 2,384
|
py
|
Python
|
server/workers/base/src/base.py
|
dasch124/Headstart
|
9eb37ce458a24fd42b22f2aa15c53ac46a69f9bf
|
[
"MIT"
] | 111
|
2016-12-10T17:27:46.000Z
|
2022-03-29T02:57:19.000Z
|
server/workers/base/src/base.py
|
dasch124/Headstart
|
9eb37ce458a24fd42b22f2aa15c53ac46a69f9bf
|
[
"MIT"
] | 338
|
2016-12-04T17:43:28.000Z
|
2022-03-04T15:50:33.000Z
|
server/workers/base/src/base.py
|
dasch124/Headstart
|
9eb37ce458a24fd42b22f2aa15c53ac46a69f9bf
|
[
"MIT"
] | 32
|
2016-12-19T12:48:00.000Z
|
2022-02-12T17:47:47.000Z
|
import json
import subprocess
import pandas as pd
import logging
from common.r_wrapper import RWrapper
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class BaseClient(RWrapper):
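    # Pop the next job from the Redis "base" queue and unpack it into (id, params, endpoint).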
def next_item(self):
queue, msg = self.redis_store.blpop("base")
msg = json.loads(msg.decode('utf-8'))
k = msg.get('id')
params = self.add_default_params(msg.get('params'))
params["service"] = "base"
endpoint = msg.get('endpoint')
return k, params, endpoint
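    # Run the R pipeline as a subprocess, passing the params as JSON on stdin and parsing
    # the last two JSON lines of stdout into the metadata and text data frames.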
def execute_r(self, params):
q = params.get('q')
service = params.get('service')
data = {}
data["params"] = params
cmd = [self.command, self.runner, self.wd,
q, service]
        error = []
        try:
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
stdout, stderr = proc.communicate(json.dumps(data))
output = [o for o in stdout.split('\n') if len(o) > 0]
error = [o for o in stderr.split('\n') if len(o) > 0]
metadata = pd.DataFrame(json.loads(output[-2]))
text = pd.DataFrame(json.loads(output[-1]))
input_data = {}
input_data["metadata"] = metadata.to_json(orient='records')
input_data["text"] = text.to_json(orient='records')
return input_data
except Exception as e:
self.logger.error(e)
self.logger.error(error)
raise
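    # Main worker loop: consume search jobs from Redis and publish results either to the
    # "<id>_output" key (raw mode) or onto the "input_data" queue for further processing.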
def run(self):
while True:
k, params, endpoint = self.next_item()
self.logger.debug(k)
self.logger.debug(params)
if endpoint == "search":
try:
res = {}
res["id"] = k
res["input_data"] = self.execute_r(params)
res["params"] = params
if params.get('raw') is True:
self.redis_store.set(k+"_output", json.dumps(res))
else:
self.redis_store.rpush("input_data", json.dumps(res).encode('utf8'))
except Exception as e:
self.logger.error(e)
self.logger.error(params)
| 36.676923
| 111
| 0.518876
|
import json
import subprocess
import pandas as pd
import logging
from common.r_wrapper import RWrapper
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class BaseClient(RWrapper):
def next_item(self):
queue, msg = self.redis_store.blpop("base")
msg = json.loads(msg.decode('utf-8'))
k = msg.get('id')
params = self.add_default_params(msg.get('params'))
params["service"] = "base"
endpoint = msg.get('endpoint')
return k, params, endpoint
def execute_r(self, params):
q = params.get('q')
service = params.get('service')
data = {}
data["params"] = params
cmd = [self.command, self.runner, self.wd,
q, service]
        error = []
        try:
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8')
stdout, stderr = proc.communicate(json.dumps(data))
output = [o for o in stdout.split('\n') if len(o) > 0]
error = [o for o in stderr.split('\n') if len(o) > 0]
metadata = pd.DataFrame(json.loads(output[-2]))
text = pd.DataFrame(json.loads(output[-1]))
input_data = {}
input_data["metadata"] = metadata.to_json(orient='records')
input_data["text"] = text.to_json(orient='records')
return input_data
except Exception as e:
self.logger.error(e)
self.logger.error(error)
raise
def run(self):
while True:
k, params, endpoint = self.next_item()
self.logger.debug(k)
self.logger.debug(params)
if endpoint == "search":
try:
res = {}
res["id"] = k
res["input_data"] = self.execute_r(params)
res["params"] = params
if params.get('raw') is True:
self.redis_store.set(k+"_output", json.dumps(res))
else:
self.redis_store.rpush("input_data", json.dumps(res).encode('utf8'))
except Exception as e:
self.logger.error(e)
self.logger.error(params)
| true
| true
|
1c4076e641e754b295b1a76ea44a40f0f9c23f5f
| 10,843
|
py
|
Python
|
google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
|
TheMichaelHu/python-aiplatform
|
e03f373a7e44c354eda88875a41c771f6d7e3ce1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PipelineServiceTransport(abc.ABC):
"""Abstract transport class for PipelineService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=None,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs,
default_timeout=None,
client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
| 35.55082
| 101
| 0.647883
|
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.oauth2 import service_account
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PipelineServiceTransport(abc.ABC):
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
self._scopes = scopes
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=None,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs,
default_timeout=None,
client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job,
default_timeout=None,
client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
raise NotImplementedError()
@property
def operations_client(self):
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
| true
| true
|
1c4076f88b85fdfb5685a007425e937b013824ff
| 2,019
|
py
|
Python
|
api/src/rapidapi/nutritionix.py
|
carlotacb/foodlord
|
3e2379e47ea31474f4a18c2e5904980a34165ae6
|
[
"MIT"
] | 8
|
2019-02-23T18:48:33.000Z
|
2020-01-14T11:48:33.000Z
|
api/src/rapidapi/nutritionix.py
|
andreugallofre/foodlord
|
3e2379e47ea31474f4a18c2e5904980a34165ae6
|
[
"MIT"
] | null | null | null |
api/src/rapidapi/nutritionix.py
|
andreugallofre/foodlord
|
3e2379e47ea31474f4a18c2e5904980a34165ae6
|
[
"MIT"
] | 3
|
2019-02-24T20:27:42.000Z
|
2019-02-27T11:36:28.000Z
|
import requests
import os
from src import *
from src.util import log
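# Query the Nutritionix v1_1 search endpoint for the given ingredient and return the parsed JSON response.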
def __api_request(ingredient):
data = {
'appId': 'cd730bdb',
'appKey': '0555561b71a1ebfa3479c8fd1d966b8c',
        'phrase': ingredient,
'fields': ['item_name', 'brand_name', 'nf_calories'],
'filters': {
'item_type': 2
}
}
response = requests.post("https://api.nutritionix.com/v1_1/search", json=data)
response_json = response.json()
return response_json
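# Pull (calories, brand_name) pairs out of every hit in the search response.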
def __extract_values(json):
result = []
hits = json['hits']
for hit in hits:
brand_name = hit['fields']['brand_name']
calories = hit['fields']['nf_calories']
pair = (calories, brand_name)
result.append(pair)
return result
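# Drop the single highest- and lowest-calorie entries to trim outliers before averaging.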
def __parse_values(values):
max_value = 0
min_value = 1000000
max_tuple = None
min_tuple = None
for value in values:
if value[0] < min_value:
min_value = value[0]
min_tuple = value
if value[0] > max_value:
max_value = value[0]
max_tuple = value
values.remove(max_tuple)
values.remove(min_tuple)
return values
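# Average the calories per brand and return the mean calories of the brand with the most hits.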
def get_ingredient_calories(ingredient):
response_json = __api_request(ingredient)
new_values = __parse_values(__extract_values(response_json))
dictionary = {}
for value in new_values:
brand_name = value[1]
calories = value[0]
if brand_name in dictionary:
pair = dictionary[brand_name]
new_calories = pair[0] + calories
new_num = pair[1] + 1
dictionary[brand_name] = (new_calories, new_num)
else:
pair = (calories, 1)
dictionary[brand_name] = pair
max_elem = 0
max_calories = 0
for item in dictionary:
if dictionary[item][1] > max_elem:
max_elem = dictionary[item][1]
max_calories = dictionary[item][0]
log.debug(max_calories/max_elem)
return max_calories/max_elem
| 26.565789
| 82
| 0.607727
|
import requests
import os
from src import *
from src.util import log
def __api_request(ingredient):
data = {
'appId': 'cd730bdb',
'appKey': '0555561b71a1ebfa3479c8fd1d966b8c',
        'phrase': ingredient,
'fields': ['item_name', 'brand_name', 'nf_calories'],
'filters': {
'item_type': 2
}
}
response = requests.post("https://api.nutritionix.com/v1_1/search", json=data)
response_json = response.json()
return response_json
def __extract_values(json):
result = []
hits = json['hits']
for hit in hits:
brand_name = hit['fields']['brand_name']
calories = hit['fields']['nf_calories']
pair = (calories, brand_name)
result.append(pair)
return result
def __parse_values(values):
max_value = 0
min_value = 1000000
max_tuple = None
min_tuple = None
for value in values:
if value[0] < min_value:
min_value = value[0]
min_tuple = value
if value[0] > max_value:
max_value = value[0]
max_tuple = value
values.remove(max_tuple)
values.remove(min_tuple)
return values
def get_ingredient_calories(ingredient):
response_json = __api_request(ingredient)
new_values = __parse_values(__extract_values(response_json))
dictionary = {}
for value in new_values:
brand_name = value[1]
calories = value[0]
if brand_name in dictionary:
pair = dictionary[brand_name]
new_calories = pair[0] + calories
new_num = pair[1] + 1
dictionary[brand_name] = (new_calories, new_num)
else:
pair = (calories, 1)
dictionary[brand_name] = pair
max_elem = 0
max_calories = 0
for item in dictionary:
if dictionary[item][1] > max_elem:
max_elem = dictionary[item][1]
max_calories = dictionary[item][0]
log.debug(max_calories/max_elem)
return max_calories/max_elem
| true
| true
|
1c40787ba009b629beac95a38d431fdc2146936a
| 3,040
|
py
|
Python
|
test/unit/test_azure_blob_remove_public_access.py
|
kshrutik/secure-state-remediation-jobs
|
dc0a5acc3a74dd70d0b18e448124761a8481990d
|
[
"Apache-2.0"
] | 13
|
2020-08-07T17:48:19.000Z
|
2022-02-17T17:17:04.000Z
|
test/unit/test_azure_blob_remove_public_access.py
|
kshrutik/secure-state-remediation-jobs
|
dc0a5acc3a74dd70d0b18e448124761a8481990d
|
[
"Apache-2.0"
] | 27
|
2020-08-19T18:42:44.000Z
|
2021-10-04T05:35:05.000Z
|
test/unit/test_azure_blob_remove_public_access.py
|
kshrutik/secure-state-remediation-jobs
|
dc0a5acc3a74dd70d0b18e448124761a8481990d
|
[
"Apache-2.0"
] | 23
|
2020-08-12T13:09:08.000Z
|
2021-09-16T11:59:17.000Z
|
# Copyright (c) 2020 VMware Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import Mock
from remediation_worker.jobs.azure_blob_remove_public_access.azure_blob_remove_public_access import (
StorageBlobRemovePublicAccess,
)
@pytest.fixture
def valid_payload():
return """
{
"notificationInfo": {
"RuleId": "5c6cc5e103dcc90f363146cd",
"Service": "Storage",
"FindingInfo": {
"FindingId": "d0431afd-b82e-4021-8aa6-ba3cf5c60ef7",
"ObjectId": "storage_account_name.default.container_name",
"ObjectChain": "{\\"cloudAccountId\\":\\"subscription_id\\",\\"entityId\\":\\"Azure.Storage.d687b1a3-9b78-43b1-a17b-7de297fd1fce.resource_group_name.BlobContainer.storage_account_name.default.container_name\\",\\"entityName\\":\\"storage_account_name.default.container_name\\",\\"entityType\\":\\"Azure.Storage.BlobContainer\\",\\"lastUpdateTime\\":\\"2020-09-09T00:36:35.000Z\\",\\"partitionKey\\":\\"d687b1a3-9b78-43b1-a17b-7de297fd1fce\\",\\"provider\\":\\"Azure\\",\\"region\\":\\"eastus\\",\\"service\\":\\"Storage\\", \\"properties\\":[{\\"name\\":\\"ResourceGroup\\",\\"stringV\\":\\"resource_group_name\\",\\"type\\":\\"string\\"}]}",
"Region": "region"
}
}
}
"""
class TestBlobRemovePublicAccess(object):
def test_parse_payload(self, valid_payload):
params = StorageBlobRemovePublicAccess().parse(valid_payload)
assert params["account_name"] == "storage_account_name"
assert params["container_name"] == "container_name"
assert params["resource_group_name"] == "resource_group_name"
assert params["subscription_id"] == "subscription_id"
assert params["region"] == "region"
def test_remediate_success(self):
client = Mock()
action = StorageBlobRemovePublicAccess()
assert (
action.remediate(client, "resource_group", "account_name", "container_name")
== 0
)
assert client.blob_containers.update.call_count == 1
call_args = client.blob_containers.update.call_args
updated_container = call_args[1]["blob_container"]
assert updated_container.public_access == "None"
def test_remediate_with_exception(self):
client = Mock()
client.blob_containers.update.side_effect = Exception
action = StorageBlobRemovePublicAccess()
with pytest.raises(Exception):
assert action.remediate(client, "security_group_id", "resource_group")
| 44.057971
| 654
| 0.684539
|
import pytest
from mock import Mock
from remediation_worker.jobs.azure_blob_remove_public_access.azure_blob_remove_public_access import (
StorageBlobRemovePublicAccess,
)
@pytest.fixture
def valid_payload():
return """
{
"notificationInfo": {
"RuleId": "5c6cc5e103dcc90f363146cd",
"Service": "Storage",
"FindingInfo": {
"FindingId": "d0431afd-b82e-4021-8aa6-ba3cf5c60ef7",
"ObjectId": "storage_account_name.default.container_name",
"ObjectChain": "{\\"cloudAccountId\\":\\"subscription_id\\",\\"entityId\\":\\"Azure.Storage.d687b1a3-9b78-43b1-a17b-7de297fd1fce.resource_group_name.BlobContainer.storage_account_name.default.container_name\\",\\"entityName\\":\\"storage_account_name.default.container_name\\",\\"entityType\\":\\"Azure.Storage.BlobContainer\\",\\"lastUpdateTime\\":\\"2020-09-09T00:36:35.000Z\\",\\"partitionKey\\":\\"d687b1a3-9b78-43b1-a17b-7de297fd1fce\\",\\"provider\\":\\"Azure\\",\\"region\\":\\"eastus\\",\\"service\\":\\"Storage\\", \\"properties\\":[{\\"name\\":\\"ResourceGroup\\",\\"stringV\\":\\"resource_group_name\\",\\"type\\":\\"string\\"}]}",
"Region": "region"
}
}
}
"""
class TestBlobRemovePublicAccess(object):
def test_parse_payload(self, valid_payload):
params = StorageBlobRemovePublicAccess().parse(valid_payload)
assert params["account_name"] == "storage_account_name"
assert params["container_name"] == "container_name"
assert params["resource_group_name"] == "resource_group_name"
assert params["subscription_id"] == "subscription_id"
assert params["region"] == "region"
def test_remediate_success(self):
client = Mock()
action = StorageBlobRemovePublicAccess()
assert (
action.remediate(client, "resource_group", "account_name", "container_name")
== 0
)
assert client.blob_containers.update.call_count == 1
call_args = client.blob_containers.update.call_args
updated_container = call_args[1]["blob_container"]
assert updated_container.public_access == "None"
def test_remediate_with_exception(self):
client = Mock()
client.blob_containers.update.side_effect = Exception
action = StorageBlobRemovePublicAccess()
with pytest.raises(Exception):
assert action.remediate(client, "security_group_id", "resource_group")
| true
| true
|
1c4078f5eb934c119808576ac595981540e733de
| 4,134
|
py
|
Python
|
test_project/test_app/views.py
|
mblayman/django-test-plus
|
691ce7bcb2e4c31cb0958a53548f49277d9305c2
|
[
"BSD-3-Clause"
] | 530
|
2015-05-23T18:25:39.000Z
|
2022-03-20T14:30:10.000Z
|
test_project/test_app/views.py
|
mblayman/django-test-plus
|
691ce7bcb2e4c31cb0958a53548f49277d9305c2
|
[
"BSD-3-Clause"
] | 144
|
2015-05-27T04:09:15.000Z
|
2021-11-24T15:32:08.000Z
|
test_project/test_app/views.py
|
mblayman/django-test-plus
|
691ce7bcb2e4c31cb0958a53548f49277d9305c2
|
[
"BSD-3-Clause"
] | 62
|
2015-05-27T02:47:19.000Z
|
2022-02-11T21:01:36.000Z
|
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseGone
from django.shortcuts import redirect, render
from django.utils.decorators import method_decorator
from django.views import generic
from .forms import DataForm, NameForm
from .models import Data
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
# Function-based test views
def status_code_view(request, status=200):
status = int(status)
if status in (301, 302):
is_perm = True if status == 301 else False
return redirect('view-200', permanent=is_perm)
return HttpResponse('', status=status)
def view_200(request):
return HttpResponse('', status=200)
def view_201(request):
return HttpResponse('', status=201)
def view_204(request):
return HttpResponse('', status=204)
def view_301(request):
return HttpResponse('', status=301)
def view_302(request):
return HttpResponse('', status=302)
def view_400(request):
return HttpResponse('', status=400)
def view_401(request):
return HttpResponse('', status=401)
def view_403(request):
return HttpResponse('', status=403)
def view_404(request):
return HttpResponse('', status=404)
def view_405(request):
return HttpResponse('', status=405)
def view_409(request):
return HttpResponse('', status=409)
def view_410(request):
return HttpResponseGone()
def view_redirect(request):
return redirect('view-200')
def view_json(request):
ctype = request.META['CONTENT_TYPE']
if not ctype.startswith('application/json'):
raise ValueError("Request's content-type should be 'application/json'. Got '{}' instead.".format(ctype))
data = json.loads(request.body.decode('utf-8'))
return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def needs_login(request):
return HttpResponse('', status=200)
def data_1(request):
list(Data.objects.all())
return HttpResponse('', status=200)
def data_5(request):
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
return HttpResponse('', status=200)
def view_context_with(request):
return render(request, 'base.html', {'testvalue': True})
def view_context_without(request):
return render(request, 'base.html', {})
def view_is_ajax(request):
return HttpResponse('', status=200 if request.is_ajax() else 404)
def view_contains(request):
return render(request, 'test.html', {})
def view_headers(request):
response = HttpResponse('', content_type='text/plain', status=200)
response['X-Custom'] = 1
return response
# Class-based test views
class CBView(generic.View):
def get(self, request):
return HttpResponse('', status=200)
def post(self, request):
return HttpResponse('', status=200)
def special(self):
if hasattr(self, 'special_value'):
return self.special_value
else:
return False
class CBLoginRequiredView(generic.View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CBLoginRequiredView, self).dispatch(*args, **kwargs)
def get(self, request):
return HttpResponse('', status=200)
class CBDataView(generic.UpdateView):
model = Data
template_name = "test.html"
form_class = DataForm
def get_success_url(self):
return reverse("view-200")
def get_context_data(self, **kwargs):
kwargs = super(CBDataView, self).get_context_data(**kwargs)
if hasattr(self.request, "some_data"):
kwargs.update({
"some_data": self.request.some_data
})
return kwargs
class CBTemplateView(generic.TemplateView):
template_name = 'test.html'
def get_context_data(self, **kwargs):
kwargs['revsys'] = 42
return kwargs
class FormErrors(generic.FormView):
form_class = NameForm
template_name = 'form_errors.html'
| 21.989362
| 112
| 0.688195
|
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseGone
from django.shortcuts import redirect, render
from django.utils.decorators import method_decorator
from django.views import generic
from .forms import DataForm, NameForm
from .models import Data
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
def status_code_view(request, status=200):
status = int(status)
if status in (301, 302):
is_perm = True if status == 301 else False
return redirect('view-200', permanent=is_perm)
return HttpResponse('', status=status)
def view_200(request):
return HttpResponse('', status=200)
def view_201(request):
return HttpResponse('', status=201)
def view_204(request):
return HttpResponse('', status=204)
def view_301(request):
return HttpResponse('', status=301)
def view_302(request):
return HttpResponse('', status=302)
def view_400(request):
return HttpResponse('', status=400)
def view_401(request):
return HttpResponse('', status=401)
def view_403(request):
return HttpResponse('', status=403)
def view_404(request):
return HttpResponse('', status=404)
def view_405(request):
return HttpResponse('', status=405)
def view_409(request):
return HttpResponse('', status=409)
def view_410(request):
return HttpResponseGone()
def view_redirect(request):
return redirect('view-200')
def view_json(request):
ctype = request.META['CONTENT_TYPE']
if not ctype.startswith('application/json'):
raise ValueError("Request's content-type should be 'application/json'. Got '{}' instead.".format(ctype))
data = json.loads(request.body.decode('utf-8'))
return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def needs_login(request):
return HttpResponse('', status=200)
def data_1(request):
list(Data.objects.all())
return HttpResponse('', status=200)
def data_5(request):
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
list(Data.objects.all())
return HttpResponse('', status=200)
def view_context_with(request):
return render(request, 'base.html', {'testvalue': True})
def view_context_without(request):
return render(request, 'base.html', {})
def view_is_ajax(request):
return HttpResponse('', status=200 if request.is_ajax() else 404)
def view_contains(request):
return render(request, 'test.html', {})
def view_headers(request):
response = HttpResponse('', content_type='text/plain', status=200)
response['X-Custom'] = 1
return response
# Class-based test views
class CBView(generic.View):
def get(self, request):
return HttpResponse('', status=200)
def post(self, request):
return HttpResponse('', status=200)
def special(self):
if hasattr(self, 'special_value'):
return self.special_value
else:
return False
class CBLoginRequiredView(generic.View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CBLoginRequiredView, self).dispatch(*args, **kwargs)
def get(self, request):
return HttpResponse('', status=200)
class CBDataView(generic.UpdateView):
model = Data
template_name = "test.html"
form_class = DataForm
def get_success_url(self):
return reverse("view-200")
def get_context_data(self, **kwargs):
kwargs = super(CBDataView, self).get_context_data(**kwargs)
if hasattr(self.request, "some_data"):
kwargs.update({
"some_data": self.request.some_data
})
return kwargs
class CBTemplateView(generic.TemplateView):
template_name = 'test.html'
def get_context_data(self, **kwargs):
kwargs['revsys'] = 42
return kwargs
class FormErrors(generic.FormView):
form_class = NameForm
template_name = 'form_errors.html'
| true
| true
|
1c407a36b89b5ec80b8a9ce3e8a4466d21e5f77d
| 5,390
|
py
|
Python
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py
|
ginigangadharan/ansible-real-life
|
897c2fc0d05babbb540768b336b6ad399dad5bfa
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | null | null | null |
Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_igw_info.py
|
premsagar0228/ansible-real-life
|
1a51193b833ab6ad320100472333b9ffb0da39d4
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_igw_info
short_description: Gather information about internet gateways in AWS
description:
- Gather information about internet gateways in AWS.
- This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
type: dict
internet_gateway_ids:
description:
- Get details of specific Internet Gateway ID. Provide this value as a list.
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all Internet Gateways for an account or profile
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
register: igw_info
- name: Gather information about a filtered list of Internet Gateways
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "igw-123"
register: igw_info
- name: Gather information about a specific internet gateway by InternetGatewayId
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
internet_gateway_ids: igw-c1231234
register: igw_info
'''
RETURN = '''
internet_gateways:
description: The internet gateways for the account.
returned: always
type: list
sample: [
{
"attachments": [
{
"state": "available",
"vpc_id": "vpc-02123b67"
}
],
"internet_gateway_id": "igw-2123634d",
"tags": [
{
"key": "Name",
"value": "test-vpc-20-igw"
}
]
}
]
changed:
description: True if listing the internet gateways succeeds.
type: bool
returned: always
sample: "false"
'''
try:
import botocore
except ImportError:
pass # will be captured by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec,
get_aws_connection_info,
boto3_conn,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list,
HAS_BOTO3,
)
def get_internet_gateway_info(internet_gateway):
internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
'Attachments': internet_gateway['Attachments'],
'Tags': internet_gateway['Tags']}
return internet_gateway_info
def list_internet_gateways(client, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
if module.params.get("internet_gateway_ids"):
params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
try:
all_internet_gateways = client.describe_internet_gateways(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return [camel_dict_to_snake_dict(get_internet_gateway_info(igw))
for igw in all_internet_gateways['InternetGateways']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default=dict()),
internet_gateway_ids=dict(type='list', default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_vpc_igw_facts':
module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", version='2.13')
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore and boto3 are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_internet_gateways(connection, module)
module.exit_json(internet_gateways=results)
if __name__ == '__main__':
main()
| 32.666667
| 130
| 0.62987
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_igw_info
short_description: Gather information about internet gateways in AWS
description:
- Gather information about internet gateways in AWS.
- This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
type: dict
internet_gateway_ids:
description:
- Get details of specific Internet Gateway ID. Provide this value as a list.
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all Internet Gateways for an account or profile
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
register: igw_info
- name: Gather information about a filtered list of Internet Gateways
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "igw-123"
register: igw_info
- name: Gather information about a specific internet gateway by InternetGatewayId
ec2_vpc_igw_info:
region: ap-southeast-2
profile: production
internet_gateway_ids: igw-c1231234
register: igw_info
'''
RETURN = '''
internet_gateways:
description: The internet gateways for the account.
returned: always
type: list
sample: [
{
"attachments": [
{
"state": "available",
"vpc_id": "vpc-02123b67"
}
],
"internet_gateway_id": "igw-2123634d",
"tags": [
{
"key": "Name",
"value": "test-vpc-20-igw"
}
]
}
]
changed:
description: True if listing the internet gateways succeeds.
type: bool
returned: always
sample: "false"
'''
try:
import botocore
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec,
get_aws_connection_info,
boto3_conn,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list,
HAS_BOTO3,
)
def get_internet_gateway_info(internet_gateway):
internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
'Attachments': internet_gateway['Attachments'],
'Tags': internet_gateway['Tags']}
return internet_gateway_info
def list_internet_gateways(client, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
if module.params.get("internet_gateway_ids"):
params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
try:
all_internet_gateways = client.describe_internet_gateways(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
return [camel_dict_to_snake_dict(get_internet_gateway_info(igw))
for igw in all_internet_gateways['InternetGateways']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default=dict()),
internet_gateway_ids=dict(type='list', default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_vpc_igw_facts':
module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='botocore and boto3 are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_internet_gateways(connection, module)
module.exit_json(internet_gateways=results)
if __name__ == '__main__':
main()
| true
| true
|
1c407a6298b4da99045109389a17c51b109e624a
| 38,694
|
py
|
Python
|
test/functional/test_framework/messages.py
|
AtomicLemon/bitcoinflex
|
fe02bd48be01e08a047ef8d5821eb247a0681306
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
AtomicLemon/bitcoinflex
|
fe02bd48be01e08a047ef8d5821eb247a0681306
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
AtomicLemon/bitcoinflex
|
fe02bd48be01e08a047ef8d5821eb247a0681306
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
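# --- Illustrative sketch, not part of the original framework ----------------
# CompactSize integers use 1 byte below 253, otherwise a marker byte
# (253/254/255) followed by a 2-, 4- or 8-byte little-endian value.  This
# helper round-trips a few boundary values to show the resulting widths.
def _compact_size_roundtrip_example():
    for value in (0, 252, 253, 0xffff, 0x10000, 0xffffffff, 0x100000000):
        encoded = ser_compact_size(value)
        assert deser_compact_size(BytesIO(encoded)) == value
        # encoded lengths: 1, 1, 3, 3, 5, 5 and 9 bytes respectively
    return True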
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
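# --- Illustrative sketch, not part of the original framework ----------------
# Compact difficulty targets (nBits) pack an exponent byte and a 3-byte
# mantissa: target = mantissa << 8*(exponent - 3).  0x207fffff below is just a
# familiar regtest-style example value.
def _compact_target_example():
    nbits = 0x207fffff
    assert uint256_from_compact(nbits) == 0x7fffff << (8 * (0x20 - 3))
    return uint256_from_compact(nbits)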
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
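# --- Illustrative sketch, not part of the original framework ----------------
# FromHex/ToHex invert each other for any object exposing
# serialize()/deserialize(), e.g. a transaction hex string returned by a
# getrawtransaction RPC call.
def _hex_roundtrip_example(tx_hex):
    tx = FromHex(CTransaction(), tx_hex)
    # re-serializing reproduces the (lower-cased) input hex
    assert ToHex(tx) == tx_hex.lower()
    return tx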
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# BCX Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
# prevout for zPoS is serial hashes hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
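# --- Illustrative sketch, not part of the original framework ----------------
# Requesting headers after a known block: put the known hash in the locator
# and leave hashstop at 0 so the peer returns as many headers as it can.
def _getheaders_after_example(known_block_hash):
    msg = msg_getheaders()
    msg.locator.vHave = [known_block_hash]
    msg.hashstop = 0
    return msg.serialize()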
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
| 29.447489
| 262
| 0.596527
|
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
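A minimal round-trip sketch of the payload classes above, assuming the ser_*/deser_* helpers from earlier in this test-framework module are in scope (only the payload bytes are exercised, not the full wire message header):

import io

ping = msg_ping(nonce=0x1122334455667788)
payload = ping.serialize()              # 8-byte little-endian nonce
echo = msg_ping()
echo.deserialize(io.BytesIO(payload))   # payloads deserialize from a file-like object
assert echo.nonce == ping.nonce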
| true
| true
|
1c407a8836fca2986284f537934f9013606eda28
| 5,204
|
py
|
Python
|
tools/langs/python.py
|
Breakend/libmei
|
c031be79a2775e8bb9b47e1057e1398232d4b293
|
[
"MIT"
] | null | null | null |
tools/langs/python.py
|
Breakend/libmei
|
c031be79a2775e8bb9b47e1057e1398232d4b293
|
[
"MIT"
] | null | null | null |
tools/langs/python.py
|
Breakend/libmei
|
c031be79a2775e8bb9b47e1057e1398232d4b293
|
[
"MIT"
] | 1
|
2021-02-23T21:13:47.000Z
|
2021-02-23T21:13:47.000Z
|
import os
import codecs
import re
import logging
lg = logging.getLogger('schemaparser')
LANG_NAME="Python"
MODULE_TEMPLATE = """
{license}
from pymei import MeiElement
{classes}
"""
MODULE_CLASS_TEMPLATE = """
class {className}_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "{className}")
# <{className}>
"""
LICENSE = """\"\"\"
Copyright (c) 2011-2012 {authors}
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\"\"\""""
AUTHORS = "Andrew Hankinson, Alastair Porter, and Others"
def create(schema):
lg.debug("Begin Python Output...")
__create_python_classes(schema)
__create_init(schema)
lg.debug("Success!")
def __create_python_classes(schema):
lg.debug("Creating Python Modules")
for module, elements in sorted(schema.element_structure.iteritems()):
if not elements:
continue
class_output = ""
module_output = ""
for element, atgroups in sorted(elements.iteritems()):
methstr = {
"className": element
}
class_output += MODULE_CLASS_TEMPLATE.format(**methstr)
modstr = {
"classes": class_output,
"license": LICENSE.format(authors=AUTHORS),
}
module_output = MODULE_TEMPLATE.format(**modstr)
fmi = open(os.path.join(schema.outdir, "{0}.py".format(module.lower())), "w")
fmi.write(module_output)
fmi.close()
lg.debug("\tCreated {0}.py".format(module.lower()))
def __create_init(schema):
m = []
a = []
p = open(os.path.join(schema.outdir, "__init__.py"), 'w')
for module, elements in sorted(schema.element_structure.iteritems()):
a.append('"{0}"'.format(module.lower()))
m.append("from pymei.Modules.{0} import *\n".format(module.lower()))
p.write("__all__ = [{0}]\n\n".format(", ".join(a)))
p.writelines(m)
p.close()
def parse_includes(file_dir, includes_dir):
lg.debug("Parsing includes")
# get the files in the includes directory
includes = [f for f in os.listdir(includes_dir) if not f.startswith(".")]
for dp,dn,fn in os.walk(file_dir):
for f in fn:
if f.startswith("."):
continue
methods, inc = __process_include(f, includes, includes_dir)
if methods:
__parse_codefile(methods, inc, dp, f)
def __process_include(fname, includes, includes_dir):
name,ext = os.path.splitext(fname)
new_methods, includes_block = None, None
if "{0}.inc".format(fname) in includes:
lg.debug("\tProcessing include for {0}".format(fname))
f = open(os.path.join(includes_dir, "{0}.inc".format(fname)), 'r')
includefile = f.read()
f.close()
new_methods, includes_block = __parse_includefile(includefile)
return (new_methods, includes_block)
else:
return (None, None)
def __parse_includefile(contents):
# parse the include file for our methods.
ret = {}
inc = []
reg = re.compile(r"# <(?P<elementName>[^>]+)>(.+?)# </(?P=elementName)>", re.MULTILINE|re.DOTALL)
ret = dict(re.findall(reg, contents))
# grab the include for the includes...
reginc = re.compile(r"/\* #include_block \*/(.+?)/\* #include_block \*/", re.MULTILINE|re.DOTALL)
inc = re.findall(reginc, contents)
return (ret, inc)
def __parse_codefile(methods, includes, directory, codefile):
f = open(os.path.join(directory, codefile), 'r')
contents = f.readlines()
f.close()
regmatch = re.compile(r"[\s]+# <(?P<elementName>[^>]+)>", re.MULTILINE|re.DOTALL)
incmatch = re.compile(r"/\* #include_block \*/")
for i,line in enumerate(contents):
imatch = re.match(incmatch, line)
if imatch:
if includes:
contents[i] = includes[0]
match = re.match(regmatch, line)
if match:
if match.group("elementName") in methods.keys():
contents[i] = methods[match.group("elementName")].lstrip("\n") + "\n"
f = open(os.path.join(directory, codefile), 'w')
f.writelines(contents)
f.close()
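As a quick sanity check of the templates above, rendering MODULE_CLASS_TEMPLATE for a hypothetical "note" element (the element name is illustrative, not taken from an MEI schema) shows the class stub that __create_python_classes writes into each module file:

print(MODULE_CLASS_TEMPLATE.format(className="note"))
# class note_(MeiElement):
#     def __init__(self):
#         MeiElement.__init__(self, "note")
#     # <note>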
| 32.525
| 101
| 0.637779
|
import os
import codecs
import re
import logging
lg = logging.getLogger('schemaparser')
LANG_NAME="Python"
MODULE_TEMPLATE = """
{license}
from pymei import MeiElement
{classes}
"""
MODULE_CLASS_TEMPLATE = """
class {className}_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "{className}")
# <{className}>
"""
LICENSE = """\"\"\"
Copyright (c) 2011-2012 {authors}
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\"\"\""""
AUTHORS = "Andrew Hankinson, Alastair Porter, and Others"
def create(schema):
lg.debug("Begin Python Output...")
__create_python_classes(schema)
__create_init(schema)
lg.debug("Success!")
def __create_python_classes(schema):
lg.debug("Creating Python Modules")
for module, elements in sorted(schema.element_structure.iteritems()):
if not elements:
continue
class_output = ""
module_output = ""
for element, atgroups in sorted(elements.iteritems()):
methstr = {
"className": element
}
class_output += MODULE_CLASS_TEMPLATE.format(**methstr)
modstr = {
"classes": class_output,
"license": LICENSE.format(authors=AUTHORS),
}
module_output = MODULE_TEMPLATE.format(**modstr)
fmi = open(os.path.join(schema.outdir, "{0}.py".format(module.lower())), "w")
fmi.write(module_output)
fmi.close()
lg.debug("\tCreated {0}.py".format(module.lower()))
def __create_init(schema):
m = []
a = []
p = open(os.path.join(schema.outdir, "__init__.py"), 'w')
for module, elements in sorted(schema.element_structure.iteritems()):
a.append('"{0}"'.format(module.lower()))
m.append("from pymei.Modules.{0} import *\n".format(module.lower()))
p.write("__all__ = [{0}]\n\n".format(", ".join(a)))
p.writelines(m)
p.close()
def parse_includes(file_dir, includes_dir):
lg.debug("Parsing includes")
includes = [f for f in os.listdir(includes_dir) if not f.startswith(".")]
for dp,dn,fn in os.walk(file_dir):
for f in fn:
if f.startswith("."):
continue
methods, inc = __process_include(f, includes, includes_dir)
if methods:
__parse_codefile(methods, inc, dp, f)
def __process_include(fname, includes, includes_dir):
name,ext = os.path.splitext(fname)
new_methods, includes_block = None, None
if "{0}.inc".format(fname) in includes:
lg.debug("\tProcessing include for {0}".format(fname))
f = open(os.path.join(includes_dir, "{0}.inc".format(fname)), 'r')
includefile = f.read()
f.close()
new_methods, includes_block = __parse_includefile(includefile)
return (new_methods, includes_block)
else:
return (None, None)
def __parse_includefile(contents):
ret = {}
inc = []
reg = re.compile(r"# <(?P<elementName>[^>]+)>(.+?)# </(?P=elementName)>", re.MULTILINE|re.DOTALL)
ret = dict(re.findall(reg, contents))
reginc = re.compile(r"/\* #include_block \*/(.+?)/\* #include_block \*/", re.MULTILINE|re.DOTALL)
inc = re.findall(reginc, contents)
return (ret, inc)
def __parse_codefile(methods, includes, directory, codefile):
f = open(os.path.join(directory, codefile), 'r')
contents = f.readlines()
f.close()
regmatch = re.compile(r"[\s]+# <(?P<elementName>[^>]+)>", re.MULTILINE|re.DOTALL)
incmatch = re.compile(r"/\* #include_block \*/")
for i,line in enumerate(contents):
imatch = re.match(incmatch, line)
if imatch:
if includes:
contents[i] = includes[0]
match = re.match(regmatch, line)
if match:
if match.group("elementName") in methods.keys():
contents[i] = methods[match.group("elementName")].lstrip("\n") + "\n"
f = open(os.path.join(directory, codefile), 'w')
f.writelines(contents)
f.close()
| true
| true
|
1c407a8ac254ac8c2d539df2d33986b39bdb8715
| 1,712
|
py
|
Python
|
Testing_ProjectBudgetTracker/budget/tests/test_models.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | null | null | null |
Testing_ProjectBudgetTracker/budget/tests/test_models.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 40
|
2020-06-05T22:10:58.000Z
|
2022-03-11T23:56:09.000Z
|
Testing_ProjectBudgetTracker/budget/tests/test_models.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 1
|
2021-03-31T10:30:03.000Z
|
2021-03-31T10:30:03.000Z
|
from django.test import TestCase
from budget.models import Project, Category, Expense
class TestModels(TestCase):
def setUp(self):
self.project1 = Project.objects.create(
name='Project 1',
budget=10000
)
def test_project_is_assigned_slug_on_creation(self):
self.assertEquals(self.project1.slug, 'project-1')
def test_budget_left(self):
category1 = Category.objects.create(
project=self.project1,
name='development'
)
Expense.objects.create(
project=self.project1,
title='expense1',
amount=1000,
category=category1
)
Expense.objects.create(
project=self.project1,
title='expense2',
amount=2000,
category=category1
)
self.assertEquals(self.project1.budget_left, 7000)
def test_project_total_transactions(self):
self.project2 = Project.objects.create(
name='Project2',
budget=10000
)
category1 = Category.objects.create(
project=self.project2,
name='development'
)
Expense.objects.create(
project=self.project2,
title='expense1',
amount=1000,
category=category1
)
Expense.objects.create(
project=self.project2,
title='expense2',
amount=2000,
category=category1
)
self.assertEquals(self.project2.total_transactions, 2)
| 28.533333
| 62
| 0.525701
|
from django.test import TestCase
from budget.models import Project, Category, Expense
class TestModels(TestCase):
def setUp(self):
self.project1 = Project.objects.create(
name='Project 1',
budget=10000
)
def test_project_is_assigned_slug_on_creation(self):
self.assertEquals(self.project1.slug, 'project-1')
def test_budget_left(self):
category1 = Category.objects.create(
project=self.project1,
name='development'
)
Expense.objects.create(
project=self.project1,
title='expense1',
amount=1000,
category=category1
)
Expense.objects.create(
project=self.project1,
title='expense2',
amount=2000,
category=category1
)
self.assertEquals(self.project1.budget_left, 7000)
def test_project_total_transactions(self):
self.project2 = Project.objects.create(
name='Project2',
budget=10000
)
category1 = Category.objects.create(
project=self.project2,
name='development'
)
Expense.objects.create(
project=self.project2,
title='expense1',
amount=1000,
category=category1
)
Expense.objects.create(
project=self.project2,
title='expense2',
amount=2000,
category=category1
)
self.assertEquals(self.project2.total_transactions, 2)
| true
| true
|
1c407aeb8ee9b73aa7dbc86d2f813d0c87ff8533
| 14,005
|
py
|
Python
|
deepstate.py
|
simonkamronn/deepstate
|
74878840c609dd92fd5410e1db111c834b68f357
|
[
"MIT"
] | 4
|
2019-01-24T02:54:14.000Z
|
2020-08-10T07:46:38.000Z
|
deepstate.py
|
simonkamronn/deepstate
|
74878840c609dd92fd5410e1db111c834b68f357
|
[
"MIT"
] | null | null | null |
deepstate.py
|
simonkamronn/deepstate
|
74878840c609dd92fd5410e1db111c834b68f357
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
import tensorflow_probability as tfp
from tensorflow.keras import layers
import numpy as np
import argparse
import sys
from collections import namedtuple
parameter_class = namedtuple('parameters', ['A', 'C', 'Q', 'R', 'mu', 'sigma'])
class DeepState(tf.keras.Model):
"""
This class defines a Kalman Filter (Linear Gaussian State Space model)
parameterized by a RNN.
"""
def __init__(self,
dim_z,
seq_len,
dim_y=1,
dim_u=0,
rnn_units=32,
no_use_cudnn_rnn=True,
**kwargs):
super(DeepState, self).__init__()
self.seq_len = seq_len
self.dim_z = dim_z
self.dim_y = dim_y
# Create model
if no_use_cudnn_rnn:
self.rnn = layers.LSTM(rnn_units,
return_sequences=True)
else:
self.rnn = layers.CuDNNLSTM(rnn_units,
return_sequences=True)
self.A = layers.Dense(dim_z*dim_z)
self.C = layers.Dense(dim_z)
self.Q = layers.Dense(dim_z * dim_z)
self.R = layers.Dense(dim_y * dim_y)
self.mu = layers.Dense(dim_z)
self.sigma = layers.Dense(dim_z * dim_z)
self._alpha_sq = tf.constant(1., dtype=tf.float32) # fading memory control
self.M = 0 # process-measurement cross correlation
# identity matrix
self._I = tf.eye(dim_z, name='I')
self.state = kwargs.pop('state', None)
self.log_likelihood = None
def call(self, x, y):
# Create mask of ones as we don't use it right now
self.mask = tf.ones((y.shape[0], 1))
# Compute RNN outputs
output = self.rnn(x)
# Get initial state
mu = tf.reshape(self.mu(output[:, 1]), (-1, self.dim_z))
sigma = tf.reshape(self.sigma(output[:, 1]), (-1, self.dim_z, self.dim_z))
# Get parameters for the sequence
output = tf.reshape(output, (-1, output.shape[2]))
A = tf.reshape(self.A(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'A')
C = tf.reshape(self.C(output), (-1, self.seq_len, self.dim_y, self.dim_z), 'C')
Q = tf.reshape(self.Q(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'Q')
R = tf.reshape(self.R(output), (-1, self.seq_len, self.dim_y, self.dim_y), 'R')
# self.parameters = list((A, C, Q, R, mu, sigma))
self.parameters = parameter_class(A, C, Q, R, mu, sigma)
forward_states = self.compute_forwards(y, self.parameters)
backward_states = self.compute_backwards(forward_states, self.parameters)
return backward_states
def forward_step_fn(self, params, y, A, C, Q, R):
"""
Forward step over a batch
"""
mu_pred, Sigma_pred, mu_t, Sigma_t = params
# Residual
y_pred = tf.squeeze(tf.matmul(C, tf.expand_dims(mu_pred, 2))) # (bs, dim_y)
r = tf.reshape(y - y_pred, (-1, 1), name='residual') # (bs, dim_y)
# project system uncertainty into measurement space
S = tf.matmul(tf.matmul(C, Sigma_pred), C, transpose_b=True) + R # (bs, dim_y, dim_y)
S_inv = tf.matrix_inverse(S)
K = tf.matmul(tf.matmul(Sigma_pred, C, transpose_b=True), S_inv) # (bs, dim_z, dim_y)
# For missing values, set to 0 the Kalman gain matrix
K = tf.multiply(tf.expand_dims(self.mask, 2), K)
# Get current mu and Sigma
mu_t = mu_pred + tf.squeeze(tf.matmul(K, tf.expand_dims(r, 2))) # (bs, dim_z)
I_KC = self._I - tf.matmul(K, C) # (bs, dim_z, dim_z)
Sigma_t = tf.matmul(tf.matmul(I_KC, Sigma_pred), I_KC, transpose_b=True) # (bs, dim_z, dim_z)
Sigma_t += K * R * tf.transpose(K, [0, 2, 1])
# Prediction
mu_pred = tf.squeeze(tf.matmul(A, tf.expand_dims(mu_t, 2)))
# mu_pred = mu_pred + tf.squeeze(tf.matmul(B, tf.expand_dims(u, 2)))
Sigma_pred = tf.scalar_mul(self._alpha_sq, tf.matmul(tf.matmul(A, Sigma_t), A, transpose_b=True) + Q)
return mu_pred, Sigma_pred, mu_t, Sigma_t
def backward_step_fn(self, params, inputs):
"""
Backwards step over a batch, to be used in tf.scan
:param params:
:param inputs: (batch_size, variable dimensions)
:return:
"""
mu_back, Sigma_back = params
mu_pred_tp1, Sigma_pred_tp1, mu_filt_t, Sigma_filt_t, A = inputs
J_t = tf.matmul(tf.transpose(A, [0, 2, 1]), tf.matrix_inverse(Sigma_pred_tp1))
J_t = tf.matmul(Sigma_filt_t, J_t)
mu_back = mu_filt_t + tf.matmul(J_t, mu_back - mu_pred_tp1)
Sigma_back = Sigma_filt_t + tf.matmul(J_t, tf.matmul(Sigma_back - Sigma_pred_tp1, J_t, adjoint_b=True))
return mu_back, Sigma_back
def compute_forwards(self, y, parameters):
# Set initial state
sigma = parameters.sigma
mu = parameters.mu
params = [mu, sigma, mu, sigma]
# Step through the sequence
states = list()
for i in range(self.seq_len):
params = self.forward_step_fn(params,
y[:, i],
parameters.A[:, i],
parameters.C[:, i],
parameters.Q[:, i],
parameters.R[:, i])
states.append(params)
# Restructure to tensors of shape=(seq_len, batch_size, dim_z)
states = list(map(list, zip(*states)))
states = [tf.stack(state, axis=0) for state in states]
return states
def compute_backwards(self, forward_states, parameters):
mu_pred, Sigma_pred, mu_filt, Sigma_filt = forward_states
mu_pred = tf.expand_dims(mu_pred, 3)
mu_filt = tf.expand_dims(mu_filt, 3)
# The tf.scan below that does the smoothing is initialized with the filtering distribution at time T.
# following the derivation in Murphy's book, we then need to discard the last time step of the predictive
# (that will then have t=2,..T) and filtering distribution (t=1:T-1)
states_scan = [mu_pred[:-1],
Sigma_pred[:-1],
mu_filt[:-1],
Sigma_filt[:-1],
tf.transpose(parameters.A, (1, 0, 2, 3))[:-1]]
# Reverse time dimension
dims = [0]
for i, state in enumerate(states_scan):
states_scan[i] = tf.reverse(state, dims)
# Transpose list of lists
states_scan = list(map(list, zip(*states_scan)))
# Init params
params = [mu_filt[-1], Sigma_filt[-1]]
backward_states = list()
for i in range(self.seq_len - 1):
params = self.backward_step_fn(params,
states_scan[i])
backward_states.append(params)
# Restructure to tensors of shape=(seq_len, batch_size, dim_z)
backward_states = list(map(list, zip(*backward_states)))
backward_states = [tf.stack(state, axis=0) for state in backward_states]
# Reverse time dimension
backward_states = list(backward_states)
dims = [0]
for i, state in enumerate(backward_states):
backward_states[i] = tf.reverse(state, dims)
# Add the final state from the filtering distribution
backward_states[0] = tf.concat([backward_states[0], mu_filt[-1:, :, :, :]], axis=0)
backward_states[1] = tf.concat([backward_states[1], Sigma_filt[-1:, :, :, :]], axis=0)
# Remove extra dimension in the mean
backward_states[0] = backward_states[0][:, :, :, 0]
return backward_states
def get_elbo(self, states, y, mask):
A, C, Q, R, mu, sigma = self.parameters
mu_smooth = states[0]
Sigma_smooth = states[1]
# Sample from smoothing distribution
jitter = 1e-2 * tf.eye(Sigma_smooth.shape[-1], batch_shape=tf.shape(Sigma_smooth)[0:-2])
# mvn_smooth = tf.contrib.distributions.MultivariateNormalTriL(mu_smooth, Sigma_smooth + jitter)
mvn_smooth = tfp.distributions.MultivariateNormalTriL(mu_smooth, tf.cholesky(Sigma_smooth + jitter))
z_smooth = mvn_smooth.sample()
## Transition distribution \prod_{t=2}^T p(z_t|z_{t-1}, u_{t})
# We need to evaluate N(z_t; Az_tm1 + Bu_t, Q), where Q is the same for all the elements
# z_tm1 = tf.reshape(z_smooth[:, :-1, :], [-1, self.dim_z])
# Az_tm1 = tf.transpose(tf.matmul(self.A, tf.transpose(z_tm1)))
Az_tm1 = tf.reshape(tf.matmul(A[:, :-1], tf.expand_dims(z_smooth[:, :-1], 3)), [-1, self.dim_z])
# Remove the first input as our prior over z_1 does not depend on it
# u_t_resh = tf.reshape(u, [-1, self.dim_u])
# Bu_t = tf.transpose(tf.matmul(self.B, tf.transpose(u_t_resh)))
# Bu_t = tf.reshape(tf.matmul(B[:, :-1], tf.expand_dims(u[:, 1:], 3)), [-1, self.dim_z])
mu_transition = Az_tm1 # + Bu_t
z_t_transition = tf.reshape(z_smooth[:, 1:, :], [-1, self.dim_z])
# MultivariateNormalTriL supports broadcasting only for the inputs, not for the covariance
# To exploit this we then write N(z_t; Az_tm1 + Bu_t, Q) as N(z_t - Az_tm1 - Bu_t; 0, Q)
trans_centered = z_t_transition - mu_transition
mvn_transition = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_z), tf.cholesky(Q))
log_prob_transition = mvn_transition.log_prob(trans_centered)
## Emission distribution \prod_{t=1}^T p(y_t|z_t)
# We need to evaluate N(y_t; Cz_t, R). We write it as N(y_t - Cz_t; 0, R)
# z_t_emission = tf.reshape(z_smooth, [-1, self.dim_z])
# Cz_t = tf.transpose(tf.matmul(self.C, tf.transpose(z_t_emission)))
Cz_t = tf.reshape(tf.matmul(C, tf.expand_dims(z_smooth, 3)), [-1, self.dim_y])
y_t_resh = tf.reshape(y, [-1, self.dim_y])
emiss_centered = y_t_resh - Cz_t
mvn_emission = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_y), tf.cholesky(R))
mask_flat = tf.reshape(mask, (-1, ))
log_prob_emission = mvn_emission.log_prob(emiss_centered)
log_prob_emission = tf.multiply(mask_flat, log_prob_emission)
## Distribution of the initial state p(z_1|z_0)
z_0 = z_smooth[:, 0, :]
mvn_0 = tfp.distributions.MultivariateNormalTriL(mu, tf.cholesky(sigma))
log_prob_0 = mvn_0.log_prob(z_0)
# Entropy log(\prod_{t=1}^T p(z_t|y_{1:T}, u_{1:T}))
entropy = - mvn_smooth.log_prob(z_smooth)
entropy = tf.reshape(entropy, [-1])
# entropy = tf.zeros(())
# Compute terms of the lower bound
# We compute the log-likelihood *per frame*
num_el = tf.reduce_sum(mask_flat)
log_probs = [tf.truediv(tf.reduce_sum(log_prob_transition), num_el),
tf.truediv(tf.reduce_sum(log_prob_emission), num_el),
tf.truediv(tf.reduce_sum(log_prob_0), num_el),
tf.truediv(tf.reduce_sum(entropy), num_el)]
kf_elbo = tf.reduce_sum(log_probs)
return kf_elbo, log_probs, z_smooth
def generate_data(samples, seq_len):
y = tf.random.normal((samples, seq_len)) + tf.linspace(0., 1., seq_len)
x = tf.random.normal((samples, seq_len, 1))
x = tf.concat((x, tf.reshape(y, (samples, seq_len, 1))*2), axis=2)
return x, y
def loss_fn(model, inputs, targets, mask):
states = model(inputs, targets)
kf_elbo, log_probs, z_smooth = model.get_elbo(states, targets, mask)
return -kf_elbo
def train(model, optimizer, train_data, train_target, mask):
def model_loss(inputs, targets):
return loss_fn(model, inputs, targets, mask)
grad_fn = tfe.implicit_gradients(model_loss)
grads_and_vars = grad_fn(train_data, train_target)
optimizer.apply_gradients(grads_and_vars)
def evaluate(model, data, targets, mask):
"""evaluate an epoch."""
loss = loss_fn(model, data, targets, mask)
return loss
def main(_):
tf.enable_eager_execution()
model = DeepState(dim_z=4, seq_len=FLAGS.seq_len)
mask = tf.ones((100, 1))
train_data, train_target = generate_data(100, FLAGS.seq_len)
test_data, test_target = generate_data(100, FLAGS.seq_len)
learning_rate = tf.Variable(0.005, name="learning_rate")
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
for _ in range(FLAGS.epoch):
train(model, optimizer, train_data, train_target, mask)
loss = evaluate(model, test_data, test_target, mask)
print(f'Test loss: {loss}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path",
type=str,
default="")
parser.add_argument(
"--logdir", type=str, default="", help="Directory for checkpoint.")
parser.add_argument("--epoch", type=int, default=20, help="Number of epochs.")
parser.add_argument("--batch-size", type=int, default=20, help="Batch size.")
parser.add_argument(
"--seq-len", type=int, default=35, help="Sequence length.")
parser.add_argument(
"--hidden-dim", type=int, default=200, help="Hidden layer dimension.")
parser.add_argument(
"--num-layers", type=int, default=2, help="Number of RNN layers.")
parser.add_argument(
"--dropout", type=float, default=0.2, help="Drop out ratio.")
parser.add_argument(
"--clip", type=float, default=0.25, help="Gradient clipping ratio.")
parser.add_argument(
"--no-use-cudnn-rnn",
action="store_true",
default=True,
help="Disable the fast CuDNN RNN (when no gpu)")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
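A shape sketch for the smoother output, assuming the same TF 1.x eager setup as main() above (batch size and hyperparameters are illustrative):

x, y = generate_data(samples=8, seq_len=35)
model = DeepState(dim_z=4, seq_len=35)
mu_smooth, Sigma_smooth = model(x, y)
# Both tensors come back time-major, as built in compute_forwards/compute_backwards:
# mu_smooth.shape    == (35, 8, 4)      -> (seq_len, batch, dim_z)
# Sigma_smooth.shape == (35, 8, 4, 4)   -> (seq_len, batch, dim_z, dim_z)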
| 40.830904
| 113
| 0.605212
|
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
import tensorflow_probability as tfp
from tensorflow.keras import layers
import numpy as np
import argparse
import sys
from collections import namedtuple
parameter_class = namedtuple('parameters', ['A', 'C', 'Q', 'R', 'mu', 'sigma'])
class DeepState(tf.keras.Model):
def __init__(self,
dim_z,
seq_len,
dim_y=1,
dim_u=0,
rnn_units=32,
no_use_cudnn_rnn=True,
**kwargs):
super(DeepState, self).__init__()
self.seq_len = seq_len
self.dim_z = dim_z
self.dim_y = dim_y
if no_use_cudnn_rnn:
self.rnn = layers.LSTM(rnn_units,
return_sequences=True)
else:
self.rnn = layers.CuDNNLSTM(rnn_units,
return_sequences=True)
self.A = layers.Dense(dim_z*dim_z)
self.C = layers.Dense(dim_z)
self.Q = layers.Dense(dim_z * dim_z)
self.R = layers.Dense(dim_y * dim_y)
self.mu = layers.Dense(dim_z)
self.sigma = layers.Dense(dim_z * dim_z)
self._alpha_sq = tf.constant(1., dtype=tf.float32)
self.M = 0
self._I = tf.eye(dim_z, name='I')
self.state = kwargs.pop('state', None)
self.log_likelihood = None
def call(self, x, y):
self.mask = tf.ones((y.shape[0], 1))
# Compute RNN outputs
output = self.rnn(x)
# Get initial state
mu = tf.reshape(self.mu(output[:, 1]), (-1, self.dim_z))
sigma = tf.reshape(self.sigma(output[:, 1]), (-1, self.dim_z, self.dim_z))
# Get parameters for the sequence
output = tf.reshape(output, (-1, output.shape[2]))
A = tf.reshape(self.A(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'A')
C = tf.reshape(self.C(output), (-1, self.seq_len, self.dim_y, self.dim_z), 'C')
Q = tf.reshape(self.Q(output), (-1, self.seq_len, self.dim_z, self.dim_z), 'Q')
R = tf.reshape(self.R(output), (-1, self.seq_len, self.dim_y, self.dim_y), 'R')
# self.parameters = list((A, C, Q, R, mu, sigma))
self.parameters = parameter_class(A, C, Q, R, mu, sigma)
forward_states = self.compute_forwards(y, self.parameters)
backward_states = self.compute_backwards(forward_states, self.parameters)
return backward_states
def forward_step_fn(self, params, y, A, C, Q, R):
mu_pred, Sigma_pred, mu_t, Sigma_t = params
# Residual
y_pred = tf.squeeze(tf.matmul(C, tf.expand_dims(mu_pred, 2))) # (bs, dim_y)
r = tf.reshape(y - y_pred, (-1, 1), name='residual') # (bs, dim_y)
# project system uncertainty into measurement space
S = tf.matmul(tf.matmul(C, Sigma_pred), C, transpose_b=True) + R # (bs, dim_y, dim_y)
S_inv = tf.matrix_inverse(S)
K = tf.matmul(tf.matmul(Sigma_pred, C, transpose_b=True), S_inv) # (bs, dim_z, dim_y)
# For missing values, set to 0 the Kalman gain matrix
K = tf.multiply(tf.expand_dims(self.mask, 2), K)
# Get current mu and Sigma
mu_t = mu_pred + tf.squeeze(tf.matmul(K, tf.expand_dims(r, 2))) # (bs, dim_z)
I_KC = self._I - tf.matmul(K, C) # (bs, dim_z, dim_z)
Sigma_t = tf.matmul(tf.matmul(I_KC, Sigma_pred), I_KC, transpose_b=True) # (bs, dim_z, dim_z)
Sigma_t += K * R * tf.transpose(K, [0, 2, 1])
# Prediction
mu_pred = tf.squeeze(tf.matmul(A, tf.expand_dims(mu_t, 2)))
# mu_pred = mu_pred + tf.squeeze(tf.matmul(B, tf.expand_dims(u, 2)))
Sigma_pred = tf.scalar_mul(self._alpha_sq, tf.matmul(tf.matmul(A, Sigma_t), A, transpose_b=True) + Q)
return mu_pred, Sigma_pred, mu_t, Sigma_t
def backward_step_fn(self, params, inputs):
mu_back, Sigma_back = params
mu_pred_tp1, Sigma_pred_tp1, mu_filt_t, Sigma_filt_t, A = inputs
J_t = tf.matmul(tf.transpose(A, [0, 2, 1]), tf.matrix_inverse(Sigma_pred_tp1))
J_t = tf.matmul(Sigma_filt_t, J_t)
mu_back = mu_filt_t + tf.matmul(J_t, mu_back - mu_pred_tp1)
Sigma_back = Sigma_filt_t + tf.matmul(J_t, tf.matmul(Sigma_back - Sigma_pred_tp1, J_t, adjoint_b=True))
return mu_back, Sigma_back
def compute_forwards(self, y, parameters):
# Set initial state
sigma = parameters.sigma
mu = parameters.mu
params = [mu, sigma, mu, sigma]
# Step through the sequence
states = list()
for i in range(self.seq_len):
params = self.forward_step_fn(params,
y[:, i],
parameters.A[:, i],
parameters.C[:, i],
parameters.Q[:, i],
parameters.R[:, i])
states.append(params)
# Restructure to tensors of shape=(seq_len, batch_size, dim_z)
states = list(map(list, zip(*states)))
states = [tf.stack(state, axis=0) for state in states]
return states
def compute_backwards(self, forward_states, parameters):
mu_pred, Sigma_pred, mu_filt, Sigma_filt = forward_states
mu_pred = tf.expand_dims(mu_pred, 3)
mu_filt = tf.expand_dims(mu_filt, 3)
# The tf.scan below that does the smoothing is initialized with the filtering distribution at time T.
# following the derivation in Murphy's book, we then need to discard the last time step of the predictive
states_scan = [mu_pred[:-1],
Sigma_pred[:-1],
mu_filt[:-1],
Sigma_filt[:-1],
tf.transpose(parameters.A, (1, 0, 2, 3))[:-1]]
dims = [0]
for i, state in enumerate(states_scan):
states_scan[i] = tf.reverse(state, dims)
states_scan = list(map(list, zip(*states_scan)))
params = [mu_filt[-1], Sigma_filt[-1]]
backward_states = list()
for i in range(self.seq_len - 1):
params = self.backward_step_fn(params,
states_scan[i])
backward_states.append(params)
backward_states = list(map(list, zip(*backward_states)))
backward_states = [tf.stack(state, axis=0) for state in backward_states]
backward_states = list(backward_states)
dims = [0]
for i, state in enumerate(backward_states):
backward_states[i] = tf.reverse(state, dims)
backward_states[0] = tf.concat([backward_states[0], mu_filt[-1:, :, :, :]], axis=0)
backward_states[1] = tf.concat([backward_states[1], Sigma_filt[-1:, :, :, :]], axis=0)
backward_states[0] = backward_states[0][:, :, :, 0]
return backward_states
def get_elbo(self, states, y, mask):
A, C, Q, R, mu, sigma = self.parameters
mu_smooth = states[0]
Sigma_smooth = states[1]
jitter = 1e-2 * tf.eye(Sigma_smooth.shape[-1], batch_shape=tf.shape(Sigma_smooth)[0:-2])
mvn_smooth = tfp.distributions.MultivariateNormalTriL(mu_smooth, tf.cholesky(Sigma_smooth + jitter))
z_smooth = mvn_smooth.sample()
        Az_tm1 = tf.reshape(tf.matmul(A[:, :-1], tf.expand_dims(z_smooth[:, :-1], 3)), [-1, self.dim_z])
mu_transition = Az_tm1
z_t_transition = tf.reshape(z_smooth[:, 1:, :], [-1, self.dim_z])
trans_centered = z_t_transition - mu_transition
mvn_transition = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_z), tf.cholesky(Q))
log_prob_transition = mvn_transition.log_prob(trans_centered)
        Cz_t = tf.reshape(tf.matmul(C, tf.expand_dims(z_smooth, 3)), [-1, self.dim_y])
y_t_resh = tf.reshape(y, [-1, self.dim_y])
emiss_centered = y_t_resh - Cz_t
mvn_emission = tfp.distributions.MultivariateNormalTriL(tf.zeros(self.dim_y), tf.cholesky(R))
mask_flat = tf.reshape(mask, (-1, ))
log_prob_emission = mvn_emission.log_prob(emiss_centered)
log_prob_emission = tf.multiply(mask_flat, log_prob_emission)
        z_0 = z_smooth[:, 0, :]
        mvn_0 = tfp.distributions.MultivariateNormalTriL(mu, tf.cholesky(sigma))
log_prob_0 = mvn_0.log_prob(z_0)
entropy = - mvn_smooth.log_prob(z_smooth)
entropy = tf.reshape(entropy, [-1])
num_el = tf.reduce_sum(mask_flat)
log_probs = [tf.truediv(tf.reduce_sum(log_prob_transition), num_el),
tf.truediv(tf.reduce_sum(log_prob_emission), num_el),
tf.truediv(tf.reduce_sum(log_prob_0), num_el),
tf.truediv(tf.reduce_sum(entropy), num_el)]
kf_elbo = tf.reduce_sum(log_probs)
return kf_elbo, log_probs, z_smooth
def generate_data(samples, seq_len):
y = tf.random.normal((samples, seq_len)) + tf.linspace(0., 1., seq_len)
x = tf.random.normal((samples, seq_len, 1))
x = tf.concat((x, tf.reshape(y, (samples, seq_len, 1))*2), axis=2)
return x, y
def loss_fn(model, inputs, targets, mask):
states = model(inputs, targets)
kf_elbo, log_probs, z_smooth = model.get_elbo(states, targets, mask)
return -kf_elbo
def train(model, optimizer, train_data, train_target, mask):
def model_loss(inputs, targets):
return loss_fn(model, inputs, targets, mask)
grad_fn = tfe.implicit_gradients(model_loss)
grads_and_vars = grad_fn(train_data, train_target)
optimizer.apply_gradients(grads_and_vars)
def evaluate(model, data, targets, mask):
loss = loss_fn(model, data, targets, mask)
return loss
def main(_):
tf.enable_eager_execution()
model = DeepState(dim_z=4, seq_len=FLAGS.seq_len)
mask = tf.ones((100, 1))
train_data, train_target = generate_data(100, FLAGS.seq_len)
test_data, test_target = generate_data(100, FLAGS.seq_len)
learning_rate = tf.Variable(0.005, name="learning_rate")
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
for _ in range(FLAGS.epoch):
train(model, optimizer, train_data, train_target, mask)
loss = evaluate(model, test_data, test_target, mask)
print(f'Test loss: {loss}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path",
type=str,
default="")
parser.add_argument(
"--logdir", type=str, default="", help="Directory for checkpoint.")
parser.add_argument("--epoch", type=int, default=20, help="Number of epochs.")
parser.add_argument("--batch-size", type=int, default=20, help="Batch size.")
parser.add_argument(
"--seq-len", type=int, default=35, help="Sequence length.")
parser.add_argument(
"--hidden-dim", type=int, default=200, help="Hidden layer dimension.")
parser.add_argument(
"--num-layers", type=int, default=2, help="Number of RNN layers.")
parser.add_argument(
"--dropout", type=float, default=0.2, help="Drop out ratio.")
parser.add_argument(
"--clip", type=float, default=0.25, help="Gradient clipping ratio.")
parser.add_argument(
"--no-use-cudnn-rnn",
action="store_true",
default=True,
help="Disable the fast CuDNN RNN (when no gpu)")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| true
| true
|
1c407bbd2f4997e15c6bca7ec590bb3cc0644317
| 715
|
py
|
Python
|
config.py
|
obewas/NewsAPI
|
3fe0e6fbeafaa8c0529615a522b045c9bc37eb11
|
[
"MIT"
] | null | null | null |
config.py
|
obewas/NewsAPI
|
3fe0e6fbeafaa8c0529615a522b045c9bc37eb11
|
[
"MIT"
] | null | null | null |
config.py
|
obewas/NewsAPI
|
3fe0e6fbeafaa8c0529615a522b045c9bc37eb11
|
[
"MIT"
] | null | null | null |
import os
class Config:
'''
General configuration parent class
'''
NEWS_API_BASE_URL = 'https://newsapi.org/v2/{}?country=us&apiKey={}'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
SECRET_KEY = os.environ.get('SECRET_KEY')
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
pass
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
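A hedged sketch of how config_options is typically consumed in a Flask application factory (create_app is illustrative and not part of this file; only the 'development'/'production' keys come from it):

from flask import Flask
from config import config_options

def create_app(config_name):
    # Pick DevConfig or ProdConfig based on the environment name.
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    return app

app = create_app('development')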
| 21.029412
| 82
| 0.671329
|
import os
class Config:
NEWS_API_BASE_URL = 'https://newsapi.org/v2/{}?country=us&apiKey={}'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
SECRET_KEY = os.environ.get('SECRET_KEY')
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
| true
| true
|
1c407c0f6a7b1df4c6df422c3b722b8f4efddb0d
| 4,336
|
py
|
Python
|
Peasy+Box2dshapes2Phaser.py
|
kobitoko/peasy2phaser
|
41202a0b7b7949fa1237b1a0e2ef536bff9bc576
|
[
"Unlicense"
] | null | null | null |
Peasy+Box2dshapes2Phaser.py
|
kobitoko/peasy2phaser
|
41202a0b7b7949fa1237b1a0e2ef536bff9bc576
|
[
"Unlicense"
] | null | null | null |
Peasy+Box2dshapes2Phaser.py
|
kobitoko/peasy2phaser
|
41202a0b7b7949fa1237b1a0e2ef536bff9bc576
|
[
"Unlicense"
] | null | null | null |
import sys
import json
hasPillow = False
try:
from PIL import Image
hasPillow = True
except ImportError:
print("pillow library not found. \nCan ignore this if you want to convert a Peasy file.")
class Converter():
def __init__(self, json):
self._json = json
if not "rigidBodies" in self._json:
print("Json is not in a peasy or Box2D format.")
exit(0)
self.checkWhich()
def checkWhich(self):
test = self._json["rigidBodies"][0]
self._isPeasy = False
# Check if it's Peasy format
if "height" in test and "width" in test:
self._width = test["width"]
self._height = test["height"]
self._isPeasy = True
# Check if it's physics-body-editor-box2d-2.9.2 format.
elif "imagePath" in test and "origin" in test:
if not hasPillow:
print("Trying to convert a Physics Body Editor (Box2D) file, needs Python Imaging Library (https://python-pillow.org/).\nWill exit now.")
exit(0)
img = Image.open(test["imagePath"])
self._width = img.size[0]
self._height = img.size[1]
# exit, format not recognized.
else:
print("Json is not in a peasy or Box2D format.")
exit(0)
print("Image is of size " + str(self._width) + "x" + str(self._height))
def convert(self):
        # rigidBodies contains a list of dictionaries which are the objects.
        # Each of these objects has a name. In Peasy you cannot name them yet, so they're all called "shape".
        # These objects have a polygons entry which contains a list of lists of dictionaries with x and y points.
        # Origin-normalized coordinates start at the bottom left for Physics Body Editor Box2D; Peasy's appear to start at the top right.
#
# Phaser is a dictionary of objects which has a list of dicts containing shape info etc.
phaser = {}
density = 2
friction = 0
bounce = 0
filter = { "categoryBits": 1, "maskBits": 65535 }
for eachObject in self._json["rigidBodies"]:
phaser[eachObject["name"]] = []
if self._isPeasy:
objects = eachObject["polygons"]
else:
objects = eachObject["shapes"]
for eachShape in objects:
"""
From [
{
"x": 0.510416686534882,
"y": 1.40625
},
{
"x": 0.458333343267441,
"y": 1.36458337306976
},
{
"x": 0.447916656732559,
"y": 0.46875
}
]
TO
{
"density": 2, "friction": 0, "bounce": 0,
"filter": { "categoryBits": 1, "maskBits": 65535 },
"shape": [ 10, 191 , 26, 158 , 25, 186 , 13, 204 ]
} """
shape = []
if self._isPeasy:
shapeList = eachShape
else:
shapeList = eachShape["vertices"]
for eachPoint in shapeList:
if self._isPeasy:
shape.append(eachPoint["x"]*self._width)
shape.append(eachPoint["y"]*self._height)
else:
# Physics Body Editor For some reason it normalise on width
shape.append(eachPoint["x"]*self._width)
oldY = eachPoint["y"]*self._width
shape.append(self._height - oldY)
#shape.append(oldY)
phaser[eachObject["name"]].append({"density": density, "friction": friction, "bounce": bounce, "filter": filter,"shape": shape})
print("converting done.")
return phaser
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Needs at least 1 argument, the file to convert.")
exit()
f = open(sys.argv[1], "r")
newFile = open("Converted_" + sys.argv[1], 'w')
jsonLoaded = json.loads(f.read())
f.close()
json.dump(Converter(jsonLoaded).convert(), newFile)
newFile.close()
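A small worked example of the Peasy branch of the conversion (the input dict is made up, but the arithmetic follows convert() above: normalized x is scaled by the image width, normalized y by the height, and the points are flattened into Phaser's [x0, y0, x1, y1, ...] list):

peasy_json = {
    "rigidBodies": [{
        "name": "shape",
        "width": 100,
        "height": 200,
        "polygons": [[{"x": 0.25, "y": 0.25}, {"x": 0.5, "y": 0.5}, {"x": 0.75, "y": 0.125}]],
    }]
}
phaser = Converter(peasy_json).convert()
# phaser["shape"][0]["shape"] == [25.0, 50.0, 50.0, 100.0, 75.0, 25.0]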
| 38.371681
| 153
| 0.513838
|
import sys
import json
hasPillow = False
try:
from PIL import Image
hasPillow = True
except ImportError:
print("pillow library not found. \nCan ignore this if you want to convert a Peasy file.")
class Converter():
def __init__(self, json):
self._json = json
if not "rigidBodies" in self._json:
print("Json is not in a peasy or Box2D format.")
exit(0)
self.checkWhich()
def checkWhich(self):
test = self._json["rigidBodies"][0]
self._isPeasy = False
if "height" in test and "width" in test:
self._width = test["width"]
self._height = test["height"]
self._isPeasy = True
# Check if it's physics-body-editor-box2d-2.9.2 format.
elif "imagePath" in test and "origin" in test:
if not hasPillow:
print("Trying to convert a Physics Body Editor (Box2D) file, needs Python Imaging Library (https://python-pillow.org/).\nWill exit now.")
exit(0)
img = Image.open(test["imagePath"])
self._width = img.size[0]
self._height = img.size[1]
else:
print("Json is not in a peasy or Box2D format.")
exit(0)
print("Image is of size " + str(self._width) + "x" + str(self._height))
def convert(self):
        # These objects have a polygons entry which contains a list of lists of dictionaries with x and y points.
        # Origin-normalized coordinates start at the bottom left for Physics Body Editor Box2D; Peasy's appear to start at the top right.
phaser = {}
density = 2
friction = 0
bounce = 0
filter = { "categoryBits": 1, "maskBits": 65535 }
for eachObject in self._json["rigidBodies"]:
phaser[eachObject["name"]] = []
if self._isPeasy:
objects = eachObject["polygons"]
else:
objects = eachObject["shapes"]
for eachShape in objects:
shape = []
if self._isPeasy:
shapeList = eachShape
else:
shapeList = eachShape["vertices"]
for eachPoint in shapeList:
if self._isPeasy:
shape.append(eachPoint["x"]*self._width)
shape.append(eachPoint["y"]*self._height)
else:
shape.append(eachPoint["x"]*self._width)
oldY = eachPoint["y"]*self._width
shape.append(self._height - oldY)
phaser[eachObject["name"]].append({"density": density, "friction": friction, "bounce": bounce, "filter": filter,"shape": shape})
print("converting done.")
return phaser
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Needs at least 1 argument, the file to convert.")
exit()
f = open(sys.argv[1], "r")
newFile = open("Converted_" + sys.argv[1], 'w')
jsonLoaded = json.loads(f.read())
f.close()
json.dump(Converter(jsonLoaded).convert(), newFile)
newFile.close()
| true
| true
|
1c407c5bbb37f740c6338168cc73d9c43c64c49f
| 1,716
|
py
|
Python
|
cmsplugin_blog_categories/views.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | null | null | null |
cmsplugin_blog_categories/views.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | 3
|
2020-02-11T22:01:45.000Z
|
2021-06-10T17:38:13.000Z
|
cmsplugin_blog_categories/views.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | null | null | null |
"""Views of the ``cmsplugin_blog_categories`` app."""
from django.db.models import Q
from django.views.generic import ListView
from cmsplugin_blog.models import Entry
from .models import Category
class CategoryListView(ListView):
template_name = 'cmsplugin_blog_categories/entry_archive_category.html'
context_object_name = 'entries'
def dispatch(self, request, *args, **kwargs):
self.category = Category.objects.get(slug=kwargs.get('category'))
return super(CategoryListView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(CategoryListView, self).get_context_data(**kwargs)
ctx.update({'category': self.category, })
return ctx
def get_queryset(self):
return self.category.get_entries()
class GetEntriesAjaxView(ListView):
template_name = 'cmsplugin_blog_categories/partials/entry_list.html'
context_object_name = 'entries'
def dispatch(self, request, *args, **kwargs):
if request.GET.get('category'):
self.category = request.GET.get('category')
else:
self.category = None
if request.GET.get('count'):
self.count = int(request.GET.get('count'))
else:
self.count = None
return super(GetEntriesAjaxView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self):
qs = Entry.published.all()
if self.category:
qs = qs.filter(
Q(categories__category__slug=self.category) |
Q(categories__category__parent__slug=self.category))
if self.count:
return qs[:self.count]
return qs
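A hedged sketch of the URL wiring these views expect (patterns and names are illustrative, not copied from the app's actual urls.py): CategoryListView needs a 'category' slug kwarg, while GetEntriesAjaxView reads 'category' and 'count' from the query string.

from django.conf.urls import url
from cmsplugin_blog_categories.views import CategoryListView, GetEntriesAjaxView

urlpatterns = [
    url(r'^category/(?P<category>[-\w]+)/$', CategoryListView.as_view(),
        name='blog_archive_category'),
    url(r'^entries/$', GetEntriesAjaxView.as_view(), name='blog_get_entries'),
]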
| 32.377358
| 75
| 0.649767
|
from django.db.models import Q
from django.views.generic import ListView
from cmsplugin_blog.models import Entry
from .models import Category
class CategoryListView(ListView):
template_name = 'cmsplugin_blog_categories/entry_archive_category.html'
context_object_name = 'entries'
def dispatch(self, request, *args, **kwargs):
self.category = Category.objects.get(slug=kwargs.get('category'))
return super(CategoryListView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(CategoryListView, self).get_context_data(**kwargs)
ctx.update({'category': self.category, })
return ctx
def get_queryset(self):
return self.category.get_entries()
class GetEntriesAjaxView(ListView):
template_name = 'cmsplugin_blog_categories/partials/entry_list.html'
context_object_name = 'entries'
def dispatch(self, request, *args, **kwargs):
if request.GET.get('category'):
self.category = request.GET.get('category')
else:
self.category = None
if request.GET.get('count'):
self.count = int(request.GET.get('count'))
else:
self.count = None
return super(GetEntriesAjaxView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self):
qs = Entry.published.all()
if self.category:
qs = qs.filter(
Q(categories__category__slug=self.category) |
Q(categories__category__parent__slug=self.category))
if self.count:
return qs[:self.count]
return qs
| true
| true
|
1c407ce22b0188d626bd19f8a9dfb9016f55a632
| 652
|
py
|
Python
|
samples/iris/iris/evaluation/evaluation_result.py
|
katyamust/ml-expr-fw
|
5ede3ff1f777430cf25e8731e4798fc37387fb9d
|
[
"MIT"
] | 1
|
2022-03-06T21:52:01.000Z
|
2022-03-06T21:52:01.000Z
|
samples/iris/iris/evaluation/evaluation_result.py
|
omri374/FabricML
|
a545f1ee907b1b89ca9766a873c5944ec88e54e9
|
[
"MIT"
] | null | null | null |
samples/iris/iris/evaluation/evaluation_result.py
|
omri374/FabricML
|
a545f1ee907b1b89ca9766a873c5944ec88e54e9
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from typing import Dict
from iris import LoggableObject
class EvaluationResult(LoggableObject):
"""
Class which holds the evaluation output for one model run.
For example, precision or recall, MSE, accuracy etc.
"""
@abstractmethod
def get_metrics(self) -> Dict:
"""
Return the evaluation result's metrics you wish to be stored in the experiment logging system
:return: A dictionary with names of values of metrics to store
"""
pass
def get_params(self):
# Evaluation results are not likely to have params, just metrics
return None
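A hedged sketch of a concrete subclass (ClassificationResult is illustrative and not part of the iris sample); only get_metrics needs to be implemented, since get_params already defaults to None:

class ClassificationResult(EvaluationResult):
    def __init__(self, accuracy: float, f1: float):
        self.accuracy = accuracy
        self.f1 = f1

    def get_metrics(self) -> Dict:
        # These key/value pairs are what gets stored in the experiment logging system.
        return {"accuracy": self.accuracy, "f1": self.f1}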
| 27.166667
| 101
| 0.684049
|
from abc import abstractmethod
from typing import Dict
from iris import LoggableObject
class EvaluationResult(LoggableObject):
@abstractmethod
def get_metrics(self) -> Dict:
pass
def get_params(self):
return None
| true
| true
|
1c407d59c8ef52303c20adc45386f2e632b7af91
| 10,932
|
py
|
Python
|
nncf/compression_method_api.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
nncf/compression_method_api.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
nncf/compression_method_api.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | 1
|
2021-04-05T09:33:51.000Z
|
2021-04-05T09:33:51.000Z
|
#
# Copyright (c) 2019-2020 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
@package docstring
This package defines the API for the NNCF compression methods, so that the user could
extend the existing algorithms.
"""
import functools
from copy import copy
from enum import Enum
from functools import partial
import torch
from torch import nn
from nncf.config import NNCFConfig
from nncf.dynamic_graph.graph_builder import create_mock_tensor
from nncf.initialization import DataLoaderBNAdaptationRunner
from nncf.nncf_logger import logger as nncf_logger
from nncf.nncf_network import NNCFNetwork
from nncf.structures import BNAdaptationInitArgs
from nncf.utils import should_consider_scope
class CompressionLoss(nn.Module):
"""
Used to calculate additional loss to be added to the base loss during the
training process. It uses the model graph to measure variables and activations
values of the layers during the loss construction. For example, the $L_0$-based
sparsity algorithm calculates the number of non-zero weights in convolutional
and fully-connected layers to construct the loss function.
"""
def forward(self):
"""
Returns the compression loss value.
"""
return torch.zeros([])
def statistics(self):
"""
Returns a dictionary of printable statistics.
"""
return {}
class CompressionScheduler:
"""
Implements the logic of compression method control during the training process.
    May change the method hyperparameters with regard to the current training step or
epoch. For example, the sparsity method can smoothly increase the sparsity rate
over several epochs.
"""
def __init__(self):
self.last_epoch = -1
self.last_step = -1
self._steps_in_current_epoch = 0
def step(self, last=None):
"""
Should be called after each optimizer step during training.
Arguments:
`last` - specifies the initial "previous" step
"""
if last is None:
last = self.last_step + 1
self.last_step = last
self._steps_in_current_epoch += 1
def epoch_step(self, last=None):
"""
Should be called after each training epoch.
Arguments:
`last` - specifies the initial "previous" epoch
"""
if last is None:
last = self.last_epoch + 1
self.last_epoch = last
self._steps_in_current_epoch = 0
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def state_dict(self):
default_keys = {'last_step', 'last_epoch'}
return {key: val for key, val in self.__dict__.items() if key in default_keys}
def initialize(self):
pass
@functools.total_ordering
class CompressionLevel(Enum):
NONE = 0
PARTIAL = 1
FULL = 2
# pylint:disable=comparison-with-callable
def __add__(self, other: 'CompressionLevel') -> 'CompressionLevel':
"""
        Defines the compression level of a composite compression controller consisting of two algorithms, where
        `self` is the compression level of the first algorithm and `other` is the compression level of the second one.
NONE & NONE = NONE
PARTIAL & PARTIAL = PARTIAL
FULL & FULL = FULL
NONE & PARTIAL = PARTIAL
NONE & FULL = PARTIAL
PARTIAL & FULL = PARTIAL
Args:
other: instance of another compression level
Returns:
common compression level of two algorithms
"""
if self.value == other.value:
return self
return CompressionLevel.PARTIAL
def __lt__(self, other: 'CompressionLevel') -> bool:
return self.value < other.value
class CompressionAlgorithmController:
"""Serves as a handle to the additional modules, parameters and hooks inserted
into the original uncompressed model in order to enable algorithm-specific compression.
Hosts entities that are to be used during the training process, such as compression scheduler and
compression loss."""
def __init__(self, target_model: NNCFNetwork):
self._model = target_model
self._loss = CompressionLoss()
self._scheduler = CompressionScheduler()
@property
def loss(self):
return self._loss
@property
def scheduler(self):
return self._scheduler
def distributed(self):
"""
Should be called when distributed training with multiple training processes
is going to be used (i.e. after the model is wrapped with DistributedDataParallel).
Any special preparations for the algorithm to properly support distributed training
should be made inside this function.
"""
def compression_level(self) -> CompressionLevel:
"""
Returns level of compression. Should be used on saving best checkpoints to distinguish between
uncompressed, partially compressed and fully compressed models.
"""
raise NotImplementedError()
def statistics(self):
"""
Returns a dictionary of printable statistics.
"""
stats = self._loss.statistics()
if hasattr(self._model, 'statistics'):
stats.update(self._model.statistics())
return stats
def run_batchnorm_adaptation(self, config):
initializer_params = config.get("initializer", {})
init_bn_adapt_config = initializer_params.get('batchnorm_adaptation', {})
num_bn_adaptation_steps = init_bn_adapt_config.get('num_bn_adaptation_steps', 0)
num_bn_forget_steps = init_bn_adapt_config.get('num_bn_forget_steps', 5)
if num_bn_adaptation_steps < 0:
raise AttributeError('Number of batch adaptation steps must be >= 0')
if num_bn_adaptation_steps > 0:
try:
bn_adaptation_args = config.get_extra_struct(BNAdaptationInitArgs)
except KeyError:
nncf_logger.info(
'Could not run batchnorm adaptation '
'as the adaptation data loader is not provided as an extra struct. '
'Refer to `NNCFConfig.register_extra_structs` and the `BNAdaptationInitArgs` class')
return
bn_adaptation_runner = DataLoaderBNAdaptationRunner(self._model, bn_adaptation_args.device,
num_bn_forget_steps)
bn_adaptation_runner.run(bn_adaptation_args.data_loader, num_bn_adaptation_steps)
def prepare_for_export(self):
pass
def export_model(self, filename, *args, **kwargs):
"""
Used to export the compressed model for inference into the ONNX format.
Makes method-specific preparations of the model graph,
(e.g. removing auxiliary layers that were used for the model compression),
then exports the model and dumps it into the output file.
Parameters:
`filename` - a path to the file for the exported model to be saved into.
*args, **kwargs - if the model's `forward` requires additional parameters
during export, specify these here.
"""
self.prepare_for_export()
model = self._model.eval().cpu()
input_tensor_list = []
for info in self._model.input_infos:
single_batch_info = copy(info)
input_shape = tuple([1] + list(info.shape)[1:])
single_batch_info.shape = input_shape
input_tensor_list.append(create_mock_tensor(single_batch_info, "cpu"))
original_forward = model.forward
model.forward = partial(model.forward, *args, **kwargs)
# pylint:disable=unexpected-keyword-arg
with torch.no_grad():
torch.onnx.export(model, tuple(input_tensor_list),
filename, verbose=True, enable_onnx_checker=False, opset_version=10)
model.forward = original_forward
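# A minimal usage sketch (controller and file names are illustrative): given a
# controller `ctrl` for an already-compressed model, an ONNX dump is produced with
#     ctrl.export_model("compressed_model.onnx")
# Any extra positional/keyword arguments are bound to the model's `forward` for the
# duration of the export.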
class CompressionAlgorithmBuilder:
"""
Determines which modifications should be made to the original FP32 model in
order to enable algorithm-specific compression during fine-tuning. Operates
on an NNCFNetwork object wrapping a target PyTorch model (torch.nn.Module).
"""
def __init__(self, config: NNCFConfig, should_init: bool = True):
"""
Arguments:
`config` - a dictionary that contains the parameters of the compression method
`should_init` - if False, trainable parameter initialization will be skipped during building
"""
self.config = config
self.should_init = should_init
if not isinstance(self.config, list):
self.ignored_scopes = self.config.get('ignored_scopes')
self.target_scopes = self.config.get('target_scopes')
def apply_to(self, target_model: NNCFNetwork) -> NNCFNetwork:
"""
Applies algorithm-specific modifications to the model. Hooks to be executed during model
forward operation may be registered using NNCFNetwork command insertion methods. Additional
compression modules that are expected to be saved along with the network via torch.save should also be
registered and added to the model here.
:param target_model: An instance of NNCFNetwork for the algorithm to be applied to.
:return: NNCFNetwork with algorithm-specific modifications applied
"""
self._model = target_model # type: NNCFNetwork
return target_model
def build_controller(self, target_model: NNCFNetwork) -> CompressionAlgorithmController:
"""
Should be called once the compressed model target_model is fully constructed (i.e. hooks are applied and
modules are in place). Returns a CompressionAlgorithmController object containing information
and references to the compressed model or specific modules thereof required for the corresponding compression
scheduler operation or compression loss calculation.
:param target_model: An instance of NNCFNetwork with current algorithm already applied
:return: A CompressionAlgorithmController object.
"""
def _should_consider_scope(self, scope_str: str) -> bool:
return should_consider_scope(scope_str, self.target_scopes, self.ignored_scopes)
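# A rough end-to-end sketch of the builder/controller pair defined above (names are
# illustrative; concrete algorithms subclass both classes):
#     builder = MyAlgoBuilder(nncf_config)
#     compressed_model = builder.apply_to(nncf_network)
#     ctrl = builder.build_controller(compressed_model)
#     total_loss = task_loss + ctrl.loss()          # CompressionLoss is an nn.Module
#     ctrl.scheduler.epoch_step()                   # advance the compression schedule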
| 40.043956
| 117
| 0.676454
|
import functools
from copy import copy
from enum import Enum
from functools import partial
import torch
from torch import nn
from nncf.config import NNCFConfig
from nncf.dynamic_graph.graph_builder import create_mock_tensor
from nncf.initialization import DataLoaderBNAdaptationRunner
from nncf.nncf_logger import logger as nncf_logger
from nncf.nncf_network import NNCFNetwork
from nncf.structures import BNAdaptationInitArgs
from nncf.utils import should_consider_scope
class CompressionLoss(nn.Module):
def forward(self):
return torch.zeros([])
def statistics(self):
return {}
class CompressionScheduler:
def __init__(self):
self.last_epoch = -1
self.last_step = -1
self._steps_in_current_epoch = 0
def step(self, last=None):
if last is None:
last = self.last_step + 1
self.last_step = last
self._steps_in_current_epoch += 1
def epoch_step(self, last=None):
if last is None:
last = self.last_epoch + 1
self.last_epoch = last
self._steps_in_current_epoch = 0
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def state_dict(self):
default_keys = {'last_step', 'last_epoch'}
return {key: val for key, val in self.__dict__.items() if key in default_keys}
def initialize(self):
pass
@functools.total_ordering
class CompressionLevel(Enum):
NONE = 0
PARTIAL = 1
FULL = 2
def __add__(self, other: 'CompressionLevel') -> 'CompressionLevel':
if self.value == other.value:
return self
return CompressionLevel.PARTIAL
def __lt__(self, other: 'CompressionLevel') -> bool:
return self.value < other.value
class CompressionAlgorithmController:
def __init__(self, target_model: NNCFNetwork):
self._model = target_model
self._loss = CompressionLoss()
self._scheduler = CompressionScheduler()
@property
def loss(self):
return self._loss
@property
def scheduler(self):
return self._scheduler
def distributed(self):
def compression_level(self) -> CompressionLevel:
raise NotImplementedError()
def statistics(self):
stats = self._loss.statistics()
if hasattr(self._model, 'statistics'):
stats.update(self._model.statistics())
return stats
def run_batchnorm_adaptation(self, config):
initializer_params = config.get("initializer", {})
init_bn_adapt_config = initializer_params.get('batchnorm_adaptation', {})
num_bn_adaptation_steps = init_bn_adapt_config.get('num_bn_adaptation_steps', 0)
num_bn_forget_steps = init_bn_adapt_config.get('num_bn_forget_steps', 5)
if num_bn_adaptation_steps < 0:
raise AttributeError('Number of batch adaptation steps must be >= 0')
if num_bn_adaptation_steps > 0:
try:
bn_adaptation_args = config.get_extra_struct(BNAdaptationInitArgs)
except KeyError:
nncf_logger.info(
'Could not run batchnorm adaptation '
'as the adaptation data loader is not provided as an extra struct. '
'Refer to `NNCFConfig.register_extra_structs` and the `BNAdaptationInitArgs` class')
return
bn_adaptation_runner = DataLoaderBNAdaptationRunner(self._model, bn_adaptation_args.device,
num_bn_forget_steps)
bn_adaptation_runner.run(bn_adaptation_args.data_loader, num_bn_adaptation_steps)
def prepare_for_export(self):
pass
def export_model(self, filename, *args, **kwargs):
self.prepare_for_export()
model = self._model.eval().cpu()
input_tensor_list = []
for info in self._model.input_infos:
single_batch_info = copy(info)
input_shape = tuple([1] + list(info.shape)[1:])
single_batch_info.shape = input_shape
input_tensor_list.append(create_mock_tensor(single_batch_info, "cpu"))
original_forward = model.forward
model.forward = partial(model.forward, *args, **kwargs)
with torch.no_grad():
torch.onnx.export(model, tuple(input_tensor_list),
filename, verbose=True, enable_onnx_checker=False, opset_version=10)
model.forward = original_forward
class CompressionAlgorithmBuilder:
def __init__(self, config: NNCFConfig, should_init: bool = True):
self.config = config
self.should_init = should_init
if not isinstance(self.config, list):
self.ignored_scopes = self.config.get('ignored_scopes')
self.target_scopes = self.config.get('target_scopes')
def apply_to(self, target_model: NNCFNetwork) -> NNCFNetwork:
self._model = target_model
return target_model
def build_controller(self, target_model: NNCFNetwork) -> CompressionAlgorithmController:
def _should_consider_scope(self, scope_str: str) -> bool:
return should_consider_scope(scope_str, self.target_scopes, self.ignored_scopes)
| true
| true
|
1c407ee6c340fe5d42a2e3383839c117a000ebd8
| 25,169
|
py
|
Python
|
test/moduletests/check_growth/test_check_growth.py
|
vespian/check-growth
|
83322e40f51759bb0fba5dba214357e1fc3fdaea
|
[
"Apache-2.0"
] | 2
|
2015-01-27T14:39:22.000Z
|
2016-03-10T07:50:41.000Z
|
test/moduletests/check_growth/test_check_growth.py
|
brainly/check-growth
|
83322e40f51759bb0fba5dba214357e1fc3fdaea
|
[
"Apache-2.0"
] | null | null | null |
test/moduletests/check_growth/test_check_growth.py
|
brainly/check-growth
|
83322e40f51759bb0fba5dba214357e1fc3fdaea
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015 Pawel Rozlach
# Copyright (c) 2014 Pawel Rozlach
# Copyright (c) 2014 Brainly.com sp. z o.o.
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Global imports:
import ddt
import mock
import os
import subprocess
import sys
import unittest
from ddt import ddt, data
# To perform local imports first we need to fix PYTHONPATH:
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(pwd + '/../../modules/'))
# Local imports:
import file_paths as paths
import check_growth
# Constants:
DF_COMMAND = '/bin/df' # FIXME - should be autodetected
class TestsBaseClass(unittest.TestCase):
# Used by side effects:
@staticmethod
def _terminate_script(*unused):
raise SystemExit(0)
# Fake configuration data factory:
def _script_conf_factory(self, **kwargs):
good_configuration = {"lockfile": paths.TEST_LOCKFILE,
"history_file": paths.TEST_STATUSFILE,
"timeframe": 365,
"max_averaging_window": 14,
"min_averaging_window": 7,
"memory_mon_enabled": True,
"memory_mon_warn_reduction": 20,
"memory_mon_crit_reduction": 40,
"disk_mon_enabled": True,
"disk_mountpoints": ["/fake/mountpoint/",
"/faker/mountpoint/",
"/not/a/mountpoint"],
"disk_mon_warn_reduction": 20,
"disk_mon_crit_reduction": 40,
}
def func(key):
config = good_configuration.copy()
config.update(kwargs)
self.assertIn(key, config)
return config[key]
return func
@mock.patch('sys.exit')
class TestCommandLineParsing(unittest.TestCase):
def setUp(self):
self._old_args = sys.argv
def tearDown(self):
sys.argv = self._old_args
def test_proper_command_line_parsing(self, *unused):
sys.argv = ['./check_growth.py', '-v', '-s', '-c', './check_growth.json']
parsed_cmdline = check_growth.parse_command_line()
self.assertEqual(parsed_cmdline, {'std_err': True,
'config_file': './check_growth.json',
'verbose': True,
'clean_histdata': False,
})
def test_config_file_missing_from_commandline(self, SysExitMock):
sys.argv = ['./check_growth.py', ]
# Suppress warnings from argparse
with mock.patch('sys.stderr'):
check_growth.parse_command_line()
SysExitMock.assert_called_once_with(2)
def test_default_command_line_args(self, *unused):
sys.argv = ['./check_growth.py', '-c', './check_growth.json']
parsed_cmdline = check_growth.parse_command_line()
self.assertEqual(parsed_cmdline, {'std_err': False,
'config_file': './check_growth.json',
'verbose': False,
'clean_histdata': False,
})
class TestSystemMeasurement(unittest.TestCase):
def test_memusage_fetch(self):
with open(paths.TEST_MEMINFO, 'r') as fh:
tmp = fh.read()
m = mock.mock_open(read_data=tmp)
with mock.patch('check_growth.open', m, create=True):
cur_mem, max_mem = check_growth.fetch_memory_usage()
self.assertLessEqual(cur_mem, 3808.93)
self.assertLessEqual(max_mem, 24058.3)
def test_inodeusage_fetch(self):
cur_inode, max_inode = check_growth.fetch_inode_usage(
paths.MOUNTPOINT_DIRS[0])
cur_inode = int(cur_inode)
max_inode = int(max_inode)
output = subprocess.check_output([DF_COMMAND, '-i',
paths.MOUNTPOINT_DIRS[0]],
shell=False,
universal_newlines=True).split('\n')
correct_maxinode = int(output[1].split()[1])
correct_curinode = int(output[1].split()[2])
self.assertEqual(correct_maxinode, max_inode)
self.assertEqual(correct_curinode, cur_inode)
def test_diskusage_fetch(self):
cur_disk, max_disk = check_growth.fetch_disk_usage(paths.MOUNTPOINT_DIRS[0])
cur_disk = int(cur_disk)
max_disk = int(max_disk)
output = subprocess.check_output([DF_COMMAND, '-m',
paths.MOUNTPOINT_DIRS[0]],
shell=False,
universal_newlines=True).split('\n')
correct_maxdisk = int(output[1].split()[1])
correct_curdisk = int(output[1].split()[2])
diff_max = abs(correct_maxdisk - max_disk)
diff_cur = abs(correct_curdisk - cur_disk)
# Allow for rounding differences: 20% of the effort, 80% of the errors detected :D
self.assertLessEqual(diff_max, 3)
self.assertLessEqual(diff_cur, 3)
def test_growth_ratio_calculation(self):
result = check_growth.find_planned_grow_ratio(252, 11323, 365)
self.assertTrue(result, 31.02)
result = check_growth.find_current_grow_ratio({1: 5, 20: 100, 30: 150})
self.assertTrue(result, 5)
class TestConfigVerification(TestsBaseClass):
def setUp(self):
self.mocks = {}
for patched in ['check_growth.ScriptConfiguration',
'check_growth.ScriptStatus']:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
self.mocks['check_growth.ScriptStatus'].notify_immediate.side_effect = \
self._terminate_script
def test_values_greater_than_zero(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(timeframe=-7,
max_averaging_window=-3,
memory_mon_warn_reduction=-10,
memory_mon_crit_reduction=-100,
disk_mon_warn_reduction=0,
disk_mon_crit_reduction=-5)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('Timeframe should be a positive int', msg)
self.assertIn('Max averaging window should be a positive int', msg)
self.assertIn('memory_mon_warn_reduction should be a positive int', msg)
self.assertIn('memory_mon_crit_reduction should be a positive int', msg)
self.assertIn('disk_mon_warn_reduction should be a positive int', msg)
self.assertIn('disk_mon_crit_reduction should be a positive int', msg)
def test_limits_sanity(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_warn_reduction=30,
memory_mon_crit_reduction=20,
disk_mon_warn_reduction=10,
disk_mon_crit_reduction=5)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('memory_mon_warn_reduction should be lower ' +
'than memory_mon_crit_reduction', msg)
self.assertIn('disk_mon_warn_reduction should be lower than ' +
'disk_mon_crit_reduction', msg)
def test_at_least_one_checktype_enabled(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mon_enabled=False,)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('There should be at least one resourece check enabled.',
msg)
def test_configuration_ok(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mountpoints=paths.MOUNTPOINT_DIRS)
check_growth.verify_conf()
@ddt
class TestHistFileUpdateMethodsSyntaxChecking(TestsBaseClass):
def setUp(self):
conf_file = self._script_conf_factory(disk_mon_enabled=False)
max_averaging_window = conf_file("max_averaging_window")
min_averaging_window = conf_file("min_averaging_window")
history_file = conf_file("history_file")
# Initialize the class:
check_growth.HistoryFile.init(history_file, max_averaging_window,
min_averaging_window)
def test_disk_resource_defined(self):
# add_datapoint - disk resource type should be defined:
with self.assertRaises(ValueError):
check_growth.HistoryFile.add_datapoint(prefix='disk',
path='/dev/shm',
datapoint=10)
def test_datapoint_valid_type(self):
# add_datapoint - datapoint should be a float or int object
with self.assertRaises(ValueError):
check_growth.HistoryFile.add_datapoint(prefix='disk',
path='/dev/shm',
datapoint='foo',
data_type='inode')
@data('verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_datapoint_type_defined(self, method):
args = {'prefix': 'disk',
'path': '/dev/shm'}
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_only_disk_or_memory_permitted(self, method):
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)('dummy', 10)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_disk_resource_path_valid(self, method):
args = {"prefix": 'disk',
"path": 'no-a-path',
"data_type": 'inode'}
if method == 'add_datapoint':
args["datapoint"] = 10
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_disk_resource_type_valid(self, method):
args = {"prefix": 'disk',
"path": '/dev/shm',
"data_type": 'fooBar'}
if method == 'add_datapoint':
args["datapoint"] = 10
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@ddt
class TestScriptLogic(TestsBaseClass):
def setUp(self):
self.mocks = {}
for patched in ['check_growth.fetch_inode_usage',
'check_growth.fetch_disk_usage',
'check_growth.fetch_memory_usage',
'check_growth.find_planned_grow_ratio',
'check_growth.find_current_grow_ratio',
'check_growth.HistoryFile',
'check_growth.ScriptLock',
'check_growth.ScriptStatus',
'check_growth.verify_conf',
'check_growth.ScriptConfiguration',
'check_growth.logging',
]:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
self.mocks['check_growth.ScriptStatus'].notify_immediate.side_effect = \
self._terminate_script
self.mocks['check_growth.ScriptStatus'].notify_agregated.side_effect = \
self._terminate_script
self.mocks['check_growth.fetch_disk_usage'].return_value = (1000, 2000)
self.mocks['check_growth.fetch_inode_usage'].return_value = (2000, 4000)
self.mocks['check_growth.fetch_memory_usage'].return_value = (1000, 2000)
self.mocks['check_growth.HistoryFile'].verify_dataspan.return_value = 10
self.mocks['check_growth.HistoryFile'].get_datapoints.side_effect = \
self._dummy_datapoints
self.mocks['check_growth.find_planned_grow_ratio'].return_value = 100
self.mocks['check_growth.find_current_grow_ratio'].return_value = 60
def _dummy_datapoints(self, dtype, path=None, data_type=None):
if dtype in ('memory', 'disk'):
return (1212, 1232, 500, 1563)
else:
self.fail("Unsupported datapoints type requested: {0}.".format(
dtype))
def test_allok(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory()
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
# Configuration is loaded:
self.mocks['check_growth.ScriptConfiguration'].load_config.assert_called_once_with(
paths.TEST_CONFIG_FILE)
self.assertTrue(self.mocks['check_growth.verify_conf'].called)
# Lock is properly handled:
self.mocks['check_growth.ScriptLock'].init.assert_called_once_with(
paths.TEST_LOCKFILE)
self.assertTrue(self.mocks['check_growth.ScriptLock'].aqquire.called)
# Monitoring is notified:
self.assertTrue(self.mocks['check_growth.ScriptStatus'].init.called)
self.assertTrue(self.mocks['check_growth.ScriptStatus'].notify_agregated.called)
# Data is stored:
self.mocks['check_growth.HistoryFile'].init.assert_called_once_with(
location=paths.TEST_STATUSFILE,
max_averaging_window=14,
min_averaging_window=7)
self.assertTrue(self.mocks['check_growth.HistoryFile'].save.called)
# Status is OK
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, 'ok')
def test_history_cleaning(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory()
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE,
clean_histdata=True)
self.assertTrue(self.mocks['check_growth.HistoryFile'].clear_history.called)
self.assertTrue(self.mocks['check_growth.HistoryFile'].save.called)
@data('disk', 'memory')
def test_insufficient_input_data(self, prefix):
if prefix == 'disk':
# Test memory checks:
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mountpoints=['/tmp/'])
elif prefix == 'memory':
# Test memory checks:
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mon_enabled=False)
self.mocks['check_growth.HistoryFile'].verify_dataspan.return_value = -1
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, 'unknown')
@data(("warn", 130), ("crit", 160))
def test_disk_alert_condition(self, data):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mountpoints=['/tmp/'])
self.mocks['check_growth.find_current_grow_ratio'].return_value = data[1]
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
self.assertEqual(self.mocks['check_growth.find_planned_grow_ratio'].call_args_list,
[mock.call(1000, 2000, 365),
mock.call(2000, 4000, 365)])
self.assertEqual(self.mocks['check_growth.find_current_grow_ratio'].call_args_list,
[mock.call((1212, 1232, 500, 1563), ),
mock.call((1212, 1232, 500, 1563), )])
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, data[0])
@data(("warn", 130), ("crit", 160))
def test_memory_alert_condition(self, data):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mon_enabled=False)
self.mocks['check_growth.find_current_grow_ratio'].return_value = data[1]
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
self.mocks['check_growth.find_planned_grow_ratio'].assert_called_with(1000, 2000, 365)
self.mocks['check_growth.find_current_grow_ratio'].assert_called_with((1212, 1232, 500, 1563),)
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, data[0])
class TestHistFile(TestsBaseClass):
def setUp(self):
conf_file = self._script_conf_factory(disk_mon_enabled=False)
self.max_averaging_window = conf_file("max_averaging_window")
self.min_averaging_window = conf_file("min_averaging_window")
self.history_file = conf_file("history_file")
self.cur_time = 1000000000
patcher = mock.patch('check_growth.time.time')
self.time_mock = patcher.start()
self.addCleanup(patcher.stop)
self.time_mock.return_value = self.cur_time
try:
os.unlink(self.history_file)
except (OSError, IOError):
pass
check_growth.HistoryFile.init(self.history_file, self.max_averaging_window,
self.min_averaging_window)
def test_histfile_timespan_calculation(self):
check_growth.HistoryFile.add_datapoint('memory', 1)
check_growth.HistoryFile.add_datapoint('disk', 1, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 1, path='/tmp/',
data_type='space')
# Now - move the clock 24h ahead:
self.time_mock.return_value = self.cur_time + 1 * 3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 2)
check_growth.HistoryFile.add_datapoint('disk', 2, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 2, path='/tmp/',
data_type='space')
dataspan_memory = check_growth.HistoryFile.get_dataspan('memory')
dataspan_disk_i = check_growth.HistoryFile.get_dataspan('disk',
'/tmp/',
'inode')
dataspan_disk_s = check_growth.HistoryFile.get_dataspan('disk',
'/tmp/',
'space')
self.assertEqual(dataspan_memory, 1)
self.assertEqual(dataspan_disk_i, 1)
self.assertEqual(dataspan_disk_s, 1)
self.assertLess(check_growth.HistoryFile.verify_dataspan('memory'), 0)
self.assertLess(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'inode'), 0)
self.assertLess(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'space'), 0)
# Now move the clock enough to cover self.min_averaging_window:
self.time_mock.return_value = self.cur_time + (0.1 + self.min_averaging_window) * 3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 3)
check_growth.HistoryFile.add_datapoint('disk', 3, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 3, path='/tmp/',
data_type='space')
dataspan_memory = check_growth.HistoryFile.get_dataspan('memory')
dataspan_disk_i = check_growth.HistoryFile.get_dataspan(
'disk', '/tmp/', 'inode')
dataspan_disk_s = check_growth.HistoryFile.get_dataspan(
'disk', '/tmp/', 'space')
self.assertEqual(dataspan_memory, self.min_averaging_window + 0.1)
self.assertEqual(dataspan_disk_i, self.min_averaging_window + 0.1)
self.assertEqual(dataspan_disk_s, self.min_averaging_window + 0.1)
self.assertGreater(check_growth.HistoryFile.verify_dataspan('memory'), 0)
self.assertGreater(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'inode'), 0)
self.assertGreater(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'space'), 0)
def test_histfile_load(self):
check_growth.HistoryFile.add_datapoint('memory', 10356)
check_growth.HistoryFile.add_datapoint('disk', 134321, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 354334321, path='/tmp/',
data_type='space')
self.time_mock.return_value = self.cur_time + self.max_averaging_window * \
3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 234453)
check_growth.HistoryFile.add_datapoint('disk', 234321, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 654334321, path='/tmp/',
data_type='space')
check_growth.HistoryFile.save()
# Test reading existing file and adding few more points:
check_growth.HistoryFile.init(self.history_file, self.max_averaging_window,
self.min_averaging_window)
self.time_mock.return_value = self.cur_time + (self.max_averaging_window + 1) * \
3600 * 24
check_growth.HistoryFile.add_datapoint('memory', 575553)
check_growth.HistoryFile.add_datapoint('disk', 234234367, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 652314121, path='/tmp/',
data_type='space')
# Test whether we have new and saved data and that old data got
# trimmed:
memory_data = check_growth.HistoryFile.get_datapoints('memory')
disk_data_space = check_growth.HistoryFile.get_datapoints('disk',
path='/tmp/',
data_type='space')
disk_data_inode = check_growth.HistoryFile.get_datapoints('disk',
path='/tmp/',
data_type='inode')
self.assertEqual(memory_data, {1001296000: 575553, 1001209601: 234453})
self.assertEqual(disk_data_space,
{1001296000: 652314121, 1001209601: 654334321})
self.assertEqual(disk_data_inode,
{1001296000: 234234367, 1001209601: 234321})
if __name__ == '__main__':
unittest.main()
| 44.31162
| 103
| 0.596329
|
import ddt
import mock
import os
import subprocess
import sys
import unittest
from ddt import ddt, data
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(pwd + '/../../modules/'))
import file_paths as paths
import check_growth
DF_COMMAND = '/bin/df'
class TestsBaseClass(unittest.TestCase):
@staticmethod
def _terminate_script(*unused):
raise SystemExit(0)
def _script_conf_factory(self, **kwargs):
good_configuration = {"lockfile": paths.TEST_LOCKFILE,
"history_file": paths.TEST_STATUSFILE,
"timeframe": 365,
"max_averaging_window": 14,
"min_averaging_window": 7,
"memory_mon_enabled": True,
"memory_mon_warn_reduction": 20,
"memory_mon_crit_reduction": 40,
"disk_mon_enabled": True,
"disk_mountpoints": ["/fake/mountpoint/",
"/faker/mountpoint/",
"/not/a/mountpoint"],
"disk_mon_warn_reduction": 20,
"disk_mon_crit_reduction": 40,
}
def func(key):
config = good_configuration.copy()
config.update(kwargs)
self.assertIn(key, config)
return config[key]
return func
@mock.patch('sys.exit')
class TestCommandLineParsing(unittest.TestCase):
def setUp(self):
self._old_args = sys.argv
def tearDown(self):
sys.argv = self._old_args
def test_proper_command_line_parsing(self, *unused):
sys.argv = ['./check_growth.py', '-v', '-s', '-c', './check_growth.json']
parsed_cmdline = check_growth.parse_command_line()
self.assertEqual(parsed_cmdline, {'std_err': True,
'config_file': './check_growth.json',
'verbose': True,
'clean_histdata': False,
})
def test_config_file_missing_from_commandline(self, SysExitMock):
sys.argv = ['./check_growth.py', ]
with mock.patch('sys.stderr'):
check_growth.parse_command_line()
SysExitMock.assert_called_once_with(2)
def test_default_command_line_args(self, *unused):
sys.argv = ['./check_growth.py', '-c', './check_growth.json']
parsed_cmdline = check_growth.parse_command_line()
self.assertEqual(parsed_cmdline, {'std_err': False,
'config_file': './check_growth.json',
'verbose': False,
'clean_histdata': False,
})
class TestSystemMeasurement(unittest.TestCase):
def test_memusage_fetch(self):
with open(paths.TEST_MEMINFO, 'r') as fh:
tmp = fh.read()
m = mock.mock_open(read_data=tmp)
with mock.patch('check_growth.open', m, create=True):
cur_mem, max_mem = check_growth.fetch_memory_usage()
self.assertLessEqual(cur_mem, 3808.93)
self.assertLessEqual(max_mem, 24058.3)
def test_inodeusage_fetch(self):
cur_inode, max_inode = check_growth.fetch_inode_usage(
paths.MOUNTPOINT_DIRS[0])
cur_inode = int(cur_inode)
max_inode = int(max_inode)
output = subprocess.check_output([DF_COMMAND, '-i',
paths.MOUNTPOINT_DIRS[0]],
shell=False,
universal_newlines=True).split('\n')
correct_maxinode = int(output[1].split()[1])
correct_curinode = int(output[1].split()[2])
self.assertEqual(correct_maxinode, max_inode)
self.assertEqual(correct_curinode, cur_inode)
def test_diskusage_fetch(self):
cur_disk, max_disk = check_growth.fetch_disk_usage(paths.MOUNTPOINT_DIRS[0])
cur_disk = int(cur_disk)
max_disk = int(max_disk)
output = subprocess.check_output([DF_COMMAND, '-m',
paths.MOUNTPOINT_DIRS[0]],
shell=False,
universal_newlines=True).split('\n')
correct_maxdisk = int(output[1].split()[1])
correct_curdisk = int(output[1].split()[2])
diff_max = abs(correct_maxdisk - max_disk)
diff_cur = abs(correct_curdisk - cur_disk)
self.assertLessEqual(diff_max, 3)
self.assertLessEqual(diff_cur, 3)
def test_growth_ratio_calculation(self):
result = check_growth.find_planned_grow_ratio(252, 11323, 365)
self.assertTrue(result, 31.02)
result = check_growth.find_current_grow_ratio({1: 5, 20: 100, 30: 150})
self.assertTrue(result, 5)
class TestConfigVerification(TestsBaseClass):
def setUp(self):
self.mocks = {}
for patched in ['check_growth.ScriptConfiguration',
'check_growth.ScriptStatus']:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
self.mocks['check_growth.ScriptStatus'].notify_immediate.side_effect = \
self._terminate_script
def test_values_greater_than_zero(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(timeframe=-7,
max_averaging_window=-3,
memory_mon_warn_reduction=-10,
memory_mon_crit_reduction=-100,
disk_mon_warn_reduction=0,
disk_mon_crit_reduction=-5)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('Timeframe should be a positive int', msg)
self.assertIn('Max averaging window should be a positive int', msg)
self.assertIn('memory_mon_warn_reduction should be a positive int', msg)
self.assertIn('memory_mon_crit_reduction should be a positive int', msg)
self.assertIn('disk_mon_warn_reduction should be a positive int', msg)
self.assertIn('disk_mon_crit_reduction should be a positive int', msg)
def test_limits_sanity(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_warn_reduction=30,
memory_mon_crit_reduction=20,
disk_mon_warn_reduction=10,
disk_mon_crit_reduction=5)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('memory_mon_warn_reduction should be lower ' +
'than memory_mon_crit_reduction', msg)
self.assertIn('disk_mon_warn_reduction should be lower than ' +
'disk_mon_crit_reduction', msg)
def test_at_least_one_checktype_enabled(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mon_enabled=False,)
with self.assertRaises(SystemExit):
check_growth.verify_conf()
status, msg = self.mocks['check_growth.ScriptStatus'].notify_immediate.call_args[0]
self.assertEqual(status, 'unknown')
self.assertIn('There should be at least one resourece check enabled.',
msg)
def test_configuration_ok(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mountpoints=paths.MOUNTPOINT_DIRS)
check_growth.verify_conf()
@ddt
class TestHistFileUpdateMethodsSyntaxChecking(TestsBaseClass):
def setUp(self):
conf_file = self._script_conf_factory(disk_mon_enabled=False)
max_averaging_window = conf_file("max_averaging_window")
min_averaging_window = conf_file("min_averaging_window")
history_file = conf_file("history_file")
check_growth.HistoryFile.init(history_file, max_averaging_window,
min_averaging_window)
def test_disk_resource_defined(self):
with self.assertRaises(ValueError):
check_growth.HistoryFile.add_datapoint(prefix='disk',
path='/dev/shm',
datapoint=10)
def test_datapoint_valid_type(self):
with self.assertRaises(ValueError):
check_growth.HistoryFile.add_datapoint(prefix='disk',
path='/dev/shm',
datapoint='foo',
data_type='inode')
@data('verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_datapoint_type_defined(self, method):
args = {'prefix': 'disk',
'path': '/dev/shm'}
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_only_disk_or_memory_permitted(self, method):
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)('dummy', 10)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_disk_resource_path_valid(self, method):
args = {"prefix": 'disk',
"path": 'no-a-path',
"data_type": 'inode'}
if method == 'add_datapoint':
args["datapoint"] = 10
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@data('add_datapoint', 'verify_dataspan', 'get_dataspan', 'get_datapoints')
def test_disk_resource_type_valid(self, method):
args = {"prefix": 'disk',
"path": '/dev/shm',
"data_type": 'fooBar'}
if method == 'add_datapoint':
args["datapoint"] = 10
with self.assertRaises(ValueError):
getattr(check_growth.HistoryFile, method)(**args)
@ddt
class TestScriptLogic(TestsBaseClass):
def setUp(self):
self.mocks = {}
for patched in ['check_growth.fetch_inode_usage',
'check_growth.fetch_disk_usage',
'check_growth.fetch_memory_usage',
'check_growth.find_planned_grow_ratio',
'check_growth.find_current_grow_ratio',
'check_growth.HistoryFile',
'check_growth.ScriptLock',
'check_growth.ScriptStatus',
'check_growth.verify_conf',
'check_growth.ScriptConfiguration',
'check_growth.logging',
]:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
self.mocks['check_growth.ScriptStatus'].notify_immediate.side_effect = \
self._terminate_script
self.mocks['check_growth.ScriptStatus'].notify_agregated.side_effect = \
self._terminate_script
self.mocks['check_growth.fetch_disk_usage'].return_value = (1000, 2000)
self.mocks['check_growth.fetch_inode_usage'].return_value = (2000, 4000)
self.mocks['check_growth.fetch_memory_usage'].return_value = (1000, 2000)
self.mocks['check_growth.HistoryFile'].verify_dataspan.return_value = 10
self.mocks['check_growth.HistoryFile'].get_datapoints.side_effect = \
self._dummy_datapoints
self.mocks['check_growth.find_planned_grow_ratio'].return_value = 100
self.mocks['check_growth.find_current_grow_ratio'].return_value = 60
def _dummy_datapoints(self, dtype, path=None, data_type=None):
if dtype in ('memory', 'disk'):
return (1212, 1232, 500, 1563)
else:
self.fail("Unsupported datapoints type requested: {0}.".format(
dtype))
def test_allok(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory()
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
self.mocks['check_growth.ScriptConfiguration'].load_config.assert_called_once_with(
paths.TEST_CONFIG_FILE)
self.assertTrue(self.mocks['check_growth.verify_conf'].called)
self.mocks['check_growth.ScriptLock'].init.assert_called_once_with(
paths.TEST_LOCKFILE)
self.assertTrue(self.mocks['check_growth.ScriptLock'].aqquire.called)
self.assertTrue(self.mocks['check_growth.ScriptStatus'].init.called)
self.assertTrue(self.mocks['check_growth.ScriptStatus'].notify_agregated.called)
self.mocks['check_growth.HistoryFile'].init.assert_called_once_with(
location=paths.TEST_STATUSFILE,
max_averaging_window=14,
min_averaging_window=7)
self.assertTrue(self.mocks['check_growth.HistoryFile'].save.called)
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, 'ok')
def test_history_cleaning(self):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory()
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE,
clean_histdata=True)
self.assertTrue(self.mocks['check_growth.HistoryFile'].clear_history.called)
self.assertTrue(self.mocks['check_growth.HistoryFile'].save.called)
@data('disk', 'memory')
def test_insufficient_input_data(self, prefix):
if prefix == 'disk':
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mountpoints=['/tmp/'])
elif prefix == 'memory':
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mon_enabled=False)
self.mocks['check_growth.HistoryFile'].verify_dataspan.return_value = -1
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, 'unknown')
@data(("warn", 130), ("crit", 160))
def test_disk_alert_condition(self, data):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(memory_mon_enabled=False,
disk_mountpoints=['/tmp/'])
self.mocks['check_growth.find_current_grow_ratio'].return_value = data[1]
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
self.assertEqual(self.mocks['check_growth.find_planned_grow_ratio'].call_args_list,
[mock.call(1000, 2000, 365),
mock.call(2000, 4000, 365)])
self.assertEqual(self.mocks['check_growth.find_current_grow_ratio'].call_args_list,
[mock.call((1212, 1232, 500, 1563), ),
mock.call((1212, 1232, 500, 1563), )])
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, data[0])
@data(("warn", 130), ("crit", 160))
def test_memory_alert_condition(self, data):
self.mocks['check_growth.ScriptConfiguration'].get_val.side_effect = \
self._script_conf_factory(disk_mon_enabled=False)
self.mocks['check_growth.find_current_grow_ratio'].return_value = data[1]
with self.assertRaises(SystemExit):
check_growth.main(config_file=paths.TEST_CONFIG_FILE)
self.mocks['check_growth.find_planned_grow_ratio'].assert_called_with(1000, 2000, 365)
self.mocks['check_growth.find_current_grow_ratio'].assert_called_with((1212, 1232, 500, 1563),)
status, msg = self.mocks['check_growth.ScriptStatus'].update.call_args[0]
self.assertEqual(status, data[0])
class TestHistFile(TestsBaseClass):
def setUp(self):
conf_file = self._script_conf_factory(disk_mon_enabled=False)
self.max_averaging_window = conf_file("max_averaging_window")
self.min_averaging_window = conf_file("min_averaging_window")
self.history_file = conf_file("history_file")
self.cur_time = 1000000000
patcher = mock.patch('check_growth.time.time')
self.time_mock = patcher.start()
self.addCleanup(patcher.stop)
self.time_mock.return_value = self.cur_time
try:
os.unlink(self.history_file)
except (OSError, IOError):
pass
check_growth.HistoryFile.init(self.history_file, self.max_averaging_window,
self.min_averaging_window)
def test_histfile_timespan_calculation(self):
check_growth.HistoryFile.add_datapoint('memory', 1)
check_growth.HistoryFile.add_datapoint('disk', 1, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 1, path='/tmp/',
data_type='space')
self.time_mock.return_value = self.cur_time + 1 * 3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 2)
check_growth.HistoryFile.add_datapoint('disk', 2, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 2, path='/tmp/',
data_type='space')
dataspan_memory = check_growth.HistoryFile.get_dataspan('memory')
dataspan_disk_i = check_growth.HistoryFile.get_dataspan('disk',
'/tmp/',
'inode')
dataspan_disk_s = check_growth.HistoryFile.get_dataspan('disk',
'/tmp/',
'space')
self.assertEqual(dataspan_memory, 1)
self.assertEqual(dataspan_disk_i, 1)
self.assertEqual(dataspan_disk_s, 1)
self.assertLess(check_growth.HistoryFile.verify_dataspan('memory'), 0)
self.assertLess(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'inode'), 0)
self.assertLess(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'space'), 0)
self.time_mock.return_value = self.cur_time + (0.1 + self.min_averaging_window) * 3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 3)
check_growth.HistoryFile.add_datapoint('disk', 3, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 3, path='/tmp/',
data_type='space')
dataspan_memory = check_growth.HistoryFile.get_dataspan('memory')
dataspan_disk_i = check_growth.HistoryFile.get_dataspan(
'disk', '/tmp/', 'inode')
dataspan_disk_s = check_growth.HistoryFile.get_dataspan(
'disk', '/tmp/', 'space')
self.assertEqual(dataspan_memory, self.min_averaging_window + 0.1)
self.assertEqual(dataspan_disk_i, self.min_averaging_window + 0.1)
self.assertEqual(dataspan_disk_s, self.min_averaging_window + 0.1)
self.assertGreater(check_growth.HistoryFile.verify_dataspan('memory'), 0)
self.assertGreater(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'inode'), 0)
self.assertGreater(check_growth.HistoryFile.verify_dataspan(
'disk', '/tmp/', 'space'), 0)
def test_histfile_load(self):
check_growth.HistoryFile.add_datapoint('memory', 10356)
check_growth.HistoryFile.add_datapoint('disk', 134321, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 354334321, path='/tmp/',
data_type='space')
self.time_mock.return_value = self.cur_time + self.max_averaging_window * \
3600 * 24 + 1
check_growth.HistoryFile.add_datapoint('memory', 234453)
check_growth.HistoryFile.add_datapoint('disk', 234321, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 654334321, path='/tmp/',
data_type='space')
check_growth.HistoryFile.save()
check_growth.HistoryFile.init(self.history_file, self.max_averaging_window,
self.min_averaging_window)
self.time_mock.return_value = self.cur_time + (self.max_averaging_window + 1) * \
3600 * 24
check_growth.HistoryFile.add_datapoint('memory', 575553)
check_growth.HistoryFile.add_datapoint('disk', 234234367, path='/tmp/',
data_type='inode')
check_growth.HistoryFile.add_datapoint('disk', 652314121, path='/tmp/',
data_type='space')
memory_data = check_growth.HistoryFile.get_datapoints('memory')
disk_data_space = check_growth.HistoryFile.get_datapoints('disk',
path='/tmp/',
data_type='space')
disk_data_inode = check_growth.HistoryFile.get_datapoints('disk',
path='/tmp/',
data_type='inode')
self.assertEqual(memory_data, {1001296000: 575553, 1001209601: 234453})
self.assertEqual(disk_data_space,
{1001296000: 652314121, 1001209601: 654334321})
self.assertEqual(disk_data_inode,
{1001296000: 234234367, 1001209601: 234321})
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c407f90884301b776b97d1fded0cfdf77b2360e
| 5,153
|
py
|
Python
|
edge/server.py
|
akirato0223/test
|
d530ee17ca839fcf863f9e08f9615e3856e02e3d
|
[
"Apache-2.0"
] | null | null | null |
edge/server.py
|
akirato0223/test
|
d530ee17ca839fcf863f9e08f9615e3856e02e3d
|
[
"Apache-2.0"
] | null | null | null |
edge/server.py
|
akirato0223/test
|
d530ee17ca839fcf863f9e08f9615e3856e02e3d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal example on how to start a simple Flower server."""
import argparse
from collections import OrderedDict
from typing import Callable, Dict, Optional, Tuple
import flwr as fl
import numpy as np
import torch
import torchvision
from flwr.common.logger import log
from logging import INFO
import utils
# pylint: disable=no-member
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# pylint: enable=no-member
parser = argparse.ArgumentParser(description="Flower")
parser.add_argument(
"--server_address",
type=str,
required=True,
help=f"gRPC server address",
)
parser.add_argument(
"--rounds",
type=int,
default=1,
help="Number of rounds of federated learning (default: 1)",
)
parser.add_argument(
"--sample_fraction",
type=float,
default=1.0,
help="Fraction of available clients used for fit/evaluate (default: 1.0)",
)
parser.add_argument(
"--min_sample_size",
type=int,
default=2,
help="Minimum number of clients used for fit/evaluate (default: 2)",
)
parser.add_argument(
"--min_num_clients",
type=int,
default=2,
help="Minimum number of available clients required for sampling (default: 2)",
)
parser.add_argument(
"--log_host",
type=str,
help="Logserver address (no default)",
)
parser.add_argument(
"--model",
type=str,
default="ResNet18",
choices=["Net", "ResNet18"],
help="model to train",
)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="training batch size",
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="number of workers for dataset reading",
)
parser.add_argument("--pin_memory", action="store_true")
args = parser.parse_args()
def main() -> None:
"""Start server and train five rounds."""
print(args)
assert (
args.min_sample_size <= args.min_num_clients
), f"Num_clients shouldn't be lower than min_sample_size"
# Configure logger
fl.common.logger.configure("server", host=args.log_host)
# Load evaluation data
_, testset = utils.load_cifar(download=True)
# Create client_manager, strategy, and server
client_manager = fl.server.SimpleClientManager()
# At this point the client manager is empty; clients register with it as they connect
log(INFO, f"Clients inside client_manager (available clients: {client_manager.all()}")
strategy = fl.server.strategy.FedAvg(
fraction_fit=args.sample_fraction,
min_fit_clients=args.min_sample_size,
min_available_clients=args.min_num_clients,
eval_fn=get_eval_fn(testset),
on_fit_config_fn=fit_config,
)
#server initialization
server = fl.server.Server(client_manager=client_manager, strategy=strategy)
# Run server
log(INFO, "Starting up the server (gRPC)")
# this is inside server/app.py -> inside _fl func, server.fit is being called.
# global model training is also done here.
fl.server.start_server(
args.server_address,
server,
config={"num_rounds": args.rounds},
)
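# Example invocation (address and round count are illustrative; only --server_address
# is required by the argument parser above):
#     python server.py --server_address=[::]:8080 --rounds 3 --min_num_clients 2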
def fit_config(rnd: int) -> Dict[str, fl.common.Scalar]:
"""Return a configuration with static batch size and (local) epochs."""
config = {
"epoch_global": str(rnd),
"epochs": str(1),
"batch_size": str(args.batch_size),
"num_workers": str(args.num_workers),
"pin_memory": str(args.pin_memory),
}
return config
def set_weights(model: torch.nn.ModuleList, weights: fl.common.Weights) -> None:
"""Set model weights from a list of NumPy ndarrays."""
state_dict = OrderedDict(
{
k: torch.Tensor(np.atleast_1d(v))
for k, v in zip(model.state_dict().keys(), weights)
}
)
model.load_state_dict(state_dict, strict=True)
def get_eval_fn(
testset: torchvision.datasets.CIFAR10,
) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:
"""Return an evaluation function for centralized evaluation."""
def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:
"""Use the entire CIFAR-10 test set for evaluation."""
model = utils.load_model(args.model)
set_weights(model, weights)
model.to(DEVICE)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)
loss, accuracy = utils.test(model, testloader, device=DEVICE)
return loss, {"accuracy": accuracy}
return evaluate
if __name__ == "__main__":
main()
| 28.787709
| 90
| 0.6732
|
import argparse
from collections import OrderedDict
from typing import Callable, Dict, Optional, Tuple
import flwr as fl
import numpy as np
import torch
import torchvision
from flwr.common.logger import log
from logging import INFO
import utils
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser(description="Flower")
parser.add_argument(
"--server_address",
type=str,
required=True,
help=f"gRPC server address",
)
parser.add_argument(
"--rounds",
type=int,
default=1,
help="Number of rounds of federated learning (default: 1)",
)
parser.add_argument(
"--sample_fraction",
type=float,
default=1.0,
help="Fraction of available clients used for fit/evaluate (default: 1.0)",
)
parser.add_argument(
"--min_sample_size",
type=int,
default=2,
help="Minimum number of clients used for fit/evaluate (default: 2)",
)
parser.add_argument(
"--min_num_clients",
type=int,
default=2,
help="Minimum number of available clients required for sampling (default: 2)",
)
parser.add_argument(
"--log_host",
type=str,
help="Logserver address (no default)",
)
parser.add_argument(
"--model",
type=str,
default="ResNet18",
choices=["Net", "ResNet18"],
help="model to train",
)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="training batch size",
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="number of workers for dataset reading",
)
parser.add_argument("--pin_memory", action="store_true")
args = parser.parse_args()
def main() -> None:
print(args)
assert (
args.min_sample_size <= args.min_num_clients
), f"Num_clients shouldn't be lower than min_sample_size"
# Configure logger
fl.common.logger.configure("server", host=args.log_host)
# Load evaluation data
_, testset = utils.load_cifar(download=True)
# Create client_manager, strategy, and server
client_manager = fl.server.SimpleClientManager()
# At this point the client manager is empty; clients register with it as they connect
log(INFO, f"Clients inside client_manager (available clients: {client_manager.all()}")
strategy = fl.server.strategy.FedAvg(
fraction_fit=args.sample_fraction,
min_fit_clients=args.min_sample_size,
min_available_clients=args.min_num_clients,
eval_fn=get_eval_fn(testset),
on_fit_config_fn=fit_config,
)
#server initialization
server = fl.server.Server(client_manager=client_manager, strategy=strategy)
# Run server
log(INFO, "Starting up the server (gRPC)")
# this is inside server/app.py -> inside _fl func, server.fit is being called.
# global model training is also done here.
fl.server.start_server(
args.server_address,
server,
config={"num_rounds": args.rounds},
)
def fit_config(rnd: int) -> Dict[str, fl.common.Scalar]:
config = {
"epoch_global": str(rnd),
"epochs": str(1),
"batch_size": str(args.batch_size),
"num_workers": str(args.num_workers),
"pin_memory": str(args.pin_memory),
}
return config
def set_weights(model: torch.nn.ModuleList, weights: fl.common.Weights) -> None:
state_dict = OrderedDict(
{
k: torch.Tensor(np.atleast_1d(v))
for k, v in zip(model.state_dict().keys(), weights)
}
)
model.load_state_dict(state_dict, strict=True)
def get_eval_fn(
testset: torchvision.datasets.CIFAR10,
) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:
def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:
model = utils.load_model(args.model)
set_weights(model, weights)
model.to(DEVICE)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)
loss, accuracy = utils.test(model, testloader, device=DEVICE)
return loss, {"accuracy": accuracy}
return evaluate
if __name__ == "__main__":
main()
| true
| true
|
1c408005c7eeb0a5dd713dacd7c45c871c4a05c6
| 4,406
|
bzl
|
Python
|
tools/build_rules/module_rules.bzl
|
xmfan/buck
|
1e755494263bfa4b68e62fd61d86a711b9febc3a
|
[
"Apache-2.0"
] | null | null | null |
tools/build_rules/module_rules.bzl
|
xmfan/buck
|
1e755494263bfa4b68e62fd61d86a711b9febc3a
|
[
"Apache-2.0"
] | null | null | null |
tools/build_rules/module_rules.bzl
|
xmfan/buck
|
1e755494263bfa4b68e62fd61d86a711b9febc3a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains build rules for Buck modules"""
load("@bazel_skylib//lib:collections.bzl", "collections")
load("//tools/build_rules:java_rules.bzl", "java_library_with_plugins")
load("//tools/build_rules:module_rules_for_tests.bzl", "convert_module_deps_to_test")
def buck_module(
name,
module_deps = [],
module_resources = [],
**kwargs):
"""Declares a buck module
Args:
name: name
module_deps: A list of modules this module depends on
module_resources: A list of files that needs to be placed along a module
**kwargs: kwargs
"""
kwargs["provided_deps"] = collections.uniq(list(kwargs.get("provided_deps", [])) + [
"//src/com/facebook/buck/core/module:module",
] + module_deps)
java_library_with_plugins(
name = name,
**kwargs
)
jar_without_hash_name = name + "_jar_without_hash"
native.java_binary(
name = jar_without_hash_name,
deps = [
":" + name,
],
)
calculate_module_hash_name = name + "_calculate_module_hash"
native.genrule(
name = calculate_module_hash_name,
out = "module-binary-hash.txt",
cmd = " ".join([
"$(exe //py/hash:hash_files)",
"$(location :{})".format(jar_without_hash_name),
"$(location //py/hash:hash_files.py) > $OUT",
]),
)
meta_inf_name = name + "-meta-inf"
native.genrule(
name = meta_inf_name,
out = "META-INF",
cmd = " ".join([
"mkdir $OUT && ",
"cp $(location :{}) $OUT/module-binary-hash.txt".format(calculate_module_hash_name),
]),
cmd_exe = " && ".join([
"mkdir %OUT%",
"copy $(location :{}) %OUT%\\module-binary-hash.txt".format(calculate_module_hash_name),
]),
)
module_name = name + "-module"
native.zip_file(
name = module_name,
out = "{}.jar".format(name),
srcs = [
":" + meta_inf_name,
],
zip_srcs = [
":" + jar_without_hash_name,
],
visibility = [
"//programs:bucklib",
"//programs:calculate-buck-binary-hash",
"//test/...",
],
)
final_module_jar_name = name + "-module-jar"
native.prebuilt_jar(
name = final_module_jar_name,
binary_jar = ":" + module_name,
)
# This target is not used directly by module rules, but by `java_test` to get access
# to all provided dependencies of the current module.
native.java_library(
name = name + "_module_for_test",
exported_deps = depset([":" + final_module_jar_name] +
list(kwargs.get("provided_deps", [])) +
list(kwargs.get("exported_provided_deps", [])) +
convert_module_deps_to_test(module_deps)),
visibility = ["PUBLIC"],
)
native.filegroup(
name = name + "_resources",
srcs = module_resources,
visibility = ["PUBLIC"],
)
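# A hypothetical usage sketch of the macro above in a BUCK file (target names and
# sources are illustrative):
#     buck_module(
#         name = "myplugin",
#         srcs = glob(["*.java"]),
#         module_deps = ["//src/com/facebook/buck/somemodule:somemodule"],
#     )
# Among the generated targets are ":myplugin-module" (the module jar with its
# META-INF hash) and ":myplugin_module_for_test" for consumption from tests.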
def get_module_binary(module):
""" Returns target for module's binary """
return "{}-module".format(module)
def convert_modules_to_resources(buck_modules):
""" Converts modules to a map with resources for packaging in a Python binary """
result = {}
for k, v in buck_modules.items():
result["buck-modules/{}.jar".format(k)] = get_module_binary(v)
return result
def convert_modules_to_external_resources(buck_modules, modules_with_resources):
""" Converts modules to a map with resources to keep them outside of module jars """
result = {}
for module in modules_with_resources:
result["buck-modules-resources/{}".format(module)] = "{}_resources".format(buck_modules[module])
return result
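# Worked example for the two helpers above, assuming a hypothetical module map:
#     buck_modules = {"foo": "//src/foo:foo"}
#     convert_modules_to_resources(buck_modules)
#         -> {"buck-modules/foo.jar": "//src/foo:foo-module"}
#     convert_modules_to_external_resources(buck_modules, ["foo"])
#         -> {"buck-modules-resources/foo": "//src/foo:foo_resources"}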
| 31.471429
| 104
| 0.611666
|
load("@bazel_skylib//lib:collections.bzl", "collections")
load("//tools/build_rules:java_rules.bzl", "java_library_with_plugins")
load("//tools/build_rules:module_rules_for_tests.bzl", "convert_module_deps_to_test")
def buck_module(
name,
module_deps = [],
module_resources = [],
**kwargs):
kwargs["provided_deps"] = collections.uniq(list(kwargs.get("provided_deps", [])) + [
"//src/com/facebook/buck/core/module:module",
] + module_deps)
java_library_with_plugins(
name = name,
**kwargs
)
jar_without_hash_name = name + "_jar_without_hash"
native.java_binary(
name = jar_without_hash_name,
deps = [
":" + name,
],
)
calculate_module_hash_name = name + "_calculate_module_hash"
native.genrule(
name = calculate_module_hash_name,
out = "module-binary-hash.txt",
cmd = " ".join([
"$(exe //py/hash:hash_files)",
"$(location :{})".format(jar_without_hash_name),
"$(location //py/hash:hash_files.py) > $OUT",
]),
)
meta_inf_name = name + "-meta-inf"
native.genrule(
name = meta_inf_name,
out = "META-INF",
cmd = " ".join([
"mkdir $OUT && ",
"cp $(location :{}) $OUT/module-binary-hash.txt".format(calculate_module_hash_name),
]),
cmd_exe = " && ".join([
"mkdir %OUT%",
"copy $(location :{}) %OUT%\\module-binary-hash.txt".format(calculate_module_hash_name),
]),
)
module_name = name + "-module"
native.zip_file(
name = module_name,
out = "{}.jar".format(name),
srcs = [
":" + meta_inf_name,
],
zip_srcs = [
":" + jar_without_hash_name,
],
visibility = [
"//programs:bucklib",
"//programs:calculate-buck-binary-hash",
"//test/...",
],
)
final_module_jar_name = name + "-module-jar"
native.prebuilt_jar(
name = final_module_jar_name,
binary_jar = ":" + module_name,
)
native.java_library(
name = name + "_module_for_test",
exported_deps = depset([":" + final_module_jar_name] +
list(kwargs.get("provided_deps", [])) +
list(kwargs.get("exported_provided_deps", [])) +
convert_module_deps_to_test(module_deps)),
visibility = ["PUBLIC"],
)
native.filegroup(
name = name + "_resources",
srcs = module_resources,
visibility = ["PUBLIC"],
)
def get_module_binary(module):
return "{}-module".format(module)
def convert_modules_to_resources(buck_modules):
result = {}
for k, v in buck_modules.items():
result["buck-modules/{}.jar".format(k)] = get_module_binary(v)
return result
def convert_modules_to_external_resources(buck_modules, modules_with_resources):
result = {}
for module in modules_with_resources:
result["buck-modules-resources/{}".format(module)] = "{}_resources".format(buck_modules[module])
return result
| true
| true
|
1c40806bbf5b0f4e55a8e494da731a672586c90e
| 2,478
|
py
|
Python
|
jobfunnel/config/validate.py
|
Arax1/JobFunnel
|
461aca3fd8d5c07fc4a57bf82d8bdc08a775e82b
|
[
"MIT"
] | 1
|
2019-07-13T14:41:26.000Z
|
2019-07-13T14:41:26.000Z
|
jobfunnel/config/validate.py
|
studentbrad/JobFunnel
|
f7913304f7cd11799b975fc8afc1c60521184e68
|
[
"MIT"
] | 1
|
2021-05-05T01:39:59.000Z
|
2021-05-05T01:39:59.000Z
|
jobfunnel/config/validate.py
|
studentbrad/JobFunnel
|
f7913304f7cd11799b975fc8afc1c60521184e68
|
[
"MIT"
] | null | null | null |
import re
from .valid_options import DOMAINS, PROVIDERS, DELAY_FUN
from .parser import ConfigError
def validate_region(region):
""" Check if the region settings are valid.
"""
# only allow supported domains
if region['domain'] not in DOMAINS:
raise ConfigError('domain')
# search term state is inserted as province if province does not already
# exist
if 'state' in region:
if (region['state'] is not None) and (region['province'] is None):
region['province'] = region['state']
# north american jobs should have a province/state provided
if region['domain'] in ['com', 'ca'] and region['province'] is None:
raise ConfigError('province')
def validate_delay(delay):
""" Check if the delay has a valid configuration.
"""
# delay function should be constant, linear or sigmoid
if delay['function'] not in DELAY_FUN:
raise ConfigError('delay_function')
    # maximum delay should be larger than or equal to minimum delay
if delay['delay'] < delay['min_delay']:
raise ConfigError('(min)_delay')
# minimum delay should be at least 1 and maximum delay at least 10
if delay['delay'] < 10 or delay['min_delay'] < 1:
raise ConfigError('(min)_delay')
def validate_config(config):
""" Check whether the config is a valid configuration.
    Some options are already checked by the command-line tool, e.g., logging.
Some checks are trivial while others have a separate function.
"""
# check if paths are valid
check_paths = {
'data_path': r'data$',
'master_list_path': r'master_list\.csv$',
'duplicate_list_path': r'duplicate_list\.csv$',
'log_path': r'data[\\\/]jobfunnel.log$',
'filter_list_path': r'data[\\\/]filter_list\.json$',
}
for path, pattern in check_paths.items():
if not re.search(pattern, config[path]):
raise ConfigError(path)
# check if the provider list only consists of supported providers
if not set(config['providers']).issubset(PROVIDERS):
raise ConfigError('providers')
# check validity of region settings
validate_region(config['search_terms']['region'])
# check validity of delay settings
validate_delay(config['delay_config'])
# check the validity of max_listing_days settings
    if config['max_listing_days'] is not None and config['max_listing_days'] < 0:
raise ConfigError('max_listing_days')
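# Illustrative sketch, not part of the original module: the rough shape of a config
# dict that would pass validate_config. All values are hypothetical examples; the
# accepted domains, providers, and delay functions come from valid_options.
def _example_config():
    return {
        'data_path': 'data',
        'master_list_path': 'data/master_list.csv',
        'duplicate_list_path': 'data/duplicate_list.csv',
        'log_path': 'data/jobfunnel.log',
        'filter_list_path': 'data/filter_list.json',
        'providers': ['indeed'],  # hypothetical; must be a subset of PROVIDERS
        'search_terms': {
            'region': {'domain': 'com', 'province': 'ON', 'state': None},
        },
        'delay_config': {'function': 'linear', 'delay': 10, 'min_delay': 1},
        'max_listing_days': 30,
    }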
| 33.486486
| 82
| 0.669088
|
import re
from .valid_options import DOMAINS, PROVIDERS, DELAY_FUN
from .parser import ConfigError
def validate_region(region):
if region['domain'] not in DOMAINS:
raise ConfigError('domain')
if 'state' in region:
if (region['state'] is not None) and (region['province'] is None):
region['province'] = region['state']
if region['domain'] in ['com', 'ca'] and region['province'] is None:
raise ConfigError('province')
def validate_delay(delay):
if delay['function'] not in DELAY_FUN:
raise ConfigError('delay_function')
if delay['delay'] < delay['min_delay']:
raise ConfigError('(min)_delay')
if delay['delay'] < 10 or delay['min_delay'] < 1:
raise ConfigError('(min)_delay')
def validate_config(config):
check_paths = {
'data_path': r'data$',
'master_list_path': r'master_list\.csv$',
'duplicate_list_path': r'duplicate_list\.csv$',
'log_path': r'data[\\\/]jobfunnel.log$',
'filter_list_path': r'data[\\\/]filter_list\.json$',
}
for path, pattern in check_paths.items():
if not re.search(pattern, config[path]):
raise ConfigError(path)
if not set(config['providers']).issubset(PROVIDERS):
raise ConfigError('providers')
validate_region(config['search_terms']['region'])
validate_delay(config['delay_config'])
    if config['max_listing_days'] is not None and config['max_listing_days'] < 0:
raise ConfigError('max_listing_days')
| true
| true
|
1c408180acd684d7e97183a6ca88a6e68ac5ae37
| 908
|
py
|
Python
|
processor_box/start.py
|
monobot/micro_orchestra
|
04fbcf202d9bda332890d4478569a911650d6540
|
[
"MIT"
] | null | null | null |
processor_box/start.py
|
monobot/micro_orchestra
|
04fbcf202d9bda332890d4478569a911650d6540
|
[
"MIT"
] | null | null | null |
processor_box/start.py
|
monobot/micro_orchestra
|
04fbcf202d9bda332890d4478569a911650d6540
|
[
"MIT"
] | null | null | null |
import os
import uuid
from redis_connector import RedisConnector
HOST = 'redis_cache'
PORT = 6379
QUEUENAME = 'microservices'
QUEUES = [QUEUENAME, ]
MICRO_SERVICE_NAME = 'processor'
redis_connector_processor = RedisConnector(HOST, PORT, QUEUES, MICRO_SERVICE_NAME)
multiplier = int(os.environ.get('MULTIPLIER', '1'))
def process(message):
first_operator = multiplier * message['data']['first']
second_operator = message['data']['second']
operation = message['data']['operation']
if operation == '+':
result = first_operator + second_operator
redis_connector_processor.publish(
'microservices',
{
"target": 'final',
"message_id": str(uuid.uuid4()),
"message": "calculated",
"data": {
'result': result
},
}
)
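# Illustrative note, not part of the original script: an incoming message is expected
# to look roughly like the hypothetical example below; only the '+' operation is handled.
#
#   {
#       "target": "processor",
#       "message_id": "<uuid>",
#       "message": "calculate",
#       "data": {"first": 2, "second": 3, "operation": "+"},
#   }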
while True:
redis_connector_processor.subscribe(process)
| 23.894737
| 82
| 0.634361
|
import os
import uuid
from redis_connector import RedisConnector
HOST = 'redis_cache'
PORT = 6379
QUEUENAME = 'microservices'
QUEUES = [QUEUENAME, ]
MICRO_SERVICE_NAME = 'processor'
redis_connector_processor = RedisConnector(HOST, PORT, QUEUES, MICRO_SERVICE_NAME)
multiplier = int(os.environ.get('MULTIPLIER', '1'))
def process(message):
first_operator = multiplier * message['data']['first']
second_operator = message['data']['second']
operation = message['data']['operation']
if operation == '+':
result = first_operator + second_operator
redis_connector_processor.publish(
'microservices',
{
"target": 'final',
"message_id": str(uuid.uuid4()),
"message": "calculated",
"data": {
'result': result
},
}
)
while True:
redis_connector_processor.subscribe(process)
| true
| true
|
1c4081cba058811be59db669e6489eeab5314d3c
| 3,437
|
py
|
Python
|
venv/lib/python3.8/site-packages/astroid/transforms.py
|
DiegoSilvaHoffmann/Small-Ecommerce
|
c6f9d75cc6dd558aa1ba9abe0186a27fe15b32d2
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
venv/lib/python3.8/site-packages/astroid/transforms.py
|
DiegoSilvaHoffmann/Small-Ecommerce
|
c6f9d75cc6dd558aa1ba9abe0186a27fe15b32d2
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
env/lib/python3.9/site-packages/astroid/transforms.py
|
simotwo/AbileneParadox-ddd
|
c85961efb37aba43c0d99ed1c36d083507e2b2d3
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
# Copyright (c) 2015-2016, 2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
import collections
from functools import lru_cache
class TransformVisitor:
"""A visitor for handling transforms.
The standard approach of using it is to call
:meth:`~visit` with an *astroid* module and the class
will take care of the rest, walking the tree and running the
transforms for each encountered node.
"""
TRANSFORM_MAX_CACHE_SIZE = 10000
def __init__(self):
self.transforms = collections.defaultdict(list)
@lru_cache(maxsize=TRANSFORM_MAX_CACHE_SIZE)
def _transform(self, node):
"""Call matching transforms for the given node if any and return the
transformed node.
"""
cls = node.__class__
if cls not in self.transforms:
# no transform registered for this class of node
return node
transforms = self.transforms[cls]
for transform_func, predicate in transforms:
if predicate is None or predicate(node):
ret = transform_func(node)
# if the transformation function returns something, it's
# expected to be a replacement for the node
if ret is not None:
node = ret
if ret.__class__ != cls:
# Can no longer apply the rest of the transforms.
break
return node
def _visit(self, node):
if hasattr(node, "_astroid_fields"):
for name in node._astroid_fields:
value = getattr(node, name)
visited = self._visit_generic(value)
if visited != value:
setattr(node, name, visited)
return self._transform(node)
def _visit_generic(self, node):
if isinstance(node, list):
return [self._visit_generic(child) for child in node]
if isinstance(node, tuple):
return tuple(self._visit_generic(child) for child in node)
if not node or isinstance(node, str):
return node
return self._visit(node)
def register_transform(self, node_class, transform, predicate=None):
"""Register `transform(node)` function to be applied on the given
astroid's `node_class` if `predicate` is None or returns true
when called with the node as argument.
The transform function may return a value which is then used to
substitute the original node in the tree.
"""
self.transforms[node_class].append((transform, predicate))
def unregister_transform(self, node_class, transform, predicate=None):
"""Unregister the given transform."""
self.transforms[node_class].remove((transform, predicate))
def visit(self, module):
"""Walk the given astroid *tree* and transform each encountered node
Only the nodes which have transforms registered will actually
be replaced or changed.
"""
module.body = [self._visit(child) for child in module.body]
return self._transform(module)
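# Illustrative usage sketch, not part of the original module. `node_class`, `tree`,
# and the callables below are hypothetical placeholders for a real astroid node
# class, a parsed astroid module, and user-supplied transform/predicate functions.
def _example_register_and_visit(tree, node_class):
    visitor = TransformVisitor()

    def only_named_target(node):
        # Hypothetical predicate: only transform nodes whose name is "target".
        return getattr(node, "name", None) == "target"

    def my_transform(node):
        # Returning None keeps the original node; returning a node replaces it.
        return None

    visitor.register_transform(node_class, my_transform, predicate=only_named_target)
    return visitor.visit(tree)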
| 37.358696
| 85
| 0.644166
|
import collections
from functools import lru_cache
class TransformVisitor:
TRANSFORM_MAX_CACHE_SIZE = 10000
def __init__(self):
self.transforms = collections.defaultdict(list)
@lru_cache(maxsize=TRANSFORM_MAX_CACHE_SIZE)
def _transform(self, node):
cls = node.__class__
if cls not in self.transforms:
return node
transforms = self.transforms[cls]
for transform_func, predicate in transforms:
if predicate is None or predicate(node):
ret = transform_func(node)
# expected to be a replacement for the node
if ret is not None:
node = ret
if ret.__class__ != cls:
# Can no longer apply the rest of the transforms.
break
return node
def _visit(self, node):
if hasattr(node, "_astroid_fields"):
for name in node._astroid_fields:
value = getattr(node, name)
visited = self._visit_generic(value)
if visited != value:
setattr(node, name, visited)
return self._transform(node)
def _visit_generic(self, node):
if isinstance(node, list):
return [self._visit_generic(child) for child in node]
if isinstance(node, tuple):
return tuple(self._visit_generic(child) for child in node)
if not node or isinstance(node, str):
return node
return self._visit(node)
def register_transform(self, node_class, transform, predicate=None):
self.transforms[node_class].append((transform, predicate))
def unregister_transform(self, node_class, transform, predicate=None):
self.transforms[node_class].remove((transform, predicate))
def visit(self, module):
module.body = [self._visit(child) for child in module.body]
return self._transform(module)
| true
| true
|
1c40824a9bddbf7eaaaeea4f7ce6abb11a34fb46
| 108,170
|
py
|
Python
|
curator/actions.py
|
andytumelty/curator
|
ecc57679b4098aa55d1015b8cb406b4c5875e3c0
|
[
"Apache-2.0"
] | null | null | null |
curator/actions.py
|
andytumelty/curator
|
ecc57679b4098aa55d1015b8cb406b4c5875e3c0
|
[
"Apache-2.0"
] | null | null | null |
curator/actions.py
|
andytumelty/curator
|
ecc57679b4098aa55d1015b8cb406b4c5875e3c0
|
[
"Apache-2.0"
] | null | null | null |
"""Curator Actions"""
import logging
import re
import time
from copy import deepcopy
from datetime import datetime
from elasticsearch.exceptions import ConflictError, RequestError
from curator import exceptions, utils
class Alias(object):
"""Alias Action Class"""
def __init__(self, name=None, extra_settings={}, **kwargs):
"""
Define the Alias object.
:arg name: The alias name
:arg extra_settings: Extra settings, including filters and routing. For
more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html
:type extra_settings: dict, representing the settings.
"""
if not name:
raise exceptions.MissingArgument('No value for "name" provided.')
#: Instance variable
#: The strftime parsed version of `name`.
self.name = utils.parse_date_pattern(name)
#: The list of actions to perform. Populated by
#: :mod:`curator.actions.Alias.add` and
#: :mod:`curator.actions.Alias.remove`
self.actions = []
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = None
#: Instance variable.
#: Any extra things to add to the alias, like filters, or routing.
self.extra_settings = extra_settings
self.loggit = logging.getLogger('curator.actions.alias')
#: Instance variable.
#: Preset default value to `False`.
self.warn_if_no_indices = False
def add(self, ilo, warn_if_no_indices=False):
"""
Create `add` statements for each index in `ilo` for `alias`, then
append them to `actions`. Add any `extras` that may be there.
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
utils.verify_index_list(ilo)
if not self.client:
self.client = ilo.client
self.name = utils.parse_datemath(self.client, self.name)
try:
ilo.empty_list_check()
except exceptions.NoIndices:
            # Warn, rather than raise, if there are no indices to add and the option is set
if warn_if_no_indices:
self.warn_if_no_indices = True
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to add to {0}'.format(self.name)
)
return
else:
# Re-raise the exceptions.NoIndices so it will behave as before
raise exceptions.NoIndices('No indices to add to alias')
for index in ilo.working_list():
self.loggit.debug(
'Adding index {0} to alias {1} with extra settings '
'{2}'.format(index, self.name, self.extra_settings)
)
add_dict = {'add' : {'index' : index, 'alias': self.name}}
add_dict['add'].update(self.extra_settings)
self.actions.append(add_dict)
def remove(self, ilo, warn_if_no_indices=False):
"""
Create `remove` statements for each index in `ilo` for `alias`,
then append them to `actions`.
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
utils.verify_index_list(ilo)
if not self.client:
self.client = ilo.client
self.name = utils.parse_datemath(self.client, self.name)
try:
ilo.empty_list_check()
except exceptions.NoIndices:
            # Warn, rather than raise, if there are no indices to remove and the option is set
if warn_if_no_indices:
self.warn_if_no_indices = True
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to remove from {0}'.format(self.name)
)
return
else:
# Re-raise the exceptions.NoIndices so it will behave as before
raise exceptions.NoIndices('No indices to remove from alias')
aliases = self.client.indices.get_alias()
for index in ilo.working_list():
if index in aliases:
self.loggit.debug(
'Index {0} in get_aliases output'.format(index))
# Only remove if the index is associated with the alias
if self.name in aliases[index]['aliases']:
self.loggit.debug(
'Removing index {0} from alias '
'{1}'.format(index, self.name)
)
self.actions.append(
{'remove' : {'index' : index, 'alias': self.name}})
else:
self.loggit.debug(
'Can not remove: Index {0} is not associated with alias'
' {1}'.format(index, self.name)
)
def body(self):
"""
Return a `body` string suitable for use with the `update_aliases` API
call.
"""
if not self.actions:
if not self.warn_if_no_indices:
raise exceptions.ActionError('No "add" or "remove" operations')
else:
raise exceptions.NoIndices('No "adds" or "removes" found. Taking no action')
self.loggit.debug('Alias actions: {0}'.format(self.actions))
return {'actions' : self.actions}
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
for item in self.body()['actions']:
job = list(item.keys())[0]
index = item[job]['index']
alias = item[job]['alias']
# We want our log to look clever, so if job is "remove", strip the
# 'e' so "remove" can become "removing". "adding" works already.
self.loggit.info(
'DRY-RUN: alias: {0}ing index "{1}" {2} alias '
'"{3}"'.format(
job.rstrip('e'),
index,
                'to' if job == 'add' else 'from',
alias
)
)
def do_action(self):
"""
Run the API call `update_aliases` with the results of `body()`
"""
self.loggit.info('Updating aliases...')
self.loggit.info('Alias actions: {0}'.format(self.body()))
try:
self.client.indices.update_aliases(body=self.body())
except Exception as err:
utils.report_failure(err)
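# Illustrative usage sketch, not part of the original module: a typical Alias flow.
# `ilo_add` and `ilo_remove` are hypothetical, pre-filtered curator IndexList objects,
# and the alias name is a placeholder.
def _example_alias_flow(ilo_add, ilo_remove):
    alias = Alias(name='logs-current')
    alias.add(ilo_add)        # queue 'add' actions for these indices
    alias.remove(ilo_remove)  # queue 'remove' actions for these indices
    alias.do_dry_run()        # log the planned actions without applying them
    alias.do_action()         # or actually call update_aliases with body()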
class Allocation(object):
"""Allocation Action Class"""
def __init__(
self, ilo, key=None, value=None, allocation_type='require', wait_for_completion=False,
wait_interval=3, max_wait=-1
):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg key: An arbitrary metadata attribute key. Must match the key
assigned to at least some of your nodes to have any effect.
:arg value: An arbitrary metadata attribute value. Must correspond to
values associated with `key` assigned to at least some of your nodes
to have any effect. If a `None` value is provided, it will remove
any setting associated with that `key`.
:arg allocation_type: Type of allocation to apply. Default is `require`
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
.. note::
See:
https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-allocation-filtering.html
"""
utils.verify_index_list(ilo)
if not key:
raise exceptions.MissingArgument('No value for "key" provided')
if allocation_type not in ['require', 'include', 'exclude']:
raise ValueError(
'{0} is an invalid allocation_type. Must be one of "require", '
'"include", "exclude".'.format(allocation_type)
)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.allocation')
#: Instance variable.
#: Populated at instance creation time. Value is
#: ``index.routing.allocation.`` `allocation_type` ``.`` `key` ``.`` `value`
bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
self.body = {bkey : value}
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(self.index_list, 'allocation', body=self.body)
def do_action(self):
"""
Change allocation settings for indices in `index_list.indices` with the
settings in `body`.
"""
self.loggit.debug(
            'Cannot change shard routing allocation of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Updating {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
self.loggit.info('Updating index setting {0}'.format(self.body))
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.put_settings(
index=utils.to_csv(lst), body=self.body
)
if self.wfc:
self.loggit.debug(
'Waiting for shards to complete relocation for indices:'
' {0}'.format(utils.to_csv(lst))
)
utils.wait_for_it(
self.client, 'allocation',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
class Close(object):
"""Close Action Class"""
def __init__(self, ilo, delete_aliases=False, skip_flush=False, ignore_sync_failures=False):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg delete_aliases: If `True`, will delete any associated aliases
before closing indices.
:type delete_aliases: bool
:arg skip_flush: If `True`, will not flush indices before closing.
:type skip_flush: bool
:arg ignore_sync_failures: If `True`, will not fail if there are failures while attempting
a synced flush.
:type ignore_sync_failures: bool
"""
utils.verify_index_list(ilo)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `delete_aliases`
self.delete_aliases = delete_aliases
#: Instance variable.
#: Internal reference to `skip_flush`
self.skip_flush = skip_flush
#: Instance variable.
#: Internal reference to `ignore_sync_failures`
self.ignore_sync_failures = ignore_sync_failures
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.close')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(
self.index_list, 'close', **{'delete_aliases':self.delete_aliases})
def do_action(self):
"""
Close open indices in `index_list.indices`
"""
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Closing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
lst_as_csv = utils.to_csv(lst)
self.loggit.debug('CSV list of indices to close: {0}'.format(lst_as_csv))
if self.delete_aliases:
self.loggit.info('Deleting aliases from indices before closing.')
self.loggit.debug('Deleting aliases from: {0}'.format(lst))
try:
self.client.indices.delete_alias(index=lst_as_csv, name='_all')
self.loggit.debug('Deleted aliases from: {0}'.format(lst))
except Exception as err:
self.loggit.warn(
'Some indices may not have had aliases. Exception:'
' {0}'.format(err)
)
if not self.skip_flush:
try:
self.client.indices.flush_synced(index=lst_as_csv, ignore_unavailable=True)
except ConflictError as err:
if not self.ignore_sync_failures:
raise ConflictError(err.status_code, err.error, err.info)
else:
self.loggit.warn(
'Ignoring flushed sync failures: '
'{0} {1}'.format(err.error, err.info)
)
self.client.indices.close(index=lst_as_csv, ignore_unavailable=True)
except Exception as err:
utils.report_failure(err)
class Freeze(object):
"""Freeze Action Class"""
def __init__(self, ilo):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
utils.verify_index_list(ilo)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.freeze')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(
self.index_list, 'freeze')
def do_action(self):
"""
Freeze indices in `index_list.indices`
"""
#self.index_list.filter_frozen()
self.index_list.empty_list_check()
self.loggit.info(
'Freezing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.xpack.indices.freeze(
index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class Unfreeze(object):
"""Unfreeze Action Class"""
def __init__(self, ilo):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
utils.verify_index_list(ilo)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.unfreeze')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(
self.index_list, 'unfreeze')
def do_action(self):
"""
Unfreeze indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.info(
'Unfreezing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.xpack.indices.unfreeze(
index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class ClusterRouting(object):
"""ClusterRouting Action Class"""
def __init__(
self, client, routing_type=None, setting=None, value=None, wait_for_completion=False,
wait_interval=9, max_wait=-1
):
"""
For now, the cluster routing settings are hardcoded to be ``transient``
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg routing_type: Type of routing to apply. Either `allocation` or
`rebalance`
:arg setting: Currently, the only acceptable value for `setting` is
``enable``. This is here in case that changes.
:arg value: Used only if `setting` is `enable`. Semi-dependent on
`routing_type`. Acceptable values for `allocation` and `rebalance`
are ``all``, ``primaries``, and ``none`` (string, not `NoneType`).
If `routing_type` is `allocation`, this can also be
``new_primaries``, and if `rebalance`, it can be ``replicas``.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
"""
utils.verify_client_object(client)
#: Instance variable.
#: An :class:`elasticsearch.Elasticsearch` client object
self.client = client
self.loggit = logging.getLogger('curator.actions.cluster_routing')
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
if setting != 'enable':
raise ValueError(
'Invalid value for "setting": {0}.'.format(setting)
)
if routing_type == 'allocation':
if value not in ['all', 'primaries', 'new_primaries', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
elif routing_type == 'rebalance':
if value not in ['all', 'primaries', 'replicas', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
else:
raise ValueError(
'Invalid value for "routing_type": {0}.'.format(routing_type)
)
bkey = 'cluster.routing.{0}.{1}'.format(routing_type, setting)
self.body = {'transient' : {bkey : value}}
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: Update cluster routing settings with arguments: '
'{0}'.format(self.body)
)
def do_action(self):
"""
Change cluster routing settings with the settings in `body`.
"""
self.loggit.info('Updating cluster settings: {0}'.format(self.body))
try:
self.client.cluster.put_settings(body=self.body)
if self.wfc:
self.loggit.debug(
'Waiting for shards to complete routing and/or rebalancing'
)
utils.wait_for_it(
self.client, 'cluster_routing',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
class CreateIndex(object):
"""Create Index Action Class"""
def __init__(self, client, name, extra_settings={}, ignore_existing=False):
"""
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg name: A name, which can contain :py:func:`time.strftime`
strings
:arg extra_settings: The `settings` and `mappings` for the index. For
more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
:type extra_settings: dict, representing the settings and mappings.
:arg ignore_existing: If an index already exists, and this setting is ``True``,
ignore the 400 error that results in a `resource_already_exists_exception` and
return that it was successful.
"""
if not name:
raise exceptions.ConfigurationError('Value for "name" not provided.')
#: Instance variable.
#: The parsed version of `name`
self.name = utils.parse_date_pattern(name)
#: Instance variable.
#: Extracted from the action yaml, it should be a dictionary of
#: mappings and settings suitable for index creation.
self.body = extra_settings
#: Instance variable.
#: Extracted from the action yaml, it should be a boolean informing
#: whether to ignore the error if the index already exists.
self.ignore_existing = ignore_existing
#: Instance variable.
#: An :class:`elasticsearch.Elasticsearch` client object
self.client = client
self.loggit = logging.getLogger('curator.actions.create_index')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: create_index "%s" with arguments: '
'%s' % (self.name, self.body)
)
def do_action(self):
"""
Create index identified by `name` with settings in `body`
"""
self.loggit.info(
'Creating index "{0}" with settings: '
'{1}'.format(self.name, self.body)
)
try:
self.client.indices.create(index=self.name, body=self.body)
# Most likely error is a 400, `resource_already_exists_exception`
except RequestError as err:
match_list = ["index_already_exists_exception", "resource_already_exists_exception"]
if err.error in match_list and self.ignore_existing:
self.loggit.warn('Index %s already exists.' % self.name)
else:
raise exceptions.FailedExecution('Index %s already exists.' % self.name)
except Exception as err:
utils.report_failure(err)
class DeleteIndices(object):
"""Delete Indices Action Class"""
def __init__(self, ilo, master_timeout=30):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg master_timeout: Number of seconds to wait for master node response
"""
utils.verify_index_list(ilo)
if not isinstance(master_timeout, int):
raise TypeError(
'Incorrect type for "master_timeout": {0}. '
'Should be integer value.'.format(type(master_timeout))
)
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: String value of `master_timeout` + 's', for seconds.
self.master_timeout = str(master_timeout) + 's'
self.loggit = logging.getLogger('curator.actions.delete_indices')
self.loggit.debug('master_timeout value: {0}'.format(
self.master_timeout))
def _verify_result(self, result, count):
"""
Breakout method to aid readability
:arg result: A list of indices from `_get_result_list`
:arg count: The number of tries that have occurred
:rtype: bool
"""
if isinstance(result, list) and result:
self.loggit.error(
'The following indices failed to delete on try '
'#{0}:'.format(count)
)
for idx in result:
self.loggit.error("---{0}".format(idx))
retval = False
else:
self.loggit.debug(
'Successfully deleted all indices on try #{0}'.format(count)
)
retval = True
return retval
def __chunk_loop(self, chunk_list):
"""
Loop through deletes 3 times to ensure they complete
:arg chunk_list: A list of indices pre-chunked so it won't overload the
URL size limit.
"""
working_list = chunk_list
for count in range(1, 4): # Try 3 times
for i in working_list:
self.loggit.info("---deleting index {0}".format(i))
self.client.indices.delete(
index=utils.to_csv(working_list), master_timeout=self.master_timeout)
result = [i for i in working_list if i in utils.get_indices(self.client)]
if self._verify_result(result, count):
return
else:
working_list = result
self.loggit.error(
'Unable to delete the following indices after 3 attempts: '
'{0}'.format(result)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(self.index_list, 'delete_indices')
def do_action(self):
"""
Delete indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.info(
'Deleting {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.__chunk_loop(lst)
except Exception as err:
utils.report_failure(err)
class ForceMerge(object):
"""ForceMerge Action Class"""
def __init__(self, ilo, max_num_segments=None, delay=0):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg max_num_segments: Number of segments per shard to forceMerge
:arg delay: Number of seconds to delay between forceMerge operations
"""
utils.verify_index_list(ilo)
if not max_num_segments:
raise exceptions.MissingArgument('Missing value for "max_num_segments"')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `max_num_segments`
self.max_num_segments = max_num_segments
#: Instance variable.
#: Internally accessible copy of `delay`
self.delay = delay
self.loggit = logging.getLogger('curator.actions.forcemerge')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(
self.index_list, 'forcemerge',
max_num_segments=self.max_num_segments,
delay=self.delay,
)
def do_action(self):
"""
forcemerge indices in `index_list.indices`
"""
self.index_list.filter_closed()
self.index_list.filter_forceMerged(
max_num_segments=self.max_num_segments)
self.index_list.empty_list_check()
self.loggit.info(
'forceMerging {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
for index_name in self.index_list.indices:
self.loggit.info(
'forceMerging index {0} to {1} segments per shard. '
'Please wait...'.format(index_name, self.max_num_segments)
)
self.client.indices.forcemerge(
index=index_name, max_num_segments=self.max_num_segments)
if self.delay > 0:
self.loggit.info(
'Pausing for {0} seconds before continuing...'.format(self.delay))
time.sleep(self.delay)
except Exception as err:
utils.report_failure(err)
class IndexSettings(object):
"""Index Settings Action Class"""
def __init__(
self, ilo, index_settings={}, ignore_unavailable=False, preserve_existing=False):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg index_settings: A dictionary structure with one or more index
settings to change.
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg preserve_existing: Whether to update existing settings. If set to
``True`` existing settings on an index remain unchanged. The default
is ``False``
"""
utils.verify_index_list(ilo)
if not index_settings:
raise exceptions.MissingArgument('Missing value for "index_settings"')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `index_settings`
self.body = index_settings
#: Instance variable.
#: Internal reference to `ignore_unavailable`
self.ignore_unavailable = ignore_unavailable
#: Instance variable.
#: Internal reference to `preserve_settings`
self.preserve_existing = preserve_existing
self.loggit = logging.getLogger('curator.actions.index_settings')
self._body_check()
def _body_check(self):
# The body only passes the skimpiest of requirements by having 'index'
# as the only root-level key, and having a 'dict' as its value
if len(self.body) == 1:
if 'index' in self.body:
if isinstance(self.body['index'], dict):
return True
raise exceptions.ConfigurationError(
'Bad value for "index_settings": {0}'.format(self.body))
def _static_settings(self):
return [
'number_of_shards',
'shard',
'codec',
'routing_partition_size',
]
def _dynamic_settings(self):
return [
'number_of_replicas',
'auto_expand_replicas',
'refresh_interval',
'max_result_window',
'max_rescore_window',
'blocks',
'max_refresh_listeners',
'mapping',
'merge',
'translog',
]
def _settings_check(self):
# Detect if even one index is open. Save all found to open_index_list.
open_index_list = []
open_indices = False
for idx in self.index_list.indices:
if self.index_list.index_info[idx]['state'] == 'open':
open_index_list.append(idx)
open_indices = True
for k in self.body['index']:
if k in self._static_settings():
if not self.ignore_unavailable:
if open_indices:
raise exceptions.ActionError(
'Static Setting "{0}" detected with open indices: '
'{1}. Static settings can only be used with closed '
'indices. Recommend filtering out open indices, '
'or setting ignore_unavailable to True'.format(
k, open_index_list
)
)
elif k in self._dynamic_settings():
                # Dynamic settings should be applicable to open or closed indices
# Act here if the case is different for some settings.
pass
else:
self.loggit.warn(
'"{0}" is not a setting Curator recognizes and may or may '
'not work.'.format(k)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(self.index_list, 'indexsettings', **self.body)
def do_action(self):
"""Actually do the action"""
self._settings_check()
# Ensure that the open indices filter applied in _settings_check()
# didn't result in an empty list (or otherwise empty)
self.index_list.empty_list_check()
self.loggit.info(
'Applying index settings to {0} indices: '
'{1}'.format(len(self.index_list.indices), self.index_list.indices)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
response = self.client.indices.put_settings(
index=utils.to_csv(lst), body=self.body,
ignore_unavailable=self.ignore_unavailable,
preserve_existing=self.preserve_existing
)
self.loggit.debug('PUT SETTINGS RESPONSE: {0}'.format(response))
except Exception as err:
utils.report_failure(err)
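# Illustrative sketch, not part of the original module: the shape _body_check expects
# for `index_settings` is a single root-level 'index' key mapping to a dict of settings.
# The specific settings below are hypothetical examples of dynamic settings.
def _example_index_settings(ilo):
    return IndexSettings(
        ilo,
        index_settings={'index': {'refresh_interval': '30s', 'number_of_replicas': 0}},
        ignore_unavailable=False,
        preserve_existing=False,
    )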
class Open(object):
"""Open Action Class"""
def __init__(self, ilo):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
"""
utils.verify_index_list(ilo)
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
self.loggit = logging.getLogger('curator.actions.open')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(self.index_list, 'open')
def do_action(self):
"""
Open closed indices in `index_list.indices`
"""
self.index_list.empty_list_check()
self.loggit.info(
'Opening {0} selected indices: {1}'.format(
len(self.index_list.indices),
self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.open(index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class Replicas(object):
"""Replica Action Class"""
def __init__(
self, ilo, count=None, wait_for_completion=False, wait_interval=9, max_wait=-1):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg count: The count of replicas per shard
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `False`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
"""
utils.verify_index_list(ilo)
# It's okay for count to be zero
if count == 0:
pass
elif not count:
raise exceptions.MissingArgument('Missing value for "count"')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `count`
self.count = count
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
self.loggit = logging.getLogger('curator.actions.replicas')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
utils.show_dry_run(self.index_list, 'replicas', count=self.count)
def do_action(self):
"""
Update the replica count of indices in `index_list.indices`
"""
self.loggit.debug(
            'Cannot update the replica count of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Setting the replica count to {0} for {1} indices: '
'{2}'.format(self.count, len(self.index_list.indices), self.index_list.indices)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.put_settings(
index=utils.to_csv(lst),
body={'number_of_replicas': self.count}
)
if self.wfc and self.count > 0:
self.loggit.debug(
'Waiting for shards to complete replication for '
'indices: {0}'.format(utils.to_csv(lst))
)
utils.wait_for_it(
self.client, 'replicas',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
class Rollover(object):
"""Rollover Action Class"""
def __init__(
self, client, name, conditions, new_index=None, extra_settings=None,
wait_for_active_shards=1
):
"""
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:arg name: The name of the single-index-mapped alias to test for
rollover conditions.
        :arg new_index: The new index name
:arg conditions: A dictionary of conditions to test
:arg extra_settings: Must be either `None`, or a dictionary of settings
to apply to the new index on rollover. This is used in place of
        `settings` in the Rollover API, mostly because it already exists
in other places here in Curator
:arg wait_for_active_shards: The number of shards expected to be active
before returning.
"""
self.loggit = logging.getLogger('curator.actions.rollover')
if not isinstance(conditions, dict):
raise exceptions.ConfigurationError('"conditions" must be a dictionary')
else:
self.loggit.debug('"conditions" is {0}'.format(conditions))
if not isinstance(extra_settings, dict) and extra_settings is not None:
raise exceptions.ConfigurationError(
'"extra_settings" must be a dictionary or None')
utils.verify_client_object(client)
#: Instance variable.
#: The Elasticsearch Client object
self.client = client
#: Instance variable.
#: Internal reference to `conditions`
self.conditions = self._check_max_size(conditions)
#: Instance variable.
#: Internal reference to `extra_settings`
self.settings = extra_settings
#: Instance variable.
#: Internal reference to `new_index`
self.new_index = utils.parse_date_pattern(new_index) if new_index else new_index
#: Instance variable.
#: Internal reference to `wait_for_active_shards`
self.wait_for_active_shards = wait_for_active_shards
# Verify that `conditions` and `settings` are good?
# Verify that `name` is an alias, and is only mapped to one index.
if utils.rollable_alias(client, name):
self.name = name
else:
raise ValueError(
'Unable to perform index rollover with alias '
'"{0}". See previous logs for more details.'.format(name)
)
def _check_max_size(self, conditions):
"""
Ensure that if ``max_size`` is specified, that ``self.client``
is running 6.1 or higher.
"""
if 'max_size' in conditions:
version = utils.get_version(self.client)
if version < (6, 1, 0):
raise exceptions.ConfigurationError(
'Your version of elasticsearch ({0}) does not support '
'the max_size rollover condition. It is only supported '
'in versions 6.1.0 and up.'.format(version)
)
return conditions
def body(self):
"""
Create a body from conditions and settings
"""
retval = {}
retval['conditions'] = self.conditions
if self.settings:
retval['settings'] = self.settings
return retval
def log_result(self, result):
"""
Log the results based on whether the index rolled over or not
"""
dryrun_string = ''
if result['dry_run']:
dryrun_string = 'DRY-RUN: '
self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result))
rollover_string = '{0}Old index {1} rolled over to new index {2}'.format(
dryrun_string,
result['old_index'],
result['new_index']
)
        # Success is determined by at least one condition being True
success = False
for k in list(result['conditions'].keys()):
if result['conditions'][k]:
success = True
if result['dry_run'] and success: # log "successful" dry-run
self.loggit.info(rollover_string)
elif result['rolled_over']:
self.loggit.info(rollover_string)
else:
self.loggit.info(
'{0}Rollover conditions not met. Index {1} not rolled over.'.format(
dryrun_string,
result['old_index'])
)
def doit(self, dry_run=False):
"""
        This exists solely to avoid duplicating code in both
        `do_dry_run` and `do_action`
"""
return self.client.indices.rollover(
alias=self.name,
new_index=self.new_index,
body=self.body(),
dry_run=dry_run,
wait_for_active_shards=self.wait_for_active_shards,
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.log_result(self.doit(dry_run=True))
def do_action(self):
"""
Rollover the index referenced by alias `name`
"""
self.loggit.info('Performing index rollover')
try:
self.log_result(self.doit())
except Exception as err:
utils.report_failure(err)
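# Illustrative sketch, not part of the original module: constructing a Rollover action.
# The alias name and condition values are hypothetical; `conditions` must be a dict,
# and 'max_size' is only accepted against Elasticsearch 6.1.0+ (see _check_max_size).
def _example_rollover(client):
    return Rollover(
        client,
        name='logs-write',                                  # single-index-mapped alias
        conditions={'max_age': '1d', 'max_docs': 1000000},  # roll when either is met
        new_index=None,                                     # let Elasticsearch derive the name
    )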
class DeleteSnapshots(object):
"""Delete Snapshots Action Class"""
def __init__(self, slo, retry_interval=120, retry_count=3):
"""
:arg slo: A :class:`curator.snapshotlist.SnapshotList` object
        :arg retry_interval: Number of seconds to delay between retries. Default:
120 (seconds)
:arg retry_count: Number of attempts to make. Default: 3
"""
utils.verify_snapshot_list(slo)
#: Instance variable.
#: The Elasticsearch Client object derived from `slo`
self.client = slo.client
#: Instance variable.
#: Internally accessible copy of `retry_interval`
self.retry_interval = retry_interval
#: Instance variable.
#: Internally accessible copy of `retry_count`
self.retry_count = retry_count
#: Instance variable.
#: Internal reference to `slo`
self.snapshot_list = slo
#: Instance variable.
#: The repository name derived from `slo`
self.repository = slo.repository
self.loggit = logging.getLogger('curator.actions.delete_snapshots')
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
mykwargs = {
'repository' : self.repository,
'retry_interval' : self.retry_interval,
'retry_count' : self.retry_count,
}
for snap in self.snapshot_list.snapshots:
self.loggit.info(
'DRY-RUN: delete_snapshot: {0} with arguments: {1}'.format(snap, mykwargs))
def do_action(self):
"""
Delete snapshots in `slo`
Retry up to `retry_count` times, pausing `retry_interval`
seconds between retries.
"""
self.snapshot_list.empty_list_check()
self.loggit.info(
'Deleting {0} selected snapshots: {1}'.format(
len(self.snapshot_list.snapshots),
self.snapshot_list.snapshots
)
)
if not utils.safe_to_snap(
self.client, repository=self.repository,
retry_interval=self.retry_interval, retry_count=self.retry_count
):
raise exceptions.FailedExecution(
'Unable to delete snapshot(s) because a snapshot is in '
'state "IN_PROGRESS"')
try:
for snap in self.snapshot_list.snapshots:
self.loggit.info('Deleting snapshot {0}...'.format(snap))
self.client.snapshot.delete(
repository=self.repository, snapshot=snap)
except Exception as err:
utils.report_failure(err)
class Reindex(object):
"""Reindex Action Class"""
def __init__(
self, ilo, request_body, refresh=True, requests_per_second=-1, slices=1, timeout=60,
wait_for_active_shards=1, wait_for_completion=True, max_wait=-1, wait_interval=9,
remote_url_prefix=None, remote_ssl_no_validate=None, remote_certificate=None,
remote_client_cert=None, remote_client_key=None, remote_aws_key=None,
remote_aws_secret_key=None, remote_aws_region=None, remote_filters={},
migration_prefix='', migration_suffix=''
):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg request_body: The body to send to
:py:meth:`elasticsearch.Elasticsearch.reindex`, which must be complete and
usable, as Curator will do no vetting of the request_body. If it
fails to function, Curator will return an exception.
:arg refresh: Whether to refresh the entire target index after the
operation is complete. (default: `True`)
:type refresh: bool
:arg requests_per_second: The throttle to set on this request in
sub-requests per second. ``-1`` means set no throttle as does
``unlimited`` which is the only non-float this accepts. (default:
``-1``)
:arg slices: The number of slices this task should be divided into. 1
means the task will not be sliced into subtasks. (default: ``1``)
:arg timeout: The length in seconds each individual bulk request should
wait for shards that are unavailable. (default: ``60``)
:arg wait_for_active_shards: Sets the number of shard copies that must
            be active before proceeding with the reindex operation. The default of
            ``1`` means the primary shard only. Set to ``all`` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:arg remote_url_prefix: `Optional` url prefix, if needed to reach the
Elasticsearch API (i.e., it's not at the root level)
:type remote_url_prefix: str
:arg remote_ssl_no_validate: If `True`, do not validate the certificate
chain. This is an insecure option and you will see warnings in the
log output.
:type remote_ssl_no_validate: bool
:arg remote_certificate: Path to SSL/TLS certificate
:arg remote_client_cert: Path to SSL/TLS client certificate (public key)
:arg remote_client_key: Path to SSL/TLS private key
:arg remote_aws_key: AWS IAM Access Key (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_aws_secret_key: AWS IAM Secret Access Key (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_aws_region: AWS Region (Only used if the
:mod:`requests-aws4auth` python module is installed)
:arg remote_filters: Apply these filters to the remote client for
remote index selection.
:arg migration_prefix: When migrating, prepend this value to the index
name.
:arg migration_suffix: When migrating, append this value to the index
name.
"""
self.loggit = logging.getLogger('curator.actions.reindex')
utils.verify_index_list(ilo)
# Normally, we'd check for an empty list here. But since we can reindex
# from remote, we might just be starting with an empty one.
# ilo.empty_list_check()
if not isinstance(request_body, dict):
raise exceptions.ConfigurationError('"request_body" is not of type dictionary')
#: Instance variable.
#: Internal reference to `request_body`
self.body = request_body
self.loggit.debug('REQUEST_BODY = {0}'.format(request_body))
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `refresh`
self.refresh = refresh
#: Instance variable.
#: Internal reference to `requests_per_second`
self.requests_per_second = requests_per_second
#: Instance variable.
#: Internal reference to `slices`
self.slices = slices
#: Instance variable.
#: Internal reference to `timeout`, and add "s" for seconds.
self.timeout = '{0}s'.format(timeout)
#: Instance variable.
#: Internal reference to `wait_for_active_shards`
self.wait_for_active_shards = wait_for_active_shards
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable.
#: Internal reference to `migration_prefix`
self.mpfx = migration_prefix
#: Instance variable.
#: Internal reference to `migration_suffix`
self.msfx = migration_suffix
# This is for error logging later...
self.remote = False
if 'remote' in self.body['source']:
self.remote = True
self.migration = False
if self.body['dest']['index'] == 'MIGRATION':
self.migration = True
if self.migration:
if not self.remote and not self.mpfx and not self.msfx:
raise exceptions.ConfigurationError(
'MIGRATION can only be used locally with one or both of '
'migration_prefix or migration_suffix.'
)
# REINDEX_SELECTION is the designated token. If you use this for the
# source "index," it will be replaced with the list of indices from the
# provided 'ilo' (index list object).
if self.body['source']['index'] == 'REINDEX_SELECTION' \
and not self.remote:
self.body['source']['index'] = self.index_list.indices
# Remote section
elif self.remote:
self.loggit.debug('Remote reindex request detected')
if 'host' not in self.body['source']['remote']:
raise exceptions.ConfigurationError('Missing remote "host"')
rclient_info = {}
for k in ['host', 'username', 'password']:
rclient_info[k] = self.body['source']['remote'][k] \
if k in self.body['source']['remote'] else None
rhost = rclient_info['host']
try:
# Save these for logging later
_ = rhost.split(':')
self.remote_port = _[2]
self.remote_host = _[1][2:]
except Exception as err:
raise exceptions.ConfigurationError(
'Host must be in the form [scheme]://[host]:[port] but '
'was [{0}]'.format(rhost)
)
rhttp_auth = '{0}:{1}'.format(
rclient_info['username'], rclient_info['password']) \
if (rclient_info['username'] and rclient_info['password']) else None
if rhost[:5] == 'http:':
use_ssl = False
elif rhost[:5] == 'https':
use_ssl = True
else:
raise exceptions.ConfigurationError(
'Host must be in URL format. You provided: '
'{0}'.format(rclient_info['host'])
)
# Let's set a decent remote timeout for initially reading
# the indices on the other side, and collecting their metadata
remote_timeout = 180
# The rest only applies if using filters for remote indices
if self.body['source']['index'] == 'REINDEX_SELECTION':
self.loggit.debug('Filtering indices from remote')
from .indexlist import IndexList
self.loggit.debug(
'Remote client args: '
'host={0} '
'http_auth={1} '
'url_prefix={2} '
'use_ssl={3} '
'ssl_no_validate={4} '
'certificate={5} '
'client_cert={6} '
'client_key={7} '
'aws_key={8} '
'aws_secret_key={9} '
'aws_region={10} '
'timeout={11} '
'skip_version_test=True'.format(
rhost,
rhttp_auth,
remote_url_prefix,
use_ssl,
remote_ssl_no_validate,
remote_certificate,
remote_client_cert,
remote_client_key,
remote_aws_key,
remote_aws_secret_key,
remote_aws_region,
remote_timeout
)
)
try: # let's try to build a remote connection with these!
rclient = utils.get_client(
host=rhost,
http_auth=rhttp_auth,
url_prefix=remote_url_prefix,
use_ssl=use_ssl,
ssl_no_validate=remote_ssl_no_validate,
certificate=remote_certificate,
client_cert=remote_client_cert,
client_key=remote_client_key,
aws_key=remote_aws_key,
aws_secret_key=remote_aws_secret_key,
aws_region=remote_aws_region,
skip_version_test=True,
timeout=remote_timeout
)
except Exception as err:
self.loggit.error(
'Unable to establish connection to remote Elasticsearch'
' with provided credentials/certificates/settings.'
)
utils.report_failure(err)
try:
rio = IndexList(rclient)
rio.iterate_filters({'filters': remote_filters})
try:
rio.empty_list_check()
except exceptions.NoIndices:
raise exceptions.FailedExecution(
'No actionable remote indices selected after '
'applying filters.'
)
self.body['source']['index'] = rio.indices
except Exception as err:
self.loggit.error(
'Unable to get/filter list of remote indices.'
)
utils.report_failure(err)
self.loggit.debug(
'Reindexing indices: {0}'.format(self.body['source']['index']))
def _get_request_body(self, source, dest):
body = deepcopy(self.body)
body['source']['index'] = source
body['dest']['index'] = dest
return body
def _get_reindex_args(self, source, dest):
# Always set wait_for_completion to False. Let 'utils.wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
reindex_args = {
'body':self._get_request_body(source, dest), 'refresh':self.refresh,
'requests_per_second': self.requests_per_second,
'timeout': self.timeout,
'wait_for_active_shards': self.wait_for_active_shards,
'wait_for_completion': False,
'slices': self.slices
}
version = utils.get_version(self.client)
if version < (5, 1, 0):
self.loggit.info(
'Your version of elasticsearch ({0}) does not support '
'sliced scroll for reindex, so that setting will not be '
'used'.format(version)
)
del reindex_args['slices']
return reindex_args
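    # Illustrative sketch only for _get_reindex_args() above (index names are
    # hypothetical): with the default arguments (refresh=True,
    # requests_per_second=-1, slices=1, timeout=60, wait_for_active_shards=1),
    # reindexing 'index-a' into 'index-b' produces roughly:
    #   {'body': {'source': {'index': 'index-a'}, 'dest': {'index': 'index-b'}},
    #    'refresh': True, 'requests_per_second': -1, 'timeout': '60s',
    #    'wait_for_active_shards': 1, 'wait_for_completion': False, 'slices': 1}
    # with 'slices' removed when the cluster version is below 5.1.0.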
def get_processed_items(self, task_id):
"""
This function calls client.tasks.get with the provided `task_id`. It will get the value
from ``'response.total'`` as the total number of elements processed during reindexing.
If the value is not found, it will return -1
:arg task_id: A task_id which ostensibly matches a task searchable in the
tasks API.
"""
try:
task_data = self.client.tasks.get(task_id=task_id)
except Exception as err:
raise exceptions.CuratorException(
'Unable to obtain task information for task_id "{0}". Exception '
'{1}'.format(task_id, err)
)
total_processed_items = -1
task = task_data['task']
if task['action'] == 'indices:data/write/reindex':
self.loggit.debug('It\'s a REINDEX TASK')
self.loggit.debug('TASK_DATA: {0}'.format(task_data))
self.loggit.debug('TASK_DATA keys: {0}'.format(list(task_data.keys())))
if 'response' in task_data:
response = task_data['response']
total_processed_items = response['total']
self.loggit.debug('total_processed_items = {0}'.format(total_processed_items))
return total_processed_items
def _post_run_quick_check(self, index_name, task_id):
# Check whether any documents were processed
# if no documents processed, the target index "dest" won't exist
processed_items = self.get_processed_items(task_id)
if processed_items == 0:
self.loggit.info(
'No items were processed. Will not check if target index "{0}" '
'exists'.format(index_name)
)
else:
# Verify the destination index is there after the fact
index_exists = self.client.indices.exists(index=index_name)
alias_instead = self.client.indices.exists_alias(name=index_name)
if not index_exists and not alias_instead:
self.loggit.error(
'The index described as "{0}" was not found after the reindex '
'operation. Check Elasticsearch logs for more '
'information.'.format(index_name)
)
if self.remote:
self.loggit.error(
'Did you forget to add "reindex.remote.whitelist: '
'{0}:{1}" to the elasticsearch.yml file on the '
'"dest" node?'.format(
self.remote_host, self.remote_port
)
)
raise exceptions.FailedExecution(
'Reindex failed. The index or alias identified by "{0}" was '
'not found.'.format(index_name)
)
def sources(self):
"""Generator for sources & dests"""
dest = self.body['dest']['index']
source_list = utils.ensure_list(self.body['source']['index'])
self.loggit.debug('source_list: {0}'.format(source_list))
        if not source_list or source_list == ['REINDEX_SELECTION']: # Empty list
raise exceptions.NoIndices
if not self.migration:
yield self.body['source']['index'], dest
# Loop over all sources (default will only be one)
else:
for source in source_list:
if self.migration:
dest = self.mpfx + source + self.msfx
yield source, dest
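    # Illustrative sketch only for sources() above: with migration_prefix='new-'
    # and migration_suffix='-2017', a source index named 'logs' yields the
    # (source, dest) pair ('logs', 'new-logs-2017').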
def show_run_args(self, source, dest):
"""
Show what will run
"""
return (
'request body: {0} with arguments: '
'refresh={1} '
'requests_per_second={2} '
'slices={3} '
'timeout={4} '
'wait_for_active_shards={5} '
'wait_for_completion={6}'.format(
self._get_request_body(source, dest),
self.refresh,
self.requests_per_second,
self.slices,
self.timeout,
self.wait_for_active_shards,
self.wfc
)
)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
for source, dest in self.sources():
self.loggit.info(
'DRY-RUN: REINDEX: {0}'.format(self.show_run_args(source, dest))
)
def do_action(self):
"""
Execute :py:meth:`elasticsearch.Elasticsearch.reindex` operation with the
provided request_body and arguments.
"""
try:
# Loop over all sources (default will only be one)
for source, dest in self.sources():
self.loggit.info('Commencing reindex operation')
self.loggit.debug(
'REINDEX: {0}'.format(self.show_run_args(source, dest)))
response = self.client.reindex(**self._get_reindex_args(source, dest))
self.loggit.debug('TASK ID = {0}'.format(response['task']))
if self.wfc:
utils.wait_for_it(
self.client, 'reindex', task_id=response['task'],
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self._post_run_quick_check(dest, response['task'])
else:
self.loggit.warn(
'"wait_for_completion" set to {0}. Remember '
'to check task_id "{1}" for successful completion '
'manually.'.format(self.wfc, response['task'])
)
except exceptions.NoIndices as err:
raise exceptions.NoIndices(
'Source index must be list of actual indices. '
'It must not be an empty list.')
except Exception as err:
utils.report_failure(err)
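# A minimal usage sketch for the Reindex class above (illustrative; assumes an
# existing curator.indexlist.IndexList object named `ilo`):
#
#     request_body = {'source': {'index': 'REINDEX_SELECTION'},
#                     'dest': {'index': 'new-index'}}
#     reindex = Reindex(ilo, request_body, wait_for_completion=True)
#     reindex.do_dry_run()    # log what would be done
#     reindex.do_action()     # perform the reindex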
class Snapshot(object):
"""Snapshot Action Class"""
def __init__(
self, ilo, repository=None, name=None, ignore_unavailable=False,
include_global_state=True, partial=False, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False
):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg repository: The Elasticsearch snapshot repository to use
:arg name: What to name the snapshot.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:type wait_for_completion: bool
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:arg ignore_unavailable: Ignore unavailable shards/indices.
(default: `False`)
:type ignore_unavailable: bool
:arg include_global_state: Store cluster global state with snapshot.
(default: `True`)
:type include_global_state: bool
:arg partial: Do not fail if primary shard is unavailable. (default:
`False`)
:type partial: bool
:arg skip_repo_fs_check: Do not validate write access to repository on
all cluster nodes before proceeding. (default: `False`). Useful for
shared filesystems where intermittent timeouts can affect
validation, but won't likely affect snapshot success.
:type skip_repo_fs_check: bool
"""
utils.verify_index_list(ilo)
# Check here and don't bother with the rest of this if there are no
# indices in the index list.
ilo.empty_list_check()
if not utils.repository_exists(ilo.client, repository=repository):
raise exceptions.ActionError(
'Cannot snapshot indices to missing repository: '
'{0}'.format(repository)
)
if not name:
raise exceptions.MissingArgument('No value for "name" provided.')
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: The parsed version of `name`
self.name = utils.parse_datemath(self.client, utils.parse_date_pattern(name))
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internally accessible copy of `repository`
self.repository = repository
#: Instance variable.
#: Internally accessible copy of `wait_for_completion`
self.wait_for_completion = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable.
#: Internally accessible copy of `skip_repo_fs_check`
self.skip_repo_fs_check = skip_repo_fs_check
self.state = None
#: Instance variable.
#: Populated at instance creation time by calling
#: :mod:`curator.utils.utils.create_snapshot_body` with `ilo.indices` and the
#: provided arguments: `ignore_unavailable`, `include_global_state`,
#: `partial`
self.body = utils.create_snapshot_body(
ilo.indices,
ignore_unavailable=ignore_unavailable,
include_global_state=include_global_state,
partial=partial
)
self.loggit = logging.getLogger('curator.actions.snapshot')
def get_state(self):
"""
Get the state of the snapshot
"""
try:
self.state = self.client.snapshot.get(
repository=self.repository,
snapshot=self.name)['snapshots'][0]['state']
return self.state
except IndexError:
raise exceptions.CuratorException(
'Snapshot "{0}" not found in repository '
'"{1}"'.format(self.name, self.repository)
)
def report_state(self):
"""
Log the state of the snapshot and raise an exception if the state is
not ``SUCCESS``
"""
self.get_state()
if self.state == 'SUCCESS':
self.loggit.info('Snapshot {0} successfully completed.'.format(self.name))
else:
            msg = 'Snapshot {0} completed with state: {1}'.format(self.name, self.state)
self.loggit.error(msg)
raise exceptions.FailedSnapshot(msg)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: snapshot: {0} in repository {1} with arguments: '
'{2}'.format(self.name, self.repository, self.body)
)
def do_action(self):
"""
Snapshot indices in `index_list.indices`, with options passed.
"""
if not self.skip_repo_fs_check:
utils.test_repo_fs(self.client, self.repository)
if utils.snapshot_running(self.client):
raise exceptions.SnapshotInProgress('Snapshot already in progress.')
try:
self.loggit.info(
'Creating snapshot "{0}" from indices: {1}'.format(
self.name, self.index_list.indices
)
)
# Always set wait_for_completion to False. Let 'utils.wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
self.client.snapshot.create(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wait_for_completion:
utils.wait_for_it(
self.client, 'snapshot', snapshot=self.name,
repository=self.repository,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self.report_state()
else:
self.loggit.warn(
'"wait_for_completion" set to {0}.'
'Remember to check for successful completion '
'manually.'.format(self.wait_for_completion)
)
except Exception as err:
utils.report_failure(err)
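# A minimal usage sketch for the Snapshot class above (illustrative; assumes an
# existing IndexList `ilo` and a registered repository named 'my_backups'):
#
#     snap = Snapshot(ilo, repository='my_backups', name='curator-%Y%m%d',
#                     wait_for_completion=True)
#     snap.do_action()    # report_state() is called for you when waiting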
class Restore(object):
"""Restore Action Class"""
def __init__(
self, slo, name=None, indices=None, include_aliases=False, ignore_unavailable=False,
include_global_state=False, partial=False, rename_pattern=None,
rename_replacement=None, extra_settings={}, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False
):
"""
:arg slo: A :class:`curator.snapshotlist.SnapshotList` object
:arg name: Name of the snapshot to restore. If no name is provided, it
will restore the most recent snapshot by age.
:type name: str
:arg indices: A list of indices to restore. If no indices are provided,
it will restore all indices in the snapshot.
:type indices: list
:arg include_aliases: If set to `True`, restore aliases with the
indices. (default: `False`)
:type include_aliases: bool
:arg ignore_unavailable: Ignore unavailable shards/indices.
(default: `False`)
:type ignore_unavailable: bool
:arg include_global_state: Restore cluster global state with snapshot.
(default: `False`)
:type include_global_state: bool
:arg partial: Do not fail if primary shard is unavailable. (default:
`False`)
:type partial: bool
:arg rename_pattern: A regular expression pattern with one or more
captures, e.g. ``index_(.+)``
:type rename_pattern: str
:arg rename_replacement: A target index name pattern with `$#` numbered
references to the captures in ``rename_pattern``, e.g.
``restored_index_$1``
:type rename_replacement: str
:arg extra_settings: Extra settings, including shard count and settings
to omit. For more information see
https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html#_changing_index_settings_during_restore
:type extra_settings: dict, representing the settings.
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. (default: `True`)
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:type wait_for_completion: bool
:arg skip_repo_fs_check: Do not validate write access to repository on
all cluster nodes before proceeding. (default: `False`). Useful for
shared filesystems where intermittent timeouts can affect
validation, but won't likely affect snapshot success.
:type skip_repo_fs_check: bool
"""
self.loggit = logging.getLogger('curator.actions.snapshot')
utils.verify_snapshot_list(slo)
# Get the most recent snapshot.
most_recent = slo.most_recent()
self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent))
#: Instance variable.
#: Will use a provided snapshot name, or the most recent snapshot in slo
self.name = name if name else most_recent
# Stop here now, if it's not a successful snapshot.
if slo.snapshot_info[self.name]['state'] == 'PARTIAL' and partial:
self.loggit.warn(
'Performing restore of snapshot in state PARTIAL.')
elif slo.snapshot_info[self.name]['state'] != 'SUCCESS':
raise exceptions.CuratorException(
'Restore operation can only be performed on snapshots with '
'state "SUCCESS", or "PARTIAL" if partial=True.'
)
#: Instance variable.
#: The Elasticsearch Client object derived from `slo`
self.client = slo.client
#: Instance variable.
#: Internal reference to `slo`
self.snapshot_list = slo
#: Instance variable.
#: `repository` derived from `slo`
self.repository = slo.repository
if indices:
self.indices = utils.ensure_list(indices)
else:
self.indices = slo.snapshot_info[self.name]['indices']
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable version of ``rename_pattern``
self.rename_pattern = rename_pattern if rename_replacement is not None \
else ''
#: Instance variable version of ``rename_replacement``
self.rename_replacement = rename_replacement if rename_replacement \
is not None else ''
#: Also an instance variable version of ``rename_replacement``
#: but with Java regex group designations of ``$#``
#: converted to Python's ``\\#`` style.
self.py_rename_replacement = self.rename_replacement.replace('$', '\\')
#: Instance variable.
#: Internally accessible copy of `skip_repo_fs_check`
self.skip_repo_fs_check = skip_repo_fs_check
#: Instance variable.
#: Populated at instance creation time from the other options
self.body = {
'indices' : self.indices,
'include_aliases' : include_aliases,
'ignore_unavailable' : ignore_unavailable,
'include_global_state' : include_global_state,
'partial' : partial,
'rename_pattern' : self.rename_pattern,
'rename_replacement' : self.rename_replacement,
}
if extra_settings:
self.loggit.debug(
'Adding extra_settings to restore body: '
'{0}'.format(extra_settings)
)
try:
self.body.update(extra_settings)
            except Exception:
self.loggit.error(
'Unable to apply extra settings to restore body')
self.loggit.debug('REPOSITORY: {0}'.format(self.repository))
self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc))
self.loggit.debug(
'SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check))
self.loggit.debug('BODY: {0}'.format(self.body))
# Populate the expected output index list.
self._get_expected_output()
def _get_expected_output(self):
if not self.rename_pattern and not self.rename_replacement:
self.expected_output = self.indices
return # Don't stick around if we're not replacing anything
self.expected_output = []
for index in self.indices:
self.expected_output.append(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
self.loggit.debug('index: {0} replacement: {1}'.format(index, self.expected_output[-1]))
def report_state(self):
"""
Log the state of the restore
This should only be done if ``wait_for_completion`` is `True`, and only
after completing the restore.
"""
all_indices = utils.get_indices(self.client)
found_count = 0
missing = []
for index in self.expected_output:
if index in all_indices:
found_count += 1
self.loggit.info('Found restored index {0}'.format(index))
else:
missing.append(index)
if found_count == len(self.expected_output):
self.loggit.info('All indices appear to have been restored.')
else:
msg = (
'Some of the indices do not appear to have been restored. Missing: '
'{0}'.format(missing)
)
self.loggit.error(msg)
raise exceptions.FailedRestore(msg)
def do_dry_run(self):
"""
Log what the output would be, but take no action.
"""
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: '
'{2}'.format(
self.repository, self.name,
{'wait_for_completion' : self.wfc, 'body' : self.body}
)
)
for index in self.indices:
if self.rename_pattern and self.rename_replacement:
replacement_msg = 'as {0}'.format(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
else:
replacement_msg = ''
self.loggit.info(
'DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg)
)
def do_action(self):
"""
Restore indices with options passed.
"""
if not self.skip_repo_fs_check:
utils.test_repo_fs(self.client, self.repository)
if utils.snapshot_running(self.client):
raise exceptions.SnapshotInProgress('Cannot restore while a snapshot is in progress.')
try:
self.loggit.info(
'Restoring indices "{0}" from snapshot: {1}'.format(self.indices, self.name)
)
# Always set wait_for_completion to False. Let 'utils.wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
self.client.snapshot.restore(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wfc:
utils.wait_for_it(
self.client, 'restore', index_list=self.expected_output,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self.report_state()
else:
self.loggit.warn(
'"wait_for_completion" set to {0}. '
'Remember to check for successful completion '
'manually.'.format(self.wfc)
)
except Exception as err:
utils.report_failure(err)
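# A minimal usage sketch for the Restore class above (illustrative; assumes an
# existing curator.snapshotlist.SnapshotList object named `slo`):
#
#     restore = Restore(slo, rename_pattern='index_(.+)',
#                       rename_replacement='restored_index_$1')
#     restore.do_dry_run()
#     restore.do_action()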
class Shrink(object):
"""Shrink Action Class"""
def __init__(
self, ilo, shrink_node='DETERMINISTIC', node_filters={}, number_of_shards=1,
number_of_replicas=1, shrink_prefix='', shrink_suffix='-shrink', copy_aliases=False,
delete_after=True, post_allocation={}, wait_for_active_shards=1,
wait_for_rebalance=True, extra_settings={}, wait_for_completion=True, wait_interval=9,
max_wait=-1
):
"""
:arg ilo: A :class:`curator.indexlist.IndexList` object
:arg shrink_node: The node name to use as the shrink target, or
``DETERMINISTIC``, which will use the values in ``node_filters`` to
determine which node will be the shrink node.
:arg node_filters: If the value of ``shrink_node`` is ``DETERMINISTIC``,
the values in ``node_filters`` will be used while determining which
node to allocate the shards on before performing the shrink.
:type node_filters: dict, representing the filters
:arg number_of_shards: The number of shards the shrunk index should have
:arg number_of_replicas: The number of replicas for the shrunk index
:arg shrink_prefix: Prepend the shrunk index with this value
:arg shrink_suffix: Append the value to the shrunk index (default: `-shrink`)
        :arg copy_aliases: Whether to copy each source index's aliases to the target index
            after shrinking. The aliases are added to the target index and deleted from the
            source index at the same time. (default: `False`)
:type copy_aliases: bool
:arg delete_after: Whether to delete each index after shrinking. (default: `True`)
:type delete_after: bool
:arg post_allocation: If populated, the `allocation_type`, `key`, and
`value` will be applied to the shrunk index to re-route it.
:type post_allocation: dict, with keys `allocation_type`, `key`, and `value`
:arg wait_for_active_shards: The number of shards expected to be active before returning.
:arg extra_settings: Permitted root keys are `settings` and `aliases`.
:type extra_settings: dict
:arg wait_for_rebalance: Wait for rebalance. (default: `True`)
:type wait_for_rebalance: bool
:arg wait_for_completion: Wait (or not) for the operation
to complete before returning. You should not normally change this,
ever. (default: `True`)
:arg wait_interval: How long in seconds to wait between checks for
completion.
:arg max_wait: Maximum number of seconds to `wait_for_completion`
:type wait_for_completion: bool
"""
self.loggit = logging.getLogger('curator.actions.shrink')
utils.verify_index_list(ilo)
if 'permit_masters' not in node_filters:
node_filters['permit_masters'] = False
#: Instance variable. The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable. Internal reference to `ilo`
self.index_list = ilo
#: Instance variable. Internal reference to `shrink_node`
self.shrink_node = shrink_node
#: Instance variable. Internal reference to `node_filters`
self.node_filters = node_filters
#: Instance variable. Internal reference to `shrink_prefix`
self.shrink_prefix = shrink_prefix
#: Instance variable. Internal reference to `shrink_suffix`
self.shrink_suffix = shrink_suffix
#: Instance variable. Internal reference to `copy_aliases`
self.copy_aliases = copy_aliases
#: Instance variable. Internal reference to `delete_after`
self.delete_after = delete_after
#: Instance variable. Internal reference to `post_allocation`
self.post_allocation = post_allocation
#: Instance variable. Internal reference to `wait_for_rebalance`
self.wait_for_rebalance = wait_for_rebalance
#: Instance variable. Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable. How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable. How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable. Internal reference to `number_of_shards`
self.number_of_shards = number_of_shards
self.wait_for_active_shards = wait_for_active_shards
self.shrink_node_name = None
self.body = {
'settings': {
'index.number_of_shards' : number_of_shards,
'index.number_of_replicas' : number_of_replicas,
}
}
if extra_settings:
self._merge_extra_settings(extra_settings)
def _merge_extra_settings(self, extra_settings):
self.loggit.debug(
'Adding extra_settings to shrink body: '
'{0}'.format(extra_settings)
)
# Pop these here, otherwise we could overwrite our default number of
# shards and replicas
if 'settings' in extra_settings:
settings = extra_settings.pop('settings')
try:
self.body['settings'].update(settings)
except Exception as err:
raise exceptions.ConfigurationError(
'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format(
{'settings':settings}, err
)
)
if extra_settings:
try: # Apply any remaining keys, should there be any.
self.body.update(extra_settings)
except Exception as err:
raise exceptions.ConfigurationError(
'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format(
extra_settings, err
)
)
def _data_node(self, node_id):
roles = utils.node_roles(self.client, node_id)
name = utils.node_id_to_name(self.client, node_id)
        if 'data' not in roles:
self.loggit.info('Skipping node "{0}": non-data node'.format(name))
return False
if 'master' in roles and not self.node_filters['permit_masters']:
self.loggit.info('Skipping node "{0}": master node'.format(name))
return False
elif 'master' in roles and self.node_filters['permit_masters']:
self.loggit.warn(
'Not skipping node "{0}" which is a master node (not recommended), but '
'permit_masters is True'.format(name)
)
return True
else: # It does have `data` as a role.
return True
def _exclude_node(self, name):
if 'exclude_nodes' in self.node_filters:
if name in self.node_filters['exclude_nodes']:
self.loggit.info('Excluding node "{0}" due to node_filters'.format(name))
return True
return False
def _shrink_target(self, name):
return '{0}{1}{2}'.format(self.shrink_prefix, name, self.shrink_suffix)
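    # Illustrative sketch only for _shrink_target() above: with the default
    # shrink_prefix='' and shrink_suffix='-shrink', an index named 'logs' maps
    # to the target index 'logs-shrink'.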
def qualify_single_node(self):
"""Qualify a single node as a shrink target"""
node_id = utils.name_to_node_id(self.client, self.shrink_node)
if node_id:
self.shrink_node_id = node_id
self.shrink_node_name = self.shrink_node
else:
raise exceptions.ConfigurationError(
'Unable to find node named: "{0}"'.format(self.shrink_node))
if self._exclude_node(self.shrink_node):
raise exceptions.ConfigurationError(
'Node "{0}" listed for exclusion'.format(self.shrink_node))
if not self._data_node(node_id):
raise exceptions.ActionError(
'Node "{0}" is not usable as a shrink node'.format(self.shrink_node))
self.shrink_node_avail = (
self.client.nodes.stats()['nodes'][node_id]['fs']['total']['available_in_bytes']
)
def most_available_node(self):
"""
        Determine which data node has the most available free space and meets
        the other node filter settings.
"""
mvn_avail = 0
# mvn_total = 0
mvn_name = None
mvn_id = None
nodes = self.client.nodes.stats()['nodes']
for node_id in nodes:
name = nodes[node_id]['name']
if self._exclude_node(name):
self.loggit.debug('Node "{0}" excluded by node filters'.format(name))
continue
if not self._data_node(node_id):
self.loggit.debug('Node "{0}" is not a data node'.format(name))
continue
value = nodes[node_id]['fs']['total']['available_in_bytes']
if value > mvn_avail:
mvn_name = name
mvn_id = node_id
mvn_avail = value
# mvn_total = nodes[node_id]['fs']['total']['total_in_bytes']
self.shrink_node_name = mvn_name
self.shrink_node_id = mvn_id
self.shrink_node_avail = mvn_avail
# self.shrink_node_total = mvn_total
def route_index(self, idx, allocation_type, key, value):
"""Apply the indicated shard routing allocation"""
bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
routing = {bkey : value}
try:
self.client.indices.put_settings(index=idx, body=routing)
if self.wait_for_rebalance:
utils.wait_for_it(
self.client, 'allocation', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
else:
utils.wait_for_it(
self.client, 'relocate', index=idx, wait_interval=self.wait_interval,
max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
def __log_action(self, error_msg, dry_run=False):
if not dry_run:
raise exceptions.ActionError(error_msg)
else:
self.loggit.warn('DRY-RUN: {0}'.format(error_msg))
def _block_writes(self, idx):
block = {'index.blocks.write': True}
self.client.indices.put_settings(index=idx, body=block)
def _unblock_writes(self, idx):
unblock = {'index.blocks.write': False}
self.client.indices.put_settings(index=idx, body=unblock)
def _check_space(self, idx, dry_run=False):
# Disk watermark calculation is already baked into `available_in_bytes`
size = utils.index_size(self.client, idx, value='primaries')
padded = (size * 2) + (32 * 1024)
if padded < self.shrink_node_avail:
self.loggit.debug(
'Sufficient space available for 2x the size of index "{0}". Required: {1}, '
'available: {2}'.format(idx, padded, self.shrink_node_avail)
)
else:
error_msg = (
'Insufficient space available for 2x the size of index "{0}", shrinking will '
'exceed space available. Required: {1}, available: {2}'.format(
idx, padded, self.shrink_node_avail
)
)
self.__log_action(error_msg, dry_run)
def _check_node(self):
if self.shrink_node != 'DETERMINISTIC':
if not self.shrink_node_name:
self.qualify_single_node()
else:
self.most_available_node()
# At this point, we should have the three shrink-node identifying
# instance variables:
# - self.shrink_node_name
# - self.shrink_node_id
# - self.shrink_node_avail
# # - self.shrink_node_total - only if needed in the future
def _check_target_exists(self, idx, dry_run=False):
target = self._shrink_target(idx)
if self.client.indices.exists(target):
error_msg = 'Target index "{0}" already exists'.format(target)
self.__log_action(error_msg, dry_run)
def _check_doc_count(self, idx, dry_run=False):
max_docs = 2147483519
doc_count = self.client.indices.stats(idx)['indices'][idx]['primaries']['docs']['count']
if doc_count > (max_docs * self.number_of_shards):
error_msg = (
'Too many documents ({0}) to fit in {1} shard(s). Maximum number of docs per '
'shard is {2}'.format(doc_count, self.number_of_shards, max_docs)
)
self.__log_action(error_msg, dry_run)
def _check_shard_count(self, idx, src_shards, dry_run=False):
if self.number_of_shards >= src_shards:
error_msg = (
'Target number of shards ({0}) must be less than current number of shards ({1}) '
'in index "{2}"'.format(self.number_of_shards, src_shards, idx)
)
self.__log_action(error_msg, dry_run)
def _check_shard_factor(self, idx, src_shards, dry_run=False):
# Find the list of factors of src_shards
factors = [x for x in range(1, src_shards+1) if src_shards % x == 0]
# Pop the last one, because it will be the value of src_shards
factors.pop()
        if self.number_of_shards not in factors:
error_msg = (
'"{0}" is not a valid factor of {1} shards. Valid values are '
'{2}'.format(self.number_of_shards, src_shards, factors)
)
self.__log_action(error_msg, dry_run)
def _check_all_shards(self, idx):
shards = self.client.cluster.state(index=idx)['routing_table']['indices'][idx]['shards']
found = []
for shardnum in shards:
for shard_idx in range(0, len(shards[shardnum])):
if shards[shardnum][shard_idx]['node'] == self.shrink_node_id:
found.append(
{'shard': shardnum, 'primary': shards[shardnum][shard_idx]['primary']})
if len(shards) != len(found):
self.loggit.debug(
'Found these shards on node "{0}": {1}'.format(self.shrink_node_name, found))
raise exceptions.ActionError(
'Unable to shrink index "{0}" as not all shards were found on the designated '
'shrink node ({1}): {2}'.format(idx, self.shrink_node_name, found)
)
def pre_shrink_check(self, idx, dry_run=False):
"""Do a shrink preflight check"""
self.loggit.debug('BEGIN PRE_SHRINK_CHECK')
self.loggit.debug('Check that target exists')
self._check_target_exists(idx, dry_run)
self.loggit.debug('Check doc count constraints')
self._check_doc_count(idx, dry_run)
self.loggit.debug('Check shard count')
src_shards = int(self.client.indices.get(idx)[idx]['settings']['index']['number_of_shards'])
self._check_shard_count(idx, src_shards, dry_run)
self.loggit.debug('Check shard factor')
self._check_shard_factor(idx, src_shards, dry_run)
self.loggit.debug('Check node availability')
self._check_node()
self.loggit.debug('Check available disk space')
self._check_space(idx, dry_run)
self.loggit.debug('FINISH PRE_SHRINK_CHECK')
def do_copy_aliases(self, source_idx, target_idx):
"""Copy the aliases to the shrunk index"""
alias_actions = []
aliases = self.client.indices.get_alias(index=source_idx)
for alias in aliases[source_idx]['aliases']:
self.loggit.debug('alias: {0}'.format(alias))
alias_actions.append(
{'remove': {'index': source_idx, 'alias': alias}})
alias_actions.append(
{'add': {'index': target_idx, 'alias': alias}})
if alias_actions:
self.loggit.info('Copy alias actions: {0}'.format(alias_actions))
self.client.indices.update_aliases({'actions' : alias_actions})
def do_dry_run(self):
"""
Show what a regular run would do, but don't actually do it.
"""
self.index_list.filter_closed()
self.index_list.filter_by_shards(number_of_shards=self.number_of_shards)
self.index_list.empty_list_check()
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
for idx in lst: # Shrink can only be done one at a time...
target = self._shrink_target(idx)
self.pre_shrink_check(idx, dry_run=True)
self.loggit.info(
'DRY-RUN: Moving shards to shrink node: "{0}"'.format(
self.shrink_node_name
)
)
self.loggit.info(
'DRY-RUN: Shrinking index "{0}" to "{1}" with settings: {2}, '
'wait_for_active_shards={3}'.format(
idx, target, self.body, self.wait_for_active_shards
)
)
if self.post_allocation:
self.loggit.info(
'DRY-RUN: Applying post-shrink allocation rule "{0}" to index '
'"{1}"'.format(
'index.routing.allocation.{0}.{1}:{2}'.format(
self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
), target
)
)
if self.copy_aliases:
self.loggit.info(
'DRY-RUN: Copy source index aliases "{0}"'.format(
self.client.indices.get_alias(idx)
)
)
#self.do_copy_aliases(idx, target)
if self.delete_after:
self.loggit.info('DRY-RUN: Deleting source index "{0}"'.format(idx))
except Exception as err:
utils.report_failure(err)
def do_action(self):
"""Actually do the action"""
self.index_list.filter_closed()
self.index_list.filter_by_shards(number_of_shards=self.number_of_shards)
self.index_list.empty_list_check()
self.loggit.info(
'Shrinking {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
for idx in lst: # Shrink can only be done one at a time...
target = self._shrink_target(idx)
self.loggit.info('Source index: {0} -- Target index: {1}'.format(idx, target))
# Pre-check ensures disk space available for each pass of the loop
self.pre_shrink_check(idx)
# Route the index to the shrink node
self.loggit.info(
'Moving shards to shrink node: "{0}"'.format(self.shrink_node_name))
self.route_index(idx, 'require', '_name', self.shrink_node_name)
# Ensure a copy of each shard is present
self._check_all_shards(idx)
# Block writes on index
self._block_writes(idx)
# Wait for cluster to be green
utils.wait_for_it(
self.client, 'shrink', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
# Do the shrink
self.loggit.info(
'Shrinking index "{0}" to "{1}" with settings: {2}, wait_for_active_shards'
'={3}'.format(idx, target, self.body, self.wait_for_active_shards)
)
try:
self.client.indices.shrink(
index=idx, target=target, body=self.body,
wait_for_active_shards=self.wait_for_active_shards
)
# Wait for it to complete
if self.wfc:
self.loggit.debug(
'Wait for shards to complete allocation for index: '
'{0}'.format(target)
)
if self.wait_for_rebalance:
utils.wait_for_it(
self.client, 'shrink', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
else:
utils.wait_for_it(
self.client, 'relocate', index=target,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
if self.client.indices.exists(index=target):
self.loggit.error(
'Deleting target index "{0}" due to failure to complete '
'shrink'.format(target)
)
self.client.indices.delete(index=target)
raise exceptions.ActionError(
'Unable to shrink index "{0}" -- Error: {1}'.format(idx, err))
self.loggit.info('Index "{0}" successfully shrunk to "{1}"'.format(idx, target))
# Do post-shrink steps
# Unblock writes on index (just in case)
self._unblock_writes(idx)
## Post-allocation, if enabled
if self.post_allocation:
self.loggit.info(
'Applying post-shrink allocation rule "{0}" to index "{1}"'.format(
'index.routing.allocation.{0}.{1}:{2}'.format(
self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
), target
)
)
self.route_index(
target, self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
)
## Copy aliases, if flagged
if self.copy_aliases:
self.loggit.info('Copy source index aliases "{0}"'.format(idx))
self.do_copy_aliases(idx, target)
## Delete, if flagged
if self.delete_after:
self.loggit.info('Deleting source index "{0}"'.format(idx))
self.client.indices.delete(index=idx)
else: # Let's unset the routing we applied here.
self.loggit.info('Unassigning routing for source index: "{0}"'.format(idx))
self.route_index(idx, 'require', '_name', '')
except Exception as err:
# Just in case it fails after attempting to meet this condition
self._unblock_writes(idx)
utils.report_failure(err)
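# A minimal usage sketch for the Shrink class above (illustrative; assumes an
# existing IndexList `ilo` and that the chosen node has enough free space):
#
#     shrink = Shrink(ilo, shrink_node='DETERMINISTIC',
#                     node_filters={'permit_masters': False},
#                     number_of_shards=1, delete_after=True)
#     shrink.do_dry_run()
#     shrink.do_action()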
import logging
import re
import time
from copy import deepcopy
from datetime import datetime
from elasticsearch.exceptions import ConflictError, RequestError
from curator import exceptions, utils
class Alias(object):
def __init__(self, name=None, extra_settings={}, **kwargs):
if not name:
raise exceptions.MissingArgument('No value for "name" provided.')
self.name = utils.parse_date_pattern(name)
self.actions = []
self.client = None
self.extra_settings = extra_settings
self.loggit = logging.getLogger('curator.actions.alias')
self.warn_if_no_indices = False
def add(self, ilo, warn_if_no_indices=False):
utils.verify_index_list(ilo)
if not self.client:
self.client = ilo.client
self.name = utils.parse_datemath(self.client, self.name)
try:
ilo.empty_list_check()
except exceptions.NoIndices:
if warn_if_no_indices:
self.warn_if_no_indices = True
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to add to {0}'.format(self.name)
)
return
else:
raise exceptions.NoIndices('No indices to add to alias')
for index in ilo.working_list():
self.loggit.debug(
'Adding index {0} to alias {1} with extra settings '
'{2}'.format(index, self.name, self.extra_settings)
)
add_dict = {'add' : {'index' : index, 'alias': self.name}}
add_dict['add'].update(self.extra_settings)
self.actions.append(add_dict)
def remove(self, ilo, warn_if_no_indices=False):
utils.verify_index_list(ilo)
if not self.client:
self.client = ilo.client
self.name = utils.parse_datemath(self.client, self.name)
try:
ilo.empty_list_check()
except exceptions.NoIndices:
if warn_if_no_indices:
self.warn_if_no_indices = True
self.loggit.warn(
'No indices found after processing filters. '
'Nothing to remove from {0}'.format(self.name)
)
return
else:
raise exceptions.NoIndices('No indices to remove from alias')
aliases = self.client.indices.get_alias()
for index in ilo.working_list():
if index in aliases:
self.loggit.debug(
'Index {0} in get_aliases output'.format(index))
if self.name in aliases[index]['aliases']:
self.loggit.debug(
'Removing index {0} from alias '
'{1}'.format(index, self.name)
)
self.actions.append(
{'remove' : {'index' : index, 'alias': self.name}})
else:
self.loggit.debug(
'Can not remove: Index {0} is not associated with alias'
' {1}'.format(index, self.name)
)
def body(self):
if not self.actions:
if not self.warn_if_no_indices:
raise exceptions.ActionError('No "add" or "remove" operations')
else:
raise exceptions.NoIndices('No "adds" or "removes" found. Taking no action')
self.loggit.debug('Alias actions: {0}'.format(self.actions))
return {'actions' : self.actions}
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
for item in self.body()['actions']:
job = list(item.keys())[0]
index = item[job]['index']
alias = item[job]['alias']
self.loggit.info(
'DRY-RUN: alias: {0}ing index "{1}" {2} alias '
'"{3}"'.format(
job.rstrip('e'),
index,
                    'to' if job == 'add' else 'from',
alias
)
)
def do_action(self):
self.loggit.info('Updating aliases...')
self.loggit.info('Alias actions: {0}'.format(self.body()))
try:
self.client.indices.update_aliases(body=self.body())
except Exception as err:
utils.report_failure(err)
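# A minimal usage sketch for the Alias class above (illustrative; assumes two
# IndexList objects, `ilo_to_add` and `ilo_to_remove`):
#
#     alias = Alias(name='logs-current')
#     alias.add(ilo_to_add)
#     alias.remove(ilo_to_remove, warn_if_no_indices=True)
#     alias.do_action()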
class Allocation(object):
def __init__(
self, ilo, key=None, value=None, allocation_type='require', wait_for_completion=False,
wait_interval=3, max_wait=-1
):
utils.verify_index_list(ilo)
if not key:
raise exceptions.MissingArgument('No value for "key" provided')
if allocation_type not in ['require', 'include', 'exclude']:
raise ValueError(
'{0} is an invalid allocation_type. Must be one of "require", '
'"include", "exclude".'.format(allocation_type)
)
self.index_list = ilo
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.allocation')
bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
self.body = {bkey : value}
self.wfc = wait_for_completion
self.wait_interval = wait_interval
self.max_wait = max_wait
def do_dry_run(self):
utils.show_dry_run(self.index_list, 'allocation', body=self.body)
def do_action(self):
self.loggit.debug(
            'Cannot change shard routing allocation of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Updating {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
self.loggit.info('Updating index setting {0}'.format(self.body))
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.put_settings(
index=utils.to_csv(lst), body=self.body
)
if self.wfc:
self.loggit.debug(
'Waiting for shards to complete relocation for indices:'
' {0}'.format(utils.to_csv(lst))
)
utils.wait_for_it(
self.client, 'allocation',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
class Close(object):
def __init__(self, ilo, delete_aliases=False, skip_flush=False, ignore_sync_failures=False):
utils.verify_index_list(ilo)
self.index_list = ilo
self.delete_aliases = delete_aliases
self.skip_flush = skip_flush
self.ignore_sync_failures = ignore_sync_failures
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.close')
def do_dry_run(self):
utils.show_dry_run(
self.index_list, 'close', **{'delete_aliases':self.delete_aliases})
def do_action(self):
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Closing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
lst_as_csv = utils.to_csv(lst)
self.loggit.debug('CSV list of indices to close: {0}'.format(lst_as_csv))
if self.delete_aliases:
self.loggit.info('Deleting aliases from indices before closing.')
self.loggit.debug('Deleting aliases from: {0}'.format(lst))
try:
self.client.indices.delete_alias(index=lst_as_csv, name='_all')
self.loggit.debug('Deleted aliases from: {0}'.format(lst))
except Exception as err:
self.loggit.warn(
'Some indices may not have had aliases. Exception:'
' {0}'.format(err)
)
if not self.skip_flush:
try:
self.client.indices.flush_synced(index=lst_as_csv, ignore_unavailable=True)
except ConflictError as err:
if not self.ignore_sync_failures:
raise ConflictError(err.status_code, err.error, err.info)
else:
self.loggit.warn(
                                'Ignoring synced flush failures: '
'{0} {1}'.format(err.error, err.info)
)
self.client.indices.close(index=lst_as_csv, ignore_unavailable=True)
except Exception as err:
utils.report_failure(err)
class Freeze(object):
def __init__(self, ilo):
utils.verify_index_list(ilo)
self.index_list = ilo
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.freeze')
def do_dry_run(self):
utils.show_dry_run(
self.index_list, 'freeze')
def do_action(self):
self.index_list.empty_list_check()
self.loggit.info(
'Freezing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.xpack.indices.freeze(
index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class Unfreeze(object):
def __init__(self, ilo):
utils.verify_index_list(ilo)
self.index_list = ilo
self.client = ilo.client
self.loggit = logging.getLogger('curator.actions.unfreeze')
def do_dry_run(self):
utils.show_dry_run(
self.index_list, 'unfreeze')
def do_action(self):
self.index_list.empty_list_check()
self.loggit.info(
'Unfreezing {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.xpack.indices.unfreeze(
index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class ClusterRouting(object):
def __init__(
self, client, routing_type=None, setting=None, value=None, wait_for_completion=False,
wait_interval=9, max_wait=-1
):
utils.verify_client_object(client)
self.client = client
self.loggit = logging.getLogger('curator.actions.cluster_routing')
self.wfc = wait_for_completion
self.wait_interval = wait_interval
self.max_wait = max_wait
if setting != 'enable':
raise ValueError(
'Invalid value for "setting": {0}.'.format(setting)
)
if routing_type == 'allocation':
if value not in ['all', 'primaries', 'new_primaries', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
elif routing_type == 'rebalance':
if value not in ['all', 'primaries', 'replicas', 'none']:
raise ValueError(
'Invalid "value": {0} with "routing_type":'
'{1}.'.format(value, routing_type)
)
else:
raise ValueError(
'Invalid value for "routing_type": {0}.'.format(routing_type)
)
bkey = 'cluster.routing.{0}.{1}'.format(routing_type, setting)
self.body = {'transient' : {bkey : value}}
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: Update cluster routing settings with arguments: '
'{0}'.format(self.body)
)
def do_action(self):
self.loggit.info('Updating cluster settings: {0}'.format(self.body))
try:
self.client.cluster.put_settings(body=self.body)
if self.wfc:
self.loggit.debug(
'Waiting for shards to complete routing and/or rebalancing'
)
utils.wait_for_it(
self.client, 'cluster_routing',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
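# A minimal usage sketch for the ClusterRouting class above (illustrative;
# assumes an elasticsearch.Elasticsearch client object named `client`):
#
#     routing = ClusterRouting(client, routing_type='allocation',
#                              setting='enable', value='all',
#                              wait_for_completion=True)
#     routing.do_action()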
class CreateIndex(object):
def __init__(self, client, name, extra_settings={}, ignore_existing=False):
if not name:
raise exceptions.ConfigurationError('Value for "name" not provided.')
self.name = utils.parse_date_pattern(name)
self.body = extra_settings
self.ignore_existing = ignore_existing
self.client = client
self.loggit = logging.getLogger('curator.actions.create_index')
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: create_index "%s" with arguments: '
'%s' % (self.name, self.body)
)
def do_action(self):
self.loggit.info(
'Creating index "{0}" with settings: '
'{1}'.format(self.name, self.body)
)
try:
self.client.indices.create(index=self.name, body=self.body)
except RequestError as err:
match_list = ["index_already_exists_exception", "resource_already_exists_exception"]
if err.error in match_list and self.ignore_existing:
self.loggit.warn('Index %s already exists.' % self.name)
else:
raise exceptions.FailedExecution('Index %s already exists.' % self.name)
except Exception as err:
utils.report_failure(err)
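# A minimal usage sketch for the CreateIndex class above (illustrative; assumes
# an elasticsearch.Elasticsearch client object named `client`):
#
#     ci = CreateIndex(client, name='logs-%Y.%m.%d',
#                      extra_settings={'settings': {'number_of_shards': 1}})
#     ci.do_action()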
class DeleteIndices(object):
def __init__(self, ilo, master_timeout=30):
utils.verify_index_list(ilo)
if not isinstance(master_timeout, int):
raise TypeError(
'Incorrect type for "master_timeout": {0}. '
'Should be integer value.'.format(type(master_timeout))
)
self.index_list = ilo
self.client = ilo.client
self.master_timeout = str(master_timeout) + 's'
self.loggit = logging.getLogger('curator.actions.delete_indices')
self.loggit.debug('master_timeout value: {0}'.format(
self.master_timeout))
def _verify_result(self, result, count):
if isinstance(result, list) and result:
self.loggit.error(
'The following indices failed to delete on try '
'#{0}:'.format(count)
)
for idx in result:
self.loggit.error("---{0}".format(idx))
retval = False
else:
self.loggit.debug(
'Successfully deleted all indices on try #{0}'.format(count)
)
retval = True
return retval
def __chunk_loop(self, chunk_list):
working_list = chunk_list
for count in range(1, 4):
for i in working_list:
self.loggit.info("---deleting index {0}".format(i))
self.client.indices.delete(
index=utils.to_csv(working_list), master_timeout=self.master_timeout)
result = [i for i in working_list if i in utils.get_indices(self.client)]
if self._verify_result(result, count):
return
else:
working_list = result
self.loggit.error(
'Unable to delete the following indices after 3 attempts: '
'{0}'.format(result)
)
def do_dry_run(self):
utils.show_dry_run(self.index_list, 'delete_indices')
def do_action(self):
self.index_list.empty_list_check()
self.loggit.info(
'Deleting {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.__chunk_loop(lst)
except Exception as err:
utils.report_failure(err)
class ForceMerge(object):
def __init__(self, ilo, max_num_segments=None, delay=0):
utils.verify_index_list(ilo)
if not max_num_segments:
raise exceptions.MissingArgument('Missing value for "max_num_segments"')
self.client = ilo.client
self.index_list = ilo
self.max_num_segments = max_num_segments
self.delay = delay
self.loggit = logging.getLogger('curator.actions.forcemerge')
def do_dry_run(self):
utils.show_dry_run(
self.index_list, 'forcemerge',
max_num_segments=self.max_num_segments,
delay=self.delay,
)
def do_action(self):
self.index_list.filter_closed()
self.index_list.filter_forceMerged(
max_num_segments=self.max_num_segments)
self.index_list.empty_list_check()
self.loggit.info(
'forceMerging {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
for index_name in self.index_list.indices:
self.loggit.info(
'forceMerging index {0} to {1} segments per shard. '
'Please wait...'.format(index_name, self.max_num_segments)
)
self.client.indices.forcemerge(
index=index_name, max_num_segments=self.max_num_segments)
if self.delay > 0:
self.loggit.info(
'Pausing for {0} seconds before continuing...'.format(self.delay))
time.sleep(self.delay)
except Exception as err:
utils.report_failure(err)
class IndexSettings(object):
def __init__(
self, ilo, index_settings={}, ignore_unavailable=False, preserve_existing=False):
utils.verify_index_list(ilo)
if not index_settings:
raise exceptions.MissingArgument('Missing value for "index_settings"')
self.client = ilo.client
self.index_list = ilo
self.body = index_settings
self.ignore_unavailable = ignore_unavailable
self.preserve_existing = preserve_existing
self.loggit = logging.getLogger('curator.actions.index_settings')
self._body_check()
def _body_check(self):
if len(self.body) == 1:
if 'index' in self.body:
if isinstance(self.body['index'], dict):
return True
raise exceptions.ConfigurationError(
'Bad value for "index_settings": {0}'.format(self.body))
def _static_settings(self):
return [
'number_of_shards',
'shard',
'codec',
'routing_partition_size',
]
def _dynamic_settings(self):
return [
'number_of_replicas',
'auto_expand_replicas',
'refresh_interval',
'max_result_window',
'max_rescore_window',
'blocks',
'max_refresh_listeners',
'mapping',
'merge',
'translog',
]
def _settings_check(self):
open_index_list = []
open_indices = False
for idx in self.index_list.indices:
if self.index_list.index_info[idx]['state'] == 'open':
open_index_list.append(idx)
open_indices = True
for k in self.body['index']:
if k in self._static_settings():
if not self.ignore_unavailable:
if open_indices:
raise exceptions.ActionError(
'Static Setting "{0}" detected with open indices: '
'{1}. Static settings can only be used with closed '
'indices. Recommend filtering out open indices, '
'or setting ignore_unavailable to True'.format(
k, open_index_list
)
)
elif k in self._dynamic_settings():
pass
else:
self.loggit.warn(
'"{0}" is not a setting Curator recognizes and may or may '
'not work.'.format(k)
)
def do_dry_run(self):
utils.show_dry_run(self.index_list, 'indexsettings', **self.body)
def do_action(self):
self._settings_check()
self.index_list.empty_list_check()
self.loggit.info(
'Applying index settings to {0} indices: '
'{1}'.format(len(self.index_list.indices), self.index_list.indices)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
response = self.client.indices.put_settings(
index=utils.to_csv(lst), body=self.body,
ignore_unavailable=self.ignore_unavailable,
preserve_existing=self.preserve_existing
)
self.loggit.debug('PUT SETTINGS RESPONSE: {0}'.format(response))
except Exception as err:
utils.report_failure(err)
class Open(object):
def __init__(self, ilo):
utils.verify_index_list(ilo)
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
self.loggit = logging.getLogger('curator.actions.open')
def do_dry_run(self):
utils.show_dry_run(self.index_list, 'open')
def do_action(self):
self.index_list.empty_list_check()
self.loggit.info(
'Opening {0} selected indices: {1}'.format(
len(self.index_list.indices),
self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.open(index=utils.to_csv(lst))
except Exception as err:
utils.report_failure(err)
class Replicas(object):
def __init__(
self, ilo, count=None, wait_for_completion=False, wait_interval=9, max_wait=-1):
utils.verify_index_list(ilo)
# It's okay for count to be zero
if count == 0:
pass
elif not count:
raise exceptions.MissingArgument('Missing value for "count"')
self.client = ilo.client
self.index_list = ilo
self.count = count
self.wfc = wait_for_completion
self.wait_interval = wait_interval
self.max_wait = max_wait
self.loggit = logging.getLogger('curator.actions.replicas')
def do_dry_run(self):
utils.show_dry_run(self.index_list, 'replicas', count=self.count)
def do_action(self):
self.loggit.debug(
            'Cannot update the replica count of closed indices. '
'Omitting any closed indices.'
)
self.index_list.filter_closed()
self.index_list.empty_list_check()
self.loggit.info(
'Setting the replica count to {0} for {1} indices: '
'{2}'.format(self.count, len(self.index_list.indices), self.index_list.indices)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
self.client.indices.put_settings(
index=utils.to_csv(lst),
body={'number_of_replicas': self.count}
)
if self.wfc and self.count > 0:
self.loggit.debug(
'Waiting for shards to complete replication for '
'indices: {0}'.format(utils.to_csv(lst))
)
utils.wait_for_it(
self.client, 'replicas',
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
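# A minimal usage sketch for the Replicas class above (illustrative; assumes an
# existing IndexList `ilo`):
#
#     replicas = Replicas(ilo, count=0)
#     replicas.do_action()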
class Rollover(object):
def __init__(
self, client, name, conditions, new_index=None, extra_settings=None,
wait_for_active_shards=1
):
self.loggit = logging.getLogger('curator.actions.rollover')
if not isinstance(conditions, dict):
raise exceptions.ConfigurationError('"conditions" must be a dictionary')
else:
self.loggit.debug('"conditions" is {0}'.format(conditions))
if not isinstance(extra_settings, dict) and extra_settings is not None:
raise exceptions.ConfigurationError(
'"extra_settings" must be a dictionary or None')
utils.verify_client_object(client)
self.client = client
self.conditions = self._check_max_size(conditions)
self.settings = extra_settings
self.new_index = utils.parse_date_pattern(new_index) if new_index else new_index
self.wait_for_active_shards = wait_for_active_shards
if utils.rollable_alias(client, name):
self.name = name
else:
raise ValueError(
'Unable to perform index rollover with alias '
'"{0}". See previous logs for more details.'.format(name)
)
def _check_max_size(self, conditions):
if 'max_size' in conditions:
version = utils.get_version(self.client)
if version < (6, 1, 0):
raise exceptions.ConfigurationError(
'Your version of elasticsearch ({0}) does not support '
'the max_size rollover condition. It is only supported '
'in versions 6.1.0 and up.'.format(version)
)
return conditions
def body(self):
retval = {}
retval['conditions'] = self.conditions
if self.settings:
retval['settings'] = self.settings
return retval
def log_result(self, result):
dryrun_string = ''
if result['dry_run']:
dryrun_string = 'DRY-RUN: '
self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result))
rollover_string = '{0}Old index {1} rolled over to new index {2}'.format(
dryrun_string,
result['old_index'],
result['new_index']
)
success = False
for k in list(result['conditions'].keys()):
if result['conditions'][k]:
success = True
if result['dry_run'] and success:
self.loggit.info(rollover_string)
elif result['rolled_over']:
self.loggit.info(rollover_string)
else:
self.loggit.info(
'{0}Rollover conditions not met. Index {1} not rolled over.'.format(
dryrun_string,
result['old_index'])
)
def doit(self, dry_run=False):
return self.client.indices.rollover(
alias=self.name,
new_index=self.new_index,
body=self.body(),
dry_run=dry_run,
wait_for_active_shards=self.wait_for_active_shards,
)
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.log_result(self.doit(dry_run=True))
def do_action(self):
self.loggit.info('Performing index rollover')
try:
self.log_result(self.doit())
except Exception as err:
utils.report_failure(err)
class DeleteSnapshots(object):
def __init__(self, slo, retry_interval=120, retry_count=3):
utils.verify_snapshot_list(slo)
self.client = slo.client
self.retry_interval = retry_interval
self.retry_count = retry_count
self.snapshot_list = slo
self.repository = slo.repository
self.loggit = logging.getLogger('curator.actions.delete_snapshots')
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
mykwargs = {
'repository' : self.repository,
'retry_interval' : self.retry_interval,
'retry_count' : self.retry_count,
}
for snap in self.snapshot_list.snapshots:
self.loggit.info(
'DRY-RUN: delete_snapshot: {0} with arguments: {1}'.format(snap, mykwargs))
def do_action(self):
self.snapshot_list.empty_list_check()
self.loggit.info(
'Deleting {0} selected snapshots: {1}'.format(
len(self.snapshot_list.snapshots),
self.snapshot_list.snapshots
)
)
if not utils.safe_to_snap(
self.client, repository=self.repository,
retry_interval=self.retry_interval, retry_count=self.retry_count
):
raise exceptions.FailedExecution(
'Unable to delete snapshot(s) because a snapshot is in '
'state "IN_PROGRESS"')
try:
for snap in self.snapshot_list.snapshots:
self.loggit.info('Deleting snapshot {0}...'.format(snap))
self.client.snapshot.delete(
repository=self.repository, snapshot=snap)
except Exception as err:
utils.report_failure(err)
class Reindex(object):
def __init__(
self, ilo, request_body, refresh=True, requests_per_second=-1, slices=1, timeout=60,
wait_for_active_shards=1, wait_for_completion=True, max_wait=-1, wait_interval=9,
remote_url_prefix=None, remote_ssl_no_validate=None, remote_certificate=None,
remote_client_cert=None, remote_client_key=None, remote_aws_key=None,
remote_aws_secret_key=None, remote_aws_region=None, remote_filters={},
migration_prefix='', migration_suffix=''
):
self.loggit = logging.getLogger('curator.actions.reindex')
utils.verify_index_list(ilo)
        # Normally we would check for an empty list here, but since we can reindex
        # from remote, we might just be starting with an empty one.
# ilo.empty_list_check()
if not isinstance(request_body, dict):
raise exceptions.ConfigurationError('"request_body" is not of type dictionary')
#: Instance variable.
#: Internal reference to `request_body`
self.body = request_body
self.loggit.debug('REQUEST_BODY = {0}'.format(request_body))
#: Instance variable.
#: The Elasticsearch Client object derived from `ilo`
self.client = ilo.client
#: Instance variable.
#: Internal reference to `ilo`
self.index_list = ilo
#: Instance variable.
#: Internal reference to `refresh`
self.refresh = refresh
#: Instance variable.
#: Internal reference to `requests_per_second`
self.requests_per_second = requests_per_second
#: Instance variable.
#: Internal reference to `slices`
self.slices = slices
#: Instance variable.
#: Internal reference to `timeout`, and add "s" for seconds.
self.timeout = '{0}s'.format(timeout)
#: Instance variable.
#: Internal reference to `wait_for_active_shards`
self.wait_for_active_shards = wait_for_active_shards
#: Instance variable.
#: Internal reference to `wait_for_completion`
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable.
#: Internal reference to `migration_prefix`
self.mpfx = migration_prefix
#: Instance variable.
#: Internal reference to `migration_suffix`
self.msfx = migration_suffix
# This is for error logging later...
self.remote = False
if 'remote' in self.body['source']:
self.remote = True
self.migration = False
if self.body['dest']['index'] == 'MIGRATION':
self.migration = True
if self.migration:
if not self.remote and not self.mpfx and not self.msfx:
raise exceptions.ConfigurationError(
'MIGRATION can only be used locally with one or both of '
'migration_prefix or migration_suffix.'
)
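        # Illustrative example (index names are hypothetical): with
        # migration_prefix='new-' and migration_suffix='-v2', a source index
        # named 'logs' is written to destination 'new-logs-v2' by sources() below.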
# REINDEX_SELECTION is the designated token. If you use this for the
# source "index," it will be replaced with the list of indices from the
# provided 'ilo' (index list object).
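        # Illustrative request_body sketch (destination name is hypothetical):
        #   {'source': {'index': 'REINDEX_SELECTION'}, 'dest': {'index': 'new-index'}}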
if self.body['source']['index'] == 'REINDEX_SELECTION' \
and not self.remote:
self.body['source']['index'] = self.index_list.indices
# Remote section
elif self.remote:
self.loggit.debug('Remote reindex request detected')
if 'host' not in self.body['source']['remote']:
raise exceptions.ConfigurationError('Missing remote "host"')
rclient_info = {}
for k in ['host', 'username', 'password']:
rclient_info[k] = self.body['source']['remote'][k] \
if k in self.body['source']['remote'] else None
rhost = rclient_info['host']
try:
# Save these for logging later
_ = rhost.split(':')
self.remote_port = _[2]
self.remote_host = _[1][2:]
except Exception as err:
raise exceptions.ConfigurationError(
'Host must be in the form [scheme]://[host]:[port] but '
'was [{0}]'.format(rhost)
)
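            # Example of the expected parsing (hypothetical host): for
            # 'https://es.example.com:9200', split(':') yields
            # ['https', '//es.example.com', '9200'], so remote_port is '9200'
            # and remote_host is 'es.example.com' (the leading '//' is stripped).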
rhttp_auth = '{0}:{1}'.format(
rclient_info['username'], rclient_info['password']) \
if (rclient_info['username'] and rclient_info['password']) else None
if rhost[:5] == 'http:':
use_ssl = False
elif rhost[:5] == 'https':
use_ssl = True
else:
raise exceptions.ConfigurationError(
'Host must be in URL format. You provided: '
'{0}'.format(rclient_info['host'])
)
# Let's set a decent remote timeout for initially reading
remote_timeout = 180
if self.body['source']['index'] == 'REINDEX_SELECTION':
self.loggit.debug('Filtering indices from remote')
from .indexlist import IndexList
self.loggit.debug(
'Remote client args: '
'host={0} '
'http_auth={1} '
'url_prefix={2} '
'use_ssl={3} '
'ssl_no_validate={4} '
'certificate={5} '
'client_cert={6} '
'client_key={7} '
'aws_key={8} '
'aws_secret_key={9} '
'aws_region={10} '
'timeout={11} '
'skip_version_test=True'.format(
rhost,
rhttp_auth,
remote_url_prefix,
use_ssl,
remote_ssl_no_validate,
remote_certificate,
remote_client_cert,
remote_client_key,
remote_aws_key,
remote_aws_secret_key,
remote_aws_region,
remote_timeout
)
)
try:
rclient = utils.get_client(
host=rhost,
http_auth=rhttp_auth,
url_prefix=remote_url_prefix,
use_ssl=use_ssl,
ssl_no_validate=remote_ssl_no_validate,
certificate=remote_certificate,
client_cert=remote_client_cert,
client_key=remote_client_key,
aws_key=remote_aws_key,
aws_secret_key=remote_aws_secret_key,
aws_region=remote_aws_region,
skip_version_test=True,
timeout=remote_timeout
)
except Exception as err:
self.loggit.error(
'Unable to establish connection to remote Elasticsearch'
' with provided credentials/certificates/settings.'
)
utils.report_failure(err)
try:
rio = IndexList(rclient)
rio.iterate_filters({'filters': remote_filters})
try:
rio.empty_list_check()
except exceptions.NoIndices:
raise exceptions.FailedExecution(
'No actionable remote indices selected after '
'applying filters.'
)
self.body['source']['index'] = rio.indices
except Exception as err:
self.loggit.error(
'Unable to get/filter list of remote indices.'
)
utils.report_failure(err)
self.loggit.debug(
'Reindexing indices: {0}'.format(self.body['source']['index']))
def _get_request_body(self, source, dest):
body = deepcopy(self.body)
body['source']['index'] = source
body['dest']['index'] = dest
return body
def _get_reindex_args(self, source, dest):
# Always set wait_for_completion to False. Let 'utils.wait_for_it' do its
# thing if wait_for_completion is set to True. Report the task_id
# either way.
reindex_args = {
'body':self._get_request_body(source, dest), 'refresh':self.refresh,
'requests_per_second': self.requests_per_second,
'timeout': self.timeout,
'wait_for_active_shards': self.wait_for_active_shards,
'wait_for_completion': False,
'slices': self.slices
}
version = utils.get_version(self.client)
if version < (5, 1, 0):
self.loggit.info(
'Your version of elasticsearch ({0}) does not support '
'sliced scroll for reindex, so that setting will not be '
'used'.format(version)
)
del reindex_args['slices']
return reindex_args
def get_processed_items(self, task_id):
try:
task_data = self.client.tasks.get(task_id=task_id)
except Exception as err:
raise exceptions.CuratorException(
'Unable to obtain task information for task_id "{0}". Exception '
'{1}'.format(task_id, err)
)
total_processed_items = -1
task = task_data['task']
if task['action'] == 'indices:data/write/reindex':
self.loggit.debug('It\'s a REINDEX TASK')
self.loggit.debug('TASK_DATA: {0}'.format(task_data))
self.loggit.debug('TASK_DATA keys: {0}'.format(list(task_data.keys())))
if 'response' in task_data:
response = task_data['response']
total_processed_items = response['total']
self.loggit.debug('total_processed_items = {0}'.format(total_processed_items))
return total_processed_items
def _post_run_quick_check(self, index_name, task_id):
processed_items = self.get_processed_items(task_id)
if processed_items == 0:
self.loggit.info(
'No items were processed. Will not check if target index "{0}" '
'exists'.format(index_name)
)
else:
# Verify the destination index is there after the fact
index_exists = self.client.indices.exists(index=index_name)
alias_instead = self.client.indices.exists_alias(name=index_name)
if not index_exists and not alias_instead:
self.loggit.error(
'The index described as "{0}" was not found after the reindex '
'operation. Check Elasticsearch logs for more '
'information.'.format(index_name)
)
if self.remote:
self.loggit.error(
'Did you forget to add "reindex.remote.whitelist: '
'{0}:{1}" to the elasticsearch.yml file on the '
'"dest" node?'.format(
self.remote_host, self.remote_port
)
)
raise exceptions.FailedExecution(
'Reindex failed. The index or alias identified by "{0}" was '
'not found.'.format(index_name)
)
def sources(self):
dest = self.body['dest']['index']
source_list = utils.ensure_list(self.body['source']['index'])
self.loggit.debug('source_list: {0}'.format(source_list))
if not source_list or source_list == ['REINDEX_SELECTED']: # Empty list
raise exceptions.NoIndices
if not self.migration:
yield self.body['source']['index'], dest
# Loop over all sources (default will only be one)
else:
for source in source_list:
if self.migration:
dest = self.mpfx + source + self.msfx
yield source, dest
def show_run_args(self, source, dest):
return (
'request body: {0} with arguments: '
'refresh={1} '
'requests_per_second={2} '
'slices={3} '
'timeout={4} '
'wait_for_active_shards={5} '
'wait_for_completion={6}'.format(
self._get_request_body(source, dest),
self.refresh,
self.requests_per_second,
self.slices,
self.timeout,
self.wait_for_active_shards,
self.wfc
)
)
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
for source, dest in self.sources():
self.loggit.info(
'DRY-RUN: REINDEX: {0}'.format(self.show_run_args(source, dest))
)
def do_action(self):
try:
# Loop over all sources (default will only be one)
for source, dest in self.sources():
self.loggit.info('Commencing reindex operation')
self.loggit.debug(
'REINDEX: {0}'.format(self.show_run_args(source, dest)))
response = self.client.reindex(**self._get_reindex_args(source, dest))
self.loggit.debug('TASK ID = {0}'.format(response['task']))
if self.wfc:
utils.wait_for_it(
self.client, 'reindex', task_id=response['task'],
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self._post_run_quick_check(dest, response['task'])
else:
self.loggit.warn(
'"wait_for_completion" set to {0}. Remember '
'to check task_id "{1}" for successful completion '
'manually.'.format(self.wfc, response['task'])
)
except exceptions.NoIndices as err:
raise exceptions.NoIndices(
'Source index must be list of actual indices. '
'It must not be an empty list.')
except Exception as err:
utils.report_failure(err)
class Snapshot(object):
def __init__(
self, ilo, repository=None, name=None, ignore_unavailable=False,
include_global_state=True, partial=False, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False
):
utils.verify_index_list(ilo)
        # Check here and don't bother with the rest of this if there are no indices
ilo.empty_list_check()
if not utils.repository_exists(ilo.client, repository=repository):
raise exceptions.ActionError(
'Cannot snapshot indices to missing repository: '
'{0}'.format(repository)
)
if not name:
raise exceptions.MissingArgument('No value for "name" provided.')
self.client = ilo.client
self.name = utils.parse_datemath(self.client, utils.parse_date_pattern(name))
self.index_list = ilo
self.repository = repository
self.wait_for_completion = wait_for_completion
self.wait_interval = wait_interval
self.max_wait = max_wait
self.skip_repo_fs_check = skip_repo_fs_check
self.state = None
self.body = utils.create_snapshot_body(
ilo.indices,
ignore_unavailable=ignore_unavailable,
include_global_state=include_global_state,
partial=partial
)
self.loggit = logging.getLogger('curator.actions.snapshot')
def get_state(self):
try:
self.state = self.client.snapshot.get(
repository=self.repository,
snapshot=self.name)['snapshots'][0]['state']
return self.state
except IndexError:
raise exceptions.CuratorException(
'Snapshot "{0}" not found in repository '
'"{1}"'.format(self.name, self.repository)
)
def report_state(self):
self.get_state()
if self.state == 'SUCCESS':
self.loggit.info('Snapshot {0} successfully completed.'.format(self.name))
else:
            msg = 'Snapshot {0} completed with state: {1}'.format(self.name, self.state)
self.loggit.error(msg)
raise exceptions.FailedSnapshot(msg)
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: snapshot: {0} in repository {1} with arguments: '
'{2}'.format(self.name, self.repository, self.body)
)
def do_action(self):
if not self.skip_repo_fs_check:
utils.test_repo_fs(self.client, self.repository)
if utils.snapshot_running(self.client):
raise exceptions.SnapshotInProgress('Snapshot already in progress.')
try:
self.loggit.info(
'Creating snapshot "{0}" from indices: {1}'.format(
self.name, self.index_list.indices
)
)
self.client.snapshot.create(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wait_for_completion:
utils.wait_for_it(
self.client, 'snapshot', snapshot=self.name,
repository=self.repository,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self.report_state()
else:
self.loggit.warn(
'"wait_for_completion" set to {0}.'
'Remember to check for successful completion '
'manually.'.format(self.wait_for_completion)
)
except Exception as err:
utils.report_failure(err)
class Restore(object):
def __init__(
self, slo, name=None, indices=None, include_aliases=False, ignore_unavailable=False,
include_global_state=False, partial=False, rename_pattern=None,
rename_replacement=None, extra_settings={}, wait_for_completion=True, wait_interval=9,
max_wait=-1, skip_repo_fs_check=False
):
self.loggit = logging.getLogger('curator.actions.snapshot')
utils.verify_snapshot_list(slo)
most_recent = slo.most_recent()
self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent))
self.name = name if name else most_recent
if slo.snapshot_info[self.name]['state'] == 'PARTIAL' and partial:
self.loggit.warn(
'Performing restore of snapshot in state PARTIAL.')
elif slo.snapshot_info[self.name]['state'] != 'SUCCESS':
raise exceptions.CuratorException(
'Restore operation can only be performed on snapshots with '
'state "SUCCESS", or "PARTIAL" if partial=True.'
)
#: Instance variable.
#: The Elasticsearch Client object derived from `slo`
self.client = slo.client
#: Instance variable.
#: Internal reference to `slo`
self.snapshot_list = slo
#: Instance variable.
#: `repository` derived from `slo`
self.repository = slo.repository
if indices:
self.indices = utils.ensure_list(indices)
else:
self.indices = slo.snapshot_info[self.name]['indices']
self.wfc = wait_for_completion
#: Instance variable
#: How many seconds to wait between checks for completion.
self.wait_interval = wait_interval
#: Instance variable.
#: How long in seconds to `wait_for_completion` before returning with an
#: exception. A value of -1 means wait forever.
self.max_wait = max_wait
#: Instance variable version of ``rename_pattern``
self.rename_pattern = rename_pattern if rename_replacement is not None \
else ''
#: Instance variable version of ``rename_replacement``
self.rename_replacement = rename_replacement if rename_replacement \
is not None else ''
#: Also an instance variable version of ``rename_replacement``
#: but with Java regex group designations of ``$#``
        #: converted to Python's ``\\#`` style.
self.py_rename_replacement = self.rename_replacement.replace('$', '\\')
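        # Illustrative example (hypothetical values): rename_pattern='index_(.+)' with
        # rename_replacement='restored_index_$1' becomes 'restored_index_\\1' here, so
        # _get_expected_output() can pass it to re.sub().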
self.skip_repo_fs_check = skip_repo_fs_check
self.body = {
'indices' : self.indices,
'include_aliases' : include_aliases,
'ignore_unavailable' : ignore_unavailable,
'include_global_state' : include_global_state,
'partial' : partial,
'rename_pattern' : self.rename_pattern,
'rename_replacement' : self.rename_replacement,
}
if extra_settings:
self.loggit.debug(
'Adding extra_settings to restore body: '
'{0}'.format(extra_settings)
)
try:
self.body.update(extra_settings)
except:
self.loggit.error(
'Unable to apply extra settings to restore body')
self.loggit.debug('REPOSITORY: {0}'.format(self.repository))
self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc))
self.loggit.debug(
'SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check))
self.loggit.debug('BODY: {0}'.format(self.body))
self._get_expected_output()
def _get_expected_output(self):
if not self.rename_pattern and not self.rename_replacement:
self.expected_output = self.indices
return
self.expected_output = []
for index in self.indices:
self.expected_output.append(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
self.loggit.debug('index: {0} replacement: {1}'.format(index, self.expected_output[-1]))
def report_state(self):
all_indices = utils.get_indices(self.client)
found_count = 0
missing = []
for index in self.expected_output:
if index in all_indices:
found_count += 1
self.loggit.info('Found restored index {0}'.format(index))
else:
missing.append(index)
if found_count == len(self.expected_output):
self.loggit.info('All indices appear to have been restored.')
else:
msg = (
'Some of the indices do not appear to have been restored. Missing: '
'{0}'.format(missing)
)
self.loggit.error(msg)
raise exceptions.FailedRestore(msg)
def do_dry_run(self):
self.loggit.info('DRY-RUN MODE. No changes will be made.')
self.loggit.info(
'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: '
'{2}'.format(
self.repository, self.name,
{'wait_for_completion' : self.wfc, 'body' : self.body}
)
)
for index in self.indices:
if self.rename_pattern and self.rename_replacement:
replacement_msg = 'as {0}'.format(
re.sub(
self.rename_pattern,
self.py_rename_replacement,
index
)
)
else:
replacement_msg = ''
self.loggit.info(
'DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg)
)
def do_action(self):
if not self.skip_repo_fs_check:
utils.test_repo_fs(self.client, self.repository)
if utils.snapshot_running(self.client):
raise exceptions.SnapshotInProgress('Cannot restore while a snapshot is in progress.')
try:
self.loggit.info(
'Restoring indices "{0}" from snapshot: {1}'.format(self.indices, self.name)
)
self.client.snapshot.restore(
repository=self.repository, snapshot=self.name, body=self.body,
wait_for_completion=False
)
if self.wfc:
utils.wait_for_it(
self.client, 'restore', index_list=self.expected_output,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
self.report_state()
else:
self.loggit.warn(
'"wait_for_completion" set to {0}. '
'Remember to check for successful completion '
'manually.'.format(self.wfc)
)
except Exception as err:
utils.report_failure(err)
class Shrink(object):
def __init__(
self, ilo, shrink_node='DETERMINISTIC', node_filters={}, number_of_shards=1,
number_of_replicas=1, shrink_prefix='', shrink_suffix='-shrink', copy_aliases=False,
delete_after=True, post_allocation={}, wait_for_active_shards=1,
wait_for_rebalance=True, extra_settings={}, wait_for_completion=True, wait_interval=9,
max_wait=-1
):
self.loggit = logging.getLogger('curator.actions.shrink')
utils.verify_index_list(ilo)
if 'permit_masters' not in node_filters:
node_filters['permit_masters'] = False
self.client = ilo.client
self.index_list = ilo
self.shrink_node = shrink_node
self.node_filters = node_filters
self.shrink_prefix = shrink_prefix
self.shrink_suffix = shrink_suffix
self.copy_aliases = copy_aliases
self.delete_after = delete_after
self.post_allocation = post_allocation
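        # Illustrative shape of post_allocation (node name is hypothetical), matching
        # how it is consumed in do_action():
        #   {'allocation_type': 'require', 'key': '_name', 'value': 'node-1'}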
self.wait_for_rebalance = wait_for_rebalance
self.wfc = wait_for_completion
self.wait_interval = wait_interval
self.max_wait = max_wait
self.number_of_shards = number_of_shards
self.wait_for_active_shards = wait_for_active_shards
self.shrink_node_name = None
self.body = {
'settings': {
'index.number_of_shards' : number_of_shards,
'index.number_of_replicas' : number_of_replicas,
}
}
if extra_settings:
self._merge_extra_settings(extra_settings)
def _merge_extra_settings(self, extra_settings):
self.loggit.debug(
'Adding extra_settings to shrink body: '
'{0}'.format(extra_settings)
)
if 'settings' in extra_settings:
settings = extra_settings.pop('settings')
try:
self.body['settings'].update(settings)
except Exception as err:
raise exceptions.ConfigurationError(
'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format(
{'settings':settings}, err
)
)
if extra_settings:
try:
self.body.update(extra_settings)
except Exception as err:
raise exceptions.ConfigurationError(
'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format(
extra_settings, err
)
)
def _data_node(self, node_id):
roles = utils.node_roles(self.client, node_id)
name = utils.node_id_to_name(self.client, node_id)
if not 'data' in roles:
self.loggit.info('Skipping node "{0}": non-data node'.format(name))
return False
if 'master' in roles and not self.node_filters['permit_masters']:
self.loggit.info('Skipping node "{0}": master node'.format(name))
return False
elif 'master' in roles and self.node_filters['permit_masters']:
self.loggit.warn(
'Not skipping node "{0}" which is a master node (not recommended), but '
'permit_masters is True'.format(name)
)
return True
else:
return True
def _exclude_node(self, name):
if 'exclude_nodes' in self.node_filters:
if name in self.node_filters['exclude_nodes']:
self.loggit.info('Excluding node "{0}" due to node_filters'.format(name))
return True
return False
def _shrink_target(self, name):
return '{0}{1}{2}'.format(self.shrink_prefix, name, self.shrink_suffix)
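        # Example: with the defaults shrink_prefix='' and shrink_suffix='-shrink',
        # a source index named 'logs' (hypothetical) yields the target 'logs-shrink'.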
def qualify_single_node(self):
node_id = utils.name_to_node_id(self.client, self.shrink_node)
if node_id:
self.shrink_node_id = node_id
self.shrink_node_name = self.shrink_node
else:
raise exceptions.ConfigurationError(
'Unable to find node named: "{0}"'.format(self.shrink_node))
if self._exclude_node(self.shrink_node):
raise exceptions.ConfigurationError(
'Node "{0}" listed for exclusion'.format(self.shrink_node))
if not self._data_node(node_id):
raise exceptions.ActionError(
'Node "{0}" is not usable as a shrink node'.format(self.shrink_node))
self.shrink_node_avail = (
self.client.nodes.stats()['nodes'][node_id]['fs']['total']['available_in_bytes']
)
def most_available_node(self):
mvn_avail = 0
mvn_name = None
mvn_id = None
nodes = self.client.nodes.stats()['nodes']
for node_id in nodes:
name = nodes[node_id]['name']
if self._exclude_node(name):
self.loggit.debug('Node "{0}" excluded by node filters'.format(name))
continue
if not self._data_node(node_id):
self.loggit.debug('Node "{0}" is not a data node'.format(name))
continue
value = nodes[node_id]['fs']['total']['available_in_bytes']
if value > mvn_avail:
mvn_name = name
mvn_id = node_id
mvn_avail = value
self.shrink_node_name = mvn_name
self.shrink_node_id = mvn_id
self.shrink_node_avail = mvn_avail
def route_index(self, idx, allocation_type, key, value):
bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key)
routing = {bkey : value}
try:
self.client.indices.put_settings(index=idx, body=routing)
if self.wait_for_rebalance:
utils.wait_for_it(
self.client, 'allocation', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
else:
utils.wait_for_it(
self.client, 'relocate', index=idx, wait_interval=self.wait_interval,
max_wait=self.max_wait
)
except Exception as err:
utils.report_failure(err)
def __log_action(self, error_msg, dry_run=False):
if not dry_run:
raise exceptions.ActionError(error_msg)
else:
self.loggit.warn('DRY-RUN: {0}'.format(error_msg))
def _block_writes(self, idx):
block = {'index.blocks.write': True}
self.client.indices.put_settings(index=idx, body=block)
def _unblock_writes(self, idx):
unblock = {'index.blocks.write': False}
self.client.indices.put_settings(index=idx, body=unblock)
def _check_space(self, idx, dry_run=False):
size = utils.index_size(self.client, idx, value='primaries')
padded = (size * 2) + (32 * 1024)
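        # Worked example: for a 10 GiB index (primaries only), padded is roughly
        # 20 GiB + 32 KiB, which must fit within the shrink node's available bytes.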
if padded < self.shrink_node_avail:
self.loggit.debug(
'Sufficient space available for 2x the size of index "{0}". Required: {1}, '
'available: {2}'.format(idx, padded, self.shrink_node_avail)
)
else:
error_msg = (
'Insufficient space available for 2x the size of index "{0}", shrinking will '
'exceed space available. Required: {1}, available: {2}'.format(
idx, padded, self.shrink_node_avail
)
)
self.__log_action(error_msg, dry_run)
def _check_node(self):
if self.shrink_node != 'DETERMINISTIC':
if not self.shrink_node_name:
self.qualify_single_node()
else:
self.most_available_node()
    def _check_target_exists(self, idx, dry_run=False):
        target = self._shrink_target(idx)
if self.client.indices.exists(target):
error_msg = 'Target index "{0}" already exists'.format(target)
self.__log_action(error_msg, dry_run)
def _check_doc_count(self, idx, dry_run=False):
max_docs = 2147483519
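        # 2147483519 is commonly cited as Lucene's per-shard document ceiling
        # (Integer.MAX_VALUE - 128); with number_of_shards=1, a source index
        # holding more documents than this cannot be shrunk.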
doc_count = self.client.indices.stats(idx)['indices'][idx]['primaries']['docs']['count']
if doc_count > (max_docs * self.number_of_shards):
error_msg = (
'Too many documents ({0}) to fit in {1} shard(s). Maximum number of docs per '
'shard is {2}'.format(doc_count, self.number_of_shards, max_docs)
)
self.__log_action(error_msg, dry_run)
def _check_shard_count(self, idx, src_shards, dry_run=False):
if self.number_of_shards >= src_shards:
error_msg = (
'Target number of shards ({0}) must be less than current number of shards ({1}) '
'in index "{2}"'.format(self.number_of_shards, src_shards, idx)
)
self.__log_action(error_msg, dry_run)
def _check_shard_factor(self, idx, src_shards, dry_run=False):
factors = [x for x in range(1, src_shards+1) if src_shards % x == 0]
factors.pop()
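        # Example: with src_shards=8 the candidate factors are [1, 2, 4, 8]; pop()
        # drops 8 itself, so the valid target shard counts are [1, 2, 4].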
if not self.number_of_shards in factors:
error_msg = (
'"{0}" is not a valid factor of {1} shards. Valid values are '
'{2}'.format(self.number_of_shards, src_shards, factors)
)
self.__log_action(error_msg, dry_run)
def _check_all_shards(self, idx):
shards = self.client.cluster.state(index=idx)['routing_table']['indices'][idx]['shards']
found = []
for shardnum in shards:
for shard_idx in range(0, len(shards[shardnum])):
if shards[shardnum][shard_idx]['node'] == self.shrink_node_id:
found.append(
{'shard': shardnum, 'primary': shards[shardnum][shard_idx]['primary']})
if len(shards) != len(found):
self.loggit.debug(
'Found these shards on node "{0}": {1}'.format(self.shrink_node_name, found))
raise exceptions.ActionError(
'Unable to shrink index "{0}" as not all shards were found on the designated '
'shrink node ({1}): {2}'.format(idx, self.shrink_node_name, found)
)
def pre_shrink_check(self, idx, dry_run=False):
self.loggit.debug('BEGIN PRE_SHRINK_CHECK')
self.loggit.debug('Check that target exists')
self._check_target_exists(idx, dry_run)
self.loggit.debug('Check doc count constraints')
self._check_doc_count(idx, dry_run)
self.loggit.debug('Check shard count')
src_shards = int(self.client.indices.get(idx)[idx]['settings']['index']['number_of_shards'])
self._check_shard_count(idx, src_shards, dry_run)
self.loggit.debug('Check shard factor')
self._check_shard_factor(idx, src_shards, dry_run)
self.loggit.debug('Check node availability')
self._check_node()
self.loggit.debug('Check available disk space')
self._check_space(idx, dry_run)
self.loggit.debug('FINISH PRE_SHRINK_CHECK')
def do_copy_aliases(self, source_idx, target_idx):
alias_actions = []
aliases = self.client.indices.get_alias(index=source_idx)
for alias in aliases[source_idx]['aliases']:
self.loggit.debug('alias: {0}'.format(alias))
alias_actions.append(
{'remove': {'index': source_idx, 'alias': alias}})
alias_actions.append(
{'add': {'index': target_idx, 'alias': alias}})
if alias_actions:
self.loggit.info('Copy alias actions: {0}'.format(alias_actions))
self.client.indices.update_aliases({'actions' : alias_actions})
def do_dry_run(self):
self.index_list.filter_closed()
self.index_list.filter_by_shards(number_of_shards=self.number_of_shards)
self.index_list.empty_list_check()
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
for idx in lst:
target = self._shrink_target(idx)
self.pre_shrink_check(idx, dry_run=True)
self.loggit.info(
'DRY-RUN: Moving shards to shrink node: "{0}"'.format(
self.shrink_node_name
)
)
self.loggit.info(
'DRY-RUN: Shrinking index "{0}" to "{1}" with settings: {2}, '
'wait_for_active_shards={3}'.format(
idx, target, self.body, self.wait_for_active_shards
)
)
if self.post_allocation:
self.loggit.info(
'DRY-RUN: Applying post-shrink allocation rule "{0}" to index '
'"{1}"'.format(
'index.routing.allocation.{0}.{1}:{2}'.format(
self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
), target
)
)
if self.copy_aliases:
self.loggit.info(
'DRY-RUN: Copy source index aliases "{0}"'.format(
self.client.indices.get_alias(idx)
)
)
if self.delete_after:
self.loggit.info('DRY-RUN: Deleting source index "{0}"'.format(idx))
except Exception as err:
utils.report_failure(err)
def do_action(self):
self.index_list.filter_closed()
self.index_list.filter_by_shards(number_of_shards=self.number_of_shards)
self.index_list.empty_list_check()
self.loggit.info(
'Shrinking {0} selected indices: {1}'.format(
len(self.index_list.indices), self.index_list.indices
)
)
try:
index_lists = utils.chunk_index_list(self.index_list.indices)
for lst in index_lists:
for idx in lst:
target = self._shrink_target(idx)
self.loggit.info('Source index: {0} -- Target index: {1}'.format(idx, target))
self.pre_shrink_check(idx)
self.loggit.info(
'Moving shards to shrink node: "{0}"'.format(self.shrink_node_name))
self.route_index(idx, 'require', '_name', self.shrink_node_name)
self._check_all_shards(idx)
self._block_writes(idx)
utils.wait_for_it(
self.client, 'shrink', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
self.loggit.info(
'Shrinking index "{0}" to "{1}" with settings: {2}, wait_for_active_shards'
'={3}'.format(idx, target, self.body, self.wait_for_active_shards)
)
try:
self.client.indices.shrink(
index=idx, target=target, body=self.body,
wait_for_active_shards=self.wait_for_active_shards
)
if self.wfc:
self.loggit.debug(
'Wait for shards to complete allocation for index: '
'{0}'.format(target)
)
if self.wait_for_rebalance:
utils.wait_for_it(
self.client, 'shrink', wait_interval=self.wait_interval,
max_wait=self.max_wait
)
else:
utils.wait_for_it(
self.client, 'relocate', index=target,
wait_interval=self.wait_interval, max_wait=self.max_wait
)
except Exception as err:
if self.client.indices.exists(index=target):
self.loggit.error(
'Deleting target index "{0}" due to failure to complete '
'shrink'.format(target)
)
self.client.indices.delete(index=target)
raise exceptions.ActionError(
'Unable to shrink index "{0}" -- Error: {1}'.format(idx, err))
self.loggit.info('Index "{0}" successfully shrunk to "{1}"'.format(idx, target))
self._unblock_writes(idx)
                    if self.post_allocation:
self.loggit.info(
'Applying post-shrink allocation rule "{0}" to index "{1}"'.format(
'index.routing.allocation.{0}.{1}:{2}'.format(
self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
), target
)
)
self.route_index(
target, self.post_allocation['allocation_type'],
self.post_allocation['key'], self.post_allocation['value']
)
                    if self.copy_aliases:
self.loggit.info('Copy source index aliases "{0}"'.format(idx))
self.do_copy_aliases(idx, target)
if self.delete_after:
self.loggit.info('Deleting source index "{0}"'.format(idx))
self.client.indices.delete(index=idx)
else:
self.loggit.info('Unassigning routing for source index: "{0}"'.format(idx))
self.route_index(idx, 'require', '_name', '')
except Exception as err:
# Just in case it fails after attempting to meet this condition
self._unblock_writes(idx)
utils.report_failure(err)
| true
| true
|
1c40828729b44afb6e27bd02134bed827f46fba8
| 3,317
|
py
|
Python
|
tests/df/test_memory.py
|
sanketsaurav/dffml
|
acf3a20cd6a4c3c15aa872f3a1f898924af05a0e
|
[
"MIT"
] | null | null | null |
tests/df/test_memory.py
|
sanketsaurav/dffml
|
acf3a20cd6a4c3c15aa872f3a1f898924af05a0e
|
[
"MIT"
] | null | null | null |
tests/df/test_memory.py
|
sanketsaurav/dffml
|
acf3a20cd6a4c3c15aa872f3a1f898924af05a0e
|
[
"MIT"
] | null | null | null |
from functools import wraps
from unittest.mock import patch
from typing import NamedTuple
from dffml.util.data import traverse_config_set
from dffml.util.cli.arg import Arg, parse_unknown
from dffml.util.entrypoint import entry_point
from dffml.df.base import BaseKeyValueStore, BaseRedundancyCheckerConfig
from dffml.df.memory import MemoryKeyValueStore, MemoryRedundancyChecker
from dffml.util.asynctestcase import AsyncTestCase
class KeyValueStoreWithArgumentsConfig(NamedTuple):
filename: str
@entry_point("withargs")
class KeyValueStoreWithArguments(BaseKeyValueStore):
CONTEXT = NotImplementedError
def __call__(self):
raise NotImplementedError
@classmethod
def args(cls, args, *above):
cls.config_set(args, above, "filename", Arg(type=str))
return args
@classmethod
def config(cls, config, *above):
return KeyValueStoreWithArgumentsConfig(
filename=cls.config_get(config, above, "filename")
)
def load_kvstore_with_args(loading=None):
if loading == "withargs":
return KeyValueStoreWithArguments
return [KeyValueStoreWithArguments]
class TestMemoryRedundancyChecker(AsyncTestCase):
@patch.object(BaseKeyValueStore, "load", load_kvstore_with_args)
def test_args(self):
self.assertEqual(
MemoryRedundancyChecker.args({}),
{
"rchecker": {
"arg": None,
"config": {
"memory": {
"arg": None,
"config": {
"kvstore": {
"arg": Arg(
type=BaseKeyValueStore.load,
default=MemoryKeyValueStore,
),
"config": {
"withargs": {
"arg": None,
"config": {
"filename": {
"arg": Arg(type=str),
"config": {},
}
},
}
},
}
},
}
},
}
},
)
@patch.object(BaseKeyValueStore, "load", load_kvstore_with_args)
def test_config_default_label(self):
was = MemoryRedundancyChecker.config(
parse_unknown(
"--rchecker-memory-kvstore",
"withargs",
"--rchecker-memory-kvstore-withargs-filename",
"somefile",
)
)
self.assertEqual(type(was), BaseRedundancyCheckerConfig)
self.assertEqual(type(was.key_value_store), KeyValueStoreWithArguments)
self.assertEqual(
type(was.key_value_store.config), KeyValueStoreWithArgumentsConfig
)
self.assertEqual(was.key_value_store.config.filename, "somefile")
| 34.915789
| 79
| 0.493217
|
from functools import wraps
from unittest.mock import patch
from typing import NamedTuple
from dffml.util.data import traverse_config_set
from dffml.util.cli.arg import Arg, parse_unknown
from dffml.util.entrypoint import entry_point
from dffml.df.base import BaseKeyValueStore, BaseRedundancyCheckerConfig
from dffml.df.memory import MemoryKeyValueStore, MemoryRedundancyChecker
from dffml.util.asynctestcase import AsyncTestCase
class KeyValueStoreWithArgumentsConfig(NamedTuple):
filename: str
@entry_point("withargs")
class KeyValueStoreWithArguments(BaseKeyValueStore):
CONTEXT = NotImplementedError
def __call__(self):
raise NotImplementedError
@classmethod
def args(cls, args, *above):
cls.config_set(args, above, "filename", Arg(type=str))
return args
@classmethod
def config(cls, config, *above):
return KeyValueStoreWithArgumentsConfig(
filename=cls.config_get(config, above, "filename")
)
def load_kvstore_with_args(loading=None):
if loading == "withargs":
return KeyValueStoreWithArguments
return [KeyValueStoreWithArguments]
class TestMemoryRedundancyChecker(AsyncTestCase):
@patch.object(BaseKeyValueStore, "load", load_kvstore_with_args)
def test_args(self):
self.assertEqual(
MemoryRedundancyChecker.args({}),
{
"rchecker": {
"arg": None,
"config": {
"memory": {
"arg": None,
"config": {
"kvstore": {
"arg": Arg(
type=BaseKeyValueStore.load,
default=MemoryKeyValueStore,
),
"config": {
"withargs": {
"arg": None,
"config": {
"filename": {
"arg": Arg(type=str),
"config": {},
}
},
}
},
}
},
}
},
}
},
)
@patch.object(BaseKeyValueStore, "load", load_kvstore_with_args)
def test_config_default_label(self):
was = MemoryRedundancyChecker.config(
parse_unknown(
"--rchecker-memory-kvstore",
"withargs",
"--rchecker-memory-kvstore-withargs-filename",
"somefile",
)
)
self.assertEqual(type(was), BaseRedundancyCheckerConfig)
self.assertEqual(type(was.key_value_store), KeyValueStoreWithArguments)
self.assertEqual(
type(was.key_value_store.config), KeyValueStoreWithArgumentsConfig
)
self.assertEqual(was.key_value_store.config.filename, "somefile")
| true
| true
|
1c40829241242b10163a3380f70cebf2109dad8d
| 108
|
py
|
Python
|
month03.2/django/day05/mysitel3/music/urls.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | 1
|
2021-02-02T02:17:37.000Z
|
2021-02-02T02:17:37.000Z
|
month03.2/django/day05/mysitel3/music/urls.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
month03.2/django/day05/mysitel3/music/urls.py
|
Amiao-miao/all-codes
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from music import views
urlpatterns = [
path('index',views.music_view),
]
| 15.428571
| 35
| 0.731481
|
from django.urls import path
from music import views
urlpatterns = [
path('index',views.music_view),
]
| true
| true
|
1c40835f9bd35870bae0825ad82318769270950d
| 2,847
|
py
|
Python
|
Examples/calibrate/calibrate.py
|
mustafacc/SiEPIC_Photonics_Package
|
50dec87c9af4f3d883134ca121e1cbbf8cf73c24
|
[
"MIT"
] | 16
|
2018-09-17T08:36:58.000Z
|
2022-03-27T12:30:50.000Z
|
Examples/calibrate/calibrate.py
|
ltianying/SiEPIC_Photonics_Package
|
8492cac275bfd2dc0f57ae9d01b3e71321a50caf
|
[
"MIT"
] | null | null | null |
Examples/calibrate/calibrate.py
|
ltianying/SiEPIC_Photonics_Package
|
8492cac275bfd2dc0f57ae9d01b3e71321a50caf
|
[
"MIT"
] | 7
|
2020-03-31T16:10:42.000Z
|
2022-03-16T16:48:38.000Z
|
"""
SiEPIC Photonics Package
Author: Mustafa Hammood
Mustafa@ece.ubc.ca
Example: Application of SiEPIC_PP calibration function
"""
#%% import package and installed dependent packages
import sys, os
# go up two directories
#dir_path = os.path.dirname(os.path.abspath(__file__))
#sys.path.append(os.path.dirname(os.path.dirname(dir_path)))
import SiEPIC_Photonics_Package as SiEPIC_PP
from SiEPIC_Photonics_Package.setup import *
#%% download .mat files from GitHub repo and parse it to a variable (data)
# response to be calibrated
file_name_in = 'MZI_data2'
file_extension = '.mat'
url = 'https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package/blob/master/Examples/'+file_name_in+file_extension+'?raw=true'
PORT = 1
input_response= SiEPIC_PP.core.download_response(url,PORT)
# reference calibration response
file_name_ref = 'MZI_data2_calib'
file_extension = '.mat'
url = 'https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package/blob/master/Examples/'+file_name_ref+file_extension+'?raw=true'
PORT = 0
ref_response= SiEPIC_PP.core.download_response(url,PORT)
#%% apply SiEPIC_PP calibration correction function
[power_corrected, power_calib_fit] = SiEPIC_PP.core.calibrate( input_response, ref_response )
#%% plot responses and save pdf
# raw responses of reference calibration data and input data
wavelength = input_response[0]*1e9
power_calib = input_response[1]
power_in = ref_response[1]
matplotlib.pyplot.figure(0)
fig1 = matplotlib.pyplot.plot(wavelength,power_calib, label='Input data', color='red')
fig2 = matplotlib.pyplot.plot(wavelength,power_calib_fit, label='Reference data fit', color='black')
fig2 = matplotlib.pyplot.plot(wavelength,power_in, label='Reference data', color='blue')
matplotlib.pyplot.legend(loc=0)
matplotlib.pyplot.ylabel('Power (dBm)', color = 'black')
matplotlib.pyplot.xlabel('Wavelength (nm)', color = 'black')
matplotlib.pyplot.setp(fig1, 'linewidth', 2.0)
matplotlib.pyplot.xlim(round(min(wavelength)),round(max(wavelength)))
matplotlib.pyplot.title("Experimental data (raw)")
matplotlib.pyplot.savefig(file_name_in+'.pdf')
matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})
# Calibrated responses of the input data
matplotlib.pyplot.figure(1)
fig1 = matplotlib.pyplot.plot(wavelength,power_corrected, label='Calibrated input data', color='red')
matplotlib.pyplot.legend(loc=0)
matplotlib.pyplot.ylabel('Response (dB)', color = 'black')
matplotlib.pyplot.xlabel('Wavelength (nm)', color = 'black')
matplotlib.pyplot.setp(fig1, 'linewidth', 2.0)
matplotlib.pyplot.xlim(round(min(wavelength)),round(max(wavelength)))
matplotlib.pyplot.title("Experimental data (calibrated)")
matplotlib.pyplot.savefig(file_name_ref+'.pdf')
matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})
| 42.492537
| 126
| 0.773797
|
import sys, os
import SiEPIC_Photonics_Package as SiEPIC_PP
from SiEPIC_Photonics_Package.setup import *
file_name_in = 'MZI_data2'
file_extension = '.mat'
url = 'https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package/blob/master/Examples/'+file_name_in+file_extension+'?raw=true'
PORT = 1
input_response= SiEPIC_PP.core.download_response(url,PORT)
file_name_ref = 'MZI_data2_calib'
file_extension = '.mat'
url = 'https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package/blob/master/Examples/'+file_name_ref+file_extension+'?raw=true'
PORT = 0
ref_response= SiEPIC_PP.core.download_response(url,PORT)
[power_corrected, power_calib_fit] = SiEPIC_PP.core.calibrate( input_response, ref_response )
wavelength = input_response[0]*1e9
power_calib = input_response[1]
power_in = ref_response[1]
matplotlib.pyplot.figure(0)
fig1 = matplotlib.pyplot.plot(wavelength,power_calib, label='Input data', color='red')
fig2 = matplotlib.pyplot.plot(wavelength,power_calib_fit, label='Reference data fit', color='black')
fig2 = matplotlib.pyplot.plot(wavelength,power_in, label='Reference data', color='blue')
matplotlib.pyplot.legend(loc=0)
matplotlib.pyplot.ylabel('Power (dBm)', color = 'black')
matplotlib.pyplot.xlabel('Wavelength (nm)', color = 'black')
matplotlib.pyplot.setp(fig1, 'linewidth', 2.0)
matplotlib.pyplot.xlim(round(min(wavelength)),round(max(wavelength)))
matplotlib.pyplot.title("Experimental data (raw)")
matplotlib.pyplot.savefig(file_name_in+'.pdf')
matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})
matplotlib.pyplot.figure(1)
fig1 = matplotlib.pyplot.plot(wavelength,power_corrected, label='Calibrated input data', color='red')
matplotlib.pyplot.legend(loc=0)
matplotlib.pyplot.ylabel('Response (dB)', color = 'black')
matplotlib.pyplot.xlabel('Wavelength (nm)', color = 'black')
matplotlib.pyplot.setp(fig1, 'linewidth', 2.0)
matplotlib.pyplot.xlim(round(min(wavelength)),round(max(wavelength)))
matplotlib.pyplot.title("Experimental data (calibrated)")
matplotlib.pyplot.savefig(file_name_ref+'.pdf')
matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})
| true
| true
|
1c4083fd4d6b1dde4ea9b1f21d7bd6b84a73e3f6
| 2,802
|
py
|
Python
|
SOP/t4/q1.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 10
|
2020-12-08T20:18:15.000Z
|
2021-06-07T20:00:07.000Z
|
SOP/t4/q1.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2
|
2021-06-28T03:42:13.000Z
|
2021-06-28T16:53:13.000Z
|
SOP/t4/q1.py
|
joao-frohlich/BCC
|
9ed74eb6d921d1280f48680677a2140c5383368d
|
[
"Apache-2.0"
] | 2
|
2021-01-14T19:59:20.000Z
|
2021-06-15T11:53:21.000Z
|
def dist(a, b):
return abs(a - b)
def find_min(diff):
index = -1
mini = float("inf")
for i in range(len(diff)):
if not diff[i][1] and mini > diff[i][0]:
mini = diff[i][0]
index = i
return index
def ssf_sorting(requests):
head = requests[0]
l = len(requests)
diff = [[0, 0] for _ in range(l)]
seek_sequence = [0] * (l + 1)
for i in range(l):
seek_sequence[i] = head
for i in range(len(diff)):
diff[i][0] = abs(requests[i] - head)
index = find_min(diff)
diff[index][1] = True
head = requests[index]
seek_sequence[len(seek_sequence) - 1] = head
return seek_sequence[1:]
def elevator_sorting(values, direction):
# True if going ascending otherwise False
actual = 0
original_head, actual_head = values[0], values[0]
left, right = [], []
seek_sequence = []
for i in range(len(values)):
if values[i] < actual_head:
left.append(values[i])
if values[i] > actual_head:
right.append(values[i])
left.sort()
right.sort()
for _ in range(2):
if direction:
for i in range(len(right)):
actual = right[i]
seek_sequence.append(actual)
actual_head = actual
direction = not direction
else:
for i in range(len(left) - 1, -1, -1):
actual = left[i]
seek_sequence.append(actual)
actual_head = actual
direction = not direction
seek_sequence = [original_head] + seek_sequence
return seek_sequence
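# Worked example for elevator_sorting (hypothetical request list): with
# values=[50, 20, 80, 10] and direction=True, the head is 50, requests above
# it are serviced first ([80]), then the direction reverses ([20, 10]),
# giving the sequence [50, 80, 20, 10].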
def fcfs(requests, deslocation_time, access_time):
print("Sequencia de execução")
print(*requests)
print("Tempos parciais")
total_time = 0
for i in range(len(requests) - 1):
actual, next = requests[i : i + 2]
request_dist = dist(actual, next)
request_time = request_dist * deslocation_time + access_time
print(
f"{actual} -> {next}: {request_dist * deslocation_time} + {access_time} = {round(request_time,2)} ms"
)
total_time += request_time
print(f"Tempo total: {total_time} ms")
def ssf(requests, deslocation_time, access_time):
requests = ssf_sorting(requests)
fcfs(requests, deslocation_time, access_time)
def elevator(requests, deslocation_time, access_time, previous_direction):
requests = elevator_sorting(requests, previous_direction == "right")
fcfs(requests, deslocation_time, access_time)
requests = [100, 131, 174, 196, 110, 142, 149, 1, 172, 82, 18]
print("--== FCFS ==--")
fcfs(requests, 0.8, 6.25)
print("\n--== SSF ==--")
ssf(requests, 0.8, 6.25)
print("\n--= Elevator ==--")
elevator(requests, 0.8, 6.25, "right")
| 26.685714
| 113
| 0.586724
|
def dist(a, b):
return abs(a - b)
def find_min(diff):
index = -1
mini = float("inf")
for i in range(len(diff)):
if not diff[i][1] and mini > diff[i][0]:
mini = diff[i][0]
index = i
return index
def ssf_sorting(requests):
head = requests[0]
l = len(requests)
diff = [[0, 0] for _ in range(l)]
seek_sequence = [0] * (l + 1)
for i in range(l):
seek_sequence[i] = head
for i in range(len(diff)):
diff[i][0] = abs(requests[i] - head)
index = find_min(diff)
diff[index][1] = True
head = requests[index]
seek_sequence[len(seek_sequence) - 1] = head
return seek_sequence[1:]
def elevator_sorting(values, direction):
actual = 0
original_head, actual_head = values[0], values[0]
left, right = [], []
seek_sequence = []
for i in range(len(values)):
if values[i] < actual_head:
left.append(values[i])
if values[i] > actual_head:
right.append(values[i])
left.sort()
right.sort()
for _ in range(2):
if direction:
for i in range(len(right)):
actual = right[i]
seek_sequence.append(actual)
actual_head = actual
direction = not direction
else:
for i in range(len(left) - 1, -1, -1):
actual = left[i]
seek_sequence.append(actual)
actual_head = actual
direction = not direction
seek_sequence = [original_head] + seek_sequence
return seek_sequence
def fcfs(requests, deslocation_time, access_time):
print("Sequencia de execução")
print(*requests)
print("Tempos parciais")
total_time = 0
for i in range(len(requests) - 1):
actual, next = requests[i : i + 2]
request_dist = dist(actual, next)
request_time = request_dist * deslocation_time + access_time
print(
f"{actual} -> {next}: {request_dist * deslocation_time} + {access_time} = {round(request_time,2)} ms"
)
total_time += request_time
print(f"Tempo total: {total_time} ms")
def ssf(requests, deslocation_time, access_time):
requests = ssf_sorting(requests)
fcfs(requests, deslocation_time, access_time)
def elevator(requests, deslocation_time, access_time, previous_direction):
requests = elevator_sorting(requests, previous_direction == "right")
fcfs(requests, deslocation_time, access_time)
requests = [100, 131, 174, 196, 110, 142, 149, 1, 172, 82, 18]
print("--== FCFS ==--")
fcfs(requests, 0.8, 6.25)
print("\n--== SSF ==--")
ssf(requests, 0.8, 6.25)
print("\n--= Elevator ==--")
elevator(requests, 0.8, 6.25, "right")
| true
| true
|
1c40842edff0a2d5d733140b134db08b5063c859
| 5,581
|
py
|
Python
|
examples/two_sample_vs_voka.py
|
icecube/voka
|
29a5d4439cf13d35e29b9308dcbf54c799be3b83
|
[
"MIT"
] | null | null | null |
examples/two_sample_vs_voka.py
|
icecube/voka
|
29a5d4439cf13d35e29b9308dcbf54c799be3b83
|
[
"MIT"
] | null | null | null |
examples/two_sample_vs_voka.py
|
icecube/voka
|
29a5d4439cf13d35e29b9308dcbf54c799be3b83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
This example exercises the two sample statistical tests
available from scipy:
* scipy.stats.ttest_ind
* scipy.stats.ks_2samp
* scipy.stats.anderson_ksamp
* scipy.stats.epps_singleton_2samp
* scipy.stats.mannwhitneyu
* scipy.stats.ranksums
* scipy.stats.wilcoxon
* scipy.stats.kruskal
* scipy.stats.friedmanchisquare
* scipy.stats.brunnermunzel
'''
import os
import pickle
import numpy
import pylab
import scipy.stats
import pylab
import voka.tools.samples
import voka.model
import voka.tools.render
def voka_2sample(sample1, sample2):
# Checkout OnlineL2_SplitTime2_SPE2itFitEnergy
# hiccup #1 (AD) ValueError: anderson_ksamp needs more than one distinct observation
# hiccup #2 (ES) numpy.linalg.LinAlgError: SVD did not converge
# hiccup #3 (TT) Ttest_indResult(statistic=nan, pvalue=nan)
# hiccup #4 (MW) ValueError: All numbers are identical in mannwhitneyu
# hiccup #5 (WP) ValueError: zero_method 'wilcox' and 'pratt' do not work if x - y is zero for all elements
# hiccup #6 (FC) ValueError: Less than 3 levels. Friedman test not appropriate.
result = dict()
r = scipy.stats.ttest_ind(sample1, sample2)
result['TTest'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
r = scipy.stats.ks_2samp(sample1, sample2)
result['KolmogorovSmirnov'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
try:
r = scipy.stats.anderson_ksamp([sample1, sample2])
result['AndersonDarling'] = {
'statistic': r.statistic,
'significance_level': r.significance_level
}
except ValueError:
#print(" skipping anderson_ksamp")
pass
try:
r = scipy.stats.epps_singleton_2samp(sample1, sample2)
result['EppsSingleton'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except numpy.linalg.LinAlgError:
#print(" skipping epps_singleton_2samp")
pass
try:
r = scipy.stats.mannwhitneyu(sample1, sample2)
result['MannWhitneyU'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
#print(" skipping mannwhitneyu")
pass
r = scipy.stats.ranksums(sample1, sample2)
result['Ranksums'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
try:
r = scipy.stats.wilcoxon(sample1, sample2)
result['Wilcoxon'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
#print(" skipping wilcoxon")
pass
try:
r = scipy.stats.kruskal(sample1, sample2)
result['Kruskal'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except:
#print(" skipping kruskal")
pass
try:
r = scipy.stats.friedmanchisquare(sample1, sample2)
result['FriedmanChiSquare'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
#print(" skipping friedmanchisquare")
pass
r = scipy.stats.brunnermunzel(sample1, sample2)
result['BrunnerMunzel'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
return result
# make two samples containing
# 'standard' numpy distributions
_range = (-5,5)
widths = [w+0.1 for w in numpy.arange(0.1, 2.0, 0.1)]
locs = [l+0.1 for l in numpy.arange(-.5, 0.5, 0.1)]
size = 100
test_samples_low = list()
test_samples_high = list()
#test_samples = [numpy.histogram(
# for w in widths]
#for w in widths:
# d = numpy.random.normal(size=1000, scale=w)
# # need to make sure the binning is the same
# h = numpy.histogram(d, range=_range)
# test_samples.append(h[0])
for l in locs:
d_low = numpy.random.normal(size=100, loc=l)
d_high = numpy.random.normal(size=1000, loc=l)
# need to make sure the binning is the same
h_low = numpy.histogram(d_low, range=_range)
h_high = numpy.histogram(d_high, range=_range)
test_samples_low.append(h_low[0])
test_samples_high.append(h_high[0])
benchmark_samples = [numpy.histogram(numpy.random.normal(size=size, scale=1.0),
range=_range)[0]
for _ in range(10)]
model = voka.model.Voka()
reference_collection = {"Benchmark%d" % idx : {"Gaussian":s}
for idx, s in enumerate(benchmark_samples)}
model.train(reference_collection)
for idx, (test_sample_low, test_sample_high) \
in enumerate(zip(test_samples_low, test_samples_high)):
print(test_sample_low)
print(test_sample_high)
print(80*"-")
#print("width = %.2f" % widths[idx])
print("loc = %.2f" % locs[idx])
benchmark_sample = numpy.histogram(numpy.random.normal(size=1000, scale=1.0))[0]
voka_2samp_result = voka_2sample(test_sample_high, benchmark_sample)
for name, result in voka_2samp_result.items():
if 'pvalue' in result:
print(" %s p-value = %.4f" % (name, result['pvalue']))
# I need to fix this.
# The test labels and the benchmark labels need to match exactly.
voka_ksamp_result = model.execute({"Gaussian" : test_sample_low})
r = model.results(voka_ksamp_result)['Gaussian']
print("%s lof = %.2f threshold = %.2f" % (r['pass'], r['lof'], r['threshold']))
voka.tools.render.draw_comparisons(test_sample_low, benchmark_samples)
pylab.show()
| 30.664835
| 111
| 0.623903
|
import os
import pickle
import numpy
import pylab
import scipy.stats
import pylab
import voka.tools.samples
import voka.model
import voka.tools.render
def voka_2sample(sample1, sample2):
    result = dict()
    r = scipy.stats.ttest_ind(sample1, sample2)
    result['TTest'] = {
        'statistic': r.statistic,
        'pvalue': r.pvalue
    }
    r = scipy.stats.ks_2samp(sample1, sample2)
    result['KolmogorovSmirnov'] = {
        'statistic': r.statistic,
        'pvalue': r.pvalue
    }
    try:
        r = scipy.stats.anderson_ksamp([sample1, sample2])
result['AndersonDarling'] = {
'statistic': r.statistic,
'significance_level': r.significance_level
}
except ValueError:
pass
try:
r = scipy.stats.epps_singleton_2samp(sample1, sample2)
result['EppsSingleton'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except numpy.linalg.LinAlgError:
pass
try:
r = scipy.stats.mannwhitneyu(sample1, sample2)
result['MannWhitneyU'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
pass
r = scipy.stats.ranksums(sample1, sample2)
result['Ranksums'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
try:
r = scipy.stats.wilcoxon(sample1, sample2)
result['Wilcoxon'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
pass
try:
r = scipy.stats.kruskal(sample1, sample2)
result['Kruskal'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except:
pass
try:
r = scipy.stats.friedmanchisquare(sample1, sample2)
result['FriedmanChiSquare'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
except ValueError:
pass
r = scipy.stats.brunnermunzel(sample1, sample2)
result['BrunnerMunzel'] = {
'statistic': r.statistic,
'pvalue': r.pvalue
}
return result
_range = (-5,5)
widths = [w+0.1 for w in numpy.arange(0.1, 2.0, 0.1)]
locs = [l+0.1 for l in numpy.arange(-.5, 0.5, 0.1)]
size = 100
test_samples_low = list()
test_samples_high = list()
for l in locs:
    d_low = numpy.random.normal(size=100, loc=l)
d_high = numpy.random.normal(size=1000, loc=l)
h_low = numpy.histogram(d_low, range=_range)
h_high = numpy.histogram(d_high, range=_range)
test_samples_low.append(h_low[0])
test_samples_high.append(h_high[0])
benchmark_samples = [numpy.histogram(numpy.random.normal(size=size, scale=1.0),
range=_range)[0]
for _ in range(10)]
model = voka.model.Voka()
reference_collection = {"Benchmark%d" % idx : {"Gaussian":s}
for idx, s in enumerate(benchmark_samples)}
model.train(reference_collection)
for idx, (test_sample_low, test_sample_high) \
in enumerate(zip(test_samples_low, test_samples_high)):
print(test_sample_low)
print(test_sample_high)
print(80*"-")
print("loc = %.2f" % locs[idx])
benchmark_sample = numpy.histogram(numpy.random.normal(size=1000, scale=1.0))[0]
voka_2samp_result = voka_2sample(test_sample_high, benchmark_sample)
for name, result in voka_2samp_result.items():
if 'pvalue' in result:
print(" %s p-value = %.4f" % (name, result['pvalue']))
voka_ksamp_result = model.execute({"Gaussian" : test_sample_low})
r = model.results(voka_ksamp_result)['Gaussian']
print("%s lof = %.2f threshold = %.2f" % (r['pass'], r['lof'], r['threshold']))
voka.tools.render.draw_comparisons(test_sample_low, benchmark_samples)
pylab.show()
| true
| true
|
1c40843db190369bf1adfa7d13266259cfc09243
| 489
|
py
|
Python
|
games/migrations/0031_auto_20171103_1744.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 1
|
2019-04-07T15:58:00.000Z
|
2019-04-07T15:58:00.000Z
|
games/migrations/0031_auto_20171103_1744.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 12
|
2020-06-05T18:15:45.000Z
|
2022-03-11T23:20:26.000Z
|
games/migrations/0031_auto_20171103_1744.py
|
munisisazade/diplom_isi
|
767531ef3a4b090d1bc0963e687b5215d6f92f53
|
[
"MIT"
] | 1
|
2019-04-07T15:58:08.000Z
|
2019-04-07T15:58:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-03 13:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('games', '0030_auto_20171009_1415'),
]
operations = [
migrations.AlterModelOptions(
name='monthlyresults',
options={'ordering': ('id',), 'verbose_name': 'Aylıq nəticə', 'verbose_name_plural': 'Aylıq nəticələr'},
),
]
| 24.45
| 116
| 0.633947
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('games', '0030_auto_20171009_1415'),
]
operations = [
migrations.AlterModelOptions(
name='monthlyresults',
options={'ordering': ('id',), 'verbose_name': 'Aylıq nəticə', 'verbose_name_plural': 'Aylıq nəticələr'},
),
]
| true
| true
|
1c4086111b3f7f8f648d6d8f43ddc9fcf8fb7656
| 3,118
|
py
|
Python
|
google/ads/google_ads/v1/proto/services/campaign_service_pb2_grpc.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v1/proto/services/campaign_service_pb2_grpc.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v1/proto/services/campaign_service_pb2_grpc.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v1.proto.resources import campaign_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2
from google.ads.google_ads.v1.proto.services import campaign_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2
class CampaignServiceStub(object):
"""Service to manage campaigns.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaign = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignService/GetCampaign',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.GetCampaignRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2.Campaign.FromString,
)
self.MutateCampaigns = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignService/MutateCampaigns',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsResponse.FromString,
)
class CampaignServiceServicer(object):
"""Service to manage campaigns.
"""
def GetCampaign(self, request, context):
"""Returns the requested campaign in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaigns(self, request, context):
"""Creates, updates, or removes campaigns. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaign': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaign,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.GetCampaignRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2.Campaign.SerializeToString,
),
'MutateCampaigns': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaigns,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.CampaignService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 47.969231
| 158
| 0.809173
|
import grpc
from google.ads.google_ads.v1.proto.resources import campaign_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2
from google.ads.google_ads.v1.proto.services import campaign_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2
class CampaignServiceStub(object):
def __init__(self, channel):
self.GetCampaign = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignService/GetCampaign',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.GetCampaignRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2.Campaign.FromString,
)
self.MutateCampaigns = channel.unary_unary(
'/google.ads.googleads.v1.services.CampaignService/MutateCampaigns',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsResponse.FromString,
)
class CampaignServiceServicer(object):
def GetCampaign(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaigns(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaign': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaign,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.GetCampaignRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_campaign__pb2.Campaign.SerializeToString,
),
'MutateCampaigns': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaigns,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_campaign__service__pb2.MutateCampaignsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.CampaignService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| true
| true
|
1c40861846f6275944c4cd057c757a7fd928f481
| 495
|
py
|
Python
|
plotly/validators/layout/scene/zaxis/titlefont/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/scene/zaxis/titlefont/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 1
|
2020-12-15T16:56:11.000Z
|
2020-12-15T16:56:11.000Z
|
plotly/validators/layout/scene/zaxis/titlefont/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='layout.scene.zaxis.titlefont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.052632
| 66
| 0.606061
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='layout.scene.zaxis.titlefont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| true
| true
|
1c40867ddad76e4deac86592b2c5229745f1c42d
| 143
|
py
|
Python
|
auth/twilio_auth.py
|
busyuqboy/gateio-crypto-trading-bot-binance-announcements-new-coins
|
e60e78ddf21bd0e272d9ddce6a86d250119a9425
|
[
"MIT"
] | null | null | null |
auth/twilio_auth.py
|
busyuqboy/gateio-crypto-trading-bot-binance-announcements-new-coins
|
e60e78ddf21bd0e272d9ddce6a86d250119a9425
|
[
"MIT"
] | null | null | null |
auth/twilio_auth.py
|
busyuqboy/gateio-crypto-trading-bot-binance-announcements-new-coins
|
e60e78ddf21bd0e272d9ddce6a86d250119a9425
|
[
"MIT"
] | null | null | null |
import yaml
def load_twilio_creds(file):
with open(file) as file:
auth = yaml.load(file, Loader=yaml.FullLoader)
return auth
| 17.875
| 54
| 0.685315
|
import yaml
def load_twilio_creds(file):
with open(file) as file:
auth = yaml.load(file, Loader=yaml.FullLoader)
return auth
| true
| true
|
1c4086c300846640ff43fa76f34d9aae7872760c
| 68
|
py
|
Python
|
7 kyu/Exes and Ohs/Exes and Ohs.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
7 kyu/Exes and Ohs/Exes and Ohs.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
7 kyu/Exes and Ohs/Exes and Ohs.py
|
anthonyjatoba/codewars
|
76b0d66dd1ba76a4d136b658920cdf85fd5c4b06
|
[
"MIT"
] | null | null | null |
def xo(s):
s = s.lower()
return s.count('x') == s.count('o')
| 22.666667
| 39
| 0.485294
|
def xo(s):
s = s.lower()
return s.count('x') == s.count('o')
| true
| true
|
1c4087d8a8fde9d7ae8bc928220890c1cc009ddf
| 8,421
|
py
|
Python
|
tools/data_converter/image_classification/image_classification_data.py
|
matarof/tpu
|
d2e3b810134b200214f42cb004f20fe6b8e2cab4
|
[
"Apache-2.0"
] | 5,098
|
2018-02-09T16:56:49.000Z
|
2022-03-31T13:50:40.000Z
|
tools/data_converter/image_classification/image_classification_data.py
|
matarof/tpu
|
d2e3b810134b200214f42cb004f20fe6b8e2cab4
|
[
"Apache-2.0"
] | 550
|
2018-02-07T05:30:06.000Z
|
2022-03-13T22:00:09.000Z
|
tools/data_converter/image_classification/image_classification_data.py
|
matarof/tpu
|
d2e3b810134b200214f42cb004f20fe6b8e2cab4
|
[
"Apache-2.0"
] | 1,920
|
2018-02-07T23:44:49.000Z
|
2022-03-29T03:11:08.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools used for converting raw data into the Image Classification format.
The image classification models expect the data within TFRecords to have the
following keys:
- image/height
- image/width
- image/format
- image/filename
- image/encoded
- image/colorspace
- image/channels
- image/class/text
- image/class/label
These fields can be deduced from
- image paths
- text of the class labels
The tools provided build upon TFDS to facilitate the conversion of examples into
TFRecords with this format.
"""
import abc
import os
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
import image_utils as image
_REQUIRED_INPUTS = [
'image_fobj',
'label',
]
_VERSION = '0.1.0'
class ImageClassificationBuilder(tfds.core.GeneratorBasedBuilder):
"""A TFDS Dataset Builder for Image Classification Datasets.
Given an implementation of ImageClassificationConfig, create a TFDS
dataset builder.
Example usage:
```
config = {ImageClassificationConfigImplementation}(...)
dataset = ImageClassificationBuilder(config)
dataset.download_and_prepare()
```
"""
VERSION = tfds.core.Version(_VERSION)
def __init__(self,
**kwargs):
super(ImageClassificationBuilder, self).__init__(**kwargs)
self._text_label_dict = {}
self._skipped = []
def _info(self):
if not issubclass(type(self.builder_config), ImageClassificationConfig):
raise ValueError('Provided config is not the correct type. Please provide'
' a config inheriting ImageClassificationConfig.')
num_labels = self.builder_config.num_labels
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
'image': {
'height': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'width': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'format': tfds.features.Text(),
'filename': tfds.features.Text(),
'encoded': tfds.features.Image(encoding_format='jpeg'),
'colorspace': tfds.features.Text(),
'channels': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'class': {
'text': tfds.features.Text(),
'label': tfds.features.ClassLabel(num_classes=num_labels),
}
}
}),
supervised_keys=('image', 'image/class/label'),
)
def _split_generators(self, dl_manager):
"""Split generators for TFDS."""
split_generators = []
if 'train' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'mode': 'train',
},
),
)
if 'validation' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'mode': 'validation',
},
),
)
if 'test' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'mode': 'test',
},
),
)
return split_generators
def _process_example(self, example):
"""Convert the required inputs into dataset outputs.
Args:
example: `dict` with keys as specified in
`ImageClassificationConfig.example_generator`.
Returns:
      A nested dict representing the processed example.
Raises:
`tf.error.InvalidArgumentError`: If the image could not be decoded.
`ValueError`: If the provided label is not an integer or string.
"""
for required_input in _REQUIRED_INPUTS:
if required_input not in example:
raise AssertionError('{} was not included in the yielded '
'example.'.format(required_input))
img_fobj = example['image_fobj']
text = str(example['label'])
img_path = img_fobj.name
base_name = os.path.basename(img_path)
channels = 3
img_format = 'JPEG'
colorspace = 'RGB'
img_bytes, img_shape = image.image_to_jpeg(fobj=img_fobj,
filename=base_name)
label = self._get_text_label(text)
assert label < self.builder_config.num_labels
return {
'image': {
'width': img_shape[0],
'height': img_shape[1],
'format': img_format,
'filename': base_name,
'encoded': img_bytes,
'colorspace': colorspace,
'channels': channels,
'class': {
'text': text,
'label': label,
}
}
}
def _generate_examples(self, mode):
"""Process specified examples into required TFDS outputs."""
generator = self.builder_config.example_generator(mode)
with tf.Graph().as_default():
for example in generator:
fname = os.path.basename(example['image_fobj'].name)
text = str(example['label'])
name = '{}-{}'.format(text, fname)
try:
processed_example = self._process_example(example)
except tf.errors.InvalidArgumentError:
# The example's image could not be processed.
self._skipped.append(name)
continue
yield name, processed_example
def _get_text_label(self, label_text):
"""Convert a string label to an integer id.
If `text_label_map` is implemented in the provided builder_config,
use this mapping. Otherwise if an entry already exists for `label_text`,
it will be used. Otherwise, a new label ID will be generated.
Args:
label_text: The `str` representing the string label.
Returns:
`int` representing the class label.
"""
if self.builder_config.text_label_map:
return self.builder_config.text_label_map[label_text]
if label_text not in self._text_label_dict:
label = len(self._text_label_dict)
self._text_label_dict[label_text] = label
return label
else:
return self._text_label_dict[label_text]
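# Illustrative note (assumption, for clarity only): when no text_label_map is
# provided, _get_text_label assigns integer ids in first-seen order, e.g.
# successive calls with 'dog', 'cat', 'dog' return 0, 1, 0.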
@six.add_metaclass(abc.ABCMeta)
class ImageClassificationConfig(tfds.core.BuilderConfig):
"""Base Class for an input config to ImageClassificationBuilder.
An implementation of ImageClassificationConfig includes an example
  generator that yields `dict` objects with the essential inputs necessary for
  conversion into the image classification TFRecord format described above.
"""
@property
@abc.abstractmethod
def num_labels(self):
"""Returns the number of labels in the dataset."""
raise NotImplementedError
@property
@abc.abstractmethod
def supported_modes(self):
"""Returns a list of the supported modes for this dataset.
Returns:
      An `iterator` consisting of a set of 'train', 'test', 'validation'.
"""
raise NotImplementedError
@property
def text_label_map(self):
"""Specify the mapping between text and integer labels.
Returns:
A `dict` that models the relationship between text labels and
integer labels.
"""
return None
@abc.abstractmethod
def example_generator(self, mode):
"""Generator returning the set of image examples for a given 'mode'.
Args:
mode: `str` indicating the mode. One of the following:
'train', 'validation', 'test'.
Yields:
`dict` with the following:
'image_fobj': `fobj` representing the loaded image. From a file path,
this can be attained by using `tf.io.gfile.GFile`.
'label': `str` representing the class label.
"""
raise NotImplementedError
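# --- Illustrative sketch (not part of the original module) -------------------
# A minimal ImageClassificationConfig implementation under the contract above.
# The directory layout, label names and file discovery below are assumptions
# made purely for illustration.
class _ExampleTwoClassConfig(ImageClassificationConfig):
  """Toy config: images stored as <root>/<mode>/<label>/<filename>.jpg."""
  @property
  def num_labels(self):
    return 2
  @property
  def supported_modes(self):
    return ['train', 'validation']
  @property
  def text_label_map(self):
    return {'cat': 0, 'dog': 1}
  def example_generator(self, mode):
    root = '/tmp/toy_dataset'  # hypothetical location
    for label in ('cat', 'dog'):
      folder = os.path.join(root, mode, label)
      for fname in tf.io.gfile.listdir(folder):
        yield {
            'image_fobj': tf.io.gfile.GFile(os.path.join(folder, fname), 'rb'),
            'label': label,
        }
# Usage would then mirror the builder docstring above, e.g.:
#   builder = ImageClassificationBuilder(
#       config=_ExampleTwoClassConfig(name='toy_two_class', version=_VERSION))
#   builder.download_and_prepare()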
| 30.400722
| 80
| 0.641135
|
import abc
import os
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
import image_utils as image
_REQUIRED_INPUTS = [
'image_fobj',
'label',
]
_VERSION = '0.1.0'
class ImageClassificationBuilder(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version(_VERSION)
def __init__(self,
**kwargs):
super(ImageClassificationBuilder, self).__init__(**kwargs)
self._text_label_dict = {}
self._skipped = []
def _info(self):
if not issubclass(type(self.builder_config), ImageClassificationConfig):
raise ValueError('Provided config is not the correct type. Please provide'
' a config inheriting ImageClassificationConfig.')
num_labels = self.builder_config.num_labels
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
'image': {
'height': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'width': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'format': tfds.features.Text(),
'filename': tfds.features.Text(),
'encoded': tfds.features.Image(encoding_format='jpeg'),
'colorspace': tfds.features.Text(),
'channels': tfds.features.Tensor(shape=(), dtype=tf.uint8),
'class': {
'text': tfds.features.Text(),
'label': tfds.features.ClassLabel(num_classes=num_labels),
}
}
}),
supervised_keys=('image', 'image/class/label'),
)
def _split_generators(self, dl_manager):
split_generators = []
if 'train' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'mode': 'train',
},
),
)
if 'validation' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'mode': 'validation',
},
),
)
if 'test' in self.builder_config.supported_modes:
split_generators.append(
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'mode': 'test',
},
),
)
return split_generators
def _process_example(self, example):
for required_input in _REQUIRED_INPUTS:
if required_input not in example:
raise AssertionError('{} was not included in the yielded '
'example.'.format(required_input))
img_fobj = example['image_fobj']
text = str(example['label'])
img_path = img_fobj.name
base_name = os.path.basename(img_path)
channels = 3
img_format = 'JPEG'
colorspace = 'RGB'
img_bytes, img_shape = image.image_to_jpeg(fobj=img_fobj,
filename=base_name)
label = self._get_text_label(text)
assert label < self.builder_config.num_labels
return {
'image': {
'width': img_shape[0],
'height': img_shape[1],
'format': img_format,
'filename': base_name,
'encoded': img_bytes,
'colorspace': colorspace,
'channels': channels,
'class': {
'text': text,
'label': label,
}
}
}
def _generate_examples(self, mode):
generator = self.builder_config.example_generator(mode)
with tf.Graph().as_default():
for example in generator:
fname = os.path.basename(example['image_fobj'].name)
text = str(example['label'])
name = '{}-{}'.format(text, fname)
try:
processed_example = self._process_example(example)
except tf.errors.InvalidArgumentError:
self._skipped.append(name)
continue
yield name, processed_example
def _get_text_label(self, label_text):
if self.builder_config.text_label_map:
return self.builder_config.text_label_map[label_text]
if label_text not in self._text_label_dict:
label = len(self._text_label_dict)
self._text_label_dict[label_text] = label
return label
else:
return self._text_label_dict[label_text]
@six.add_metaclass(abc.ABCMeta)
class ImageClassificationConfig(tfds.core.BuilderConfig):
@property
@abc.abstractmethod
def num_labels(self):
raise NotImplementedError
@property
@abc.abstractmethod
def supported_modes(self):
raise NotImplementedError
@property
def text_label_map(self):
return None
@abc.abstractmethod
def example_generator(self, mode):
raise NotImplementedError
| true
| true
|
1c4089e21b7947affd5d4e0335cbd83bde92af9f
| 10,435
|
py
|
Python
|
blueoil/generate_lmnet_config.py
|
msakai/blueoil
|
0c9160b524b17482d59ae48a0c11384f1d26dccc
|
[
"Apache-2.0"
] | 248
|
2018-10-19T01:48:42.000Z
|
2022-01-31T02:34:24.000Z
|
blueoil/generate_lmnet_config.py
|
oatawa1/blueoil
|
6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6
|
[
"Apache-2.0"
] | 1,102
|
2018-10-19T04:50:34.000Z
|
2021-08-02T04:22:10.000Z
|
blueoil/generate_lmnet_config.py
|
oatawa1/blueoil
|
6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6
|
[
"Apache-2.0"
] | 110
|
2018-10-19T01:49:02.000Z
|
2022-01-31T02:34:26.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import importlib
import os
from tempfile import NamedTemporaryFile
from jinja2 import Environment, FileSystemLoader
_TASK_TYPE_TEMPLATE_FILE = {
"classification": "classification.tpl.py",
"object_detection": "object_detection.tpl.py",
"semantic_segmentation": "semantic_segmentation.tpl.py",
"keypoint_detection": "keypoint_detection.tpl.py"
}
_NETWORK_NAME_NETWORK_MODULE_CLASS = {
"LmnetV0Quantize": {
"network_module": "lmnet_v0",
"network_class": "LmnetV0Quantize",
},
"LmnetV1Quantize": {
"network_module": "lmnet_v1",
"network_class": "LmnetV1Quantize",
},
"ResNetQuantize": {
"network_module": "lm_resnet",
"network_class": "LmResnetQuantize",
},
"LMFYoloQuantize": {
"network_module": "lm_fyolo",
"network_class": "LMFYoloQuantize",
},
"LmSegnetV1Quantize": {
"network_module": "lm_segnet_v1",
"network_class": "LmSegnetV1Quantize",
},
"LmSinglePoseV1Quantize": {
"network_module": "lm_single_pose_v1",
"network_class": "LmSinglePoseV1Quantize",
}
}
_DATASET_FORMAT_DATASET_MODULE_CLASS = {
"Caltech101": {
"dataset_module": "image_folder",
"dataset_class": "ImageFolderBase",
},
"OpenImagesV4": {
"dataset_module": "open_images_v4",
"dataset_class": "OpenImagesV4BoundingBoxBase",
},
"CamvidCustom": {
"dataset_module": "camvid",
"dataset_class": "CamvidCustom",
},
"DIV2K": {
"dataset_module": "div2k",
"dataset_class": "Div2k",
},
"Mscoco for Single-Person Pose Estimation": {
"dataset_module": "mscoco_2017",
"dataset_class": "MscocoSinglePersonKeypoints",
}
}
def generate(blueoil_config):
lmnet_config = _blueoil_to_lmnet(blueoil_config)
return _save(lmnet_config)
def _blueoil_to_lmnet(blueoil_config):
"""
Args:
blueoil_config(dict):
Returns:
dict:
"""
# default setting
default_lmnet_config = {
"test_steps": 1000,
"summarise_steps": 100,
}
dataset = {}
model_name = blueoil_config["model_name"]
template_file = _TASK_TYPE_TEMPLATE_FILE[blueoil_config["task_type"]]
network_module_class = _NETWORK_NAME_NETWORK_MODULE_CLASS[blueoil_config["network_name"]]
network_module = network_module_class["network_module"]
network_class = network_module_class["network_class"]
# dataset
dataset_module_class = _DATASET_FORMAT_DATASET_MODULE_CLASS[blueoil_config["dataset"]["format"]]
dataset_module = dataset_module_class["dataset_module"]
dataset_class = dataset_module_class["dataset_class"]
dataset_class_extend_dir = blueoil_config["dataset"]["train_path"]
dataset_class_validation_extend_dir = blueoil_config["dataset"]["test_path"]
if dataset_class_validation_extend_dir is not None:
dataset_class_property = {"extend_dir": dataset_class_extend_dir,
"validation_extend_dir": dataset_class_validation_extend_dir}
else:
dataset_class_property = {"extend_dir": dataset_class_extend_dir}
# load dataset python module from string.
_loaded_dataset_module = importlib.import_module("blueoil.datasets.{}".format(dataset_module))
# load dataset python module from string.
_loaded_dataset_class = _load_class(_loaded_dataset_module, dataset_class)
_dataset_class = type('DATASET_CLASS', (_loaded_dataset_class,), dataset_class_property)
_dataset_obj = _dataset_class(subset="train", batch_size=1)
classes = _dataset_obj.classes
# trainer
batch_size = blueoil_config["trainer"]["batch_size"]
optimizer = blueoil_config["trainer"]["optimizer"]
default_save_checkpoint_steps = 1000
default_keep_checkpoint_max = 5
if 'save_checkpoint_steps' in blueoil_config["trainer"]:
save_checkpoint_steps = blueoil_config["trainer"]['save_checkpoint_steps']
else:
save_checkpoint_steps = default_save_checkpoint_steps
if 'keep_checkpoint_max' in blueoil_config["trainer"]:
keep_checkpoint_max = blueoil_config["trainer"]["keep_checkpoint_max"]
else:
keep_checkpoint_max = default_keep_checkpoint_max
if optimizer == 'Adam':
optimizer_class = "tf.compat.v1.train.AdamOptimizer"
elif optimizer == 'Momentum':
optimizer_class = "tf.compat.v1.train.MomentumOptimizer"
else:
raise ValueError("not supported optimizer.")
initial_learning_rate = blueoil_config["trainer"]["initial_learning_rate"]
learning_rate_schedule = blueoil_config["trainer"]["learning_rate_schedule"]
max_epochs = blueoil_config["trainer"]["epochs"]
step_per_epoch = float(_dataset_obj.num_per_epoch) / batch_size
learning_rate_kwargs = None
if learning_rate_schedule == "constant":
learning_rate_func = None
elif learning_rate_schedule == "cosine":
learning_rate_func = "tf.compat.v1.train.cosine_decay"
else:
learning_rate_func = "tf.compat.v1.train.piecewise_constant"
if learning_rate_schedule == "constant":
if optimizer == 'Momentum':
optimizer_kwargs = {"momentum": 0.9, "learning_rate": initial_learning_rate}
else:
optimizer_kwargs = {"learning_rate": initial_learning_rate}
else:
if optimizer == 'Momentum':
optimizer_kwargs = {"momentum": 0.9}
else:
optimizer_kwargs = {}
if learning_rate_schedule == "2-step-decay":
learning_rate_kwargs = {
"values": [
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100
],
"boundaries": [
int((step_per_epoch * (max_epochs - 1)) / 2),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "3-step-decay":
learning_rate_kwargs = {
"values": [
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100,
initial_learning_rate / 1000
],
"boundaries": [
int((step_per_epoch * (max_epochs - 1)) * 1 / 3),
int((step_per_epoch * (max_epochs - 1)) * 2 / 3),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "3-step-decay-with-warmup":
if max_epochs < 4:
raise ValueError("epoch number must be >= 4, when 3-step-decay-with-warmup is selected.")
learning_rate_kwargs = {
"values": [
initial_learning_rate / 1000,
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100,
initial_learning_rate / 1000
],
"boundaries": [
int(step_per_epoch * 1),
int((step_per_epoch * (max_epochs - 1)) * 1 / 3),
int((step_per_epoch * (max_epochs - 1)) * 2 / 3),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "cosine":
learning_rate_kwargs = {
"learning_rate": initial_learning_rate,
"decay_steps": int(step_per_epoch * max_epochs),
}
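    # Illustrative worked example (assumption, for clarity only): with
    # initial_learning_rate=0.001, step_per_epoch=100.0 and max_epochs=10,
    # "2-step-decay" builds a piecewise-constant schedule with
    #   values     = [0.001, 0.0001, 0.00001]
    #   boundaries = [int(900 / 2), int(900)] = [450, 900]
    # i.e. the learning rate drops by 10x halfway through training and again
    # one epoch before the end.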
# common
image_size = blueoil_config["common"]["image_size"]
dataset_prefetch = blueoil_config["common"]["dataset_prefetch"]
data_augmentation = blueoil_config["common"]["data_augmentation"]
# quantize first layer
quantize_first_convolution = blueoil_config["network"]["quantize_first_convolution"]
config = {
"model_name": model_name,
"template_file": template_file,
"network_module": network_module,
"network_class": network_class,
"dataset_module": dataset_module,
"dataset_class": dataset_class,
"dataset_class_property": dataset_class_property,
"batch_size": batch_size,
"optimizer_class": optimizer_class,
"max_epochs": max_epochs,
"optimizer_kwargs": optimizer_kwargs,
"learning_rate_func": learning_rate_func,
"learning_rate_kwargs": learning_rate_kwargs,
"save_checkpoint_steps": save_checkpoint_steps,
"keep_checkpoint_max": keep_checkpoint_max,
"image_size": image_size,
"classes": classes,
"quantize_first_convolution": quantize_first_convolution,
"dataset": dataset,
"data_augmentation": data_augmentation,
"dataset_prefetch": dataset_prefetch
}
# merge dict
lmnet_config = default_lmnet_config.copy()
lmnet_config.update(config)
return lmnet_config
def _save(lmnet_config):
base_dir = os.path.dirname(os.path.abspath(__file__))
template_dir = os.path.join(base_dir, "templates")
env = Environment(loader=FileSystemLoader(os.path.join(template_dir, 'lmnet'), encoding='utf8'))
template_file = lmnet_config["template_file"]
tpl = env.get_template(template_file)
applied = tpl.render(lmnet_config)
with NamedTemporaryFile(
prefix="blueoil_config_{}".format(lmnet_config['model_name']),
suffix=".py", delete=False, mode="w") as fp:
fp.write(applied)
return fp.name
def _load_class(module, class_name):
# this converts the string from snake format into class capital format
# e.g. example_class_name -> ExampleClassName
if class_name[0].islower() or "_" in class_name:
class_name = "".join([s.capitalize() for s in class_name.split("_")])
return module.__dict__[class_name]
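# Illustrative examples (not part of the original module): _load_class converts
# snake_case names before the lookup, so
#   _load_class(mod, 'image_folder_base')  # -> mod.ImageFolderBase
#   _load_class(mod, 'Div2k')              # -> mod.Div2k (left unchanged)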
| 34.438944
| 101
| 0.653378
|
import importlib
import os
from tempfile import NamedTemporaryFile
from jinja2 import Environment, FileSystemLoader
_TASK_TYPE_TEMPLATE_FILE = {
"classification": "classification.tpl.py",
"object_detection": "object_detection.tpl.py",
"semantic_segmentation": "semantic_segmentation.tpl.py",
"keypoint_detection": "keypoint_detection.tpl.py"
}
_NETWORK_NAME_NETWORK_MODULE_CLASS = {
"LmnetV0Quantize": {
"network_module": "lmnet_v0",
"network_class": "LmnetV0Quantize",
},
"LmnetV1Quantize": {
"network_module": "lmnet_v1",
"network_class": "LmnetV1Quantize",
},
"ResNetQuantize": {
"network_module": "lm_resnet",
"network_class": "LmResnetQuantize",
},
"LMFYoloQuantize": {
"network_module": "lm_fyolo",
"network_class": "LMFYoloQuantize",
},
"LmSegnetV1Quantize": {
"network_module": "lm_segnet_v1",
"network_class": "LmSegnetV1Quantize",
},
"LmSinglePoseV1Quantize": {
"network_module": "lm_single_pose_v1",
"network_class": "LmSinglePoseV1Quantize",
}
}
_DATASET_FORMAT_DATASET_MODULE_CLASS = {
"Caltech101": {
"dataset_module": "image_folder",
"dataset_class": "ImageFolderBase",
},
"OpenImagesV4": {
"dataset_module": "open_images_v4",
"dataset_class": "OpenImagesV4BoundingBoxBase",
},
"CamvidCustom": {
"dataset_module": "camvid",
"dataset_class": "CamvidCustom",
},
"DIV2K": {
"dataset_module": "div2k",
"dataset_class": "Div2k",
},
"Mscoco for Single-Person Pose Estimation": {
"dataset_module": "mscoco_2017",
"dataset_class": "MscocoSinglePersonKeypoints",
}
}
def generate(blueoil_config):
lmnet_config = _blueoil_to_lmnet(blueoil_config)
return _save(lmnet_config)
def _blueoil_to_lmnet(blueoil_config):
default_lmnet_config = {
"test_steps": 1000,
"summarise_steps": 100,
}
dataset = {}
model_name = blueoil_config["model_name"]
template_file = _TASK_TYPE_TEMPLATE_FILE[blueoil_config["task_type"]]
network_module_class = _NETWORK_NAME_NETWORK_MODULE_CLASS[blueoil_config["network_name"]]
network_module = network_module_class["network_module"]
network_class = network_module_class["network_class"]
dataset_module_class = _DATASET_FORMAT_DATASET_MODULE_CLASS[blueoil_config["dataset"]["format"]]
dataset_module = dataset_module_class["dataset_module"]
dataset_class = dataset_module_class["dataset_class"]
dataset_class_extend_dir = blueoil_config["dataset"]["train_path"]
dataset_class_validation_extend_dir = blueoil_config["dataset"]["test_path"]
if dataset_class_validation_extend_dir is not None:
dataset_class_property = {"extend_dir": dataset_class_extend_dir,
"validation_extend_dir": dataset_class_validation_extend_dir}
else:
dataset_class_property = {"extend_dir": dataset_class_extend_dir}
_loaded_dataset_module = importlib.import_module("blueoil.datasets.{}".format(dataset_module))
_loaded_dataset_class = _load_class(_loaded_dataset_module, dataset_class)
_dataset_class = type('DATASET_CLASS', (_loaded_dataset_class,), dataset_class_property)
_dataset_obj = _dataset_class(subset="train", batch_size=1)
classes = _dataset_obj.classes
batch_size = blueoil_config["trainer"]["batch_size"]
optimizer = blueoil_config["trainer"]["optimizer"]
default_save_checkpoint_steps = 1000
default_keep_checkpoint_max = 5
if 'save_checkpoint_steps' in blueoil_config["trainer"]:
save_checkpoint_steps = blueoil_config["trainer"]['save_checkpoint_steps']
else:
save_checkpoint_steps = default_save_checkpoint_steps
if 'keep_checkpoint_max' in blueoil_config["trainer"]:
keep_checkpoint_max = blueoil_config["trainer"]["keep_checkpoint_max"]
else:
keep_checkpoint_max = default_keep_checkpoint_max
if optimizer == 'Adam':
optimizer_class = "tf.compat.v1.train.AdamOptimizer"
elif optimizer == 'Momentum':
optimizer_class = "tf.compat.v1.train.MomentumOptimizer"
else:
raise ValueError("not supported optimizer.")
initial_learning_rate = blueoil_config["trainer"]["initial_learning_rate"]
learning_rate_schedule = blueoil_config["trainer"]["learning_rate_schedule"]
max_epochs = blueoil_config["trainer"]["epochs"]
step_per_epoch = float(_dataset_obj.num_per_epoch) / batch_size
learning_rate_kwargs = None
if learning_rate_schedule == "constant":
learning_rate_func = None
elif learning_rate_schedule == "cosine":
learning_rate_func = "tf.compat.v1.train.cosine_decay"
else:
learning_rate_func = "tf.compat.v1.train.piecewise_constant"
if learning_rate_schedule == "constant":
if optimizer == 'Momentum':
optimizer_kwargs = {"momentum": 0.9, "learning_rate": initial_learning_rate}
else:
optimizer_kwargs = {"learning_rate": initial_learning_rate}
else:
if optimizer == 'Momentum':
optimizer_kwargs = {"momentum": 0.9}
else:
optimizer_kwargs = {}
if learning_rate_schedule == "2-step-decay":
learning_rate_kwargs = {
"values": [
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100
],
"boundaries": [
int((step_per_epoch * (max_epochs - 1)) / 2),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "3-step-decay":
learning_rate_kwargs = {
"values": [
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100,
initial_learning_rate / 1000
],
"boundaries": [
int((step_per_epoch * (max_epochs - 1)) * 1 / 3),
int((step_per_epoch * (max_epochs - 1)) * 2 / 3),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "3-step-decay-with-warmup":
if max_epochs < 4:
raise ValueError("epoch number must be >= 4, when 3-step-decay-with-warmup is selected.")
learning_rate_kwargs = {
"values": [
initial_learning_rate / 1000,
initial_learning_rate,
initial_learning_rate / 10,
initial_learning_rate / 100,
initial_learning_rate / 1000
],
"boundaries": [
int(step_per_epoch * 1),
int((step_per_epoch * (max_epochs - 1)) * 1 / 3),
int((step_per_epoch * (max_epochs - 1)) * 2 / 3),
int(step_per_epoch * (max_epochs - 1))
],
}
elif learning_rate_schedule == "cosine":
learning_rate_kwargs = {
"learning_rate": initial_learning_rate,
"decay_steps": int(step_per_epoch * max_epochs),
}
image_size = blueoil_config["common"]["image_size"]
dataset_prefetch = blueoil_config["common"]["dataset_prefetch"]
data_augmentation = blueoil_config["common"]["data_augmentation"]
quantize_first_convolution = blueoil_config["network"]["quantize_first_convolution"]
config = {
"model_name": model_name,
"template_file": template_file,
"network_module": network_module,
"network_class": network_class,
"dataset_module": dataset_module,
"dataset_class": dataset_class,
"dataset_class_property": dataset_class_property,
"batch_size": batch_size,
"optimizer_class": optimizer_class,
"max_epochs": max_epochs,
"optimizer_kwargs": optimizer_kwargs,
"learning_rate_func": learning_rate_func,
"learning_rate_kwargs": learning_rate_kwargs,
"save_checkpoint_steps": save_checkpoint_steps,
"keep_checkpoint_max": keep_checkpoint_max,
"image_size": image_size,
"classes": classes,
"quantize_first_convolution": quantize_first_convolution,
"dataset": dataset,
"data_augmentation": data_augmentation,
"dataset_prefetch": dataset_prefetch
}
lmnet_config = default_lmnet_config.copy()
lmnet_config.update(config)
return lmnet_config
def _save(lmnet_config):
base_dir = os.path.dirname(os.path.abspath(__file__))
template_dir = os.path.join(base_dir, "templates")
env = Environment(loader=FileSystemLoader(os.path.join(template_dir, 'lmnet'), encoding='utf8'))
template_file = lmnet_config["template_file"]
tpl = env.get_template(template_file)
applied = tpl.render(lmnet_config)
with NamedTemporaryFile(
prefix="blueoil_config_{}".format(lmnet_config['model_name']),
suffix=".py", delete=False, mode="w") as fp:
fp.write(applied)
return fp.name
def _load_class(module, class_name):
if class_name[0].islower() or "_" in class_name:
class_name = "".join([s.capitalize() for s in class_name.split("_")])
return module.__dict__[class_name]
| true
| true
|
1c408a451ce19687080e53396a1ce9bc991b7b7d
| 8,822
|
py
|
Python
|
batchgenerators/examples/brats2017/brats2017_dataloader_2D.py
|
ramesh152/batchgenerators
|
709a46a96333fd1b36205feb74059781b730b18b
|
[
"Apache-2.0"
] | null | null | null |
batchgenerators/examples/brats2017/brats2017_dataloader_2D.py
|
ramesh152/batchgenerators
|
709a46a96333fd1b36205feb74059781b730b18b
|
[
"Apache-2.0"
] | null | null | null |
batchgenerators/examples/brats2017/brats2017_dataloader_2D.py
|
ramesh152/batchgenerators
|
709a46a96333fd1b36205feb74059781b730b18b
|
[
"Apache-2.0"
] | 1
|
2019-10-19T02:20:16.000Z
|
2019-10-19T02:20:16.000Z
|
from time import time
from batchgenerators.augmentations.crop_and_pad_augmentations import crop
from batchgenerators.dataloading import MultiThreadedAugmenter
from batchgenerators.examples.brats2017.brats2017_dataloader_3D import get_list_of_patients, BraTS2017DataLoader3D, \
get_train_transform
from batchgenerators.examples.brats2017.config import brats_preprocessed_folder, num_threads_for_brats_example
from batchgenerators.transforms import Compose
from batchgenerators.utilities.data_splitting import get_split_deterministic
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from batchgenerators.dataloading.data_loader import DataLoader
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.transforms.spatial_transforms import SpatialTransform_2, MirrorTransform
from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, GammaTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
class BraTS2017DataLoader2D(DataLoader):
def __init__(self, data, batch_size, patch_size, num_threads_in_multithreaded, seed_for_shuffle=1234, return_incomplete=False,
shuffle=True, infinite=True):
"""
data must be a list of patients as returned by get_list_of_patients (and split by get_split_deterministic)
patch_size is the spatial size the retured batch will have
"""
super().__init__(data, batch_size, num_threads_in_multithreaded, seed_for_shuffle, return_incomplete, shuffle,
infinite)
self.patch_size = patch_size
self.num_modalities = 4
self.indices = list(range(len(data)))
@staticmethod
def load_patient(patient):
return BraTS2017DataLoader3D.load_patient(patient)
def generate_train_batch(self):
# DataLoader has its own methods for selecting what patients to use next, see its Documentation
idx = self.get_indices()
patients_for_batch = [self._data[i] for i in idx]
# initialize empty array for data and seg
data = np.zeros((self.batch_size, self.num_modalities, *self.patch_size), dtype=np.float32)
seg = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.float32)
metadata = []
patient_names = []
# iterate over patients_for_batch and include them in the batch
for i, j in enumerate(patients_for_batch):
patient_data, patient_metadata = self.load_patient(j)
# patient data is a memmap. If we extract just one slice then just this one slice will be read from the
# disk, so no worries!
slice_idx = np.random.choice(patient_data.shape[1])
patient_data = patient_data[:, slice_idx]
# this will only pad patient_data if its shape is smaller than self.patch_size
patient_data = pad_nd_image(patient_data, self.patch_size)
# now random crop to self.patch_size
# crop expects the data to be (b, c, x, y, z) but patient_data is (c, x, y, z) so we need to add one
# dummy dimension in order for it to work (@Todo, could be improved)
patient_data, patient_seg = crop(patient_data[:-1][None], patient_data[-1:][None], self.patch_size, crop_type="random")
data[i] = patient_data[0]
seg[i] = patient_seg[0]
metadata.append(patient_metadata)
patient_names.append(j)
return {'data': data, 'seg':seg, 'metadata':metadata, 'names':patient_names}
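# Illustrative note (not part of the original file): with patch_size=(160, 160)
# and batch_size=48 as used below, the dict returned by generate_train_batch is
# expected to hold 'data' of shape (48, 4, 160, 160) and 'seg' of shape
# (48, 1, 160, 160), i.e. one randomly chosen axial slice per sampled patient.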
if __name__ == "__main__":
patients = get_list_of_patients(brats_preprocessed_folder)
train, val = get_split_deterministic(patients, fold=0, num_splits=5, random_state=12345)
patch_size = (160, 160)
batch_size = 48
    # I recommend you don't use 'iteration over all training data' as epoch because in patch based training this is
    # really not super well defined. If you leave all arguments as default then each batch will contain randomly
# selected patients. Since we don't care about epochs here we can set num_threads_in_multithreaded to anything.
dataloader = BraTS2017DataLoader2D(train, batch_size, patch_size, 1)
batch = next(dataloader)
try:
from batchviewer import view_batch
# batch viewer can show up to 4d tensors. We can show only one sample, but that should be sufficient here
view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])
except ImportError:
view_batch = None
print("you can visualize batches with batchviewer. It's a nice and handy tool. You can get it here: "
"https://github.com/FabianIsensee/BatchViewer")
    # now we have some DataLoader. Let's go and get some augmentations
# first let's collect all shapes, you will see why later
shapes = [BraTS2017DataLoader2D.load_patient(i)[0].shape[2:] for i in patients]
max_shape = np.max(shapes, 0)
max_shape = np.max((max_shape, patch_size), 0)
# we create a new instance of DataLoader. This one will return batches of shape max_shape. Cropping/padding is
# now done by SpatialTransform. If we do it this way we avoid border artifacts (the entire brain of all cases will
# be in the batch and SpatialTransform will use zeros which is exactly what we have outside the brain)
# this is viable here but not viable if you work with different data. If you work for example with CT scans that
# can be up to 500x500x500 voxels large then you should do this differently. There, instead of using max_shape you
# should estimate what shape you need to extract so that subsequent SpatialTransform does not introduce border
# artifacts
dataloader_train = BraTS2017DataLoader2D(train, batch_size, max_shape, 1)
# during training I like to run a validation from time to time to see where I am standing. This is not a correct
# validation because just like training this is patch-based but it's good enough. We don't do augmentation for the
# validation, so patch_size is used as shape target here
dataloader_validation = BraTS2017DataLoader2D(val, batch_size, patch_size, 1)
tr_transforms = get_train_transform(patch_size)
# finally we can create multithreaded transforms that we can actually use for training
# we don't pin memory here because this is pytorch specific.
tr_gen = MultiThreadedAugmenter(dataloader_train, tr_transforms, num_processes=num_threads_for_brats_example,
num_cached_per_queue=3,
seeds=None, pin_memory=False)
    # we need fewer processes for validation because we don't apply transformations
val_gen = MultiThreadedAugmenter(dataloader_validation, None,
num_processes=max(1, num_threads_for_brats_example // 2), num_cached_per_queue=1,
seeds=None,
pin_memory=False)
# lets start the MultiThreadedAugmenter. This is not necessary but allows them to start generating training
# batches while other things run in the main thread
tr_gen.restart()
val_gen.restart()
    # now if this were a network training you would run epochs like this (remember tr_gen and val_gen generate
    # infinite examples! Don't do "for batch in tr_gen:"!!!):
num_batches_per_epoch = 10
num_validation_batches_per_epoch = 3
num_epochs = 5
# let's run this to get a time on how long it takes
time_per_epoch = []
start = time()
for epoch in range(num_epochs):
start_epoch = time()
for b in range(num_batches_per_epoch):
batch = next(tr_gen)
# do network training here with this batch
for b in range(num_validation_batches_per_epoch):
batch = next(val_gen)
# run validation here
end_epoch = time()
time_per_epoch.append(end_epoch - start_epoch)
end = time()
total_time = end - start
print("Running %d epochs took a total of %.2f seconds with time per epoch being %s" %
(num_epochs, total_time, str(time_per_epoch)))
# if you notice that you have CPU usage issues, reduce the probability with which the spatial transformations are
# applied in get_train_transform (down to 0.1 for example). SpatialTransform is the most expensive transform
    # if you wish to visualize some augmented examples, install batchviewer and run this block
if view_batch is not None:
for _ in range(4):
batch = next(tr_gen)
view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])
else:
print("Cannot visualize batches, install batchviewer first")
| 51.590643
| 131
| 0.710383
|
from time import time
from batchgenerators.augmentations.crop_and_pad_augmentations import crop
from batchgenerators.dataloading import MultiThreadedAugmenter
from batchgenerators.examples.brats2017.brats2017_dataloader_3D import get_list_of_patients, BraTS2017DataLoader3D, \
get_train_transform
from batchgenerators.examples.brats2017.config import brats_preprocessed_folder, num_threads_for_brats_example
from batchgenerators.transforms import Compose
from batchgenerators.utilities.data_splitting import get_split_deterministic
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from batchgenerators.dataloading.data_loader import DataLoader
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.transforms.spatial_transforms import SpatialTransform_2, MirrorTransform
from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, GammaTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
class BraTS2017DataLoader2D(DataLoader):
def __init__(self, data, batch_size, patch_size, num_threads_in_multithreaded, seed_for_shuffle=1234, return_incomplete=False,
shuffle=True, infinite=True):
super().__init__(data, batch_size, num_threads_in_multithreaded, seed_for_shuffle, return_incomplete, shuffle,
infinite)
self.patch_size = patch_size
self.num_modalities = 4
self.indices = list(range(len(data)))
@staticmethod
def load_patient(patient):
return BraTS2017DataLoader3D.load_patient(patient)
def generate_train_batch(self):
idx = self.get_indices()
patients_for_batch = [self._data[i] for i in idx]
data = np.zeros((self.batch_size, self.num_modalities, *self.patch_size), dtype=np.float32)
seg = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.float32)
metadata = []
patient_names = []
for i, j in enumerate(patients_for_batch):
patient_data, patient_metadata = self.load_patient(j)
slice_idx = np.random.choice(patient_data.shape[1])
patient_data = patient_data[:, slice_idx]
patient_data = pad_nd_image(patient_data, self.patch_size)
patient_data, patient_seg = crop(patient_data[:-1][None], patient_data[-1:][None], self.patch_size, crop_type="random")
data[i] = patient_data[0]
seg[i] = patient_seg[0]
metadata.append(patient_metadata)
patient_names.append(j)
return {'data': data, 'seg':seg, 'metadata':metadata, 'names':patient_names}
if __name__ == "__main__":
patients = get_list_of_patients(brats_preprocessed_folder)
train, val = get_split_deterministic(patients, fold=0, num_splits=5, random_state=12345)
patch_size = (160, 160)
batch_size = 48
    # really not super well defined. If you leave all arguments as default then each batch will contain randomly
# selected patients. Since we don't care about epochs here we can set num_threads_in_multithreaded to anything.
dataloader = BraTS2017DataLoader2D(train, batch_size, patch_size, 1)
batch = next(dataloader)
try:
from batchviewer import view_batch
view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])
except ImportError:
view_batch = None
print("you can visualize batches with batchviewer. It's a nice and handy tool. You can get it here: "
"https://github.com/FabianIsensee/BatchViewer")
    # now we have some DataLoader. Let's go and get some augmentations
shapes = [BraTS2017DataLoader2D.load_patient(i)[0].shape[2:] for i in patients]
max_shape = np.max(shapes, 0)
max_shape = np.max((max_shape, patch_size), 0)
# we create a new instance of DataLoader. This one will return batches of shape max_shape. Cropping/padding is
# now done by SpatialTransform. If we do it this way we avoid border artifacts (the entire brain of all cases will
# be in the batch and SpatialTransform will use zeros which is exactly what we have outside the brain)
# this is viable here but not viable if you work with different data. If you work for example with CT scans that
# can be up to 500x500x500 voxels large then you should do this differently. There, instead of using max_shape you
# should estimate what shape you need to extract so that subsequent SpatialTransform does not introduce border
# artifacts
dataloader_train = BraTS2017DataLoader2D(train, batch_size, max_shape, 1)
# during training I like to run a validation from time to time to see where I am standing. This is not a correct
# validation because just like training this is patch-based but it's good enough. We don't do augmentation for the
# validation, so patch_size is used as shape target here
dataloader_validation = BraTS2017DataLoader2D(val, batch_size, patch_size, 1)
tr_transforms = get_train_transform(patch_size)
# finally we can create multithreaded transforms that we can actually use for training
# we don't pin memory here because this is pytorch specific.
tr_gen = MultiThreadedAugmenter(dataloader_train, tr_transforms, num_processes=num_threads_for_brats_example,
num_cached_per_queue=3,
seeds=None, pin_memory=False)
val_gen = MultiThreadedAugmenter(dataloader_validation, None,
num_processes=max(1, num_threads_for_brats_example // 2), num_cached_per_queue=1,
seeds=None,
pin_memory=False)
tr_gen.restart()
val_gen.restart()
num_batches_per_epoch = 10
num_validation_batches_per_epoch = 3
num_epochs = 5
# let's run this to get a time on how long it takes
time_per_epoch = []
start = time()
for epoch in range(num_epochs):
start_epoch = time()
for b in range(num_batches_per_epoch):
batch = next(tr_gen)
for b in range(num_validation_batches_per_epoch):
batch = next(val_gen)
end_epoch = time()
time_per_epoch.append(end_epoch - start_epoch)
end = time()
total_time = end - start
print("Running %d epochs took a total of %.2f seconds with time per epoch being %s" %
(num_epochs, total_time, str(time_per_epoch)))
if view_batch is not None:
for _ in range(4):
batch = next(tr_gen)
view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])
else:
print("Cannot visualize batches, install batchviewer first")
| true
| true
|
1c408a9aee927765791ec0f8397a13865a1b4e1a
| 599
|
py
|
Python
|
jaxlib/version.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jaxlib/version.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jaxlib/version.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1.36"
| 37.4375
| 74
| 0.75793
|
__version__ = "0.1.36"
| true
| true
|
1c408b947d5026f58a30e2878874e81d9390a574
| 20,386
|
py
|
Python
|
waymo_open_dataset/utils/range_image_utils.py
|
kprohith/waymo-open-dataset
|
9c519584cb95c6e2d3c909722298978668075542
|
[
"Apache-2.0"
] | 3
|
2019-09-19T02:09:09.000Z
|
2019-10-05T11:50:47.000Z
|
waymo_open_dataset/utils/range_image_utils.py
|
kprohith/waymo-open-dataset
|
9c519584cb95c6e2d3c909722298978668075542
|
[
"Apache-2.0"
] | null | null | null |
waymo_open_dataset/utils/range_image_utils.py
|
kprohith/waymo-open-dataset
|
9c519584cb95c6e2d3c909722298978668075542
|
[
"Apache-2.0"
] | 1
|
2020-03-28T16:50:05.000Z
|
2020-03-28T16:50:05.000Z
|
# Copyright 2019 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils to manage range images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
__all__ = [
'compute_range_image_polar', 'compute_range_image_cartesian',
'build_range_image_from_point_cloud', 'build_camera_depth_image',
'extract_point_cloud_from_range_image', 'crop_range_image',
'compute_inclination'
]
def _combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def _scatter_nd_with_pool(index,
value,
shape,
pool_method=tf.unsorted_segment_max):
"""Similar as tf.scatter_nd but allows custom pool method.
tf.scatter_nd accumulates (sums) values if there are duplicate indices.
Args:
index: [N, 2] tensor. Inner dims are coordinates along height (row) and then
width (col).
value: [N] tensor. Values to be scattered.
shape: (height,width) list that specifies the shape of the output tensor.
pool_method: pool method when there are multiple points scattered to one
location.
Returns:
image: tensor of shape with value scattered. Missing pixels are set to 0.
"""
if len(shape) != 2:
raise ValueError('shape must be of size 2')
height = shape[0]
width = shape[1]
# idx: [N]
index_encoded, idx = tf.unique(index[:, 0] * width + index[:, 1])
value_pooled = pool_method(value, idx, tf.size(index_encoded))
index_unique = tf.stack(
[index_encoded // width,
tf.mod(index_encoded, width)], axis=-1)
image = tf.scatter_nd(index_unique, value_pooled, [height, width])
return image
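# Added illustration (hedged): a minimal usage sketch of _scatter_nd_with_pool under the
# TF1.x API used in this module. Two of the three points land on pixel (0, 1), so max
# pooling keeps the larger value; the names below are placeholders, not part of the API.
def _example_scatter_nd_with_pool():
  """Scatters three values onto a 2x3 image with max pooling (added example)."""
  example_index = tf.constant([[0, 1], [0, 1], [1, 2]], dtype=tf.int32)
  example_value = tf.constant([2.0, 5.0, 7.0])
  example_image = _scatter_nd_with_pool(example_index, example_value, [2, 3])
  with tf.Session() as sess:
    print(sess.run(example_image))  # [[0. 5. 0.] [0. 0. 7.]]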
def compute_range_image_polar(range_image,
extrinsic,
inclination,
dtype=tf.float64,
scope=None):
"""Computes range image polar coordinates.
Args:
range_image: [B, H, W] tensor. Lidar range images.
extrinsic: [B, 4, 4] tensor. Lidar extrinsic.
inclination: [B, H] tensor. Inclination for each row of the range image.
0-th entry corresponds to the 0-th row of the range image.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_polar: [B, H, W, 3] polar coordinates.
"""
# pylint: disable=unbalanced-tuple-unpacking
_, height, width = _combined_static_and_dynamic_shape(range_image)
range_image_dtype = range_image.dtype
range_image = tf.cast(range_image, dtype)
extrinsic = tf.cast(extrinsic, dtype)
inclination = tf.cast(inclination, dtype)
with tf.name_scope(scope, 'ComputeRangeImagePolar',
[range_image, extrinsic, inclination]):
with tf.name_scope('Azimuth'):
# [B].
az_correction = tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0])
# [W].
ratios = (tf.cast(tf.range(width, 0, -1), dtype=dtype) - .5) / tf.cast(
width, dtype)
# [B, W].
azimuth = (ratios * 2. - 1.) * np.pi - tf.expand_dims(az_correction, -1)
# [B, H, W]
azimuth_tile = tf.tile(azimuth[:, tf.newaxis, :], [1, height, 1])
# [B, H, W]
inclination_tile = tf.tile(inclination[:, :, tf.newaxis], [1, 1, width])
range_image_polar = tf.stack([azimuth_tile, inclination_tile, range_image],
axis=-1)
return tf.cast(range_image_polar, dtype=range_image_dtype)
def compute_range_image_cartesian(range_image_polar,
extrinsic,
pixel_pose=None,
frame_pose=None,
dtype=tf.float64,
scope=None):
"""Computes range image cartesian coordinates from polar ones.
Args:
range_image_polar: [B, H, W, 3] float tensor. Lidar range image in polar
coordinate in sensor frame.
extrinsic: [B, 4, 4] float tensor. Lidar extrinsic.
pixel_pose: [B, H, W, 4, 4] float tensor. If not None, it sets pose for each
range image pixel.
frame_pose: [B, 4, 4] float tensor. This must be set when pixel_pose is set.
It decides the vehicle frame at which the cartesian points are computed.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_cartesian: [B, H, W, 3] cartesian coordinates.
"""
range_image_polar_dtype = range_image_polar.dtype
range_image_polar = tf.cast(range_image_polar, dtype)
extrinsic = tf.cast(extrinsic, dtype)
if pixel_pose is not None:
pixel_pose = tf.cast(pixel_pose, dtype)
if frame_pose is not None:
frame_pose = tf.cast(frame_pose, dtype)
with tf.name_scope(scope, 'ComputeRangeImageCartesian',
[range_image_polar, extrinsic, pixel_pose, frame_pose]):
azimuth, inclination, range_image_range = tf.unstack(
range_image_polar, axis=-1)
cos_azimuth = tf.cos(azimuth)
sin_azimuth = tf.sin(azimuth)
cos_incl = tf.cos(inclination)
sin_incl = tf.sin(inclination)
# [B, H, W].
x = cos_azimuth * cos_incl * range_image_range
y = sin_azimuth * cos_incl * range_image_range
z = sin_incl * range_image_range
# [B, H, W, 3]
range_image_points = tf.stack([x, y, z], -1)
# [B, 3, 3]
rotation = extrinsic[..., 0:3, 0:3]
# translation [B, 1, 3]
translation = tf.expand_dims(tf.expand_dims(extrinsic[..., 0:3, 3], 1), 1)
# To vehicle frame.
# [B, H, W, 3]
range_image_points = tf.einsum('bkr,bijr->bijk', rotation,
range_image_points) + translation
if pixel_pose is not None:
# To global frame.
# [B, H, W, 3, 3]
pixel_pose_rotation = pixel_pose[..., 0:3, 0:3]
# [B, H, W, 3]
pixel_pose_translation = pixel_pose[..., 0:3, 3]
# [B, H, W, 3]
range_image_points = tf.einsum(
'bhwij,bhwj->bhwi', pixel_pose_rotation,
range_image_points) + pixel_pose_translation
if frame_pose is None:
raise ValueError('frame_pose must be set when pixel_pose is set.')
# To vehicle frame corresponding to the given frame_pose
# [B, 4, 4]
world_to_vehicle = tf.matrix_inverse(frame_pose)
world_to_vehicle_rotation = world_to_vehicle[:, 0:3, 0:3]
world_to_vehicle_translation = world_to_vehicle[:, 0:3, 3]
# [B, H, W, 3]
range_image_points = tf.einsum(
'bij,bhwj->bhwi', world_to_vehicle_rotation,
range_image_points) + world_to_vehicle_translation[:, tf.newaxis,
tf.newaxis, :]
range_image_points = tf.cast(
range_image_points, dtype=range_image_polar_dtype)
return range_image_points
def build_camera_depth_image(range_image_cartesian,
extrinsic,
camera_projection,
camera_image_size,
camera_name,
pool_method=tf.unsorted_segment_min,
scope=None):
"""Builds camera depth image given camera projections.
  The depth value is the distance between a lidar point and the camera frame
  origin. It is determined by the cartesian coordinates in the vehicle frame and
  the camera extrinsic. Optionally, the cartesian coordinates can be given in the
  vehicle frame corresponding to each pixel pose, so that the generated depth
  takes vehicle motion into account.
Args:
range_image_cartesian: [B, H, W, 3] tensor. Range image points in vehicle
frame. Note that if the range image is provided by pixel_pose, then you
can optionally pass in the cartesian coordinates in each pixel frame.
extrinsic: [B, 4, 4] tensor. Camera extrinsic.
camera_projection: [B, H, W, 6] tensor. Each range image pixel is associated
with at most two camera projections. See dataset.proto for more details.
camera_image_size: a list of [width, height] integers.
camera_name: an integer that identifies a camera. See dataset.proto.
pool_method: pooling method when multiple lidar points are projected to one
image pixel.
scope: the name scope.
Returns:
image: [B, width, height] depth image generated.
"""
with tf.name_scope(scope, 'BuildCameraDepthImage',
[range_image_cartesian, extrinsic, camera_projection]):
# [B, 4, 4]
vehicle_to_camera = tf.matrix_inverse(extrinsic)
# [B, 3, 3]
vehicle_to_camera_rotation = vehicle_to_camera[:, 0:3, 0:3]
# [B, 3]
vehicle_to_camera_translation = vehicle_to_camera[:, 0:3, 3]
# [B, H, W, 3]
range_image_camera = tf.einsum(
'bij,bhwj->bhwi', vehicle_to_camera_rotation,
range_image_cartesian) + vehicle_to_camera_translation[:, tf.newaxis,
tf.newaxis, :]
# [B, H, W]
range_image_camera_norm = tf.norm(range_image_camera, axis=-1)
camera_projection_mask_1 = tf.tile(
tf.equal(camera_projection[..., 0:1], camera_name), [1, 1, 1, 2])
camera_projection_mask_2 = tf.tile(
tf.equal(camera_projection[..., 3:4], camera_name), [1, 1, 1, 2])
camera_projection_selected = tf.ones_like(
camera_projection[..., 1:3], dtype=camera_projection.dtype) * -1
camera_projection_selected = tf.where(camera_projection_mask_2,
camera_projection[..., 4:6],
camera_projection_selected)
# [B, H, W, 2]
camera_projection_selected = tf.where(camera_projection_mask_1,
camera_projection[..., 1:3],
camera_projection_selected)
# [B, H, W]
camera_projection_mask = tf.logical_or(camera_projection_mask_1,
camera_projection_mask_2)[..., 0]
def fn(args):
"""Builds depth image for a single frame."""
# NOTE: Do not use ri_range > 0 as mask as missing range image pixels are
# not necessarily populated as range = 0.
mask, ri_range, cp = args
mask_ids = tf.where(mask)
index = tf.gather_nd(
tf.stack([cp[..., 1], cp[..., 0]], axis=-1), mask_ids)
value = tf.gather_nd(ri_range, mask_ids)
return _scatter_nd_with_pool(index, value, camera_image_size, pool_method)
images = tf.map_fn(
fn,
elems=[
camera_projection_mask, range_image_camera_norm,
camera_projection_selected
],
dtype=range_image_camera_norm.dtype,
back_prop=False)
return images
def build_range_image_from_point_cloud(points_vehicle_frame,
num_points,
extrinsic,
inclination,
range_image_size,
dtype=tf.float64,
scope=None):
"""Build virtual range image from point cloud assuming uniform azimuth.
Args:
points_vehicle_frame: tf tensor with shape [B, N, 3] in the vehicle frame.
num_points: [B] int32 tensor indicating the number of points for each frame.
extrinsic: tf tensor with shape [B, 4, 4].
inclination: tf tensor of shape [B, H] that is the inclination angle per
      row, sorted from highest value to lowest.
range_image_size: a size 2 [height, width] list that configures the size of
the range image.
dtype: the data type to use.
scope: tf name scope.
Returns:
range_images : [B, H, W, ?] or [B, H, W] tensor. Range images built from the
given points. Data type is the same as that of points_vehicle_frame. 0.0
is populated when a pixel is missing.
ri_indices: tf int32 tensor [B, N, 2]. It represents the range image index
for each point.
ri_ranges: [B, N] tensor. It represents the distance between a point and
sensor frame origin of each point.
"""
with tf.name_scope(
scope,
'BuildRangeImageFromPointCloud',
values=[points_vehicle_frame, extrinsic, inclination]):
points_vehicle_frame_dtype = points_vehicle_frame.dtype
points_vehicle_frame = tf.cast(points_vehicle_frame, dtype)
extrinsic = tf.cast(extrinsic, dtype)
inclination = tf.cast(inclination, dtype)
height, width = range_image_size
# [B, 4, 4]
vehicle_to_laser = tf.matrix_inverse(extrinsic)
# [B, 3, 3]
rotation = vehicle_to_laser[:, 0:3, 0:3]
# [B, 1, 3]
translation = tf.expand_dims(vehicle_to_laser[::, 0:3, 3], 1)
# Points in sensor frame
# [B, N, 3]
points = tf.einsum('bij,bkj->bik', points_vehicle_frame,
rotation) + translation
# [B, N]
xy_norm = tf.norm(points[..., 0:2], axis=-1)
# [B, N]
point_inclination = tf.atan2(points[..., 2], xy_norm)
# [B, N, H]
point_inclination_diff = tf.abs(
tf.expand_dims(point_inclination, axis=-1) -
tf.expand_dims(inclination, axis=1))
# [B, N]
point_ri_row_indices = tf.argmin(
point_inclination_diff, axis=-1, output_type=tf.int32)
# [B, 1], within [-pi, pi]
az_correction = tf.expand_dims(
tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0]), -1)
# [B, N], within [-2pi, 2pi]
point_azimuth = tf.atan2(points[..., 1], points[..., 0]) + az_correction
point_azimuth_gt_pi_mask = point_azimuth > np.pi
point_azimuth_lt_minus_pi_mask = point_azimuth < -np.pi
point_azimuth = point_azimuth - tf.cast(point_azimuth_gt_pi_mask,
dtype) * 2 * np.pi
point_azimuth = point_azimuth + tf.cast(point_azimuth_lt_minus_pi_mask,
dtype) * 2 * np.pi
# [B, N].
point_ri_col_indices = width - 1.0 + 0.5 - (point_azimuth +
np.pi) / (2.0 * np.pi) * width
point_ri_col_indices = tf.cast(tf.round(point_ri_col_indices), tf.int32)
with tf.control_dependencies([
tf.assert_non_negative(point_ri_col_indices),
tf.assert_less(point_ri_col_indices, tf.cast(width, tf.int32))
]):
# [B, N, 2]
ri_indices = tf.stack([point_ri_row_indices, point_ri_col_indices], -1)
# [B, N]
ri_ranges = tf.cast(
tf.norm(points, axis=-1), dtype=points_vehicle_frame_dtype)
def fn(args):
"""Builds a range image for each frame.
Args:
args: a tuple containing:
- ri_index: [N, 2]
- ri_value: [N]
- num_point: scalar tensor
Returns:
range_image: [H, W]
"""
ri_index, ri_value, num_point = args
# pylint: disable=unbalanced-tuple-unpacking
ri_index = ri_index[0:num_point, :]
ri_value = ri_value[0:num_point]
range_image = _scatter_nd_with_pool(ri_index, ri_value, [height, width],
tf.unsorted_segment_max)
return range_image
range_images = tf.map_fn(
fn,
elems=[ri_indices, ri_ranges, num_points],
dtype=points_vehicle_frame_dtype,
back_prop=False)
return range_images, ri_indices, ri_ranges
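# Added illustration (hedged): a shape-level sketch of building a 64 x 2650 range image for
# one frame of randomly placed points. The identity extrinsic, the [-0.3, 0.3] inclination
# span and the point cloud itself are placeholders, not values from a real calibration.
def _example_build_range_image_from_point_cloud():
  example_points = tf.random.uniform([1, 1000, 3], minval=1.0, maxval=50.0)  # [B, N, 3]
  example_num_points = tf.constant([1000], dtype=tf.int32)                   # [B]
  example_extrinsic = tf.eye(4, batch_shape=[1])                             # [B, 4, 4]
  example_inclination = tf.linspace(0.3, -0.3, 64)[tf.newaxis, :]            # [B, H], high to low
  return build_range_image_from_point_cloud(
      example_points, example_num_points, example_extrinsic, example_inclination,
      range_image_size=[64, 2650])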
def extract_point_cloud_from_range_image(range_image,
extrinsic,
inclination,
pixel_pose=None,
frame_pose=None,
dtype=tf.float64,
scope=None):
"""Extracts point cloud from range image.
Args:
range_image: [B, H, W] tensor. Lidar range images.
extrinsic: [B, 4, 4] tensor. Lidar extrinsic.
inclination: [B, H] tensor. Inclination for each row of the range image.
0-th entry corresponds to the 0-th row of the range image.
pixel_pose: [B, H, W, 4, 4] tensor. If not None, it sets pose for each range
image pixel.
frame_pose: [B, 4, 4] tensor. This must be set when pixel_pose is set. It
decides the vehicle frame at which the cartesian points are computed.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_cartesian: [B, H, W, 3] with {x, y, z} as inner dims in vehicle
frame.
"""
with tf.name_scope(
scope, 'ExtractPointCloudFromRangeImage',
[range_image, extrinsic, inclination, pixel_pose, frame_pose]):
range_image_polar = compute_range_image_polar(
range_image, extrinsic, inclination, dtype=dtype)
range_image_cartesian = compute_range_image_cartesian(
range_image_polar,
extrinsic,
pixel_pose=pixel_pose,
frame_pose=frame_pose,
dtype=dtype)
return range_image_cartesian
def crop_range_image(range_images, new_width, scope=None):
"""Crops range image by shrinking the width.
Requires: new_width is smaller than the existing width.
Args:
range_images: [B, H, W, ...]
new_width: an integer.
scope: the name scope.
Returns:
range_image_crops: [B, H, new_width, ...]
"""
# pylint: disable=unbalanced-tuple-unpacking
shape = _combined_static_and_dynamic_shape(range_images)
width = shape[2]
if width == new_width:
return range_images
if new_width < 1:
raise ValueError('new_width must be positive.')
if width is not None and new_width >= width:
raise ValueError('new_width {} should be < the old width {}.'.format(
new_width, width))
with tf.control_dependencies([tf.assert_less(new_width, width)]):
with tf.name_scope(scope, 'CropRangeImage', [range_images]):
diff = width - new_width
left = diff // 2
right = diff - left
range_image_crops = range_images[:, :, left:-right, ...]
return range_image_crops
def compute_inclination(inclination_range, height, scope=None):
"""Compute uniform inclination range based the given range and height.
Args:
inclination_range: [..., 2] tensor. Inner dims are [min inclination, max
inclination].
height: an integer indicates height of the range image.
scope: the name scope.
Returns:
inclination: [..., height] tensor. Inclinations computed.
"""
with tf.name_scope(scope, 'ComputeInclination', [inclination_range]):
diff = inclination_range[..., 1] - inclination_range[..., 0]
inclination = (
(.5 + tf.cast(tf.range(0, height), dtype=inclination_range.dtype)) /
tf.cast(height, inclination_range.dtype) *
tf.expand_dims(diff, axis=-1) + inclination_range[..., 0:1])
return inclination
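# Added illustration (hedged): compute_inclination simply spreads `height` beam angles
# uniformly over the given span; for a [-0.3, 0.3] span and height 4 the bin centers are
# [-0.225, -0.075, 0.075, 0.225], lowest row first. The values below are placeholders.
def _example_compute_inclination():
  example_range = tf.constant([-0.3, 0.3])
  return compute_inclination(example_range, height=4)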
| 39.128599
| 80
| 0.629304
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
__all__ = [
'compute_range_image_polar', 'compute_range_image_cartesian',
'build_range_image_from_point_cloud', 'build_camera_depth_image',
'extract_point_cloud_from_range_image', 'crop_range_image',
'compute_inclination'
]
def _combined_static_and_dynamic_shape(tensor):
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def _scatter_nd_with_pool(index,
value,
shape,
pool_method=tf.unsorted_segment_max):
if len(shape) != 2:
raise ValueError('shape must be of size 2')
height = shape[0]
width = shape[1]
index_encoded, idx = tf.unique(index[:, 0] * width + index[:, 1])
value_pooled = pool_method(value, idx, tf.size(index_encoded))
index_unique = tf.stack(
[index_encoded // width,
tf.mod(index_encoded, width)], axis=-1)
image = tf.scatter_nd(index_unique, value_pooled, [height, width])
return image
def compute_range_image_polar(range_image,
extrinsic,
inclination,
dtype=tf.float64,
scope=None):
_, height, width = _combined_static_and_dynamic_shape(range_image)
range_image_dtype = range_image.dtype
range_image = tf.cast(range_image, dtype)
extrinsic = tf.cast(extrinsic, dtype)
inclination = tf.cast(inclination, dtype)
with tf.name_scope(scope, 'ComputeRangeImagePolar',
[range_image, extrinsic, inclination]):
with tf.name_scope('Azimuth'):
az_correction = tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0])
ratios = (tf.cast(tf.range(width, 0, -1), dtype=dtype) - .5) / tf.cast(
width, dtype)
azimuth = (ratios * 2. - 1.) * np.pi - tf.expand_dims(az_correction, -1)
azimuth_tile = tf.tile(azimuth[:, tf.newaxis, :], [1, height, 1])
inclination_tile = tf.tile(inclination[:, :, tf.newaxis], [1, 1, width])
range_image_polar = tf.stack([azimuth_tile, inclination_tile, range_image],
axis=-1)
return tf.cast(range_image_polar, dtype=range_image_dtype)
def compute_range_image_cartesian(range_image_polar,
extrinsic,
pixel_pose=None,
frame_pose=None,
dtype=tf.float64,
scope=None):
range_image_polar_dtype = range_image_polar.dtype
range_image_polar = tf.cast(range_image_polar, dtype)
extrinsic = tf.cast(extrinsic, dtype)
if pixel_pose is not None:
pixel_pose = tf.cast(pixel_pose, dtype)
if frame_pose is not None:
frame_pose = tf.cast(frame_pose, dtype)
with tf.name_scope(scope, 'ComputeRangeImageCartesian',
[range_image_polar, extrinsic, pixel_pose, frame_pose]):
azimuth, inclination, range_image_range = tf.unstack(
range_image_polar, axis=-1)
cos_azimuth = tf.cos(azimuth)
sin_azimuth = tf.sin(azimuth)
cos_incl = tf.cos(inclination)
sin_incl = tf.sin(inclination)
x = cos_azimuth * cos_incl * range_image_range
y = sin_azimuth * cos_incl * range_image_range
z = sin_incl * range_image_range
range_image_points = tf.stack([x, y, z], -1)
rotation = extrinsic[..., 0:3, 0:3]
translation = tf.expand_dims(tf.expand_dims(extrinsic[..., 0:3, 3], 1), 1)
range_image_points = tf.einsum('bkr,bijr->bijk', rotation,
range_image_points) + translation
if pixel_pose is not None:
pixel_pose_rotation = pixel_pose[..., 0:3, 0:3]
pixel_pose_translation = pixel_pose[..., 0:3, 3]
range_image_points = tf.einsum(
'bhwij,bhwj->bhwi', pixel_pose_rotation,
range_image_points) + pixel_pose_translation
if frame_pose is None:
raise ValueError('frame_pose must be set when pixel_pose is set.')
world_to_vehicle = tf.matrix_inverse(frame_pose)
world_to_vehicle_rotation = world_to_vehicle[:, 0:3, 0:3]
world_to_vehicle_translation = world_to_vehicle[:, 0:3, 3]
range_image_points = tf.einsum(
'bij,bhwj->bhwi', world_to_vehicle_rotation,
range_image_points) + world_to_vehicle_translation[:, tf.newaxis,
tf.newaxis, :]
range_image_points = tf.cast(
range_image_points, dtype=range_image_polar_dtype)
return range_image_points
def build_camera_depth_image(range_image_cartesian,
extrinsic,
camera_projection,
camera_image_size,
camera_name,
pool_method=tf.unsorted_segment_min,
scope=None):
with tf.name_scope(scope, 'BuildCameraDepthImage',
[range_image_cartesian, extrinsic, camera_projection]):
vehicle_to_camera = tf.matrix_inverse(extrinsic)
vehicle_to_camera_rotation = vehicle_to_camera[:, 0:3, 0:3]
vehicle_to_camera_translation = vehicle_to_camera[:, 0:3, 3]
range_image_camera = tf.einsum(
'bij,bhwj->bhwi', vehicle_to_camera_rotation,
range_image_cartesian) + vehicle_to_camera_translation[:, tf.newaxis,
tf.newaxis, :]
range_image_camera_norm = tf.norm(range_image_camera, axis=-1)
camera_projection_mask_1 = tf.tile(
tf.equal(camera_projection[..., 0:1], camera_name), [1, 1, 1, 2])
camera_projection_mask_2 = tf.tile(
tf.equal(camera_projection[..., 3:4], camera_name), [1, 1, 1, 2])
camera_projection_selected = tf.ones_like(
camera_projection[..., 1:3], dtype=camera_projection.dtype) * -1
camera_projection_selected = tf.where(camera_projection_mask_2,
camera_projection[..., 4:6],
camera_projection_selected)
camera_projection_selected = tf.where(camera_projection_mask_1,
camera_projection[..., 1:3],
camera_projection_selected)
camera_projection_mask = tf.logical_or(camera_projection_mask_1,
camera_projection_mask_2)[..., 0]
def fn(args):
mask, ri_range, cp = args
mask_ids = tf.where(mask)
index = tf.gather_nd(
tf.stack([cp[..., 1], cp[..., 0]], axis=-1), mask_ids)
value = tf.gather_nd(ri_range, mask_ids)
return _scatter_nd_with_pool(index, value, camera_image_size, pool_method)
images = tf.map_fn(
fn,
elems=[
camera_projection_mask, range_image_camera_norm,
camera_projection_selected
],
dtype=range_image_camera_norm.dtype,
back_prop=False)
return images
def build_range_image_from_point_cloud(points_vehicle_frame,
num_points,
extrinsic,
inclination,
range_image_size,
dtype=tf.float64,
scope=None):
with tf.name_scope(
scope,
'BuildRangeImageFromPointCloud',
values=[points_vehicle_frame, extrinsic, inclination]):
points_vehicle_frame_dtype = points_vehicle_frame.dtype
points_vehicle_frame = tf.cast(points_vehicle_frame, dtype)
extrinsic = tf.cast(extrinsic, dtype)
inclination = tf.cast(inclination, dtype)
height, width = range_image_size
vehicle_to_laser = tf.matrix_inverse(extrinsic)
rotation = vehicle_to_laser[:, 0:3, 0:3]
translation = tf.expand_dims(vehicle_to_laser[::, 0:3, 3], 1)
points = tf.einsum('bij,bkj->bik', points_vehicle_frame,
rotation) + translation
xy_norm = tf.norm(points[..., 0:2], axis=-1)
point_inclination = tf.atan2(points[..., 2], xy_norm)
point_inclination_diff = tf.abs(
tf.expand_dims(point_inclination, axis=-1) -
tf.expand_dims(inclination, axis=1))
point_ri_row_indices = tf.argmin(
point_inclination_diff, axis=-1, output_type=tf.int32)
az_correction = tf.expand_dims(
tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0]), -1)
point_azimuth = tf.atan2(points[..., 1], points[..., 0]) + az_correction
point_azimuth_gt_pi_mask = point_azimuth > np.pi
point_azimuth_lt_minus_pi_mask = point_azimuth < -np.pi
point_azimuth = point_azimuth - tf.cast(point_azimuth_gt_pi_mask,
dtype) * 2 * np.pi
point_azimuth = point_azimuth + tf.cast(point_azimuth_lt_minus_pi_mask,
dtype) * 2 * np.pi
point_ri_col_indices = width - 1.0 + 0.5 - (point_azimuth +
np.pi) / (2.0 * np.pi) * width
point_ri_col_indices = tf.cast(tf.round(point_ri_col_indices), tf.int32)
with tf.control_dependencies([
tf.assert_non_negative(point_ri_col_indices),
tf.assert_less(point_ri_col_indices, tf.cast(width, tf.int32))
]):
ri_indices = tf.stack([point_ri_row_indices, point_ri_col_indices], -1)
ri_ranges = tf.cast(
tf.norm(points, axis=-1), dtype=points_vehicle_frame_dtype)
def fn(args):
ri_index, ri_value, num_point = args
ri_index = ri_index[0:num_point, :]
ri_value = ri_value[0:num_point]
range_image = _scatter_nd_with_pool(ri_index, ri_value, [height, width],
tf.unsorted_segment_max)
return range_image
range_images = tf.map_fn(
fn,
elems=[ri_indices, ri_ranges, num_points],
dtype=points_vehicle_frame_dtype,
back_prop=False)
return range_images, ri_indices, ri_ranges
def extract_point_cloud_from_range_image(range_image,
extrinsic,
inclination,
pixel_pose=None,
frame_pose=None,
dtype=tf.float64,
scope=None):
with tf.name_scope(
scope, 'ExtractPointCloudFromRangeImage',
[range_image, extrinsic, inclination, pixel_pose, frame_pose]):
range_image_polar = compute_range_image_polar(
range_image, extrinsic, inclination, dtype=dtype)
range_image_cartesian = compute_range_image_cartesian(
range_image_polar,
extrinsic,
pixel_pose=pixel_pose,
frame_pose=frame_pose,
dtype=dtype)
return range_image_cartesian
def crop_range_image(range_images, new_width, scope=None):
shape = _combined_static_and_dynamic_shape(range_images)
width = shape[2]
if width == new_width:
return range_images
if new_width < 1:
raise ValueError('new_width must be positive.')
if width is not None and new_width >= width:
raise ValueError('new_width {} should be < the old width {}.'.format(
new_width, width))
with tf.control_dependencies([tf.assert_less(new_width, width)]):
with tf.name_scope(scope, 'CropRangeImage', [range_images]):
diff = width - new_width
left = diff // 2
right = diff - left
range_image_crops = range_images[:, :, left:-right, ...]
return range_image_crops
def compute_inclination(inclination_range, height, scope=None):
with tf.name_scope(scope, 'ComputeInclination', [inclination_range]):
diff = inclination_range[..., 1] - inclination_range[..., 0]
inclination = (
(.5 + tf.cast(tf.range(0, height), dtype=inclination_range.dtype)) /
tf.cast(height, inclination_range.dtype) *
tf.expand_dims(diff, axis=-1) + inclination_range[..., 0:1])
return inclination
| true
| true
|
1c408c36b6bdfcbced5d5fa89616a441e1a3f104
| 417
|
py
|
Python
|
Semester2/ShapeTester/Box.py
|
ConstantineLinardakis/Programming1Portfolio
|
9062590de87e495ecf19b759a5d7a132a6982e3b
|
[
"MIT"
] | 1
|
2020-11-23T19:02:21.000Z
|
2020-11-23T19:02:21.000Z
|
Semester2/ShapeTester/Box.py
|
ConstantineLinardakis/Programming1Portfolio
|
9062590de87e495ecf19b759a5d7a132a6982e3b
|
[
"MIT"
] | null | null | null |
Semester2/ShapeTester/Box.py
|
ConstantineLinardakis/Programming1Portfolio
|
9062590de87e495ecf19b759a5d7a132a6982e3b
|
[
"MIT"
] | null | null | null |
class Box:
l = 0
w = 0
h = 0
def __init__(self,l,w,h):
self.l = l
self.w = w
self.h = h
def calcvolume(self):
l = self.l
w = self.w
h = self.h
print("The volume equals:", str(w*l*h))
def calcsurface(self):
l = self.l
w = self.w
h = self.h
print("The surface area equals:", str(2*w*l + 2*h*l +2*h*w))
| 18.130435
| 68
| 0.441247
|
class Box:
l = 0
w = 0
h = 0
def __init__(self,l,w,h):
self.l = l
self.w = w
self.h = h
def calcvolume(self):
l = self.l
w = self.w
h = self.h
print("The volume equals:", str(w*l*h))
def calcsurface(self):
l = self.l
w = self.w
h = self.h
print("The surface area equals:", str(2*w*l + 2*h*l +2*h*w))
| true
| true
|
1c408c442e1e807f5d4ecb8193daa5b1f4184032
| 6,167
|
py
|
Python
|
IO/radiation.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
IO/radiation.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
IO/radiation.py
|
storage4grid/PROFESS-PROFEV
|
adf4e26488225206c249938c9eecc394a06f9677
|
[
"Apache-2.0"
] | null | null | null |
import configparser
import datetime
import json
from math import floor, ceil
import requests
from IO.locationData import LocationData
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
# Date = Date & time (UTC)
# EPV = PV power output if requested (W)
# Bi = In-plane beam irradiance (W/m2)
# Di = Diffuse in-plane irradiance (W/m2) (if radiation components are requested)
# Ri = Refleted in-plane irradiance (W/m2) (if radiation components are requested)
# As = Sun elevation (degrees above horizon)
# Tamb = Air temperature (°C)
# W10 = Wind speed at 10m (m/s)
class RadiationData:
def __init__(self, date=datetime.datetime.now(), pv_output=0.0, beam_irradiance=0.0,
diffuse_irradiance=0.0, reflected_irradiance=0.0, sun_elevation=0.0, air_temp=0.0,
wind_speed=0.0):
self.date = datetime.datetime(datetime.datetime.now().year, date.month, date.day, date.hour, 0) + \
datetime.timedelta(hours=1)
self.pv_output = pv_output
self.beam_irradiance = beam_irradiance
self.diffuse_irradiance = diffuse_irradiance
self.reflected_irradiance = reflected_irradiance
self.sun_elevation = sun_elevation
self.air_temp = air_temp
self.wind_speed = wind_speed
def default(self):
return self.__dict__
def __repr__(self):
return self.date.strftime("%c") + " " + str(self.pv_output) + " " + str(self.beam_irradiance) + " " + \
str(self.diffuse_irradiance) + " " + str(self.reflected_irradiance) + " " + str(self.sun_elevation) + \
" " + str(self.air_temp) + " " + str(self.wind_speed)
class SolarRadiation:
"""
Radiation Service that collects data and grep the next 48h
"""
@staticmethod
def get_rad(lat, lon, maxPV, dT):
rad_data = []
logger.info("coord "+str(lat)+ ", "+ str(lon))
if lat is not None and lon is not None:
rad = requests.get("http://re.jrc.ec.europa.eu/pvgis5/seriescalc.php?lat=" +
"{:.3f}".format(float(lat)) + "&lon=" + "{:.3f}".format(float(lon)) + "&raddatabase=" +
"PVGIS-CMSAF&usehorizon=1&startyear=2016&endyear=2016&mountingplace=free&" +
"optimalinclination=0&optimalangles=1&hourlyoptimalangles=1&PVcalculation=1&" +
"pvtechchoice=crystSi&peakpower=" + str(maxPV) + "&loss=14&components=1")
red_arr = str(rad.content).split("\\n")
for x in range(11):
del red_arr[0]
now_file = datetime.datetime.now()
now = datetime.datetime(2000, now_file.month, now_file.day, now_file.hour, now_file.minute)
for x in range(0, red_arr.__len__()):
w = red_arr[x][:-2].split(",")
if w.__len__() != 9:
break
date_file = datetime.datetime.strptime(w[0], "%Y%m%d:%H%M%S")
date = datetime.datetime(2000, date_file.month, date_file.day, date_file.hour, date_file.minute)
if now <= date - datetime.timedelta(hours=-1) <= (now + datetime.timedelta(hours=48)):
rad_data.append(RadiationData(date, w[1], w[2], w[3], w[4], w[5], w[6], w[7]))
we = sorted(rad_data, key=lambda w: w.date)
data = SolarRadiation.extract_data(we)
data = SolarRadiation.expand_and_resample(data, dT)
return data
@staticmethod
def extract_data(rad):
data = []
for i in range(0, len(rad) - 1):
date = rad[i].date
timestamp = date.timestamp()
pv_output = float(rad[i].pv_output)
data.append([timestamp, pv_output])
return data
@staticmethod
def expand_and_resample(raw_data, dT):
step = float(dT)
j = len(raw_data) - 1
new_data = []
if j > 0:
start_time = raw_data[j][0]
start_value = raw_data[j][1]
new_data.append([start_time, start_value])
prev_time = start_time
prev_value = start_value
required_diff = step
j -= 1
while j >= 0:
end_time = raw_data[j][0]
end_value = raw_data[j][1]
diff_sec = prev_time - end_time
if diff_sec >= required_diff:
ratio = required_diff / diff_sec
inter_time = prev_time - required_diff
inter_value = prev_value - (prev_value - end_value) * ratio
new_data.append([inter_time, inter_value])
prev_time = inter_time
prev_value = inter_value
required_diff = step
else:
required_diff -= diff_sec
prev_time = end_time
prev_value = end_value
j -= 1
else:
new_data = raw_data
new_data.reverse()
return new_data
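# Added note (hedged): expand_and_resample() walks the series backwards from the newest
# sample and emits one linearly interpolated point every dT seconds; e.g. with
# raw_data = [[0, 0.0], [3600, 100.0]] and dT = 1800 it yields
# [[0, 0.0], [1800, 50.0], [3600, 100.0]] after the final reverse() (oldest first).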
class Radiation:
def __init__(self, config, maxPV, dT_in_seconds, location):
self.data = {}
self.location = location
self.location_data = LocationData(config)
self.location_found = False
self.lat = 50.7374
self.lon = 7.0982
self.maxPV = maxPV
#self.maxPV /= 1000 # pv in kW
self.dT_in_seconds = dT_in_seconds
def get_data(self):
self.update_location_info()
data = SolarRadiation.get_rad(self.lat, self.lon, self.maxPV, self.dT_in_seconds)
jsm = json.dumps(data, default=str)
return jsm
def update_location_info(self):
if not self.location_found:
lat, lon = self.location_data.get_city_coordinate(self.location["city"], self.location["country"])
if lat is not None and lon is not None:
self.lat = lat
self.lon = lon
self.location_found = True
else:
logger.error("Error getting location info, setting to bonn, germany")
| 41.113333
| 118
| 0.574185
|
import configparser
import datetime
import json
from math import floor, ceil
import requests
from IO.locationData import LocationData
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
class RadiationData:
def __init__(self, date=datetime.datetime.now(), pv_output=0.0, beam_irradiance=0.0,
diffuse_irradiance=0.0, reflected_irradiance=0.0, sun_elevation=0.0, air_temp=0.0,
wind_speed=0.0):
self.date = datetime.datetime(datetime.datetime.now().year, date.month, date.day, date.hour, 0) + \
datetime.timedelta(hours=1)
self.pv_output = pv_output
self.beam_irradiance = beam_irradiance
self.diffuse_irradiance = diffuse_irradiance
self.reflected_irradiance = reflected_irradiance
self.sun_elevation = sun_elevation
self.air_temp = air_temp
self.wind_speed = wind_speed
def default(self):
return self.__dict__
def __repr__(self):
return self.date.strftime("%c") + " " + str(self.pv_output) + " " + str(self.beam_irradiance) + " " + \
str(self.diffuse_irradiance) + " " + str(self.reflected_irradiance) + " " + str(self.sun_elevation) + \
" " + str(self.air_temp) + " " + str(self.wind_speed)
class SolarRadiation:
@staticmethod
def get_rad(lat, lon, maxPV, dT):
rad_data = []
logger.info("coord "+str(lat)+ ", "+ str(lon))
if lat is not None and lon is not None:
rad = requests.get("http://re.jrc.ec.europa.eu/pvgis5/seriescalc.php?lat=" +
"{:.3f}".format(float(lat)) + "&lon=" + "{:.3f}".format(float(lon)) + "&raddatabase=" +
"PVGIS-CMSAF&usehorizon=1&startyear=2016&endyear=2016&mountingplace=free&" +
"optimalinclination=0&optimalangles=1&hourlyoptimalangles=1&PVcalculation=1&" +
"pvtechchoice=crystSi&peakpower=" + str(maxPV) + "&loss=14&components=1")
red_arr = str(rad.content).split("\\n")
for x in range(11):
del red_arr[0]
now_file = datetime.datetime.now()
now = datetime.datetime(2000, now_file.month, now_file.day, now_file.hour, now_file.minute)
for x in range(0, red_arr.__len__()):
w = red_arr[x][:-2].split(",")
if w.__len__() != 9:
break
date_file = datetime.datetime.strptime(w[0], "%Y%m%d:%H%M%S")
date = datetime.datetime(2000, date_file.month, date_file.day, date_file.hour, date_file.minute)
if now <= date - datetime.timedelta(hours=-1) <= (now + datetime.timedelta(hours=48)):
rad_data.append(RadiationData(date, w[1], w[2], w[3], w[4], w[5], w[6], w[7]))
we = sorted(rad_data, key=lambda w: w.date)
data = SolarRadiation.extract_data(we)
data = SolarRadiation.expand_and_resample(data, dT)
return data
@staticmethod
def extract_data(rad):
data = []
for i in range(0, len(rad) - 1):
date = rad[i].date
timestamp = date.timestamp()
pv_output = float(rad[i].pv_output)
data.append([timestamp, pv_output])
return data
@staticmethod
def expand_and_resample(raw_data, dT):
step = float(dT)
j = len(raw_data) - 1
new_data = []
if j > 0:
start_time = raw_data[j][0]
start_value = raw_data[j][1]
new_data.append([start_time, start_value])
prev_time = start_time
prev_value = start_value
required_diff = step
j -= 1
while j >= 0:
end_time = raw_data[j][0]
end_value = raw_data[j][1]
diff_sec = prev_time - end_time
if diff_sec >= required_diff:
ratio = required_diff / diff_sec
inter_time = prev_time - required_diff
inter_value = prev_value - (prev_value - end_value) * ratio
new_data.append([inter_time, inter_value])
prev_time = inter_time
prev_value = inter_value
required_diff = step
else:
required_diff -= diff_sec
prev_time = end_time
prev_value = end_value
j -= 1
else:
new_data = raw_data
new_data.reverse()
return new_data
class Radiation:
def __init__(self, config, maxPV, dT_in_seconds, location):
self.data = {}
self.location = location
self.location_data = LocationData(config)
self.location_found = False
self.lat = 50.7374
self.lon = 7.0982
self.maxPV = maxPV
        self.dT_in_seconds = dT_in_seconds
def get_data(self):
self.update_location_info()
data = SolarRadiation.get_rad(self.lat, self.lon, self.maxPV, self.dT_in_seconds)
jsm = json.dumps(data, default=str)
return jsm
def update_location_info(self):
if not self.location_found:
lat, lon = self.location_data.get_city_coordinate(self.location["city"], self.location["country"])
if lat is not None and lon is not None:
self.lat = lat
self.lon = lon
self.location_found = True
else:
logger.error("Error getting location info, setting to bonn, germany")
| true
| true
|
1c408e8e8f9bc32650dab15d276e34f5c4975c7f
| 5,268
|
py
|
Python
|
sscutils/validation_functions.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | null | null | null |
sscutils/validation_functions.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | 21
|
2021-09-15T15:31:22.000Z
|
2022-03-20T17:10:50.000Z
|
sscutils/validation_functions.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | 2
|
2021-09-08T14:12:00.000Z
|
2021-09-29T10:58:08.000Z
|
import re
from functools import partial
from pathlib import Path
from dvc.repo import Repo
from structlog import get_logger
from .artifact_context import ArtifactContext
from .config_loading import DataEnvSpecification, DatasetConfig, ProjectConfig
from .exceptions import DatasetSetupException
from .helpers import import_env_creator_function, import_update_data_function
from .metadata import ArtifactMetadata
from .metadata.bedrock.atoms import NS_ATOM_TYPE
from .metadata.bedrock.imported_namespace import ImportedNamespace
from .metadata.datascript.conversion import imported_bedrock_to_datascript
from .metadata.datascript.to_bedrock import DatascriptToBedrockConverter
from .naming import (
COMPLETE_ENV_NAME,
ns_metadata_abs_module,
project_template_repo,
)
from .sql.draw import dump_graph
from .sql.loader import SqlLoader
from .utils import cd_into
logger = get_logger()
def log(msg, artifact_type):
logger.info(f"validating {artifact_type} - {msg}")
def sql_validation(constr, env=None, draw=False, batch_size=2000):
loader = SqlLoader(constr, echo=False, batch_size=batch_size)
loader.setup_schema()
if draw:
dump_graph(loader.sql_meta, loader.engine)
try:
loader.load_data(env)
loader.validate_data(env)
finally:
loader.purge()
def validate_project():
"""asserts a few things about a dataset
- all prefixes in envs have imported namespaces
- configuration files are present
    - metadata is the same across all branches
- metadata fits what is in the data files
- one step per module
Raises
------
ProjectSetupException
explains what is wrong
"""
_ = ProjectConfig()
def validate_dataset(
constr="sqlite:///:memory:", env=None, draw=False, batch_size=2000
):
"""asserts a few things about a dataset
- configuration files are present
- standard functions can be imported
- metadata is properly exported from datascript
- metadata fits what is in the data files
- is properly uploaded -> can be imported to a project
Raises
------
DatasetSetupException
explains what is wrong
"""
_log = partial(log, artifact_type="dataset")
_log("full context")
ctx = ArtifactContext()
_log("config files and naming")
conf = DatasetConfig()
ctx.branch_remote_pairs
for _env in [*conf.created_environments, conf.default_env]:
is_underscored_name(_env.name)
is_dashed_name(_env.branch)
_log("function imports")
import_env_creator_function()
import_update_data_function()
_log("serialized metadata fits conventions")
root_serialized_ns = ArtifactMetadata.load_serialized().root_ns
for table in root_serialized_ns.tables:
is_underscored_name(table.name)
for feat in table.features_w_ind:
is_underscored_name(feat.prime_id)
_log("serialized metadata matching datascript")
root_datascript_ns = DatascriptToBedrockConverter(
ns_metadata_abs_module
).to_ns_metadata()
ds_atom_n = 0
for ds_atom in root_datascript_ns.atoms:
try:
ser_atom = root_serialized_ns.get(ds_atom.name)
except KeyError as e:
raise DatasetSetupException(f"{ds_atom} not serialized: {e}")
_nondesc_eq(ser_atom, ds_atom)
ds_atom_n += 1
assert ds_atom_n == len(root_serialized_ns.atoms)
_log("data can be read to sql db")
sql_validation(constr, env, draw, batch_size=batch_size)
_log("data can be imported to a project via dvc")
validate_ds_importable(env or COMPLETE_ENV_NAME)
def validate_ds_importable(env):
artifact_dir = Path.cwd().as_posix()
test_prefix = "test_dataset"
with cd_into(project_template_repo, force_clone=True):
ctx = ArtifactContext()
ctx.config.data_envs.append(DataEnvSpecification(test_prefix, env))
ctx.metadata.imported_namespaces.append(
ImportedNamespace(test_prefix, artifact_dir)
)
ctx.serialize()
ctx.import_namespaces()
imported_bedrock_to_datascript()
_denv = ArtifactContext().data_envs[0]
_denv.out_path.parent.mkdir(exist_ok=True)
_denv.load_data(Repo())
# TODO: assert this data matches local
def is_underscored_name(s):
_check_match("_", s)
def is_dashed_name(s):
_check_match("-", s)
def is_repo_name(s):
_check_match("-", s, False)
def is_step_name(s):
_check_match("_", s, False)
def _check_match(bc, s, nums_ok=True):
ok_chr = "a-z|0-9" if nums_ok else "a-z"
rex = r"[a-z]+((?!{bc}{bc})[{okc}|\{bc}])*[{okc}]+".format(
bc=bc, okc=ok_chr
)
if re.compile(rex).fullmatch(s) is None:
raise NameError(
f"{s} does not fit the expected format of "
f"lower case letters and non-duplicated {bc}"
)
def _nondesc_eq(serialized: NS_ATOM_TYPE, datascript: NS_ATOM_TYPE):
if _dropdesc(serialized) != _dropdesc(datascript):
raise DatasetSetupException(
"inconsistent metadata: "
f"serialized: {serialized} datascript: {datascript}"
)
def _dropdesc(obj: NS_ATOM_TYPE):
return {k: v for k, v in obj.to_dict().items() if k != "description"}
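# Hedged illustration (added, not part of the original file): the naming helpers accept
# lower-case names joined by single separators and raise NameError otherwise; the sample
# names below are placeholders.
def _example_naming_conventions():
    is_underscored_name("raw_trades_2021")  # passes: lower case, digits, single "_"
    is_dashed_name("my-dataset")            # passes
    try:
        is_underscored_name("Raw__Trades")  # upper case and doubled "_" are rejected
    except NameError as err:
        logger.info(str(err))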
| 29.104972
| 78
| 0.699696
|
import re
from functools import partial
from pathlib import Path
from dvc.repo import Repo
from structlog import get_logger
from .artifact_context import ArtifactContext
from .config_loading import DataEnvSpecification, DatasetConfig, ProjectConfig
from .exceptions import DatasetSetupException
from .helpers import import_env_creator_function, import_update_data_function
from .metadata import ArtifactMetadata
from .metadata.bedrock.atoms import NS_ATOM_TYPE
from .metadata.bedrock.imported_namespace import ImportedNamespace
from .metadata.datascript.conversion import imported_bedrock_to_datascript
from .metadata.datascript.to_bedrock import DatascriptToBedrockConverter
from .naming import (
COMPLETE_ENV_NAME,
ns_metadata_abs_module,
project_template_repo,
)
from .sql.draw import dump_graph
from .sql.loader import SqlLoader
from .utils import cd_into
logger = get_logger()
def log(msg, artifact_type):
logger.info(f"validating {artifact_type} - {msg}")
def sql_validation(constr, env=None, draw=False, batch_size=2000):
loader = SqlLoader(constr, echo=False, batch_size=batch_size)
loader.setup_schema()
if draw:
dump_graph(loader.sql_meta, loader.engine)
try:
loader.load_data(env)
loader.validate_data(env)
finally:
loader.purge()
def validate_project():
_ = ProjectConfig()
def validate_dataset(
constr="sqlite:///:memory:", env=None, draw=False, batch_size=2000
):
_log = partial(log, artifact_type="dataset")
_log("full context")
ctx = ArtifactContext()
_log("config files and naming")
conf = DatasetConfig()
ctx.branch_remote_pairs
for _env in [*conf.created_environments, conf.default_env]:
is_underscored_name(_env.name)
is_dashed_name(_env.branch)
_log("function imports")
import_env_creator_function()
import_update_data_function()
_log("serialized metadata fits conventions")
root_serialized_ns = ArtifactMetadata.load_serialized().root_ns
for table in root_serialized_ns.tables:
is_underscored_name(table.name)
for feat in table.features_w_ind:
is_underscored_name(feat.prime_id)
_log("serialized metadata matching datascript")
root_datascript_ns = DatascriptToBedrockConverter(
ns_metadata_abs_module
).to_ns_metadata()
ds_atom_n = 0
for ds_atom in root_datascript_ns.atoms:
try:
ser_atom = root_serialized_ns.get(ds_atom.name)
except KeyError as e:
raise DatasetSetupException(f"{ds_atom} not serialized: {e}")
_nondesc_eq(ser_atom, ds_atom)
ds_atom_n += 1
assert ds_atom_n == len(root_serialized_ns.atoms)
_log("data can be read to sql db")
sql_validation(constr, env, draw, batch_size=batch_size)
_log("data can be imported to a project via dvc")
validate_ds_importable(env or COMPLETE_ENV_NAME)
def validate_ds_importable(env):
artifact_dir = Path.cwd().as_posix()
test_prefix = "test_dataset"
with cd_into(project_template_repo, force_clone=True):
ctx = ArtifactContext()
ctx.config.data_envs.append(DataEnvSpecification(test_prefix, env))
ctx.metadata.imported_namespaces.append(
ImportedNamespace(test_prefix, artifact_dir)
)
ctx.serialize()
ctx.import_namespaces()
imported_bedrock_to_datascript()
_denv = ArtifactContext().data_envs[0]
_denv.out_path.parent.mkdir(exist_ok=True)
_denv.load_data(Repo())
def is_underscored_name(s):
_check_match("_", s)
def is_dashed_name(s):
_check_match("-", s)
def is_repo_name(s):
_check_match("-", s, False)
def is_step_name(s):
_check_match("_", s, False)
def _check_match(bc, s, nums_ok=True):
ok_chr = "a-z|0-9" if nums_ok else "a-z"
rex = r"[a-z]+((?!{bc}{bc})[{okc}|\{bc}])*[{okc}]+".format(
bc=bc, okc=ok_chr
)
if re.compile(rex).fullmatch(s) is None:
raise NameError(
f"{s} does not fit the expected format of "
f"lower case letters and non-duplicated {bc}"
)
def _nondesc_eq(serialized: NS_ATOM_TYPE, datascript: NS_ATOM_TYPE):
if _dropdesc(serialized) != _dropdesc(datascript):
raise DatasetSetupException(
"inconsistent metadata: "
f"serialized: {serialized} datascript: {datascript}"
)
def _dropdesc(obj: NS_ATOM_TYPE):
return {k: v for k, v in obj.to_dict().items() if k != "description"}
| true
| true
|
1c4090dcb2015b0c5a4cc7088154f7ca9e7c6fc2
| 27,882
|
py
|
Python
|
gedl/RPCGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T18:43:44.000Z
|
2021-04-20T18:43:44.000Z
|
gedl/RPCGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1
|
2021-09-23T14:55:43.000Z
|
2021-09-23T18:09:35.000Z
|
gedl/RPCGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1
|
2020-05-21T03:12:16.000Z
|
2020-05-21T03:12:16.000Z
|
import json
import sys
import copy
import os
from argparse import ArgumentParser
def argparser(enclaveList, enclaveMap):
parser = ArgumentParser(description='CLOSURE RPC File and Wrapper Generator')
parser.add_argument('-o','--odir', required=True, type=str, help='Output Directory')
parser.add_argument('-g','--gedl', required=True, type=str, help='Input GEDL Filepath')
parser.add_argument('-i','--ipc', required=True, type=str, help='IPC Type (Singlethreaded/Multithreaded)')
parser.add_argument('-a','--hal', required=True, type=str, help='HAL Api Directory Path')
parser.add_argument('-n','--inuri', required=True, type=str, help='Input URI')
parser.add_argument('-t','--outuri', required=True, type=str, help='Output URI')
parser.add_argument('-x','--xdconf', required=True, type=str, help='Hal Config Map Filename')
parser.add_argument('-f','--files', required=True, type=str, nargs='+', help='List of Mod Files')
args = parser.parse_args()
for index, enclaveFile in enumerate(args.files):
enclaveName = enclaveFile[:enclaveFile.rfind('/')]
enclaveName = enclaveName[(enclaveName.rfind('/')+1):]
enclaveList.append(enclaveName)
enclaveMap[enclaveName] = [enclaveFile,"slave", index]
return args
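# Hedged illustration (added, not part of the original file): a typical invocation matching
# the flags defined in argparser() above; every path, URI prefix and enclave name below is
# a placeholder.
#
#   python3 RPCGenerator.py -o ./out -g ./example.gedl -i Singlethreaded \
#       -a ./hal/api -n ipc:///tmp/sock_sub_ -t ipc:///tmp/sock_pub_ \
#       -x xdconf.ini -f orange/example.mod.c purple/example.mod.c
#
# Each enclave name is taken from the directory component of its --files entry ("orange"
# and "purple" here), so every .mod.c file must live in a folder named after its enclave.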
def getFirstElem(list):
return list[0]
def GEDLParser(args,enclaveList, enclaveMap,replaceList,callerList,calleeList):
with open(args.gedl) as edl_file:
gedl = json.load(edl_file)
callNum = 3
callNumMap = {}
for index, enclave in enumerate(enclaveList):
occursList = []
callerList.append([])
calleeList.append([])
for enclavePair in gedl['gedl']:
if enclavePair["caller"] == enclave:
callsList = []
for call in enclavePair["calls"]:
paramsList = []
for param in call["params"]:
paramsList.append([str(param["type"]),str(param["name"]),str(param["dir"])])
if str(call["func"]) in callNumMap:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNumMap[str(call["func"])]])
else:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNum])
callNumMap[str(call["func"])] = callNum
callNum += 2
for occurance in call["occurs"]:
for line in occurance["lines"]:
occursList.append([line,str(call["func"])])
callerList[index].append([str(enclavePair["callee"]),enclaveMap[enclavePair["callee"]][2],copy.copy(callsList)])
if enclavePair["callee"] == enclave:
callsList = []
for call in enclavePair["calls"]:
paramsList = []
for param in call["params"]:
paramsList.append([str(param["type"]),str(param["name"]),str(param["dir"])])
if str(call["func"]) in callNumMap:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNumMap[str(call["func"])]])
else:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNum])
callNumMap[str(call["func"])] = callNum
callNum += 2
calleeList[index].append([str(enclavePair["caller"]),enclaveMap[enclavePair["caller"]][2],copy.copy(callsList)])
occursList.sort(key=getFirstElem)
replaceList.append(copy.copy(occursList))
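# Added note (hedged): after GEDLParser() runs, the per-enclave lists have this layout:
#   callerList[i]  -> [[callee_name, callee_index, [[func, ret_type, [[type, name, dir], ...], call_num], ...]], ...]
#   calleeList[i]  -> the same layout, keyed by the calling enclave
#   replaceList[i] -> [[line_number, func_name], ...] sorted by line, consumed by CModFunction()
# call_num starts at 3 and grows by 2 per new function: the odd value tags the request and
# call_num + 1 tags the matching response in the generated #pragma cle gapstags.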
def CModFunction(enclave,args,enclaveMap,replaceList,callerList,calleeList):
if not os.path.isfile(enclaveMap[enclave][0]):
print("File" + enclaveMap[enclave][0] + "does not exist. Please update GEDL Schema with valid C file.\n")
exit(0)
with open(enclaveMap[enclave][0]) as old_file:
newFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod","")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + newFile),"w") as modc_file:
modc_file.write("#include \"" + newFile[:newFile.rfind(".")] + "_rpc.h\"\n")
oldFileLines = list(old_file)
for index, line in enumerate(oldFileLines):
if "int main(" in line:
modc_file.write(line)
modc_file.write("\t_master_rpc_init();\n")
enclaveMap[enclave][1] = "master"
continue
while len(replaceList[enclaveIndex]) > 0 and (index+1) == replaceList[enclaveIndex][0][0]:
callIndex = line.find(replaceList[enclaveIndex][0][1])
if callIndex == -1:
print("Error: GEDL Cross-Enclave callsite in file %s for function %d at line %s could not be found" % (enclaveMap[enclave][0],index,replaceList[enclaveIndex][0][1]))
else:
line = line.replace(replaceList[enclaveIndex][0][1],"_rpc_" + replaceList[enclaveIndex][0][1])
del replaceList[enclaveIndex][0]
modc_file.write(line)
if enclaveMap[enclave][1] != "master":
modc_file.write("int main(int argc, char **argv) {\n\treturn _slave_rpc_loop();\n}")
def RPCGeneratorH(enclave,args,enclaveMap,callerList,calleeList):
rpchFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod.c","_rpc.h")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + rpchFile),"w") as rpch_file:
rpch_file.write("#ifndef _" + enclave.capitalize() + "_RPC_\n#define _" + enclave.capitalize() + "_RPC_\n#include \"xdcomms.h\"\n#include \"codec.h\"\n")
if args.ipc != "Singlethreaded" and enclaveMap[enclave][1] != "master":
rpch_file.write("#include <pthread.h>\n")
rpch_file.write("\n# define APP_BASE 0\n")
for callerPair in callerList[enclaveIndex]:
if 1: #args.ipc == "Singlethreaded":
rpch_file.write("# define MUX_NEXTRPC APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define SEC_NEXTRPC APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define MUX_OKAY APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_OKAY APP_BASE + " + str(enclaveIndex+ 1) + "\n")
for call in callerPair[2]:
rpch_file.write("# define MUX_REQUEST_" + call[0].upper() + " APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define SEC_REQUEST_" + call[0].upper() + " APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define MUX_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
for calleePair in calleeList[enclaveIndex]:
if 1: #args.ipc == "Singlethreaded":
rpch_file.write("# define MUX_NEXTRPC APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_NEXTRPC APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define MUX_OKAY APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("# define SEC_OKAY APP_BASE + " + str(calleePair[1] + 1) + "\n")
for call in calleePair[2]:
rpch_file.write("# define MUX_REQUEST_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_REQUEST_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define MUX_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("# define SEC_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("\n#define INURI \"" + args.inuri + enclave + "\"\n#define OUTURI \"" + args.outuri + enclave + "\"\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpch_file.write("#pragma cle def TAG_RESPONSE_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + "," + str(call[3]+1) + "] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_REQUEST_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + callerPair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(callerPair[1]+ 1) + "," + str(callerPair[1]+ 1) + "," + str(call[3]) + "] }} \\\n\t] }\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpch_file.write("#pragma cle def TAG_RESPONSE_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + calleePair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(calleePair[1]+ 1) + "," + str(calleePair[1]+ 1) + "," + str(call[3]+1) + "] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_REQUEST_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + "," + str(call[3]) + "] }} \\\n\t] }\n")
if 1: #args.ipc == "Singlethreaded":
for callerPair in callerList[enclaveIndex]:
#REMOVE HARDCODE ONCE IDL GEN FINISHED
rpch_file.write("#pragma cle def TAG_OKAY {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + ",2] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_NEXTRPC {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + callerPair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(callerPair[1]+ 1) + "," + str(callerPair[1]+ 1) + ",1] }} \\\n\t] }\n")
for calleePair in calleeList[enclaveIndex]:
rpch_file.write("#pragma cle def TAG_OKAY {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + calleePair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(calleePair[1]+ 1) + "," + str(calleePair[1]+ 1) + ",2] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_NEXTRPC {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + ",1] }} \\\n\t] }\n")
if enclaveMap[enclave][1] == "master":
rpch_file.write("extern void _master_rpc_init();\n")
else:
rpch_file.write("extern int _slave_rpc_loop();\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpch_file.write("extern " + call[1] + " _rpc_" + call[0] + "(")
for param in call[2]:
rpch_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpch_file.write(",")
rpch_file.write(");\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpch_file.write("extern " + call[1] + " " + call[0] + "(")
for param in call[2]:
rpch_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpch_file.write(",")
rpch_file.write(");\n")
rpch_file.write("\n\n#endif /* _"+ enclave.upper() + "_RPC_ */")
def RPCGeneratorC(enclave,args,enclaveMap,callerList,calleeList):
rpccFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod.c","_rpc.c")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + rpccFile),"w") as rpcc_file:
rpcc_file.write("#include \"" + rpccFile[:rpccFile.rfind(".")] + ".h\"\n")
if enclaveMap[enclave][1] != "master":
rpcc_file.write("#define TAG_MATCH(X, Y) (X.mux == Y.mux && X.sec == Y.sec && X.typ == Y.typ)\n#define WRAP(X) void *_wrapper_##X(void *tag) { while(1) { _handle_##X(tag); } }\n\n")
if enclaveMap[enclave][1] == "master" and args.ipc == "Singlethreaded":
rpcc_file.write("void _notify_next_tag(gaps_tag* n_tag) {\n")
rpcc_file.write("\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_NEXTRPC\n\tnextrpc_datatype nxt;\n\t#pragma cle end TAG_NEXTRPC\n")
rpcc_file.write("\t#pragma cle begin TAG_OKAY\n\tokay_datatype okay;\n\t#pragma cle end TAG_OKAY\n\n")
rpcc_file.write("\tnxt.mux = n_tag->mux;\n\tnxt.sec = n_tag->sec;\n\tnxt.typ = n_tag->typ;\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_OKAY, SEC_OKAY, DATA_TYP_OKAY);\n\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(o_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_asyn_send(psocket, &nxt, &t_tag);\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &okay, &o_tag);\n}\n\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("void _handle_request_" + call[0] + "(gaps_tag* tag) {\n\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_REQUEST_" + call[0].upper() + "\n\trequest_" + call[0] + "_datatype req_" + call[0] + ";\n\t#pragma cle end TAG_REQUEST_" + call[0].upper() + "\n")
rpcc_file.write("\t#pragma cle begin TAG_RESPONSE_" + call[0].upper() + "\n\tresponse_" + call[0] + "_datatype res_" + call[0] + ";\n\t#pragma cle end TAG_RESPONSE_" + call[0].upper() + "\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(t_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &req_" + call[0] + ", &t_tag);\n\t")
if call[1] != "void":
rpcc_file.write("res_" + call[0] + ".ret = ")
rpcc_file.write(call[0] + "(")
for param in call[2]:
rpcc_file.write("req_" + call[0] + "." + param[1])
if param != call[2][-1]:
rpcc_file.write(",")
rpcc_file.write(");\n\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_RESPONSE_" + call[0].upper() + ", SEC_RESPONSE_" + call[0].upper() + ", DATA_TYP_RESPONSE_" + call[0].upper() + ");\n\txdc_asyn_send(psocket, &res_" + call[0] + ", &o_tag);\n}\n\n")
if enclaveMap[enclave][1] != "master": # and args.ipc == "Singlethreaded":
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("void _handle_nxtrpc(gaps_tag* n_tag) {\n\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_NEXTRPC\n\tnextrpc_datatype nxt;\n\t#pragma cle end TAG_NEXTRPC\n")
rpcc_file.write("\t#pragma cle begin TAG_OKAY\n\tokay_datatype okay;\n\t#pragma cle end TAG_OKAY\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(t_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &nxt, &t_tag);\n\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_OKAY, SEC_OKAY, DATA_TYP_OKAY);\n\tokay.x = 0;\n")
rpcc_file.write("\txdc_asyn_send(psocket, &okay, &o_tag);\n\n")
rpcc_file.write("\tn_tag->mux = nxt.mux;\n\tn_tag->sec = nxt.sec;\n\tn_tag->typ = nxt.typ;\n}\n\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write(call[1] + " _rpc_" + call[0] + "(")
for param in call[2]:
rpcc_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpcc_file.write(",")
rpcc_file.write(") {\n")
rpcc_file.write("\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_REQUEST_" + call[0].upper() + "\n\trequest_" + call[0] + "_datatype req_" + call[0] + ";\n\t#pragma cle end TAG_REQUEST_" + call[0].upper() + "\n")
rpcc_file.write("\t#pragma cle begin TAG_RESPONSE_" + call[0].upper() + "\n\tresponse_" + call[0] + "_datatype res_" + call[0] + ";\n\t#pragma cle end TAG_RESPONSE_" + call[0].upper() + "\n\n")
if len(call[2]) == 0:
rpcc_file.write("\treq_" + call[0] + ".dummy = 0;\n")
else:
for param in call[2]:
rpcc_file.write("\treq_" + call[0] + "." + param[1] + "=" + param[1] + ";\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_RESPONSE_" + call[0].upper() + ", SEC_RESPONSE_" + call[0].upper() + ", DATA_TYP_RESPONSE_" + call[0].upper() + ");\n\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(o_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
if args.ipc == "Singlethreaded":
rpcc_file.write("\t_notify_next_tag(&t_tag);\n")
rpcc_file.write("\txdc_asyn_send(psocket, &req_" + call[0] + ", &t_tag);\n\txdc_blocking_recv(ssocket, &res_" + call[0] + ", &o_tag);\n")
rpcc_file.write("\treturn (res_" + call[0] + ".ret);\n}\n\n")
rpcc_file.write("void _hal_init(char *inuri, char *outuri) {\n\txdc_set_in(inuri);\n\txdc_set_out(outuri);\n")
if 1:#args.ipc == "Singlethreaded":
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write("\txdc_register(nextrpc_data_encode, nextrpc_data_decode, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\txdc_register(okay_data_encode, okay_data_decode, DATA_TYP_OKAY);\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\txdc_register(nextrpc_data_encode, nextrpc_data_decode, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\txdc_register(okay_data_encode, okay_data_decode, DATA_TYP_OKAY);\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write("\txdc_register(request_" + call[0] + "_data_encode, request_" + call[0] + "_data_decode, DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\txdc_register(response_" + call[0] + "_data_encode, response_" + call[0] + "_data_decode, DATA_TYP_RESPONSE_" + call[0].upper() + ");\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\txdc_register(request_" + call[0] + "_data_encode, request_" + call[0] + "_data_decode, DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\txdc_register(response_" + call[0] + "_data_encode, response_" + call[0] + "_data_decode, DATA_TYP_RESPONSE_" + call[0].upper() + ");\n")
rpcc_file.write("}\n\n")
if enclaveMap[enclave][1] == "master":
rpcc_file.write("void _master_rpc_init() {\n\t_hal_init((char*)INURI, (char *)OUTURI);\n}\n\n")
else:
if args.ipc == "Multithreaded":
crossDomains = 0
for calleePair in calleeList[enclaveIndex]:
crossDomains += 1 + len(calleePair[2])
rpcc_file.write("#define NXDRPC " + str(crossDomains) + "\n")
for calleePair in calleeList[enclaveIndex]:
rpcc_file.write("WRAP(nxtrpc)\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("WRAP(request_" + call[0] + ")\n")
rpcc_file.write("\nint _slave_rpc_loop() {\n\tgaps_tag n_tag;\n")
if args.ipc == "Multithreaded":
rpcc_file.write("\tpthread_t tid[NXDRPC];\n\t_hal_init((char *)INURI, (char *)OUTURI);\n")
tidIndex = 0
for calleePair in calleeList[enclaveIndex]:
rpcc_file.write("\tpthread_create(&tid[" + str(tidIndex) + "], NULL, _wrapper_nxtrpc, &n_tag);\n")
tidIndex += 1
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\tpthread_create(&tid[" + str(tidIndex) + "], NULL, _wrapper_request_" + call[0] + ", &n_tag);\n")
tidIndex += 1
rpcc_file.write("\tfor (int i = 0; i < NXDRPC; i++) pthread_join(tid[i], NULL);\n\treturn 0;\n}\n\n")
else:
#FIX HARDCODING FOR NEXTRPC AND REQUEST
rpcc_file.write("int _slave_rpc_loop() {\n\tgaps_tag n_tag;\n\tgaps_tag t_tag;\n\n\t_hal_init((char *)INURI, (char *)OUTURI);\n\n")
rpcc_file.write("\twhile (1) {\n\t\t_handle_nxtrpc(&n_tag);\n\t\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\t\tif(TAG_MATCH(n_tag, t_tag)) {\n\t\t\tcontinue;\n\t\t}\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\t\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\t\tif (TAG_MATCH(n_tag, t_tag)) {\n\t\t\t_handle_request_"+ call[0] + "(NULL);\n\t\t\tcontinue;\n\t\t}\n\t\tcontinue;\n\t}\n}\n\n")
def writeHALEntry(file, fromName , toName, mux, sec, typ, funcName):
file.write("{\"from\":\"" + fromName + "\",\"to\":\"" + toName + "\",\"mux\":" + str(mux) + ",\"sec\":" + str(sec) + ",\"typ\":" + str(typ) + ",\"name\":\"" + funcName +"\"}")
def XDCONFGenerator(args,enclaveMap,callerList,enclaveList):
with open((args.odir + "/" + args.xdconf),"a") as map_file:
map_file.write("{\"enclaves\": [")
first = 1
for enclave in enclaveList:
if first == 1:
first = 0
else:
map_file.write(",")
map_file.write("\n\t{\n\t\t\"enclave\":\"" + enclave + "\",\n\t\t\"inuri\":\"" + args.inuri + enclave + "\",\n\t\t\"outuri\":\"" + args.outuri + enclave + "\",\n\t\t\"halmaps\":[")
enclaveIndex = enclaveMap[enclave][2]
if enclaveMap[enclave][1] == "master":
for callerPair in callerList[enclaveIndex]:
writeHALEntry(map_file, enclave , callerPair[0], (callerPair[1] + 1), (callerPair[1] + 1), 1, "NEXTRPC")
map_file.write(",")
writeHALEntry(map_file, callerPair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), 2, "OKAY")
else:
for calleePair in calleeList[enclaveIndex]:
writeHALEntry(map_file, calleePair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), 1, "NEXTRPC")
map_file.write(",")
writeHALEntry(map_file, enclave , calleePair[0], (calleePair[1] + 1), (calleePair[1] + 1), 2, "OKAY")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
map_file.write(",")
writeHALEntry(map_file, enclave , callerPair[0], (callerPair[1] + 1), (callerPair[1] + 1), call[3], ("REQUEST_" + call[0].upper()))
map_file.write(",")
writeHALEntry(map_file, callerPair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), (call[3]+1), ("RESPONSE_" + call[0].upper()))
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
map_file.write(",")
writeHALEntry(map_file, calleePair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), call[3], ("REQUEST_" + call[0].upper()))
map_file.write(",")
writeHALEntry(map_file, enclave , calleePair[0], (calleePair[1] + 1), (calleePair[1] + 1), (call[3]+1), ("RESPONSE_" + call[0].upper()))
map_file.write("]\n\t}")
map_file.write("\n]}")
#Main Script
enclaveMap = {}
enclaveList = []
replaceList = []
callerList = []
calleeList = []
args = argparser(enclaveList, enclaveMap)
GEDLParser(args, enclaveList, enclaveMap, replaceList,callerList,calleeList)
for enclave in enclaveList:
CModFunction(enclave, args, enclaveMap, replaceList,callerList,calleeList)
for enclave in enclaveList:
RPCGeneratorH(enclave, args, enclaveMap,callerList,calleeList)
RPCGeneratorC(enclave, args, enclaveMap,callerList,calleeList)
XDCONFGenerator(args, enclaveMap,callerList,enclaveList)
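# Hypothetical invocation sketch (the script name, paths and URIs below are
# invented for illustration; the enclave names are derived from the directories
# that hold the .mod.c files passed via -f, as argparser() does above):
#
#   python3 rpc_generator.py -o ./partitioned -g ./example.gedl \
#       -i Singlethreaded -a ./hal/api \
#       -n ipc:///tmp/sock_in_ -t ipc:///tmp/sock_out_ \
#       -x xdconf.json -f purple/example.mod.c orange/example.mod.c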
| 75.560976
| 238
| 0.551718
|
import json
import sys
import copy
import os
from argparse import ArgumentParser
def argparser(enclaveList, enclaveMap):
parser = ArgumentParser(description='CLOSURE RPC File and Wrapper Generator')
parser.add_argument('-o','--odir', required=True, type=str, help='Output Directory')
parser.add_argument('-g','--gedl', required=True, type=str, help='Input GEDL Filepath')
parser.add_argument('-i','--ipc', required=True, type=str, help='IPC Type (Singlethreaded/Multithreaded)')
parser.add_argument('-a','--hal', required=True, type=str, help='HAL Api Directory Path')
parser.add_argument('-n','--inuri', required=True, type=str, help='Input URI')
parser.add_argument('-t','--outuri', required=True, type=str, help='Output URI')
parser.add_argument('-x','--xdconf', required=True, type=str, help='Hal Config Map Filename')
parser.add_argument('-f','--files', required=True, type=str, nargs='+', help='List of Mod Files')
args = parser.parse_args()
for index, enclaveFile in enumerate(args.files):
enclaveName = enclaveFile[:enclaveFile.rfind('/')]
enclaveName = enclaveName[(enclaveName.rfind('/')+1):]
enclaveList.append(enclaveName)
enclaveMap[enclaveName] = [enclaveFile,"slave", index]
return args
def getFirstElem(list):
return list[0]
def GEDLParser(args,enclaveList, enclaveMap,replaceList,callerList,calleeList):
with open(args.gedl) as edl_file:
gedl = json.load(edl_file)
callNum = 3
callNumMap = {}
for index, enclave in enumerate(enclaveList):
occursList = []
callerList.append([])
calleeList.append([])
for enclavePair in gedl['gedl']:
if enclavePair["caller"] == enclave:
callsList = []
for call in enclavePair["calls"]:
paramsList = []
for param in call["params"]:
paramsList.append([str(param["type"]),str(param["name"]),str(param["dir"])])
if str(call["func"]) in callNumMap:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNumMap[str(call["func"])]])
else:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNum])
callNumMap[str(call["func"])] = callNum
callNum += 2
for occurance in call["occurs"]:
for line in occurance["lines"]:
occursList.append([line,str(call["func"])])
callerList[index].append([str(enclavePair["callee"]),enclaveMap[enclavePair["callee"]][2],copy.copy(callsList)])
if enclavePair["callee"] == enclave:
callsList = []
for call in enclavePair["calls"]:
paramsList = []
for param in call["params"]:
paramsList.append([str(param["type"]),str(param["name"]),str(param["dir"])])
if str(call["func"]) in callNumMap:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNumMap[str(call["func"])]])
else:
callsList.append([str(call["func"]),str(call["return"]["type"]),copy.copy(paramsList),callNum])
callNumMap[str(call["func"])] = callNum
callNum += 2
calleeList[index].append([str(enclavePair["caller"]),enclaveMap[enclavePair["caller"]][2],copy.copy(callsList)])
occursList.sort(key=getFirstElem)
replaceList.append(copy.copy(occursList))
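# A minimal sketch of the GEDL JSON that GEDLParser() consumes; the enclave,
# function and parameter names are invented for illustration, and only the
# keys actually read above are shown.
_EXAMPLE_GEDL = {
    "gedl": [
        {
            "caller": "purple",
            "callee": "orange",
            "calls": [
                {
                    "func": "get_a",
                    "return": {"type": "int"},
                    "params": [{"type": "int", "name": "x", "dir": "in"}],
                    "occurs": [{"lines": [42]}]
                }
            ]
        }
    ]
}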
def CModFunction(enclave,args,enclaveMap,replaceList,callerList,calleeList):
if not os.path.isfile(enclaveMap[enclave][0]):
print("File" + enclaveMap[enclave][0] + "does not exist. Please update GEDL Schema with valid C file.\n")
exit(0)
with open(enclaveMap[enclave][0]) as old_file:
newFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod","")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + newFile),"w") as modc_file:
modc_file.write("#include \"" + newFile[:newFile.rfind(".")] + "_rpc.h\"\n")
oldFileLines = list(old_file)
for index, line in enumerate(oldFileLines):
if "int main(" in line:
modc_file.write(line)
modc_file.write("\t_master_rpc_init();\n")
enclaveMap[enclave][1] = "master"
continue
while len(replaceList[enclaveIndex]) > 0 and (index+1) == replaceList[enclaveIndex][0][0]:
callIndex = line.find(replaceList[enclaveIndex][0][1])
if callIndex == -1:
print("Error: GEDL Cross-Enclave callsite in file %s for function %d at line %s could not be found" % (enclaveMap[enclave][0],index,replaceList[enclaveIndex][0][1]))
else:
line = line.replace(replaceList[enclaveIndex][0][1],"_rpc_" + replaceList[enclaveIndex][0][1])
del replaceList[enclaveIndex][0]
modc_file.write(line)
if enclaveMap[enclave][1] != "master":
modc_file.write("int main(int argc, char **argv) {\n\treturn _slave_rpc_loop();\n}")
def RPCGeneratorH(enclave,args,enclaveMap,callerList,calleeList):
rpchFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod.c","_rpc.h")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + rpchFile),"w") as rpch_file:
rpch_file.write("#ifndef _" + enclave.capitalize() + "_RPC_\n#define _" + enclave.capitalize() + "_RPC_\n#include \"xdcomms.h\"\n#include \"codec.h\"\n")
if args.ipc != "Singlethreaded" and enclaveMap[enclave][1] != "master":
rpch_file.write("#include <pthread.h>\n")
rpch_file.write("\n# define APP_BASE 0\n")
for callerPair in callerList[enclaveIndex]:
if 1:
rpch_file.write("# define MUX_NEXTRPC APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define SEC_NEXTRPC APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define MUX_OKAY APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_OKAY APP_BASE + " + str(enclaveIndex+ 1) + "\n")
for call in callerPair[2]:
rpch_file.write("# define MUX_REQUEST_" + call[0].upper() + " APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define SEC_REQUEST_" + call[0].upper() + " APP_BASE + " + str(callerPair[1] + 1) + "\n")
rpch_file.write("# define MUX_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
for calleePair in calleeList[enclaveIndex]:
if 1:
rpch_file.write("# define MUX_NEXTRPC APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_NEXTRPC APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define MUX_OKAY APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("# define SEC_OKAY APP_BASE + " + str(calleePair[1] + 1) + "\n")
for call in calleePair[2]:
rpch_file.write("# define MUX_REQUEST_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define SEC_REQUEST_" + call[0].upper() + " APP_BASE + " + str(enclaveIndex+ 1) + "\n")
rpch_file.write("# define MUX_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("# define SEC_RESPONSE_" + call[0].upper() + " APP_BASE + " + str(calleePair[1] + 1) + "\n")
rpch_file.write("\n#define INURI \"" + args.inuri + enclave + "\"\n#define OUTURI \"" + args.outuri + enclave + "\"\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpch_file.write("#pragma cle def TAG_RESPONSE_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + "," + str(call[3]+1) + "] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_REQUEST_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + callerPair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(callerPair[1]+ 1) + "," + str(callerPair[1]+ 1) + "," + str(call[3]) + "] }} \\\n\t] }\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpch_file.write("#pragma cle def TAG_RESPONSE_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + calleePair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(calleePair[1]+ 1) + "," + str(calleePair[1]+ 1) + "," + str(call[3]+1) + "] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_REQUEST_" + call[0].upper() + " {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + "," + str(call[3]) + "] }} \\\n\t] }\n")
if 1:
for callerPair in callerList[enclaveIndex]:
rpch_file.write("#pragma cle def TAG_OKAY {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + ",2] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_NEXTRPC {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + callerPair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(callerPair[1]+ 1) + "," + str(callerPair[1]+ 1) + ",1] }} \\\n\t] }\n")
for calleePair in calleeList[enclaveIndex]:
rpch_file.write("#pragma cle def TAG_OKAY {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + calleePair[0] + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(calleePair[1]+ 1) + "," + str(calleePair[1]+ 1) + ",2] }} \\\n\t] }\n")
rpch_file.write("#pragma cle def TAG_NEXTRPC {\"level\":\"" + enclave + "\",\\\n\t\"cdf\": [\\\n\t\t{\"remotelevel\":\"" + enclave + "\", \\\n\t\t\t\"direction\": \"egress\", \\\n" \
"\t\t\t\"guarddirective\": { \"operation\": \"allow\", \\\n\t\t\t\t\t\t\"gapstag\": [" + str(enclaveIndex + 1) + "," + str(enclaveIndex + 1) + ",1] }} \\\n\t] }\n")
if enclaveMap[enclave][1] == "master":
rpch_file.write("extern void _master_rpc_init();\n")
else:
rpch_file.write("extern int _slave_rpc_loop();\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpch_file.write("extern " + call[1] + " _rpc_" + call[0] + "(")
for param in call[2]:
rpch_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpch_file.write(",")
rpch_file.write(");\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpch_file.write("extern " + call[1] + " " + call[0] + "(")
for param in call[2]:
rpch_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpch_file.write(",")
rpch_file.write(");\n")
rpch_file.write("\n\n#endif /* _"+ enclave.upper() + "_RPC_ */")
def RPCGeneratorC(enclave,args,enclaveMap,callerList,calleeList):
rpccFile = enclaveMap[enclave][0][enclaveMap[enclave][0].rfind('/') + 1:].replace(".mod.c","_rpc.c")
enclaveIndex = enclaveMap[enclave][2]
with open((args.odir + "/" + enclave + "/" + rpccFile),"w") as rpcc_file:
rpcc_file.write("#include \"" + rpccFile[:rpccFile.rfind(".")] + ".h\"\n")
if enclaveMap[enclave][1] != "master":
rpcc_file.write("#define TAG_MATCH(X, Y) (X.mux == Y.mux && X.sec == Y.sec && X.typ == Y.typ)\n#define WRAP(X) void *_wrapper_##X(void *tag) { while(1) { _handle_##X(tag); } }\n\n")
if enclaveMap[enclave][1] == "master" and args.ipc == "Singlethreaded":
rpcc_file.write("void _notify_next_tag(gaps_tag* n_tag) {\n")
rpcc_file.write("\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_NEXTRPC\n\tnextrpc_datatype nxt;\n\t#pragma cle end TAG_NEXTRPC\n")
rpcc_file.write("\t#pragma cle begin TAG_OKAY\n\tokay_datatype okay;\n\t#pragma cle end TAG_OKAY\n\n")
rpcc_file.write("\tnxt.mux = n_tag->mux;\n\tnxt.sec = n_tag->sec;\n\tnxt.typ = n_tag->typ;\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_OKAY, SEC_OKAY, DATA_TYP_OKAY);\n\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(o_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_asyn_send(psocket, &nxt, &t_tag);\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &okay, &o_tag);\n}\n\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("void _handle_request_" + call[0] + "(gaps_tag* tag) {\n\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_REQUEST_" + call[0].upper() + "\n\trequest_" + call[0] + "_datatype req_" + call[0] + ";\n\t#pragma cle end TAG_REQUEST_" + call[0].upper() + "\n")
rpcc_file.write("\t#pragma cle begin TAG_RESPONSE_" + call[0].upper() + "\n\tresponse_" + call[0] + "_datatype res_" + call[0] + ";\n\t#pragma cle end TAG_RESPONSE_" + call[0].upper() + "\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(t_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &req_" + call[0] + ", &t_tag);\n\t")
if call[1] != "void":
rpcc_file.write("res_" + call[0] + ".ret = ")
rpcc_file.write(call[0] + "(")
for param in call[2]:
rpcc_file.write("req_" + call[0] + "." + param[1])
if param != call[2][-1]:
rpcc_file.write(",")
rpcc_file.write(");\n\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_RESPONSE_" + call[0].upper() + ", SEC_RESPONSE_" + call[0].upper() + ", DATA_TYP_RESPONSE_" + call[0].upper() + ");\n\txdc_asyn_send(psocket, &res_" + call[0] + ", &o_tag);\n}\n\n")
if enclaveMap[enclave][1] != "master":
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("void _handle_nxtrpc(gaps_tag* n_tag) {\n\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_NEXTRPC\n\tnextrpc_datatype nxt;\n\t#pragma cle end TAG_NEXTRPC\n")
rpcc_file.write("\t#pragma cle begin TAG_OKAY\n\tokay_datatype okay;\n\t#pragma cle end TAG_OKAY\n\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(t_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
rpcc_file.write("\txdc_blocking_recv(ssocket, &nxt, &t_tag);\n\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_OKAY, SEC_OKAY, DATA_TYP_OKAY);\n\tokay.x = 0;\n")
rpcc_file.write("\txdc_asyn_send(psocket, &okay, &o_tag);\n\n")
rpcc_file.write("\tn_tag->mux = nxt.mux;\n\tn_tag->sec = nxt.sec;\n\tn_tag->typ = nxt.typ;\n}\n\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write(call[1] + " _rpc_" + call[0] + "(")
for param in call[2]:
rpcc_file.write(param[0] + " " + param[1])
if param != call[2][-1]:
rpcc_file.write(",")
rpcc_file.write(") {\n")
rpcc_file.write("\tstatic int inited = 0;\n\tstatic void *psocket;\n\tstatic void *ssocket;\n\tgaps_tag t_tag;\n\tgaps_tag o_tag;\n\t")
rpcc_file.write("#pragma cle begin TAG_REQUEST_" + call[0].upper() + "\n\trequest_" + call[0] + "_datatype req_" + call[0] + ";\n\t#pragma cle end TAG_REQUEST_" + call[0].upper() + "\n")
rpcc_file.write("\t#pragma cle begin TAG_RESPONSE_" + call[0].upper() + "\n\tresponse_" + call[0] + "_datatype res_" + call[0] + ";\n\t#pragma cle end TAG_RESPONSE_" + call[0].upper() + "\n\n")
if len(call[2]) == 0:
rpcc_file.write("\treq_" + call[0] + ".dummy = 0;\n")
else:
for param in call[2]:
rpcc_file.write("\treq_" + call[0] + "." + param[1] + "=" + param[1] + ";\n")
rpcc_file.write("\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\ttag_write(&o_tag, MUX_RESPONSE_" + call[0].upper() + ", SEC_RESPONSE_" + call[0].upper() + ", DATA_TYP_RESPONSE_" + call[0].upper() + ");\n\n")
rpcc_file.write("\tif(!inited) {\n\t\tinited = 1;\n\t\tpsocket = xdc_pub_socket();\n\t\tssocket = xdc_sub_socket(o_tag);\n\t\tsleep(1); /* zmq socket join delay */\n\t}\n\n")
if args.ipc == "Singlethreaded":
rpcc_file.write("\t_notify_next_tag(&t_tag);\n")
rpcc_file.write("\txdc_asyn_send(psocket, &req_" + call[0] + ", &t_tag);\n\txdc_blocking_recv(ssocket, &res_" + call[0] + ", &o_tag);\n")
rpcc_file.write("\treturn (res_" + call[0] + ".ret);\n}\n\n")
rpcc_file.write("void _hal_init(char *inuri, char *outuri) {\n\txdc_set_in(inuri);\n\txdc_set_out(outuri);\n")
if 1:
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write("\txdc_register(nextrpc_data_encode, nextrpc_data_decode, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\txdc_register(okay_data_encode, okay_data_decode, DATA_TYP_OKAY);\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\txdc_register(nextrpc_data_encode, nextrpc_data_decode, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\txdc_register(okay_data_encode, okay_data_decode, DATA_TYP_OKAY);\n")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
rpcc_file.write("\txdc_register(request_" + call[0] + "_data_encode, request_" + call[0] + "_data_decode, DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\txdc_register(response_" + call[0] + "_data_encode, response_" + call[0] + "_data_decode, DATA_TYP_RESPONSE_" + call[0].upper() + ");\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\txdc_register(request_" + call[0] + "_data_encode, request_" + call[0] + "_data_decode, DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\txdc_register(response_" + call[0] + "_data_encode, response_" + call[0] + "_data_decode, DATA_TYP_RESPONSE_" + call[0].upper() + ");\n")
rpcc_file.write("}\n\n")
if enclaveMap[enclave][1] == "master":
rpcc_file.write("void _master_rpc_init() {\n\t_hal_init((char*)INURI, (char *)OUTURI);\n}\n\n")
else:
if args.ipc == "Multithreaded":
crossDomains = 0
for calleePair in calleeList[enclaveIndex]:
crossDomains += 1 + len(calleePair[2])
rpcc_file.write("#define NXDRPC " + str(crossDomains) + "\n")
for calleePair in calleeList[enclaveIndex]:
rpcc_file.write("WRAP(nxtrpc)\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("WRAP(request_" + call[0] + ")\n")
rpcc_file.write("\nint _slave_rpc_loop() {\n\tgaps_tag n_tag;\n")
if args.ipc == "Multithreaded":
rpcc_file.write("\tpthread_t tid[NXDRPC];\n\t_hal_init((char *)INURI, (char *)OUTURI);\n")
tidIndex = 0
for calleePair in calleeList[enclaveIndex]:
rpcc_file.write("\tpthread_create(&tid[" + str(tidIndex) + "], NULL, _wrapper_nxtrpc, &n_tag);\n")
tidIndex += 1
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\tpthread_create(&tid[" + str(tidIndex) + "], NULL, _wrapper_request_" + call[0] + ", &n_tag);\n")
tidIndex += 1
rpcc_file.write("\tfor (int i = 0; i < NXDRPC; i++) pthread_join(tid[i], NULL);\n\treturn 0;\n}\n\n")
else:
rpcc_file.write("int _slave_rpc_loop() {\n\tgaps_tag n_tag;\n\tgaps_tag t_tag;\n\n\t_hal_init((char *)INURI, (char *)OUTURI);\n\n")
rpcc_file.write("\twhile (1) {\n\t\t_handle_nxtrpc(&n_tag);\n\t\ttag_write(&t_tag, MUX_NEXTRPC, SEC_NEXTRPC, DATA_TYP_NEXTRPC);\n")
rpcc_file.write("\t\tif(TAG_MATCH(n_tag, t_tag)) {\n\t\t\tcontinue;\n\t\t}\n")
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
rpcc_file.write("\t\ttag_write(&t_tag, MUX_REQUEST_" + call[0].upper() + ", SEC_REQUEST_" + call[0].upper() + ", DATA_TYP_REQUEST_" + call[0].upper() + ");\n")
rpcc_file.write("\t\tif (TAG_MATCH(n_tag, t_tag)) {\n\t\t\t_handle_request_"+ call[0] + "(NULL);\n\t\t\tcontinue;\n\t\t}\n\t\tcontinue;\n\t}\n}\n\n")
def writeHALEntry(file, fromName , toName, mux, sec, typ, funcName):
file.write("{\"from\":\"" + fromName + "\",\"to\":\"" + toName + "\",\"mux\":" + str(mux) + ",\"sec\":" + str(sec) + ",\"typ\":" + str(typ) + ",\"name\":\"" + funcName +"\"}")
def XDCONFGenerator(args,enclaveMap,callerList,enclaveList):
with open((args.odir + "/" + args.xdconf),"a") as map_file:
map_file.write("{\"enclaves\": [")
first = 1
for enclave in enclaveList:
if first == 1:
first = 0
else:
map_file.write(",")
map_file.write("\n\t{\n\t\t\"enclave\":\"" + enclave + "\",\n\t\t\"inuri\":\"" + args.inuri + enclave + "\",\n\t\t\"outuri\":\"" + args.outuri + enclave + "\",\n\t\t\"halmaps\":[")
enclaveIndex = enclaveMap[enclave][2]
if enclaveMap[enclave][1] == "master":
for callerPair in callerList[enclaveIndex]:
writeHALEntry(map_file, enclave , callerPair[0], (callerPair[1] + 1), (callerPair[1] + 1), 1, "NEXTRPC")
map_file.write(",")
writeHALEntry(map_file, callerPair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), 2, "OKAY")
else:
for calleePair in calleeList[enclaveIndex]:
writeHALEntry(map_file, calleePair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), 1, "NEXTRPC")
map_file.write(",")
writeHALEntry(map_file, enclave , calleePair[0], (calleePair[1] + 1), (calleePair[1] + 1), 2, "OKAY")
for callerPair in callerList[enclaveIndex]:
for call in callerPair[2]:
map_file.write(",")
writeHALEntry(map_file, enclave , callerPair[0], (callerPair[1] + 1), (callerPair[1] + 1), call[3], ("REQUEST_" + call[0].upper()))
map_file.write(",")
writeHALEntry(map_file, callerPair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), (call[3]+1), ("RESPONSE_" + call[0].upper()))
for calleePair in calleeList[enclaveIndex]:
for call in calleePair[2]:
map_file.write(",")
writeHALEntry(map_file, calleePair[0] , enclave, (enclaveIndex + 1), (enclaveIndex + 1), call[3], ("REQUEST_" + call[0].upper()))
map_file.write(",")
writeHALEntry(map_file, enclave , calleePair[0], (calleePair[1] + 1), (calleePair[1] + 1), (call[3]+1), ("RESPONSE_" + call[0].upper()))
map_file.write("]\n\t}")
map_file.write("\n]}")
enclaveMap = {}
enclaveList = []
replaceList = []
callerList = []
calleeList = []
args = argparser(enclaveList, enclaveMap)
GEDLParser(args, enclaveList, enclaveMap, replaceList,callerList,calleeList)
for enclave in enclaveList:
CModFunction(enclave, args, enclaveMap, replaceList,callerList,calleeList)
for enclave in enclaveList:
RPCGeneratorH(enclave, args, enclaveMap,callerList,calleeList)
RPCGeneratorC(enclave, args, enclaveMap,callerList,calleeList)
XDCONFGenerator(args, enclaveMap,callerList,enclaveList)
| true
| true
|
1c40916eb433571c4cd4fc33eb695f5113d7ecfa
| 9,370
|
py
|
Python
|
snips_nlu/dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | null | null | null |
snips_nlu/dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | null | null | null |
snips_nlu/dataset.py
|
ddorian/snips-nlu
|
0934d386bb138ebb34764446416856cfac664e65
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division, unicode_literals
import json
from builtins import str
from collections import Counter
from copy import deepcopy
from future.utils import iteritems, itervalues
from snips_nlu_ontology import get_all_languages
from snips_nlu.constants import (AUTOMATICALLY_EXTENSIBLE, CAPITALIZE, DATA,
ENTITIES, ENTITY, INTENTS, LANGUAGE,
PARSER_THRESHOLD, SLOT_NAME, SYNONYMS, TEXT,
USE_SYNONYMS, UTTERANCES, VALIDATED, VALUE)
from snips_nlu.entity_parser.builtin_entity_parser import (BuiltinEntityParser,
is_builtin_entity,
is_gazetteer_entity)
from snips_nlu.preprocessing import tokenize_light
from snips_nlu.string_variations import get_string_variations
from snips_nlu.utils import validate_key, validate_keys, validate_type
def extract_utterance_entities(dataset):
entities_values = {ent_name: set() for ent_name in dataset[ENTITIES]}
for intent in itervalues(dataset[INTENTS]):
for utterance in intent[UTTERANCES]:
for chunk in utterance[DATA]:
if ENTITY in chunk:
entities_values[chunk[ENTITY]].add(chunk[TEXT].strip())
return {k: list(v) for k, v in iteritems(entities_values)}
def extract_intent_entities(dataset, entity_filter=None):
intent_entities = {intent: set() for intent in dataset[INTENTS]}
for intent_name, intent_data in iteritems(dataset[INTENTS]):
for utterance in intent_data[UTTERANCES]:
for chunk in utterance[DATA]:
if ENTITY in chunk:
if entity_filter and not entity_filter(chunk[ENTITY]):
continue
intent_entities[intent_name].add(chunk[ENTITY])
return intent_entities
def validate_and_format_dataset(dataset):
"""Checks that the dataset is valid and format it"""
# Make this function idempotent
if dataset.get(VALIDATED, False):
return dataset
dataset = deepcopy(dataset)
dataset = json.loads(json.dumps(dataset))
validate_type(dataset, dict)
mandatory_keys = [INTENTS, ENTITIES, LANGUAGE]
for key in mandatory_keys:
validate_key(dataset, key, object_label="dataset")
validate_type(dataset[ENTITIES], dict)
validate_type(dataset[INTENTS], dict)
language = dataset[LANGUAGE]
validate_type(language, str)
if language not in get_all_languages():
raise ValueError("Unknown language: '%s'" % language)
for intent in itervalues(dataset[INTENTS]):
validate_and_format_intent(intent, dataset[ENTITIES])
utterance_entities_values = extract_utterance_entities(dataset)
builtin_entity_parser = BuiltinEntityParser.build(dataset=dataset)
for entity_name, entity in iteritems(dataset[ENTITIES]):
        utterance_entities = utterance_entities_values[entity_name]
        if is_builtin_entity(entity_name):
            dataset[ENTITIES][entity_name] = \
                validate_and_format_builtin_entity(entity, utterance_entities)
        else:
            dataset[ENTITIES][entity_name] = validate_and_format_custom_entity(
                entity, utterance_entities, language, builtin_entity_parser)
dataset[VALIDATED] = True
return dataset
def validate_and_format_intent(intent, entities):
validate_type(intent, dict)
validate_key(intent, UTTERANCES, object_label="intent dict")
validate_type(intent[UTTERANCES], list)
for utterance in intent[UTTERANCES]:
validate_type(utterance, dict)
validate_key(utterance, DATA, object_label="utterance")
validate_type(utterance[DATA], list)
for chunk in utterance[DATA]:
validate_type(chunk, dict)
validate_key(chunk, TEXT, object_label="chunk")
if ENTITY in chunk or SLOT_NAME in chunk:
mandatory_keys = [ENTITY, SLOT_NAME]
validate_keys(chunk, mandatory_keys, object_label="chunk")
if is_builtin_entity(chunk[ENTITY]):
continue
else:
validate_key(entities, chunk[ENTITY],
object_label=ENTITIES)
return intent
def get_text_from_chunks(chunks):
return "".join(chunk[TEXT] for chunk in chunks)
def has_any_capitalization(entity_utterances, language):
for utterance in entity_utterances:
tokens = tokenize_light(utterance, language)
if any(t.isupper() or t.istitle() for t in tokens):
return True
return False
def add_entity_variations(utterances, entity_variations, entity_value):
utterances[entity_value] = entity_value
for variation in entity_variations[entity_value]:
if variation:
utterances[variation] = entity_value
return utterances
def _extract_entity_values(entity):
values = set()
for ent in entity[DATA]:
values.add(ent[VALUE])
if entity[USE_SYNONYMS]:
values.update(set(ent[SYNONYMS]))
return values
def validate_and_format_custom_entity(entity, queries_entities, language,
builtin_entity_parser):
validate_type(entity, dict)
# TODO: this is here temporarily, only to allow backward compatibility
if PARSER_THRESHOLD not in entity:
entity[PARSER_THRESHOLD] = 1.0
mandatory_keys = [USE_SYNONYMS, AUTOMATICALLY_EXTENSIBLE, DATA,
PARSER_THRESHOLD]
validate_keys(entity, mandatory_keys, object_label="entity")
validate_type(entity[USE_SYNONYMS], bool)
validate_type(entity[AUTOMATICALLY_EXTENSIBLE], bool)
validate_type(entity[DATA], list)
validate_type(entity[PARSER_THRESHOLD], float)
formatted_entity = dict()
formatted_entity[AUTOMATICALLY_EXTENSIBLE] = entity[
AUTOMATICALLY_EXTENSIBLE]
formatted_entity[PARSER_THRESHOLD] = entity[PARSER_THRESHOLD]
use_synonyms = entity[USE_SYNONYMS]
# Validate format and filter out unused data
valid_entity_data = []
for entry in entity[DATA]:
validate_type(entry, dict)
validate_keys(entry, [VALUE, SYNONYMS], object_label="entity entry")
entry[VALUE] = entry[VALUE].strip()
if not entry[VALUE]:
continue
validate_type(entry[SYNONYMS], list)
entry[SYNONYMS] = [s.strip() for s in entry[SYNONYMS]
if len(s.strip()) > 0]
valid_entity_data.append(entry)
entity[DATA] = valid_entity_data
# Compute capitalization before normalizing
    # Normalization lowercases the values and hence leads to a bad capitalization calculation
formatted_entity[CAPITALIZE] = has_any_capitalization(queries_entities,
language)
validated_utterances = dict()
    # Map original values and synonyms
for data in entity[DATA]:
ent_value = data[VALUE]
if not ent_value:
continue
validated_utterances[ent_value] = ent_value
if use_synonyms:
for s in data[SYNONYMS]:
if s and s not in validated_utterances:
validated_utterances[s] = ent_value
# Add variations if not colliding
all_original_values = _extract_entity_values(entity)
variations = dict()
for data in entity[DATA]:
ent_value = data[VALUE]
values_to_variate = {ent_value}
if use_synonyms:
values_to_variate.update(set(data[SYNONYMS]))
variations[ent_value] = set(
v for value in values_to_variate
for v in get_string_variations(value, language,
builtin_entity_parser))
variation_counter = Counter(
[v for vars in itervalues(variations) for v in vars])
non_colliding_variations = {
value: [
v for v in variations if
v not in all_original_values and variation_counter[v] == 1
]
for value, variations in iteritems(variations)
}
for entry in entity[DATA]:
entry_value = entry[VALUE]
validated_utterances = add_entity_variations(
validated_utterances, non_colliding_variations, entry_value)
# Merge queries entities
queries_entities_variations = {
ent: get_string_variations(ent, language, builtin_entity_parser)
for ent in queries_entities
}
for original_ent, variations in iteritems(queries_entities_variations):
if not original_ent or original_ent in validated_utterances:
continue
validated_utterances[original_ent] = original_ent
for variation in variations:
if variation and variation not in validated_utterances:
validated_utterances[variation] = original_ent
formatted_entity[UTTERANCES] = validated_utterances
return formatted_entity
def validate_and_format_builtin_entity(entity, queries_entities):
validate_type(entity, dict)
return {UTTERANCES: set(queries_entities)}
def get_dataset_gazetteer_entities(dataset, intent=None):
if intent is not None:
return extract_intent_entities(dataset, is_gazetteer_entity)[intent]
return {e for e in dataset[ENTITIES] if is_gazetteer_entity(e)}
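# Hedged usage sketch: a minimal dataset accepted by
# validate_and_format_dataset(). The intent/entity names and utterance text are
# invented for illustration; the keys reuse the constants imported above.
if __name__ == "__main__":
    sample_dataset = {
        LANGUAGE: "en",
        INTENTS: {
            "turnLightOn": {
                UTTERANCES: [
                    {
                        DATA: [
                            {TEXT: "turn on the lights in the "},
                            {TEXT: "kitchen", ENTITY: "room",
                             SLOT_NAME: "room"},
                        ]
                    }
                ]
            }
        },
        ENTITIES: {
            "room": {
                USE_SYNONYMS: True,
                AUTOMATICALLY_EXTENSIBLE: True,
                DATA: [{VALUE: "kitchen", SYNONYMS: ["cooking room"]}],
            }
        },
    }
    formatted = validate_and_format_dataset(sample_dataset)
    # After validation the entity exposes its utterance -> value mapping.
    print(sorted(formatted[ENTITIES]["room"][UTTERANCES]))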
| 39.369748
| 79
| 0.673212
|
from __future__ import division, unicode_literals
import json
from builtins import str
from collections import Counter
from copy import deepcopy
from future.utils import iteritems, itervalues
from snips_nlu_ontology import get_all_languages
from snips_nlu.constants import (AUTOMATICALLY_EXTENSIBLE, CAPITALIZE, DATA,
ENTITIES, ENTITY, INTENTS, LANGUAGE,
PARSER_THRESHOLD, SLOT_NAME, SYNONYMS, TEXT,
USE_SYNONYMS, UTTERANCES, VALIDATED, VALUE)
from snips_nlu.entity_parser.builtin_entity_parser import (BuiltinEntityParser,
is_builtin_entity,
is_gazetteer_entity)
from snips_nlu.preprocessing import tokenize_light
from snips_nlu.string_variations import get_string_variations
from snips_nlu.utils import validate_key, validate_keys, validate_type
def extract_utterance_entities(dataset):
entities_values = {ent_name: set() for ent_name in dataset[ENTITIES]}
for intent in itervalues(dataset[INTENTS]):
for utterance in intent[UTTERANCES]:
for chunk in utterance[DATA]:
if ENTITY in chunk:
entities_values[chunk[ENTITY]].add(chunk[TEXT].strip())
return {k: list(v) for k, v in iteritems(entities_values)}
def extract_intent_entities(dataset, entity_filter=None):
intent_entities = {intent: set() for intent in dataset[INTENTS]}
for intent_name, intent_data in iteritems(dataset[INTENTS]):
for utterance in intent_data[UTTERANCES]:
for chunk in utterance[DATA]:
if ENTITY in chunk:
if entity_filter and not entity_filter(chunk[ENTITY]):
continue
intent_entities[intent_name].add(chunk[ENTITY])
return intent_entities
def validate_and_format_dataset(dataset):
if dataset.get(VALIDATED, False):
return dataset
dataset = deepcopy(dataset)
dataset = json.loads(json.dumps(dataset))
validate_type(dataset, dict)
mandatory_keys = [INTENTS, ENTITIES, LANGUAGE]
for key in mandatory_keys:
validate_key(dataset, key, object_label="dataset")
validate_type(dataset[ENTITIES], dict)
validate_type(dataset[INTENTS], dict)
language = dataset[LANGUAGE]
validate_type(language, str)
if language not in get_all_languages():
raise ValueError("Unknown language: '%s'" % language)
for intent in itervalues(dataset[INTENTS]):
validate_and_format_intent(intent, dataset[ENTITIES])
utterance_entities_values = extract_utterance_entities(dataset)
builtin_entity_parser = BuiltinEntityParser.build(dataset=dataset)
for entity_name, entity in iteritems(dataset[ENTITIES]):
        utterance_entities = utterance_entities_values[entity_name]
        if is_builtin_entity(entity_name):
            dataset[ENTITIES][entity_name] = \
                validate_and_format_builtin_entity(entity, utterance_entities)
        else:
            dataset[ENTITIES][entity_name] = validate_and_format_custom_entity(
                entity, utterance_entities, language, builtin_entity_parser)
dataset[VALIDATED] = True
return dataset
def validate_and_format_intent(intent, entities):
validate_type(intent, dict)
validate_key(intent, UTTERANCES, object_label="intent dict")
validate_type(intent[UTTERANCES], list)
for utterance in intent[UTTERANCES]:
validate_type(utterance, dict)
validate_key(utterance, DATA, object_label="utterance")
validate_type(utterance[DATA], list)
for chunk in utterance[DATA]:
validate_type(chunk, dict)
validate_key(chunk, TEXT, object_label="chunk")
if ENTITY in chunk or SLOT_NAME in chunk:
mandatory_keys = [ENTITY, SLOT_NAME]
validate_keys(chunk, mandatory_keys, object_label="chunk")
if is_builtin_entity(chunk[ENTITY]):
continue
else:
validate_key(entities, chunk[ENTITY],
object_label=ENTITIES)
return intent
def get_text_from_chunks(chunks):
return "".join(chunk[TEXT] for chunk in chunks)
def has_any_capitalization(entity_utterances, language):
for utterance in entity_utterances:
tokens = tokenize_light(utterance, language)
if any(t.isupper() or t.istitle() for t in tokens):
return True
return False
def add_entity_variations(utterances, entity_variations, entity_value):
utterances[entity_value] = entity_value
for variation in entity_variations[entity_value]:
if variation:
utterances[variation] = entity_value
return utterances
def _extract_entity_values(entity):
values = set()
for ent in entity[DATA]:
values.add(ent[VALUE])
if entity[USE_SYNONYMS]:
values.update(set(ent[SYNONYMS]))
return values
def validate_and_format_custom_entity(entity, queries_entities, language,
builtin_entity_parser):
validate_type(entity, dict)
if PARSER_THRESHOLD not in entity:
entity[PARSER_THRESHOLD] = 1.0
mandatory_keys = [USE_SYNONYMS, AUTOMATICALLY_EXTENSIBLE, DATA,
PARSER_THRESHOLD]
validate_keys(entity, mandatory_keys, object_label="entity")
validate_type(entity[USE_SYNONYMS], bool)
validate_type(entity[AUTOMATICALLY_EXTENSIBLE], bool)
validate_type(entity[DATA], list)
validate_type(entity[PARSER_THRESHOLD], float)
formatted_entity = dict()
formatted_entity[AUTOMATICALLY_EXTENSIBLE] = entity[
AUTOMATICALLY_EXTENSIBLE]
formatted_entity[PARSER_THRESHOLD] = entity[PARSER_THRESHOLD]
use_synonyms = entity[USE_SYNONYMS]
valid_entity_data = []
for entry in entity[DATA]:
validate_type(entry, dict)
validate_keys(entry, [VALUE, SYNONYMS], object_label="entity entry")
entry[VALUE] = entry[VALUE].strip()
if not entry[VALUE]:
continue
validate_type(entry[SYNONYMS], list)
entry[SYNONYMS] = [s.strip() for s in entry[SYNONYMS]
if len(s.strip()) > 0]
valid_entity_data.append(entry)
entity[DATA] = valid_entity_data
formatted_entity[CAPITALIZE] = has_any_capitalization(queries_entities,
language)
validated_utterances = dict()
for data in entity[DATA]:
ent_value = data[VALUE]
if not ent_value:
continue
validated_utterances[ent_value] = ent_value
if use_synonyms:
for s in data[SYNONYMS]:
if s and s not in validated_utterances:
validated_utterances[s] = ent_value
all_original_values = _extract_entity_values(entity)
variations = dict()
for data in entity[DATA]:
ent_value = data[VALUE]
values_to_variate = {ent_value}
if use_synonyms:
values_to_variate.update(set(data[SYNONYMS]))
variations[ent_value] = set(
v for value in values_to_variate
for v in get_string_variations(value, language,
builtin_entity_parser))
variation_counter = Counter(
[v for vars in itervalues(variations) for v in vars])
non_colliding_variations = {
value: [
v for v in variations if
v not in all_original_values and variation_counter[v] == 1
]
for value, variations in iteritems(variations)
}
for entry in entity[DATA]:
entry_value = entry[VALUE]
validated_utterances = add_entity_variations(
validated_utterances, non_colliding_variations, entry_value)
queries_entities_variations = {
ent: get_string_variations(ent, language, builtin_entity_parser)
for ent in queries_entities
}
for original_ent, variations in iteritems(queries_entities_variations):
if not original_ent or original_ent in validated_utterances:
continue
validated_utterances[original_ent] = original_ent
for variation in variations:
if variation and variation not in validated_utterances:
validated_utterances[variation] = original_ent
formatted_entity[UTTERANCES] = validated_utterances
return formatted_entity
def validate_and_format_builtin_entity(entity, queries_entities):
validate_type(entity, dict)
return {UTTERANCES: set(queries_entities)}
def get_dataset_gazetteer_entities(dataset, intent=None):
if intent is not None:
return extract_intent_entities(dataset, is_gazetteer_entity)[intent]
return {e for e in dataset[ENTITIES] if is_gazetteer_entity(e)}
| true
| true
|
1c409292ee3ecf1d4fccae874588048198c4f948
| 3,185
|
py
|
Python
|
b_stage_deployment_test/testing_infrastructure.py
|
ignaloidas/B.StageDeployment
|
951af38e675e7d469e70d3460836d1e70bc1f63b
|
[
"Apache-2.0"
] | null | null | null |
b_stage_deployment_test/testing_infrastructure.py
|
ignaloidas/B.StageDeployment
|
951af38e675e7d469e70d3460836d1e70bc1f63b
|
[
"Apache-2.0"
] | null | null | null |
b_stage_deployment_test/testing_infrastructure.py
|
ignaloidas/B.StageDeployment
|
951af38e675e7d469e70d3460836d1e70bc1f63b
|
[
"Apache-2.0"
] | 1
|
2021-02-01T10:28:32.000Z
|
2021-02-01T10:28:32.000Z
|
from aws_cdk.aws_apigatewayv2 import CfnApi, CfnStage, CfnRoute, CfnIntegration
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Construct
from b_aws_testing_framework.tools.cdk_testing.testing_manager import TestingManager
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_stage_deployment.function import StageDeploymentSingletonFunction
from b_stage_deployment.resource import StageDeploymentResource
class TestingInfrastructure(TestingStack):
"""
This is an entry point for your infrastructure. Create other resources and stacks you want to test here.
"""
def __init__(self, scope: Construct):
super().__init__(scope=scope)
prefix = TestingManager.get_global_prefix()
api = CfnApi(
scope=self,
id=f'{prefix}Api',
description='Sample API.',
name=f'{prefix}Api',
protocol_type='HTTP'
)
stage = CfnStage(
scope=self,
id=f'{prefix}Stage',
api_id=api.ref,
stage_name='prod',
auto_deploy=False,
description='Test description.'
)
function = Function(
scope=self,
id=f'{prefix}TestFunction',
function_name=f'{prefix}TestFunction',
code=Code.from_inline(
'def handler(*args, **kwargs):\n'
' return {\n'
' "isBase64Encoded": False,\n'
' "statusCode": 200,\n'
' "headers": {},\n'
' "body": "{\\"message\\": \\"success\\"}"\n'
' }\n'
),
handler='index.handler',
runtime=Runtime.PYTHON_3_6,
)
integration = CfnIntegration(
scope=self,
id=f'{TestingManager.get_global_prefix()}LambdaIntegration',
api_id=api.ref,
integration_type='AWS_PROXY',
integration_uri=(
f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31'
f'/functions/{function.function_arn}/invocations'
),
description='Sample lambda proxy integration.',
payload_format_version='1.0'
)
CfnRoute(
scope=self,
id=f'{prefix}SampleRoute',
api_id=api.ref,
route_key='GET /test',
target=f'integrations/{integration.ref}'
)
backend = StageDeploymentSingletonFunction(self, 'DeploymentBackend')
# Make some deployments.
StageDeploymentResource(self, 'C1', backend, api.ref, stage.stage_name, 'Sample1.')
StageDeploymentResource(self, 'C2', backend, api.ref, stage.stage_name, 'Sample2.')
StageDeploymentResource(self, 'C3', backend, api.ref, stage.stage_name, 'Sample3.')
StageDeploymentResource(self, 'C4', backend, api.ref, stage.stage_name, 'Sample4.')
StageDeploymentResource(self, 'C5', backend, api.ref, stage.stage_name, 'Sample5.')
self.add_output('ApiId', api.ref)
self.add_output('StageName', stage.stage_name)
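# Hedged synthesis sketch (assumes TestingStack only needs the scope argument
# and that the b-aws-testing-framework global prefix and AWS credentials are
# already configured by the test session):
if __name__ == '__main__':
    from aws_cdk.core import App

    app = App()
    TestingInfrastructure(scope=app)
    app.synth()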
| 36.609195
| 108
| 0.595918
|
from aws_cdk.aws_apigatewayv2 import CfnApi, CfnStage, CfnRoute, CfnIntegration
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Construct
from b_aws_testing_framework.tools.cdk_testing.testing_manager import TestingManager
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_stage_deployment.function import StageDeploymentSingletonFunction
from b_stage_deployment.resource import StageDeploymentResource
class TestingInfrastructure(TestingStack):
def __init__(self, scope: Construct):
super().__init__(scope=scope)
prefix = TestingManager.get_global_prefix()
api = CfnApi(
scope=self,
id=f'{prefix}Api',
description='Sample API.',
name=f'{prefix}Api',
protocol_type='HTTP'
)
stage = CfnStage(
scope=self,
id=f'{prefix}Stage',
api_id=api.ref,
stage_name='prod',
auto_deploy=False,
description='Test description.'
)
function = Function(
scope=self,
id=f'{prefix}TestFunction',
function_name=f'{prefix}TestFunction',
code=Code.from_inline(
'def handler(*args, **kwargs):\n'
' return {\n'
' "isBase64Encoded": False,\n'
' "statusCode": 200,\n'
' "headers": {},\n'
' "body": "{\\"message\\": \\"success\\"}"\n'
' }\n'
),
handler='index.handler',
runtime=Runtime.PYTHON_3_6,
)
integration = CfnIntegration(
scope=self,
id=f'{TestingManager.get_global_prefix()}LambdaIntegration',
api_id=api.ref,
integration_type='AWS_PROXY',
integration_uri=(
f'arn:aws:apigateway:{self.region}:lambda:path/2015-03-31'
f'/functions/{function.function_arn}/invocations'
),
description='Sample lambda proxy integration.',
payload_format_version='1.0'
)
CfnRoute(
scope=self,
id=f'{prefix}SampleRoute',
api_id=api.ref,
route_key='GET /test',
target=f'integrations/{integration.ref}'
)
backend = StageDeploymentSingletonFunction(self, 'DeploymentBackend')
StageDeploymentResource(self, 'C1', backend, api.ref, stage.stage_name, 'Sample1.')
StageDeploymentResource(self, 'C2', backend, api.ref, stage.stage_name, 'Sample2.')
StageDeploymentResource(self, 'C3', backend, api.ref, stage.stage_name, 'Sample3.')
StageDeploymentResource(self, 'C4', backend, api.ref, stage.stage_name, 'Sample4.')
StageDeploymentResource(self, 'C5', backend, api.ref, stage.stage_name, 'Sample5.')
self.add_output('ApiId', api.ref)
self.add_output('StageName', stage.stage_name)
| true
| true
|
1c4093d7d9b58530b9b92d6b80208734e55bb251
| 2,498
|
py
|
Python
|
CircleciScripts/framework_list.py
|
code-surf/aws-sdk-ios
|
7d2d99691419e8aaaf70911cd9c34eece79c0a02
|
[
"Apache-2.0"
] | null | null | null |
CircleciScripts/framework_list.py
|
code-surf/aws-sdk-ios
|
7d2d99691419e8aaaf70911cd9c34eece79c0a02
|
[
"Apache-2.0"
] | null | null | null |
CircleciScripts/framework_list.py
|
code-surf/aws-sdk-ios
|
7d2d99691419e8aaaf70911cd9c34eece79c0a02
|
[
"Apache-2.0"
] | null | null | null |
# A list of frameworks/packages for the AWS iOS SDK. As of now, the order of
# these packages is important, since we don't model dependencies in code that we
# consume for the release process. Packages toward the bottom of the list
# depend on packages toward the top of the list.
# Note that this list isn't a comprehensive list of Xcode schemas or targets
# that need to be built and tested, only a model of dependencies for cocoapods.
grouped_frameworks = [
# No dependencies
[
'AWSCore',
'AWSCognitoIdentityProviderASF',
],
[
# Depends only on AWSCognitoIdentityProviderASF
'AWSCognitoAuth',
# Depends on AWSCore and AWSCognitoIdentityProviderASF
'AWSCognitoIdentityProvider',
# Depends only on AWSCore
'AWSAuthCore',
# Service-API packages depend only on AWSCore
'AWSAPIGateway',
'AWSAutoScaling',
'AWSCloudWatch',
'AWSCognito',
'AWSComprehend',
'AWSConnect',
'AWSConnectParticipant',
'AWSDynamoDB',
'AWSEC2',
'AWSElasticLoadBalancing',
'AWSIoT',
'AWSKMS',
'AWSKinesis',
'AWSKinesisVideo',
'AWSKinesisVideoArchivedMedia',
'AWSKinesisVideoSignaling',
'AWSLambda',
'AWSLex',
'AWSLogs',
'AWSMachineLearning',
'AWSMobileAnalytics',
'AWSPinpoint',
'AWSPolly',
'AWSRekognition',
'AWSS3',
'AWSSES',
'AWSSNS',
'AWSSQS',
'AWSSageMakerRuntime',
'AWSSimpleDB',
'AWSTextract',
'AWSTranscribe',
'AWSTranscribeStreaming',
'AWSTranslate',
],
[
# Depends only on AWSCognito service-api package
'AWSCognitoSync',
# Depends on AWSCore and AWSAuthCore
'AWSAuthUI',
# Depends only on AWSAuthCore (and possibly external Pods, but nothing else
# built locally)
'AWSFacebookSignIn',
'AWSGoogleSignIn',
# Depends only on AWSAuthCore and AWSCognitoIdentityProvider
'AWSMobileClient',
'AWSUserPoolsSignIn',
],
[
# Depends on most previous packages except auth
'AWSiOSSDKv2',
# Depends on AWSAuthCore, AWSFacebookSignIn, AWSGoogleSignIn,
# AWSUserPoolsSignIn and AWSAuthUI
'AWSAuth',
],
]
# flatten the grouped frameworks
frameworks = [framework for group in grouped_frameworks for framework in group]
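# Illustrative sketch (not part of the original script): the grouping above
# implies that every framework's dependencies live in an earlier group. A
# quick sanity check of that property for a known pair such as AWSCore/AWSS3:
if __name__ == '__main__':
    group_index = {fw: i for i, group in enumerate(grouped_frameworks) for fw in group}
    assert group_index['AWSCore'] < group_index['AWSS3']
    print('flattened %d frameworks while preserving group order' % len(frameworks))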
| 26.860215
| 83
| 0.618094
|
# consume for the release process. Packages toward the bottom of the list
# depend on packages toward the top of the list.
# Note that this list isn't a comprehensive list of Xcode schemes or targets
grouped_frameworks = [
[
'AWSCore',
'AWSCognitoIdentityProviderASF',
],
[
'AWSCognitoAuth',
'AWSCognitoIdentityProvider',
'AWSAuthCore',
'AWSAPIGateway',
'AWSAutoScaling',
'AWSCloudWatch',
'AWSCognito',
'AWSComprehend',
'AWSConnect',
'AWSConnectParticipant',
'AWSDynamoDB',
'AWSEC2',
'AWSElasticLoadBalancing',
'AWSIoT',
'AWSKMS',
'AWSKinesis',
'AWSKinesisVideo',
'AWSKinesisVideoArchivedMedia',
'AWSKinesisVideoSignaling',
'AWSLambda',
'AWSLex',
'AWSLogs',
'AWSMachineLearning',
'AWSMobileAnalytics',
'AWSPinpoint',
'AWSPolly',
'AWSRekognition',
'AWSS3',
'AWSSES',
'AWSSNS',
'AWSSQS',
'AWSSageMakerRuntime',
'AWSSimpleDB',
'AWSTextract',
'AWSTranscribe',
'AWSTranscribeStreaming',
'AWSTranslate',
],
[
'AWSCognitoSync',
'AWSAuthUI',
'AWSFacebookSignIn',
'AWSGoogleSignIn',
'AWSMobileClient',
'AWSUserPoolsSignIn',
],
[
'AWSiOSSDKv2',
'AWSAuth',
],
]
frameworks = [framework for group in grouped_frameworks for framework in group]
| true
| true
|
1c4094387a9d3f0cd170326eb874c55e90798d9e
| 9,919
|
py
|
Python
|
tests/components/modbus/test_modbus_sensor.py
|
jlvaillant/core
|
ae37f9a1d9c5067957854b3c25dcc73fe9a10bee
|
[
"Apache-2.0"
] | 2
|
2019-11-20T20:56:59.000Z
|
2021-01-03T08:52:18.000Z
|
tests/components/modbus/test_modbus_sensor.py
|
jlvaillant/core
|
ae37f9a1d9c5067957854b3c25dcc73fe9a10bee
|
[
"Apache-2.0"
] | 5
|
2020-04-26T10:50:01.000Z
|
2021-03-16T21:19:46.000Z
|
tests/components/modbus/test_modbus_sensor.py
|
winterscar/core
|
5a55d508791aae65f16396691d014c73fb2095f0
|
[
"Apache-2.0"
] | 1
|
2021-04-18T19:36:34.000Z
|
2021-04-18T19:36:34.000Z
|
"""The tests for the Modbus sensor component."""
from datetime import timedelta
from unittest import mock
import pytest
from homeassistant.components.modbus.const import (
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_COUNT,
CONF_DATA_TYPE,
CONF_OFFSET,
CONF_PRECISION,
CONF_REGISTER,
CONF_REGISTER_TYPE,
CONF_REGISTERS,
CONF_REVERSE_ORDER,
CONF_SCALE,
DATA_TYPE_FLOAT,
DATA_TYPE_INT,
DATA_TYPE_UINT,
DEFAULT_HUB,
MODBUS_DOMAIN,
)
from homeassistant.const import CONF_NAME, CONF_PLATFORM, CONF_SCAN_INTERVAL
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import MockModule, async_fire_time_changed, mock_integration
@pytest.fixture()
def mock_hub(hass):
"""Mock hub."""
mock_integration(hass, MockModule(MODBUS_DOMAIN))
hub = mock.MagicMock()
hub.name = "hub"
hass.data[MODBUS_DOMAIN] = {DEFAULT_HUB: hub}
return hub
common_register_config = {CONF_NAME: "test-config", CONF_REGISTER: 1234}
class ReadResult:
"""Storage class for register read results."""
def __init__(self, register_words):
"""Init."""
self.registers = register_words
async def run_test(hass, mock_hub, register_config, register_words, expected):
"""Run test for given config and check that sensor outputs expected result."""
# Full sensor configuration
sensor_name = "modbus_test_sensor"
scan_interval = 5
config = {
MODBUS_DOMAIN: {
CONF_PLATFORM: "modbus",
CONF_SCAN_INTERVAL: scan_interval,
CONF_REGISTERS: [
dict(**{CONF_NAME: sensor_name, CONF_REGISTER: 1234}, **register_config)
],
}
}
# Setup inputs for the sensor
read_result = ReadResult(register_words)
if register_config.get(CONF_REGISTER_TYPE) == CALL_TYPE_REGISTER_INPUT:
mock_hub.read_input_registers.return_value = read_result
else:
mock_hub.read_holding_registers.return_value = read_result
# Initialize sensor
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, MODBUS_DOMAIN, config)
# Trigger update call with time_changed event
now += timedelta(seconds=scan_interval + 1)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
async def test_simple_word_register(hass, mock_hub):
"""Test conversion of single word register."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[0], expected="0")
async def test_optional_conf_keys(hass, mock_hub):
"""Test handling of optional configuration keys."""
register_config = {}
await run_test(
hass, mock_hub, register_config, register_words=[0x8000], expected="-32768"
)
async def test_offset(hass, mock_hub):
"""Test offset calculation."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 13,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[7], expected="20")
async def test_scale_and_offset(hass, mock_hub):
"""Test handling of scale and offset."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 3,
CONF_OFFSET: 13,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[7], expected="34")
async def test_ints_can_have_precision(hass, mock_hub):
"""Test precision can be specified event if using integer values only."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 3,
CONF_OFFSET: 13,
CONF_PRECISION: 4,
}
await run_test(
hass, mock_hub, register_config, register_words=[7], expected="34.0000"
)
async def test_floats_get_rounded_correctly(hass, mock_hub):
"""Test that floating point values get rounded correctly."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1.5,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[1], expected="2")
async def test_parameters_as_strings(hass, mock_hub):
"""Test that scale, offset and precision can be given as strings."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: "1.5",
CONF_OFFSET: "5",
CONF_PRECISION: "1",
}
await run_test(hass, mock_hub, register_config, register_words=[9], expected="18.5")
async def test_floating_point_scale(hass, mock_hub):
"""Test use of floating point scale."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 2.4,
CONF_OFFSET: 0,
CONF_PRECISION: 2,
}
await run_test(hass, mock_hub, register_config, register_words=[1], expected="2.40")
async def test_floating_point_offset(hass, mock_hub):
"""Test use of floating point scale."""
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: -10.3,
CONF_PRECISION: 1,
}
await run_test(hass, mock_hub, register_config, register_words=[2], expected="-8.3")
async def test_signed_two_word_register(hass, mock_hub):
"""Test reading of signed register with two words."""
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected="-1985229329",
)
async def test_unsigned_two_word_register(hass, mock_hub):
"""Test reading of unsigned register with two words."""
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_reversed(hass, mock_hub):
"""Test handling of reversed register words."""
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_REVERSE_ORDER: True,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0xCDEF89AB),
)
async def test_four_word_register(hass, mock_hub):
"""Test reading of 64-bit register."""
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF, 0x0123, 0x4567],
expected="9920249030613615975",
)
async def test_four_word_register_precision_is_intact_with_int_params(hass, mock_hub):
"""Test that precision is not lost when doing integer arithmetic for 64-bit register."""
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 2,
CONF_OFFSET: 3,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x0123, 0x4567, 0x89AB, 0xCDEF],
expected="163971058432973793",
)
async def test_four_word_register_precision_is_lost_with_float_params(hass, mock_hub):
"""Test that precision is affected when floating point conversion is done."""
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 2.0,
CONF_OFFSET: 3.0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x0123, 0x4567, 0x89AB, 0xCDEF],
expected="163971058432973792",
)
async def test_two_word_input_register(hass, mock_hub):
"""Test reaging of input register."""
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_INPUT,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_two_word_holding_register(hass, mock_hub):
"""Test reaging of holding register."""
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_float_data_type(hass, mock_hub):
"""Test floating point register data type."""
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_DATA_TYPE: DATA_TYPE_FLOAT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 5,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[16286, 1617],
expected="1.23457",
)
| 27.940845
| 92
| 0.654401
|
from datetime import timedelta
from unittest import mock
import pytest
from homeassistant.components.modbus.const import (
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_COUNT,
CONF_DATA_TYPE,
CONF_OFFSET,
CONF_PRECISION,
CONF_REGISTER,
CONF_REGISTER_TYPE,
CONF_REGISTERS,
CONF_REVERSE_ORDER,
CONF_SCALE,
DATA_TYPE_FLOAT,
DATA_TYPE_INT,
DATA_TYPE_UINT,
DEFAULT_HUB,
MODBUS_DOMAIN,
)
from homeassistant.const import CONF_NAME, CONF_PLATFORM, CONF_SCAN_INTERVAL
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import MockModule, async_fire_time_changed, mock_integration
@pytest.fixture()
def mock_hub(hass):
mock_integration(hass, MockModule(MODBUS_DOMAIN))
hub = mock.MagicMock()
hub.name = "hub"
hass.data[MODBUS_DOMAIN] = {DEFAULT_HUB: hub}
return hub
common_register_config = {CONF_NAME: "test-config", CONF_REGISTER: 1234}
class ReadResult:
def __init__(self, register_words):
self.registers = register_words
async def run_test(hass, mock_hub, register_config, register_words, expected):
sensor_name = "modbus_test_sensor"
scan_interval = 5
config = {
MODBUS_DOMAIN: {
CONF_PLATFORM: "modbus",
CONF_SCAN_INTERVAL: scan_interval,
CONF_REGISTERS: [
dict(**{CONF_NAME: sensor_name, CONF_REGISTER: 1234}, **register_config)
],
}
}
read_result = ReadResult(register_words)
if register_config.get(CONF_REGISTER_TYPE) == CALL_TYPE_REGISTER_INPUT:
mock_hub.read_input_registers.return_value = read_result
else:
mock_hub.read_holding_registers.return_value = read_result
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, MODBUS_DOMAIN, config)
now += timedelta(seconds=scan_interval + 1)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
async def test_simple_word_register(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[0], expected="0")
async def test_optional_conf_keys(hass, mock_hub):
register_config = {}
await run_test(
hass, mock_hub, register_config, register_words=[0x8000], expected="-32768"
)
async def test_offset(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 13,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[7], expected="20")
async def test_scale_and_offset(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 3,
CONF_OFFSET: 13,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[7], expected="34")
async def test_ints_can_have_precision(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 3,
CONF_OFFSET: 13,
CONF_PRECISION: 4,
}
await run_test(
hass, mock_hub, register_config, register_words=[7], expected="34.0000"
)
async def test_floats_get_rounded_correctly(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1.5,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(hass, mock_hub, register_config, register_words=[1], expected="2")
async def test_parameters_as_strings(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: "1.5",
CONF_OFFSET: "5",
CONF_PRECISION: "1",
}
await run_test(hass, mock_hub, register_config, register_words=[9], expected="18.5")
async def test_floating_point_scale(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 2.4,
CONF_OFFSET: 0,
CONF_PRECISION: 2,
}
await run_test(hass, mock_hub, register_config, register_words=[1], expected="2.40")
async def test_floating_point_offset(hass, mock_hub):
register_config = {
CONF_COUNT: 1,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: -10.3,
CONF_PRECISION: 1,
}
await run_test(hass, mock_hub, register_config, register_words=[2], expected="-8.3")
async def test_signed_two_word_register(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_INT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected="-1985229329",
)
async def test_unsigned_two_word_register(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_reversed(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_REVERSE_ORDER: True,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0xCDEF89AB),
)
async def test_four_word_register(hass, mock_hub):
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF, 0x0123, 0x4567],
expected="9920249030613615975",
)
async def test_four_word_register_precision_is_intact_with_int_params(hass, mock_hub):
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 2,
CONF_OFFSET: 3,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x0123, 0x4567, 0x89AB, 0xCDEF],
expected="163971058432973793",
)
async def test_four_word_register_precision_is_lost_with_float_params(hass, mock_hub):
register_config = {
CONF_COUNT: 4,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 2.0,
CONF_OFFSET: 3.0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x0123, 0x4567, 0x89AB, 0xCDEF],
expected="163971058432973792",
)
async def test_two_word_input_register(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_INPUT,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_two_word_holding_register(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_DATA_TYPE: DATA_TYPE_UINT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 0,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[0x89AB, 0xCDEF],
expected=str(0x89ABCDEF),
)
async def test_float_data_type(hass, mock_hub):
register_config = {
CONF_COUNT: 2,
CONF_REGISTER_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_DATA_TYPE: DATA_TYPE_FLOAT,
CONF_SCALE: 1,
CONF_OFFSET: 0,
CONF_PRECISION: 5,
}
await run_test(
hass,
mock_hub,
register_config,
register_words=[16286, 1617],
expected="1.23457",
)
| true
| true
|
1c40946627caba4610fd0f0f0091de2790b9ccf1
| 1,018
|
py
|
Python
|
tests/rules/test_cp_create_destination.py
|
HiteshMah-Jan/thefuck
|
132c62262246824470934c2c6f46919ef6f00203
|
[
"MIT"
] | 75,504
|
2015-04-08T18:22:19.000Z
|
2022-03-31T23:59:52.000Z
|
tests/rules/test_cp_create_destination.py
|
HiteshMah-Jan/thefuck
|
132c62262246824470934c2c6f46919ef6f00203
|
[
"MIT"
] | 1,160
|
2015-04-17T18:47:12.000Z
|
2022-03-30T20:42:26.000Z
|
tests/rules/test_cp_create_destination.py
|
HiteshMah-Jan/thefuck
|
132c62262246824470934c2c6f46919ef6f00203
|
[
"MIT"
] | 4,399
|
2015-04-17T18:36:04.000Z
|
2022-03-31T07:01:03.000Z
|
import pytest
from thefuck.rules.cp_create_destination import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize(
"script, output",
[("cp", "cp: directory foo does not exist\n"), ("mv", "No such file or directory")],
)
def test_match(script, output):
assert match(Command(script, output))
@pytest.mark.parametrize(
"script, output", [("cp", ""), ("mv", ""), ("ls", "No such file or directory")]
)
def test_not_match(script, output):
assert not match(Command(script, output))
@pytest.mark.parametrize(
"script, output, new_command",
[
("cp foo bar/", "cp: directory foo does not exist\n", "mkdir -p bar/ && cp foo bar/"),
("mv foo bar/", "No such file or directory", "mkdir -p bar/ && mv foo bar/"),
("cp foo bar/baz/", "cp: directory foo does not exist\n", "mkdir -p bar/baz/ && cp foo bar/baz/"),
],
)
def test_get_new_command(script, output, new_command):
assert get_new_command(Command(script, output)) == new_command
| 32.83871
| 106
| 0.654224
|
import pytest
from thefuck.rules.cp_create_destination import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize(
"script, output",
[("cp", "cp: directory foo does not exist\n"), ("mv", "No such file or directory")],
)
def test_match(script, output):
assert match(Command(script, output))
@pytest.mark.parametrize(
"script, output", [("cp", ""), ("mv", ""), ("ls", "No such file or directory")]
)
def test_not_match(script, output):
assert not match(Command(script, output))
@pytest.mark.parametrize(
"script, output, new_command",
[
("cp foo bar/", "cp: directory foo does not exist\n", "mkdir -p bar/ && cp foo bar/"),
("mv foo bar/", "No such file or directory", "mkdir -p bar/ && mv foo bar/"),
("cp foo bar/baz/", "cp: directory foo does not exist\n", "mkdir -p bar/baz/ && cp foo bar/baz/"),
],
)
def test_get_new_command(script, output, new_command):
assert get_new_command(Command(script, output)) == new_command
| true
| true
|
1c409472d8cb1e03d6991c52aa165e63f057563c
| 505
|
py
|
Python
|
week2/scripts/publisher2.py
|
manasdesai/Robotics-Automation-QSTP-2021
|
a51e01dd9fcbae106f618d82737e01e279ba0ff2
|
[
"MIT"
] | 1
|
2021-09-19T03:34:35.000Z
|
2021-09-19T03:34:35.000Z
|
week2/scripts/publisher2.py
|
manasdesai/Robotics-Automation-QSTP-2021
|
a51e01dd9fcbae106f618d82737e01e279ba0ff2
|
[
"MIT"
] | null | null | null |
week2/scripts/publisher2.py
|
manasdesai/Robotics-Automation-QSTP-2021
|
a51e01dd9fcbae106f618d82737e01e279ba0ff2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def publisher():
pub=rospy.Publisher('World',String,queue_size=10)
rospy.init_node('publish2',anonymous=True)
rate=rospy.Rate(10)
while not rospy.is_shutdown():
pub.publish('World')
publishstring='World is being published'
rospy.loginfo(publishstring)
rate.sleep()
if __name__=='__main__':
try:
publisher()
except rospy.ROSInterruptException:
pass
| 25.25
| 53
| 0.649505
|
import rospy
from std_msgs.msg import String
def publisher():
pub=rospy.Publisher('World',String,queue_size=10)
rospy.init_node('publish2',anonymous=True)
rate=rospy.Rate(10)
while not rospy.is_shutdown():
pub.publish('World')
publishstring='World is being published'
rospy.loginfo(publishstring)
rate.sleep()
if __name__=='__main__':
try:
publisher()
except rospy.ROSInterruptException:
pass
| true
| true
|
1c409484510aab17d13a436173f168f6acfe19e1
| 889
|
py
|
Python
|
api/responders/grafana/__init__.py
|
korenlev/calipso-cvim
|
39278a5cf09c40b26a8a143ccc0c8d437961abc2
|
[
"Apache-2.0"
] | null | null | null |
api/responders/grafana/__init__.py
|
korenlev/calipso-cvim
|
39278a5cf09c40b26a8a143ccc0c8d437961abc2
|
[
"Apache-2.0"
] | null | null | null |
api/responders/grafana/__init__.py
|
korenlev/calipso-cvim
|
39278a5cf09c40b26a8a143ccc0c8d437961abc2
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from api.responders.responder_base import ResponderBase
class Health(ResponderBase):
def on_get(self, req, resp):
self.set_ok_response(resp, "We're open")
| 55.5625
| 79
| 0.460067
| true
| true
|
|
1c4095e149b03c67d4661ad4fca4684d6028a5e9
| 318
|
py
|
Python
|
rastervision/evaluation/api.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 4
|
2019-03-11T12:38:15.000Z
|
2021-04-06T14:57:52.000Z
|
rastervision/evaluation/api.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/evaluation/api.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2021-12-02T08:07:21.000Z
|
2021-12-02T08:07:21.000Z
|
# flake8: noqa
# Registry keys
EVALUATOR = 'EVALUATOR'
OBJECT_DETECTION_EVALUATOR = 'OBJECT_DETECTION_EVALUATOR'
CHIP_CLASSIFICATION_EVALUATOR = 'CHIP_CLASSIFICATION_EVALUATOR'
SEMANTIC_SEGMENTATION_EVALUATOR = 'SEMANTIC_SEGMENTATION_EVALUATOR'
from rastervision.evaluation.evaluator_config import EvaluatorConfig
| 26.5
| 68
| 0.86478
|
EVALUATOR = 'EVALUATOR'
OBJECT_DETECTION_EVALUATOR = 'OBJECT_DETECTION_EVALUATOR'
CHIP_CLASSIFICATION_EVALUATOR = 'CHIP_CLASSIFICATION_EVALUATOR'
SEMANTIC_SEGMENTATION_EVALUATOR = 'SEMANTIC_SEGMENTATION_EVALUATOR'
from rastervision.evaluation.evaluator_config import EvaluatorConfig
| true
| true
|
1c40969107ea62c91965c9ee8aec48640843570c
| 1,251
|
py
|
Python
|
tests.py
|
MasterOdin/LogicalEquivalency
|
c1f4e053c4c18b8fc23a5842944bbd9ef9f37843
|
[
"MIT"
] | 1
|
2018-02-02T17:11:24.000Z
|
2018-02-02T17:11:24.000Z
|
tests.py
|
MasterOdin/LogicalEquivalency
|
c1f4e053c4c18b8fc23a5842944bbd9ef9f37843
|
[
"MIT"
] | null | null | null |
tests.py
|
MasterOdin/LogicalEquivalency
|
c1f4e053c4c18b8fc23a5842944bbd9ef9f37843
|
[
"MIT"
] | 1
|
2019-01-16T21:11:52.000Z
|
2019-01-16T21:11:52.000Z
|
import copy
from forseti.formula import Symbol, Or, And, Not
from nose import runmodule
from nose.tools import assert_equal, assert_true
import util
from extra_formulas import GeneralizedAnd, GeneralizedOr
def test_helper():
statement = Or(Or(Symbol("B"), Symbol("C")), Symbol("A"))
new_statement, change = util.flatten(copy.deepcopy(statement))
assert_equal(new_statement, GeneralizedOr(Symbol("B"), Symbol("C"), Symbol("A")))
assert_true(change)
def test_helper2():
statement = GeneralizedOr(Symbol("a"), Symbol("a"))
# need to manually set it to this as otherwise the constructor would flatten it automatically
statement.args[0] = Or(And(Symbol("b"), Not(Symbol("c"))), And(Symbol("c"), Not(Symbol("b"))))
new_statement, change = util.flatten(copy.deepcopy(statement))
assert_equal(new_statement, GeneralizedOr(Symbol("a"), And(Symbol("b"), Not(Symbol("c"))),
And(Symbol("c"), Not(Symbol("b")))))
assert_true(change)
def test_generalized_or_constructor():
statement = GeneralizedOr(Or(Symbol("B"), Symbol("C")), Symbol("A"))
assert_equal(statement, GeneralizedOr(Symbol("B"), Symbol("C"), Symbol("A")))
if __name__ == "__main__":
runmodule()
| 37.909091
| 98
| 0.677058
|
import copy
from forseti.formula import Symbol, Or, And, Not
from nose import runmodule
from nose.tools import assert_equal, assert_true
import util
from extra_formulas import GeneralizedAnd, GeneralizedOr
def test_helper():
statement = Or(Or(Symbol("B"), Symbol("C")), Symbol("A"))
new_statement, change = util.flatten(copy.deepcopy(statement))
assert_equal(new_statement, GeneralizedOr(Symbol("B"), Symbol("C"), Symbol("A")))
assert_true(change)
def test_helper2():
statement = GeneralizedOr(Symbol("a"), Symbol("a"))
statement.args[0] = Or(And(Symbol("b"), Not(Symbol("c"))), And(Symbol("c"), Not(Symbol("b"))))
new_statement, change = util.flatten(copy.deepcopy(statement))
assert_equal(new_statement, GeneralizedOr(Symbol("a"), And(Symbol("b"), Not(Symbol("c"))),
And(Symbol("c"), Not(Symbol("b")))))
assert_true(change)
def test_generalized_or_constructor():
statement = GeneralizedOr(Or(Symbol("B"), Symbol("C")), Symbol("A"))
assert_equal(statement, GeneralizedOr(Symbol("B"), Symbol("C"), Symbol("A")))
if __name__ == "__main__":
runmodule()
| true
| true
|
1c40979d9fa04f5d63f1a60a08cb903ed94e2d4b
| 1,652
|
py
|
Python
|
exe/load_spiketimes_subsampled.py
|
Priesemann-Group/historydependence
|
e1adc5eea8cb05cc686bfda0b979244b34d63bb4
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T21:56:53.000Z
|
2022-03-25T21:56:53.000Z
|
exe/load_spiketimes_subsampled.py
|
Priesemann-Group/historydependence
|
e1adc5eea8cb05cc686bfda0b979244b34d63bb4
|
[
"BSD-3-Clause"
] | null | null | null |
exe/load_spiketimes_subsampled.py
|
Priesemann-Group/historydependence
|
e1adc5eea8cb05cc686bfda0b979244b34d63bb4
|
[
"BSD-3-Clause"
] | null | null | null |
from sys import stderr, exit, argv
from os.path import isfile, isdir, realpath, dirname, exists
import numpy as np
from scipy.io import loadmat
# Loading spiketimes for entorhinal cortex recording
recorded_system = argv[1]
run_index = int(argv[2])
rec_length = argv[3]
if len(argv) > 4:
data_path = argv[4]
else:
CODE_DIR = '{}/..'.format(dirname(realpath(__file__)))
data_path = '{}/data'.format(CODE_DIR)
rec_lengths = {'1min': 60., '3min': 180., '5min': 300.,
'10min': 600., '20min': 1200., '45min': 2700., '90min': 5400.}
rec_lengths_Nsamples = {'1min': 10, '3min': 10, '5min': 10,
'10min': 8, '20min': 4, '45min': 2}
DATA_DIR = '{}/{}'.format(data_path, recorded_system)
N_neurons = 10
N_samples = rec_lengths_Nsamples[rec_length]
T_rec = rec_lengths[rec_length]
neuron_index = int(run_index/N_samples)
sample_index = run_index%N_samples
validNeurons = np.load(
'{}/validNeurons.npy'.format(DATA_DIR)).astype(int)
np.random.seed(41)
neuron_selection = np.random.choice(len(validNeurons), N_neurons, replace = False)
neuron = validNeurons[neuron_selection][neuron_index]
spiketimes = np.load(
'{}/spks/spiketimes_neuron{}.npy'.format(DATA_DIR, neuron))
# Add 5 seconds to make sure that only spikes with sufficient spiking history are considered
T_0 = spiketimes[0] + 5.
# The end of the recordings seems to be unstable from time to time, therefore only the first 80 minutes are subsampled
# (a worked example of the resulting sample windows appears at the end of this script)
T_step = 4800. / N_samples
T_0 = T_0 + sample_index * T_step
spiketimes = spiketimes - T_0
spiketimes = spiketimes[spiketimes > 0]
spiketimes = spiketimes[spiketimes < T_rec]
print(*spiketimes, sep='\n')
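# Worked example (illustrative): with rec_length='10min', N_samples is 8, so
# T_step = 4800 / 8 = 600 s. sample_index 0..7 shifts T_0 by 0..4200 s, and with
# T_rec = 600 s every sampled window ends within the first 80 minutes of the
# recording, matching the subsampling note above.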
| 31.169811
| 108
| 0.710048
|
from sys import stderr, exit, argv
from os.path import isfile, isdir, realpath, dirname, exists
import numpy as np
from scipy.io import loadmat
recorded_system = argv[1]
run_index = int(argv[2])
rec_length = argv[3]
if len(argv) > 4:
data_path = argv[4]
else:
CODE_DIR = '{}/..'.format(dirname(realpath(__file__)))
data_path = '{}/data'.format(CODE_DIR)
rec_lengths = {'1min': 60., '3min': 180., '5min': 300.,
'10min': 600., '20min': 1200., '45min': 2700., '90min': 5400.}
rec_lengths_Nsamples = {'1min': 10, '3min': 10, '5min': 10,
'10min': 8, '20min': 4, '45min': 2}
DATA_DIR = '{}/{}'.format(data_path, recorded_system)
N_neurons = 10
N_samples = rec_lengths_Nsamples[rec_length]
T_rec = rec_lengths[rec_length]
neuron_index = int(run_index/N_samples)
sample_index = run_index%N_samples
validNeurons = np.load(
'{}/validNeurons.npy'.format(DATA_DIR)).astype(int)
np.random.seed(41)
neuron_selection = np.random.choice(len(validNeurons), N_neurons, replace = False)
neuron = validNeurons[neuron_selection][neuron_index]
spiketimes = np.load(
'{}/spks/spiketimes_neuron{}.npy'.format(DATA_DIR, neuron))
T_0 = spiketimes[0] + 5.
T_step = 4800. / N_samples
T_0 = T_0 + sample_index * T_step
spiketimes = spiketimes - T_0
spiketimes = spiketimes[spiketimes > 0]
spiketimes = spiketimes[spiketimes < T_rec]
print(*spiketimes, sep='\n')
| true
| true
|
1c4097f0c6e4c010cfebeb8ec84a06cd6e86692b
| 42,266
|
py
|
Python
|
util/genIncJS.py
|
ahmedmrefaat/lang
|
68e962ada2ab3d81398cb2651fead30205f01c20
|
[
"Apache-2.0"
] | null | null | null |
util/genIncJS.py
|
ahmedmrefaat/lang
|
68e962ada2ab3d81398cb2651fead30205f01c20
|
[
"Apache-2.0"
] | null | null | null |
util/genIncJS.py
|
ahmedmrefaat/lang
|
68e962ada2ab3d81398cb2651fead30205f01c20
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 Yoav Seginer, Theo Vosse, Gil Harari, and Uri Kolodny.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# this script is used to create an application file from a cdl script,
# resolving references to other files to their actual paths.
#
# it is used in two different stages:
#
# 1. creating a compilation program, a program that would compile the
# cdl code into a feg program
# the generated program includes the cdl-source along with all the
# class-files it may refer to, as well as the compilation code
#
# 2. creating the runtime program, that implements the application
# the generated program includes the compiled feg, as well as the cdl
# runtime environment interpreting it
#
# Both of these could use this script in either of its two modes:
#
# 1. 'js' mode - generates a single javascript file, into which all the
# javascript files are concatenated
#
# 2. 'html' mode - generates an html file referencing the javascript file
# with html '<script>' tags
#
# 3. 'incl' mode - generates a list of included files
#
# 4. 'make' mode - generates a make file dependency
#
# Notes:
# Compilation stage normally generates a 'js' file, which is then run in
# 'node.js'.
# However, when debugging the compiler it is convenient to generate an 'html'
# version of the compilation program, and execute it in a browser, with its
# superior debugging environment
#
# 'html' mode is used in the development environment for runtime programs,
# so that the browser's debugger provides easy access to the run-time
# javascript files. Changes to the run-time javascript files take effect
# with just a reload of the browser, while changes to typescript files require
# compiling typescript to javascript (but not cdl recompilation or
# re-generation of the .html file by this script).
# For batch automatic test execution, node.js is used, so 'js' mode is
# required.
# Also, the 'production' format, as uploaded to the build-web-site, uses
# an 'uglified' single-file javascript referenced by an html file. This
# single-file javascript is generated by this script using 'js' mode.
#
#
#
# Basic Functionality:
#
# This script takes two main input files: a template file and a source file.
# The template file guides the generation of the output file. For 'simple'
# template lines, the script merely copies the line from the template file to
# the output file.
# However, the template files typically also uses directives and macros.
# Directives affect an inclusion of another file. In 'html' mode, this is
# achieved by placing a <script> tag in the output file, while in 'js' mode
# the actual content of the included file is written to the output file.
# In either modes, included files are recursively processed, so that nested
# inclusion directives are handled.
# Macros are replaced by a string which this script computes. For example,
# the title macro, '%%title:()%%', is replaced with the value of the '--title'
# argument to this script.
#
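# As an illustration only (a hypothetical template fragment, not taken from an
# actual template file), directive and macro lines could look like:
#
#   // %%include%%: <utils.js>
#   <title>%%title:()%%</title>
#   <img src="%%image:(logo.png)%%">
#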
# confLib handling:
#
# When generating a compilation program, this script can be given the
# path of a '--libconf' file. A libconf file describes a hierarchy of
# conf-libs, where each conf-lib is described in a single line by its name
# and path, separated by a colon, e.g. 'Core:../../core'
#
# If a libconf file is specified, the behavior of this script is modified in
# the following ways:
#
# - the script translates an instance of 'var classes = ' in a file 'fn.js'
#   of confLib 'CL' to 'var CL__fn__classes ='.
#   If the classes are already defined with a variable which is the basename
#   of the file, that case is recognized too:
#   'var fn =' in fn.js is converted to 'var CL__fn ='.
#
# - the script emits a list with the class-lists and their confLib,
# { confLib: "CL1", classes: [CL1__fn1_1__classes, CL1__fn1_2__classes, ...] },
#   { confLib: "CL2", classes: [CL2__fn2_1__classes, ...] }, ...
#
# - the scripts recognizes a macro '%%conflibPreamble:()%%', and replaces it
# with the inclusion of a file with a fixed name for each conf-lib in the
# libconf file. Given a conflib path <clpath1>, the script includes the
# file "<clpath1>/includeList.js".
#
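# As a concrete illustration (names and paths here are hypothetical), a
# libconf file could contain:
#
#   # lines starting with '#' are treated as comments
#   Core: ../../core
#   MyApp: ../myapp
#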
#
# command line arguments (an illustrative invocation follows this list):
#
# -o/--out_file - the output file, into which the generated .js/.html file is
# written
#
# -t/--template - the 'template' input file, the file controlling the output
# file format
#
# -m/--mode - js/html
#
# --includedir - add the argument to the list of directories in which include
# files (encountered in %%include%%: macros) are searched for
#
# --langdir - set the argument as the root directory for scripts. it is added
# to all search paths (include/classfile/image/constantfile/textfile/url)
#
# --cdldir -- like langdir, but for cdl
#
# -c/--libConf - the argument is a libConf file, detailing a hierarchy of
# confLibs and their paths
#
# --buildInfoFile - the argument is the buildInfo file, which would become the
# value of the %%buildinfo:()%% macro. This file typically defines the
# version control revision, time stamp, etc
#
# --commonImageDir - when specified, images encountered in %%image:()%% macros
# are not resolved as usual, to the first directory in the image path
# containing the named image file. Rather, all image macros are resolved
# to the specified commonImageDir. This is used when generating the
# production environment, where all images are collated to a single dir
#
# --commonDataDir - like commonImageDir, but for %data:()%%
#
# --title - the argument would become the value of the %%title:()%% macro
#
# --max-include-level - do not process include lines above this level
#
# --referencedir - sets the reference directory for file name normalization
#
# --sourcedir - sets the directory that should be used for relative includes
# from the source file; by default the directory of the source
#
# --splash-screen-url - the path to the HTML file which provides the splash
# screen. If not specified, the default is used.
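#
# Example invocation (illustrative only; file and directory names are
# assumptions, not taken from an actual build):
#
#   genIncJS.py --mode html --template run.template -o myApp.html \
#       --langdir ../lang --cdldir ../cdl --title "My App" myApp.cdl.js
#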
from __future__ import print_function
import os
import sys
import re
import argparse
import pickle
import gzip
import shutil
# global variables
# js/html
mode = None
# --title --> %%title:()%%
title = None
# --splash-screen-url
splash_screen_url = None
# don't include files higher than given level
max_include_level = 999999999
# --commonImageDir
common_image_dir = None
# --commonDataDir
common_data_dir = None
# all normalized paths are relative to this directory
# (absolute paths generated in a cygwin environment are meaningless to the
# non-cygwin browser reading these paths, so paths are created as relative)
reference_dir = None
# When not none, this is supposed to be the base directory relative to the
# source file; useful for ignoring the intermediate/ prefix when processing
# .run.js files.
source_dir = None
# a list of '{ name: cl-name, path: cl-path }' generated by parsing the
# confLib file
#
conf_lib_list = []
# for each file read/being-read, processed-files has an attribute which is the
# file's name, and whose value is the line# we've reached within that file
processed_files = {}
# the name of the file from which the last input line was read
# has different value for each dtype (classfile/include/constantfile etc)
last_file_name = {}
# writing into this handle writes into the output file
out_file_handle = None
# handle to the file with the dependencies
dep_file_handle = None
# search path per dtype - a list of directories
path_per_dtype = {
'include': [],
'classfile': [],
'constantfile': [],
'template': [],
'image': [],
'data': [],
'foreign': [],
'text': [],
'url': []
}
# the 'source' file, the single positional run-time argument --> 'source'
cdl_source = None
# --buildInfoFile --> %%buildinfo:()%%
build_info_file = None
# the stack of files currently being read, which file included which file etc
# to get us to read the current file (at the top of stack)
fn_stack = []
# inclusion_cycle_permitted, defaults to false
# section, defaults to 'infix' (can be set to 'prefix'/'suffix')
# 'conf_lib' - include paths used while processing that conf-lib
# 'sticky_conf_lib' - include paths that are added and then remain in place
dtype_property = {
'include': {
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script',
'inclusion_cycle_permitted': True
},
'classfile': {
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script'
},
'constantfile': {
'section': 'prefix',
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script'
},
'template': {},
'image': {
'sticky_conf_lib': ['design/img']
}
}
# generally, text is not written directly into the output; rather it is
# 'written' by appending it to the appropriate section.
# in practice, all dtypes write to 'infix' except for 'constantfile' which
# writes into 'prefix', so that constants are defined by the time the classes
# attempt to use them
section_text = {
'prefix': [],
'infix': [],
'postfix': [],
}
# conf_libs are inserted into it at 0, so that the conf-lib that ends up being
# last (== the conf-lib of least priority) is the first to be inserted
# each entry has the following attributes:
# 'name' - conf-lib-name
# 'class_list' - the list of class_lists associated with this conf_lib
# (e.g. ['Core__draggableClasses', 'Core__snappabaleClasses',..])
# 'constant' - the list of constant defs associated with this conf_lib
# (as {
# 'name': 'positioningConstants',
# 'element': 'Core__positioningConstants'
# } )
#
conf_lib_by_priority = []
current_conf_lib = None
# Counter for number of screenArea declarations and var test lines
nr_screen_area = 0
nr_test = 0
# Target for make file include
make_target = None
def error_exit(msg):
print(sys.argv[0], ": ", msg, file=sys.stderr)
# print(str(fn_stack), file=sys.stderr)
sys.exit(1)
# Dictionary to check that no two file names get mapped onto the same class
# variable
class_stem_names = {}
def stemname(path, conf_lib_name):
"""return the extension-less basename of path and make sure it's a legal JS identifier"""
basename = os.path.splitext(os.path.basename(path))[0]
class_identifier = re.sub("[^a-zA-Z0-9_]+", "_", basename)
if conf_lib_name is None:
return class_identifier
conflib_class_identifier = conf_lib_name + "__" + class_identifier
if conflib_class_identifier in class_stem_names and class_stem_names[conflib_class_identifier] != path:
error_exit("files {} and {} map to the same variable name".format(path, class_stem_names[conflib_class_identifier]))
class_stem_names[class_identifier] = path
return class_identifier
def get_arg_parser():
parser = argparse.ArgumentParser(description='cdl script aggregator')
parser.add_argument('-o', '--out_file', help='output file name',
required=True)
parser.add_argument('-d', '--dep_file', help='dependency file name')
parser.add_argument('--dep_target', help='dependency target')
parser.add_argument('cdl_source', help='input cdl script file')
parser.add_argument('-t', '--template', help='template file name')
parser.add_argument('-m', '--mode', choices=['js', 'html', 'incl', 'make'],
help='processing mode, "js"/"html"/"incl"/"make"', required=True)
parser.add_argument('--includedir', help='include directory',
action='append')
parser.add_argument('--langdir', help='root of the lang directory')
parser.add_argument('--cdldir', help='root of the cdl apps and classes directory')
parser.add_argument('--referencedir', help='set reference directory; by default cwd')
parser.add_argument('--sourcedir', help='set directory for includes from source file')
parser.add_argument('-c', '--libConf', help='library configuration')
parser.add_argument('--buildInfoFile',
help='path of the build-info file, with rev#/date/etc')
parser.add_argument('--resourceOutFile', help='path for writing used resources')
parser.add_argument('--resourceUseFile', help='path for used resources')
parser.add_argument('--max-include-level', help='suppress indirect includes above level')
parser.add_argument('--commonImageDir', help='replaces all references in image macros')
parser.add_argument('--commonDataDir', help='replaces all references in data macros')
parser.add_argument('--title')
    parser.add_argument('--splash-screen-url', help='a URL pointing at the HTML page which should serve as a splash screen')
return parser
def annotate(msg):
print(msg)
def set_reference_dir(rd):
global reference_dir
if rd is None:
reference_dir = os.path.realpath(os.getcwd())
else:
reference_dir = os.path.realpath(rd)
def set_source_dir(sd):
global source_dir
if sd is not None:
source_dir = os.path.realpath(sd)
def normalize_path(path):
real_path = os.path.realpath(path)
return os.path.relpath(real_path, reference_dir)
def push_include_file(dtype, fn):
global fn_stack
global dtype_property
if dtype not in dtype_property:
error_exit("unknown type '" + dtype + "'")
cycle_permitted = dtype_property[dtype]['inclusion_cycle_permitted'] if 'inclusion_cycle_permitted' in dtype_property[dtype] else False
if not cycle_permitted:
if fn in fn_stack:
error_exit("inclusion cycle for '" + fn + "'")
fn_stack.append(fn)
def pop_include_file(dtype, fn):
global fn_stack
popped_file = fn_stack.pop()
if popped_file != fn:
error_exit("internal error: wrong file in pop (" +
fn + "!=" + popped_file + ")")
if len(fn_stack) == 1:
str = get_output_str()
write_str(str)
def get_last_file_name(dtype):
global last_file_name
if dtype in last_file_name:
return last_file_name[dtype]
return ""
def set_last_file_name(dtype, fn):
global last_file_name
last_file_name[dtype] = fn
def append_root_path(path):
global path_per_dtype
if (path != None):
paths = path.split(";")
path_per_dtype['include'].extend(paths)
path_per_dtype['classfile'].extend(paths)
path_per_dtype['constantfile'].extend(paths)
path_per_dtype['image'].extend(paths)
path_per_dtype['data'].extend(paths)
path_per_dtype['foreign'].extend(
map(lambda path: os.path.join(path, "external", "foreignInterface"),
paths))
path_per_dtype['text'].extend(paths)
path_per_dtype['url'].extend(paths)
def set_include_path(path_list):
global path_per_dtype
if (path_list != None):
path_per_dtype['include'].extend(path_list)
def set_build_info_file(path):
global build_info_file
if path != None:
build_info_file = path
# Creates a directory at path if it doesn't exist, or is not a directory
def mkdir_if_not_exists(path):
if not os.path.isdir(path):
if os.path.exists(path):
os.remove(path)
os.makedirs(path)
def set_common_image_dir(path):
global common_image_dir
if path != None:
common_image_dir = path
mkdir_if_not_exists(path)
def set_common_data_dir(path):
global common_data_dir
if path != None:
common_data_dir = path
mkdir_if_not_exists(path)
def set_cdl_source(fn):
global cdl_source
cdl_source = fn
def get_cdl_source():
global cdl_source
return cdl_source
def set_mode(m):
global mode
mode = m
def get_mode():
global mode
return mode
def set_out_file(fn):
global out_file_handle
    out_file_handle = open(fn, 'w')
def set_dep_file(fn):
global dep_file_handle
if fn != None:
        dep_file_handle = open(fn, 'w')
# Name of the file that stores the resources used in the current input
res_out_file_name = None
# used resources per resource type
used_resources = { "foreign": set(), "font": set(), "text": set() }
# flag to indicate use of external resources
does_use_resources = False
def set_res_out_file(fn):
global res_out_file_name
res_out_file_name = fn
def add_resource_usage(res_type, res_uri):
global used_resources, does_use_resources
if res_uri in used_resources.get(res_type):
return
used_resources.get(res_type).add(res_uri)
does_use_resources = True
def write_resource_usage():
global used_resources, res_out_file_name, does_use_resources
if res_out_file_name != None:
if does_use_resources:
if os.path.exists(res_out_file_name):
with open(res_out_file_name, 'rb') as input:
dict = pickle.load(input)
if dict == used_resources:
return
with open(res_out_file_name, 'wb') as output:
pickle.dump(used_resources, output, pickle.HIGHEST_PROTOCOL)
elif os.path.exists(res_out_file_name):
os.remove(res_out_file_name)
def set_res_use_file(fn):
global used_resources
if fn != None and os.path.exists(fn):
with open(fn, 'rb') as input:
used_resources = pickle.load(input)
def write_font_urls(directive_prefix):
global used_resources
mode = get_mode()
if mode == 'html':
for url in used_resources.get('font'):
write_str(directive_prefix + '<link rel="stylesheet" type="text/css" href="' + url + '">\n')
def set_title(str):
global title
if str == None:
title = ""
else:
title = str
def set_splash_screen_url(str):
global splash_screen_url
if str == None:
splash_screen_url = ""
else:
splash_screen_url = str
def set_make_target(arg, deflt):
global make_target
if arg == None:
make_target = deflt
else:
make_target = arg
def set_max_include_level(arg):
global max_include_level
if arg is not None:
max_include_level = arg
def write_str(str):
global out_file_handle
out_file_handle.write(str)
def write_dep(fn):
global make_target
global dep_file_handle
if dep_file_handle != None:
dep_file_handle.write(make_target + ': ' + fn + '\n')
#
# called just before concatenating the sections (prefix/infix/postfix) into the
# output file
# generates code to merge constants defined in several conf-lib constantfiles
# into a single constant (assumes js mode)
def output_section_hook():
constant_dict = {}
for clentry in conf_lib_by_priority:
cl_const_list = clentry['constant']
for const_entry in cl_const_list:
const_name = const_entry['name']
const_elem = const_entry['element']
if const_name not in constant_dict:
constant_dict[const_name] = []
constant_dict[const_name].append(const_elem)
clentry['constant'] = []
for const_name in constant_dict:
const_merge_def = 'var ' + const_name + ' = ' + \
'mergeCdlConstants(\n\t[\n\t\t'
const_merge_def += ',\n\t\t'.join(constant_dict[const_name])
const_merge_def += '\n\t]\n)\n'
section_print('constantfile', const_merge_def)
def get_output_str():
global section_text
output_section_hook()
str = "".join(section_text['prefix'])
str += "".join(section_text['infix'])
str += "".join(section_text['postfix'])
section_text['prefix'] = []
section_text['infix'] = []
section_text['postfix'] = []
return str
def section_print(dtype, line):
global section_text
if dtype == 'template':
write_str(line)
else:
if 'section' in dtype_property[dtype]:
section = dtype_property[dtype]['section']
else:
section = 'infix'
section_text[section].append(line)
# annotate js output file with the input fn/line#
def gen_filename_and_line_number(dtype, fn):
global processed_files
mode = get_mode()
if mode == 'html':
return
if get_last_file_name(dtype) != fn:
if mode == 'js':
section_print(dtype, "//# " + fn + ":" + str(processed_files[fn]) +
'\n');
set_last_file_name(dtype, fn)
processed_files[fn] = processed_files[fn] + 1
def find_file_in_path(dtype, basename):
global path_per_dtype
if basename.startswith("."):
if (os.path.isfile(basename)):
return basename
else:
path = path_per_dtype[dtype]
for dirp in path:
file_path = os.path.join(dirp, basename)
if (os.path.isfile(file_path)):
return file_path
if dtype == "image":
print("could not find path for image " + basename, file=sys.stderr)
return basename
path_list = ":".join(path)
error_exit("could not find path for <" + dtype + "> file '" +
basename + "' in path '" + path_list + "'");
# add the conf-lib path for dtypes where this is required (e.g. classfile,
# include)
def push_conf_lib_path(path):
global path_per_dtype
global dtype_property
for dtype in dtype_property:
if 'conf_lib' in dtype_property[dtype]:
for sub_path in dtype_property[dtype]['conf_lib']:
dpath = os.path.join(path, sub_path)
path_per_dtype[dtype].append(dpath)
def pop_conf_lib_path(path):
global path_per_dtype
global dtype_property
for dtype in dtype_property:
if 'conf_lib' in dtype_property[dtype]:
for sub_path in reversed(dtype_property[dtype]['conf_lib']):
dpath = os.path.join(path, sub_path)
apath = path_per_dtype[dtype].pop()
if apath != dpath:
error_exit("popped path does not match pushed path")
# add search path per the currently processed conf-lib, and leave it there so
# that following files may use it too (also outside the current conf-lib)
# for the appropriate dtypes (e.g. image, data)
def add_conf_lib_sticky_path(path):
global dtype_property
for dtype in dtype_property:
if 'sticky_conf_lib' in dtype_property[dtype]:
for sub_path in dtype_property[dtype]['sticky_conf_lib']:
dpath = os.path.join(path, sub_path)
path_per_dtype[dtype].append(dpath)
def add_conf_lib(priority, name, path):
global conf_lib_by_priority
global current_conf_lib
conf_lib_by_priority.insert(0,
{
'name': name,
'class_list': [],
'constant': []
})
current_conf_lib = { 'priority': priority, 'name': name, 'path': path }
if name == None:
return
push_conf_lib_path(path)
conf_lib_include = os.path.join(path, "includeList.js")
if os.path.exists(conf_lib_include):
process_file('include', conf_lib_include, os.path.dirname(conf_lib_include))
pop_conf_lib_path(path)
current_conf_lib = None
def set_lib_conf(lib_conf):
global conf_lib_list
if lib_conf == None or not os.path.exists(lib_conf):
return
with open(lib_conf) as lib_conf_handle:
for line in lib_conf_handle:
# remove comments
match = re.search('^[^#]*', line)
line = match.group()
# skip empty lines
if re.search('^\s*$', line):
continue
# parse (allowing spaces)
# <confLibName>:<confLibPath>
match = re.search('^\s*(?P<name>[a-zA-Z0-9_]+)\s*:' +
'\s*(?P<path>[^\s]*)\s*$', line)
if (not match.group('name')) or (not match.group('path')):
error_exit("libConf file syntax error: '" + line + "'");
lcname = match.group('name')
lcpath = match.group('path')
conf_lib_list.append({ 'name': lcname, 'path': lcpath })
def gen_conf_lib_preamble():
for conf_lib in conf_lib_list:
add_conf_lib_sticky_path(conf_lib['path'])
for idx, conf_lib in reversed(list(enumerate(conf_lib_list))):
clpriority = len(conf_lib_list) - idx
add_conf_lib(clpriority, conf_lib['name'], conf_lib['path'])
add_conf_lib(0, None, None)
preamble = get_output_str()
return preamble
def process_directive(line, directive_fn, linenr, basedir):
global make_target, used_resources
filename = None
filenames = None
relative_dir = None
match = re.search('^([^a-z]*)%%([a-z]+)%%:\s*([^\s]*)\s*$', line)
if match == None or len(match.groups()) != 3:
error_exit(directive_fn + ':' + str(linenr) + ': directive has invalid syntax: ' + line)
directive_prefix = match.group(1)
directive = match.group(2)
basename = match.group(3)
stdmatch = re.search('^<(.*)>$', basename)
quotematch = re.search('^"(.*)"$', basename)
# tildematch = re.search('^~/(.*)$', basename)
if stdmatch != None:
filename = find_file_in_path(directive, stdmatch.group(1))
elif quotematch != None:
filename = os.path.join(basedir, quotematch.group(1))
if not os.path.isfile(filename):
filename = find_file_in_path(directive, quotematch.group(1))
elif basename == 'source':
filename = get_cdl_source()
if source_dir is not None:
relative_dir = source_dir
elif basename == 'foreign':
filenames = []
for fn in used_resources.get("foreign"):
filenames.append(find_file_in_path('foreign', fn))
elif basename == 'fonturls':
write_font_urls(directive_prefix)
return
# elif tildematch != None:
# filename = os.path.join(get_root_dir(), tildematch.group(1))
else:
print('basename="' + basename + '"')
if get_mode() == 'incl':
print(directive, filename)
if filename is not None:
if relative_dir is None:
relative_dir = os.path.dirname(filename)
process_file(directive, filename, relative_dir)
elif filenames is not None:
for filename in filenames:
process_file(directive, filename, os.path.dirname(filename))
else:
error_exit('invalid directive: ' + line)
# Only compress svg images
def use_compression_for_image(filename):
return filename.endswith(".svg")
# Compress all data files
def use_compression_for_data(filename):
return True
# Stores which resource has been copied to which path; avoids duplicate copies
# and resolves faster
copied_resources = {}
# Stores which path is the target for which resource; avoids duplicate naming
resource_targets = {}
def add_copied_resource(resource_hash, path):
global copied_resources
if path in resource_targets and resource_targets[path] != resource_hash:
error_exit("{} is the target for both {} and {}".format(
path, resource_targets[path], resource_hash
))
copied_resources[resource_hash] = path
resource_targets[path] = resource_hash
# Returns the path to the file from the macro. When common_dir has been set,
# copies the file to that directory, compressing it when the extension allows
# it, but only when the source file is newer.
def copy_and_compress(type, macro_arg, use_compression_fun, common_dir):
global copied_resources
resource_hash = type + ':' + macro_arg
if resource_hash in copied_resources:
return copied_resources[resource_hash]
src_path = find_file_in_path(type, macro_arg)
if common_dir == None:
add_copied_resource(resource_hash, src_path)
return src_path
out_path = os.path.join(common_dir, os.path.basename(macro_arg))
if not os.path.exists(src_path):
print("{0} does not exist: {1}".format(type, src_path), file=sys.stderr)
add_copied_resource(resource_hash, out_path)
return out_path
use_compression = use_compression_fun(macro_arg)
if out_path == src_path:
add_copied_resource(resource_hash, src_path)
return out_path # In case someone puts the images in the common_dir
target_path = out_path
if use_compression:
target_path += '.gz'
if not os.path.exists(target_path) or os.path.getmtime(target_path) < os.path.getmtime(src_path):
if use_compression:
with open(src_path, 'rb') as f_in, gzip.open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
with open(src_path, 'rb') as f_in, open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
add_copied_resource(resource_hash, src_path)
return out_path
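# For illustration (the paths are hypothetical): with common_dir set to
# 'build/img', an image macro argument 'icons/logo.svg' that resolves to, say,
# 'design/img/icons/logo.svg' is gzip-compressed (it ends in .svg) into
# 'build/img/logo.svg.gz' whenever that target is missing or older than the
# source, while the value returned, and hence substituted into the generated
# code, is the uncompressed name 'build/img/logo.svg'.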
# format: %%image:(url)%%. Behaves like process_image_macro.
# Calls copy_and_compress for an image
def process_image_macro(macro_name, macro_args):
global common_image_dir
return copy_and_compress('image', macro_args[0], use_compression_for_image, common_image_dir)
# format: %%font:(fontFamily,url)%%, no comma in the font name, no superfluous spaces
def process_font_macro(macro_name, macro_args):
if len(macro_args) < 2:
error_exit('font macro should have two arguments')
url = ",".join(macro_args[1:]) # in case the URL constains commas
add_resource_usage('font', url)
return macro_args[0]
# format: %%data:(url)%%. Behaves like process_image_macro.
# Calls copy_and_compress for a data file
def process_data_macro(macro_name, macro_args):
global common_data_dir
return copy_and_compress('data', macro_args[0], use_compression_for_data, common_data_dir)
def process_buildinfo_macro(macro_name, macro_args):
global build_info_file
return build_info_file
def process_conf_lib_preamble_macro(macro_name, macro_args):
push_include_file('template', '--conf-lib-include--')
str = '\n' + gen_conf_lib_preamble()
pop_include_file('template', '--conf-lib-include--')
return str
def process_title_macro(macro_name, macro_args):
global title
return title
def process_splash_screen_url_macro(macro_name, macro_args):
global splash_screen_url
return normalize_path(find_file_in_path('url', splash_screen_url))
def process_classes_macro(macro_name, macro_args):
global conf_lib_by_priority
return "\n" + \
",\n".join(
map(
lambda x: "\t{\n\t\tname: '" +
("" if x['name'] == None else x['name']) +
"',\n\t\tclasses: [\n\t\t\t" +
",\n\t\t\t".join(x['class_list']) +
"\n\t\t]\n\t}",
conf_lib_by_priority
)
) + "\n"
def process_textfile_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('textfile macro should have one argument')
src_path = find_file_in_path('text', macro_args[0])
if get_mode() == 'incl':
print('textfile', src_path)
return ""
str = ""
with open(src_path) as input_handle:
for line in input_handle:
str += "\\n" + line[:-1].replace('\\', '\\\\').replace('"', '\\"')
return str[2:]
def process_url_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('url macro should have one argument')
return find_file_in_path('url', macro_args[0])
def process_macro(dtype, line, fn, linenr, match):
macro_name = match.group(1)
macro_arg_str = match.group(2)
# extract arguments
macro_args = re.findall('[^,]+', macro_arg_str)
if macro_name == 'image':
macro_subst = process_image_macro(macro_name, macro_args)
elif macro_name == 'data':
macro_subst = process_data_macro(macro_name, macro_args)
elif macro_name == 'font':
macro_subst = process_font_macro(macro_name, macro_args)
elif macro_name == 'buildinfo':
macro_subst = process_buildinfo_macro(macro_name, macro_args)
elif macro_name == 'conflibPreamble':
macro_subst = process_conf_lib_preamble_macro(macro_name, macro_args)
elif macro_name == 'title':
macro_subst = process_title_macro(macro_name, macro_args)
elif macro_name == 'splashScreenUrl':
macro_subst = process_splash_screen_url_macro(macro_name, macro_args)
elif macro_name == 'classes':
macro_subst = process_classes_macro(macro_name, macro_args)
elif macro_name == 'textfile':
macro_subst = process_textfile_macro(macro_name, macro_args)
elif macro_name == 'url':
macro_subst = process_url_macro(macro_name, macro_args)
else:
error_exit(fn + ':' + str(linenr) + ": don't know (yet) how to handle macro '" + macro_name +
"' in '" + line + "'")
if macro_subst == None:
error_exit(fn + ':' + str(linenr) + ': empty subst')
return macro_subst
def get_current_conf_lib_name():
global current_conf_lib
if current_conf_lib == None or current_conf_lib['name'] == None:
conf_lib_name = ""
else:
conf_lib_name = current_conf_lib['name']
return conf_lib_name
def verify_current_conf_lib(conf_lib_name):
cblp_name = conf_lib_by_priority[0]['name']
if cblp_name == None:
cblp_name = ""
if cblp_name != conf_lib_name:
error_exit('confLib names do not match')
def process_class_def(dtype, line, fn):
"""replace 'var classes =' with 'var <CL>__<fn>__classes ='
where <CL> is the current confLib (may be empty) and <fn> is the current
source file name"""
global conf_lib_by_priority
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
mclass_name = conf_lib_name + '__' + stemname(fn, conf_lib_name) + '__classes'
mclass_def = 'var ' + mclass_name + ' ='
match = re.search('^\s*var[^=]*=(.*)$', line)
mclass_def = mclass_def + match.group(1) + "\n"
section_print(dtype, mclass_def)
conf_lib_by_priority[0]['class_list'].append(mclass_name)
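# For illustration (confLib 'Core' and file 'design/myFile.js' are
# hypothetical): a class file line such as
#   var classes = { Draggable: { ... } };
# is emitted by process_class_def as
#   var Core__myFile__classes = { Draggable: { ... } };
# and 'Core__myFile__classes' is appended to the current confLib's class_list,
# to be collected later by the %%classes:()%% macro.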
def process_constant_def(dtype, line, fn):
"""
replace
'var xxxConstants = { ... };'
with
'var <confLib1>__xxxConstants = { ... };'
and then, at the end of the 'constantfile' section append
'var xxxConstants = mergeCdlConstants(
<confLib1>__xxxConstants,
<confLib2>__xxxConstants,
...
);'
(ordered by confLib priority) to allow higher priority confLibs to
overwrite constants defined in lower priority confLibs, such that the
effect reaches back into the lower priority confLib. For example, if Core has
CellConstants = {
width: 5
}
Cell: { position: { width: CellConstants.width } }
and Mon1 has
CellConstants = {
width: 2
}
then setting CellConstants.width to 2 must occur before including Core::Cell
a constant definition is also identified as
var xxx = { // %%constantdef%%
"""
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
# neutralize processed %%constantdef%% by converting %% to %-
constdef_match = re.search('^(.*//.*)%%constantdef%%(.*)$', line)
if constdef_match:
line = constdef_match.group(1) + '%-constantdef-%' + \
constdef_match.group(2)
match = re.search('^\s*var\s+([a-zA-Z0-9_]+)\s*=(.*)$', line)
if (not match) or (not match.group(1)) or (not match.group(2)):
error_exit('constant_def: parse failure (' + line + ')')
const_name = match.group(1)
mconst_name = conf_lib_name + '__' + const_name
mconst_def = 'var ' + mconst_name + ' =' + match.group(2) + "\n"
section_print(dtype, mconst_def)
conf_lib_by_priority[0]['constant'].append({
'name': const_name,
'element': mconst_name
})
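# Continuing the CellConstants example above (the confLibs 'Core' and 'Mon1'
# are hypothetical): 'var CellConstants = { width: 5 }' in Core and
# 'var CellConstants = { width: 2 }' in Mon1 are emitted as
#   var Core__CellConstants = { width: 5 }
#   var Mon1__CellConstants = { width: 2 }
# and output_section_hook() later appends, in conf_lib_by_priority order and
# with its indentation simplified here, something along the lines of
#   var CellConstants = mergeCdlConstants(
#       [
#           Mon1__CellConstants,
#           Core__CellConstants
#       ]
#   )
# to the 'constantfile' section.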
# The pattern for macros
macro_re = re.compile('%%([a-zA-Z0-9_]*):\(([^%()]*)\)%%')
# The pattern for includes
include_re = re.compile('^[^a-z]*%%[a-z]+%%:')
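# For illustration only (the file names are hypothetical), lines matched by
# the two patterns above look like this:
#   // %%include%%: <utils.js>        inclusion directive; looked up in the
#                                     'include' search path
#   // %%classfile%%: "myClasses.js"  inclusion directive; resolved relative
#                                     to the including file first
#   var appTitle = "%%title:()%%";    macro; replaced by the --title argument
#   icon: "%%image:(img/logo.svg)%%"  macro; the image is copied/compressed
#                                     and the resulting path substituted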
# Returns a string indicating the line type
# - 'class' when the line is var classes/stemname = ...
# - 'constant' when the line defines an xxxConstants object or carries a
#   %%constantdef%% marker
# - 'screen' when the line is var screenArea = ...
# - 'test' when the line is var test = ...
# - '' otherwise
def process_line(dtype, line, fn, linenr, basedir):
line = line.rstrip('\n')
line += '\n'
mode = get_mode()
line = macro_re.sub(lambda match_group: process_macro(dtype, line, fn, linenr, match_group), line)
if include_re.search(line):
process_directive(line, fn, linenr, basedir)
elif dtype == 'classfile' and (re.search('^\s*var\s+classes\s*=', line) or \
re.search('^\s*var\s*' + stemname(fn, None) + '\s*=', line)):
if mode == 'js':
process_class_def(dtype, line, fn)
return 'class'
elif (dtype == 'constantfile' and \
re.search('^\s*var\s+[a-zA-Z0-9_]+[cC]onstants\s*=', line)) \
or \
re.search('\s*var\s+[a-zA-Z0-9_]+\s*=.*//.*%%constantdef%%', line):
if mode == 'js':
process_constant_def(dtype, line, fn)
return 'constant'
else:
if dtype == 'template' or get_mode() == 'js':
section_print(dtype, line)
if re.search('^\s*var\s+screenArea\s*=', line):
return 'screen'
if re.search('^\s*var\s+test\s*=', line):
return 'test'
return ''
def process_file(dtype, filename, basedir):
global processed_files
global nr_screen_area
global nr_test
global max_include_level
class_found = False
screen_area_found = False
test_found = False
constant_found = False
mode = get_mode()
linenr = 1
if dtype == 'foreign':
add_resource_usage('foreign', filename)
return
normalized_filename = normalize_path(filename)
if dtype == 'include' and len(fn_stack) >= max_include_level:
return
write_dep(normalized_filename)
# annotate("process_file: type='" + dtype + "' filename='" + filename +
# "' (" + normalized_filename + ")")
push_include_file(dtype, normalized_filename)
if normalized_filename not in processed_files:
processed_files[normalized_filename] = 1
try:
with open(normalized_filename) as input_handle:
for line in input_handle:
gen_filename_and_line_number(dtype, normalized_filename)
line_type = process_line(dtype, line, normalized_filename, linenr, basedir)
if line_type == 'class':
if class_found:
error_exit("two class definitions in " + normalized_filename)
class_found = True
elif line_type == 'screen':
if screen_area_found:
error_exit("two screenAreas in " + normalized_filename)
screen_area_found = True
nr_screen_area += 1
elif line_type == 'test':
if test_found:
error_exit("two test definitions in " + normalized_filename)
test_found = True
nr_test += 1
elif line_type == 'constant':
constant_found = True
# if mode == 'incl' and (class_found or screen_area_found or test_found or constant_found):
# break # Stop scanning file for includes
linenr += 1
except IOError:
print("cannot open file: " + normalized_filename + " from " + fn_stack[len(fn_stack)-2], file=sys.stderr)
sys.exit(1)
if mode == 'html':
if 'html' in dtype_property[dtype]:
html_handling = dtype_property[dtype]['html']
if html_handling == 'script':
section_print(dtype, '\t<script src="' +
normalized_filename + '">')
section_print(dtype, '</script>\n')
if dtype == 'classfile' and mode != 'incl' and \
not screen_area_found and not class_found and not test_found:
print("WARNING: no screenArea, classes or test defined in " + normalized_filename)
pop_include_file(dtype, normalized_filename)
def main():
global reference_dir
parser = get_arg_parser()
args = parser.parse_args()
mode = args.mode
set_reference_dir(args.referencedir)
set_mode(mode)
set_source_dir(args.sourcedir)
append_root_path(args.langdir)
append_root_path(args.cdldir)
set_include_path(args.includedir)
set_dep_file(args.dep_file)
set_res_out_file(args.resourceOutFile)
set_res_use_file(args.resourceUseFile)
set_build_info_file(args.buildInfoFile)
set_common_image_dir(args.commonImageDir)
set_common_data_dir(args.commonDataDir)
cdl_source = args.cdl_source
set_cdl_source(cdl_source)
template = args.template
out_file = args.out_file
set_make_target(args.dep_target, out_file)
set_max_include_level(args.max_include_level)
set_out_file(out_file)
libConf = args.libConf
set_lib_conf(libConf)
set_title(args.title)
set_splash_screen_url(args.splash_screen_url)
process_file('template', template, os.path.dirname(template))
write_resource_usage()
if mode == 'js' and out_file.endswith(".comp.js.tmp") and \
nr_screen_area != 1:
error_exit("no screenArea definition")
sys.exit(0)
if __name__ == "__main__":
main()
| 32.738962
| 139
| 0.654403
|
# javascript files. Changes to the run-time javascript files take effect
# with just a reload of the browser, while changes to typescript files require
# compiling typescript to javascript (but not cdl recompilation or
# re-generation of the .html file by this script).
# For batch automatic test execution, node.js is used, so 'js' mode is
# required.
# Also, the 'production' format, as uploaded to the build-web-site, uses
# an 'uglified' single-file javascript referenced by an html file. This
# single-file javascript is generated by this script using 'js' mode.
#
#
#
# Basic Functionality:
#
# This script takes two main input files: a template file and a source file.
# The template file guides the generation of the output file. For 'simple'
# template lines, the script merely copies the line from the template file to
# the output file.
# However, the template files typically also use directives and macros.
# Directives cause the inclusion of another file. In 'html' mode, this is
# achieved by placing a <script> tag in the output file, while in 'js' mode
# the actual content of the included file is written to the output file.
# In either mode, included files are recursively processed, so that nested
# inclusion directives are handled.
# Macros are replaced by a string which this script computes. For example,
# the title macro, '%%title:()%%', is replaced with the value of the '--title'
# argument to this script.
#
# confLib handling:
#
# When generating a compilation program, this script can be given the
# path of a '--libconf' file. A libconf file describes a hierarchy of
# confLibs (configuration libraries), one 'name: path' entry per line.
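#
# For illustration only (the names and paths are hypothetical), a libconf file
# might look like this; '#' starts a comment, blank lines are ignored, and
# earlier entries take priority over later ones:
#
#   # confLibs, highest priority first
#   Mon1: apps/mon1
#   Core: lang/core
#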
from __future__ import print_function
import os
import sys
import re
import argparse
import pickle
import gzip
import shutil
mode = None
title = None
splash_screen_url = None
max_include_level = 999999999
# --commonImageDir
common_image_dir = None
# --commonDataDir
common_data_dir = None
# all normalized paths are relative to this directory
# (absolute paths generated in a cygwin environment are meaningless to the
# non-cygwin browser reading these paths, so paths are created as relative)
reference_dir = None
# When not none, this is supposed to be the base directory relative to the
# source file; useful for ignoring the intermediate/ prefix when processing
# .run.js files.
source_dir = None
# a list of '{ name: cl-name, path: cl-path }' generated by parsing the
# confLib file
#
conf_lib_list = []
# for each file read/being-read, processed-files has an attribute which is the
# file's name, and whose value is the current line number in that file
processed_files = {}
# the name of the file from which the last input line was read
# has different value for each dtype (classfile/include/constantfile etc)
last_file_name = {}
# writing into this handle writes into the output file
out_file_handle = None
# handle to the file with the dependencies
dep_file_handle = None
# search path per dtype - a list of directories
path_per_dtype = {
'include': [],
'classfile': [],
'constantfile': [],
'template': [],
'image': [],
'data': [],
'foreign': [],
'text': [],
'url': []
}
# the 'source' file, the single positional run-time argument --> 'source'
cdl_source = None
# --buildInfoFile --> %%buildinfo:()%%
build_info_file = None
# the stack of files currently being read, which file included which file etc
# to get us to read the current file (at the top of stack)
fn_stack = []
# 'inclusion_cycle_permitted' - defaults to false
# 'section' - defaults to 'infix' (can be set to 'prefix'/'postfix')
# 'conf_lib' - include paths used while processing that conf-lib
# 'sticky_conf_lib' - include paths that are added and remain in place
dtype_property = {
'include': {
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script',
'inclusion_cycle_permitted': True
},
'classfile': {
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script'
},
'constantfile': {
'section': 'prefix',
'conf_lib': ['design', 'func', 'automated' ],
'html': 'script'
},
'template': {},
'image': {
'sticky_conf_lib': ['design/img']
}
}
# generally, text is not written directly into the output; rather it is
# 'written' by appending it to the appropriate section.
# in practice, all dtypes write to 'infix' except for 'constantfile' which
# writes into 'prefix', so that constants are defined by the time the classes
# attempt to use them
section_text = {
'prefix': [],
'infix': [],
'postfix': [],
}
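# For illustration: dtype 'constantfile' declares section 'prefix' above, so a
# constant emitted via section_print('constantfile', ...) ends up before any
# class code emitted via section_print('classfile', ...), which defaults to
# 'infix'; get_output_str() then concatenates prefix, infix and postfix in
# that order.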
# conf_libs are inserted into it at 0, so that the conf-lib that ends up being
# last (== the conf-lib of least priority) is the first to be inserted
# each entry has the following attributes:
# 'name' - conf-lib-name
# 'class_list' - the list of class_lists associated with this conf_lib
# (e.g. ['Core__draggableClasses', 'Core__snappabaleClasses',..])
# 'constant' - the list of constant defs associated with this conf_lib
# (as {
# 'name': 'positioningConstants',
# 'element': 'Core__positioningConstants'
# } )
#
conf_lib_by_priority = []
current_conf_lib = None
# Counter for number of screenArea declarations and var test lines
nr_screen_area = 0
nr_test = 0
# Target for make file include
make_target = None
def error_exit(msg):
print(sys.argv[0], ": ", msg, file=sys.stderr)
# print(str(fn_stack), file=sys.stderr)
sys.exit(1)
# Dictionary to check that no two file names get mapped onto the same class
# variable
class_stem_names = {}
def stemname(path, conf_lib_name):
basename = os.path.splitext(os.path.basename(path))[0]
class_identifier = re.sub("[^a-zA-Z0-9_]+", "_", basename)
if conf_lib_name is None:
return class_identifier
conflib_class_identifier = conf_lib_name + "__" + class_identifier
if conflib_class_identifier in class_stem_names and class_stem_names[conflib_class_identifier] != path:
error_exit("files {} and {} map to the same variable name".format(path, class_stem_names[conflib_class_identifier]))
class_stem_names[conflib_class_identifier] = path
return class_identifier
def get_arg_parser():
parser = argparse.ArgumentParser(description='cdl script aggregator')
parser.add_argument('-o', '--out_file', help='output file name',
required=True)
parser.add_argument('-d', '--dep_file', help='dependency file name')
parser.add_argument('--dep_target', help='dependency target')
parser.add_argument('cdl_source', help='input cdl script file')
parser.add_argument('-t', '--template', help='template file name')
parser.add_argument('-m', '--mode', choices=['js', 'html', 'incl', 'make'],
help='processing mode, "js"/"html"/"incl"/"make"', required=True)
parser.add_argument('--includedir', help='include directory',
action='append')
parser.add_argument('--langdir', help='root of the lang directory')
parser.add_argument('--cdldir', help='root of the cdl apps and classes directory')
parser.add_argument('--referencedir', help='set reference directory; by default cwd')
parser.add_argument('--sourcedir', help='set directory for includes from source file')
parser.add_argument('-c', '--libConf', help='library configuration')
parser.add_argument('--buildInfoFile',
help='path of the build-info file, with revision information')
parser.add_argument('--resourceOutFile', help='path for writing used resources')
parser.add_argument('--resourceUseFile', help='path for used resources')
parser.add_argument('--max-include-level', type=int, help='suppress indirect includes above level')
parser.add_argument('--commonImageDir', help='replaces all references in image macros')
parser.add_argument('--commonDataDir', help='replaces all references in data macros')
parser.add_argument('--title')
parser.add_argument('--splash-screen-url', help='a URL pointing at the HTML page which should serve as a splash screen')
return parser
def annotate(msg):
print(msg)
def set_reference_dir(rd):
global reference_dir
if rd is None:
reference_dir = os.path.realpath(os.getcwd())
else:
reference_dir = os.path.realpath(rd)
def set_source_dir(sd):
global source_dir
if sd is not None:
source_dir = os.path.realpath(sd)
def normalize_path(path):
real_path = os.path.realpath(path)
return os.path.relpath(real_path, reference_dir)
def push_include_file(dtype, fn):
global fn_stack
global dtype_property
if dtype not in dtype_property:
error_exit("unknown type '" + dtype + "'")
cycle_permitted = dtype_property[dtype]['inclusion_cycle_permitted'] if 'inclusion_cycle_permitted' in dtype_property[dtype] else False
if not cycle_permitted:
if fn in fn_stack:
error_exit("inclusion cycle for '" + fn + "'")
fn_stack.append(fn)
def pop_include_file(dtype, fn):
global fn_stack
popped_file = fn_stack.pop()
if popped_file != fn:
error_exit("internal error: wrong file in pop (" +
fn + "!=" + popped_file + ")")
if len(fn_stack) == 1:
str = get_output_str()
write_str(str)
def get_last_file_name(dtype):
global last_file_name
if dtype in last_file_name:
return last_file_name[dtype]
return ""
def set_last_file_name(dtype, fn):
global last_file_name
last_file_name[dtype] = fn
def append_root_path(path):
global path_per_dtype
if (path != None):
paths = path.split(";")
path_per_dtype['include'].extend(paths)
path_per_dtype['classfile'].extend(paths)
path_per_dtype['constantfile'].extend(paths)
path_per_dtype['image'].extend(paths)
path_per_dtype['data'].extend(paths)
path_per_dtype['foreign'].extend(
map(lambda path: os.path.join(path, "external", "foreignInterface"),
paths))
path_per_dtype['text'].extend(paths)
path_per_dtype['url'].extend(paths)
def set_include_path(path_list):
global path_per_dtype
if (path_list != None):
path_per_dtype['include'].extend(path_list)
def set_build_info_file(path):
global build_info_file
if path != None:
build_info_file = path
# Creates a directory at path if it doesn't exist, or is not a directory
def mkdir_if_not_exists(path):
if not os.path.isdir(path):
if os.path.exists(path):
os.remove(path)
os.makedirs(path)
def set_common_image_dir(path):
global common_image_dir
if path != None:
common_image_dir = path
mkdir_if_not_exists(path)
def set_common_data_dir(path):
global common_data_dir
if path != None:
common_data_dir = path
mkdir_if_not_exists(path)
def set_cdl_source(fn):
global cdl_source
cdl_source = fn
def get_cdl_source():
global cdl_source
return cdl_source
def set_mode(m):
global mode
mode = m
def get_mode():
global mode
return mode
def set_out_file(fn):
global out_file_handle
out_file_handle = open(fn, 'w');
def set_dep_file(fn):
global dep_file_handle
if fn != None:
dep_file_handle = open(fn, 'w');
res_out_file_name = None
used_resources = { "foreign": set(), "font": set(), "text": set() }
does_use_resources = False
def set_res_out_file(fn):
global res_out_file_name
res_out_file_name = fn
def add_resource_usage(res_type, res_uri):
global used_resources, does_use_resources
if res_uri in used_resources.get(res_type):
return
used_resources.get(res_type).add(res_uri)
does_use_resources = True
def write_resource_usage():
global used_resources, res_out_file_name, does_use_resources
if res_out_file_name != None:
if does_use_resources:
if os.path.exists(res_out_file_name):
with open(res_out_file_name, 'rb') as input:
dict = pickle.load(input)
if dict == used_resources:
return
with open(res_out_file_name, 'wb') as output:
pickle.dump(used_resources, output, pickle.HIGHEST_PROTOCOL)
elif os.path.exists(res_out_file_name):
os.remove(res_out_file_name)
def set_res_use_file(fn):
global used_resources
if fn != None and os.path.exists(fn):
with open(fn, 'rb') as input:
used_resources = pickle.load(input)
def write_font_urls(directive_prefix):
global used_resources
mode = get_mode()
if mode == 'html':
for url in used_resources.get('font'):
write_str(directive_prefix + '<link rel="stylesheet" type="text/css" href="' + url + '">\n')
def set_title(str):
global title
if str == None:
title = ""
else:
title = str
def set_splash_screen_url(str):
global splash_screen_url
if str == None:
splash_screen_url = ""
else:
splash_screen_url = str
def set_make_target(arg, deflt):
global make_target
if arg == None:
make_target = deflt
else:
make_target = arg
def set_max_include_level(arg):
global max_include_level
if arg is not None:
max_include_level = arg
def write_str(str):
global out_file_handle
out_file_handle.write(str)
def write_dep(fn):
global make_target
global dep_file_handle
if dep_file_handle != None:
dep_file_handle.write(make_target + ': ' + fn + '\n')
def output_section_hook():
constant_dict = {}
for clentry in conf_lib_by_priority:
cl_const_list = clentry['constant']
for const_entry in cl_const_list:
const_name = const_entry['name']
const_elem = const_entry['element']
if const_name not in constant_dict:
constant_dict[const_name] = []
constant_dict[const_name].append(const_elem)
clentry['constant'] = []
for const_name in constant_dict:
const_merge_def = 'var ' + const_name + ' = ' + \
'mergeCdlConstants(\n\t[\n\t\t'
const_merge_def += ',\n\t\t'.join(constant_dict[const_name])
const_merge_def += '\n\t]\n)\n'
section_print('constantfile', const_merge_def)
def get_output_str():
global section_text
output_section_hook()
str = "".join(section_text['prefix'])
str += "".join(section_text['infix'])
str += "".join(section_text['postfix'])
section_text['prefix'] = []
section_text['infix'] = []
section_text['postfix'] = []
return str
def section_print(dtype, line):
global section_text
if dtype == 'template':
write_str(line)
else:
if 'section' in dtype_property[dtype]:
section = dtype_property[dtype]['section']
else:
section = 'infix'
section_text[section].append(line)
def gen_filename_and_line_number(dtype, fn):
global processed_files
mode = get_mode()
if mode == 'html':
return
if get_last_file_name(dtype) != fn:
if mode == 'js':
section_print(dtype, "//# " + fn + ":" + str(processed_files[fn]) +
'\n');
set_last_file_name(dtype, fn)
processed_files[fn] = processed_files[fn] + 1
def find_file_in_path(dtype, basename):
global path_per_dtype
if basename.startswith("."):
if (os.path.isfile(basename)):
return basename
else:
path = path_per_dtype[dtype]
for dirp in path:
file_path = os.path.join(dirp, basename)
if (os.path.isfile(file_path)):
return file_path
if dtype == "image":
print("could not find path for image " + basename, file=sys.stderr)
return basename
path_list = ":".join(path)
error_exit("could not find path for <" + dtype + "> file '" +
basename + "' in path '" + path_list + "'");
def push_conf_lib_path(path):
global path_per_dtype
global dtype_property
for dtype in dtype_property:
if 'conf_lib' in dtype_property[dtype]:
for sub_path in dtype_property[dtype]['conf_lib']:
dpath = os.path.join(path, sub_path)
path_per_dtype[dtype].append(dpath)
def pop_conf_lib_path(path):
global path_per_dtype
global dtype_property
for dtype in dtype_property:
if 'conf_lib' in dtype_property[dtype]:
for sub_path in reversed(dtype_property[dtype]['conf_lib']):
dpath = os.path.join(path, sub_path)
apath = path_per_dtype[dtype].pop()
if apath != dpath:
error_exit("popped path does not match pushed path")
def add_conf_lib_sticky_path(path):
global dtype_property
for dtype in dtype_property:
if 'sticky_conf_lib' in dtype_property[dtype]:
for sub_path in dtype_property[dtype]['sticky_conf_lib']:
dpath = os.path.join(path, sub_path)
path_per_dtype[dtype].append(dpath)
def add_conf_lib(priority, name, path):
global conf_lib_by_priority
global current_conf_lib
conf_lib_by_priority.insert(0,
{
'name': name,
'class_list': [],
'constant': []
})
current_conf_lib = { 'priority': priority, 'name': name, 'path': path }
if name == None:
return
push_conf_lib_path(path)
conf_lib_include = os.path.join(path, "includeList.js")
if os.path.exists(conf_lib_include):
process_file('include', conf_lib_include, os.path.dirname(conf_lib_include))
pop_conf_lib_path(path)
current_conf_lib = None
def set_lib_conf(lib_conf):
global conf_lib_list
if lib_conf == None or not os.path.exists(lib_conf):
return
with open(lib_conf) as lib_conf_handle:
for line in lib_conf_handle:
match = re.search('^[^#]*', line)
line = match.group()
if re.search('^\s*$', line):
continue
match = re.search('^\s*(?P<name>[a-zA-Z0-9_]+)\s*:' +
'\s*(?P<path>[^\s]*)\s*$', line)
if (not match.group('name')) or (not match.group('path')):
error_exit("libConf file syntax error: '" + line + "'");
lcname = match.group('name')
lcpath = match.group('path')
conf_lib_list.append({ 'name': lcname, 'path': lcpath })
def gen_conf_lib_preamble():
for conf_lib in conf_lib_list:
add_conf_lib_sticky_path(conf_lib['path'])
for idx, conf_lib in reversed(list(enumerate(conf_lib_list))):
clpriority = len(conf_lib_list) - idx
add_conf_lib(clpriority, conf_lib['name'], conf_lib['path'])
add_conf_lib(0, None, None)
preamble = get_output_str()
return preamble
def process_directive(line, directive_fn, linenr, basedir):
global make_target, used_resources
filename = None
filenames = None
relative_dir = None
match = re.search('^([^a-z]*)%%([a-z]+)%%:\s*([^\s]*)\s*$', line)
if match == None or len(match.groups()) != 3:
error_exit(directive_fn + ':' + str(linenr) + ': directive has invalid syntax: ' + line)
directive_prefix = match.group(1)
directive = match.group(2)
basename = match.group(3)
stdmatch = re.search('^<(.*)>$', basename)
quotematch = re.search('^"(.*)"$', basename)
if stdmatch != None:
filename = find_file_in_path(directive, stdmatch.group(1))
elif quotematch != None:
filename = os.path.join(basedir, quotematch.group(1))
if not os.path.isfile(filename):
filename = find_file_in_path(directive, quotematch.group(1))
elif basename == 'source':
filename = get_cdl_source()
if source_dir is not None:
relative_dir = source_dir
elif basename == 'foreign':
filenames = []
for fn in used_resources.get("foreign"):
filenames.append(find_file_in_path('foreign', fn))
elif basename == 'fonturls':
write_font_urls(directive_prefix)
return
else:
print('basename="' + basename + '"')
if get_mode() == 'incl':
print(directive, filename)
if filename is not None:
if relative_dir is None:
relative_dir = os.path.dirname(filename)
process_file(directive, filename, relative_dir)
elif filenames is not None:
for filename in filenames:
process_file(directive, filename, os.path.dirname(filename))
else:
error_exit('invalid directive: ' + line)
def use_compression_for_image(filename):
return filename.endswith(".svg")
def use_compression_for_data(filename):
return True
copied_resources = {}
resource_targets = {}
def add_copied_resource(resource_hash, path):
global copied_resources
if path in resource_targets and resource_targets[path] != resource_hash:
error_exit("{} is the target for both {} and {}".format(
path, resource_targets[path], resource_hash
))
copied_resources[resource_hash] = path
resource_targets[path] = resource_hash
def copy_and_compress(type, macro_arg, use_compression_fun, common_dir):
global copied_resources
resource_hash = type + ':' + macro_arg
if resource_hash in copied_resources:
return copied_resources[resource_hash]
src_path = find_file_in_path(type, macro_arg)
if common_dir == None:
add_copied_resource(resource_hash, src_path)
return src_path
out_path = os.path.join(common_dir, os.path.basename(macro_arg))
if not os.path.exists(src_path):
print("{0} does not exist: {1}".format(type, src_path), file=sys.stderr)
add_copied_resource(resource_hash, out_path)
return out_path
use_compression = use_compression_fun(macro_arg)
if out_path == src_path:
add_copied_resource(resource_hash, src_path)
return out_path
target_path = out_path
if use_compression:
target_path += '.gz'
if not os.path.exists(target_path) or os.path.getmtime(target_path) < os.path.getmtime(src_path):
if use_compression:
with open(src_path, 'rb') as f_in, gzip.open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
with open(src_path, 'rb') as f_in, open(target_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
add_copied_resource(resource_hash, src_path)
return out_path
def process_image_macro(macro_name, macro_args):
global common_image_dir
return copy_and_compress('image', macro_args[0], use_compression_for_image, common_image_dir)
def process_font_macro(macro_name, macro_args):
if len(macro_args) < 2:
error_exit('font macro should have two arguments')
url = ",".join(macro_args[1:])
add_resource_usage('font', url)
return macro_args[0]
def process_data_macro(macro_name, macro_args):
global common_data_dir
return copy_and_compress('data', macro_args[0], use_compression_for_data, common_data_dir)
def process_buildinfo_macro(macro_name, macro_args):
global build_info_file
return build_info_file
def process_conf_lib_preamble_macro(macro_name, macro_args):
push_include_file('template', '--conf-lib-include--')
str = '\n' + gen_conf_lib_preamble()
pop_include_file('template', '--conf-lib-include--')
return str
def process_title_macro(macro_name, macro_args):
global title
return title
def process_splash_screen_url_macro(macro_name, macro_args):
global splash_screen_url
return normalize_path(find_file_in_path('url', splash_screen_url))
def process_classes_macro(macro_name, macro_args):
global conf_lib_by_priority
return "\n" + \
",\n".join(
map(
lambda x: "\t{\n\t\tname: '" +
("" if x['name'] == None else x['name']) +
"',\n\t\tclasses: [\n\t\t\t" +
",\n\t\t\t".join(x['class_list']) +
"\n\t\t]\n\t}",
conf_lib_by_priority
)
) + "\n"
def process_textfile_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('textfile macro should have one argument')
src_path = find_file_in_path('text', macro_args[0])
if get_mode() == 'incl':
print('textfile', src_path)
return ""
str = ""
with open(src_path) as input_handle:
for line in input_handle:
str += "\\n" + line[:-1].replace('\\', '\\\\').replace('"', '\\"')
return str[2:]
def process_url_macro(macro_name, macro_args):
if len(macro_args) != 1:
error_exit('url macro should have one argument')
return find_file_in_path('url', macro_args[0])
def process_macro(dtype, line, fn, linenr, match):
macro_name = match.group(1)
macro_arg_str = match.group(2)
macro_args = re.findall('[^,]+', macro_arg_str)
if macro_name == 'image':
macro_subst = process_image_macro(macro_name, macro_args)
elif macro_name == 'data':
macro_subst = process_data_macro(macro_name, macro_args)
elif macro_name == 'font':
macro_subst = process_font_macro(macro_name, macro_args)
elif macro_name == 'buildinfo':
macro_subst = process_buildinfo_macro(macro_name, macro_args)
elif macro_name == 'conflibPreamble':
macro_subst = process_conf_lib_preamble_macro(macro_name, macro_args)
elif macro_name == 'title':
macro_subst = process_title_macro(macro_name, macro_args)
elif macro_name == 'splashScreenUrl':
macro_subst = process_splash_screen_url_macro(macro_name, macro_args)
elif macro_name == 'classes':
macro_subst = process_classes_macro(macro_name, macro_args)
elif macro_name == 'textfile':
macro_subst = process_textfile_macro(macro_name, macro_args)
elif macro_name == 'url':
macro_subst = process_url_macro(macro_name, macro_args)
else:
error_exit(fn + ':' + str(linenr) + ": don't know (yet) how to handle macro '" + macro_name +
"' in '" + line + "'")
if macro_subst == None:
error_exit(fn + ':' + str(linenr) + ': empty subst')
return macro_subst
def get_current_conf_lib_name():
global current_conf_lib
if current_conf_lib == None or current_conf_lib['name'] == None:
conf_lib_name = ""
else:
conf_lib_name = current_conf_lib['name']
return conf_lib_name
def verify_current_conf_lib(conf_lib_name):
cblp_name = conf_lib_by_priority[0]['name']
if cblp_name == None:
cblp_name = ""
if cblp_name != conf_lib_name:
error_exit('confLib names do not match')
def process_class_def(dtype, line, fn):
global conf_lib_by_priority
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
mclass_name = conf_lib_name + '__' + stemname(fn, conf_lib_name) + '__classes'
mclass_def = 'var ' + mclass_name + ' ='
match = re.search('^\s*var[^=]*=(.*)$', line)
mclass_def = mclass_def + match.group(1) + "\n"
section_print(dtype, mclass_def)
conf_lib_by_priority[0]['class_list'].append(mclass_name)
def process_constant_def(dtype, line, fn):
conf_lib_name = get_current_conf_lib_name()
verify_current_conf_lib(conf_lib_name)
# neutralize processed %%constantdef%% by converting %% to %-
constdef_match = re.search('^(.*//.*)%%constantdef%%(.*)$', line)
if constdef_match:
line = constdef_match.group(1) + '%-constantdef-%' + \
constdef_match.group(2)
match = re.search('^\s*var\s+([a-zA-Z0-9_]+)\s*=(.*)$', line)
if (not match) or (not match.group(1)) or (not match.group(2)):
error_exit('constant_def: parse failure (' + line + ')')
const_name = match.group(1)
mconst_name = conf_lib_name + '__' + const_name
mconst_def = 'var ' + mconst_name + ' =' + match.group(2) + "\n"
section_print(dtype, mconst_def)
conf_lib_by_priority[0]['constant'].append({
'name': const_name,
'element': mconst_name
})
# The pattern for macros
macro_re = re.compile('%%([a-zA-Z0-9_]*):\(([^%()]*)\)%%')
# The pattern for includes
include_re = re.compile('^[^a-z]*%%[a-z]+%%:')
# Returns a string indicating the line type
# - 'class' when the line is var classes/stemname = ...
# - 'constant' when the line defines an xxxConstants object or carries a
#   %%constantdef%% marker
# - 'screen' when the line is var screenArea = ...
# - 'test' when the line is var test = ...
# - '' otherwise
def process_line(dtype, line, fn, linenr, basedir):
line = line.rstrip('\n')
line += '\n'
mode = get_mode()
line = macro_re.sub(lambda match_group: process_macro(dtype, line, fn, linenr, match_group), line)
if include_re.search(line):
process_directive(line, fn, linenr, basedir)
elif dtype == 'classfile' and (re.search('^\s*var\s+classes\s*=', line) or \
re.search('^\s*var\s*' + stemname(fn, None) + '\s*=', line)):
if mode == 'js':
process_class_def(dtype, line, fn)
return 'class'
elif (dtype == 'constantfile' and \
re.search('^\s*var\s+[a-zA-Z0-9_]+[cC]onstants\s*=', line)) \
or \
re.search('\s*var\s+[a-zA-Z0-9_]+\s*=.*//.*%%constantdef%%', line):
if mode == 'js':
process_constant_def(dtype, line, fn)
return 'constant'
else:
if dtype == 'template' or get_mode() == 'js':
section_print(dtype, line)
if re.search('^\s*var\s+screenArea\s*=', line):
return 'screen'
if re.search('^\s*var\s+test\s*=', line):
return 'test'
return ''
def process_file(dtype, filename, basedir):
global processed_files
global nr_screen_area
global nr_test
global max_include_level
class_found = False
screen_area_found = False
test_found = False
constant_found = False
mode = get_mode()
linenr = 1
if dtype == 'foreign':
add_resource_usage('foreign', filename)
return
normalized_filename = normalize_path(filename)
if dtype == 'include' and len(fn_stack) >= max_include_level:
return
write_dep(normalized_filename)
# annotate("process_file: type='" + dtype + "' filename='" + filename +
push_include_file(dtype, normalized_filename)
if normalized_filename not in processed_files:
processed_files[normalized_filename] = 1
try:
with open(normalized_filename) as input_handle:
for line in input_handle:
gen_filename_and_line_number(dtype, normalized_filename)
line_type = process_line(dtype, line, normalized_filename, linenr, basedir)
if line_type == 'class':
if class_found:
error_exit("two class definitions in " + normalized_filename)
class_found = True
elif line_type == 'screen':
if screen_area_found:
error_exit("two screenAreas in " + normalized_filename)
screen_area_found = True
nr_screen_area += 1
elif line_type == 'test':
if test_found:
error_exit("two test definitions in " + normalized_filename)
test_found = True
nr_test += 1
elif line_type == 'constant':
constant_found = True
# if mode == 'incl' and (class_found or screen_area_found or test_found or constant_found):
# break # Stop scanning file for includes
linenr += 1
except IOError:
print("cannot open file: " + normalized_filename + " from " + fn_stack[len(fn_stack)-2], file=sys.stderr)
sys.exit(1)
if mode == 'html':
if 'html' in dtype_property[dtype]:
html_handling = dtype_property[dtype]['html']
if html_handling == 'script':
section_print(dtype, '\t<script src="' +
normalized_filename + '">')
section_print(dtype, '</script>\n')
if dtype == 'classfile' and mode != 'incl' and \
not screen_area_found and not class_found and not test_found:
print("WARNING: no screenArea, classes or test defined in " + normalized_filename)
pop_include_file(dtype, normalized_filename)
def main():
global reference_dir
parser = get_arg_parser()
args = parser.parse_args()
mode = args.mode
set_reference_dir(args.referencedir)
set_mode(mode)
set_source_dir(args.sourcedir)
append_root_path(args.langdir)
append_root_path(args.cdldir)
set_include_path(args.includedir)
set_dep_file(args.dep_file)
set_res_out_file(args.resourceOutFile)
set_res_use_file(args.resourceUseFile)
set_build_info_file(args.buildInfoFile)
set_common_image_dir(args.commonImageDir)
set_common_data_dir(args.commonDataDir)
cdl_source = args.cdl_source
set_cdl_source(cdl_source)
template = args.template
out_file = args.out_file
set_make_target(args.dep_target, out_file)
set_max_include_level(args.max_include_level)
set_out_file(out_file)
libConf = args.libConf
set_lib_conf(libConf)
set_title(args.title)
set_splash_screen_url(args.splash_screen_url)
process_file('template', template, os.path.dirname(template))
write_resource_usage()
if mode == 'js' and out_file.endswith(".comp.js.tmp") and \
nr_screen_area != 1:
error_exit("no screenArea definition")
sys.exit(0)
if __name__ == "__main__":
main()
| true
| true
|
1c40984042dd5944e2952e5085793718543a185e
| 464
|
py
|
Python
|
setup.py
|
rohansurve212/Black_Friday_Data_Hack
|
83e536db35383b7e5266cf8370405b20aa4641b0
|
[
"MIT"
] | null | null | null |
setup.py
|
rohansurve212/Black_Friday_Data_Hack
|
83e536db35383b7e5266cf8370405b20aa4641b0
|
[
"MIT"
] | null | null | null |
setup.py
|
rohansurve212/Black_Friday_Data_Hack
|
83e536db35383b7e5266cf8370405b20aa4641b0
|
[
"MIT"
] | 1
|
2019-11-20T20:52:32.000Z
|
2019-11-20T20:52:32.000Z
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='A retail company "ABC Private Limited" wants to understand the customer purchase behaviour (specifically, purchase amount) against various products of different categories. They have shared purchase summary of various customers for selected high volume products from last month.',
author='Rohan_Surve',
license='MIT',
)
| 42.181818
| 297
| 0.760776
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='A retail company "ABC Private Limited" wants to understand the customer purchase behaviour (specifically, purchase amount) against various products of different categories. They have shared purchase summary of various customers for selected high volume products from last month.',
author='Rohan_Surve',
license='MIT',
)
| true
| true
|
1c4098c795f2d81b7286028374fdf10586f51fe0
| 149
|
py
|
Python
|
application/rustLab/src/saySMTH.py
|
pianoft/subStringSearchUsingRust
|
722ec006d54614b61708a804ed0f658d1b64841d
|
[
"MIT"
] | null | null | null |
application/rustLab/src/saySMTH.py
|
pianoft/subStringSearchUsingRust
|
722ec006d54614b61708a804ed0f658d1b64841d
|
[
"MIT"
] | null | null | null |
application/rustLab/src/saySMTH.py
|
pianoft/subStringSearchUsingRust
|
722ec006d54614b61708a804ed0f658d1b64841d
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
def say(files):
subprocess.run(['spd-say -w -r 50 -i 100 "'+files+'";'], shell=True)
return
say(sys.argv[1])
| 14.9
| 72
| 0.630872
|
import subprocess
import sys
def say(files):
subprocess.run(['spd-say -w -r 50 -i 100 "'+files+'";'], shell=True)
return
say(sys.argv[1])
| true
| true
|
1c4098d9a3e1c3fcf7b89358aabcf4cc56825e04
| 417
|
py
|
Python
|
src/dataset/dataset_factory.py
|
lupvasile/keypoint-mot
|
e185f150e5ea5f234c06402b8ea5db30487d16cc
|
[
"Apache-2.0"
] | null | null | null |
src/dataset/dataset_factory.py
|
lupvasile/keypoint-mot
|
e185f150e5ea5f234c06402b8ea5db30487d16cc
|
[
"Apache-2.0"
] | 1
|
2020-10-06T13:17:41.000Z
|
2020-10-06T17:38:47.000Z
|
src/dataset/dataset_factory.py
|
lupvasile/keypoint-mot
|
e185f150e5ea5f234c06402b8ea5db30487d16cc
|
[
"Apache-2.0"
] | 2
|
2020-09-01T05:48:25.000Z
|
2021-12-27T18:34:51.000Z
|
from config import config
from dataset import generic_dataset, nuscenes_dataset
DATASETS = {'nuscenes': nuscenes_dataset.NuscenesDataset}
def get_dataset(dataset_name: str, subset: str, opts: generic_dataset.DatasetOptions, mini_version: bool):
return DATASETS[dataset_name](subset=subset, dataset_root=config.get_data_dir(dataset_name), opts=opts,
mini_version=mini_version)
| 41.7
| 107
| 0.757794
|
from config import config
from dataset import generic_dataset, nuscenes_dataset
DATASETS = {'nuscenes': nuscenes_dataset.NuscenesDataset}
def get_dataset(dataset_name: str, subset: str, opts: generic_dataset.DatasetOptions, mini_version: bool):
return DATASETS[dataset_name](subset=subset, dataset_root=config.get_data_dir(dataset_name), opts=opts,
mini_version=mini_version)
| true
| true
|
1c40996853a1bb6f37c0af088d55832404461e76
| 1,232
|
py
|
Python
|
external/workload-automation/wa/workloads/homescreen/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 159
|
2016-01-25T11:08:39.000Z
|
2022-03-28T05:20:41.000Z
|
external/workload-automation/wa/workloads/homescreen/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 656
|
2016-01-25T11:16:56.000Z
|
2022-03-23T16:03:28.000Z
|
external/workload-automation/wa/workloads/homescreen/__init__.py
|
qais-yousef/lisa
|
8343e26bf0565589928a69ccbe67b1be03403db7
|
[
"Apache-2.0"
] | 127
|
2015-03-11T16:36:17.000Z
|
2022-02-15T02:26:43.000Z
|
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
from wa import Workload, Parameter
class HomeScreen(Workload):
name = 'homescreen'
description = """
A workload that goes to the home screen and idles for the
specified duration.
"""
supported_platforms = ['android']
parameters = [
Parameter('duration', kind=int, default=20,
description='Specifies the duration, in seconds, of this workload.'),
]
def setup(self, context):
self.target.clear_logcat()
self.target.execute('input keyevent 3') # press the home key
def run(self, context):
self.target.sleep(self.duration)
| 29.333333
| 87
| 0.696429
|
from wa import Workload, Parameter
class HomeScreen(Workload):
name = 'homescreen'
description = """
A workload that goes to the home screen and idles for the
specified duration.
"""
supported_platforms = ['android']
parameters = [
Parameter('duration', kind=int, default=20,
description='Specifies the duration, in seconds, of this workload.'),
]
def setup(self, context):
self.target.clear_logcat()
self.target.execute('input keyevent 3')
def run(self, context):
self.target.sleep(self.duration)
| true
| true
|
1c409a06ca2bbf206d7c22c10aa1a7a8e7d67207
| 715
|
py
|
Python
|
eeggan/pytorch/modules/modify/noise.py
|
kahartma/eeggan
|
1fd5b45938ea6f1033f301430a5c7fb3b9bf4fb4
|
[
"BSD-3-Clause"
] | 3
|
2020-08-04T08:54:55.000Z
|
2021-02-19T14:17:46.000Z
|
eeggan/pytorch/modules/modify/noise.py
|
kahartma/eeggan
|
1fd5b45938ea6f1033f301430a5c7fb3b9bf4fb4
|
[
"BSD-3-Clause"
] | 2
|
2020-10-08T14:14:20.000Z
|
2021-06-11T07:08:42.000Z
|
eeggan/pytorch/modules/modify/noise.py
|
kahartma/eeggan
|
1fd5b45938ea6f1033f301430a5c7fb3b9bf4fb4
|
[
"BSD-3-Clause"
] | 2
|
2020-07-06T11:00:36.000Z
|
2020-08-10T20:48:43.000Z
|
# Author: Kay Hartmann <kg.hartma@gmail.com>
import torch
from torch import nn
from eeggan.pytorch.modules.module import Module
from eeggan.pytorch.utils.weights import fill_weights_normal
class WeightedNoise(Module):
def __init__(self, n_features, n_time):
super().__init__()
self.weight_conv = nn.Conv1d(1, n_features, 1, bias=False)
self.n_features = n_features
self.n_time = n_time
fill_weights_normal(self.weight_conv.weight)
def forward(self, x, **kwargs):
noise = torch.normal(0, 1, size=(x.size(0), 1, self.n_time))
if x.is_cuda:
noise = noise.cuda()
noise = self.weight_conv.forward(noise)
return x + noise
| 28.6
| 68
| 0.667133
|
import torch
from torch import nn
from eeggan.pytorch.modules.module import Module
from eeggan.pytorch.utils.weights import fill_weights_normal
class WeightedNoise(Module):
def __init__(self, n_features, n_time):
super().__init__()
self.weight_conv = nn.Conv1d(1, n_features, 1, bias=False)
self.n_features = n_features
self.n_time = n_time
fill_weights_normal(self.weight_conv.weight)
def forward(self, x, **kwargs):
noise = torch.normal(0, 1, size=(x.size(0), 1, self.n_time))
if x.is_cuda:
noise = noise.cuda()
noise = self.weight_conv.forward(noise)
return x + noise
| true
| true
|
1c409ab22a09222efa40721215f153389109b31f
| 2,408
|
py
|
Python
|
python/docs/file_handling.py
|
caleberi/LeetCode
|
fa170244648f73e76d316a6d7fc0e813adccaa82
|
[
"MIT"
] | 1
|
2021-08-10T20:00:24.000Z
|
2021-08-10T20:00:24.000Z
|
python/docs/file_handling.py
|
caleberi/LeetCode
|
fa170244648f73e76d316a6d7fc0e813adccaa82
|
[
"MIT"
] | null | null | null |
python/docs/file_handling.py
|
caleberi/LeetCode
|
fa170244648f73e76d316a6d7fc0e813adccaa82
|
[
"MIT"
] | 3
|
2021-06-11T11:56:39.000Z
|
2021-08-10T08:50:49.000Z
|
import sys
from random import randint
import pickle
_file_object = None
def count_file_lines(path):
file = open("input.txt","r")
count = 0
# while file.readline()!='':
# count+=1
#OR
for line in file:
count+=1
file.close();
return count
def set_stdout(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stdout = _file_object
def reset_stdout():
sys.stdout = sys.__stdout__
def reset_stdin():
sys.stdin = sys.__stdin__
def reset_stderr():
sys.stderr = sys.__stderr__
def set_stdin(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stdin = _file_object
def set_stderr(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stderr = _file_object
"""
mio module , (contains functions capture_output ,restore_output
print_file , and clear_file )
"""
def capture_output(file="capture_file.txt"):
"""redirect the standard output to capture_output.txt """
global _file_object
print("output will be sent to file : {0} ".format(file))
print("restore to normal by calling mio.restore_output()")
set_stdout(file,"w")
def restore_output():
"""
restore the standard output back to the default stdout
"""
global _file_object
reset_stdout()
_file_object.close()
print("standard output has been back to stdout (normal)")
def print_file(file="capture_file.txt"):
"""
print the given file to the stdout
"""
set_stdout(file,"r")
print(_file_object.read)
_file_object.close()
def clear_file(file="capture_file.txt"):
"""
clears the content of the file
"""
global _file_object
_file_object = open(file,"w")
_file_object.close()
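# Illustrative usage sketch (the file name is hypothetical); defining the
# helper here does not change behaviour, since nothing calls it.
def _example_capture_usage():
    capture_output("run.log")      # subsequent print() output goes to run.log
    print("some diagnostic text")
    restore_output()               # print() goes back to the terminal
    print_file("run.log")          # dump the captured text to stdout
    clear_file("run.log")          # truncate the capture file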
mem_cache ={}
def sole(m,n,t,fn):
if (m,n,t) in mem_cache:
return mem_cache[(m,n,t)]
else:
# time-consuming operation
result = fn(randint(1,1000))
mem_cache[(m,n,t)] = result
return result
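# Illustrative use (the arguments and the expensive function are hypothetical):
#   result = sole(2, 3, 'run1', some_expensive_function)
# repeated calls with the same (m, n, t) key return the cached result without
# re-invoking the function.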
_mem_disk_file = "mem_cache"
file = open(_mem_disk_file,"rb")
mem_cache = pickle.load(file)
file.close()
def save_mem_to_disk():
"""
save the mem_cache to disk
"""
global mem_cache,_mem_disk_file
file=open(_mem_disk_file,"wb")
pickle.dump(mem_cache,file)
file.close()
def show_mem_cache():
global mem_cache,_mem_disk_file
print(_mem_disk_file)
| 21.122807
| 65
| 0.656977
|
import sys
from random import randint
import pickle
_file_object = None
def count_file_lines(path):
file = open("input.txt","r")
count = 0
for line in file:
count+=1
file.close();
return count
def set_stdout(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stdout = _file_object
def reset_stdout():
sys.stdout = sys.__stdout__
def reset_stdin():
sys.stdout = sys.__stdin__
def reset_stderr():
sys.stderr = sys.__stderr__
def set_stdin(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stdin = _file_object
def set_stderr(path,mode):
global _file_object
_file_object = open(path,mode)
sys.stderr = _file_object
def capture_output(file="capture_file.txt"):
global _file_object
print("output will be sent to file : {0} ".format(file))
print("restore to normal by calling mio.restore_output()")
set_stdout(file,"w")
def restore_output():
global _file_object
reset_stdout()
_file_object.close()
print("standard output has been back to stdout (normal)")
def print_file(file="capture_file.txt"):
set_stdout(file,"r")
print(_file_object.read)
_file_object.close()
def clear_file(file="capture_file.txt"):
global _file_object
_file_object = open(file,"w")
_file_object.close()
mem_cache ={}
def sole(m,n,t,fn):
if (m,n,t) in mem_cache:
return mem_cache[(m,n,t)]
else:
result = fn(randint(1,1000))
mem_cache[(m,n,t)] = result
return result
_mem_disk_file = "mem_cache"
file = open(_mem_disk_file,"rb")
mem_cache = pickle.load(file)
file.close()
def save_mem_to_disk():
global mem_cache,_mem_disk_file
file=open(_mem_disk_file,"wb")
pickle.dump(mem_cache,file)
file.close()
def show_mem_cache():
global mem_cache,_mem_disk_file
print(_mem_disk_file)
| true
| true
|
1c409ad508c1eae122b7a06a9bacbc2b829b4b63
| 1,283
|
py
|
Python
|
homework/hw07/editor/primitives.py
|
zltshadow/CS61A-2019-summer
|
0f5dd0be5f51927364aec1bc974526837328b695
|
[
"MIT"
] | 3
|
2021-11-21T06:09:39.000Z
|
2022-03-12T08:05:27.000Z
|
project/pro4-scheme/editor/primitives.py
|
zltshadow/CS61A-2019-summer
|
0f5dd0be5f51927364aec1bc974526837328b695
|
[
"MIT"
] | null | null | null |
project/pro4-scheme/editor/primitives.py
|
zltshadow/CS61A-2019-summer
|
0f5dd0be5f51927364aec1bc974526837328b695
|
[
"MIT"
] | null | null | null |
from typing import List
from helper import verify_exact_callable_length
from log import Holder
from datamodel import Expression
from evaluate_apply import Frame, evaluate_all, Applicable
class BuiltIn(Applicable):
def execute(self, operands: List[Expression], frame: Frame, gui_holder: Holder, eval_operands=True) -> Expression:
if eval_operands:
operands = evaluate_all(
operands, frame, gui_holder.expression.children[1:])
gui_holder.expression.set_entries([])
gui_holder.apply()
return self.execute_evaluated(operands, frame)
def execute_evaluated(self, operands: List[Expression], frame: Frame) -> Expression:
raise NotImplementedError()
class SingleOperandPrimitive(BuiltIn):
def execute_evaluated(self, operands: List[Expression], frame: Frame) -> Expression:
verify_exact_callable_length(self, 1, len(operands))
operand = operands[0]
return self.execute_simple(operand)
def execute_simple(self, operand: Expression) -> Expression:
raise NotImplementedError()
def load_primitives():
__import__("arithmetic")
__import__("lists")
__import__("type_checking")
__import__("console")
__import__("graphics")
__import__("visualizing")
| 32.075
| 118
| 0.718628
|
from typing import List
from helper import verify_exact_callable_length
from log import Holder
from datamodel import Expression
from evaluate_apply import Frame, evaluate_all, Applicable
class BuiltIn(Applicable):
def execute(self, operands: List[Expression], frame: Frame, gui_holder: Holder, eval_operands=True) -> Expression:
if eval_operands:
operands = evaluate_all(
operands, frame, gui_holder.expression.children[1:])
gui_holder.expression.set_entries([])
gui_holder.apply()
return self.execute_evaluated(operands, frame)
def execute_evaluated(self, operands: List[Expression], frame: Frame) -> Expression:
raise NotImplementedError()
class SingleOperandPrimitive(BuiltIn):
def execute_evaluated(self, operands: List[Expression], frame: Frame) -> Expression:
verify_exact_callable_length(self, 1, len(operands))
operand = operands[0]
return self.execute_simple(operand)
def execute_simple(self, operand: Expression) -> Expression:
raise NotImplementedError()
def load_primitives():
__import__("arithmetic")
__import__("lists")
__import__("type_checking")
__import__("console")
__import__("graphics")
__import__("visualizing")
| true
| true
|
1c409b1d5977d47078785e85a8ed5dcc6bda98ef
| 17,700
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/local_network_gateways_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/local_network_gateways_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/local_network_gateways_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a local network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local
network gateway operation.
:type parameters:
~azure.mgmt.network.v2017_08_01.models.LocalNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
LocalNetworkGateway or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.LocalNetworkGateway]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
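# Illustrative usage sketch (not part of the generated class): create_or_update returns
# an AzureOperationPoller, so a caller typically blocks on result() until the
# long-running PUT finishes. The client construction and parameter values below are
# assumptions for illustration only.
#
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(credentials, subscription_id)
#   poller = client.local_network_gateways.create_or_update(
#       'my-resource-group', 'my-local-gateway', gateway_parameters)
#   gateway = poller.result()  # waits for provisioning to complete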
def get(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LocalNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_08_01.models.LocalNetworkGateway or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _delete_initial(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LocalNetworkGateway
:rtype:
~azure.mgmt.network.v2017_08_01.models.LocalNetworkGatewayPaged[~azure.mgmt.network.v2017_08_01.models.LocalNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
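# Illustrative usage sketch (not part of the generated class): the paged object returned
# by list() fetches follow-up pages lazily through internal_paging as it is iterated.
# The client variable below is an assumption for illustration only.
#
#   for gateway in client.local_network_gateways.list('my-resource-group'):
#       print(gateway.name, gateway.gateway_ip_address)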
| 45.153061
| 157
| 0.669096
|
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class LocalNetworkGatewaysOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _delete_initial(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
def internal_paging(next_link=None, raw=False):
if not next_link:
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
| true
| true
|
1c409d645821398acbf2c2725c69932ce4d91f2b
| 5,431
|
py
|
Python
|
data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150303a/modeling/fit_errorchunk_models.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150303a/modeling/fit_errorchunk_models.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150303a/modeling/fit_errorchunk_models.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
#!/usr/bin/env python
import os, sys, code
import pickle as pickle
from djeval import *
import numpy as np
from pandas import read_pickle, cut, concat, Series, get_dummies
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, ExtraTreesClassifier
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.metrics import average_precision_score
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
NUM_ELO_GROUPS = int(sys.argv[1])
NUM_ERRORCHUNKS = int(sys.argv[2])
NUM_ESTIMATORS = int(sys.argv[3])
LOW_BOUND = float(sys.argv[4])
HIGH_BOUND = float(sys.argv[5])
n_cv_groups = 2
def shell():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
chunk_spacing_factor = (HIGH_BOUND / LOW_BOUND) ** (1/(float(NUM_ERRORCHUNKS)-1.))
chunk_bounds = [-1. * LOW_BOUND * (chunk_spacing_factor ** i) for i in range(0,NUM_ERRORCHUNKS)]
chunk_bounds.insert(0, 0.)
msg('errorchunk bounds are %s' % chunk_bounds)
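# Worked example (illustrative, not part of the original script): with LOW_BOUND=0.5,
# HIGH_BOUND=8.0 and NUM_ERRORCHUNKS=5, chunk_spacing_factor = (8.0/0.5) ** (1/4) = 2.0,
# so chunk_bounds becomes [0.0, -0.5, -1.0, -2.0, -4.0, -8.0]: a zero bound followed by
# geometrically spaced negative error thresholds.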
msg('splitting ELOs')
eheaders_filename = '/data/eheaders.p'
eheaders_file = open(eheaders_filename, 'rb')
eheaders = pickle.load(eheaders_file)
elos = list(eheaders['elos'].values())
elo_bins = np.percentile(elos, np.arange(0, 100. + 1e-9, 100./float(NUM_ELO_GROUPS)))
msg('ELO bins are %s' % str(elo_bins))
msg('reading movedata')
moves_df = read_pickle('/data/movedata.p')
moves_df['clipped_movergain'] = moves_df['movergain'].clip(-1e9,0)
train_df = moves_df[moves_df['elo'].notnull()]
chain_validating = True
if chain_validating:
train_df = train_df[train_df['gamenum'] % 3 == 0]
msg('Looking at %i moves' % train_df.shape[0])
train_df['elo_groups'] = cut(train_df['elo'], elo_bins, include_lowest=True)
blundermodel_dir = sys.argv[6]
if not os.path.exists(blundermodel_dir):
os.makedirs(blundermodel_dir)
categorical_features = ['bestmove_piece', 'bestmove_dir']
dummy_features = []
for index, cf in enumerate(categorical_features):
dummies = get_dummies(train_df[cf], prefix=cf)
dummy_features.extend(dummies.columns.values)
features = ['side', 'halfply', 'moverscore', 'bestmove_is_capture', 'bestmove_is_check', 'depth', 'seldepth', 'num_bestmoves', 'num_bestmove_changes', 'bestmove_depths_agreeing', 'deepest_change', 'bestmove_dist', 'prevgain']
features.extend(dummy_features)
joblib.dump([elo_bins, chunk_bounds, features], blundermodel_dir + 'groups.p')
# more features you could have:
# * loss for the 2nd, 3rd, 4th, 5th best move, etc (perfect move is
# less likely if there are several very close alternatives)
modelnum = 0
for elo_name, elo_df in train_df.groupby(train_df['elo_groups']):
subset_df = elo_df
for cb in chunk_bounds:
msg('working on elo group %s, of size %i. fitting model for error >= %f' % (elo_name, subset_df.shape[0], cb))
X = subset_df[features]
y = (subset_df['clipped_movergain'] >= cb)
rfc = True
if rfc:
extra = True
if extra:
clf = ExtraTreesClassifier(min_samples_split=200, min_samples_leaf=50, n_jobs=-1, n_estimators=NUM_ESTIMATORS, verbose=1)
else:
clf = RandomForestClassifier(min_samples_split=200, min_samples_leaf=50, n_jobs=-1, n_estimators=NUM_ESTIMATORS, verbose=1, oob_score=True)
else:
clf = GradientBoostingClassifier(min_samples_split=500, min_samples_leaf=300, n_estimators=NUM_ESTIMATORS, verbose=1, subsample=0.5, learning_rate=0.2)
msg('CROSS VALIDATING')
skf = StratifiedKFold(y, n_folds=2, shuffle=True)
ins = []
outs = []
for train_index, test_index in skf:
foo = clf.fit(X.iloc[train_index], y.iloc[train_index])
ins.append(average_precision_score(clf.predict(X.iloc[train_index]), y.iloc[train_index]))
outs.append(average_precision_score(clf.predict(X.iloc[test_index]), y.iloc[test_index]))
msg("insample average precision score: %s = %f" % (ins, np.mean(ins)))
msg("outsample average precision score: %s = %f" % (outs, np.mean(outs)))
# cvs = cross_val_score(clf, X, y, cv=n_cv_groups, n_jobs=-1, scoring='roc_auc')
# msg('CV scores: %s = %f' % (cvs, np.mean(cvs)))
msg('FITTING')
if chain_validating:
fit_df = subset_df[subset_df['gamenum'] % 3 == 0]
fit_X = fit_df[features]
fit_y = (fit_df['clipped_movergain'] >= cb)
clf.fit(fit_X, fit_y)
else:
clf.fit(X, y)
# measure in-sample score
# measure extent of over-fitting
# measure model quality in-sample and out-of-sample
pred_y = clf.predict_proba(X)
pred_y = [x[1] for x in pred_y]
combo = concat([Series(y.values), Series(pred_y)], axis=1)
combo.columns = ['actual', 'predicted']
combo_groups = cut(combo['predicted'], 10)
msg("PREDICTION DISTRIBUTION AND SUCCESS:\n%s" % combo.groupby(combo_groups)['actual'].agg({'mean actual': np.mean, 'count': len}))
msg("FULL INSAMPLE AVERAGE PRECISION SCORE: %f" % average_precision_score(y, pred_y))
joblib.dump([elo_name, cb, clf], '%s%i.p' % (blundermodel_dir, modelnum))
modelnum = modelnum + 1
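# Chain to the next, more negative bound: keep only the moves whose clipped_movergain
# fell below the current threshold, so each successive model is fit on progressively
# larger errors.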
subset_df = subset_df[~y]
| 42.100775
| 226
| 0.669674
|
import os, sys, code
import pickle as pickle
from djeval import *
import numpy as np
from pandas import read_pickle, cut, concat, Series, get_dummies
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, ExtraTreesClassifier
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.metrics import average_precision_score
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
NUM_ELO_GROUPS = int(sys.argv[1])
NUM_ERRORCHUNKS = int(sys.argv[2])
NUM_ESTIMATORS = int(sys.argv[3])
LOW_BOUND = float(sys.argv[4])
HIGH_BOUND = float(sys.argv[5])
n_cv_groups = 2
def shell():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
chunk_spacing_factor = (HIGH_BOUND / LOW_BOUND) ** (1/(float(NUM_ERRORCHUNKS)-1.))
chunk_bounds = [-1. * LOW_BOUND * (chunk_spacing_factor ** i) for i in range(0,NUM_ERRORCHUNKS)]
chunk_bounds.insert(0, 0.)
msg('errorchunk bounds are %s' % chunk_bounds)
msg('splitting ELOs')
eheaders_filename = '/data/eheaders.p'
eheaders_file = open(eheaders_filename, 'rb')
eheaders = pickle.load(eheaders_file)
elos = list(eheaders['elos'].values())
elo_bins = np.percentile(elos, np.arange(0, 100. + 1e-9, 100./float(NUM_ELO_GROUPS)))
msg('ELO bins are %s' % str(elo_bins))
msg('reading movedata')
moves_df = read_pickle('/data/movedata.p')
moves_df['clipped_movergain'] = moves_df['movergain'].clip(-1e9,0)
train_df = moves_df[moves_df['elo'].notnull()]
chain_validating = True
if chain_validating:
train_df = train_df[train_df['gamenum'] % 3 == 0]
msg('Looking at %i moves' % train_df.shape[0])
train_df['elo_groups'] = cut(train_df['elo'], elo_bins, include_lowest=True)
blundermodel_dir = sys.argv[6]
if not os.path.exists(blundermodel_dir):
os.makedirs(blundermodel_dir)
categorical_features = ['bestmove_piece', 'bestmove_dir']
dummy_features = []
for index, cf in enumerate(categorical_features):
dummies = get_dummies(train_df[cf], prefix=cf)
dummy_features.extend(dummies.columns.values)
features = ['side', 'halfply', 'moverscore', 'bestmove_is_capture', 'bestmove_is_check', 'depth', 'seldepth', 'num_bestmoves', 'num_bestmove_changes', 'bestmove_depths_agreeing', 'deepest_change', 'bestmove_dist', 'prevgain']
features.extend(dummy_features)
joblib.dump([elo_bins, chunk_bounds, features], blundermodel_dir + 'groups.p')
modelnum = 0
for elo_name, elo_df in train_df.groupby(train_df['elo_groups']):
subset_df = elo_df
for cb in chunk_bounds:
msg('working on elo group %s, of size %i. fitting model for error >= %f' % (elo_name, subset_df.shape[0], cb))
X = subset_df[features]
y = (subset_df['clipped_movergain'] >= cb)
rfc = True
if rfc:
extra = True
if extra:
clf = ExtraTreesClassifier(min_samples_split=200, min_samples_leaf=50, n_jobs=-1, n_estimators=NUM_ESTIMATORS, verbose=1)
else:
clf = RandomForestClassifier(min_samples_split=200, min_samples_leaf=50, n_jobs=-1, n_estimators=NUM_ESTIMATORS, verbose=1, oob_score=True)
else:
clf = GradientBoostingClassifier(min_samples_split=500, min_samples_leaf=300, n_estimators=NUM_ESTIMATORS, verbose=1, subsample=0.5, learning_rate=0.2)
msg('CROSS VALIDATING')
skf = StratifiedKFold(y, n_folds=2, shuffle=True)
ins = []
outs = []
for train_index, test_index in skf:
foo = clf.fit(X.iloc[train_index], y.iloc[train_index])
ins.append(average_precision_score(clf.predict(X.iloc[train_index]), y.iloc[train_index]))
outs.append(average_precision_score(clf.predict(X.iloc[test_index]), y.iloc[test_index]))
msg("insample average precision score: %s = %f" % (ins, np.mean(ins)))
msg("outsample average precision score: %s = %f" % (outs, np.mean(outs)))
msg('FITTING')
if chain_validating:
fit_df = subset_df[subset_df['gamenum'] % 3 == 0]
fit_X = fit_df[features]
fit_y = (fit_df['clipped_movergain'] >= cb)
clf.fit(fit_X, fit_y)
else:
clf.fit(X, y)
pred_y = clf.predict_proba(X)
pred_y = [x[1] for x in pred_y]
combo = concat([Series(y.values), Series(pred_y)], axis=1)
combo.columns = ['actual', 'predicted']
combo_groups = cut(combo['predicted'], 10)
msg("PREDICTION DISTRIBUTION AND SUCCESS:\n%s" % combo.groupby(combo_groups)['actual'].agg({'mean actual': np.mean, 'count': len}))
msg("FULL INSAMPLE AVERAGE PRECISION SCORE: %f" % average_precision_score(y, pred_y))
joblib.dump([elo_name, cb, clf], '%s%i.p' % (blundermodel_dir, modelnum))
modelnum = modelnum + 1
subset_df = subset_df[~y]
| true
| true
|
1c409e3d1e5473d1df3c2d6d1260a8eddbe059ab
| 30,300
|
py
|
Python
|
framework/JobHandler.py
|
bonifak/raven
|
666978e8546d1f948b2ad55a4c3b0fce5cc8533c
|
[
"Apache-2.0"
] | null | null | null |
framework/JobHandler.py
|
bonifak/raven
|
666978e8546d1f948b2ad55a4c3b0fce5cc8533c
|
[
"Apache-2.0"
] | null | null | null |
framework/JobHandler.py
|
bonifak/raven
|
666978e8546d1f948b2ad55a4c3b0fce5cc8533c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 5, 2013
@author: alfoa, cogljj, crisr
"""
#for future compatibility with Python 3-----------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3-------------------------------------------
#External Modules---------------------------------------------------------------
import time
import collections
import subprocess
import os
import copy
import sys
import abc
import threading
import random
import socket
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from utils import utils
from BaseClasses import BaseType
import MessageHandler
import Runners
import Models
# for internal parallel
import pp
import ppserver
# end internal parallel module
#Internal Modules End-----------------------------------------------------------
## FIXME: Finished jobs can bog down the queue waiting for other objects to take
## them away. Can we shove them onto a different list and free up the job queue?
class JobHandler(MessageHandler.MessageUser):
"""
JobHandler class. This handles the execution of any job in the RAVEN
framework
"""
def __init__(self):
"""
Init method
@ In, None
@ Out, None
"""
self.printTag = 'Job Handler'
self.runInfoDict = {}
self.isParallelPythonInitialized = False
self.sleepTime = 0.005
self.completed = False
## Determines whether to collect and print job timing summaries at the end of job runs.
self.__profileJobs = False
## Prevents the pending queue from growing indefinitely, but also allowing
## extra jobs to be queued to prevent starving parallelized environments of
## jobs.
self.maxQueueSize = None
############################################################################
## The following variables are protected by the __queueLock
## Placeholders for each actively running job. When a job finishes, its
## spot in one of these lists will be reset to None and the next Runner will
## be placed in a free None spot, and set to start
self.__running = []
self.__clientRunning = []
## Queue of jobs to be run, when something on the list above opens up, the
## corresponding queue will pop a job (Runner) and put it into that location
## and set it to start
self.__queue = collections.deque()
self.__clientQueue = collections.deque()
## A counter used for uniquely identifying the next id for an ExternalRunner
## InternalRunners will increment this counter, but do not use it currently
self.__nextId = 0
## List of finished jobs. When a job finishes, it is placed here until
## something from the main thread can remove them.
self.__finished = []
## End block of __queueLock protected variables
############################################################################
self.__queueLock = threading.RLock()
## List of submitted job identifiers, includes jobs that have completed as
## this list is not cleared until a new step is entered
self.__submittedJobs = []
## Dict of failed jobs of the form { identifer: metadata }
self.__failedJobs = {}
#self.__noResourcesJobs = []
def initialize(self, runInfoDict, messageHandler):
"""
Method to initialize the JobHandler
@ In, runInfoDict, dict, dictionary of run info settings
@ In, messageHandler, MessageHandler object, instance of the global RAVEN
message handler
@ Out, None
"""
self.runInfoDict = runInfoDict
self.messageHandler = messageHandler
# set the maximum queue size (number of jobs to queue past the running number)
self.maxQueueSize = runInfoDict['maxQueueSize']
# defaults to None; if None, then use batchSize instead
if self.maxQueueSize is None:
self.maxQueueSize = runInfoDict['batchSize']
# if the requested max size is less than 1, we can't do that, so take 1 instead
if self.maxQueueSize < 1:
self.raiseAWarning('maxQueueSize was set to be less than 1! Setting to 1...')
self.maxQueueSize = 1
self.raiseADebug('Setting maxQueueSize to',self.maxQueueSize)
#initialize PBS
with self.__queueLock:
self.__running = [None]*self.runInfoDict['batchSize']
self.__clientRunning = [None]*self.runInfoDict['batchSize']
def __checkAndRemoveFinished(self, running):
"""
Method to check if a run is finished and remove it from the queue
@ In, running, instance, the job instance (InternalRunner or ExternalRunner)
@ Out, None
"""
with self.__queueLock:
returnCode = running.getReturnCode()
if returnCode != 0:
metadataFailedRun = running.getMetadata()
metadataToKeep = metadataFailedRun
if metadataFailedRun is not None:
metadataKeys = list(metadataFailedRun.keys())
if 'jobHandler' in metadataKeys:
metadataKeys.pop(metadataKeys.index("jobHandler"))
metadataToKeep = { keepKey: metadataFailedRun[keepKey] for keepKey in metadataKeys }
## FIXME: The running.command was always internal now, so I removed it.
## We should probably find a way to give more pertinent information.
self.raiseAMessage(" Process Failed " + str(running) + " internal returnCode " + str(returnCode))
self.__failedJobs[running.identifier]=(returnCode,copy.deepcopy(metadataToKeep))
def __initializeParallelPython(self):
"""
Internal method aimed at initializing the internal parallel system.
It initializes the parallel python implementation (with its socketing system) in
case RAVEN is run on a cluster with multiple nodes or NumMPI > 1,
otherwise multi-threading is used.
@ In, None
@ Out, None
"""
## Check if the list of unique nodes is present and, in case, initialize the
## socket
if self.runInfoDict['internalParallel']:
if len(self.runInfoDict['Nodes']) > 0:
availableNodes = [nodeId.strip() for nodeId in self.runInfoDict['Nodes']]
## Set the initial port randomly among the user accessible ones
## Is there any problem if we select the same port as something else?
randomPort = random.randint(1024,65535)
## Get localHost and servers
localHostName, ppservers = self.__runRemoteListeningSockets(randomPort)
self.raiseADebug("Local host is "+ localHostName)
if len(ppservers) == 0:
## We are on a single node
self.ppserver = pp.Server(ncpus=len(availableNodes))
else:
## We are using multiple nodes
self.raiseADebug("Servers found are " + ','.join(ppservers))
self.raiseADebug("Server port in use is " + str(randomPort))
self.ppserver = pp.Server(ncpus=0, ppservers=tuple(ppservers))
else:
## We are using the parallel python system
self.ppserver = pp.Server(ncpus=int(self.runInfoDict['totalNumCoresUsed']))
else:
## We are just using threading
self.ppserver = None
self.isParallelPythonInitialized = True
def __getLocalAndRemoteMachineNames(self):
"""
Method to get the qualified host and remote nodes' names
@ In, None
@ Out, hostNameMapping, dict, dictionary containing the qualified names
{'local':hostName,'remote':{nodeName1:IP1,nodeName2:IP2,etc}}
"""
hostNameMapping = {'local':"",'remote':{}}
## Store the local machine name as its fully-qualified domain name (FQDN)
hostNameMapping['local'] = str(socket.getfqdn()).strip()
self.raiseADebug("Local Host is " + hostNameMapping['local'])
## collect the qualified hostnames for each remote node
for nodeId in list(set(self.runInfoDict['Nodes'])):
hostNameMapping['remote'][nodeId.strip()] = socket.gethostbyname(nodeId.strip())
self.raiseADebug("Remote Host identified " + hostNameMapping['remote'][nodeId.strip()])
return hostNameMapping
def __runRemoteListeningSockets(self,newPort):
"""
Method to activate the remote sockets for parallel python
@ In, newPort, integer, the communication port to use
@ Out, (qualifiedHostName, ppservers), tuple, tuple containing:
- in position 0 the host name and
- in position 1 the list containing the nodes in which the remote
sockets have been activated
"""
## Get the local machine name and the remote nodes' names
hostNameMapping = self.__getLocalAndRemoteMachineNames()
qualifiedHostName = hostNameMapping['local']
remoteNodesIP = hostNameMapping['remote']
## Strip out the nodes' names
availableNodes = [node.strip() for node in self.runInfoDict['Nodes']]
## Get unique nodes
uniqueNodes = list(set(availableNodes))
ppservers = []
if len(uniqueNodes) > 1:
## There are remote nodes that need to be activated
## Locate the ppserver script to be executed
ppserverScript = os.path.join(self.runInfoDict['FrameworkDir'],"contrib","pp","ppserver.py")
## Modify the python path used by the local environment
localenv = os.environ.copy()
pathSeparator = os.pathsep
localenv["PYTHONPATH"] = pathSeparator.join(sys.path)
for nodeId in uniqueNodes:
## Build the filename
outFileName = nodeId.strip()+"_port:"+str(newPort)+"_server_out.log"
outFileName = os.path.join(self.runInfoDict['WorkingDir'], outFileName)
outFile = open(outFileName, 'w')
## Check how many processors are available in the node
ntasks = availableNodes.count(nodeId)
remoteHostName = remoteNodesIP[nodeId]
## Activate the remote socketing system
## Next line is a direct execute of a ppserver:
#subprocess.Popen(['ssh', nodeId, "python2.7", ppserverScript,"-w",str(ntasks),"-i",remoteHostName,"-p",str(newPort),"-t","1000","-g",localenv["PYTHONPATH"],"-d"],shell=False,stdout=outFile,stderr=outFile,env=localenv)
## Instead, let's build the command and then call the os-agnostic version
command=" ".join(["python",ppserverScript,"-w",str(ntasks),"-i",remoteHostName,"-p",str(newPort),"-t","50000","-g",localenv["PYTHONPATH"],"-d"])
utils.pickleSafeSubprocessPopen(['ssh',nodeId,"COMMAND='"+command+"'",self.runInfoDict['RemoteRunCommand']],shell=False,stdout=outFile,stderr=outFile,env=localenv)
## e.g., ssh nodeId COMMAND='python ppserverScript -w stuff'
## update list of servers
ppservers.append(nodeId+":"+str(newPort))
return qualifiedHostName, ppservers
def startLoop(self):
"""
This function begins the polling loop for the JobHandler, where it will
constantly fill up its running queue with jobs from its pending queue and
unload finished jobs into its finished queue to be extracted by the main thread.
"""
while not self.completed:
self.fillJobQueue()
self.cleanJobQueue()
## TODO May want to revisit this:
## http://stackoverflow.com/questions/29082268/python-time-sleep-vs-event-wait
## probably when we move to Python 3.
time.sleep(self.sleepTime)
def addJob(self, args, functionToRun, identifier, metadata=None, modulesToImport = [], forceUseThreads = False, uniqueHandler="any", clientQueue = False):
"""
Method to add an internal run (function execution)
@ In, args, dict, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun,function or method, the function that needs to be
executed
@ In, identifier, string, the job identifier
@ In, metadata, dict, optional, dictionary of metadata associated to this
run
@ In, modulesToImport, list, optional, list of modules that need to be
imported for internal parallelization (parallel python). This list
should be generated with the method returnImportModuleString in utils.py
@ In, forceUseThreads, bool, optional, flag that, if True, is going to
force the usage of multi-threading even if parallel python is activated
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner
@ In, clientQueue, boolean, optional, if this run needs to be added in the
clientQueue
@ Out, None
"""
## internal server is initialized only in case an internal calc is requested
if not self.isParallelPythonInitialized:
self.__initializeParallelPython()
if self.ppserver is None or forceUseThreads:
internalJob = Runners.SharedMemoryRunner(self.messageHandler, args,
functionToRun,
identifier, metadata,
uniqueHandler,
profile=self.__profileJobs)
else:
skipFunctions = [utils.metaclass_insert(abc.ABCMeta,BaseType)]
internalJob = Runners.DistributedMemoryRunner(self.messageHandler,
self.ppserver, args,
functionToRun,
modulesToImport, identifier,
metadata, skipFunctions,
uniqueHandler,
profile=self.__profileJobs)
# set the client info
internalJob.clientRunner = clientQueue
# add the runner in the Queue
self.reAddJob(internalJob)
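## Illustrative usage sketch (not part of this class): a caller typically queues work
## with addJob and later drains completed runs with getFinished. The names jh,
## myFunction, handleResult and the argument tuple below are assumptions for
## illustration only.
##
##   jh.addJob((inputDict,), myFunction, identifier='sample_1', metadata={'prefix': '1'})
##   ...
##   for finishedRun in jh.getFinished():
##     handleResult(finishedRun)  # read the runner's return value / metadata here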
def reAddJob(self, runner):
"""
Method to add a runner object in the queue
@ In, runner, Runner Instance, this is the instance of the runner that we want to re-add to the queue
@ Out, None
"""
with self.__queueLock:
if not runner.clientRunner:
self.__queue.append(runner)
else:
self.__clientQueue.append(runner)
if self.__profileJobs:
runner.trackTime('queue')
self.__submittedJobs.append(runner.identifier)
def addClientJob(self, args, functionToRun, identifier, metadata=None, modulesToImport = [], uniqueHandler="any"):
"""
Method to add an internal run (function execution), without consuming
resources (free spots). This can be used for client handling (see
metamodel)
@ In, args, dict, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun,function or method, the function that needs to be
executed
@ In, identifier, string, the job identifier
@ In, metadata, dict, optional, dictionary of metadata associated to this
run
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner.
@ Out, None
"""
self.addJob(args, functionToRun, identifier, metadata, modulesToImport,
forceUseThreads = True, uniqueHandler = uniqueHandler,
clientQueue = True)
def isFinished(self):
"""
Method to check if all the runs in the queue are finished
@ In, None
@ Out, isFinished, bool, True all the runs in the queue are finished
"""
with self.__queueLock:
## If there is still something left in the queue, we are not done yet.
if len(self.__queue) > 0 or len(self.__clientQueue) > 0:
return False
## Otherwise, let's look at our running lists and see if there is a job
## that is not done.
for run in self.__running+self.__clientRunning:
if run:
return False
## Are there runs that need to be claimed? If so, then I cannot say I am
## done.
if len(self.getFinishedNoPop()) > 0:
return False
return True
def availability(self, client=False):
"""
Returns the number of runs that can be added until we consider our queue
saturated
@ In, client, bool, if true, then return the values for the
__clientQueue, otherwise use __queue
@ Out, availability, int, the number of runs that can be added until we
reach saturation
"""
## Due to possibility of memory explosion, we should include the finished
## queue when considering whether we should add a new job. There was an
## issue when running on a distributed system where we saw that this list
## seemed to be growing indefinitely as the main thread was unable to clear
## that list within a reasonable amount of time. The issue on the main thread
## should also be addressed, but at least we can prevent it on this end since
## the main thread's issue may be legitimate.
maxCount = self.maxQueueSize
finishedCount = len(self.__finished)
if client:
if maxCount is None:
maxCount = self.__clientRunning.count(None)
queueCount = len(self.__clientQueue)
else:
if maxCount is None:
maxCount = self.__running.count(None)
queueCount = len(self.__queue)
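## Example (illustrative): with maxQueueSize = 4, one job still waiting in the pending
## queue and two finished jobs not yet collected, availability() returns
## 4 - 1 - 2 = 1, i.e. one more job can be queued before we consider ourselves
## saturated.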
availability = maxCount - queueCount - finishedCount
return availability
def isThisJobFinished(self, identifier):
"""
Method to check if the run identified by "identifier" is finished
@ In, identifier, string, identifier
@ Out, isFinished, bool, True if the job identified by "identifier" is
finished
"""
identifier = identifier.strip()
with self.__queueLock:
## Look through the finished jobs and attempt to find a matching
## identifier. If the job exists here, it is finished
for run in self.__finished:
if run.identifier == identifier:
return True
## Look through the pending jobs and attempt to find a matching identifier
## If the job exists here, it is not finished
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.identifier == identifier:
return False
## Look through the running jobs and attempt to find a matching identifier
## If the job exists here, it is not finished
for run in self.__running+self.__clientRunning:
if run is not None and run.identifier == identifier:
return False
## If you made it here and we still have not found anything, we have got
## problems.
self.raiseAnError(RuntimeError,"Job "+identifier+" is unknown!")
def areTheseJobsFinished(self, uniqueHandler="any"):
"""
Method to check if all the runs in the queue are finished
@ In, uniqueHandler, string, optional, it is a special keyword attached to
each runner. If provided, just the jobs that have the uniqueIdentifier
will be retrieved. By default uniqueHandler = 'any' => all the jobs for
which no uniqueIdentifier has been set up are going to be retrieved
@ Out, isFinished, bool, True all the runs in the queue are finished
"""
uniqueHandler = uniqueHandler.strip()
with self.__queueLock:
for run in self.__finished:
if run.uniqueHandler == uniqueHandler:
return False
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.uniqueHandler == uniqueHandler:
return False
for run in self.__running + self.__clientRunning:
if run is not None and run.uniqueHandler == uniqueHandler:
return False
self.raiseADebug("The jobs with uniqueHandler ", uniqueHandler, "are finished")
return True
def getFailedJobs(self):
"""
Method to get list of failed jobs
@ In, None
@ Out, __failedJobs, list, list of the identifiers (jobs) that failed
"""
return self.__failedJobs
def getFinished(self, removeFinished=True, jobIdentifier = '', uniqueHandler = "any"):
"""
Method to get the list of jobs that ended (list of objects)
@ In, removeFinished, bool, optional, flag to control if the finished jobs
need to be removed from the queue
@ In, jobIdentifier, string, optional, if specified, only collects
finished runs that start with this text. If not specified collect all.
@ In, uniqueHandler, string, optional, it is a special keyword attached to
each runner. If provided, just the jobs that have the uniqueIdentifier
will be retrieved. By default uniqueHandler = 'any' => all the jobs for
which no uniqueIdentifier has been set up are going to be retrieved
@ Out, finished, list, list of finished jobs (InternalRunner or
ExternalRunner objects) (if jobIdentifier is None), else the finished
jobs matching the base case jobIdentifier
"""
finished = []
## If the user does not specify a jobIdentifier, then set it to the empty
## string because every job will match this starting string.
if jobIdentifier is None:
jobIdentifier = ''
with self.__queueLock:
runsToBeRemoved = []
for i,run in enumerate(self.__finished):
## If the jobIdentifier does not match or the uniqueHandler does not
## match, then don't bother trying to do anything with it
if not run.identifier.startswith(jobIdentifier) \
or uniqueHandler != run.uniqueHandler:
continue
finished.append(run)
if removeFinished:
runsToBeRemoved.append(i)
self.__checkAndRemoveFinished(run)
##Since these indices are sorted, reverse them to ensure that when we
## delete something it will not shift anything to the left (lower index)
## than it.
for i in reversed(runsToBeRemoved):
self.__finished[i].trackTime('collected')
del self.__finished[i]
## end with self.__queueLock
return finished
def getFinishedNoPop(self):
"""
Method to get the list of jobs that ended (list of objects) without
removing them from the queue
@ In, None
@ Out, finished, list, list of finished jobs (InternalRunner or
ExternalRunner objects)
"""
finished = self.getFinished(False)
return finished
## Deprecating this function because I don't think it is doing the right thing
## People using the job handler should be asking for what is available not the
## number of free spots in the running block. Only the job handler should be
## able to internally alter or query the running and clientRunning queues.
## The outside environment can only access the queue and clientQueue variables.
# def numFreeSpots(self, client=False):
def numRunning(self):
"""
Returns the number of runs currently running.
@ In, None
@ Out, activeRuns, int, number of active runs
"""
#with self.__queueLock:
## The size of the list does not change, only its contents, so I don't
## think there should be any conflict if we are reading a variable from
## one thread and updating it on the other thread.
activeRuns = sum(run is not None for run in self.__running)
return activeRuns
def numSubmitted(self):
"""
Method to get the number of submitted jobs
@ In, None
@ Out, len(self.__submittedJobs), int, number of submitted jobs
"""
return len(self.__submittedJobs)
def fillJobQueue(self):
"""
Method to start running the jobs in the queue. If there are empty slots,
it takes jobs out of the queue and starts running them.
@ In, None
@ Out, None
"""
## Only the jobHandler's startLoop thread should have write access to the
## self.__running variable, so we should be able to safely query this outside
## of the lock given that this function is called only on that thread as well.
emptySlots = [i for i,run in enumerate(self.__running) if run is None]
## Don't bother acquiring the lock if there are no empty spots or nothing
## in the queue (this could be simultaneously added to by the main thread,
## but I will be back here after a short wait on this thread so I am not
## concerned about this potential inconsistency)
if len(emptySlots) > 0 and len(self.__queue) > 0:
with self.__queueLock:
for i in emptySlots:
## The queue could be emptied during this loop, so we will break
## out as soon as that happens so we don't hog the lock.
if len(self.__queue) > 0:
item = self.__queue.popleft()
## Okay, this is a little tricky, but hang with me here. Whenever
## a code model is run, we need to replace some of its command
## parameters. The way we do this is by looking at the job instance
## and checking if the first argument (the self in
## self.evaluateSample) is an instance of Code, if so, then we need
## to replace the execution command. Is this fragile? Possibly. We may
## want to revisit this on the next iteration of this code.
if len(item.args) > 0 and isinstance(item.args[0], Models.Code):
kwargs = {}
kwargs['INDEX'] = str(i)
kwargs['INDEX1'] = str(i+1)
kwargs['CURRENT_ID'] = str(self.__nextId)
kwargs['CURRENT_ID1'] = str(self.__nextId+1)
kwargs['SCRIPT_DIR'] = self.runInfoDict['ScriptDir']
kwargs['FRAMEWORK_DIR'] = self.runInfoDict['FrameworkDir']
## This will not be used since the Code will create a new
## directory for its specific files and will spawn a process there
## so we will let the Code fill that in. Note, the line below
## represents the WRONG directory for an instance of a code!
## It is however the correct directory for a MultiRun step
## -- DPM 5/4/17
kwargs['WORKING_DIR'] = item.args[0].workingDir
kwargs['BASE_WORKING_DIR'] = self.runInfoDict['WorkingDir']
kwargs['METHOD'] = os.environ.get("METHOD","opt")
kwargs['NUM_CPUS'] = str(self.runInfoDict['NumThreads'])
item.args[3].update(kwargs)
self.__running[i] = item
self.__running[i].start()
self.__running[i].trackTime('started')
self.__nextId += 1
else:
break
## Repeat the same process above, only for the clientQueue
emptySlots = [i for i,run in enumerate(self.__clientRunning) if run is None]
if len(emptySlots) > 0 and len(self.__clientQueue) > 0:
with self.__queueLock:
for i in emptySlots:
if len(self.__clientQueue) > 0:
self.__clientRunning[i] = self.__clientQueue.popleft()
self.__clientRunning[i].start()
self.__clientRunning[i].trackTime('jobHandler_started')
self.__nextId += 1
else:
break
def cleanJobQueue(self):
"""
Method that will remove finished jobs from the queue and place them into the
finished queue to be read by some other thread.
@ In, None
@ Out, None
"""
## The code handling these two lists was the exact same, I have taken the
## liberty of condensing these loops into one and removing some of the
## redundant checks to make this code a bit simpler.
for runList in [self.__running, self.__clientRunning]:
for i,run in enumerate(runList):
if run is not None and run.isDone():
## We should only need the lock if we are touching the finished queue
## which is cleared by the main thread. Again, the running queues
## should not be modified by the main thread, however it may query
## them by calling numRunning.
with self.__queueLock:
self.__finished.append(run)
self.__finished[-1].trackTime('jobHandler_finished')
runList[i] = None
def setProfileJobs(self,profile=False):
"""
Sets whether profiles for jobs are printed or not.
@ In, profile, bool, optional, if True then print timings for jobs when they are garbage collected
@ Out, None
"""
self.__profileJobs = profile
def startingNewStep(self):
"""
Method to reset the __submittedJobs to an empty list.
@ In, None
@ Out, None
"""
with self.__queueLock:
self.__submittedJobs = []
def shutdown(self):
"""
This function will mark the job handler as done, so it can shutdown its
polling thread.
@ In, None
@ Out, None
"""
self.completed = True
def terminateAll(self):
"""
Method to clear out the queue by killing all running processes.
@ In, None
@ Out, None
"""
with self.__queueLock:
for queue in [self.__queue, self.__clientQueue]:
queue.clear()
for runList in [self.__running, self.__clientRunning]:
unfinishedRuns = [run for run in runList if run is not None]
for run in unfinishedRuns:
run.kill()
| 41.678129
| 226
| 0.651518
|
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import time
import collections
import subprocess
import os
import copy
import sys
import abc
import threading
import random
import socket
from utils import utils
from BaseClasses import BaseType
import MessageHandler
import Runners
import Models
import pp
import ppserver
self.__parallelPythonInitialized = False
self.sleepTime = 0.005
self.completed = False
e=self.__profileJobs)
# set the client info
internalJob.clientRunner = clientQueue
# add the runner in the Queue
self.reAddJob(internalJob)
def reAddJob(self, runner):
with self.__queueLock:
if not runner.clientRunner:
self.__queue.append(runner)
else:
self.__clientQueue.append(runner)
if self.__profileJobs:
runner.trackTime('queue')
self.__submittedJobs.append(runner.identifier)
def addClientJob(self, args, functionToRun, identifier, metadata=None, modulesToImport = [], uniqueHandler="any"):
self.addJob(args, functionToRun, identifier, metadata, modulesToImport,
forceUseThreads = True, uniqueHandler = uniqueHandler,
clientQueue = True)
def isFinished(self):
with self.__queueLock:
## If there is still something left in the queue, we are not done yet.
if len(self.__queue) > 0 or len(self.__clientQueue) > 0:
return False
## Otherwise, let's look at our running lists and see if there is a job
for run in self.__running+self.__clientRunning:
if run:
return False
return True
def availability(self, client=False):
def isThisJobFinished(self, identifier):
identifier = identifier.strip()
with self.__queueLock:
## Look through the finished jobs and attempt to find a matching
## identifier. If the job exists here, it is finished
for run in self.__finished:
if run.identifier == identifier:
return True
## Look through the pending jobs and attempt to find a matching identifier
## If the job exists here, it is not finished
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.identifier == identifier:
return False
## Look through the running jobs and attempt to find a matching identifier
## If the job exists here, it is not finished
for run in self.__running+self.__clientRunning:
if run is not None and run.identifier == identifier:
return False
## If you made it here and we still have not found anything, we have got
## problems.
self.raiseAnError(RuntimeError,"Job "+identifier+" is unknown!")
def areTheseJobsFinished(self, uniqueHandler="any"):
uniqueHandler = uniqueHandler.strip()
with self.__queueLock:
for run in self.__finished:
if run.uniqueHandler == uniqueHandler:
return False
for queue in [self.__queue, self.__clientQueue]:
for run in queue:
if run.uniqueHandler == uniqueHandler:
return False
for run in self.__running + self.__clientRunning:
if run is not None and run.uniqueHandler == uniqueHandler:
return False
self.raiseADebug("The jobs with uniqueHandler ", uniqueHandler, "are finished")
return True
def getFailedJobs(self):
return self.__failedJobs
def getFinished(self, removeFinished=True, jobIdentifier = '', uniqueHandler = "any"):
finished = []
## If the user does not specify a jobIdentifier, then set it to the empty
## string because every job will match this starting string.
if jobIdentifier is None:
jobIdentifier = ''
with self.__queueLock:
runsToBeRemoved = []
for i,run in enumerate(self.__finished):
## If the jobIdentifier does not match or the uniqueHandler does not
## match, then don't bother trying to do anything with it
if not run.identifier.startswith(jobIdentifier) \
or uniqueHandler != run.uniqueHandler:
continue
finished.append(run)
if removeFinished:
runsToBeRemoved.append(i)
self.__checkAndRemoveFinished(run)
## Since these indices are sorted, reverse them to ensure that when we
## delete something it will not shift anything to the left (lower index)
## than it.
for i in reversed(runsToBeRemoved):
self.__finished[i].trackTime('collected')
del self.__finished[i]
## end with self.__queueLock
return finished
def getFinishedNoPop(self):
finished = self.getFinished(False)
return finished
## Deprecating this function because I don't think it is doing the right thing
## People using the job handler should be asking for what is available not the
## number of free spots in the running block. Only the job handler should be
## able to internally alter or query the running and clientRunning queues.
## The outside environment can only access the queue and clientQueue variables.
# def numFreeSpots(self, client=False):
def numRunning(self):
#with self.__queueLock:
## The size of the list does not change, only its contents, so I don't
## think there should be any conflict if we are reading a variable from
## one thread and updating it on the other thread.
activeRuns = sum(run is not None for run in self.__running)
return activeRuns
def numSubmitted(self):
return len(self.__submittedJobs)
def fillJobQueue(self):
## Only the jobHandler's startLoop thread should have write access to the
## self.__running variable, so we should be able to safely query this outside
## of the lock given that this function is called only on that thread as well.
emptySlots = [i for i,run in enumerate(self.__running) if run is None]
## Don't bother acquiring the lock if there are no empty spots or nothing
## in the queue (this could be simultaneously added to by the main thread,
## but I will be back here after a short wait on this thread so I am not
## concerned about this potential inconsistency)
if len(emptySlots) > 0 and len(self.__queue) > 0:
with self.__queueLock:
for i in emptySlots:
## The queue could be emptied during this loop, so we want to break
## out as soon as that happens so we don't hog the lock.
if len(self.__queue) > 0:
item = self.__queue.popleft()
## Okay, this is a little tricky, but hang with me here. Whenever
## a code model is run, we need to replace some of its command
## parameters. The way we do this is by looking at the job instance
## and checking if the first argument (the self in
## self.evaluateSample) is an instance of Code, if so, then we need
## to replace the execution command. Is this fragile? Possibly. We may
## want to revisit this on the next iteration of this code.
if len(item.args) > 0 and isinstance(item.args[0], Models.Code):
kwargs = {}
kwargs['INDEX'] = str(i)
kwargs['INDEX1'] = str(i+1)
kwargs['CURRENT_ID'] = str(self.__nextId)
kwargs['CURRENT_ID1'] = str(self.__nextId+1)
kwargs['SCRIPT_DIR'] = self.runInfoDict['ScriptDir']
kwargs['FRAMEWORK_DIR'] = self.runInfoDict['FrameworkDir']
## This will not be used since the Code will create a new
## directory for its specific files and will spawn a process there
## so we will let the Code fill that in. Note, the line below
## represents the WRONG directory for an instance of a code!
## It is however the correct directory for a MultiRun step
## -- DPM 5/4/17
kwargs['WORKING_DIR'] = item.args[0].workingDir
kwargs['BASE_WORKING_DIR'] = self.runInfoDict['WorkingDir']
kwargs['METHOD'] = os.environ.get("METHOD","opt")
kwargs['NUM_CPUS'] = str(self.runInfoDict['NumThreads'])
item.args[3].update(kwargs)
self.__running[i] = item
self.__running[i].start()
self.__running[i].trackTime('started')
self.__nextId += 1
else:
break
## Repeat the same process above, only for the clientQueue
emptySlots = [i for i,run in enumerate(self.__clientRunning) if run is None]
if len(emptySlots) > 0 and len(self.__clientQueue) > 0:
with self.__queueLock:
for i in emptySlots:
if len(self.__clientQueue) > 0:
self.__clientRunning[i] = self.__clientQueue.popleft()
self.__clientRunning[i].start()
self.__clientRunning[i].trackTime('jobHandler_started')
self.__nextId += 1
else:
break
def cleanJobQueue(self):
## The code handling these two lists was the exact same, I have taken the
## liberty of condensing these loops into one and removing some of the
## redundant checks to make this code a bit simpler.
for runList in [self.__running, self.__clientRunning]:
for i,run in enumerate(runList):
if run is not None and run.isDone():
## We should only need the lock if we are touching the finished queue
## which is cleared by the main thread. Again, the running queues
## should not be modified by the main thread, however it may query
## them by calling numRunning.
with self.__queueLock:
self.__finished.append(run)
self.__finished[-1].trackTime('jobHandler_finished')
runList[i] = None
def setProfileJobs(self,profile=False):
self.__profileJobs = profile
def startingNewStep(self):
with self.__queueLock:
self.__submittedJobs = []
def shutdown(self):
self.completed = True
def terminateAll(self):
with self.__queueLock:
for queue in [self.__queue, self.__clientQueue]:
queue.clear()
for runList in [self.__running, self.__clientRunning]:
unfinishedRuns = [run for run in runList if run is not None]
for run in unfinishedRuns:
run.kill()
| true
| true
|
1c409e9156c0eb16b725cac2a4f067e9d329c65f
| 2,047
|
py
|
Python
|
chia/server/start_full_node.py
|
ForestCrazy/chia-blockchain-remote-plot
|
0ba838b7a8ea2b5410d438ac70295df699a30dae
|
[
"Apache-2.0"
] | 11,902
|
2019-12-05T00:14:29.000Z
|
2022-03-31T23:25:37.000Z
|
chia/server/start_full_node.py
|
ForestCrazy/chia-blockchain-remote-plot
|
0ba838b7a8ea2b5410d438ac70295df699a30dae
|
[
"Apache-2.0"
] | 5,246
|
2019-12-05T04:00:03.000Z
|
2022-03-31T21:33:30.000Z
|
chia/server/start_full_node.py
|
Devh4ox4d/silishitcoin
|
4372d06aa4a54220f2bde29c8081410503679a82
|
[
"Apache-2.0"
] | 2,149
|
2019-12-05T11:12:53.000Z
|
2022-03-31T06:08:34.000Z
|
import logging
import pathlib
from multiprocessing import freeze_support
from typing import Dict
from chia.consensus.constants import ConsensusConstants
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.full_node.full_node import FullNode
from chia.full_node.full_node_api import FullNodeAPI
from chia.rpc.full_node_rpc_api import FullNodeRpcApi
from chia.server.outbound_message import NodeType
from chia.server.start_service import run_service
from chia.util.config import load_config_cli
from chia.util.default_root import DEFAULT_ROOT_PATH
# See: https://bugs.python.org/issue29288
"".encode("idna")
SERVICE_NAME = "full_node"
log = logging.getLogger(__name__)
def service_kwargs_for_full_node(
root_path: pathlib.Path, config: Dict, consensus_constants: ConsensusConstants
) -> Dict:
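# Assemble the FullNode instance and the keyword arguments that run_service
# expects (node, API wrapper, ports, callbacks) from the loaded config.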
full_node = FullNode(
config,
root_path=root_path,
consensus_constants=consensus_constants,
)
api = FullNodeAPI(full_node)
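# Only advertise the node's port for UPnP forwarding when enable_upnp is set in the config.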
upnp_list = []
if config["enable_upnp"]:
upnp_list = [config["port"]]
network_id = config["selected_network"]
kwargs = dict(
root_path=root_path,
node=api.full_node,
peer_api=api,
node_type=NodeType.FULL_NODE,
advertised_port=config["port"],
service_name=SERVICE_NAME,
upnp_ports=upnp_list,
server_listen_ports=[config["port"]],
on_connect_callback=full_node.on_connect,
network_id=network_id,
)
if config["start_rpc_server"]:
kwargs["rpc_info"] = (FullNodeRpcApi, config["rpc_port"])
return kwargs
def main() -> None:
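# Entry point: load the service config, apply the selected network's constant
# overrides, and start the full node service.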
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
overrides = config["network_overrides"]["constants"][config["selected_network"]]
updated_constants = DEFAULT_CONSTANTS.replace_str_to_bytes(**overrides)
kwargs = service_kwargs_for_full_node(DEFAULT_ROOT_PATH, config, updated_constants)
return run_service(**kwargs)
if __name__ == "__main__":
freeze_support()
main()
| 31.015152
| 87
| 0.741085
|
import logging
import pathlib
from multiprocessing import freeze_support
from typing import Dict
from chia.consensus.constants import ConsensusConstants
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.full_node.full_node import FullNode
from chia.full_node.full_node_api import FullNodeAPI
from chia.rpc.full_node_rpc_api import FullNodeRpcApi
from chia.server.outbound_message import NodeType
from chia.server.start_service import run_service
from chia.util.config import load_config_cli
from chia.util.default_root import DEFAULT_ROOT_PATH
"".encode("idna")
SERVICE_NAME = "full_node"
log = logging.getLogger(__name__)
def service_kwargs_for_full_node(
root_path: pathlib.Path, config: Dict, consensus_constants: ConsensusConstants
) -> Dict:
full_node = FullNode(
config,
root_path=root_path,
consensus_constants=consensus_constants,
)
api = FullNodeAPI(full_node)
upnp_list = []
if config["enable_upnp"]:
upnp_list = [config["port"]]
network_id = config["selected_network"]
kwargs = dict(
root_path=root_path,
node=api.full_node,
peer_api=api,
node_type=NodeType.FULL_NODE,
advertised_port=config["port"],
service_name=SERVICE_NAME,
upnp_ports=upnp_list,
server_listen_ports=[config["port"]],
on_connect_callback=full_node.on_connect,
network_id=network_id,
)
if config["start_rpc_server"]:
kwargs["rpc_info"] = (FullNodeRpcApi, config["rpc_port"])
return kwargs
def main() -> None:
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
overrides = config["network_overrides"]["constants"][config["selected_network"]]
updated_constants = DEFAULT_CONSTANTS.replace_str_to_bytes(**overrides)
kwargs = service_kwargs_for_full_node(DEFAULT_ROOT_PATH, config, updated_constants)
return run_service(**kwargs)
if __name__ == "__main__":
freeze_support()
main()
| true
| true
|
1c409eb65c6a92e583e1f42208d95c66824f1c7f
| 449
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scattergl/marker/colorbar/title/_text.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/scattergl/marker/colorbar/title/_text.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/scattergl/marker/colorbar/title/_text.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="text",
parent_name="scattergl.marker.colorbar.title",
**kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 26.411765
| 66
| 0.621381
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="text",
parent_name="scattergl.marker.colorbar.title",
**kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| true
| true
|
1c409f00899ab1c7539a27e717468362b2df8d14
| 2,500
|
py
|
Python
|
demos/person_detect.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | null | null | null |
demos/person_detect.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | 1
|
2020-03-31T17:04:09.000Z
|
2020-03-31T17:04:09.000Z
|
demos/person_detect.py
|
altest-com/dnfal
|
d1fb15508c5583aeaa0957fcc3e37634d36bf237
|
[
"MIT"
] | null | null | null |
import argparse
import sys
from os import path
from time import time
import cv2 as cv
from cvtlib.drawing import Drawer
from cvtlib.image import resize
from utils import list_images, DEMOS_DIR, MODELS_DIR
from dnfal.persons import BodyDetector
from dnfal.loggers import logger, config_logger
def run(image_path: str, weights_path: str):
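# Demo loop: run the body detector over every image found under image_path and
# display the labeled detections, advancing with the space key.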
config_logger(level='DEBUG', to_console=True)
person_detector = BodyDetector(
weights_path=weights_path,
resize_height=192
)
images_paths = list_images(image_path)
logger.info('Starting analysis...')
logger.info('Press "space" key to display next result. Press "q" to quit.')
max_image_size = 1920
drawer = Drawer()
drawer.font_scale = 0.5
drawer.font_linewidth = 1
for image_path in images_paths:
image_name = path.basename(image_path)
logger.info(f'Analyzing image {image_name}...')
image = cv.imread(image_path)
if image is None:
logger.warn(f'Unable to open image file {image_path}')
continue
h, w, = image.shape[0:2]
logger.info(f'Image loaded. Image size is {w}x{h} pixels.')
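# Shrink very large inputs before detection (presumably so the longest side fits within max_image_size).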
if max(w, h) > max_image_size:
image, scale = resize(image, max_image_size)
h, w, = image.shape[0:2]
logger.info(f'Image resized to {w}x{h} pixels.')
tic = time()
boxes, scores = person_detector.detect(image)
toc = time()
logger.info(f'Found {len(boxes)} persons in {(toc - tic):.3f} s.')
for ind, box in enumerate(boxes):
drawer.draw_labeled_box(image, f'{int(100*scores[ind])}%', box)
cv.imshow(f'Faces in {image_name}', image)
ret = cv.waitKey()
if ret == ord(' '):
cv.destroyAllWindows()
elif ret == ord('q'):
cv.destroyAllWindows()
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
type=str,
required=False,
default=path.join(DEMOS_DIR, 'data/images/persons'),
help='Path to input image file or directory containing image files.'
)
parser.add_argument(
'--weights',
type=str,
required=False,
default=path.join(MODELS_DIR, 'weights_person_detector.pth'),
help='Path to file containing the model weights of person detector.'
)
args = parser.parse_args(sys.argv[1:])
run(args.input, args.weights)
| 26.041667
| 79
| 0.6248
|
import argparse
import sys
from os import path
from time import time
import cv2 as cv
from cvtlib.drawing import Drawer
from cvtlib.image import resize
from utils import list_images, DEMOS_DIR, MODELS_DIR
from dnfal.persons import BodyDetector
from dnfal.loggers import logger, config_logger
def run(image_path: str, weights_path: str):
config_logger(level='DEBUG', to_console=True)
person_detector = BodyDetector(
weights_path=weights_path,
resize_height=192
)
images_paths = list_images(image_path)
logger.info('Starting analysis...')
logger.info('Press "space" key to display next result. Press "q" to quit.')
max_image_size = 1920
drawer = Drawer()
drawer.font_scale = 0.5
drawer.font_linewidth = 1
for image_path in images_paths:
image_name = path.basename(image_path)
logger.info(f'Analyzing image {image_name}...')
image = cv.imread(image_path)
if image is None:
logger.warn(f'Unable to open image file {image_path}')
continue
h, w, = image.shape[0:2]
logger.info(f'Image loaded. Image size is {w}x{h} pixels.')
if max(w, h) > max_image_size:
image, scale = resize(image, max_image_size)
h, w, = image.shape[0:2]
logger.info(f'Image resized to {w}x{h} pixels.')
tic = time()
boxes, scores = person_detector.detect(image)
toc = time()
logger.info(f'Found {len(boxes)} persons in {(toc - tic):.3f} s.')
for ind, box in enumerate(boxes):
drawer.draw_labeled_box(image, f'{int(100*scores[ind])}%', box)
cv.imshow(f'Faces in {image_name}', image)
ret = cv.waitKey()
if ret == ord(' '):
cv.destroyAllWindows()
elif ret == ord('q'):
cv.destroyAllWindows()
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
type=str,
required=False,
default=path.join(DEMOS_DIR, 'data/images/persons'),
help='Path to input image file or directory containing image files.'
)
parser.add_argument(
'--weights',
type=str,
required=False,
default=path.join(MODELS_DIR, 'weights_person_detector.pth'),
help='Path to file containing the model weights of person detector.'
)
args = parser.parse_args(sys.argv[1:])
run(args.input, args.weights)
| true
| true
|
1c409f237c768935f49616c6f9bf5539055aaee2
| 19,269
|
py
|
Python
|
datasets/siam_rpn_dataset.py
|
ywang-37/EnhancedSiamShipTracking
|
0b25cf02b6088268a6c374cb20a7f0355bc65b2e
|
[
"Apache-2.0"
] | 3
|
2022-03-03T09:14:50.000Z
|
2022-03-28T13:46:29.000Z
|
datasets/siam_rpn_dataset.py
|
ywang-37/EnhancedSiamShipTracking
|
0b25cf02b6088268a6c374cb20a7f0355bc65b2e
|
[
"Apache-2.0"
] | null | null | null |
datasets/siam_rpn_dataset.py
|
ywang-37/EnhancedSiamShipTracking
|
0b25cf02b6088268a6c374cb20a7f0355bc65b2e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from torch.utils.data import Dataset
import numpy as np
import json
import random
import logging
from os.path import join
from utils.bbox_helper import *
from utils.anchors import Anchors
import math
import sys
pyv = sys.version[0]
import cv2
if pyv[0] == '3':
cv2.ocl.setUseOpenCL(False)
logger = logging.getLogger('global')
sample_random = random.Random()
sample_random.seed(123456)
class SubDataSet(object):
def __init__(self, cfg):
for string in ['root', 'anno']:
if string not in cfg:
raise Exception('SubDataSet need "{}"'.format(string))
with open(cfg['anno']) as fin:
logger.info("loading " + cfg['anno'])
self.labels = self.filter_zero(json.load(fin), cfg)
def isint(x):
try:
int(x)
return True
except:
return False
# add frames args into labels
to_del = []
for video in self.labels:
for track in self.labels[video]:
frames = self.labels[video][track]
frames = list(map(int, filter(lambda x: isint(x), frames.keys())))
frames.sort()
self.labels[video][track]['frames'] = frames
if len(frames) <= 0:
logger.info("warning {}/{} has no frames.".format(video, track))
to_del.append((video, track))
# delete tracks with no frames
for video, track in to_del:
del self.labels[video][track]
# delete videos with no valid track
to_del = []
for video in self.labels:
if len(self.labels[video]) <= 0:
logger.info("warning {} has no tracks".format(video))
to_del.append(video)
for video in to_del:
del self.labels[video]
self.videos = list(self.labels.keys())
logger.info(cfg['anno'] + " loaded.")
# default args
self.root = "/"
self.start = 0
self.num = len(self.labels)
self.num_use = self.num
self.frame_range = 100
self.mark = "vid"
self.path_format = "{}.{}.{}.jpg"
self.pick = []
# input args
self.__dict__.update(cfg)
self.num_use = int(self.num_use)
# shuffle
self.shuffle()
def filter_zero(self, anno, cfg):
name = cfg.get('mark', '')
out = {}
tot = 0
new = 0
zero = 0
for video, tracks in anno.items():
new_tracks = {}
for trk, frames in tracks.items():
new_frames = {}
for frm, bbox in frames.items():
tot += 1
if len(bbox) == 4:
x1, y1, x2, y2 = bbox
w, h = x2 - x1, y2 -y1
else:
w, h= bbox
if w == 0 or h == 0:
logger.info('Error, {name} {video} {trk} {bbox}'.format(**locals()))
zero += 1
continue
new += 1
new_frames[frm] = bbox
if len(new_frames) > 0:
new_tracks[trk] = new_frames
if len(new_tracks) > 0:
out[video] = new_tracks
return out
def log(self):
logger.info('SubDataSet {name} start-index {start} select [{select}/{num}] path {format}'.format(
name=self.mark, start=self.start, select=self.num_use, num=self.num, format=self.path_format
))
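# Build the pick list by repeatedly shuffling the full index range until at
# least num_use indices are gathered, then keep the first num_use of them.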
def shuffle(self):
lists = list(range(self.start, self.start + self.num))
m = 0
pick = []
while m < self.num_use:
sample_random.shuffle(lists)
pick += lists
m += self.num
self.pick = pick[:self.num_use]
return self.pick
def get_image_anno(self, video, track, frame):
frame = "{:06d}".format(frame)
image_path = join(self.root, video, self.path_format.format(frame, track, 'x'))
image_anno = self.labels[video][track][frame]
return image_path, image_anno
def get_positive_pair(self, index):
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
if 'hard' not in track_info:
template_frame = random.randint(0, len(frames)-1)
left = max(template_frame - self.frame_range, 0)
right = min(template_frame + self.frame_range, len(frames)-1) + 1
search_range = frames[left:right]
template_frame = frames[template_frame]
search_frame = random.choice(search_range)
else:
search_frame = random.choice(track_info['hard'])
left = max(search_frame - self.frame_range, 0)
right = min(search_frame + self.frame_range, len(frames)-1) + 1 # python [left:right+1) = [left:right]
template_range = frames[left:right]
template_frame = random.choice(template_range)
search_frame = frames[search_frame]
return self.get_image_anno(video_name, track, template_frame), \
self.get_image_anno(video_name, track, search_frame)
def get_random_target(self, index=-1):
if index == -1:
index = random.randint(0, self.num-1)
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
frame = random.choice(frames)
return self.get_image_anno(video_name, track, frame)
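# Warp the bbox region of `image` onto an out_sz x out_sz patch with an affine
# transform; areas outside the frame are filled with the constant `padding` color.
# e.g. crop_hwc(img, [10, 20, 110, 120], 127) returns a 127x127 crop of that region.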
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
bbox = [float(x) for x in bbox]
a = (out_sz-1) / (bbox[2]-bbox[0])
b = (out_sz-1) / (bbox[3]-bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
return crop
class Augmentation:
def __init__(self, cfg):
# default args
self.shift = 0
self.scale = 0
self.blur = 0 #False
self.resize = False
self.rgbVar = np.array([[-0.55919361, 0.98062831, - 0.41940627],
[1.72091413, 0.19879334, - 1.82968581],
[4.64467907, 4.73710203, 4.88324118]], dtype=np.float32)
self.flip = 0
self.eig_vec = np.array([
[0.4009, 0.7192, -0.5675],
[-0.8140, -0.0045, -0.5808],
[0.4203, -0.6948, -0.5836],
], dtype=np.float32)
self.eig_val = np.array([[0.2175, 0.0188, 0.0045]], np.float32)
self.__dict__.update(cfg)
@staticmethod
def random():
return random.random() * 2 - 1.0
def blur_image(self, image):
def rand_kernel():
size = np.random.randn(1)
size = int(np.round(size)) * 2 + 1
if size < 0: return None
if random.random() < 0.5: return None
size = min(size, 45)
kernel = np.zeros((size, size))
c = int(size/2)
wx = random.random()
kernel[:, c] += 1. / size * wx
kernel[c, :] += 1. / size * (1-wx)
return kernel
kernel = rand_kernel()
if kernel is not None:
image = cv2.filter2D(image, -1, kernel)
return image
def __call__(self, image, bbox, size, gray=False):
if gray:
grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = np.zeros((grayed.shape[0], grayed.shape[1], 3), np.uint8)
image[:, :, 0] = image[:, :, 1] = image[:, :, 2] = grayed
shape = image.shape
crop_bbox = center2corner((shape[0]//2, shape[1]//2, size-1, size-1))
param = {}
if self.shift:
param['shift'] = (Augmentation.random() * self.shift, Augmentation.random() * self.shift)
if self.scale:
param['scale'] = ((1.0 + Augmentation.random() * self.scale), (1.0 + Augmentation.random() * self.scale))
crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)
x1 = crop_bbox.x1
y1 = crop_bbox.y1
bbox = BBox(bbox.x1 - x1, bbox.y1 - y1,
bbox.x2 - x1, bbox.y2 - y1)
if self.scale:
scale_x, scale_y = param['scale']
bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)
image = crop_hwc(image, crop_bbox, size)
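# Random color jitter: draw a per-channel offset from the stored rgbVar
# directions, reorder it to match the image's channel order, and subtract it.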
offset = np.dot(self.rgbVar, np.random.randn(3, 1))
offset = offset[::-1] # bgr 2 rgb
offset = offset.reshape(3)
image = image - offset
if self.blur > random.random():
image = self.blur_image(image)
if self.resize:
imageSize = image.shape[:2]
ratio = max(math.pow(random.random(), 0.5), 0.2) # 25 ~ 255
rand_size = (int(round(ratio*imageSize[0])), int(round(ratio*imageSize[1])))
image = cv2.resize(image, rand_size)
image = cv2.resize(image, tuple(imageSize))
if self.flip and self.flip > Augmentation.random():
image = cv2.flip(image, 1)
width = image.shape[1]
bbox = Corner(width - 1 - bbox.x2, bbox.y1, width - 1 - bbox.x1, bbox.y2)
return image, bbox
class AnchorTargetLayer:
def __init__(self, cfg):
self.thr_high = 0.6
self.thr_low = 0.3
self.negative = 16
self.rpn_batch = 64
self.positive = 16
self.__dict__.update(cfg)
def __call__(self, anchor, target, size, neg=False, need_iou=False):
anchor_num = anchor.anchors.shape[0]
cls = np.zeros((anchor_num, size, size), dtype=np.int64)
cls[...] = -1 # -1 ignore 0 negative 1 positive
delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)
delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)
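# Helper: randomly keep at most keep_num of the given anchor positions, so that
# positives and negatives stay balanced within the RPN batch,
# e.g. select(np.where(cls == 0), 16) keeps at most 16 negative positions.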
def select(position, keep_num=16):
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
if neg:
l = size // 2 - 3
r = size // 2 + 3 + 1
cls[:, l:r, l:r] = 0
neg, neg_num = select(np.where(cls == 0), self.negative)
cls[:] = -1
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
overlap = np.zeros((anchor_num, size, size), dtype=np.float32)
return cls, delta, delta_weight, overlap
tcx, tcy, tw, th = corner2center(target)
anchor_box = anchor.all_anchors[0]
anchor_center = anchor.all_anchors[1]
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
# delta
delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / w)
delta[3] = np.log(th / h)
# IoU
overlap = IoU([x1, y1, x2, y2], target)
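# Anchors whose IoU with the target exceeds thr_high become positives and those
# below thr_low negatives; keep at most `positive` positives and fill the rest
# of rpn_batch with sampled negatives.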
pos = np.where(overlap > self.thr_high)
neg = np.where(overlap < self.thr_low)
pos, pos_num = select(pos, self.positive)
neg, neg_num = select(neg, self.rpn_batch - pos_num)
cls[pos] = 1
delta_weight[pos] = 1. / (pos_num + 1e-6)
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
return cls, delta, delta_weight, overlap
class DataSets(Dataset):
def __init__(self, cfg, anchor_cfg, num_epoch=1):
super(DataSets, self).__init__()
global logger
logger = logging.getLogger('global')
# anchors
self.anchors = Anchors(anchor_cfg)
# size
self.template_size = 127
self.origin_size = 127
self.search_size = 255
self.size = 17
self.base_size = 0
self.crop_size = 0
if 'template_size' in cfg:
self.template_size = cfg['template_size']
if 'origin_size' in cfg:
self.origin_size = cfg['origin_size']
if 'search_size' in cfg:
self.search_size = cfg['search_size']
if 'base_size' in cfg:
self.base_size = cfg['base_size']
if 'size' in cfg:
self.size = cfg['size']
if (self.search_size - self.template_size) / self.anchors.stride + 1 + self.base_size != self.size:
raise Exception("size not match!") # TODO: calculate size online
if 'crop_size' in cfg:
self.crop_size = cfg['crop_size']
self.template_small = False
if 'template_small' in cfg and cfg['template_small']:
self.template_small = True
self.anchors.generate_all_anchors(im_c=self.search_size//2, size=self.size)
if 'anchor_target' not in cfg:
cfg['anchor_target'] = {}
self.anchor_target = AnchorTargetLayer(cfg['anchor_target'])
# data sets
if 'datasets' not in cfg:
raise(Exception('DataSet need "{}"'.format('datasets')))
self.all_data = []
start = 0
self.num = 0
for name in cfg['datasets']:
dataset = cfg['datasets'][name]
dataset['mark'] = name
dataset['start'] = start
dataset = SubDataSet(dataset)
dataset.log()
self.all_data.append(dataset)
start += dataset.num # real video number
self.num += dataset.num_use # the number used for subset shuffle
# data augmentation
aug_cfg = cfg['augmentation']
self.template_aug = Augmentation(aug_cfg['template'])
self.search_aug = Augmentation(aug_cfg['search'])
self.gray = aug_cfg['gray']
self.neg = aug_cfg['neg']
self.inner_neg = 0 if 'inner_neg' not in aug_cfg else aug_cfg['inner_neg']
self.pick = None # list to save id for each img
if 'num' in cfg: # number used in training for all dataset
self.num = int(cfg['num'])
self.num *= num_epoch
self.shuffle()
self.infos = {
'template': self.template_size,
'search': self.search_size,
'template_small': self.template_small,
'gray': self.gray,
'neg': self.neg,
'inner_neg': self.inner_neg,
'crop_size': self.crop_size,
'anchor_target': self.anchor_target.__dict__,
'num': self.num // num_epoch
}
logger.info('dataset informations: \n{}'.format(json.dumps(self.infos, indent=4)))
def imread(self, path):
img = cv2.imread(path)
if self.origin_size == self.template_size:
return img, 1.0
def map_size(exe, size):
return int(round(((exe + 1) / (self.origin_size + 1) * (size+1) - 1)))
nsize = map_size(self.template_size, img.shape[1])
img = cv2.resize(img, (nsize, nsize))
return img, nsize / img.shape[1]
def shuffle(self):
pick = []
m = 0
while m < self.num:
p = []
for subset in self.all_data:
sub_p = subset.shuffle()
p += sub_p
sample_random.shuffle(p)
pick += p
m = len(pick)
self.pick = pick
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
def __len__(self):
return self.num
def find_dataset(self, index):
for dataset in self.all_data:
if dataset.start + dataset.num > index:
return dataset, index - dataset.start
def __getitem__(self, index, debug=False):
index = self.pick[index]
dataset, index = self.find_dataset(index)
gray = self.gray and self.gray > random.random()
neg = self.neg and self.neg > random.random()
if neg:
template = dataset.get_random_target(index)
if self.inner_neg and self.inner_neg > random.random():
search = dataset.get_random_target()
else:
search = random.choice(self.all_data).get_random_target()
else:
template, search = dataset.get_positive_pair(index)
def center_crop(img, size):
shape = img.shape[1]
if shape == size: return img
c = shape // 2
l = c - size // 2
r = c + size // 2 + 1
return img[l:r, l:r]
template_image, scale_z = self.imread(template[0])
if self.template_small:
template_image = center_crop(template_image, self.template_size)
search_image, scale_x = self.imread(search[0])
if self.crop_size > 0:
search_image = center_crop(search_image, self.crop_size)
def toBBox(image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
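# Rescale the annotated box the same way the exemplar crop was built: add
# 0.5*(w+h) of context, take the square-root area s_z, and map it to
# exemplar_size pixels (SiamFC-style normalization), centering the box in the patch.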
context_amount = 0.5
exemplar_size = self.template_size # 127
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox
template_box = toBBox(template_image, template[1])
search_box = toBBox(search_image, search[1])
template, _ = self.template_aug(template_image, template_box, self.template_size, gray=gray)
search, bbox = self.search_aug(search_image, search_box, self.search_size, gray=gray)
def draw(image, box, name):
image = image.copy()
x1, y1, x2, y2 = map(lambda x: int(round(x)), box)
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))
cv2.imwrite(name, image)
if debug:
draw(template_image, template_box, "debug/{:06d}_ot.jpg".format(index))
draw(search_image, search_box, "debug/{:06d}_os.jpg".format(index))
draw(template, _, "debug/{:06d}_t.jpg".format(index))
draw(search, bbox, "debug/{:06d}_s.jpg".format(index))
cls, delta, delta_weight = self.anchor_target(self.anchors, bbox, self.size, neg)
template, search = map(lambda x: np.transpose(x, (2, 0, 1)).astype(np.float32), [template, search])
return template, search, cls, delta, delta_weight, np.array(bbox, np.float32)
| 33.165232
| 117
| 0.539104
|
from __future__ import division
from torch.utils.data import Dataset
import numpy as np
import json
import random
import logging
from os.path import join
from utils.bbox_helper import *
from utils.anchors import Anchors
import math
import sys
pyv = sys.version[0]
import cv2
if pyv[0] == '3':
cv2.ocl.setUseOpenCL(False)
logger = logging.getLogger('global')
sample_random = random.Random()
sample_random.seed(123456)
class SubDataSet(object):
def __init__(self, cfg):
for string in ['root', 'anno']:
if string not in cfg:
raise Exception('SubDataSet need "{}"'.format(string))
with open(cfg['anno']) as fin:
logger.info("loading " + cfg['anno'])
self.labels = self.filter_zero(json.load(fin), cfg)
def isint(x):
try:
int(x)
return True
except:
return False
to_del = []
for video in self.labels:
for track in self.labels[video]:
frames = self.labels[video][track]
frames = list(map(int, filter(lambda x: isint(x), frames.keys())))
frames.sort()
self.labels[video][track]['frames'] = frames
if len(frames) <= 0:
logger.info("warning {}/{} has no frames.".format(video, track))
to_del.append((video, track))
for video, track in to_del:
del self.labels[video][track]
to_del = []
for video in self.labels:
if len(self.labels[video]) <= 0:
logger.info("warning {} has no tracks".format(video))
to_del.append(video)
for video in to_del:
del self.labels[video]
self.videos = list(self.labels.keys())
logger.info(cfg['anno'] + " loaded.")
self.root = "/"
self.start = 0
self.num = len(self.labels)
self.num_use = self.num
self.frame_range = 100
self.mark = "vid"
self.path_format = "{}.{}.{}.jpg"
self.pick = []
self.__dict__.update(cfg)
self.num_use = int(self.num_use)
self.shuffle()
def filter_zero(self, anno, cfg):
name = cfg.get('mark', '')
out = {}
tot = 0
new = 0
zero = 0
for video, tracks in anno.items():
new_tracks = {}
for trk, frames in tracks.items():
new_frames = {}
for frm, bbox in frames.items():
tot += 1
if len(bbox) == 4:
x1, y1, x2, y2 = bbox
w, h = x2 - x1, y2 -y1
else:
w, h= bbox
if w == 0 or h == 0:
logger.info('Error, {name} {video} {trk} {bbox}'.format(**locals()))
zero += 1
continue
new += 1
new_frames[frm] = bbox
if len(new_frames) > 0:
new_tracks[trk] = new_frames
if len(new_tracks) > 0:
out[video] = new_tracks
return out
def log(self):
logger.info('SubDataSet {name} start-index {start} select [{select}/{num}] path {format}'.format(
name=self.mark, start=self.start, select=self.num_use, num=self.num, format=self.path_format
))
def shuffle(self):
lists = list(range(self.start, self.start + self.num))
m = 0
pick = []
while m < self.num_use:
sample_random.shuffle(lists)
pick += lists
m += self.num
self.pick = pick[:self.num_use]
return self.pick
def get_image_anno(self, video, track, frame):
frame = "{:06d}".format(frame)
image_path = join(self.root, video, self.path_format.format(frame, track, 'x'))
image_anno = self.labels[video][track][frame]
return image_path, image_anno
def get_positive_pair(self, index):
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
if 'hard' not in track_info:
template_frame = random.randint(0, len(frames)-1)
left = max(template_frame - self.frame_range, 0)
right = min(template_frame + self.frame_range, len(frames)-1) + 1
search_range = frames[left:right]
template_frame = frames[template_frame]
search_frame = random.choice(search_range)
else:
search_frame = random.choice(track_info['hard'])
left = max(search_frame - self.frame_range, 0)
right = min(search_frame + self.frame_range, len(frames)-1) + 1
template_range = frames[left:right]
template_frame = random.choice(template_range)
search_frame = frames[search_frame]
return self.get_image_anno(video_name, track, template_frame), \
self.get_image_anno(video_name, track, search_frame)
def get_random_target(self, index=-1):
if index == -1:
index = random.randint(0, self.num-1)
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
frame = random.choice(frames)
return self.get_image_anno(video_name, track, frame)
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
bbox = [float(x) for x in bbox]
a = (out_sz-1) / (bbox[2]-bbox[0])
b = (out_sz-1) / (bbox[3]-bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
return crop
class Augmentation:
def __init__(self, cfg):
self.shift = 0
self.scale = 0
self.blur = 0
self.resize = False
self.rgbVar = np.array([[-0.55919361, 0.98062831, - 0.41940627],
[1.72091413, 0.19879334, - 1.82968581],
[4.64467907, 4.73710203, 4.88324118]], dtype=np.float32)
self.flip = 0
self.eig_vec = np.array([
[0.4009, 0.7192, -0.5675],
[-0.8140, -0.0045, -0.5808],
[0.4203, -0.6948, -0.5836],
], dtype=np.float32)
self.eig_val = np.array([[0.2175, 0.0188, 0.0045]], np.float32)
self.__dict__.update(cfg)
@staticmethod
def random():
return random.random() * 2 - 1.0
def blur_image(self, image):
def rand_kernel():
size = np.random.randn(1)
size = int(np.round(size)) * 2 + 1
if size < 0: return None
if random.random() < 0.5: return None
size = min(size, 45)
kernel = np.zeros((size, size))
c = int(size/2)
wx = random.random()
kernel[:, c] += 1. / size * wx
kernel[c, :] += 1. / size * (1-wx)
return kernel
kernel = rand_kernel()
if kernel is not None:
image = cv2.filter2D(image, -1, kernel)
return image
def __call__(self, image, bbox, size, gray=False):
if gray:
grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = np.zeros((grayed.shape[0], grayed.shape[1], 3), np.uint8)
image[:, :, 0] = image[:, :, 1] = image[:, :, 2] = grayed
shape = image.shape
crop_bbox = center2corner((shape[0]//2, shape[1]//2, size-1, size-1))
param = {}
if self.shift:
param['shift'] = (Augmentation.random() * self.shift, Augmentation.random() * self.shift)
if self.scale:
param['scale'] = ((1.0 + Augmentation.random() * self.scale), (1.0 + Augmentation.random() * self.scale))
crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)
x1 = crop_bbox.x1
y1 = crop_bbox.y1
bbox = BBox(bbox.x1 - x1, bbox.y1 - y1,
bbox.x2 - x1, bbox.y2 - y1)
if self.scale:
scale_x, scale_y = param['scale']
bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)
image = crop_hwc(image, crop_bbox, size)
offset = np.dot(self.rgbVar, np.random.randn(3, 1))
offset = offset[::-1]
offset = offset.reshape(3)
image = image - offset
if self.blur > random.random():
image = self.blur_image(image)
if self.resize:
imageSize = image.shape[:2]
ratio = max(math.pow(random.random(), 0.5), 0.2)
rand_size = (int(round(ratio*imageSize[0])), int(round(ratio*imageSize[1])))
image = cv2.resize(image, rand_size)
image = cv2.resize(image, tuple(imageSize))
if self.flip and self.flip > Augmentation.random():
image = cv2.flip(image, 1)
width = image.shape[1]
bbox = Corner(width - 1 - bbox.x2, bbox.y1, width - 1 - bbox.x1, bbox.y2)
return image, bbox
class AnchorTargetLayer:
def __init__(self, cfg):
self.thr_high = 0.6
self.thr_low = 0.3
self.negative = 16
self.rpn_batch = 64
self.positive = 16
self.__dict__.update(cfg)
def __call__(self, anchor, target, size, neg=False, need_iou=False):
anchor_num = anchor.anchors.shape[0]
cls = np.zeros((anchor_num, size, size), dtype=np.int64)
cls[...] = -1
delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)
delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)
def select(position, keep_num=16):
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
if neg:
l = size // 2 - 3
r = size // 2 + 3 + 1
cls[:, l:r, l:r] = 0
neg, neg_num = select(np.where(cls == 0), self.negative)
cls[:] = -1
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
overlap = np.zeros((anchor_num, size, size), dtype=np.float32)
return cls, delta, delta_weight, overlap
tcx, tcy, tw, th = corner2center(target)
anchor_box = anchor.all_anchors[0]
anchor_center = anchor.all_anchors[1]
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / w)
delta[3] = np.log(th / h)
overlap = IoU([x1, y1, x2, y2], target)
pos = np.where(overlap > self.thr_high)
neg = np.where(overlap < self.thr_low)
pos, pos_num = select(pos, self.positive)
neg, neg_num = select(neg, self.rpn_batch - pos_num)
cls[pos] = 1
delta_weight[pos] = 1. / (pos_num + 1e-6)
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
return cls, delta, delta_weight, overlap
class DataSets(Dataset):
def __init__(self, cfg, anchor_cfg, num_epoch=1):
super(DataSets, self).__init__()
global logger
logger = logging.getLogger('global')
self.anchors = Anchors(anchor_cfg)
self.template_size = 127
self.origin_size = 127
self.search_size = 255
self.size = 17
self.base_size = 0
self.crop_size = 0
if 'template_size' in cfg:
self.template_size = cfg['template_size']
if 'origin_size' in cfg:
self.origin_size = cfg['origin_size']
if 'search_size' in cfg:
self.search_size = cfg['search_size']
if 'base_size' in cfg:
self.base_size = cfg['base_size']
if 'size' in cfg:
self.size = cfg['size']
if (self.search_size - self.template_size) / self.anchors.stride + 1 + self.base_size != self.size:
raise Exception("size not match!")
if 'crop_size' in cfg:
self.crop_size = cfg['crop_size']
self.template_small = False
if 'template_small' in cfg and cfg['template_small']:
self.template_small = True
self.anchors.generate_all_anchors(im_c=self.search_size//2, size=self.size)
if 'anchor_target' not in cfg:
cfg['anchor_target'] = {}
self.anchor_target = AnchorTargetLayer(cfg['anchor_target'])
if 'datasets' not in cfg:
raise(Exception('DataSet need "{}"'.format('datasets')))
self.all_data = []
start = 0
self.num = 0
for name in cfg['datasets']:
dataset = cfg['datasets'][name]
dataset['mark'] = name
dataset['start'] = start
dataset = SubDataSet(dataset)
dataset.log()
self.all_data.append(dataset)
start += dataset.num
self.num += dataset.num_use
aug_cfg = cfg['augmentation']
self.template_aug = Augmentation(aug_cfg['template'])
self.search_aug = Augmentation(aug_cfg['search'])
self.gray = aug_cfg['gray']
self.neg = aug_cfg['neg']
self.inner_neg = 0 if 'inner_neg' not in aug_cfg else aug_cfg['inner_neg']
self.pick = None
if 'num' in cfg:
self.num = int(cfg['num'])
self.num *= num_epoch
self.shuffle()
self.infos = {
'template': self.template_size,
'search': self.search_size,
'template_small': self.template_small,
'gray': self.gray,
'neg': self.neg,
'inner_neg': self.inner_neg,
'crop_size': self.crop_size,
'anchor_target': self.anchor_target.__dict__,
'num': self.num // num_epoch
}
logger.info('dataset informations: \n{}'.format(json.dumps(self.infos, indent=4)))
def imread(self, path):
img = cv2.imread(path)
if self.origin_size == self.template_size:
return img, 1.0
def map_size(exe, size):
return int(round(((exe + 1) / (self.origin_size + 1) * (size+1) - 1)))
nsize = map_size(self.template_size, img.shape[1])
img = cv2.resize(img, (nsize, nsize))
return img, nsize / img.shape[1]
def shuffle(self):
pick = []
m = 0
while m < self.num:
p = []
for subset in self.all_data:
sub_p = subset.shuffle()
p += sub_p
sample_random.shuffle(p)
pick += p
m = len(pick)
self.pick = pick
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
def __len__(self):
return self.num
def find_dataset(self, index):
for dataset in self.all_data:
if dataset.start + dataset.num > index:
return dataset, index - dataset.start
def __getitem__(self, index, debug=False):
index = self.pick[index]
dataset, index = self.find_dataset(index)
gray = self.gray and self.gray > random.random()
neg = self.neg and self.neg > random.random()
if neg:
template = dataset.get_random_target(index)
if self.inner_neg and self.inner_neg > random.random():
search = dataset.get_random_target()
else:
search = random.choice(self.all_data).get_random_target()
else:
template, search = dataset.get_positive_pair(index)
def center_crop(img, size):
shape = img.shape[1]
if shape == size: return img
c = shape // 2
l = c - size // 2
r = c + size // 2 + 1
return img[l:r, l:r]
template_image, scale_z = self.imread(template[0])
if self.template_small:
template_image = center_crop(template_image, self.template_size)
search_image, scale_x = self.imread(search[0])
if self.crop_size > 0:
search_image = center_crop(search_image, self.crop_size)
def toBBox(image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = self.template_size
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox
template_box = toBBox(template_image, template[1])
search_box = toBBox(search_image, search[1])
template, _ = self.template_aug(template_image, template_box, self.template_size, gray=gray)
search, bbox = self.search_aug(search_image, search_box, self.search_size, gray=gray)
def draw(image, box, name):
image = image.copy()
x1, y1, x2, y2 = map(lambda x: int(round(x)), box)
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))
cv2.imwrite(name, image)
if debug:
draw(template_image, template_box, "debug/{:06d}_ot.jpg".format(index))
draw(search_image, search_box, "debug/{:06d}_os.jpg".format(index))
draw(template, _, "debug/{:06d}_t.jpg".format(index))
draw(search, bbox, "debug/{:06d}_s.jpg".format(index))
cls, delta, delta_weight = self.anchor_target(self.anchors, bbox, self.size, neg)
template, search = map(lambda x: np.transpose(x, (2, 0, 1)).astype(np.float32), [template, search])
return template, search, cls, delta, delta_weight, np.array(bbox, np.float32)
| true
| true
|
1c409f3486947fc3583859ea467e7541e363dce0
| 1,873
|
py
|
Python
|
classifier.py
|
adithyasunil26/LID-Excitation-Features
|
ae15e3f24016723ddbb832421746d2c0ef64fd03
|
[
"MIT"
] | null | null | null |
classifier.py
|
adithyasunil26/LID-Excitation-Features
|
ae15e3f24016723ddbb832421746d2c0ef64fd03
|
[
"MIT"
] | null | null | null |
classifier.py
|
adithyasunil26/LID-Excitation-Features
|
ae15e3f24016723ddbb832421746d2c0ef64fd03
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.metrics import classification_report
print("Loading data...")
df=pd.read_csv('generated_csvs/df.csv')
df=df.drop('Unnamed: 0',axis=1)
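# Scale each excitation-feature column to unit L2 norm before training.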
df['gvv']=preprocessing.normalize([df['gvv'].values])[0]
df['ep_str']=preprocessing.normalize([df['ep_str'].values])[0]
df['ep_inst']=preprocessing.normalize([df['ep_inst'].values])[0]
df['rmfcc']=preprocessing.normalize([df['rmfcc'].values])[0]
print("Splitting data...")
X_train, X_test, y_train, y_test = train_test_split(df.drop('lang',axis=1), df['lang'], test_size=0.2, random_state=1)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
print("Decision Tree Classifier:")
print("Training model...")
clf = DecisionTreeClassifier().fit(X_train, y_train)
print("Making predictions...")
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on validation set: {:.2f}'
.format(clf.score(X_val, y_val)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
print("Random Forest Classifier:")
print("Training model...")
clf = RandomForestClassifier().fit(X_train, y_train)
print("Making predictions...")
print('Accuracy of Random Forest classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Random Forest classifier on validation set: {:.2f}'
.format(clf.score(X_val, y_val)))
print('Accuracy of Random Forest classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
print(classification_report(y_test, clf.predict(X_test)))
| 40.717391
| 118
| 0.744794
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.metrics import classification_report
print("Loading data...")
df=pd.read_csv('generated_csvs/df.csv')
df=df.drop('Unnamed: 0',axis=1)
df['gvv']=preprocessing.normalize([df['gvv'].values])[0]
df['ep_str']=preprocessing.normalize([df['ep_str'].values])[0]
df['ep_inst']=preprocessing.normalize([df['ep_inst'].values])[0]
df['rmfcc']=preprocessing.normalize([df['rmfcc'].values])[0]
print("Splitting data...")
X_train, X_test, y_train, y_test = train_test_split(df.drop('lang',axis=1), df['lang'], test_size=0.2, random_state=1)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
print("Decision Tree Classifier:")
print("Training model...")
clf = DecisionTreeClassifier().fit(X_train, y_train)
print("Making predictions...")
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on validation set: {:.2f}'
.format(clf.score(X_val, y_val)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
print("Random Forest Classifier:")
print("Training model...")
clf = RandomForestClassifier().fit(X_train, y_train)
print("Making predictions...")
print('Accuracy of Random Forest classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Random Forest classifier on validation set: {:.2f}'
.format(clf.score(X_val, y_val)))
print('Accuracy of Random Forest classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
print(classification_report(y_test, clf.predict(X_test)))
| true
| true
|
1c40a0eadbf9d3c24d1cbe180d9dda2ca9e5488c
| 249
|
py
|
Python
|
molmodmt/lib/__init__.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
molmodmt/lib/__init__.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
molmodmt/lib/__init__.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
from .libbox import module_box as box
from .libgeometry import module_geometry as geometry
from .libmath import module_math as math
from .libcom import module_com as com
from .libpbc import module_pbc as pbc
from .librmsd import module_rmsd as rmsd
| 35.571429
| 52
| 0.831325
|
from .libbox import module_box as box
from .libgeometry import module_geometry as geometry
from .libmath import module_math as math
from .libcom import module_com as com
from .libpbc import module_pbc as pbc
from .librmsd import module_rmsd as rmsd
| true
| true
|
1c40a12cbe2a34082679b984edcde861e85f515c
| 2,167
|
py
|
Python
|
clinicadl/train/tasks/reconstruction_cli.py
|
sophieloiz/clinicadl
|
d26a1c6ce4f6da9de59e3d15c27ae3be2d33bc9d
|
[
"MIT"
] | null | null | null |
clinicadl/train/tasks/reconstruction_cli.py
|
sophieloiz/clinicadl
|
d26a1c6ce4f6da9de59e3d15c27ae3be2d33bc9d
|
[
"MIT"
] | null | null | null |
clinicadl/train/tasks/reconstruction_cli.py
|
sophieloiz/clinicadl
|
d26a1c6ce4f6da9de59e3d15c27ae3be2d33bc9d
|
[
"MIT"
] | null | null | null |
import click
from .. import train_option
from .task_utils import task_launcher
@click.command(name="reconstruction", no_args_is_help=True)
# Mandatory arguments
@train_option.caps_directory
@train_option.preprocessing_json
@train_option.tsv_directory
@train_option.output_maps
# Options
@train_option.config_file
# Computational
@train_option.gpu
@train_option.n_proc
@train_option.batch_size
@train_option.evaluation_steps
# Reproducibility
@train_option.seed
@train_option.deterministic
@train_option.compensation
# Model
@train_option.architecture
@train_option.multi_network
# Data
@train_option.multi_cohort
@train_option.diagnoses
@train_option.baseline
@train_option.normalize
@train_option.data_augmentation
@train_option.sampler
# Cross validation
@train_option.n_splits
@train_option.split
# Optimization
@train_option.optimizer
@train_option.epochs
@train_option.learning_rate
@train_option.weight_decay
@train_option.dropout
@train_option.patience
@train_option.tolerance
@train_option.accumulation_steps
# transfer learning
@train_option.transfer_path
@train_option.transfer_selection_metric
# Task-related
@train_option.selection_metrics
@train_option.reconstruction_loss
def cli(**kwargs):
"""
Train a deep learning model to learn a reconstruction task on neuroimaging data.
CAPS_DIRECTORY is the CAPS folder from where tensors will be loaded.
PREPROCESSING_JSON is the name of the JSON file in CAPS_DIRECTORY/tensor_extraction folder where
    all information about extraction is stored in order to read the wanted tensors.
    TSV_DIRECTORY is a folder where TSV files defining train and validation sets are stored.
OUTPUT_MAPS_DIRECTORY is the path to the MAPS folder where outputs and results will be saved.
Options for this command can be input by declaring argument on the command line or by providing a
configuration file in TOML format. For more details, please visit the documentation:
https://clinicadl.readthedocs.io/en/stable/Train/Introduction/#configuration-file
"""
task_specific_options = ["selection_metrics", "loss"]
task_launcher("reconstruction", task_specific_options, **kwargs)
| 30.521127
| 101
| 0.820951
|
import click
from .. import train_option
from .task_utils import task_launcher
@click.command(name="reconstruction", no_args_is_help=True)
@train_option.caps_directory
@train_option.preprocessing_json
@train_option.tsv_directory
@train_option.output_maps
@train_option.config_file
@train_option.gpu
@train_option.n_proc
@train_option.batch_size
@train_option.evaluation_steps
@train_option.seed
@train_option.deterministic
@train_option.compensation
@train_option.architecture
@train_option.multi_network
@train_option.multi_cohort
@train_option.diagnoses
@train_option.baseline
@train_option.normalize
@train_option.data_augmentation
@train_option.sampler
@train_option.n_splits
@train_option.split
@train_option.optimizer
@train_option.epochs
@train_option.learning_rate
@train_option.weight_decay
@train_option.dropout
@train_option.patience
@train_option.tolerance
@train_option.accumulation_steps
@train_option.transfer_path
@train_option.transfer_selection_metric
@train_option.selection_metrics
@train_option.reconstruction_loss
def cli(**kwargs):
task_specific_options = ["selection_metrics", "loss"]
task_launcher("reconstruction", task_specific_options, **kwargs)
| true
| true
|
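The reconstruction command above is assembled by stacking shared click options and forwarding everything to task_launcher via **kwargs. A stripped-down sketch of that pattern with a hypothetical launcher and options (the names demo_launcher, --epochs and --loss are placeholders, not part of the clinicadl API):
import click
def demo_launcher(task, task_specific_options, **kwargs):
    # Stand-in for task_launcher: just show what the command collected.
    print(task, task_specific_options, kwargs)
@click.command(name="demo")
@click.option("--epochs", type=int, default=10, help="Illustrative shared option.")
@click.option("--loss", type=str, default="mse", help="Illustrative task-specific option.")
def cli(**kwargs):
    # Every decorated option arrives in kwargs and is passed through unchanged.
    demo_launcher("demo", ["loss"], **kwargs)
if __name__ == "__main__":
    cli()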
1c40a1338382a36ef22891301005bcea31b2b08f
| 644
|
py
|
Python
|
src/apis/text/text/similarities/all-MiniLM-L6-v2/all-MiniLM-L6-v2.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 2
|
2021-11-09T07:18:06.000Z
|
2022-01-04T19:37:17.000Z
|
src/apis/text/text/similarities/all-MiniLM-L6-v2/all-MiniLM-L6-v2.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 4
|
2021-11-04T08:28:59.000Z
|
2021-11-07T05:59:59.000Z
|
src/apis/text/text/similarities/all-MiniLM-L6-v2/all-MiniLM-L6-v2.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 1
|
2022-01-07T09:12:22.000Z
|
2022-01-07T09:12:22.000Z
|
def predict(sentence_1: str, sentence_2: str) -> str:
"""
    For two given sentences, compute how similar they are.
:param sentence_1: first sentence to compare
:param sentence_2: second sentence to compare
:return: similarity score (between 0 and 1)
"""
from sentence_transformers import SentenceTransformer, util
model = SentenceTransformer('all-MiniLM-L6-v2')
embedding1 = model.encode(sentence_1, convert_to_tensor=True)
embedding2 = model.encode(sentence_2, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
return str(cosine_scores.item())
| 30.666667
| 65
| 0.728261
|
def predict(sentence_1: str, sentence_2: str) -> str:
from sentence_transformers import SentenceTransformer, util
model = SentenceTransformer('all-MiniLM-L6-v2')
embedding1 = model.encode(sentence_1, convert_to_tensor=True)
embedding2 = model.encode(sentence_2, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
return str(cosine_scores.item())
| true
| true
|
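A possible way to exercise the same model call as predict() above, assuming sentence-transformers is installed and the all-MiniLM-L6-v2 weights can be downloaded; the two sentences are arbitrary examples:
from sentence_transformers import SentenceTransformer, util
# Mirrors the body of predict(): encode both sentences and take their cosine similarity.
model = SentenceTransformer('all-MiniLM-L6-v2')
emb1 = model.encode("A man is playing a guitar.", convert_to_tensor=True)
emb2 = model.encode("Someone is playing an instrument.", convert_to_tensor=True)
print(util.pytorch_cos_sim(emb1, emb2).item())  # close to 1 for paraphrases, near 0 for unrelated text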
1c40a179b5c9b26cca7c57dde90dfa09fa7626b7
| 13,221
|
py
|
Python
|
newsletter/tests/test_admin.py
|
vikrantsingh-vs53/Final-Senior-Year-Project-
|
94f2786956ebaad08711701ef03071d1051e368c
|
[
"MIT"
] | 68
|
2019-05-02T06:54:59.000Z
|
2022-03-08T07:54:06.000Z
|
newsletter/tests/test_admin.py
|
arjunkr123/Final-Senior-Year-Project-
|
12b65915dbd9bf5a4a2ae7e3c56c7eaedcb8646b
|
[
"MIT"
] | 4
|
2019-12-26T16:41:30.000Z
|
2022-01-18T22:02:03.000Z
|
newsletter/tests/test_admin.py
|
arjunkr123/Final-Senior-Year-Project-
|
12b65915dbd9bf5a4a2ae7e3c56c7eaedcb8646b
|
[
"MIT"
] | 40
|
2019-03-08T20:21:05.000Z
|
2022-03-15T03:48:46.000Z
|
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import patch_logger
from newsletter import admin # Triggers model admin registration
from newsletter.admin_utils import make_subscription
from newsletter.models import Message, Newsletter, Submission, Subscription
test_files_dir = os.path.join(os.path.dirname(__file__), 'files')
class AdminTestMixin(object):
def setUp(self):
super(AdminTestMixin, self).setUp()
User = get_user_model()
self.password = 'johnpassword'
self.admin_user = User.objects.create_superuser(
'john', 'lennon@thebeatles.com', self.password
)
self.client.login(username=self.admin_user.username, password=self.password)
self.newsletter = Newsletter.objects.create(
sender='Test Sender', title='Test Newsletter',
slug='test-newsletter', visible=True, email='test@test.com',
)
self.message = Message.objects.create(
newsletter=self.newsletter, title='Test message', slug='test-message'
)
class AdminTestCase(AdminTestMixin, TestCase):
def admin_import_file(self, source_file, ignore_errors=''):
""" Upload an address file for import to admin. """
import_url = reverse('admin:newsletter_subscription_import')
with open(os.path.join(test_files_dir, source_file), 'rb') as fh:
return self.client.post(import_url, {
'newsletter': self.newsletter.pk,
'address_file': fh,
'ignore_errors': ignore_errors,
}, follow=True)
def admin_import_subscribers(self, source_file, ignore_errors=''):
"""
Import process of a CSV/LDIF/VCARD file containing subscription
addresses from the admin site.
"""
response = self.admin_import_file(source_file, ignore_errors)
self.assertContains(response, "<h1>Confirm import</h1>")
import_confirm_url = reverse(
'admin:newsletter_subscription_import_confirm'
)
return self.client.post(
import_confirm_url, {'confirm': True}, follow=True
)
def test_newsletter_admin(self):
"""
Testing newsletter admin change list display.
"""
changelist_url = reverse('admin:newsletter_newsletter_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<a href="../message/?newsletter__id__exact=%s">Messages</a>' % self.newsletter.pk
)
self.assertContains(
response,
'<a href="../subscription/?newsletter__id__exact=%s">Subscriptions</a>' % self.newsletter.pk
)
def test_subscription_admin(self):
"""
Testing subscription admin change list display and actions.
"""
Subscription.objects.bulk_create([
Subscription(
newsletter=self.newsletter, name_field='Sara',
email_field='sara@example.org', subscribed=True,
),
Subscription(
newsletter=self.newsletter, name_field='Bob',
email_field='bob@example.org', unsubscribed=True,
),
Subscription(
newsletter=self.newsletter, name_field='Khaled',
email_field='khaled@example.org', subscribed=False,
unsubscribed=False,
),
])
changelist_url = reverse('admin:newsletter_subscription_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/icon-no.gif" width="10" height="10" alt="Unsubscribed"/>',
html=True
)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/icon-yes.gif" width="10" height="10" alt="Subscribed"/>',
html=True
)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/waiting.gif" width="10" height="10" alt="Unactivated"/>',
html=True
)
# Test actions
response = self.client.post(changelist_url, data={
'index': 0,
'action': ['make_subscribed'],
'_selected_action': [str(Subscription.objects.get(name_field='Khaled').pk)],
})
self.assertTrue(Subscription.objects.get(name_field='Khaled').subscribed)
response = self.client.post(changelist_url, data={
'index': 0,
'action': ['make_unsubscribed'],
'_selected_action': [str(Subscription.objects.get(name_field='Sara').pk)],
})
self.assertFalse(Subscription.objects.get(name_field='Sara').subscribed)
def test_admin_import_get_form(self):
""" Test Import form. """
import_url = reverse('admin:newsletter_subscription_import')
response = self.client.get(import_url)
self.assertContains(response, "<h1>Import addresses</h1>")
def test_admin_import_subscribers_csv(self):
response = self.admin_import_subscribers('addresses.csv')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_ldif(self):
response = self.admin_import_subscribers('addresses.ldif')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_vcf(self):
response = self.admin_import_subscribers('addresses.vcf')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_duplicates(self):
""" Test importing a file with duplicate addresses. """
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_subscribers(
'addresses_duplicates.csv', ignore_errors='true'
)
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(len(messages), 2)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_existing(self):
""" Test importing already existing subscriptions. """
subscription = make_subscription(self.newsletter, 'john@example.org')
subscription.save()
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_subscribers(
'addresses.csv', ignore_errors='true'
)
self.assertContains(
response,
"1 subscription has been successfully added."
)
self.assertEqual(len(messages), 1)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_file('addresses.csv')
self.assertContains(
response,
"Some entries are already subscribed to."
)
self.assertEqual(len(messages), 1)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_permission(self):
"""
To be able to import subscriptions, user must have the
'add_subscription' permission.
"""
self.admin_user.is_superuser = False
self.admin_user.save()
import_url = reverse('admin:newsletter_subscription_import')
response = self.client.get(import_url)
self.assertEqual(response.status_code, 403)
self.admin_user.user_permissions.add(
Permission.objects.get(codename='add_subscription')
)
response = self.client.get(import_url)
self.assertEqual(response.status_code, 200)
def test_admin_import_subscribers_no_addresses(self):
"""
Cannot confirm subscribers import if 'addresses' misses in session.
"""
import_url = reverse('admin:newsletter_subscription_import')
import_confirm_url = reverse(
'admin:newsletter_subscription_import_confirm'
)
response = self.client.post(
import_confirm_url, {'confirm': True}
)
self.assertRedirects(response, import_url)
def test_message_admin(self):
"""
Testing message admin change list display and message previews.
"""
changelist_url = reverse('admin:newsletter_message_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<a href="%d/preview/">Preview</a>' % self.message.pk,
html=True
)
# Previews
preview_url = reverse('admin:newsletter_message_preview', args=[self.message.pk])
preview_text_url = reverse('admin:newsletter_message_preview_text', args=[self.message.pk])
preview_html_url = reverse('admin:newsletter_message_preview_html', args=[self.message.pk])
response = self.client.get(preview_url)
self.assertContains(
response,
'<iframe src ="%s" width="960px" height="720px"></iframe>' % preview_html_url,
html=True
)
self.assertContains(
response,
'<iframe src ="%s" width="960px" height="720px"></iframe>' % preview_text_url,
html=True
)
response = self.client.get(preview_text_url)
self.assertEqual(
response.content,
b'''++++++++++++++++++++
Test Newsletter: Test message
++++++++++++++++++++
++++++++++++++++++++
Unsubscribe: http://example.com/newsletter/test-newsletter/unsubscribe/
''')
response = self.client.get(preview_html_url)
self.assertContains(response, '<h1>Test Newsletter</h1>')
self.assertContains(response, '<h2>Test message</h2>')
self.assertContains(response, self.newsletter.unsubscribe_url())
# HTML preview returns 404 if send_html is False
self.newsletter.send_html = False
self.newsletter.save()
response = self.client.get(preview_html_url)
self.assertEqual(response.status_code, 404)
class SubmissionAdminTests(AdminTestMixin, TestCase):
""" Tests for Submission admin. """
def setUp(self):
super(SubmissionAdminTests, self).setUp()
self.add_url = reverse('admin:newsletter_submission_add')
self.changelist_url = reverse('admin:newsletter_submission_changelist')
def test_changelist(self):
""" Testing submission admin change list display. """
# Assure there's a submission
Submission.from_message(self.message)
response = self.client.get(self.changelist_url)
self.assertContains(
response,
'<td class="field-admin_status_text">Not sent.</td>'
)
def test_duplicate_fail(self):
""" Test that a message cannot be published twice. """
# Assure there's a submission
Submission.from_message(self.message)
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
})
self.assertContains(
response,
"This message has already been published in some other submission."
)
def test_add(self):
""" Test adding a Submission. """
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
}, follow=True)
self.assertContains(response, "added")
self.assertEqual(Submission.objects.count(), 1)
submission = Submission.objects.all()[0]
self.assertEqual(submission.message, self.message)
def test_add_wrongmessage_regression(self):
""" Regression test for #170. """
# Create a second message
Message.objects.create(
newsletter=self.newsletter, title='2nd message', slug='test-message-2'
)
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
}, follow=True)
self.assertContains(response, "added")
self.assertEqual(Submission.objects.count(), 1)
submission = Submission.objects.all()[0]
self.assertEqual(submission.message, self.message)
| 35.540323
| 110
| 0.625596
|
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import patch_logger
from newsletter import admin
from newsletter.admin_utils import make_subscription
from newsletter.models import Message, Newsletter, Submission, Subscription
test_files_dir = os.path.join(os.path.dirname(__file__), 'files')
class AdminTestMixin(object):
def setUp(self):
super(AdminTestMixin, self).setUp()
User = get_user_model()
self.password = 'johnpassword'
self.admin_user = User.objects.create_superuser(
'john', 'lennon@thebeatles.com', self.password
)
self.client.login(username=self.admin_user.username, password=self.password)
self.newsletter = Newsletter.objects.create(
sender='Test Sender', title='Test Newsletter',
slug='test-newsletter', visible=True, email='test@test.com',
)
self.message = Message.objects.create(
newsletter=self.newsletter, title='Test message', slug='test-message'
)
class AdminTestCase(AdminTestMixin, TestCase):
def admin_import_file(self, source_file, ignore_errors=''):
import_url = reverse('admin:newsletter_subscription_import')
with open(os.path.join(test_files_dir, source_file), 'rb') as fh:
return self.client.post(import_url, {
'newsletter': self.newsletter.pk,
'address_file': fh,
'ignore_errors': ignore_errors,
}, follow=True)
def admin_import_subscribers(self, source_file, ignore_errors=''):
response = self.admin_import_file(source_file, ignore_errors)
self.assertContains(response, "<h1>Confirm import</h1>")
import_confirm_url = reverse(
'admin:newsletter_subscription_import_confirm'
)
return self.client.post(
import_confirm_url, {'confirm': True}, follow=True
)
def test_newsletter_admin(self):
changelist_url = reverse('admin:newsletter_newsletter_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<a href="../message/?newsletter__id__exact=%s">Messages</a>' % self.newsletter.pk
)
self.assertContains(
response,
'<a href="../subscription/?newsletter__id__exact=%s">Subscriptions</a>' % self.newsletter.pk
)
def test_subscription_admin(self):
Subscription.objects.bulk_create([
Subscription(
newsletter=self.newsletter, name_field='Sara',
email_field='sara@example.org', subscribed=True,
),
Subscription(
newsletter=self.newsletter, name_field='Bob',
email_field='bob@example.org', unsubscribed=True,
),
Subscription(
newsletter=self.newsletter, name_field='Khaled',
email_field='khaled@example.org', subscribed=False,
unsubscribed=False,
),
])
changelist_url = reverse('admin:newsletter_subscription_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/icon-no.gif" width="10" height="10" alt="Unsubscribed"/>',
html=True
)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/icon-yes.gif" width="10" height="10" alt="Subscribed"/>',
html=True
)
self.assertContains(
response,
'<img src="/static/newsletter/admin/img/waiting.gif" width="10" height="10" alt="Unactivated"/>',
html=True
)
response = self.client.post(changelist_url, data={
'index': 0,
'action': ['make_subscribed'],
'_selected_action': [str(Subscription.objects.get(name_field='Khaled').pk)],
})
self.assertTrue(Subscription.objects.get(name_field='Khaled').subscribed)
response = self.client.post(changelist_url, data={
'index': 0,
'action': ['make_unsubscribed'],
'_selected_action': [str(Subscription.objects.get(name_field='Sara').pk)],
})
self.assertFalse(Subscription.objects.get(name_field='Sara').subscribed)
def test_admin_import_get_form(self):
import_url = reverse('admin:newsletter_subscription_import')
response = self.client.get(import_url)
self.assertContains(response, "<h1>Import addresses</h1>")
def test_admin_import_subscribers_csv(self):
response = self.admin_import_subscribers('addresses.csv')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_ldif(self):
response = self.admin_import_subscribers('addresses.ldif')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_vcf(self):
response = self.admin_import_subscribers('addresses.vcf')
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_duplicates(self):
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_subscribers(
'addresses_duplicates.csv', ignore_errors='true'
)
self.assertContains(
response,
"2 subscriptions have been successfully added."
)
self.assertEqual(len(messages), 2)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_existing(self):
subscription = make_subscription(self.newsletter, 'john@example.org')
subscription.save()
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_subscribers(
'addresses.csv', ignore_errors='true'
)
self.assertContains(
response,
"1 subscription has been successfully added."
)
self.assertEqual(len(messages), 1)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
with patch_logger('newsletter.addressimport.parsers', 'warning') as messages:
response = self.admin_import_file('addresses.csv')
self.assertContains(
response,
"Some entries are already subscribed to."
)
self.assertEqual(len(messages), 1)
self.assertEqual(self.newsletter.subscription_set.count(), 2)
def test_admin_import_subscribers_permission(self):
self.admin_user.is_superuser = False
self.admin_user.save()
import_url = reverse('admin:newsletter_subscription_import')
response = self.client.get(import_url)
self.assertEqual(response.status_code, 403)
self.admin_user.user_permissions.add(
Permission.objects.get(codename='add_subscription')
)
response = self.client.get(import_url)
self.assertEqual(response.status_code, 200)
def test_admin_import_subscribers_no_addresses(self):
import_url = reverse('admin:newsletter_subscription_import')
import_confirm_url = reverse(
'admin:newsletter_subscription_import_confirm'
)
response = self.client.post(
import_confirm_url, {'confirm': True}
)
self.assertRedirects(response, import_url)
def test_message_admin(self):
changelist_url = reverse('admin:newsletter_message_changelist')
response = self.client.get(changelist_url)
self.assertContains(
response,
'<a href="%d/preview/">Preview</a>' % self.message.pk,
html=True
)
preview_url = reverse('admin:newsletter_message_preview', args=[self.message.pk])
preview_text_url = reverse('admin:newsletter_message_preview_text', args=[self.message.pk])
preview_html_url = reverse('admin:newsletter_message_preview_html', args=[self.message.pk])
response = self.client.get(preview_url)
self.assertContains(
response,
'<iframe src ="%s" width="960px" height="720px"></iframe>' % preview_html_url,
html=True
)
self.assertContains(
response,
'<iframe src ="%s" width="960px" height="720px"></iframe>' % preview_text_url,
html=True
)
response = self.client.get(preview_text_url)
self.assertEqual(
response.content,
b'''++++++++++++++++++++
Test Newsletter: Test message
++++++++++++++++++++
++++++++++++++++++++
Unsubscribe: http://example.com/newsletter/test-newsletter/unsubscribe/
''')
response = self.client.get(preview_html_url)
self.assertContains(response, '<h1>Test Newsletter</h1>')
self.assertContains(response, '<h2>Test message</h2>')
self.assertContains(response, self.newsletter.unsubscribe_url())
self.newsletter.send_html = False
self.newsletter.save()
response = self.client.get(preview_html_url)
self.assertEqual(response.status_code, 404)
class SubmissionAdminTests(AdminTestMixin, TestCase):
def setUp(self):
super(SubmissionAdminTests, self).setUp()
self.add_url = reverse('admin:newsletter_submission_add')
self.changelist_url = reverse('admin:newsletter_submission_changelist')
def test_changelist(self):
Submission.from_message(self.message)
response = self.client.get(self.changelist_url)
self.assertContains(
response,
'<td class="field-admin_status_text">Not sent.</td>'
)
def test_duplicate_fail(self):
# Assure there's a submission
Submission.from_message(self.message)
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
})
self.assertContains(
response,
"This message has already been published in some other submission."
)
def test_add(self):
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
}, follow=True)
self.assertContains(response, "added")
self.assertEqual(Submission.objects.count(), 1)
submission = Submission.objects.all()[0]
self.assertEqual(submission.message, self.message)
def test_add_wrongmessage_regression(self):
Message.objects.create(
newsletter=self.newsletter, title='2nd message', slug='test-message-2'
)
response = self.client.post(self.add_url, data={
'message': self.message.pk,
'publish_date_0': '2016-01-09',
'publish_date_1': '07:24',
'publish': 'on',
}, follow=True)
self.assertContains(response, "added")
self.assertEqual(Submission.objects.count(), 1)
submission = Submission.objects.all()[0]
self.assertEqual(submission.message, self.message)
| true
| true
|
1c40a1b821d8871c7412044d06a39c3001541806
| 7,979
|
py
|
Python
|
azure/train_landcover.py
|
mjevans26/Satellite_ComputerVision
|
013c69c5cf6f86126e6ad2d715f8b13b300e29a8
|
[
"BSD-2-Clause"
] | 10
|
2020-04-06T04:51:27.000Z
|
2022-02-23T16:00:43.000Z
|
azure/train_landcover.py
|
mjevans26/Satellite_ComputerVision
|
013c69c5cf6f86126e6ad2d715f8b13b300e29a8
|
[
"BSD-2-Clause"
] | 2
|
2020-04-06T06:25:35.000Z
|
2021-03-22T21:55:41.000Z
|
azure/train_landcover.py
|
mjevans26/Satellite_ComputerVision
|
013c69c5cf6f86126e6ad2d715f8b13b300e29a8
|
[
"BSD-2-Clause"
] | 5
|
2020-04-18T16:44:44.000Z
|
2021-08-31T00:10:08.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 12:13:11 2021
@author: MEvans
"""
from utils import model_tools, processing
from utils.prediction_tools import makePredDataset, callback_predictions, plot_to_image
from matplotlib import pyplot as plt
import argparse
import os
import glob
import json
import math
import tensorflow as tf
from datetime import datetime
from azureml.core import Run, Workspace, Model
# Set Global variables
parser = argparse.ArgumentParser()
parser.add_argument('--train_data', type = str, required = True, help = 'Training datasets')
parser.add_argument('--eval_data', type = str, required = True, help = 'Evaluation datasets')
parser.add_argument('--test_data', type = str, default = None, help = 'directory containing test image(s) and mixer')
parser.add_argument('--model_id', type = str, required = False, default = None, help = 'model id for continued training')
parser.add_argument('-lr', '--learning_rate', type = float, default = 0.001, help = 'Initial learning rate')
parser.add_argument('-w', '--weight', type = float, default = 1.0, help = 'Positive sample weight for iou, bce, etc.')
parser.add_argument('--bias', type = float, default = None, help = 'bias value for keras output layer initializer')
parser.add_argument('-e', '--epochs', type = int, default = 10, help = 'Number of epochs to train the model for')
parser.add_argument('-b', '--batch', type = int, default = 16, help = 'Training batch size')
parser.add_argument('--size', type = int, default = 3000, help = 'Size of training dataset')
parser.add_argument('--kernel_size', type = int, default = 256, dest = 'kernel_size', help = 'Size in pixels of incoming patches')
parser.add_argument('--response', type = str, required = True, default = 'landcover', help = 'Name of the response variable in tfrecords')
parser.add_argument('--bands', type = str, nargs = '+', required = False, default = ['B3_summer', 'B3_fall', 'B3_spring', 'B4_summer', 'B4_fall', 'B4_spring', 'B5_summer', 'B5_fall', 'B5_spring', 'B6_summer', 'B6_fall', 'B6_spring', 'B8_summer', 'B8_fall', 'B8_spring', 'B11_summer', 'B11_fall', 'B11_spring', 'B12_summer', 'B12_fall', 'B12_spring', 'R', 'G', 'B', 'N', 'lidar_intensity', 'geomorphons'])
parser.add_argument('--splits', type = int, nargs = '+', required = False, default = None )
parser.add_argument('--one_hot_levels', type = int, nargs = '+', required = False, default = [10])
parser.add_argument('--one_hot_names', type = str, nargs = '+', required = False, default = ['landcover'])
args = parser.parse_args()
ONE_HOT = dict(zip(args.one_hot_names, args.one_hot_levels))
SPLITS = args.splits
TRAIN_SIZE = args.size
BATCH = args.batch
EPOCHS = args.epochs
BIAS = args.bias
WEIGHT = args.weight
LR = args.learning_rate
BANDS = args.bands
RESPONSE = args.response
if RESPONSE in ONE_HOT.keys():
RESPONSE = ONE_HOT
OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)
DEPTH = len(BANDS)
print(BANDS)
METRICS = {
'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],
'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]
}
FEATURES = BANDS + [RESPONSE]
# round the training data size up to nearest 100 to define buffer
BUFFER = math.ceil(args.size/100)*100
# Specify the size and shape of patches expected by the model.
KERNEL_SIZE = args.kernel_size
KERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE]
COLUMNS = [
tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES
]
FEATURES_DICT = dict(zip(FEATURES, COLUMNS))
# create special folders './outputs' and './logs' which automatically get saved
os.makedirs('outputs', exist_ok = True)
os.makedirs('logs', exist_ok = True)
out_dir = './outputs'
log_dir = './logs'
# create training dataset
# train_files = glob.glob(os.path.join(args.data_folder, 'training', 'UNET_256_[A-Z]*.gz'))
# eval_files = glob.glob(os.path.join(args.data_folder, 'eval', 'UNET_256_[A-Z]*.gz'))
i = 1
train_files = []
for root, dirs, files in os.walk(args.train_data):
for f in files:
if i%2==0:
train_files.append(os.path.join(root, f))
i+=1
eval_files = []
for root, dirs, files in os.walk(args.eval_data):
for f in files:
if i%2==0:
eval_files.append(os.path.join(root, f))
i+=1
# train_files = glob.glob(os.path.join(args.train_data, 'UNET_256_[A-Z]*.gz'))
# eval_files = glob.glob(os.path.join(args.eval_data, 'UNET_256_[A-Z]*.gz'))
training = processing.get_training_dataset(
files = train_files,
ftDict = FEATURES_DICT,
features = BANDS,
response = RESPONSE,
buff = BUFFER,
batch = BATCH,
repeat = True,
splits = SPLITS,
one_hot = ONE_HOT)
evaluation = processing.get_eval_dataset(
files = eval_files,
ftDict = FEATURES_DICT,
features = BANDS,
response = RESPONSE,
splits = SPLITS,
one_hot = ONE_HOT)
## DEFINE CALLBACKS
def get_gen_dice(y_true, y_pred):
return model_tools.gen_dice(y_true, y_pred, global_weights = WEIGHT)
# get the current time
now = datetime.now()
date = now.strftime("%d%b%y")
date
# define a checkpoint callback to save best models during training
checkpoint = tf.keras.callbacks.ModelCheckpoint(
os.path.join(out_dir, 'best_weights_' + date + '.hdf5'),
monitor='val_mean_iou',
verbose=1,
save_best_only=True,
mode='max'
)
# define a tensorboard callback to write training logs
tensorboard = tf.keras.callbacks.TensorBoard(log_dir = log_dir)
# get the run context
run = Run.get_context()
exp = run.experiment
ws = exp.workspace
## BUILD THE MODEL
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# Open a strategy scope.
with strategy.scope():
METRICS = {
'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],
'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]
}
# METRICS = [tf.keras.metrics.categorical_accuracy, tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]
OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)
m = model_tools.get_model(depth = DEPTH, optim = OPTIMIZER, loss = get_gen_dice, mets = METRICS, bias = BIAS)
initial_epoch = 0
# if test images provided, define an image saving callback
if args.test_data:
test_files = glob.glob(os.path.join(args.test_data, '*.gz'))
mixer_file = glob.glob(os.path.join(args.test_data, '*.json'))
# run predictions on a test image and log so we can see what the model is doing at each epoch
jsonFile = mixer_file[0]
with open(jsonFile,) as file:
mixer = json.load(file)
pred_data = makePredDataset(test_files, BANDS, one_hot = ONE_HOT)
file_writer = tf.summary.create_file_writer(log_dir + '/preds')
def log_pred_image(epoch, logs):
out_image = callback_predictions(pred_data, m, mixer)
prob = out_image[:, :, 0]
figure = plt.figure(figsize=(10, 10))
plt.imshow(prob)
image = plot_to_image(figure)
with file_writer.as_default():
tf.summary.image("Predicted Image", image, step=epoch)
pred_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end = log_pred_image)
callbacks = [checkpoint, tensorboard, pred_callback]
else:
callbacks = [checkpoint, tensorboard]
# train the model
steps_per_epoch = int(TRAIN_SIZE//BATCH)
print(steps_per_epoch)
m.fit(
x = training,
epochs = EPOCHS,
steps_per_epoch = steps_per_epoch,
validation_data = evaluation,
callbacks = callbacks#,
#initial_epoch = initial_epoch
)
m.save(os.path.join(out_dir, 'unet256.h5'))
| 37.995238
| 404
| 0.691816
|
from utils import model_tools, processing
from utils.prediction_tools import makePredDataset, callback_predictions, plot_to_image
from matplotlib import pyplot as plt
import argparse
import os
import glob
import json
import math
import tensorflow as tf
from datetime import datetime
from azureml.core import Run, Workspace, Model
parser = argparse.ArgumentParser()
parser.add_argument('--train_data', type = str, required = True, help = 'Training datasets')
parser.add_argument('--eval_data', type = str, required = True, help = 'Evaluation datasets')
parser.add_argument('--test_data', type = str, default = None, help = 'directory containing test image(s) and mixer')
parser.add_argument('--model_id', type = str, required = False, default = None, help = 'model id for continued training')
parser.add_argument('-lr', '--learning_rate', type = float, default = 0.001, help = 'Initial learning rate')
parser.add_argument('-w', '--weight', type = float, default = 1.0, help = 'Positive sample weight for iou, bce, etc.')
parser.add_argument('--bias', type = float, default = None, help = 'bias value for keras output layer initializer')
parser.add_argument('-e', '--epochs', type = int, default = 10, help = 'Number of epochs to train the model for')
parser.add_argument('-b', '--batch', type = int, default = 16, help = 'Training batch size')
parser.add_argument('--size', type = int, default = 3000, help = 'Size of training dataset')
parser.add_argument('--kernel_size', type = int, default = 256, dest = 'kernel_size', help = 'Size in pixels of incoming patches')
parser.add_argument('--response', type = str, required = True, default = 'landcover', help = 'Name of the response variable in tfrecords')
parser.add_argument('--bands', type = str, nargs = '+', required = False, default = ['B3_summer', 'B3_fall', 'B3_spring', 'B4_summer', 'B4_fall', 'B4_spring', 'B5_summer', 'B5_fall', 'B5_spring', 'B6_summer', 'B6_fall', 'B6_spring', 'B8_summer', 'B8_fall', 'B8_spring', 'B11_summer', 'B11_fall', 'B11_spring', 'B12_summer', 'B12_fall', 'B12_spring', 'R', 'G', 'B', 'N', 'lidar_intensity', 'geomorphons'])
parser.add_argument('--splits', type = int, nargs = '+', required = False, default = None )
parser.add_argument('--one_hot_levels', type = int, nargs = '+', required = False, default = [10])
parser.add_argument('--one_hot_names', type = str, nargs = '+', required = False, default = ['landcover'])
args = parser.parse_args()
ONE_HOT = dict(zip(args.one_hot_names, args.one_hot_levels))
SPLITS = args.splits
TRAIN_SIZE = args.size
BATCH = args.batch
EPOCHS = args.epochs
BIAS = args.bias
WEIGHT = args.weight
LR = args.learning_rate
BANDS = args.bands
RESPONSE = args.response
if RESPONSE in ONE_HOT.keys():
RESPONSE = ONE_HOT
OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)
DEPTH = len(BANDS)
print(BANDS)
METRICS = {
'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],
'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]
}
FEATURES = BANDS + [RESPONSE]
BUFFER = math.ceil(args.size/100)*100
KERNEL_SIZE = args.kernel_size
KERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE]
COLUMNS = [
tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES
]
FEATURES_DICT = dict(zip(FEATURES, COLUMNS))
os.makedirs('outputs', exist_ok = True)
os.makedirs('logs', exist_ok = True)
out_dir = './outputs'
log_dir = './logs'
i = 1
train_files = []
for root, dirs, files in os.walk(args.train_data):
for f in files:
if i%2==0:
train_files.append(os.path.join(root, f))
i+=1
eval_files = []
for root, dirs, files in os.walk(args.eval_data):
for f in files:
if i%2==0:
eval_files.append(os.path.join(root, f))
i+=1
training = processing.get_training_dataset(
files = train_files,
ftDict = FEATURES_DICT,
features = BANDS,
response = RESPONSE,
buff = BUFFER,
batch = BATCH,
repeat = True,
splits = SPLITS,
one_hot = ONE_HOT)
evaluation = processing.get_eval_dataset(
files = eval_files,
ftDict = FEATURES_DICT,
features = BANDS,
response = RESPONSE,
splits = SPLITS,
one_hot = ONE_HOT)
def get_gen_dice(y_true, y_pred):
return model_tools.gen_dice(y_true, y_pred, global_weights = WEIGHT)
now = datetime.now()
date = now.strftime("%d%b%y")
date
checkpoint = tf.keras.callbacks.ModelCheckpoint(
os.path.join(out_dir, 'best_weights_' + date + '.hdf5'),
monitor='val_mean_iou',
verbose=1,
save_best_only=True,
mode='max'
)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir = log_dir)
run = Run.get_context()
exp = run.experiment
ws = exp.workspace
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
METRICS = {
'logits':[tf.keras.metrics.MeanSquaredError(name='mse'), tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall')],
'classes':[tf.keras.metrics.MeanIoU(num_classes=2, name = 'mean_iou')]
}
OPTIMIZER = tf.keras.optimizers.Adam(learning_rate=LR, beta_1=0.9, beta_2=0.999)
m = model_tools.get_model(depth = DEPTH, optim = OPTIMIZER, loss = get_gen_dice, mets = METRICS, bias = BIAS)
initial_epoch = 0
if args.test_data:
test_files = glob.glob(os.path.join(args.test_data, '*.gz'))
mixer_file = glob.glob(os.path.join(args.test_data, '*.json'))
jsonFile = mixer_file[0]
with open(jsonFile,) as file:
mixer = json.load(file)
pred_data = makePredDataset(test_files, BANDS, one_hot = ONE_HOT)
file_writer = tf.summary.create_file_writer(log_dir + '/preds')
def log_pred_image(epoch, logs):
out_image = callback_predictions(pred_data, m, mixer)
prob = out_image[:, :, 0]
figure = plt.figure(figsize=(10, 10))
plt.imshow(prob)
image = plot_to_image(figure)
with file_writer.as_default():
tf.summary.image("Predicted Image", image, step=epoch)
pred_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end = log_pred_image)
callbacks = [checkpoint, tensorboard, pred_callback]
else:
callbacks = [checkpoint, tensorboard]
steps_per_epoch = int(TRAIN_SIZE//BATCH)
print(steps_per_epoch)
m.fit(
x = training,
epochs = EPOCHS,
steps_per_epoch = steps_per_epoch,
validation_data = evaluation,
callbacks = callbacks
)
m.save(os.path.join(out_dir, 'unet256.h5'))
| true
| true
|
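Two details of the training script above are easy to miss: the shuffle buffer is the training-set size rounded up to the nearest 100, and the os.walk loops keep only every other file because a single counter i is tested with i % 2 == 0. A small sketch of both with made-up numbers and file names:
import math
# Round a dataset size up to the nearest 100, as done for BUFFER above.
size = 3150
buffer = math.ceil(size / 100) * 100
print(buffer)  # 3200
# Keep every other file from a listing, counting from 1 as the script does.
files = ["part-000.gz", "part-001.gz", "part-002.gz", "part-003.gz"]
kept = [f for i, f in enumerate(files, start=1) if i % 2 == 0]
print(kept)  # ['part-001.gz', 'part-003.gz']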
1c40a1ea96587cb8301d06fdf671f5dd84c4a694
| 2,219
|
py
|
Python
|
config/_base_/models/retinanet_mydartsnet_fpn.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | null | null | null |
config/_base_/models/retinanet_mydartsnet_fpn.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | null | null | null |
config/_base_/models/retinanet_mydartsnet_fpn.py
|
automlresearch/autodetector
|
e959baf589fb329509cd25edcab11c7d22ea5e7e
|
[
"Apache-2.0"
] | 1
|
2021-12-08T08:28:16.000Z
|
2021-12-08T08:28:16.000Z
|
# model settings
model = dict(
type='RetinaNet',
# pretrained='torchvision://resnet50',
# backbone=dict(
# type='ResNet',
# depth=50,
# num_stages=4,
# out_indices=(0, 1, 2, 3),
# frozen_stages=1,
# norm_cfg=dict(type='BN', requires_grad=True),
# norm_eval=True,
# style='pytorch'),
# Model Path of Desktop
# pretrained='/media/p/research/experiment/NAS/DARTSImp/PC-DARTS/pretained/0608_selected/model_best.pth.tar',
pretrained="/home/p/Documents/experiment/experiment/Classification/PC-DARTS/pretained/0608_selected/model_best.pth.tar",
backbone=dict(
type='MyNetworkImageNet_FPN',
# type='NetworkImageNet_FPN',
C=48,
# C=96,
num_classes=1,
layers=14,
auxiliary=False),
neck=dict(
type='FPN',
in_channels=[48, 192, 384, 768],
# in_channels=[96, 384, 768, 1536],
# in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.0005,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
| 29.986486
| 124
| 0.566021
|
model = dict(
type='RetinaNet',
pretrained="/home/p/Documents/experiment/experiment/Classification/PC-DARTS/pretained/0608_selected/model_best.pth.tar",
backbone=dict(
type='MyNetworkImageNet_FPN',
C=48,
num_classes=1,
layers=14,
auxiliary=False),
neck=dict(
type='FPN',
in_channels=[48, 192, 384, 768],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.0005,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
| true
| true
|
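Configs in this style are plain Python files holding dicts, typically consumed by an mmdetection-based training script. A minimal sketch of loading one, assuming an mmcv version that still ships mmcv.Config (newer stacks moved it to mmengine) and that the path from the record above exists locally:
from mmcv import Config
# Load the base model config and read a couple of fields back.
cfg = Config.fromfile("config/_base_/models/retinanet_mydartsnet_fpn.py")
print(cfg.model.type)               # RetinaNet
print(cfg.model.neck.in_channels)   # [48, 192, 384, 768]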
1c40a20e9676da219f65a2279c95dfe0c266c1d7
| 1,009
|
py
|
Python
|
FaceRecog/LiveFaceJudge.py
|
lnblanke/DL
|
4d2631e27a1a5c6de1f7239c2979af63c4019e34
|
[
"MIT"
] | null | null | null |
FaceRecog/LiveFaceJudge.py
|
lnblanke/DL
|
4d2631e27a1a5c6de1f7239c2979af63c4019e34
|
[
"MIT"
] | null | null | null |
FaceRecog/LiveFaceJudge.py
|
lnblanke/DL
|
4d2631e27a1a5c6de1f7239c2979af63c4019e34
|
[
"MIT"
] | null | null | null |
# Judge whether the face in the camera is the person in the dataset
# @Time: 8/17/2020
# @Author: lnblanke
# @Email: fjh314.84@gmail.com
# @File: LiveFaceJudge.py
import cv2, dlib, numpy, time
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
model = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
global desp
pic = cv2.imread("Dataset/obama.jpg")
faces = detector(pic, 1)
for i, face in enumerate(faces):
shape = predictor(pic, face)
descriptor = model.compute_face_descriptor(pic, shape)
vec = numpy.array(descriptor)
desp = vec
cap = cv2.VideoCapture(0)
_, img = cap.read()
faces = detector(img, 1)
for i, face in enumerate(faces):
shape = predictor(img, face)
descriptor = model.compute_face_descriptor(img, shape)
vect = numpy.array(descriptor)
d = numpy.linalg.norm(desp - vect)
if d < 0.7:
print("Correct!")
else:
print("Incorrect!")
| 22.422222
| 83
| 0.704658
|
import cv2, dlib, numpy, time
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
model = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
global desp
pic = cv2.imread("Dataset/obama.jpg")
faces = detector(pic, 1)
for i, face in enumerate(faces):
shape = predictor(pic, face)
descriptor = model.compute_face_descriptor(pic, shape)
vec = numpy.array(descriptor)
desp = vec
cap = cv2.VideoCapture(0)
_, img = cap.read()
faces = detector(img, 1)
for i, face in enumerate(faces):
shape = predictor(img, face)
descriptor = model.compute_face_descriptor(img, shape)
vect = numpy.array(descriptor)
d = numpy.linalg.norm(desp - vect)
if d < 0.7:
print("Correct!")
else:
print("Incorrect!")
| true
| true
|
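The script above accepts a face as a match when the Euclidean distance between two 128-dimensional dlib descriptors falls below 0.7. A small numeric sketch of that decision rule with synthetic vectors standing in for real descriptors:
import numpy as np
# Pretend descriptors; in the real script both come from dlib's ResNet recognition model.
known = np.random.rand(128)
candidate = known + np.random.normal(scale=0.01, size=128)  # a slightly perturbed copy
distance = np.linalg.norm(known - candidate)
print("Correct!" if distance < 0.7 else "Incorrect!")  # small perturbation keeps the distance well under 0.7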
1c40a2125b76ad4cc2545126d2abffb8b1786211
| 9,525
|
py
|
Python
|
Time_Series/mainTestOfTSModels.py
|
ZGChung/P2E_FreqPred
|
79544e9547a94b0d492d14af43ccf271cb175c47
|
[
"MIT"
] | 2
|
2021-06-12T10:29:44.000Z
|
2022-01-01T13:01:34.000Z
|
Time_Series/mainTestOfTSModels.py
|
ZGChung/P2E_FreqPred
|
79544e9547a94b0d492d14af43ccf271cb175c47
|
[
"MIT"
] | null | null | null |
Time_Series/mainTestOfTSModels.py
|
ZGChung/P2E_FreqPred
|
79544e9547a94b0d492d14af43ccf271cb175c47
|
[
"MIT"
] | null | null | null |
from warnings import simplefilter
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
import sklearn as sk
import sklearn.metrics as metrics
from sklearn.neighbors import KNeighborsRegressor
from sktime.utils.plotting import plot_series
from sktime.forecasting.compose import (
EnsembleForecaster,
# MultiplexForecaster,
ReducedForecaster,
TransformedTargetForecaster,
)
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
ForecastingGridSearchCV,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.arima import ARIMA, AutoARIMA
from sktime.forecasting.bats import BATS
from sktime.forecasting.tbats import TBATS
from sktime.forecasting.ets import AutoETS
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.theta import ThetaForecaster
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import sMAPE, smape_loss
from sktime.transformations.series.detrend import Deseasonalizer, Detrender
simplefilter("ignore", FutureWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
warnings.simplefilter('ignore', RuntimeWarning)
NumberOfPredictions = 3
print("Hello world! Program begins.")
df1 = pd.read_csv("data_daily_preCOVID_2cols.csv")
# test read file
print("df1.shape", df1.shape)
print("df1", df1)
df = df1.loc[df1["entries_daily"] != 0]
df = df.reset_index()
print("df.shape", df.shape)
print("df", df)
# convert the first column to datetime format
# df['time'] = pd.to_datetime(df['time'], unit = 's')
# print(df)
# df = df.set_index('time')
y = pd.Series(data = df['entries_daily'])
# x = df.time
# y = df.entries_daily
# Use the data of 2019 as training set, marked in blue in the plot
# Use the data pre-COVID of 2020 as testing set, marked in orange in the plot
# fig1, ax1 = plot_series(y)
# plt.show()
y_train, y_test = temporal_train_test_split(y, test_size = 42)
# fig2, ax2 = plot_series(y_train, y_test, labels = ["y=train", "y=test"])
# ax2.set_title("Original data after Train-Test separation")
# plt.show()
# print(y_train.shape[0], y_test.shape[0])
# use a forecasting horizon the same size as the test set
fh = np.arange(len(y_test)+1)
# print(fh)
'''
# predicting with the last value
# a naive test just to verify the model works
forecaster = NaiveForecaster(strategy = "last")
forecaster.fit(y_train)
y_pred_NaiveForecaster = forecaster.predict(fh)
fig3, ax3 = plot_series(y_train, y_test, y_pred_NaiveForecaster, labels = ["y_train", "y_test", "y_pred"])
ax3.set_title("Naive Forecaster: predict directly the final value")
plt.show()
# we use sMAPE as the evaluation metric here
# sMAPE represents: symmetric Mean Absolute Percentage Error
y_pred_NaiveForecaster = y_pred_NaiveForecaster.drop(y_pred_NaiveForecaster.index[0])
loss3 = smape_loss(y_pred_NaiveForecaster, y_test)
print("The sMAPE for NaiveForecaster method is:", loss3)
'''
# predicting with kNN
# search the k for the kNN minimizing the sMAPE
listOfsMAPE = []
listOfsMAPE.append(20) # initialize the first as a big number
rangeMax = 324
for i in range(1,rangeMax):
regressor = KNeighborsRegressor(n_neighbors = i)
forecaster = ReducedForecaster(
regressor, scitype = "regressor", window_length = 15, strategy = "recursive"
)
forecaster.fit(y_train)
y_pred = forecaster.predict(fh)
y_pred = y_pred.drop(y_pred.index[0])
loss = smape_loss(y_test, y_pred)
print("The sMAPE loss for ", i,"NN prediction is:", loss)
listOfsMAPE.append(loss)
# search the min of sMAPE
minOfsMAPE = 20
for i in range(1,rangeMax):
if listOfsMAPE[i] < minOfsMAPE:
minOfsMAPE = listOfsMAPE[i]
k = listOfsMAPE.index(minOfsMAPE)
print("the best k is", k)
regressor = KNeighborsRegressor(n_neighbors = k)
forecaster = ReducedForecaster(
regressor, scitype = "regressor", window_length = 15, strategy = "recursive"
)
forecaster.fit(y_train)
y_pred_kNN_bestk = forecaster.predict(fh)
print(y_test)
print(y_pred_kNN_bestk)
# loss4 = smape_loss(y_test, y_pred_kNN_bestk)
# print("The best sMAPE loss for kNN method is obtained when k =", 1, ", which is:", loss4)
fig4, ax4 = plot_series(y_train, y_test, y_pred_kNN_bestk, labels = ["y_train", "y_test", "y_pred"])
ax4.set_title("Prediction with kNR optimized")
plt.show()
# plot and zoom in the test set
fig4bis, ax4bis = plot_series(y_test, y_pred_kNN_bestk.drop(y_pred_kNN_bestk.index[0]), labels = ["y_test", "y_pred"])
ax4bis.set_title("The Same result zoomed in to the test set y_test")
plt.show()
# plot the curve of sMAPE - k
listOfsMAPE[0] = listOfsMAPE[1]
plt.figure(2)
plt.plot(range(0, rangeMax), listOfsMAPE)
plt.title("sMPAE-k with k is the length of the forecasting window")
plt.show()
'''
# predicting with ExponentialSmoothing
listOfsMAPE_ES = []
for spTrial in range(1,54):
forecaster = ExponentialSmoothing(trend = None, seasonal = None, sp = spTrial)
forecaster.fit(y_train)
y_pred_withES = forecaster.predict(fh)
y_pred_withES = y_pred_withES.drop(y_pred_withES.index[0])
loss5 = smape_loss(y_test, y_pred_withES)
listOfsMAPE_ES.append(loss5)
# search the min of sMAPE
minOfsMAPE = 20
for i in range(1, len(listOfsMAPE_ES)):
if listOfsMAPE_ES[i] < minOfsMAPE:
minOfsMAPE = listOfsMAPE_ES[i]
sptOptimal = listOfsMAPE_ES.index(minOfsMAPE)
print("The best sp for Exponential Smoothing method is:", sptOptimal+1)
print("The corresponding sMAPE is :", listOfsMAPE_ES[sptOptimal])
forecaster = ExponentialSmoothing(trend = None, seasonal = None, sp = sptOptimal+1)
forecaster.fit(y_train)
y_pred_withES = forecaster.predict(fh)
fig5, ax5 = plot_series(y_test, y_pred_withES, labels = ["y_test", "y_pred"])
ax5.set_title("Exponantial Smooting")
plt.show()
'''
'''
# prediction with autoArima
# didn't get the result, it takes too much time to train the model
forecaster = AutoARIMA(sp = 60, suppress_warnings = True)
forecaster.fit(y_train)
y_pred_withAutoArima = forecaster.predict(fh)
fig6, ax6 = plot_series(y_train, y_test, y_pred_withAutoArima, labels = ["y_train", "y_test", "y_pred"])
ax6.set_title("autoArima")
loss6 = smape_loss(y_test, y_pred_withAutoArima)
print("The sMAPE for auto-Arima method is:", loss6)
'''
'''
# prediction with single Arima
forecaster = ARIMA(
order = (1, 1, 2), seasonal_order = (1, 1, 1, 54), suppress_warnings = True
)
forecaster.fit(y_train)
y_pred_singleArima = forecaster.predict(fh)
# print("Method single Arima : y_train:", y_train)
# print("Method single Arima : y_test:", y_test)
# print("Method single Arima : y_pred:", y_pred_withES)
# the result is ridiculously bad, it presents a trend of decrease
fig7, ax7 = plot_series(y_test, y_pred_singleArima, labels = ["y_test", "y_pred"])
ax7.set_title("Arima")
plt.show()
y_pred_singleArima = y_pred_singleArima.drop(y_pred_singleArima.index[0])
loss7 = smape_loss(y_test, y_pred_singleArima)
print("The sMAPE for single-Arima method is:", loss7)
'''
'''
# prediction with BATS
# This method runs relatively slow and it produces an outcome similar to mean value prediction
forecaster = BATS(sp=7, use_trend=True, use_box_cox=False)
forecaster.fit(y_train)
y_pred_BATS = forecaster.predict(fh)
fig8, ax8 = plot_series(y_test, y_pred_BATS, labels=["y_test", "y_pred"])
plt.show()
y_pred_BATS = y_pred_BATS.drop(y_pred_BATS.index[0])
loss8 = smape_loss(y_test, y_pred_BATS)
print("The sMAPE for BATS method is:", loss8)
'''
'''
# prediction with TBATS
forecaster = TBATS(sp=12, use_trend=True, use_box_cox=False)
forecaster.fit(y_train)
y_pred_TBATS = forecaster.predict(fh)
fig9, ax9 = plot_series(y_test, y_pred_TBATS, labels=["y_test", "y_pred"])
ax9.set_title("TBATS")
plt.show()
y_pred_TBATS = y_pred_TBATS.drop(y_pred_TBATS.index[0])
loss9 = smape_loss(y_test, y_pred_TBATS)
print("The sMAPE for TBATS method is:", loss9)
'''
'''
# prediction with autoETS
# modify the data, replacing 0 by 0.01
# change all dato into float
y = pd.Series(data = df['entries_daily_0_modified'])
y_train, y_test = temporal_train_test_split(y, test_size = 42)
forecaster = AutoETS(error = None, trend = None, sp = 52, auto = True)
forecaster.fit(y_train)
y_pred_autoETS = forecaster.predict(fh)
fig10, ax10 = plot_series(y_test, y_pred_autoETS, labels = ["y_test", "y_pred"])
plt.show()
y_pred_autoETS = y_pred_autoETS.drop(y_pred_autoETS.index[0])
loss10 = smape_loss(y_test, y_pred_autoETS)
print("The sMAPE for autoETS method is:", loss10)
'''
# Helper functions, some other possible metrics for evaluations
'''
def regression_results(y_true, y_pred):
# Regression metrics
explained_variance=metrics.explained_variance_score(y_true, y_pred)
mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred)
mse=metrics.mean_squared_error(y_true, y_pred)
mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)
median_absolute_error=metrics.median_absolute_error(y_true, y_pred)
r2=metrics.r2_score(y_true, y_pred)
print('explained_variance: ', round(explained_variance,4))
print('mean_squared_log_error: ', round(mean_squared_log_error,4))
print('r2: ', round(r2,4))
print('MAE: ', round(mean_absolute_error,4))
print('MSE: ', round(mse,4))
print('RMSE: ', round(np.sqrt(mse),4))
'''
| 35.943396
| 118
| 0.758215
|
from warnings import simplefilter
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
import sklearn as sk
import sklearn.metrics as metrics
from sklearn.neighbors import KNeighborsRegressor
from sktime.utils.plotting import plot_series
from sktime.forecasting.compose import (
EnsembleForecaster,
ReducedForecaster,
TransformedTargetForecaster,
)
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
ForecastingGridSearchCV,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.arima import ARIMA, AutoARIMA
from sktime.forecasting.bats import BATS
from sktime.forecasting.tbats import TBATS
from sktime.forecasting.ets import AutoETS
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.theta import ThetaForecaster
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import sMAPE, smape_loss
from sktime.transformations.series.detrend import Deseasonalizer, Detrender
simplefilter("ignore", FutureWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
warnings.simplefilter('ignore', RuntimeWarning)
NumberOfPredictions = 3
print("Hello world! Program begins.")
df1 = pd.read_csv("data_daily_preCOVID_2cols.csv")
print("df1.shape", df1.shape)
print("df1", df1)
df = df1.loc[df1["entries_daily"] != 0]
df = df.reset_index()
print("df.shape", df.shape)
print("df", df)
y = pd.Series(data = df['entries_daily'])
y_train, y_test = temporal_train_test_split(y, test_size = 42)
fh = np.arange(len(y_test)+1)
listOfsMAPE = []
listOfsMAPE.append(20)
rangeMax = 324
for i in range(1,rangeMax):
regressor = KNeighborsRegressor(n_neighbors = i)
forecaster = ReducedForecaster(
regressor, scitype = "regressor", window_length = 15, strategy = "recursive"
)
forecaster.fit(y_train)
y_pred = forecaster.predict(fh)
y_pred = y_pred.drop(y_pred.index[0])
loss = smape_loss(y_test, y_pred)
print("The sMAPE loss for ", i,"NN prediction is:", loss)
listOfsMAPE.append(loss)
minOfsMAPE = 20
for i in range(1,rangeMax):
if listOfsMAPE[i] < minOfsMAPE:
minOfsMAPE = listOfsMAPE[i]
k = listOfsMAPE.index(minOfsMAPE)
print("the best k is", k)
regressor = KNeighborsRegressor(n_neighbors = k)
forecaster = ReducedForecaster(
regressor, scitype = "regressor", window_length = 15, strategy = "recursive"
)
forecaster.fit(y_train)
y_pred_kNN_bestk = forecaster.predict(fh)
print(y_test)
print(y_pred_kNN_bestk)
fig4, ax4 = plot_series(y_train, y_test, y_pred_kNN_bestk, labels = ["y_train", "y_test", "y_pred"])
ax4.set_title("Prediction with kNR optimized")
plt.show()
fig4bis, ax4bis = plot_series(y_test, y_pred_kNN_bestk.drop(y_pred_kNN_bestk.index[0]), labels = ["y_test", "y_pred"])
ax4bis.set_title("The Same result zoomed in to the test set y_test")
plt.show()
listOfsMAPE[0] = listOfsMAPE[1]
plt.figure(2)
plt.plot(range(0, rangeMax), listOfsMAPE)
plt.title("sMPAE-k with k is the length of the forecasting window")
plt.show()
| true
| true
|
1c40a23989ae80e4473f4505e19980540d22119f
| 621
|
py
|
Python
|
samples/websocket/web/app.py
|
Algorab/examples
|
c89c24876ac329ebdf2caef578a283a1249546bc
|
[
"Apache-2.0"
] | 17
|
2018-08-16T09:55:03.000Z
|
2021-03-29T00:49:39.000Z
|
samples/websocket/web/app.py
|
Algorab/examples
|
c89c24876ac329ebdf2caef578a283a1249546bc
|
[
"Apache-2.0"
] | 14
|
2018-09-18T10:52:10.000Z
|
2021-12-09T22:38:09.000Z
|
samples/websocket/web/app.py
|
Algorab/examples
|
c89c24876ac329ebdf2caef578a283a1249546bc
|
[
"Apache-2.0"
] | 17
|
2020-09-21T07:40:08.000Z
|
2022-03-25T16:36:59.000Z
|
import os.path
import logging
from flask import request
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
def get_file(filename): # pragma: no cover
try:
src = os.path.join(root_dir(), filename)
# Figure out how flask returns static files
# Tried:
# - render_template
# - send_file
# This should not be so non-obvious
return open(src).read()
except IOError as exc:
return str(exc)
def main():
# print(request)
# print(request.headers)
return get_file(request.headers['X-Fission-Params-Html'])
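# Hedged aside (not part of the Fission sample itself): outside Fission, main()
# can be exercised by mounting it on a plain Flask app and supplying the header
# that Fission's router normally injects, e.g.:
# from flask import Flask
# app = Flask(__name__)
# app.add_url_rule("/", "index", main)
# # curl -H "X-Fission-Params-Html: index.html" http://localhost:5000/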
| 25.875
| 61
| 0.639291
|
import os.path
import logging
from flask import request
def root_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_file(filename):
try:
src = os.path.join(root_dir(), filename)
return open(src).read()
except IOError as exc:
return str(exc)
def main():
return get_file(request.headers['X-Fission-Params-Html'])
| true
| true
|
1c40a30f099c634f5772a9978641a432fdc3189e
| 6,661
|
py
|
Python
|
networkx_mod/generators/tests/test_threshold.py
|
movingpictures83/MATria
|
d3dbd0d15e00dbc26db39ace0663868180fdc471
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
networkx_mod/generators/tests/test_threshold.py
|
movingpictures83/MATria
|
d3dbd0d15e00dbc26db39ace0663868180fdc471
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
networkx_mod/generators/tests/test_threshold.py
|
movingpictures83/MATria
|
d3dbd0d15e00dbc26db39ace0663868180fdc471
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Threshold Graphs
================
"""
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx_mod as nx
import networkx_mod.generators.threshold as nxt
from networkx_mod.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
cnlti = nx.convert_node_labels_to_integers
class TestGeneratorThreshold():
def test_threshold_sequence_graph_test(self):
G=nx.star_graph(10)
assert_true(nxt.is_threshold_graph(G))
assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
G=nx.complete_graph(10)
assert_true(nxt.is_threshold_graph(G))
assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
deg=[3,2,2,1,1,1]
assert_false(nxt.is_threshold_sequence(deg))
deg=[3,2,2,1]
assert_true(nxt.is_threshold_sequence(deg))
G=nx.generators.havel_hakimi_graph(deg)
assert_true(nxt.is_threshold_graph(G))
def test_creation_sequences(self):
deg=[3,2,2,1]
G=nx.generators.havel_hakimi_graph(deg)
cs0=nxt.creation_sequence(deg)
H0=nxt.threshold_graph(cs0)
assert_equal(''.join(cs0), 'ddid')
cs1=nxt.creation_sequence(deg, with_labels=True)
H1=nxt.threshold_graph(cs1)
assert_equal(cs1, [(1, 'd'), (2, 'd'), (3, 'i'), (0, 'd')])
cs2=nxt.creation_sequence(deg, compact=True)
H2=nxt.threshold_graph(cs2)
assert_equal(cs2, [2, 1, 1])
assert_equal(''.join(nxt.uncompact(cs2)), 'ddid')
assert_true(graph_could_be_isomorphic(H0,G))
assert_true(graph_could_be_isomorphic(H0,H1))
assert_true(graph_could_be_isomorphic(H0,H2))
def test_shortest_path(self):
deg=[3,2,2,1]
G=nx.generators.havel_hakimi_graph(deg)
cs1=nxt.creation_sequence(deg, with_labels=True)
for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3),
(3, 1), (1, 2), (2, 3)]:
assert_equal(nxt.shortest_path(cs1,n,m),
nx.shortest_path(G, n, m))
spl=nxt.shortest_path_length(cs1,3)
spl2=nxt.shortest_path_length([ t for v,t in cs1],2)
assert_equal(spl, spl2)
spld={}
for j,pl in enumerate(spl):
n=cs1[j][0]
spld[n]=pl
assert_equal(spld, nx.single_source_shortest_path_length(G, 3))
def test_weights_thresholds(self):
wseq=[3,4,3,3,5,6,5,4,5,6]
cs=nxt.weights_to_creation_sequence(wseq,threshold=10)
wseq=nxt.creation_sequence_to_weights(cs)
cs2=nxt.weights_to_creation_sequence(wseq)
assert_equal(cs, cs2)
wseq=nxt.creation_sequence_to_weights(nxt.uncompact([3,1,2,3,3,2,3]))
assert_equal(wseq,
[s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
wseq=nxt.creation_sequence_to_weights([3,1,2,3,3,2,3])
assert_equal(wseq,
[s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
wseq=nxt.creation_sequence_to_weights(list(enumerate('ddidiiidididi')))
assert_equal(wseq,
[s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
wseq=nxt.creation_sequence_to_weights('ddidiiidididi')
assert_equal(wseq,
[s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
wseq=nxt.creation_sequence_to_weights('ddidiiidididid')
ws=[s/float(12) for s in [6,6,5,7,4,4,4,8,3,9,2,10,1,11]]
assert_true(sum([abs(c-d) for c,d in zip(wseq,ws)]) < 1e-14)
def test_finding_routines(self):
G=nx.Graph({1:[2],2:[3],3:[4],4:[5],5:[6]})
G.add_edge(2,4)
G.add_edge(2,5)
G.add_edge(2,7)
G.add_edge(3,6)
G.add_edge(4,6)
# Alternating 4 cycle
assert_equal(nxt.find_alternating_4_cycle(G), [1, 2, 3, 6])
# Threshold graph
TG=nxt.find_threshold_graph(G)
assert_true(nxt.is_threshold_graph(TG))
assert_equal(sorted(TG.nodes()), [1, 2, 3, 4, 5, 7])
cs=nxt.creation_sequence(TG.degree(),with_labels=True)
assert_equal(nxt.find_creation_sequence(G), cs)
def test_fast_versions_properties_threshold_graphs(self):
cs='ddiiddid'
G=nxt.threshold_graph(cs)
assert_equal(nxt.density('ddiiddid'), nx.density(G))
assert_equal(sorted(nxt.degree_sequence(cs)),
sorted(G.degree().values()))
ts=nxt.triangle_sequence(cs)
assert_equal(ts, list(nx.triangles(G).values()))
assert_equal(sum(ts) // 3, nxt.triangles(cs))
c1=nxt.cluster_sequence(cs)
c2=list(nx.clustering(G).values())
assert_almost_equal(sum([abs(c-d) for c,d in zip(c1,c2)]), 0)
b1=nx.betweenness_centrality(G).values()
b2=nxt.betweenness_sequence(cs)
assert_true(sum([abs(c-d) for c,d in zip(b1,b2)]) < 1e-14)
assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])
# Degree Correlation
assert_true(abs(nxt.degree_correlation(cs)+0.593038821954) < 1e-12)
assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
assert_equal(nxt.degree_correlation('did'), -1.0)
assert_equal(nxt.degree_correlation('ddd'), 1.0)
assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
def test_tg_creation_routines(self):
s=nxt.left_d_threshold_sequence(5,7)
s=nxt.right_d_threshold_sequence(5,7)
s1=nxt.swap_d(s,1.0,1.0)
@attr('numpy')
def test_eigenvectors(self):
try:
import numpy as N
eigenval=N.linalg.eigvals
import scipy
except ImportError:
raise SkipTest('SciPy not available.')
cs='ddiiddid'
G=nxt.threshold_graph(cs)
(tgeval,tgevec)=nxt.eigenvectors(cs)
dot=N.dot
assert_equal([ abs(dot(lv,lv)-1.0)<1e-9 for lv in tgevec ], [True]*8)
lapl=nx.laplacian_matrix(G)
# tgev=[ dot(lv,dot(lapl,lv)) for lv in tgevec ]
# assert_true(sum([abs(c-d) for c,d in zip(tgev,tgeval)]) < 1e-9)
# tgev.sort()
# lev=list(eigenval(lapl))
# lev.sort()
# assert_true(sum([abs(c-d) for c,d in zip(tgev,lev)]) < 1e-9)
def test_create_using(self):
cs='ddiiddid'
G=nxt.threshold_graph(cs)
assert_raises(nx.exception.NetworkXError,
nxt.threshold_graph, cs, create_using=nx.DiGraph())
MG=nxt.threshold_graph(cs,create_using=nx.MultiGraph())
assert_equal(MG.edges(), G.edges())
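# Hedged reference sketch (assuming networkx_mod mirrors networkx's threshold
# graph API): in a creation sequence, 'd' appends a dominating vertex joined to
# every existing vertex and 'i' appends an isolated vertex, so for the degree
# sequence [3, 2, 2, 1] used above:
# G = nxt.threshold_graph('ddid')
# sorted(G.degree().values())   # -> [1, 2, 2, 3]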
| 36.005405
| 82
| 0.607717
|
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import networkx_mod as nx
import networkx_mod.generators.threshold as nxt
from networkx_mod.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
cnlti = nx.convert_node_labels_to_integers
class TestGeneratorThreshold():
def test_threshold_sequence_graph_test(self):
G=nx.star_graph(10)
assert_true(nxt.is_threshold_graph(G))
assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
G=nx.complete_graph(10)
assert_true(nxt.is_threshold_graph(G))
assert_true(nxt.is_threshold_sequence(list(G.degree().values())))
deg=[3,2,2,1,1,1]
assert_false(nxt.is_threshold_sequence(deg))
deg=[3,2,2,1]
assert_true(nxt.is_threshold_sequence(deg))
G=nx.generators.havel_hakimi_graph(deg)
assert_true(nxt.is_threshold_graph(G))
def test_creation_sequences(self):
deg=[3,2,2,1]
G=nx.generators.havel_hakimi_graph(deg)
cs0=nxt.creation_sequence(deg)
H0=nxt.threshold_graph(cs0)
assert_equal(''.join(cs0), 'ddid')
cs1=nxt.creation_sequence(deg, with_labels=True)
H1=nxt.threshold_graph(cs1)
assert_equal(cs1, [(1, 'd'), (2, 'd'), (3, 'i'), (0, 'd')])
cs2=nxt.creation_sequence(deg, compact=True)
H2=nxt.threshold_graph(cs2)
assert_equal(cs2, [2, 1, 1])
assert_equal(''.join(nxt.uncompact(cs2)), 'ddid')
assert_true(graph_could_be_isomorphic(H0,G))
assert_true(graph_could_be_isomorphic(H0,H1))
assert_true(graph_could_be_isomorphic(H0,H2))
def test_shortest_path(self):
deg=[3,2,2,1]
G=nx.generators.havel_hakimi_graph(deg)
cs1=nxt.creation_sequence(deg, with_labels=True)
for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3),
(3, 1), (1, 2), (2, 3)]:
assert_equal(nxt.shortest_path(cs1,n,m),
nx.shortest_path(G, n, m))
spl=nxt.shortest_path_length(cs1,3)
spl2=nxt.shortest_path_length([ t for v,t in cs1],2)
assert_equal(spl, spl2)
spld={}
for j,pl in enumerate(spl):
n=cs1[j][0]
spld[n]=pl
assert_equal(spld, nx.single_source_shortest_path_length(G, 3))
def test_weights_thresholds(self):
wseq=[3,4,3,3,5,6,5,4,5,6]
cs=nxt.weights_to_creation_sequence(wseq,threshold=10)
wseq=nxt.creation_sequence_to_weights(cs)
cs2=nxt.weights_to_creation_sequence(wseq)
assert_equal(cs, cs2)
wseq=nxt.creation_sequence_to_weights(nxt.uncompact([3,1,2,3,3,2,3]))
assert_equal(wseq,
[s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
wseq=nxt.creation_sequence_to_weights([3,1,2,3,3,2,3])
assert_equal(wseq,
[s*0.125 for s in [4,4,4,3,5,5,2,2,2,6,6,6,1,1,7,7,7]])
wseq=nxt.creation_sequence_to_weights(list(enumerate('ddidiiidididi')))
assert_equal(wseq,
[s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
wseq=nxt.creation_sequence_to_weights('ddidiiidididi')
assert_equal(wseq,
[s*0.1 for s in [5,5,4,6,3,3,3,7,2,8,1,9,0]])
wseq=nxt.creation_sequence_to_weights('ddidiiidididid')
ws=[s/float(12) for s in [6,6,5,7,4,4,4,8,3,9,2,10,1,11]]
assert_true(sum([abs(c-d) for c,d in zip(wseq,ws)]) < 1e-14)
def test_finding_routines(self):
G=nx.Graph({1:[2],2:[3],3:[4],4:[5],5:[6]})
G.add_edge(2,4)
G.add_edge(2,5)
G.add_edge(2,7)
G.add_edge(3,6)
G.add_edge(4,6)
assert_equal(nxt.find_alternating_4_cycle(G), [1, 2, 3, 6])
TG=nxt.find_threshold_graph(G)
assert_true(nxt.is_threshold_graph(TG))
assert_equal(sorted(TG.nodes()), [1, 2, 3, 4, 5, 7])
cs=nxt.creation_sequence(TG.degree(),with_labels=True)
assert_equal(nxt.find_creation_sequence(G), cs)
def test_fast_versions_properties_threshold_graphs(self):
cs='ddiiddid'
G=nxt.threshold_graph(cs)
assert_equal(nxt.density('ddiiddid'), nx.density(G))
assert_equal(sorted(nxt.degree_sequence(cs)),
sorted(G.degree().values()))
ts=nxt.triangle_sequence(cs)
assert_equal(ts, list(nx.triangles(G).values()))
assert_equal(sum(ts) // 3, nxt.triangles(cs))
c1=nxt.cluster_sequence(cs)
c2=list(nx.clustering(G).values())
assert_almost_equal(sum([abs(c-d) for c,d in zip(c1,c2)]), 0)
b1=nx.betweenness_centrality(G).values()
b2=nxt.betweenness_sequence(cs)
assert_true(sum([abs(c-d) for c,d in zip(b1,b2)]) < 1e-14)
assert_equal(nxt.eigenvalues(cs), [0, 1, 3, 3, 5, 7, 7, 8])
assert_true(abs(nxt.degree_correlation(cs)+0.593038821954) < 1e-12)
assert_equal(nxt.degree_correlation('diiiddi'), -0.8)
assert_equal(nxt.degree_correlation('did'), -1.0)
assert_equal(nxt.degree_correlation('ddd'), 1.0)
assert_equal(nxt.eigenvalues('dddiii'), [0, 0, 0, 0, 3, 3])
assert_equal(nxt.eigenvalues('dddiiid'), [0, 1, 1, 1, 4, 4, 7])
def test_tg_creation_routines(self):
s=nxt.left_d_threshold_sequence(5,7)
s=nxt.right_d_threshold_sequence(5,7)
s1=nxt.swap_d(s,1.0,1.0)
@attr('numpy')
def test_eigenvectors(self):
try:
import numpy as N
eigenval=N.linalg.eigvals
import scipy
except ImportError:
raise SkipTest('SciPy not available.')
cs='ddiiddid'
G=nxt.threshold_graph(cs)
(tgeval,tgevec)=nxt.eigenvectors(cs)
dot=N.dot
assert_equal([ abs(dot(lv,lv)-1.0)<1e-9 for lv in tgevec ], [True]*8)
lapl=nx.laplacian_matrix(G)
def test_create_using(self):
cs='ddiiddid'
G=nxt.threshold_graph(cs)
assert_raises(nx.exception.NetworkXError,
nxt.threshold_graph, cs, create_using=nx.DiGraph())
MG=nxt.threshold_graph(cs,create_using=nx.MultiGraph())
assert_equal(MG.edges(), G.edges())
| true
| true
|
1c40a31df74207678516ba146b07306ed68ed2cb
| 10,511
|
py
|
Python
|
tx/tx_builder/bitcoin_v3_cpfp/merch_close_with_cpfp.py
|
fakecoinbase/boltlabs-incslashlibzkchannels
|
c0b43790c637f4ffd2956193b16f9ddcea94a3a4
|
[
"MIT"
] | 68
|
2020-01-18T22:07:57.000Z
|
2022-02-03T02:30:55.000Z
|
tx/tx_builder/bitcoin_v3_cpfp/merch_close_with_cpfp.py
|
fakecoinbase/boltlabs-incslashlibzkchannels
|
c0b43790c637f4ffd2956193b16f9ddcea94a3a4
|
[
"MIT"
] | 2
|
2020-04-29T02:02:49.000Z
|
2021-04-08T11:23:48.000Z
|
tx/tx_builder/bitcoin_v3_cpfp/merch_close_with_cpfp.py
|
fakecoinbase/boltlabs-incslashlibzkchannels
|
c0b43790c637f4ffd2956193b16f9ddcea94a3a4
|
[
"MIT"
] | 3
|
2021-04-04T05:04:16.000Z
|
2022-01-26T10:14:46.000Z
|
# Based on tutorial from:
# https://github.com/zeltsi/segwit_tutorial/tree/master/transactions
import argparse
import hashlib
import ecdsa
import sys
def dSHA256(data):
hash_1 = hashlib.sha256(data).digest()
hash_2 = hashlib.sha256(hash_1).digest()
return hash_2
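# dSHA256(x) is SHA256(SHA256(x)): the double hash Bitcoin applies both to the
# BIP143 sighash preimage and to the serialized legacy tx when deriving a txid,
# as done further below.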
def hash160(s):
'''sha256 followed by ripemd160'''
return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
def privkey_to_pubkey(privkey):
signing_key = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1)
verifying_key = signing_key.get_verifying_key()
x_cor = bytes.fromhex(verifying_key.to_string().hex())[:32] # The first 32 bytes are the x coordinate
y_cor = bytes.fromhex(verifying_key.to_string().hex())[32:] # The last 32 bytes are the y coordinate
if int.from_bytes(y_cor, byteorder="big", signed=True) % 2 == 0: # We need to turn the y_cor into a number.
public_key = bytes.fromhex("02" + x_cor.hex())
else:
public_key = bytes.fromhex("03" + x_cor.hex())
return public_key
parser = argparse.ArgumentParser()
# debug on to print full tx details
parser.add_argument("--debug", "-db", action='store_true', help="debug mode: print out all tx details")
# tx details
parser.add_argument("--txid_str", "-tx", help="txid of input as string")
parser.add_argument("--index", "-ind", help="index of outpoint")
parser.add_argument("--input_amount_btc", "-a", help="amount of btc held by the previous outpoint")
parser.add_argument("--cust_privkey", "-csk", help="private key of customer for escrow")
parser.add_argument("--merch_privkey", "-msk", help="private key of merchant for escrow")
# parser.add_argument("--sighash_type", "-sh", help="sighash type for signatures")
parser.add_argument("--output_value_btc", "-o", help="btc to output")
parser.add_argument("--merch_payout_pubkey", "-mcpk", help="public key of merchant close to-self output")
parser.add_argument("--to_self_delay", "-tsd", help="to_self_delay (in unit of blocks) for the merchant's to-self output")
parser.add_argument("--merch_cpfp_value_btc", "-cv", help="merch cpfp output value btc")
parser.add_argument("--merch_cpfp_pubkey", "-cfpk", help="pubkey for merch cpfp output")
args = parser.parse_args()
# If no tx input arguments are provided, use hardcoded values to generate an example tx
if len(sys.argv) < 5:
txID_str = "2222222222222222222222222222222233333333333333333333333333333333"
tx_index = 0
input_amount_sat = int(float(2.1) * 100000000)
cust_privkey = bytes.fromhex("7911111111111111111111111111111111111111111111111111111111111111")
merch_privkey = bytes.fromhex("3711111111111111111111111111111111111111111111111111111111111111")
output_value_sat = int(float(2.0) * 100000000)
merch_payout_pubkey = bytes.fromhex("02f3d17ca1ac6dcf42b0297a71abb87f79dfa2c66278cbb99c1437e6570643ce90")
to_self_delay_big_e = bytes.fromhex("05cf")
merch_cpfp_value_sat = int(float(0.1) * 100000000)
merch_cpfp_pubkey = bytes.fromhex("5511111111111111111111111111111111111111111111111111111111111111")
else:
txID_str = args.txid_str
tx_index = int(args.index)
input_amount_sat = int(float(args.input_amount_btc) * 100000000)
cust_privkey = bytes.fromhex(args.cust_privkey)
merch_privkey = bytes.fromhex(args.merch_privkey)
output_value_sat = int(float(args.output_value_btc) * 100000000)
merch_payout_pubkey = bytes.fromhex(args.merch_payout_pubkey)
to_self_delay_big_e = bytes.fromhex(args.to_self_delay)
merch_cpfp_value_sat = int(float(args.merch_cpfp_value_btc) * 100000000)
merch_cpfp_pubkey = bytes.fromhex(args.merch_cpfp_pubkey)
# keys for the funding tx 2-of-2 multisig
merch_pubkey = privkey_to_pubkey(merch_privkey)
cust_pubkey = privkey_to_pubkey(cust_privkey)
# These are hard coded tx variables
version = bytes.fromhex("0200 0000")
marker = bytes.fromhex("00")
flag = bytes.fromhex("01")
sequence = bytes.fromhex("ffffffff")
locktime = bytes.fromhex("0000 0000")
tx_in_count = bytes.fromhex("01")
tx_out_count = bytes.fromhex("02")
sighash = bytes.fromhex("01000000")
sighash_type_flag = bytes.fromhex("01")
# Convert txid, index, amounts, and to_self_delay to little endian
txid = (bytes.fromhex(txID_str))[::-1]
index = tx_index.to_bytes(4, byteorder="little", signed=False)
input_amount = input_amount_sat.to_bytes(8, byteorder="little", signed=True)
output_value = output_value_sat.to_bytes(8, byteorder="little", signed=True)
merch_cpfp_value = merch_cpfp_value_sat.to_bytes(8, byteorder="little", signed=True)
to_self_delay_little_e = to_self_delay_big_e[::-1]
##########################################
# INPUT (witness script): escrow script op_codes
# 0x52 OP_2
# 0x21 OP_DATA - len(merch_pubkey)
# merch_pubkey
# 0x21 OP_DATA - len(cust_pubkey)
# cust_pubkey
# 0x52 OP_2
# 0xae OP_CHECKMULTISIG
escrow_script = (
bytes.fromhex("5221")
+ merch_pubkey
+ bytes.fromhex("21")
+ cust_pubkey
+ bytes.fromhex("52ae")
)
# OUTPUT: merch-close script op_codes
# 0x63 OP_IF
# 0x52 OP_2
# 0x21 OP_DATA - len(merch_pubkey)
# merch_pubkey
# 0x21 OP_DATA - len(cust_pubkey)
# cust_pubkey
# 0x52 OP_2
# 0xae OP_CHECKMULTISIG
# 0x67 OP_ELSE
# 0x__ OP_DATA - len(to_self_delay) (probably 0x02)
# to_self_delay
# 0xb2 OP_CHECKSEQUENCEVERIFY
# 0x75 OP_DROP
# 0x21 OP_DATA - len(merch_payout_pubkey)
# merch_close_pk
# 0xac OP_CHECKSIG
# 0x68 OP_ENDIF
merch_close_script = (
bytes.fromhex("63 52 21")
+ merch_pubkey
+ bytes.fromhex("21")
+ cust_pubkey
+ bytes.fromhex("52 ae 67")
+ len(to_self_delay_little_e).to_bytes(1, byteorder="little", signed=False)
+ to_self_delay_little_e
+ bytes.fromhex("b2 75 21")
+ merch_payout_pubkey
+ bytes.fromhex("ac68")
)
script_sha32 = hashlib.sha256(merch_close_script).digest()
merch_close_scriptPK = bytes.fromhex("0020") + script_sha32
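# 0x0020 makes this a native P2WSH scriptPubKey: witness version 0 (OP_0)
# followed by a 32-byte push of sha256(witnessScript).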
# P2WPKH scriptPubKey
merch_cpfp_scriptPK = bytes.fromhex("0014") + hash160(merch_cpfp_pubkey)
##########################################
# Put together the tx digest preimage
hashPrevOuts = dSHA256(txid + index)
hashSequence = dSHA256(sequence)
# hashOutputs and output
outputs = (
output_value
+ (len(merch_close_scriptPK)).to_bytes(1, byteorder="little", signed=False)
+ merch_close_scriptPK
+ merch_cpfp_value
+ (len(merch_cpfp_scriptPK)).to_bytes(1, byteorder="little", signed=False)
+ merch_cpfp_scriptPK
)
hashOutputs = dSHA256(outputs)
scriptcode = (
(len(escrow_script)).to_bytes(1, byteorder="little", signed=False)
+ escrow_script
)
tx_digest_preimage = (
version
+ hashPrevOuts
+ hashSequence
+ txid
+ index
+ scriptcode
+ input_amount
+ sequence
+ hashOutputs
+ locktime
+ sighash
)
tx_digest = dSHA256(tx_digest_preimage)
##########################################
# Produce signatures for 2-of-2 multisig
signing_key_merch = ecdsa.SigningKey.from_string(merch_privkey, curve=ecdsa.SECP256k1) # Don't forget to specify the curve
signature_merch = signing_key_merch.sign_digest(tx_digest, sigencode=ecdsa.util.sigencode_der_canonize)
signing_key_cust = ecdsa.SigningKey.from_string(cust_privkey, curve=ecdsa.SECP256k1) # Don't forget to specify the curve
signature_cust = signing_key_cust.sign_digest(tx_digest, sigencode=ecdsa.util.sigencode_der_canonize)
##########################################
# Create witness field with 2-of-2 multisig signatures (in specific order)
witness_field = (
# indicate the number of stack items for the txin
bytes.fromhex("04")
# OP_CHECKMULTISIG bug
+ bytes.fromhex("00")
# signature 1
+ (len(signature_merch)+1).to_bytes(1, byteorder="little", signed=False)
+ signature_merch
+ sighash_type_flag
# signature 2
+ (len(signature_cust)+1).to_bytes(1, byteorder="little", signed=False)
+ signature_cust
+ sighash_type_flag
# witnessScript
    # This is the script that the creator of this transaction needs to provide, and
# solve, in order to redeem the UTXO listed in the input
+ (len(escrow_script)).to_bytes(1, byteorder="little", signed=False)
+ escrow_script
)
##########################################
# Create final tx with signatures
scriptSig = (
bytes.fromhex("00") # length of empty scriptSig (since it's a witness output)
)
final_tx = (
version
+ marker
+ flag
+ tx_in_count
+ txid
+ index
+ scriptSig
+ sequence
+ tx_out_count
+ outputs
+ witness_field
+ locktime
)
print(final_tx.hex())
##########################################
# Print out tx digest details if debug flag was set
if args.debug:
print("\ntx digest preimage")
print(tx_digest_preimage.hex())
print("\nbreakdown of tx digest preimage")
print("version: ", version.hex())
print("hashPrevOuts: ", hashPrevOuts.hex())
print("hashSequence: ", hashSequence.hex())
print("txid little endian: ",txid.hex())
print("index: ",index.hex())
print("scriptcode: ",scriptcode.hex())
print("input_amount: ",input_amount.hex())
print("sequence: ",sequence.hex())
print("hashOutputs: ", hashOutputs.hex())
print("locktime: ", locktime.hex())
print("sighash: ",sighash.hex())
print("\ncust escrow pubkey: ", cust_pubkey.hex())
print("merch escrow pubkey: ", merch_pubkey.hex())
print("\nhashOutputs preimage (outputs)")
print("outputs: ", outputs.hex())
print("merch-close-script (p2wsh preimage): ", merch_close_script.hex())
# Calculate txid of the tx we have just created:
# Convert to pre-segwit format, double sha256, reverse bytes (little endian)
final_tx_legacy = (
version
+ tx_in_count
+ txid
+ index
+ scriptSig
+ sequence
+ tx_out_count
+ outputs
+ locktime
)
new_txid = dSHA256(final_tx_legacy)[::-1]
print("\nfinal_tx_legacy: ", final_tx_legacy.hex())
print("\nversion: ", version.hex())
print("tx_in_count: ", tx_in_count.hex())
print("txid little endian: ",txid.hex())
print("index: ",index.hex())
print("scriptSig: ",scriptSig.hex())
print("sequence: ",sequence.hex())
print("tx_out_count: ", tx_out_count.hex())
print("outputs: ",outputs.hex())
print("locktime: ", locktime.hex())
print("\nDouble SHA256 final_tx_legacy: ", dSHA256(final_tx_legacy).hex())
print("\ntxid of this tx: ",new_txid.hex())
| 33.368254
| 122
| 0.694035
|
import argparse
import hashlib
import ecdsa
import sys
def dSHA256(data):
hash_1 = hashlib.sha256(data).digest()
hash_2 = hashlib.sha256(hash_1).digest()
return hash_2
def hash160(s):
return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
def privkey_to_pubkey(privkey):
signing_key = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1)
verifying_key = signing_key.get_verifying_key()
x_cor = bytes.fromhex(verifying_key.to_string().hex())[:32]
y_cor = bytes.fromhex(verifying_key.to_string().hex())[32:]
if int.from_bytes(y_cor, byteorder="big", signed=True) % 2 == 0:
public_key = bytes.fromhex("02" + x_cor.hex())
else:
public_key = bytes.fromhex("03" + x_cor.hex())
return public_key
parser = argparse.ArgumentParser()
parser.add_argument("--debug", "-db", action='store_true', help="debug mode: print out all tx details")
parser.add_argument("--txid_str", "-tx", help="txid of input as string")
parser.add_argument("--index", "-ind", help="index of outpoint")
parser.add_argument("--input_amount_btc", "-a", help="amount of btc held by the previous outpoint")
parser.add_argument("--cust_privkey", "-csk", help="private key of customer for escrow")
parser.add_argument("--merch_privkey", "-msk", help="private key of merchant for escrow")
parser.add_argument("--output_value_btc", "-o", help="btc to output")
parser.add_argument("--merch_payout_pubkey", "-mcpk", help="public key of merchant close to-self output")
parser.add_argument("--to_self_delay", "-tsd", help="to_self_delay (in unit of blocks) for the merchant's to-self output")
parser.add_argument("--merch_cpfp_value_btc", "-cv", help="merch cpfp output value btc")
parser.add_argument("--merch_cpfp_pubkey", "-cfpk", help="pubkey for merch cpfp output")
args = parser.parse_args()
# If no tx input arguments are provided, use hardcoded values to generate an example tx
if len(sys.argv) < 5:
txID_str = "2222222222222222222222222222222233333333333333333333333333333333"
tx_index = 0
input_amount_sat = int(float(2.1) * 100000000)
cust_privkey = bytes.fromhex("7911111111111111111111111111111111111111111111111111111111111111")
merch_privkey = bytes.fromhex("3711111111111111111111111111111111111111111111111111111111111111")
output_value_sat = int(float(2.0) * 100000000)
merch_payout_pubkey = bytes.fromhex("02f3d17ca1ac6dcf42b0297a71abb87f79dfa2c66278cbb99c1437e6570643ce90")
to_self_delay_big_e = bytes.fromhex("05cf")
merch_cpfp_value_sat = int(float(0.1) * 100000000)
merch_cpfp_pubkey = bytes.fromhex("5511111111111111111111111111111111111111111111111111111111111111")
else:
txID_str = args.txid_str
tx_index = int(args.index)
input_amount_sat = int(float(args.input_amount_btc) * 100000000)
cust_privkey = bytes.fromhex(args.cust_privkey)
merch_privkey = bytes.fromhex(args.merch_privkey)
output_value_sat = int(float(args.output_value_btc) * 100000000)
merch_payout_pubkey = bytes.fromhex(args.merch_payout_pubkey)
to_self_delay_big_e = bytes.fromhex(args.to_self_delay)
merch_cpfp_value_sat = int(float(args.merch_cpfp_value_btc) * 100000000)
merch_cpfp_pubkey = bytes.fromhex(args.merch_cpfp_pubkey)
# keys for the funding tx 2-of-2 multisig
merch_pubkey = privkey_to_pubkey(merch_privkey)
cust_pubkey = privkey_to_pubkey(cust_privkey)
# These are hard coded tx variables
version = bytes.fromhex("0200 0000")
marker = bytes.fromhex("00")
flag = bytes.fromhex("01")
sequence = bytes.fromhex("ffffffff")
locktime = bytes.fromhex("0000 0000")
tx_in_count = bytes.fromhex("01")
tx_out_count = bytes.fromhex("02")
sighash = bytes.fromhex("01000000")
sighash_type_flag = bytes.fromhex("01")
# Convert txid, index, amounts, and to_self_delay to little endian
txid = (bytes.fromhex(txID_str))[::-1]
index = tx_index.to_bytes(4, byteorder="little", signed=False)
input_amount = input_amount_sat.to_bytes(8, byteorder="little", signed=True)
output_value = output_value_sat.to_bytes(8, byteorder="little", signed=True)
merch_cpfp_value = merch_cpfp_value_sat.to_bytes(8, byteorder="little", signed=True)
to_self_delay_little_e = to_self_delay_big_e[::-1]
##########################################
# INPUT (witness script): escrow script op_codes
# 0x52 OP_2
# 0x21 OP_DATA - len(merch_pubkey)
# merch_pubkey
# 0x21 OP_DATA - len(cust_pubkey)
# cust_pubkey
# 0x52 OP_2
# 0xae OP_CHECKMULTISIG
escrow_script = (
bytes.fromhex("5221")
+ merch_pubkey
+ bytes.fromhex("21")
+ cust_pubkey
+ bytes.fromhex("52ae")
)
# OUTPUT: merch-close script op_codes
# 0x63 OP_IF
# 0x52 OP_2
# 0x21 OP_DATA - len(merch_pubkey)
# merch_pubkey
# 0x21 OP_DATA - len(cust_pubkey)
# cust_pubkey
# 0x52 OP_2
# 0xae OP_CHECKMULTISIG
# 0x67 OP_ELSE
# 0x__ OP_DATA - len(to_self_delay) (probably 0x02)
# to_self_delay
# 0xb2 OP_CHECKSEQUENCEVERIFY
# 0x75 OP_DROP
# 0x21 OP_DATA - len(merch_payout_pubkey)
# merch_close_pk
# 0xac OP_CHECKSIG
# 0x68 OP_ENDIF
merch_close_script = (
bytes.fromhex("63 52 21")
+ merch_pubkey
+ bytes.fromhex("21")
+ cust_pubkey
+ bytes.fromhex("52 ae 67")
+ len(to_self_delay_little_e).to_bytes(1, byteorder="little", signed=False)
+ to_self_delay_little_e
+ bytes.fromhex("b2 75 21")
+ merch_payout_pubkey
+ bytes.fromhex("ac68")
)
script_sha32 = hashlib.sha256(merch_close_script).digest()
merch_close_scriptPK = bytes.fromhex("0020") + script_sha32
# P2WPKH scriptPubKey
merch_cpfp_scriptPK = bytes.fromhex("0014") + hash160(merch_cpfp_pubkey)
##########################################
# Put together the tx digest preimage
hashPrevOuts = dSHA256(txid + index)
hashSequence = dSHA256(sequence)
# hashOutputs and output
outputs = (
output_value
+ (len(merch_close_scriptPK)).to_bytes(1, byteorder="little", signed=False)
+ merch_close_scriptPK
+ merch_cpfp_value
+ (len(merch_cpfp_scriptPK)).to_bytes(1, byteorder="little", signed=False)
+ merch_cpfp_scriptPK
)
hashOutputs = dSHA256(outputs)
scriptcode = (
(len(escrow_script)).to_bytes(1, byteorder="little", signed=False)
+ escrow_script
)
tx_digest_preimage = (
version
+ hashPrevOuts
+ hashSequence
+ txid
+ index
+ scriptcode
+ input_amount
+ sequence
+ hashOutputs
+ locktime
+ sighash
)
tx_digest = dSHA256(tx_digest_preimage)
##########################################
# Produce signatures for 2-of-2 multisig
signing_key_merch = ecdsa.SigningKey.from_string(merch_privkey, curve=ecdsa.SECP256k1) # Don't forget to specify the curve
signature_merch = signing_key_merch.sign_digest(tx_digest, sigencode=ecdsa.util.sigencode_der_canonize)
signing_key_cust = ecdsa.SigningKey.from_string(cust_privkey, curve=ecdsa.SECP256k1)
signature_cust = signing_key_cust.sign_digest(tx_digest, sigencode=ecdsa.util.sigencode_der_canonize)
##########################################
# Create witness field with 2-of-2 multisig signatures (in specific order)
witness_field = (
# indicate the number of stack items for the txin
bytes.fromhex("04")
# OP_CHECKMULTISIG bug
+ bytes.fromhex("00")
# signature 1
+ (len(signature_merch)+1).to_bytes(1, byteorder="little", signed=False)
+ signature_merch
+ sighash_type_flag
# signature 2
+ (len(signature_cust)+1).to_bytes(1, byteorder="little", signed=False)
+ signature_cust
+ sighash_type_flag
# witnessScript
    # This is the script that the creator of this transaction needs to provide, and
# solve, in order to redeem the UTXO listed in the input
+ (len(escrow_script)).to_bytes(1, byteorder="little", signed=False)
+ escrow_script
)
##########################################
# Create final tx with signatures
scriptSig = (
bytes.fromhex("00") # length of empty scriptSig (since it's a witness output)
)
final_tx = (
version
+ marker
+ flag
+ tx_in_count
+ txid
+ index
+ scriptSig
+ sequence
+ tx_out_count
+ outputs
+ witness_field
+ locktime
)
print(final_tx.hex())
print("merch-close-script (p2wsh preimage): ", merch_close_script.hex())
final_tx_legacy = (
version
+ tx_in_count
+ txid
+ index
+ scriptSig
+ sequence
+ tx_out_count
+ outputs
+ locktime
)
new_txid = dSHA256(final_tx_legacy)[::-1]
print("\nfinal_tx_legacy: ", final_tx_legacy.hex())
print("\nversion: ", version.hex())
print("tx_in_count: ", tx_in_count.hex())
print("txid little endian: ",txid.hex())
print("index: ",index.hex())
print("scriptSig: ",scriptSig.hex())
print("sequence: ",sequence.hex())
print("tx_out_count: ", tx_out_count.hex())
print("outputs: ",outputs.hex())
print("locktime: ", locktime.hex())
print("\nDouble SHA256 final_tx_legacy: ", dSHA256(final_tx_legacy).hex())
print("\ntxid of this tx: ",new_txid.hex())
| true
| true
|
1c40a342679a1eaa8322cfbd8ecbc8309bb09213
| 390
|
py
|
Python
|
openstack_dashboard/enabled/_1720_project_databases_panel.py
|
xinwu/horizon
|
0e984a2c75d253dd35ab92e7926021b82d730b26
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/enabled/_1720_project_databases_panel.py
|
xinwu/horizon
|
0e984a2c75d253dd35ab92e7926021b82d730b26
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/enabled/_1720_project_databases_panel.py
|
xinwu/horizon
|
0e984a2c75d253dd35ab92e7926021b82d730b26
|
[
"Apache-2.0"
] | null | null | null |
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'databases'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'database'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'openstack_dashboard.dashboards.project.databases.panel.Databases'
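# Hedged aside (not part of this file): Horizon's pluggable-settings convention
# also recognizes flags such as DISABLED = True (skip this file) or
# REMOVE_PANEL = True (drop an existing panel), placed in similarly named
# _NNNN_*.py files under openstack_dashboard/enabled/ or local/enabled/.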
| 39
| 78
| 0.782051
|
PANEL = 'databases'
PANEL_DASHBOARD = 'project'
PANEL_GROUP = 'database'
ADD_PANEL = 'openstack_dashboard.dashboards.project.databases.panel.Databases'
| true
| true
|
1c40a48e830b627ca1d05cdbf1fc61968e871c4a
| 22,637
|
py
|
Python
|
tensorflow/python/keras/distribute/keras_correctness_test_base.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 12
|
2020-12-28T18:42:10.000Z
|
2022-03-24T17:34:21.000Z
|
tensorflow/python/keras/distribute/keras_correctness_test_base.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 2
|
2021-08-25T15:58:11.000Z
|
2022-02-10T01:47:24.000Z
|
tensorflow/python/keras/distribute/keras_correctness_test_base.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 3
|
2020-03-09T19:17:02.000Z
|
2020-06-26T23:14:31.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.util import nest
_RANDOM_SEED = 1337
_EVAL_STEPS = 20
_GLOBAL_BATCH_SIZE = 64
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
all_strategies = [
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step,
]
def eager_mode_test_configuration():
return combinations.combine(
mode='eager', use_numpy=[True, False], use_validation_data=[True, False])
def graph_mode_test_configuration():
return combinations.combine(
mode='graph', use_numpy=[True, False], use_validation_data=[True, False])
def all_strategy_and_input_config_combinations():
return (combinations.times(
combinations.combine(
distribution=all_strategies,
experimental_run_tf_function=[True, False]),
eager_mode_test_configuration() + graph_mode_test_configuration()))
def strategy_minus_tpu_and_input_config_combinations_eager():
return (combinations.times(
combinations.combine(
distribution=strategy_combinations.strategies_minus_tpu),
eager_mode_test_configuration()))
def strategies_for_embedding_models():
"""Returns distribution strategies to test for embedding models.
Since embedding models take longer to train, we disregard DefaultStrategy
in order to prevent testing timeouts.
"""
return [
s for s in all_strategies if s.required_tpu or s.required_gpus or
s is strategy_combinations.one_device_strategy
]
def test_combinations_for_embedding_model():
# TODO(sourabhbajaj): Enable tests for eager mode
eager_mode_strategies = [
s for s in strategies_for_embedding_models() if not s.required_tpu
]
return (combinations.times(
combinations.combine(
distribution=strategies_for_embedding_models(),
experimental_run_tf_function=[True, False]),
(graph_mode_test_configuration())) + combinations.times(
combinations.combine(
distribution=eager_mode_strategies,
experimental_run_tf_function=[False]),
(eager_mode_test_configuration())))
def test_combinations_with_tpu_strategies():
tpu_strategies = [
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_one_step
]
return (combinations.times(
combinations.combine(distribution=tpu_strategies),
graph_mode_test_configuration()))
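# Hedged usage sketch (pattern assumed from the concrete correctness tests that
# subclass the base classes below): the combination helpers above are consumed
# as parameterized-test decorators, e.g.
# @combinations.generate(all_strategy_and_input_config_combinations())
# def test_dnn_correctness(self, distribution, use_numpy, use_validation_data,
#                          experimental_run_tf_function):
#   self.run_correctness_test(distribution, use_numpy, use_validation_data,
#                             experimental_run_tf_function)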
class MaybeDistributionScope(object):
"""Provides a context allowing no distribution strategy."""
def __init__(self, distribution):
self._distribution = distribution
self._scope = None
def __enter__(self):
if self._distribution:
self._scope = self._distribution.scope()
self._scope.__enter__()
def __exit__(self, exc_type, value, traceback):
if self._distribution:
self._scope.__exit__(exc_type, value, traceback)
self._scope = None
def batch_wrapper(dataset, batch_size, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
return dataset.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
distribution and
not distributed_training_utils.global_batch_size_supported(distribution))
if use_per_core_batch_size:
batch_size //= distribution.num_replicas_in_sync
return batch_size
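# Worked example (illustrative numbers only): with the module's
# _GLOBAL_BATCH_SIZE of 64 and a two-replica strategy that does not support a
# global batch size, each replica receives 64 // 2 = 32 samples per step;
# strategies that do support it keep the full 64.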
def get_data_size(data):
"""Gets the size of data in list, tuple, dict, or a numpy array."""
assert isinstance(data, (np.ndarray, list, dict, tuple))
if isinstance(data, np.ndarray):
return len(data)
if isinstance(data, (list, tuple)):
return len(data[0])
return len(six.next(six.itervalues(data)))
def get_shapes(data):
shapes = None
if all(hasattr(x, 'shape') for x in nest.flatten(data)):
shapes = nest.map_structure(lambda x: x.shape, data)
return shapes
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution, x_train, y_train, x_eval,
y_eval, x_predict, training_epochs):
"""Generates the inputs for correctness check when enable Keras with DS."""
global_batch_size = _GLOBAL_BATCH_SIZE
batch_size = get_batch_size(global_batch_size, with_distribution)
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': training_epochs,
'shuffle': False,
}
if use_validation_data:
eval_inputs = None
training_inputs['validation_data'] = (x_eval, y_eval)
else:
eval_inputs = {
'batch_size': batch_size,
'x': x_eval,
'y': y_eval,
}
predict_inputs = {'x': x_predict}
else:
training_data_size = get_data_size(x_train)
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)
steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size))
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': training_epochs,
'shuffle': False,
'steps_per_epoch': steps_per_epoch
}
if use_validation_data:
eval_inputs = None # Remove the eval_inputs
eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
training_inputs['validation_data'] = x
training_inputs['validation_steps'] = 5
else:
eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size))
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': eval_steps,
}
predict_batch_size = get_batch_size(
get_data_size(x_predict), with_distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)
predict_inputs = {
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(initial_weights,
input_fn,
model_fn,
experimental_run_tf_function=None,
distribution=None,
is_stateful_model=False):
"""Generates results for fit/predict/evaluate for given model."""
training_inputs, eval_inputs, predict_inputs = input_fn()
model = model_fn(
experimental_run_tf_function=experimental_run_tf_function,
initial_weights=initial_weights,
distribution=distribution,
input_shapes=get_shapes(training_inputs['x']))
result = {}
result['training_history_1'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_1'] = model.evaluate(**eval_inputs)
result['weights_1'] = model.get_weights()
if predict_inputs is not None:
# Check correctness of the result of predict() invoked
# multiple times -- as for stateful models, result of
# predict may differ for each batch.
predict_length = 1
if is_stateful_model:
predict_length = 3
for i in range(predict_length):
result_key = 'predict_result_{}'.format(i)
result[result_key] = model.predict(**predict_inputs)
# Train and eval again to mimic user's flow.
result['training_history_2'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_2'] = model.evaluate(**eval_inputs)
result['weights_2'] = model.get_weights()
return result
def compare_results(results_with_ds,
results_without_ds,
distribution,
testcase,
partial_last_batch=None):
"""Compares results of model compiled with/without distribution strategy."""
if policy.global_policy().compute_dtype in ('float16', 'bfloat16'):
default_tolerance = 1e-2
relaxed_tolerance = 1e-2
elif partial_last_batch == 'train_and_eval':
    # We relax the tolerance a lot in the partial last batch case as
# 1. the examples in uneven batches may have different weights when
# applying the gradients in the distributed case.
# 2. TF Keras and TF Keras DS have different ways to handle the case when
# training with epochs > 1 with numpy inputs. In TF Keras, every epoch
# may have a partial batch. While in TF Keras DS, as we convert
# numpy inputs into dataset, it will do a repeat() first and calculate
# steps_per_epoch, so it will at most have one partial batch. This
    #     can make even the single-replica (1-CPU) result differ.
default_tolerance = 1e-3
relaxed_tolerance = 1e-3
else:
default_tolerance = 1e-5
relaxed_tolerance = 1e-4
def _get_compare_result_tolerance(key):
"""Returns tolerance to compare results."""
# TODO(b/119257215): For MirroredStrategy, weights are not exactly the same,
# so use larger tolerance for now. Predict should be related to weights.
if (isinstance(distribution,
(mirrored_strategy.MirroredStrategy,
distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access
key.startswith(('weights_1', 'weights_2', 'predict_result'))):
return relaxed_tolerance
return default_tolerance
for key in sorted(results_with_ds.keys()):
if (key.startswith('training_history') and
isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
distribution.extended.steps_per_run > 1):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = _get_compare_result_tolerance(key)
# We don't compare the loss as loss is currently not computed as metric
# in Keras, the loss value is inaccurate for last partial batch due to
# more weights for the last batch samples.
if partial_last_batch is not None:
if key.startswith('eval_result'):
results_with_ds[key] = results_with_ds[key][1:]
results_without_ds[key] = results_without_ds[key][1:]
if key.startswith('training_history'):
results_with_ds[key]['val_loss'] = 0
results_without_ds[key]['val_loss'] = 0
testcase.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg='Fail to assert {}.'.format(key))
def should_skip_tpu_with_eager(distribution):
return (context.executing_eagerly() and
isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)))
class LearningRateBatchScheduler(keras.callbacks.Callback):
"""Scheduler that dynamically sets the learning rate of model."""
def __init__(self, update_freq=None):
self._update_freq = update_freq
def on_batch_begin(self, batch, logs=None):
if self._update_freq and batch % self._update_freq != 0:
return
# To avoid divergence, limit the value range.
lr = 0.001 * (batch % 10)
keras.backend.set_value(self.model.optimizer.lr, lr)
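    # With update_freq=None this sets lr to 0.000, 0.001, ..., 0.009 on
    # successive batches and then repeats, since 0.001 * (batch % 10) cycles
    # with period 10.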
class TestDistributionStrategyCorrectnessBase(test.TestCase,
parameterized.TestCase):
"""Model agnostic testing infra to test correctness of Keras models."""
def set_up_test_config(self,
use_numpy=False,
use_validation_data=False,
with_batch_norm=False):
self.use_numpy = use_numpy
self.use_validation_data = use_validation_data
self.with_batch_norm = with_batch_norm
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
def get_data(self):
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
return (x_train.astype('float32'), y_train.astype('float32'), None)
def get_data_with_partial_last_batch(self):
raise NotImplementedError
def get_data_with_partial_last_batch_eval(self):
raise NotImplementedError
def get_input_for_correctness_test(self, **kwargs):
"""Generates inputs that are dictionaries.
We only provide a default implementation of this method here. If you need
    a more customized way of providing input to your model, override this method.
Arguments:
**kwargs: key word arguments about how to create the input dictionaries
Returns:
      Three dictionaries representing the input for fit(), evaluate() and
predict()
"""
return get_correctness_test_inputs(**kwargs)
def get_model(self,
distribution=None,
experimental_run_tf_function=None,
input_shapes=None):
raise NotImplementedError
def run_correctness_test(self,
distribution,
use_numpy,
use_validation_data,
experimental_run_tf_function=None,
with_batch_norm=False,
is_stateful_model=False,
partial_last_batch=None,
training_epochs=2):
with self.cached_session():
self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)
if partial_last_batch == 'eval':
x_train, y_train, x_eval, y_eval, x_predict = (
self.get_data_with_partial_last_batch_eval())
elif partial_last_batch == 'train_and_eval':
x_train, y_train, x_eval, y_eval, x_predict = (
self.get_data_with_partial_last_batch())
else:
x_train, y_train, x_predict = self.get_data()
x_eval = x_train
y_eval = y_train
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
# non-distribution run.
model = self.get_model(
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
ds_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=distribution,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs)
nods_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=None,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs)
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution,
is_stateful_model=is_stateful_model)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None,
is_stateful_model=is_stateful_model)
# First, special case, for multi-replica distributed training, batch
# norm is not aggregated globally. So it is expected to have different
# weights.
if (self.with_batch_norm and distribution.num_replicas_in_sync > 1):
with self.assertRaises(AssertionError):
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch)
else:
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch)
def get_input_for_dynamic_lr_test(self, **kwargs):
"""Generates inputs that are dictionaries.
We only provide a default implementation of this method here. If you need
    a more customized way of providing input to your model, override this method.
Arguments:
**kwargs: key word arguments about how to create the input dictionaries
Returns:
      Three dictionaries representing the input for fit(), evaluate() and
predict()
"""
training_input = kwargs
return training_input, None, None
def run_dynamic_lr_test(self,
distribution,
experimental_run_tf_function=None):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
update_freq = None
if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and
distribution.extended.steps_per_run > 1):
# For TPUStrategy with steps_per_run > 1, the callback is not invoked
# every step. So, to compare the CPU/TPU, we let the CPU to behave the
# same as TPU.
update_freq = distribution.extended.steps_per_run
training_epochs = 2
global_batch_size = 64
ds_batch_size = get_batch_size(global_batch_size, distribution)
nods_batch_size = get_batch_size(global_batch_size, None)
ds_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=ds_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train))
nods_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=nods_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train))
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None)
compare_results(
results_with_ds, results_without_ds, distribution, testcase=self)
class TestDistributionStrategyEmbeddingModelCorrectnessBase(
TestDistributionStrategyCorrectnessBase):
"""Base class to test correctness of Keras models with embedding layers."""
def get_data(self,
count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),
min_words=5,
max_words=10,
max_word_id=19,
num_classes=2):
distribution = []
for _ in range(num_classes):
dist = np.abs(np.random.randn(max_word_id))
dist /= np.sum(dist)
distribution.append(dist)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
num_words = np.random.randint(min_words, max_words, size=1)[0]
word_ids = np.random.choice(
max_word_id, size=num_words, replace=True, p=distribution[label])
word_ids = word_ids
labels.append(label)
features.append(word_ids)
features = sequence.pad_sequences(
features, maxlen=max_words)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))
x_predict = x_train[:_GLOBAL_BATCH_SIZE]
return x_train, y_train, x_predict
if __name__ == '__main__':
test.main()
| 35.481191
| 105
| 0.688342
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.util import nest
_RANDOM_SEED = 1337
_EVAL_STEPS = 20
_GLOBAL_BATCH_SIZE = 64
all_strategies = [
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_one_step,
]
def eager_mode_test_configuration():
return combinations.combine(
mode='eager', use_numpy=[True, False], use_validation_data=[True, False])
def graph_mode_test_configuration():
return combinations.combine(
mode='graph', use_numpy=[True, False], use_validation_data=[True, False])
def all_strategy_and_input_config_combinations():
return (combinations.times(
combinations.combine(
distribution=all_strategies,
experimental_run_tf_function=[True, False]),
eager_mode_test_configuration() + graph_mode_test_configuration()))
def strategy_minus_tpu_and_input_config_combinations_eager():
return (combinations.times(
combinations.combine(
distribution=strategy_combinations.strategies_minus_tpu),
eager_mode_test_configuration()))
def strategies_for_embedding_models():
return [
s for s in all_strategies if s.required_tpu or s.required_gpus or
s is strategy_combinations.one_device_strategy
]
def test_combinations_for_embedding_model():
eager_mode_strategies = [
s for s in strategies_for_embedding_models() if not s.required_tpu
]
return (combinations.times(
combinations.combine(
distribution=strategies_for_embedding_models(),
experimental_run_tf_function=[True, False]),
(graph_mode_test_configuration())) + combinations.times(
combinations.combine(
distribution=eager_mode_strategies,
experimental_run_tf_function=[False]),
(eager_mode_test_configuration())))
def test_combinations_with_tpu_strategies():
tpu_strategies = [
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_one_step
]
return (combinations.times(
combinations.combine(distribution=tpu_strategies),
graph_mode_test_configuration()))
class MaybeDistributionScope(object):
def __init__(self, distribution):
self._distribution = distribution
self._scope = None
def __enter__(self):
if self._distribution:
self._scope = self._distribution.scope()
self._scope.__enter__()
def __exit__(self, exc_type, value, traceback):
if self._distribution:
self._scope.__exit__(exc_type, value, traceback)
self._scope = None
def batch_wrapper(dataset, batch_size, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
return dataset.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
batch_size = global_batch_size
use_per_core_batch_size = (
distribution and
not distributed_training_utils.global_batch_size_supported(distribution))
if use_per_core_batch_size:
batch_size //= distribution.num_replicas_in_sync
return batch_size
def get_data_size(data):
assert isinstance(data, (np.ndarray, list, dict, tuple))
if isinstance(data, np.ndarray):
return len(data)
if isinstance(data, (list, tuple)):
return len(data[0])
return len(six.next(six.itervalues(data)))
def get_shapes(data):
shapes = None
if all(hasattr(x, 'shape') for x in nest.flatten(data)):
shapes = nest.map_structure(lambda x: x.shape, data)
return shapes
def get_correctness_test_inputs(use_numpy, use_validation_data,
with_distribution, x_train, y_train, x_eval,
y_eval, x_predict, training_epochs):
global_batch_size = _GLOBAL_BATCH_SIZE
batch_size = get_batch_size(global_batch_size, with_distribution)
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': training_epochs,
'shuffle': False,
}
if use_validation_data:
eval_inputs = None
training_inputs['validation_data'] = (x_eval, y_eval)
else:
eval_inputs = {
'batch_size': batch_size,
'x': x_eval,
'y': y_eval,
}
predict_inputs = {'x': x_predict}
else:
training_data_size = get_data_size(x_train)
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)
steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size))
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': training_epochs,
'shuffle': False,
'steps_per_epoch': steps_per_epoch
}
if use_validation_data:
eval_inputs = None
eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
training_inputs['validation_data'] = x
training_inputs['validation_steps'] = 5
else:
eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size))
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': eval_steps,
}
predict_batch_size = get_batch_size(
get_data_size(x_predict), with_distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)
predict_inputs = {
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(initial_weights,
input_fn,
model_fn,
experimental_run_tf_function=None,
distribution=None,
is_stateful_model=False):
training_inputs, eval_inputs, predict_inputs = input_fn()
model = model_fn(
experimental_run_tf_function=experimental_run_tf_function,
initial_weights=initial_weights,
distribution=distribution,
input_shapes=get_shapes(training_inputs['x']))
result = {}
result['training_history_1'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_1'] = model.evaluate(**eval_inputs)
result['weights_1'] = model.get_weights()
if predict_inputs is not None:
predict_length = 1
if is_stateful_model:
predict_length = 3
for i in range(predict_length):
result_key = 'predict_result_{}'.format(i)
result[result_key] = model.predict(**predict_inputs)
result['training_history_2'] = model.fit(**training_inputs).history
if eval_inputs is not None:
result['eval_result_2'] = model.evaluate(**eval_inputs)
result['weights_2'] = model.get_weights()
return result
def compare_results(results_with_ds,
results_without_ds,
distribution,
testcase,
partial_last_batch=None):
if policy.global_policy().compute_dtype in ('float16', 'bfloat16'):
default_tolerance = 1e-2
relaxed_tolerance = 1e-2
elif partial_last_batch == 'train_and_eval':
# We relax the tolerance a lot in the partial last batch case as
# 1. the examples in uneven batches may have different weights when
# applying the gradients in the distributed case.
# 2. TF Keras and TF Keras DS have different ways to handle the case when
# training with epochs > 1 with numpy inputs. In TF Keras, every epoch
# may have a partial batch. While in TF Keras DS, as we convert
# numpy inputs into dataset, it will do a repeat() first and calculate
# steps_per_epoch, so it will at most have one partial batch. This
# makes the 1-CPU result even different.
default_tolerance = 1e-3
relaxed_tolerance = 1e-3
else:
default_tolerance = 1e-5
relaxed_tolerance = 1e-4
def _get_compare_result_tolerance(key):
# TODO(b/119257215): For MirroredStrategy, weights are not exactly the same,
# so use larger tolerance for now. Predict should be related to weights.
if (isinstance(distribution,
(mirrored_strategy.MirroredStrategy,
distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access
key.startswith(('weights_1', 'weights_2', 'predict_result'))):
return relaxed_tolerance
return default_tolerance
for key in sorted(results_with_ds.keys()):
if (key.startswith('training_history') and
isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
distribution.extended.steps_per_run > 1):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = _get_compare_result_tolerance(key)
# We don't compare the loss as loss is currently not computed as metric
if partial_last_batch is not None:
if key.startswith('eval_result'):
results_with_ds[key] = results_with_ds[key][1:]
results_without_ds[key] = results_without_ds[key][1:]
if key.startswith('training_history'):
results_with_ds[key]['val_loss'] = 0
results_without_ds[key]['val_loss'] = 0
testcase.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg='Fail to assert {}.'.format(key))
def should_skip_tpu_with_eager(distribution):
return (context.executing_eagerly() and
isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)))
class LearningRateBatchScheduler(keras.callbacks.Callback):
def __init__(self, update_freq=None):
self._update_freq = update_freq
def on_batch_begin(self, batch, logs=None):
if self._update_freq and batch % self._update_freq != 0:
return
lr = 0.001 * (batch % 10)
keras.backend.set_value(self.model.optimizer.lr, lr)
class TestDistributionStrategyCorrectnessBase(test.TestCase,
parameterized.TestCase):
def set_up_test_config(self,
use_numpy=False,
use_validation_data=False,
with_batch_norm=False):
self.use_numpy = use_numpy
self.use_validation_data = use_validation_data
self.with_batch_norm = with_batch_norm
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
def get_data(self):
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
return (x_train.astype('float32'), y_train.astype('float32'), None)
def get_data_with_partial_last_batch(self):
raise NotImplementedError
def get_data_with_partial_last_batch_eval(self):
raise NotImplementedError
def get_input_for_correctness_test(self, **kwargs):
return get_correctness_test_inputs(**kwargs)
def get_model(self,
distribution=None,
experimental_run_tf_function=None,
input_shapes=None):
raise NotImplementedError
def run_correctness_test(self,
distribution,
use_numpy,
use_validation_data,
experimental_run_tf_function=None,
with_batch_norm=False,
is_stateful_model=False,
partial_last_batch=None,
training_epochs=2):
with self.cached_session():
self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)
if partial_last_batch == 'eval':
x_train, y_train, x_eval, y_eval, x_predict = (
self.get_data_with_partial_last_batch_eval())
elif partial_last_batch == 'train_and_eval':
x_train, y_train, x_eval, y_eval, x_predict = (
self.get_data_with_partial_last_batch())
else:
x_train, y_train, x_predict = self.get_data()
x_eval = x_train
y_eval = y_train
model = self.get_model(
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
ds_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=distribution,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs)
nods_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=None,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs)
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution,
is_stateful_model=is_stateful_model)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None,
is_stateful_model=is_stateful_model)
if (self.with_batch_norm and distribution.num_replicas_in_sync > 1):
with self.assertRaises(AssertionError):
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch)
else:
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch)
def get_input_for_dynamic_lr_test(self, **kwargs):
training_input = kwargs
return training_input, None, None
def run_dynamic_lr_test(self,
distribution,
experimental_run_tf_function=None):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
update_freq = None
if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and
distribution.extended.steps_per_run > 1):
update_freq = distribution.extended.steps_per_run
training_epochs = 2
global_batch_size = 64
ds_batch_size = get_batch_size(global_batch_size, distribution)
nods_batch_size = get_batch_size(global_batch_size, None)
ds_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=ds_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train))
nods_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=nods_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train))
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None)
compare_results(
results_with_ds, results_without_ds, distribution, testcase=self)
class TestDistributionStrategyEmbeddingModelCorrectnessBase(
TestDistributionStrategyCorrectnessBase):
def get_data(self,
count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),
min_words=5,
max_words=10,
max_word_id=19,
num_classes=2):
distribution = []
for _ in range(num_classes):
dist = np.abs(np.random.randn(max_word_id))
dist /= np.sum(dist)
distribution.append(dist)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
num_words = np.random.randint(min_words, max_words, size=1)[0]
word_ids = np.random.choice(
max_word_id, size=num_words, replace=True, p=distribution[label])
word_ids = word_ids
labels.append(label)
features.append(word_ids)
features = sequence.pad_sequences(
features, maxlen=max_words)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))
x_predict = x_train[:_GLOBAL_BATCH_SIZE]
return x_train, y_train, x_predict
if __name__ == '__main__':
test.main()
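# --- Illustrative sketch (not part of the original file) ---
# get_batch_size() above hands Keras a per-replica batch size whenever the
# strategy does not support a global batch size: it divides the global batch
# by num_replicas_in_sync. A minimal standalone rendering of that arithmetic;
# the replica count of 8 below is just an assumed example value.
def _per_replica_batch_size(global_batch_size, num_replicas_in_sync,
                            global_batch_supported):
    # Mirrors get_batch_size(): keep the global size if the strategy handles
    # global batching, otherwise split it evenly across replicas.
    if global_batch_supported:
        return global_batch_size
    return global_batch_size // num_replicas_in_sync

assert _per_replica_batch_size(64, 8, global_batch_supported=False) == 8
assert _per_replica_batch_size(64, 8, global_batch_supported=True) == 64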
| true
| true
|
1c40a6eb8a3fb152356c57421c00424f19f98a0a
| 8,302
|
py
|
Python
|
gym-link/gym_link/envs/link_env.py
|
ivukotic/rl_examples
|
b6ca1a01429934cc936baa94753b3e08677e0fae
|
[
"MIT"
] | null | null | null |
gym-link/gym_link/envs/link_env.py
|
ivukotic/rl_examples
|
b6ca1a01429934cc936baa94753b3e08677e0fae
|
[
"MIT"
] | null | null | null |
gym-link/gym_link/envs/link_env.py
|
ivukotic/rl_examples
|
b6ca1a01429934cc936baa94753b3e08677e0fae
|
[
"MIT"
] | null | null | null |
"""
One network link environment.
Link has changing base load.
Actions: start 0 to 4 more transfers
Reward: percentage of free rate used. Gets negative if link fully saturated
File sizes are normally distributed (absolute values).
"""
import math
from collections import deque
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.classic_control import rendering
import numpy as np
class LinkEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
self.max_link_rate = 10 * 1024 * 1024 * 1024 / 8 # 10 Gigabits - all rates are in B/s
self.base_rate_min = 0
self.base_rate_max = self.max_link_rate * 0.9
self.handshake_duration = 1 # seconds
self.max_rate_per_file = 5 * 1024 * 1024 # B/s
self.file_size_mean = 1350 * 1024 * 1024
self.file_size_sigma = 300 * 1024 * 1024
# key: int, start: int, stop: int, size: int [bytes], transferred: int [bytes]
self.transfers = deque(maxlen=2000)
self.current_base_rate = int(self.max_link_rate * 0.5 * np.random.ranf())
self.tstep = 0
self.viewer = None
self.h_base = deque(maxlen=600)
self.h_added = deque(maxlen=600)
self.dc_free = 0
self.dc_used = 0
self._seed()
# observation space reports only on transferred files: rate and how many steps ago the transfer started.
self.observation_space = spaces.Box(
# low=np.array([0.0, 0, 0]),
# high=np.array([np.finfo(np.float32).max, np.iinfo(np.int32).max, np.iinfo(np.int32).max])
low=np.array([0.0]),
high=np.array([1.5])
)
self.action_space = spaces.Discrete(4)
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reward_function(self, x):
return -21.22 * x * x * x * x + 33.77 * x * x * x - 15.73 * x * x + 3.306 * x + 0.002029
def _step(self, action):
# add transfers if asked for
for i in range(action):
file_size = int(math.fabs(self.file_size_mean + np.random.standard_normal() * self.file_size_sigma))
self.transfers.append([self.tstep, 0, file_size, 0])
# find current base rate
self.current_base_rate += int(np.random.standard_normal() * 8 * 1024 * 1024)
if self.current_base_rate > self.base_rate_max:
self.current_base_rate = self.base_rate_max
if self.current_base_rate < self.base_rate_min:
self.current_base_rate = self.base_rate_min
# find used rate if all the ongoing transfers would be at maximal rate
active_transfers = 0
for t in self.transfers:
# print(t)
if self.tstep < self.handshake_duration + t[0] or t[1] > 0:
continue
active_transfers += 1
max_rate = self.max_rate_per_file * active_transfers
# find free bandwidth
max_free_bandwidth = self.max_link_rate - self.current_base_rate
self.dc_free += max_free_bandwidth / 1024
self.dc_used += min(max_free_bandwidth, max_rate) / 1024
reward = self.reward_function(max_rate / max_free_bandwidth)
episode_over = False
if (max_rate + self.current_base_rate) > 1.1 * self.max_link_rate or self.tstep >= 1400:
episode_over = True
current_rate_per_file = 0
if active_transfers > 0:
current_rate_per_file = min(math.floor(max_free_bandwidth / active_transfers), self.max_rate_per_file)
# LSFT - last started finished transfer
time_of_LSFT = 0 # how long ago that transfer ended
rate_of_LSFT = 0
size_of_LSFT = 0
finished = 0
# transfer [start_time, end_time, size, transferred_till_now]
for t in self.transfers:
if self.tstep < self.handshake_duration + t[0]: # still in handshake phase
continue
if t[1] == 0: # increase transferred size for unfinished transfers
t[3] += current_rate_per_file
if t[3] >= t[2] and t[1] == 0: # if some finished in this timestep
t[1] = self.tstep
if t[3] >= t[2]: # all finished
finished += 1 # this is just for info
if t[0] > time_of_LSFT: # last started from all finished
rate_of_LSFT = t[2] / (t[1] - t[0] - self.handshake_duration + 1)
size_of_LSFT = t[2]
time_of_LSFT = self.tstep - t[1]
size_of_LSFT = 0
rate_of_LSFT = 0
time_of_LSFT = max_free_bandwidth / self.max_link_rate # hack
# observation = (rate_of_LSFT, size_of_LSFT, time_of_LSFT)
observation = ((max_rate + self.current_base_rate) / self.max_link_rate)
self.tstep += 1
self.h_base.append(self.current_base_rate)
self.h_added.append(max_rate + self.current_base_rate)
return observation, reward, episode_over, {
"finished transfers": finished,
"duty cycle": self.dc_used / self.dc_free,
"active transfers": active_transfers,
"base rate [%]": int(self.current_base_rate / self.max_link_rate * 10000) / 100
}
def _reset(self):
self.tstep = 0
self.transfers.clear()
self.dc_free = 0
self.dc_used = 0
return np.array((0.5))
# return np.array((0, 0, 0))
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 640
screen_height = 480
scale = np.max(self.h_added) / 440
bdata = [] # (screen_width - 20, 20)] # first point in lower right corner
y = list(reversed(self.h_base))
for j, i in enumerate(y):
bdata.append((screen_width - 20 - j, 20 + int(i / scale)))
# bdata.append((screen_width - 20 - len(y), 20))
adata = [] # (screen_width - 20, 20)]
y = list(reversed(self.h_added))
for j, i in enumerate(y):
adata.append((screen_width - 20 - j, 20 + int(i / scale)))
# adata.append((screen_width - 20 - len(y), 20))
adata = adata[:self.tstep]
if self.viewer is None:
self.viewer = rendering.Viewer(screen_width, screen_height)
# l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
# axleoffset = cartheight / 4.0
# cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
# self.carttrans = rendering.Transform()
# cart.add_attr(self.carttrans)
# self.viewer.add_geom(cart)
# self.poletrans = rendering.Transform(translation=(0, axleoffset))
# pole.add_attr(self.poletrans)
# pole.add_attr(self.carttrans)
# self.axle = rendering.make_circle(polewidth / 2)
# self.axle.add_attr(self.poletrans)
# self.axle.add_attr(self.carttrans)
self.xaxis = rendering.Line((20, 20), (screen_width - 20, 20))
self.xaxis.set_color(0, 0, 0)
self.yaxis = rendering.Line((20, 20), (20, screen_height - 20))
self.yaxis.set_color(0, 0, 0)
self.viewer.add_geom(self.xaxis)
self.viewer.add_geom(self.yaxis)
adde = rendering.PolyLine(adata, False)
adde.set_color(.1, .6, .8)
self.viewer.add_onetime(adde)
base = rendering.PolyLine(bdata, False)
base.set_color(.8, .6, .4)
self.viewer.add_onetime(base)
max_line = self.max_link_rate / scale
ml = rendering.Line((20, max_line + 20), (screen_width - 20, max_line + 20))
ml.set_color(0.1, 0.9, .1)
self.viewer.add_onetime(ml)
# if self.state is None:
# return None
# x = self.state
# cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
# self.carttrans.set_translation(cartx, carty)
# self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
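# --- Illustrative usage sketch (not part of the original file) ---
# The class docstring above defines the contract: each action starts new
# transfers (Discrete(4) -> 0..3 per step) and the reward tracks how much of
# the free link rate is used, going negative once the link saturates. A
# minimal random-policy rollout, assuming this module imports cleanly (the
# rendering import at the top needs pyglet even though render() is unused):
if __name__ == '__main__':
    env = LinkEnv()
    obs = env._reset()
    total_reward = 0.0
    for _ in range(200):
        action = env.action_space.sample()  # 0..3 additional transfers
        obs, reward, done, info = env._step(action)
        total_reward += reward
        if done:
            break
    print('return:', total_reward, 'info:', info)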
| 38.258065
| 114
| 0.593471
|
import math
from collections import deque
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.classic_control import rendering
import numpy as np
class LinkEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
self.max_link_rate = 10 * 1024 * 1024 * 1024 / 8
self.base_rate_min = 0
self.base_rate_max = self.max_link_rate * 0.9
self.handshake_duration = 1
self.max_rate_per_file = 5 * 1024 * 1024
self.file_size_mean = 1350 * 1024 * 1024
self.file_size_sigma = 300 * 1024 * 1024
self.transfers = deque(maxlen=2000)
self.current_base_rate = int(self.max_link_rate * 0.5 * np.random.ranf())
self.tstep = 0
self.viewer = None
self.h_base = deque(maxlen=600)
self.h_added = deque(maxlen=600)
self.dc_free = 0
self.dc_used = 0
self._seed()
self.observation_space = spaces.Box(
low=np.array([0.0]),
high=np.array([1.5])
)
self.action_space = spaces.Discrete(4)
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reward_function(self, x):
return -21.22 * x * x * x * x + 33.77 * x * x * x - 15.73 * x * x + 3.306 * x + 0.002029
def _step(self, action):
for i in range(action):
file_size = int(math.fabs(self.file_size_mean + np.random.standard_normal() * self.file_size_sigma))
self.transfers.append([self.tstep, 0, file_size, 0])
self.current_base_rate += int(np.random.standard_normal() * 8 * 1024 * 1024)
if self.current_base_rate > self.base_rate_max:
self.current_base_rate = self.base_rate_max
if self.current_base_rate < self.base_rate_min:
self.current_base_rate = self.base_rate_min
active_transfers = 0
for t in self.transfers:
if self.tstep < self.handshake_duration + t[0] or t[1] > 0:
continue
active_transfers += 1
max_rate = self.max_rate_per_file * active_transfers
max_free_bandwidth = self.max_link_rate - self.current_base_rate
self.dc_free += max_free_bandwidth / 1024
self.dc_used += min(max_free_bandwidth, max_rate) / 1024
reward = self.reward_function(max_rate / max_free_bandwidth)
episode_over = False
if (max_rate + self.current_base_rate) > 1.1 * self.max_link_rate or self.tstep >= 1400:
episode_over = True
current_rate_per_file = 0
if active_transfers > 0:
current_rate_per_file = min(math.floor(max_free_bandwidth / active_transfers), self.max_rate_per_file)
time_of_LSFT = 0
rate_of_LSFT = 0
size_of_LSFT = 0
finished = 0
for t in self.transfers:
if self.tstep < self.handshake_duration + t[0]:
continue
if t[1] == 0:
t[3] += current_rate_per_file
if t[3] >= t[2] and t[1] == 0:
t[1] = self.tstep
if t[3] >= t[2]:
finished += 1
if t[0] > time_of_LSFT:
rate_of_LSFT = t[2] / (t[1] - t[0] - self.handshake_duration + 1)
size_of_LSFT = t[2]
time_of_LSFT = self.tstep - t[1]
size_of_LSFT = 0
rate_of_LSFT = 0
time_of_LSFT = max_free_bandwidth / self.max_link_rate
observation = ((max_rate + self.current_base_rate) / self.max_link_rate)
self.tstep += 1
self.h_base.append(self.current_base_rate)
self.h_added.append(max_rate + self.current_base_rate)
return observation, reward, episode_over, {
"finished transfers": finished,
"duty cycle": self.dc_used / self.dc_free,
"active transfers": active_transfers,
"base rate [%]": int(self.current_base_rate / self.max_link_rate * 10000) / 100
}
def _reset(self):
self.tstep = 0
self.transfers.clear()
self.dc_free = 0
self.dc_used = 0
return np.array((0.5))
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 640
screen_height = 480
scale = np.max(self.h_added) / 440
bdata = []
y = list(reversed(self.h_base))
for j, i in enumerate(y):
bdata.append((screen_width - 20 - j, 20 + int(i / scale)))
adata = []
y = list(reversed(self.h_added))
for j, i in enumerate(y):
adata.append((screen_width - 20 - j, 20 + int(i / scale)))
adata = adata[:self.tstep]
if self.viewer is None:
self.viewer = rendering.Viewer(screen_width, screen_height)
self.xaxis = rendering.Line((20, 20), (screen_width - 20, 20))
self.xaxis.set_color(0, 0, 0)
self.yaxis = rendering.Line((20, 20), (20, screen_height - 20))
self.yaxis.set_color(0, 0, 0)
self.viewer.add_geom(self.xaxis)
self.viewer.add_geom(self.yaxis)
adde = rendering.PolyLine(adata, False)
adde.set_color(.1, .6, .8)
self.viewer.add_onetime(adde)
base = rendering.PolyLine(bdata, False)
base.set_color(.8, .6, .4)
self.viewer.add_onetime(base)
max_line = self.max_link_rate / scale
ml = rendering.Line((20, max_line + 20), (screen_width - 20, max_line + 20))
ml.set_color(0.1, 0.9, .1)
self.viewer.add_onetime(ml)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
| true
| true
|
1c40a712b217d3224d9cbc01756f60232fa675ad
| 691
|
py
|
Python
|
web/news/migrations/0001_initial.py
|
NeumannSven/pyshb_web
|
e4df67dd2550fc8dccf84f26c28894eb86ffc31f
|
[
"MIT"
] | null | null | null |
web/news/migrations/0001_initial.py
|
NeumannSven/pyshb_web
|
e4df67dd2550fc8dccf84f26c28894eb86ffc31f
|
[
"MIT"
] | null | null | null |
web/news/migrations/0001_initial.py
|
NeumannSven/pyshb_web
|
e4df67dd2550fc8dccf84f26c28894eb86ffc31f
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-15 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='news',
fields=[
('newsid', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('title', models.CharField(max_length=40)),
('subtitle', models.CharField(max_length=80)),
('article', models.TextField()),
('date', models.DateField()),
('topics', models.CharField(max_length=80)),
],
),
]
| 26.576923
| 102
| 0.54848
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='news',
fields=[
('newsid', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('title', models.CharField(max_length=40)),
('subtitle', models.CharField(max_length=80)),
('article', models.TextField()),
('date', models.DateField()),
('topics', models.CharField(max_length=80)),
],
),
]
| true
| true
|
1c40a7fc94e37aa860e57e22b861ba268c37050c
| 1,392
|
py
|
Python
|
Python Fundamentals/P2M5MatthewLane.py
|
mlane52/pythonteachingcode
|
46f007a94dfc6afcc22b41952f9c486d5c4c145e
|
[
"MIT"
] | null | null | null |
Python Fundamentals/P2M5MatthewLane.py
|
mlane52/pythonteachingcode
|
46f007a94dfc6afcc22b41952f9c486d5c4c145e
|
[
"MIT"
] | null | null | null |
Python Fundamentals/P2M5MatthewLane.py
|
mlane52/pythonteachingcode
|
46f007a94dfc6afcc22b41952f9c486d5c4c145e
|
[
"MIT"
] | null | null | null |
#MatthewLaneP2M5Final
import os
os.system ("curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/elements1_20.txt -o ""elements1_20.txt")
def get_names() :
while True :
if(len(ele_list) < 5):
ele_input = input("Enter the name of an element: ").strip().lower()
if not ele_input :
continue
elif (ele_input not in ele_list) :
ele_list.append(ele_input)
elif(ele_input in ele_list) :
print(ele_input,"that was already entered; do not enter duplicates")
else :
break
return ele_list
ele = open('elements1_20.txt','r')
ele_list =[]
index = 0
fl_list =[]
found_list =[]
not_found_list =[]
fl_string = ele.readline().strip("\n").upper().lower()
get_names()
while fl_string :
if fl_string is None :
break
else :
fl_list.append(fl_string)
fl_string = ele.readline().strip("\n").upper().lower()
ele.close()
for ele_line in range(len(ele_list)) :
temp_comp=ele_list[ele_line]
if temp_comp in fl_list :
found_list.append(ele_list[ele_line])
else :
not_found_list.append(ele_list[ele_line])
correct_ans = int(len(found_list))*20
print (correct_ans," %"," correct")
print("Elements found : ",' '.join(found_list).title())
print("Elements not found: ",' '.join(not_found_list).title())
| 29.617021
| 129
| 0.635776
|
import os
os.system ("curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/elements1_20.txt -o ""elements1_20.txt")
def get_names() :
while True :
if(len(ele_list) < 5):
ele_input = input("Enter the name of an element: ").strip().lower()
if not ele_input :
continue
elif (ele_input not in ele_list) :
ele_list.append(ele_input)
elif(ele_input in ele_list) :
print(ele_input,"that was already entered; do not enter duplicates")
else :
break
return ele_list
ele = open('elements1_20.txt','r')
ele_list =[]
index = 0
fl_list =[]
found_list =[]
not_found_list =[]
fl_string = ele.readline().strip("\n").upper().lower()
get_names()
while fl_string :
if fl_string is None :
break
else :
fl_list.append(fl_string)
fl_string = ele.readline().strip("\n").upper().lower()
ele.close()
for ele_line in range(len(ele_list)) :
temp_comp=ele_list[ele_line]
if temp_comp in fl_list :
found_list.append(ele_list[ele_line])
else :
not_found_list.append(ele_list[ele_line])
correct_ans = int(len(found_list))*20
print (correct_ans," %"," correct")
print("Elements found : ",' '.join(found_list).title())
print("Elements not found: ",' '.join(not_found_list).title())
| true
| true
|
1c40a8057efc4770ba41bb544ae07945fc992d08
| 203
|
py
|
Python
|
penerimaan_biji/penerimaan_biji/doctype/id_alat/test_id_alat.py
|
bobzz-zone/Biji
|
02bf9001c7bd505041de57c4b952733421b80815
|
[
"MIT"
] | null | null | null |
penerimaan_biji/penerimaan_biji/doctype/id_alat/test_id_alat.py
|
bobzz-zone/Biji
|
02bf9001c7bd505041de57c4b952733421b80815
|
[
"MIT"
] | null | null | null |
penerimaan_biji/penerimaan_biji/doctype/id_alat/test_id_alat.py
|
bobzz-zone/Biji
|
02bf9001c7bd505041de57c4b952733421b80815
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, PT DAS and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestIDAlat(unittest.TestCase):
pass
| 18.454545
| 45
| 0.758621
|
from __future__ import unicode_literals
import frappe
import unittest
class TestIDAlat(unittest.TestCase):
pass
| true
| true
|
1c40a8a592ca0b34897e5b34e6106314ee52f7fb
| 3,039
|
py
|
Python
|
6.006 Introduction to Algorithms MIT OCW/Lecture Notes/lec06_code/avl.py
|
SiyuIsaacParkerTian/self_learning
|
662b27c60cbfad94d80bd40f46e9f2d0f4270826
|
[
"MIT"
] | 2
|
2020-02-09T18:06:02.000Z
|
2020-04-19T07:30:58.000Z
|
6.006 Introduction to Algorithms MIT OCW/Lecture Notes/lec06_code/avl.py
|
SiyuIsaacParkerTian/self_learning
|
662b27c60cbfad94d80bd40f46e9f2d0f4270826
|
[
"MIT"
] | null | null | null |
6.006 Introduction to Algorithms MIT OCW/Lecture Notes/lec06_code/avl.py
|
SiyuIsaacParkerTian/self_learning
|
662b27c60cbfad94d80bd40f46e9f2d0f4270826
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import bst
def height(node):
if node is None:
return -1
else:
return node.height
def update_height(node):
node.height = max(height(node.left), height(node.right)) + 1
class AVL(bst.BST):
"""
AVL binary search tree implementation.
Supports insert, delete, find, find_min, next_larger each in O(lg n) time.
"""
def left_rotate(self, x):
y = x.right
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.right = y.left
if x.right is not None:
x.right.parent = x
y.left = x
x.parent = y
update_height(x)
update_height(y)
def right_rotate(self, x):
y = x.left
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.left = y.right
if x.left is not None:
x.left.parent = x
y.right = x
x.parent = y
update_height(x)
update_height(y)
def rebalance(self, node):
while node is not None:
update_height(node)
if height(node.left) >= 2 + height(node.right):
if height(node.left.left) >= height(node.left.right):
self.right_rotate(node)
else:
self.left_rotate(node.left)
self.right_rotate(node)
elif height(node.right) >= 2 + height(node.left):
if height(node.right.right) >= height(node.right.left):
self.left_rotate(node)
else:
self.right_rotate(node.right)
self.left_rotate(node)
node = node.parent
## find(k), find_min(), and next_larger(k) inherited from bst.BST
def insert(self, k):
"""Inserts a node with key k into the subtree rooted at this node.
This AVL version guarantees the balance property: h = O(lg n).
Args:
k: The key of the node to be inserted.
"""
node = super(AVL, self).insert(k)
self.rebalance(node)
def delete(self, k):
"""Deletes and returns a node with key k if it exists from the BST.
This AVL version guarantees the balance property: h = O(lg n).
Args:
k: The key of the node that we want to delete.
Returns:
The deleted node with key k.
"""
node = super(AVL, self).delete(k)
## node.parent is actually the old parent of the node,
## which is the first potentially out-of-balance node.
self.rebalance(node.parent)
def test(args=None):
bst.test(args, BSTtype=AVL)
if __name__ == '__main__': test()
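# --- Illustrative sketch (not part of the original file) ---
# height()/update_height() above only need .left/.right/.height attributes, so
# a hypothetical stand-in node (not bst.BSTnode) is enough to show the
# bottom-up height bookkeeping that rebalance() relies on after each insert:
class _DemoNode(object):
    def __init__(self, left=None, right=None):
        self.left, self.right, self.height = left, right, 0

def _demo_heights():
    leaf = _DemoNode()
    mid = _DemoNode(left=leaf)
    root = _DemoNode(left=mid)
    for node in (leaf, mid, root):   # recompute from the leaves upward
        update_height(node)
    return root.height               # 2: a missing child counts as height -1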
| 29.504854
| 75
| 0.529121
|
import bst
def height(node):
if node is None:
return -1
else:
return node.height
def update_height(node):
node.height = max(height(node.left), height(node.right)) + 1
class AVL(bst.BST):
def left_rotate(self, x):
y = x.right
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.right = y.left
if x.right is not None:
x.right.parent = x
y.left = x
x.parent = y
update_height(x)
update_height(y)
def right_rotate(self, x):
y = x.left
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.left = y.right
if x.left is not None:
x.left.parent = x
y.right = x
x.parent = y
update_height(x)
update_height(y)
def rebalance(self, node):
while node is not None:
update_height(node)
if height(node.left) >= 2 + height(node.right):
if height(node.left.left) >= height(node.left.right):
self.right_rotate(node)
else:
self.left_rotate(node.left)
self.right_rotate(node)
elif height(node.right) >= 2 + height(node.left):
if height(node.right.right) >= height(node.right.left):
self.left_rotate(node)
else:
self.right_rotate(node.right)
self.left_rotate(node)
node = node.parent
def insert(self, k):
node = super(AVL, self).insert(k)
self.rebalance(node)
def delete(self, k):
node = super(AVL, self).delete(k)
self.rebalance(node.parent)
def test(args=None):
bst.test(args, BSTtype=AVL)
if __name__ == '__main__': test()
| true
| true
|
1c40a9449567adf10ec4b9ff383c65fec24d5def
| 326
|
py
|
Python
|
bioluigi/tasks/samtools.py
|
PavlidisLab/luigi-biotasks
|
fec1c247752278518b2906a2ce968477349fee45
|
[
"Apache-2.0"
] | 5
|
2019-11-14T18:41:46.000Z
|
2020-03-21T17:56:32.000Z
|
bioluigi/tasks/samtools.py
|
PavlidisLab/luigi-biotasks
|
fec1c247752278518b2906a2ce968477349fee45
|
[
"Apache-2.0"
] | 8
|
2019-11-13T21:40:32.000Z
|
2022-03-04T20:31:37.000Z
|
bioluigi/tasks/samtools.py
|
PavlidisLab/luigi-biotasks
|
fec1c247752278518b2906a2ce968477349fee45
|
[
"Apache-2.0"
] | null | null | null |
import luigi
from luigi.contrib.external_program import ExternalProgramTask
import os
class IndexBam(ExternalProgramTask):
bam_file = luigi.Parameter()
def program_args(self):
return ['samtools', 'index', self.bam_file]
def output(self):
return luigi.LocalTarget('{}.bai'.format(self.bam_file))
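# --- Illustrative usage sketch (not part of the original file) ---
# IndexBam shells out to `samtools index`, so running it just needs samtools
# on PATH and an existing BAM file; 'sample.bam' below is a placeholder path.
# Relies on the luigi import at the top of this file.
if __name__ == '__main__':
    luigi.build([IndexBam(bam_file='sample.bam')], local_scheduler=True)
    # On success the declared target 'sample.bam.bai' exists next to the input.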
| 25.076923
| 64
| 0.723926
|
import luigi
from luigi.contrib.external_program import ExternalProgramTask
import os
class IndexBam(ExternalProgramTask):
bam_file = luigi.Parameter()
def program_args(self):
return ['samtools', 'index', self.bam_file]
def output(self):
return luigi.LocalTarget('{}.bai'.format(self.bam_file))
| true
| true
|
1c40a9c5cbc74726c8fb2a338490323f64adc489
| 700
|
py
|
Python
|
C/Matcher.py
|
aleksandr-gordeiko/mathlogic-itmo
|
824b8942d487c0c112304fe7fa8e43f2a8aefa13
|
[
"MIT"
] | null | null | null |
C/Matcher.py
|
aleksandr-gordeiko/mathlogic-itmo
|
824b8942d487c0c112304fe7fa8e43f2a8aefa13
|
[
"MIT"
] | null | null | null |
C/Matcher.py
|
aleksandr-gordeiko/mathlogic-itmo
|
824b8942d487c0c112304fe7fa8e43f2a8aefa13
|
[
"MIT"
] | null | null | null |
# Copyright: Aleksandr Gordeiko 2020
from A.Node import Node
class Matcher:
def __init__(self):
self.node_schema_expressions = {}
def matches(self, node: Node, schema: Node):
if schema.sign is None:
try:
ex = self.node_schema_expressions[schema.expr]
except KeyError:
self.node_schema_expressions[schema.expr] = node.expr
return True
if ex == node.expr:
return True
return False
if node.sign is None or node.sign != schema.sign:
return False
if schema.sign in ["->", "|", "&"]:
return self.matches(node.left, schema.left) and \
self.matches(node.right, schema.right)
elif schema.sign == "!":
return self.matches(node.right, schema.right)
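# --- Illustrative usage sketch (not part of the original file) ---
# Matcher only reads .sign/.expr/.left/.right from its arguments, so a
# hypothetical stand-in tuple (not the real A.Node) is enough to show how a
# schema variable (sign is None) binds on first use and must repeat afterwards.
# Assumes this module imports (i.e. the A.Node dependency resolves).
import collections
_N = collections.namedtuple('_N', ['sign', 'expr', 'left', 'right'])

a_var = _N(None, 'A', None, None)          # schema variable A
x = _N(None, 'x', None, None)              # concrete variable x
y = _N(None, 'y', None, None)              # concrete variable y
schema = _N('->', '(A->A)', a_var, a_var)  # axiom-like schema A -> A
print(Matcher().matches(_N('->', '(x->x)', x, x), schema))  # True: A binds to 'x' on both sides
print(Matcher().matches(_N('->', '(x->y)', x, y), schema))  # False: A already bound to 'x'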
| 21.875
| 57
| 0.682857
|
from A.Node import Node
class Matcher:
def __init__(self):
self.node_schema_expressions = {}
def matches(self, node: Node, schema: Node):
if schema.sign is None:
try:
ex = self.node_schema_expressions[schema.expr]
except KeyError:
self.node_schema_expressions[schema.expr] = node.expr
return True
if ex == node.expr:
return True
return False
if node.sign is None or node.sign != schema.sign:
return False
if schema.sign in ["->", "|", "&"]:
return self.matches(node.left, schema.left) and \
self.matches(node.right, schema.right)
elif schema.sign == "!":
return self.matches(node.right, schema.right)
| true
| true
|
1c40aaa4ea80053d21168df03ff0c474bffc67b9
| 20,317
|
py
|
Python
|
cinder/volume/drivers/hitachi/hbsd_fc.py
|
alexpilotti/cinder-ci-fixes
|
c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/hitachi/hbsd_fc.py
|
alexpilotti/cinder-ci-fixes
|
c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/hitachi/hbsd_fc.py
|
alexpilotti/cinder-ci-fixes
|
c0ed2ab8cc6b1197e426cd6c58c3b582624d1cfd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre channel Cinder volume driver for Hitachi storage.
"""
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _LW
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.BoolOpt('hitachi_zoning_request',
default=False,
help='Request for FC Zone creating HostGroup'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
VERSION = common.VERSION
def __init__(self, *args, **kwargs):
os.environ['LANG'] = 'C'
super(HBSDFCDriver, self).__init__(*args, **kwargs)
self.db = kwargs.get('db')
self.common = None
self.configuration.append_config_values(common.volume_opts)
self._stats = {}
self.context = None
self.max_hostgroups = None
self.pair_hostgroups = []
self.pair_hostnum = 0
self.do_setup_status = threading.Event()
def _check_param(self):
self.configuration.append_config_values(volume_opts)
for opt in volume_opts:
getattr(self.configuration, opt.name)
def check_param(self):
try:
self.common.check_param()
self._check_param()
except exception.HBSDError:
raise
except Exception as ex:
msg = basic_lib.output_err(601, param=six.text_type(ex))
raise exception.HBSDError(message=msg)
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
with lock:
self.common.output_param_to_log('FC')
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info('\t%-35s%s' %
(opt.name + ': ', six.text_type(value)))
self.common.command.output_param_to_log(self.configuration)
def _add_wwn(self, hgs, port, gid, wwns):
for wwn in wwns:
wwn = six.text_type(wwn)
self.common.command.comm_add_hbawwn(port, gid, wwn)
detected = self.common.command.is_detected(port, wwn)
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
'detected': detected})
LOG.debug('Create host group for %s' % hgs)
def _add_lun(self, hostgroups, ldev):
if hostgroups is self.pair_hostgroups:
is_once = True
else:
is_once = False
self.common.add_lun('auhgmap', hostgroups, ldev, is_once)
def _delete_lun(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(301, ldev=ldev)
LOG.warning(msg)
def _get_hgname_gid(self, port, host_grp_name):
return self.common.command.get_hgname_gid(port, host_grp_name)
def _get_unused_gid(self, port):
group_range = self.configuration.hitachi_group_range
if not group_range:
group_range = basic_lib.DEFAULT_GROUP_RANGE
return self.common.command.get_unused_gid(group_range, port)
def _get_hostgroup_info(self, hgs, wwns, login=True):
target_ports = self.configuration.hitachi_target_ports
return self.common.command.comm_get_hostgroup_info(
hgs, wwns, target_ports, login=login)
def _fill_group(self, hgs, port, host_grp_name, wwns):
added_hostgroup = False
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
'name: %(name)s wwns: %(wwns)s)'
% {'hgs': hgs, 'port': port,
'name': host_grp_name, 'wwns': wwns})
gid = self._get_hgname_gid(port, host_grp_name)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
try:
gid = self._get_unused_gid(port)
self._add_hostgroup(port, gid, host_grp_name)
added_hostgroup = True
except exception.HBSDNotFound:
gid = None
msg = basic_lib.set_msg(312, resource='GID')
LOG.warning(msg)
continue
else:
LOG.debug('Completed to add host target'
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
break
else:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
try:
if wwns:
self._add_wwn(hgs, port, gid, wwns)
else:
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None,
'detected': True})
except Exception:
with excutils.save_and_reraise_exception():
if added_hostgroup:
self._delete_hostgroup(port, gid, host_grp_name)
def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports):
target_ports = self.configuration.hitachi_target_ports
group_request = self.configuration.hitachi_group_request
wwns = []
for wwn in master_wwns:
wwns.append(wwn.lower())
if target_ports and group_request:
host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
for port in security_ports:
wwns_copy = wwns[:]
for hostgroup in hgs:
if (hostgroup['port'] == port and
hostgroup['initiator_wwn'].lower() in wwns_copy):
wwns_copy.remove(hostgroup['initiator_wwn'].lower())
if wwns_copy:
try:
self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex:
LOG.warning(_LW('Failed to add host group: %s') %
six.text_type(ex))
msg = basic_lib.set_msg(
308, port=port, name=host_grp_name)
LOG.warning(msg)
if not hgs:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
def add_hostgroup_pair(self, pair_hostgroups):
if self.configuration.hitachi_unit_name:
return
properties = utils.brick_get_connector_properties()
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
hostgroups = []
self._get_hostgroup_info(hostgroups, properties['wwpns'],
login=False)
host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX,
self.pair_hostnum)
for hostgroup in hostgroups:
gid = self._get_hgname_gid(hostgroup['port'],
host_grp_name)
# When 'gid' is 0, it should be true.
# So, it cannot remove 'is not None'.
if gid is not None:
pair_hostgroups.append({'port': hostgroup['port'],
'gid': gid, 'initiator_wwn': None,
'detected': True})
break
if not pair_hostgroups:
for hostgroup in hostgroups:
pair_port = hostgroup['port']
try:
self._fill_group(pair_hostgroups, pair_port,
host_grp_name, None)
except Exception:
if hostgroup is hostgroups[-1]:
raise
else:
break
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
LOG.debug("wwpns: %s" % properties['wwpns'])
hostgroups = []
security_ports = self._get_hostgroup_info(
hostgroups, properties['wwpns'], login=False)
self.add_hostgroup_master(hostgroups, properties['wwpns'],
properties['ip'], security_ports)
self.add_hostgroup_pair(self.pair_hostgroups)
def _get_target_wwn(self, port):
target_wwns = self.common.command.comm_set_target_wwns(
self.configuration.hitachi_target_ports)
return target_wwns[port]
def _add_hostgroup(self, port, gid, host_grp_name):
self.common.command.comm_add_hostgrp(port, gid, host_grp_name)
def _delete_hostgroup(self, port, gid, host_grp_name):
try:
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
except Exception:
with excutils.save_and_reraise_exception():
msg = basic_lib.set_msg(
306, port=port, gid=gid, name=host_grp_name)
LOG.warning(msg)
def _check_volume_mapping(self, hostgroup):
port = hostgroup['port']
gid = hostgroup['gid']
if self.common.command.get_hostgroup_luns(port, gid):
return True
else:
return False
def _build_initiator_target_map(self, hostgroups, terminate=False):
target_wwns = []
init_targ_map = {}
target_ports = self.configuration.hitachi_target_ports
zoning_request = self.configuration.hitachi_zoning_request
for hostgroup in hostgroups:
target_wwn = self._get_target_wwn(hostgroup['port'])
if target_wwn not in target_wwns:
target_wwns.append(target_wwn)
if target_ports and zoning_request:
if terminate and self._check_volume_mapping(hostgroup):
continue
initiator_wwn = hostgroup['initiator_wwn']
if initiator_wwn not in init_targ_map:
init_targ_map[initiator_wwn] = []
init_targ_map[initiator_wwn].append(target_wwn)
return target_wwns, init_targ_map
def _get_properties(self, volume, hostgroups, terminate=False):
properties = {}
target_wwns, init_targ_map = self._build_initiator_target_map(
hostgroups, terminate)
properties['target_wwn'] = target_wwns
if init_targ_map:
properties['initiator_target_map'] = init_targ_map
if not terminate:
properties['target_lun'] = hostgroups[0]['lun']
return properties
def do_setup(self, context):
self.context = context
self.common = common.HBSDCommon(self.configuration, self,
context, self.db)
self.check_param()
self.common.create_lock_file()
self.common.command.connect_storage()
self.max_hostgroups = self.common.command.get_max_hostgroups()
lock = basic_lib.get_process_lock(self.common.service_lock_file)
with lock:
self.add_hostgroup()
self.output_param_to_log()
self.do_setup_status.set()
def check_for_setup_error(self):
pass
def extend_volume(self, volume, new_size):
self.do_setup_status.wait()
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
if refresh:
if self.do_setup_status.isSet():
self.common.output_backend_available_once()
_stats = self.common.update_volume_stats("FC")
if _stats:
self._stats = _stats
return self._stats
def create_volume(self, volume):
self.do_setup_status.wait()
metadata = self.common.create_volume(volume)
return metadata
def delete_volume(self, volume):
self.do_setup_status.wait()
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_snapshot(snapshot)
return metadata
def delete_snapshot(self, snapshot):
self.do_setup_status.wait()
self.common.delete_snapshot(snapshot)
def create_cloned_volume(self, volume, src_vref):
self.do_setup_status.wait()
metadata = self.common.create_cloned_volume(volume, src_vref)
return metadata
def create_volume_from_snapshot(self, volume, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
return metadata
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)"
% {'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs is self.pair_hostgroups:
hostgroups = src_hgs
else:
hostgroups = []
security_ports = self._get_hostgroup_info(
hostgroups, connector['wwpns'], login=True)
self.add_hostgroup_master(hostgroups, connector['wwpns'],
connector['ip'], security_ports)
if src_hgs is self.pair_hostgroups:
try:
self._add_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(311, ldev=ldev)
LOG.warning(msg)
for i in range(self.max_hostgroups + 1):
self.pair_hostnum += 1
pair_hostgroups = []
try:
self.add_hostgroup_pair(pair_hostgroups)
self.pair_hostgroups.extend(pair_hostgroups)
except exception.HBSDNotFound:
if i >= self.max_hostgroups:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
else:
break
self.pair_initialize_connection(ldev)
else:
self._add_lun(hostgroups, ldev)
return hostgroups
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
% self.common.volume_info)
LOG.debug('HFCDrv: properties=%s' % properties)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)"
% self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(302, volume_id=volume['id'])
LOG.warning(msg)
return
if 'wwpns' not in connector:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
hostgroups = []
self._get_hostgroup_info(hostgroups,
connector['wwpns'], login=False)
if not hostgroups:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)
LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def pair_initialize_connection(self, ldev):
if self.configuration.hitachi_unit_name:
return
self._initialize_connection(ldev, None, self.pair_hostgroups)
def pair_terminate_connection(self, ldev):
if self.configuration.hitachi_unit_name:
return
self._terminate_connection(ldev, None, self.pair_hostgroups)
def discard_zero_page(self, volume):
self.common.command.discard_zero_page(self.common.get_ldev(volume))
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
self.do_setup_status.wait()
super(HBSDFCDriver, self).copy_volume_data(context, src_vol,
dest_vol, remote)
self.discard_zero_page(dest_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
self.do_setup_status.wait()
super(HBSDFCDriver, self).copy_image_to_volume(context, volume,
image_service,
image_id)
self.discard_zero_page(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self.do_setup_status.wait()
if (volume['instance_uuid'] or volume['attached_host']):
desc = 'volume %s' % volume['id']
msg = basic_lib.output_err(660, desc=desc)
raise exception.HBSDError(message=msg)
super(HBSDFCDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)
def restore_backup(self, context, backup, volume, backup_service):
self.do_setup_status.wait()
super(HBSDFCDriver, self).restore_backup(context, backup,
volume, backup_service)
self.discard_zero_page(volume)
def manage_existing(self, volume, existing_ref):
return self.common.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
self.do_setup_status.wait()
return self.common.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
self.do_setup_status.wait()
self.common.unmanage(volume)
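# --- Illustrative sketch (not part of the original file) ---
# _build_initiator_target_map() above produces the two pieces the FC zone
# manager expects: a flat list of target WWNs plus a per-initiator map of the
# targets it may be zoned to. A standalone rendering of that shape, using
# made-up WWN strings purely for illustration:
def _toy_initiator_target_map(hostgroup_targets):
    # hostgroup_targets: iterable of (initiator_wwn, target_wwn) pairs
    target_wwns, init_targ_map = [], {}
    for initiator_wwn, target_wwn in hostgroup_targets:
        if target_wwn not in target_wwns:
            target_wwns.append(target_wwn)
        init_targ_map.setdefault(initiator_wwn, []).append(target_wwn)
    return target_wwns, init_targ_map

print(_toy_initiator_target_map([
    ('10000000c9000001', '50060e8010000000'),
    ('10000000c9000001', '50060e8010000001'),
]))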
| 38.18985
| 79
| 0.592361
|
import os
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _LW
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.BoolOpt('hitachi_zoning_request',
default=False,
help='Request for FC Zone creating HostGroup'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
VERSION = common.VERSION
def __init__(self, *args, **kwargs):
os.environ['LANG'] = 'C'
super(HBSDFCDriver, self).__init__(*args, **kwargs)
self.db = kwargs.get('db')
self.common = None
self.configuration.append_config_values(common.volume_opts)
self._stats = {}
self.context = None
self.max_hostgroups = None
self.pair_hostgroups = []
self.pair_hostnum = 0
self.do_setup_status = threading.Event()
def _check_param(self):
self.configuration.append_config_values(volume_opts)
for opt in volume_opts:
getattr(self.configuration, opt.name)
def check_param(self):
try:
self.common.check_param()
self._check_param()
except exception.HBSDError:
raise
except Exception as ex:
msg = basic_lib.output_err(601, param=six.text_type(ex))
raise exception.HBSDError(message=msg)
def output_param_to_log(self):
lock = basic_lib.get_process_lock(self.common.system_lock_file)
with lock:
self.common.output_param_to_log('FC')
for opt in volume_opts:
if not opt.secret:
value = getattr(self.configuration, opt.name)
LOG.info('\t%-35s%s' %
(opt.name + ': ', six.text_type(value)))
self.common.command.output_param_to_log(self.configuration)
def _add_wwn(self, hgs, port, gid, wwns):
for wwn in wwns:
wwn = six.text_type(wwn)
self.common.command.comm_add_hbawwn(port, gid, wwn)
detected = self.common.command.is_detected(port, wwn)
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn,
'detected': detected})
LOG.debug('Create host group for %s' % hgs)
def _add_lun(self, hostgroups, ldev):
if hostgroups is self.pair_hostgroups:
is_once = True
else:
is_once = False
self.common.add_lun('auhgmap', hostgroups, ldev, is_once)
def _delete_lun(self, hostgroups, ldev):
try:
self.common.command.comm_delete_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(301, ldev=ldev)
LOG.warning(msg)
def _get_hgname_gid(self, port, host_grp_name):
return self.common.command.get_hgname_gid(port, host_grp_name)
def _get_unused_gid(self, port):
group_range = self.configuration.hitachi_group_range
if not group_range:
group_range = basic_lib.DEFAULT_GROUP_RANGE
return self.common.command.get_unused_gid(group_range, port)
def _get_hostgroup_info(self, hgs, wwns, login=True):
target_ports = self.configuration.hitachi_target_ports
return self.common.command.comm_get_hostgroup_info(
hgs, wwns, target_ports, login=login)
def _fill_group(self, hgs, port, host_grp_name, wwns):
added_hostgroup = False
LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
'name: %(name)s wwns: %(wwns)s)'
% {'hgs': hgs, 'port': port,
'name': host_grp_name, 'wwns': wwns})
gid = self._get_hgname_gid(port, host_grp_name)
if gid is None:
for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
try:
gid = self._get_unused_gid(port)
self._add_hostgroup(port, gid, host_grp_name)
added_hostgroup = True
except exception.HBSDNotFound:
gid = None
msg = basic_lib.set_msg(312, resource='GID')
LOG.warning(msg)
continue
else:
LOG.debug('Completed to add host target'
'(port: %(port)s gid: %(gid)d)'
% {'port': port, 'gid': gid})
break
else:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
try:
if wwns:
self._add_wwn(hgs, port, gid, wwns)
else:
hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None,
'detected': True})
except Exception:
with excutils.save_and_reraise_exception():
if added_hostgroup:
self._delete_hostgroup(port, gid, host_grp_name)
def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports):
target_ports = self.configuration.hitachi_target_ports
group_request = self.configuration.hitachi_group_request
wwns = []
for wwn in master_wwns:
wwns.append(wwn.lower())
if target_ports and group_request:
host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip)
for port in security_ports:
wwns_copy = wwns[:]
for hostgroup in hgs:
if (hostgroup['port'] == port and
hostgroup['initiator_wwn'].lower() in wwns_copy):
wwns_copy.remove(hostgroup['initiator_wwn'].lower())
if wwns_copy:
try:
self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex:
LOG.warning(_LW('Failed to add host group: %s') %
six.text_type(ex))
msg = basic_lib.set_msg(
308, port=port, name=host_grp_name)
LOG.warning(msg)
if not hgs:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
def add_hostgroup_pair(self, pair_hostgroups):
if self.configuration.hitachi_unit_name:
return
properties = utils.brick_get_connector_properties()
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
hostgroups = []
self._get_hostgroup_info(hostgroups, properties['wwpns'],
login=False)
host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX,
self.pair_hostnum)
for hostgroup in hostgroups:
gid = self._get_hgname_gid(hostgroup['port'],
host_grp_name)
if gid is not None:
pair_hostgroups.append({'port': hostgroup['port'],
'gid': gid, 'initiator_wwn': None,
'detected': True})
break
if not pair_hostgroups:
for hostgroup in hostgroups:
pair_port = hostgroup['port']
try:
self._fill_group(pair_hostgroups, pair_port,
host_grp_name, None)
except Exception:
if hostgroup is hostgroups[-1]:
raise
else:
break
def add_hostgroup(self):
properties = utils.brick_get_connector_properties()
if 'wwpns' not in properties:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
LOG.debug("wwpns: %s" % properties['wwpns'])
hostgroups = []
security_ports = self._get_hostgroup_info(
hostgroups, properties['wwpns'], login=False)
self.add_hostgroup_master(hostgroups, properties['wwpns'],
properties['ip'], security_ports)
self.add_hostgroup_pair(self.pair_hostgroups)
def _get_target_wwn(self, port):
target_wwns = self.common.command.comm_set_target_wwns(
self.configuration.hitachi_target_ports)
return target_wwns[port]
def _add_hostgroup(self, port, gid, host_grp_name):
self.common.command.comm_add_hostgrp(port, gid, host_grp_name)
def _delete_hostgroup(self, port, gid, host_grp_name):
try:
self.common.command.comm_del_hostgrp(port, gid, host_grp_name)
except Exception:
with excutils.save_and_reraise_exception():
msg = basic_lib.set_msg(
306, port=port, gid=gid, name=host_grp_name)
LOG.warning(msg)
def _check_volume_mapping(self, hostgroup):
port = hostgroup['port']
gid = hostgroup['gid']
if self.common.command.get_hostgroup_luns(port, gid):
return True
else:
return False
def _build_initiator_target_map(self, hostgroups, terminate=False):
target_wwns = []
init_targ_map = {}
target_ports = self.configuration.hitachi_target_ports
zoning_request = self.configuration.hitachi_zoning_request
for hostgroup in hostgroups:
target_wwn = self._get_target_wwn(hostgroup['port'])
if target_wwn not in target_wwns:
target_wwns.append(target_wwn)
if target_ports and zoning_request:
if terminate and self._check_volume_mapping(hostgroup):
continue
initiator_wwn = hostgroup['initiator_wwn']
if initiator_wwn not in init_targ_map:
init_targ_map[initiator_wwn] = []
init_targ_map[initiator_wwn].append(target_wwn)
return target_wwns, init_targ_map
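# Example shape for _build_initiator_target_map() above (hypothetical WWNs),
# with hitachi_target_ports and hitachi_zoning_request both set:
#   target_wwns   -> ['50060e8010012345']
#   init_targ_map -> {'10000000c9876543': ['50060e8010012345']}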
def _get_properties(self, volume, hostgroups, terminate=False):
properties = {}
target_wwns, init_targ_map = self._build_initiator_target_map(
hostgroups, terminate)
properties['target_wwn'] = target_wwns
if init_targ_map:
properties['initiator_target_map'] = init_targ_map
if not terminate:
properties['target_lun'] = hostgroups[0]['lun']
return properties
def do_setup(self, context):
self.context = context
self.common = common.HBSDCommon(self.configuration, self,
context, self.db)
self.check_param()
self.common.create_lock_file()
self.common.command.connect_storage()
self.max_hostgroups = self.common.command.get_max_hostgroups()
lock = basic_lib.get_process_lock(self.common.service_lock_file)
with lock:
self.add_hostgroup()
self.output_param_to_log()
self.do_setup_status.set()
def check_for_setup_error(self):
pass
def extend_volume(self, volume, new_size):
self.do_setup_status.wait()
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
if refresh:
if self.do_setup_status.isSet():
self.common.output_backend_available_once()
_stats = self.common.update_volume_stats("FC")
if _stats:
self._stats = _stats
return self._stats
def create_volume(self, volume):
self.do_setup_status.wait()
metadata = self.common.create_volume(volume)
return metadata
def delete_volume(self, volume):
self.do_setup_status.wait()
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_snapshot(snapshot)
return metadata
def delete_snapshot(self, snapshot):
self.do_setup_status.wait()
self.common.delete_snapshot(snapshot)
def create_cloned_volume(self, volume, src_vref):
self.do_setup_status.wait()
metadata = self.common.create_cloned_volume(volume, src_vref)
return metadata
def create_volume_from_snapshot(self, volume, snapshot):
self.do_setup_status.wait()
metadata = self.common.create_volume_from_snapshot(volume, snapshot)
return metadata
def _initialize_connection(self, ldev, connector, src_hgs=None):
LOG.debug("Call _initialize_connection "
"(config_group: %(group)s ldev: %(ldev)d)"
% {'group': self.configuration.config_group, 'ldev': ldev})
if src_hgs is self.pair_hostgroups:
hostgroups = src_hgs
else:
hostgroups = []
security_ports = self._get_hostgroup_info(
hostgroups, connector['wwpns'], login=True)
self.add_hostgroup_master(hostgroups, connector['wwpns'],
connector['ip'], security_ports)
if src_hgs is self.pair_hostgroups:
try:
self._add_lun(hostgroups, ldev)
except exception.HBSDNotFound:
msg = basic_lib.set_msg(311, ldev=ldev)
LOG.warning(msg)
for i in range(self.max_hostgroups + 1):
self.pair_hostnum += 1
pair_hostgroups = []
try:
self.add_hostgroup_pair(pair_hostgroups)
self.pair_hostgroups.extend(pair_hostgroups)
except exception.HBSDNotFound:
if i >= self.max_hostgroups:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
else:
break
self.pair_initialize_connection(ldev)
else:
self._add_lun(hostgroups, ldev)
return hostgroups
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
% self.common.volume_info)
LOG.debug('HFCDrv: properties=%s' % properties)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def _terminate_connection(self, ldev, connector, src_hgs):
LOG.debug("Call _terminate_connection(config_group: %s)"
% self.configuration.config_group)
hostgroups = src_hgs[:]
self._delete_lun(hostgroups, ldev)
LOG.debug("*** _terminate_ ***")
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
self.do_setup_status.wait()
ldev = self.common.get_ldev(volume)
if ldev is None:
msg = basic_lib.set_msg(302, volume_id=volume['id'])
LOG.warning(msg)
return
if 'wwpns' not in connector:
msg = basic_lib.output_err(650, resource='HBA')
raise exception.HBSDError(message=msg)
hostgroups = []
self._get_hostgroup_info(hostgroups,
connector['wwpns'], login=False)
if not hostgroups:
msg = basic_lib.output_err(649)
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)
LOG.debug('Terminate volume_info: %s' % self.common.volume_info)
return {
'driver_volume_type': 'fibre_channel',
'data': properties
}
def pair_initialize_connection(self, ldev):
if self.configuration.hitachi_unit_name:
return
self._initialize_connection(ldev, None, self.pair_hostgroups)
def pair_terminate_connection(self, ldev):
if self.configuration.hitachi_unit_name:
return
self._terminate_connection(ldev, None, self.pair_hostgroups)
def discard_zero_page(self, volume):
self.common.command.discard_zero_page(self.common.get_ldev(volume))
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
self.do_setup_status.wait()
super(HBSDFCDriver, self).copy_volume_data(context, src_vol,
dest_vol, remote)
self.discard_zero_page(dest_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
self.do_setup_status.wait()
super(HBSDFCDriver, self).copy_image_to_volume(context, volume,
image_service,
image_id)
self.discard_zero_page(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self.do_setup_status.wait()
if (volume['instance_uuid'] or volume['attached_host']):
desc = 'volume %s' % volume['id']
msg = basic_lib.output_err(660, desc=desc)
raise exception.HBSDError(message=msg)
super(HBSDFCDriver, self).copy_volume_to_image(context, volume,
image_service,
image_meta)
def restore_backup(self, context, backup, volume, backup_service):
self.do_setup_status.wait()
super(HBSDFCDriver, self).restore_backup(context, backup,
volume, backup_service)
self.discard_zero_page(volume)
def manage_existing(self, volume, existing_ref):
return self.common.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
self.do_setup_status.wait()
return self.common.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
self.do_setup_status.wait()
self.common.unmanage(volume)
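# --- Illustrative sketch (not part of the original driver) ---------------
# A minimal, standalone example of the connection info this driver returns
# from initialize_connection() via _get_properties(). The port name, WWNs
# and LUN below are hypothetical; only the structure mirrors the code above.
def _example_fc_connection_info():
    hostgroups = [{'port': 'CL1-A', 'gid': 1,
                   'initiator_wwn': '10000000c9876543', 'detected': True,
                   'lun': 0}]
    target_wwns = ['50060e8010012345']            # one target WWN per mapped port
    init_targ_map = {'10000000c9876543': target_wwns}
    return {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'target_wwn': target_wwns,
            'initiator_target_map': init_targ_map,
            'target_lun': hostgroups[0]['lun'],
        },
    }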
| true
| true
|
1c40ab0516aea0f0690c56eb78806e7e66a6259f
| 1,098
|
py
|
Python
|
sdk/python/pulumi_azure_native/dbforpostgresql/v20200214preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_native/dbforpostgresql/v20200214preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_native/dbforpostgresql/v20200214preview/_enums.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'CreateMode',
'HAEnabledEnum',
'ResourceIdentityType',
'ServerVersion',
'SkuTier',
]
class CreateMode(str, Enum):
"""
The mode to create a new PostgreSQL server.
"""
DEFAULT = "Default"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
class HAEnabledEnum(str, Enum):
"""
Stand-by (high availability) setting; can be either Enabled or Disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ResourceIdentityType(str, Enum):
"""
The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class ServerVersion(str, Enum):
"""
PostgreSQL Server version.
"""
SERVER_VERSION_12 = "12"
SERVER_VERSION_11 = "11"
class SkuTier(str, Enum):
"""
The tier of the particular SKU, e.g. Burstable.
"""
BURSTABLE = "Burstable"
GENERAL_PURPOSE = "GeneralPurpose"
MEMORY_OPTIMIZED = "MemoryOptimized"
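# --- Illustrative sketch (not part of the generated file) ----------------
# Because each enum above also subclasses str, members compare equal to their
# raw string values and can be looked up from those values; the checks below
# use only the classes defined in this module.
if __name__ == "__main__":
    assert CreateMode.POINT_IN_TIME_RESTORE.value == "PointInTimeRestore"
    assert CreateMode("Default") is CreateMode.DEFAULT   # lookup by value
    assert HAEnabledEnum.ENABLED == "Enabled"            # plain str comparison works
    assert ServerVersion.SERVER_VERSION_12 in list(ServerVersion)
    print("enum usage sketch OK")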
| 20.333333
| 80
| 0.644809
|
from enum import Enum
__all__ = [
'CreateMode',
'HAEnabledEnum',
'ResourceIdentityType',
'ServerVersion',
'SkuTier',
]
class CreateMode(str, Enum):
DEFAULT = "Default"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
class HAEnabledEnum(str, Enum):
ENABLED = "Enabled"
DISABLED = "Disabled"
class ResourceIdentityType(str, Enum):
SYSTEM_ASSIGNED = "SystemAssigned"
class ServerVersion(str, Enum):
SERVER_VERSION_12 = "12"
SERVER_VERSION_11 = "11"
class SkuTier(str, Enum):
BURSTABLE = "Burstable"
GENERAL_PURPOSE = "GeneralPurpose"
MEMORY_OPTIMIZED = "MemoryOptimized"
| true
| true
|
1c40adf6a5577fca192d80e8d7e46f9de991112e
| 46,384
|
py
|
Python
|
hummingbot/strategy/perpetual_market_making/perpetual_market_making.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 2
|
2022-03-03T10:00:27.000Z
|
2022-03-08T13:57:56.000Z
|
hummingbot/strategy/perpetual_market_making/perpetual_market_making.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 6
|
2022-01-31T15:44:54.000Z
|
2022-03-06T04:27:12.000Z
|
hummingbot/strategy/perpetual_market_making/perpetual_market_making.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1
|
2022-02-03T19:51:26.000Z
|
2022-02-03T19:51:26.000Z
|
import logging
import time
from decimal import Decimal
from itertools import chain
from math import ceil, floor
from typing import Dict, List
import numpy as np
import pandas as pd
from hummingbot.connector.derivative.position import Position
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_candidate import PerpetualOrderCandidate
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
OrderFilledEvent,
OrderType,
PositionAction,
PositionMode,
PriceType,
SellOrderCompletedEvent,
TradeType
)
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils import map_df_to_str
from hummingbot.strategy.asset_price_delegate import AssetPriceDelegate
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.strategy.order_book_asset_price_delegate import OrderBookAssetPriceDelegate
from hummingbot.strategy.perpetual_market_making.data_types import PriceSize, Proposal
from hummingbot.strategy.perpetual_market_making.perpetual_market_making_order_tracker import (
PerpetualMarketMakingOrderTracker
)
from hummingbot.strategy.strategy_py_base import StrategyPyBase
NaN = float("nan")
s_decimal_zero = Decimal(0)
s_decimal_neg_one = Decimal(-1)
class PerpetualMarketMakingStrategy(StrategyPyBase):
OPTION_LOG_CREATE_ORDER = 1 << 3
OPTION_LOG_MAKER_ORDER_FILLED = 1 << 4
OPTION_LOG_STATUS_REPORT = 1 << 5
OPTION_LOG_ALL = 0x7fffffffffffffff
_logger = None
@classmethod
def logger(cls):
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def init_params(self,
market_info: MarketTradingPairTuple,
leverage: int,
position_mode: str,
bid_spread: Decimal,
ask_spread: Decimal,
order_amount: Decimal,
long_profit_taking_spread: Decimal,
short_profit_taking_spread: Decimal,
stop_loss_spread: Decimal,
time_between_stop_loss_orders: float,
stop_loss_slippage_buffer: Decimal,
order_levels: int = 1,
order_level_spread: Decimal = s_decimal_zero,
order_level_amount: Decimal = s_decimal_zero,
order_refresh_time: float = 30.0,
order_refresh_tolerance_pct: Decimal = s_decimal_neg_one,
filled_order_delay: float = 60.0,
order_optimization_enabled: bool = False,
ask_order_optimization_depth: Decimal = s_decimal_zero,
bid_order_optimization_depth: Decimal = s_decimal_zero,
asset_price_delegate: AssetPriceDelegate = None,
price_type: str = "mid_price",
price_ceiling: Decimal = s_decimal_neg_one,
price_floor: Decimal = s_decimal_neg_one,
logging_options: int = OPTION_LOG_ALL,
status_report_interval: float = 900,
minimum_spread: Decimal = Decimal(0),
hb_app_notification: bool = False,
order_override: Dict[str, List[str]] = {},
):
if price_ceiling != s_decimal_neg_one and price_ceiling < price_floor:
raise ValueError("Parameter price_ceiling cannot be lower than price_floor.")
self._sb_order_tracker = PerpetualMarketMakingOrderTracker()
self._market_info = market_info
self._leverage = leverage
self._position_mode = PositionMode.HEDGE if position_mode == "Hedge" else PositionMode.ONEWAY
self._bid_spread = bid_spread
self._ask_spread = ask_spread
self._minimum_spread = minimum_spread
self._order_amount = order_amount
self._long_profit_taking_spread = long_profit_taking_spread
self._short_profit_taking_spread = short_profit_taking_spread
self._stop_loss_spread = stop_loss_spread
self._order_levels = order_levels
self._buy_levels = order_levels
self._sell_levels = order_levels
self._order_level_spread = order_level_spread
self._order_level_amount = order_level_amount
self._order_refresh_time = order_refresh_time
self._order_refresh_tolerance_pct = order_refresh_tolerance_pct
self._filled_order_delay = filled_order_delay
self._order_optimization_enabled = order_optimization_enabled
self._ask_order_optimization_depth = ask_order_optimization_depth
self._bid_order_optimization_depth = bid_order_optimization_depth
self._asset_price_delegate = asset_price_delegate
self._price_type = self.get_price_type(price_type)
self._price_ceiling = price_ceiling
self._price_floor = price_floor
self._hb_app_notification = hb_app_notification
self._order_override = order_override
self._cancel_timestamp = 0
self._create_timestamp = 0
self._all_markets_ready = False
self._logging_options = logging_options
self._last_timestamp = 0
self._status_report_interval = status_report_interval
self._last_own_trade_price = Decimal('nan')
self._ts_peak_bid_price = Decimal('0')
self._ts_peak_ask_price = Decimal('0')
self._exit_orders = dict()
self._next_buy_exit_order_timestamp = 0
self._next_sell_exit_order_timestamp = 0
self.add_markets([market_info.market])
self._close_order_type = OrderType.LIMIT
self._time_between_stop_loss_orders = time_between_stop_loss_orders
self._stop_loss_slippage_buffer = stop_loss_slippage_buffer
def all_markets_ready(self):
return all([market.ready for market in self.active_markets])
@property
def order_refresh_tolerance_pct(self) -> Decimal:
return self._order_refresh_tolerance_pct
@order_refresh_tolerance_pct.setter
def order_refresh_tolerance_pct(self, value: Decimal):
self._order_refresh_tolerance_pct = value
@property
def order_amount(self) -> Decimal:
return self._order_amount
@order_amount.setter
def order_amount(self, value: Decimal):
self._order_amount = value
@property
def order_levels(self) -> int:
return self._order_levels
@order_levels.setter
def order_levels(self, value: int):
self._order_levels = value
self._buy_levels = value
self._sell_levels = value
@property
def buy_levels(self) -> int:
return self._buy_levels
@buy_levels.setter
def buy_levels(self, value: int):
self._buy_levels = value
@property
def sell_levels(self) -> int:
return self._sell_levels
@sell_levels.setter
def sell_levels(self, value: int):
self._sell_levels = value
@property
def order_level_amount(self) -> Decimal:
return self._order_level_amount
@order_level_amount.setter
def order_level_amount(self, value: Decimal):
self._order_level_amount = value
@property
def order_level_spread(self) -> Decimal:
return self._order_level_spread
@order_level_spread.setter
def order_level_spread(self, value: Decimal):
self._order_level_spread = value
@property
def bid_spread(self) -> Decimal:
return self._bid_spread
@bid_spread.setter
def bid_spread(self, value: Decimal):
self._bid_spread = value
@property
def ask_spread(self) -> Decimal:
return self._ask_spread
@ask_spread.setter
def ask_spread(self, value: Decimal):
self._ask_spread = value
@property
def order_optimization_enabled(self) -> bool:
return self._order_optimization_enabled
@order_optimization_enabled.setter
def order_optimization_enabled(self, value: bool):
self._order_optimization_enabled = value
@property
def order_refresh_time(self) -> float:
return self._order_refresh_time
@order_refresh_time.setter
def order_refresh_time(self, value: float):
self._order_refresh_time = value
@property
def filled_order_delay(self) -> float:
return self._filled_order_delay
@filled_order_delay.setter
def filled_order_delay(self, value: float):
self._filled_order_delay = value
@property
def price_ceiling(self) -> Decimal:
return self._price_ceiling
@price_ceiling.setter
def price_ceiling(self, value: Decimal):
self._price_ceiling = value
@property
def price_floor(self) -> Decimal:
return self._price_floor
@price_floor.setter
def price_floor(self, value: Decimal):
self._price_floor = value
@property
def base_asset(self):
return self._market_info.base_asset
@property
def quote_asset(self):
return self._market_info.quote_asset
@property
def trading_pair(self):
return self._market_info.trading_pair
def get_price(self) -> float:
if self._asset_price_delegate is not None:
price_provider = self._asset_price_delegate
else:
price_provider = self._market_info
if self._price_type is PriceType.LastOwnTrade:
price = self._last_own_trade_price
else:
price = price_provider.get_price_by_type(self._price_type)
if price.is_nan():
price = price_provider.get_price_by_type(PriceType.MidPrice)
return price
def get_last_price(self) -> float:
return self._market_info.get_last_price()
def get_mid_price(self) -> Decimal:
delegate: AssetPriceDelegate = self._asset_price_delegate
if delegate is not None:
mid_price = delegate.get_mid_price()
else:
mid_price = self._market_info.get_mid_price()
return mid_price
@property
def active_orders(self) -> List[LimitOrder]:
if self._market_info not in self._sb_order_tracker.market_pair_to_active_orders:
return []
return self._sb_order_tracker.market_pair_to_active_orders[self._market_info]
@property
def active_positions(self) -> Dict[str, Position]:
return self._market_info.market.account_positions
@property
def active_buys(self) -> List[LimitOrder]:
return [o for o in self.active_orders if o.is_buy]
@property
def active_sells(self) -> List[LimitOrder]:
return [o for o in self.active_orders if not o.is_buy]
@property
def logging_options(self) -> int:
return self._logging_options
@logging_options.setter
def logging_options(self, logging_options: int):
self._logging_options = logging_options
@property
def asset_price_delegate(self) -> AssetPriceDelegate:
return self._asset_price_delegate
@asset_price_delegate.setter
def asset_price_delegate(self, value):
self._asset_price_delegate = value
def perpetual_mm_assets_df(self) -> pd.DataFrame:
market, trading_pair, base_asset, quote_asset = self._market_info
quote_balance = float(market.get_balance(quote_asset))
available_quote_balance = float(market.get_available_balance(quote_asset))
data = [
["", quote_asset],
["Total Balance", round(quote_balance, 4)],
["Available Balance", round(available_quote_balance, 4)]
]
df = pd.DataFrame(data=data)
return df
def active_orders_df(self) -> pd.DataFrame:
price = self.get_price()
active_orders = self.active_orders
no_sells = len([o for o in active_orders if not o.is_buy])
active_orders.sort(key=lambda x: x.price, reverse=True)
columns = ["Level", "Type", "Price", "Spread", "Amount (Orig)", "Amount (Adj)", "Age"]
data = []
lvl_buy, lvl_sell = 0, 0
for idx in range(0, len(active_orders)):
order = active_orders[idx]
level = None
if order.is_buy:
level = lvl_buy + 1
lvl_buy += 1
else:
level = no_sells - lvl_sell
lvl_sell += 1
spread = 0 if price == 0 else abs(order.price - price) / price
age = "n/a"
# // indicates order is a paper order so 'n/a'. For real orders, calculate age.
if "//" not in order.client_order_id:
age = pd.Timestamp(int(time.time()) - int(order.client_order_id[-16:]) / 1e6,
unit='s').strftime('%H:%M:%S')
amount_orig = "" if level is None else self._order_amount + ((level - 1) * self._order_level_amount)
data.append([
level,
"buy" if order.is_buy else "sell",
float(order.price),
f"{spread:.2%}",
amount_orig,
float(order.quantity),
age
])
return pd.DataFrame(data=data, columns=columns)
def active_positions_df(self) -> pd.DataFrame:
columns = ["Symbol", "Type", "Entry Price", "Amount", "Leverage", "Unrealized PnL"]
data = []
market, trading_pair = self._market_info.market, self._market_info.trading_pair
for idx in self.active_positions.values():
is_buy = idx.amount > 0
unrealized_profit = ((market.get_price(trading_pair, is_buy) - idx.entry_price) * idx.amount)
data.append([
idx.trading_pair,
idx.position_side.name,
idx.entry_price,
idx.amount,
idx.leverage,
unrealized_profit
])
return pd.DataFrame(data=data, columns=columns)
def market_status_data_frame(self) -> pd.DataFrame:
markets_data = []
markets_columns = ["Exchange", "Market", "Best Bid", "Best Ask", f"Ref Price ({self._price_type.name})"]
if self._price_type is PriceType.LastOwnTrade and self._last_own_trade_price.is_nan():
markets_columns[-1] = "Ref Price (MidPrice)"
market_books = [(self._market_info.market, self._market_info.trading_pair)]
if type(self._asset_price_delegate) is OrderBookAssetPriceDelegate:
market_books.append((self._asset_price_delegate.market, self._asset_price_delegate.trading_pair))
for market, trading_pair in market_books:
bid_price = market.get_price(trading_pair, False)
ask_price = market.get_price(trading_pair, True)
ref_price = float("nan")
if market == self._market_info.market and self._asset_price_delegate is None:
ref_price = self.get_price()
elif market == self._asset_price_delegate.market and self._price_type is not PriceType.LastOwnTrade:
ref_price = self._asset_price_delegate.get_price_by_type(self._price_type)
markets_data.append([
market.display_name,
trading_pair,
float(bid_price),
float(ask_price),
float(ref_price)
])
return pd.DataFrame(data=markets_data, columns=markets_columns).replace(np.nan, '', regex=True)
def format_status(self) -> str:
if not self._all_markets_ready:
return "Market connectors are not ready."
lines = []
warning_lines = []
markets_df = self.market_status_data_frame()
lines.extend(["", " Markets:"] + [" " + line for line in markets_df.to_string(index=False).split("\n")])
assets_df = map_df_to_str(self.perpetual_mm_assets_df())
first_col_length = max(*assets_df[0].apply(len))
df_lines = assets_df.to_string(index=False, header=False,
formatters={0: ("{:<" + str(first_col_length) + "}").format}).split("\n")
lines.extend(["", " Assets:"] + [" " + line for line in df_lines])
# See if there are any open orders.
if len(self.active_orders) > 0:
df = self.active_orders_df()
lines.extend(["", " Orders:"] + [" " + line for line in df.to_string(index=False).split("\n")])
else:
lines.extend(["", " No active maker orders."])
# See if there are any active positions.
if len(self.active_positions) > 0:
df = self.active_positions_df()
lines.extend(["", " Positions:"] + [" " + line for line in df.to_string(index=False).split("\n")])
else:
lines.extend(["", " No active positions."])
if len(warning_lines) > 0:
lines.extend(["", "*** WARNINGS ***"] + warning_lines)
return "\n".join(lines)
def start(self, clock: Clock, timestamp: float):
super().start(clock, timestamp)
self._last_timestamp = timestamp
self.apply_initial_settings(self.trading_pair, self._position_mode, self._leverage)
def apply_initial_settings(self, trading_pair: str, position: Position, leverage: int):
market: ExchangeBase = self._market_info.market
market.set_leverage(trading_pair, leverage)
market.set_position_mode(position)
def tick(self, timestamp: float):
market: ExchangeBase = self._market_info.market
session_positions = [s for s in self.active_positions.values() if s.trading_pair == self.trading_pair]
current_tick = timestamp // self._status_report_interval
last_tick = self._last_timestamp // self._status_report_interval
should_report_warnings = ((current_tick > last_tick) and
(self._logging_options & self.OPTION_LOG_STATUS_REPORT))
try:
if not self._all_markets_ready:
self._all_markets_ready = all([market.ready for market in self.active_markets])
if self._asset_price_delegate is not None and self._all_markets_ready:
self._all_markets_ready = self._asset_price_delegate.ready
if not self._all_markets_ready:
# Markets not ready yet. Don't do anything.
if should_report_warnings:
self.logger().warning("Markets are not ready. No market making trades are permitted.")
return
if should_report_warnings:
if not all([market.network_status is NetworkStatus.CONNECTED for market in self.active_markets]):
self.logger().warning("WARNING: Some markets are not connected or are down at the moment. Market "
"making may be dangerous when markets or networks are unstable.")
if len(session_positions) == 0:
self._exit_orders = dict()  # Empty the exit-order dict at this point to reduce its size
proposal = None
if self._create_timestamp <= self.current_timestamp:
# 1. Create base order proposals
proposal = self.create_base_proposal()
# 2. Apply functions that limit numbers of buys and sells proposal
self.apply_order_levels_modifiers(proposal)
# 3. Apply functions that modify orders price
self.apply_order_price_modifiers(proposal)
# 4. Apply budget constraint, i.e. can't buy/sell more than what you have.
self.apply_budget_constraint(proposal)
self.filter_out_takers(proposal)
self.cancel_active_orders(proposal)
self.cancel_orders_below_min_spread()
if self.to_create_orders(proposal):
self.execute_orders_proposal(proposal, PositionAction.OPEN)
# Reset peak ask and bid prices
self._ts_peak_ask_price = market.get_price(self.trading_pair, False)
self._ts_peak_bid_price = market.get_price(self.trading_pair, True)
else:
self.manage_positions(session_positions)
finally:
self._last_timestamp = timestamp
def manage_positions(self, session_positions: List[Position]):
mode = self._position_mode
proposals = self.profit_taking_proposal(mode, session_positions)
if proposals is not None:
self.execute_orders_proposal(proposals, PositionAction.CLOSE)
# check if stop loss needs to be placed
proposals = self.stop_loss_proposal(mode, session_positions)
if proposals is not None:
self.execute_orders_proposal(proposals, PositionAction.CLOSE)
def profit_taking_proposal(self, mode: PositionMode, active_positions: List) -> Proposal:
market: ExchangeBase = self._market_info.market
unwanted_exit_orders = [o for o in self.active_orders
if o.client_order_id not in self._exit_orders.keys()]
ask_price = market.get_price(self.trading_pair, True)
bid_price = market.get_price(self.trading_pair, False)
buys = []
sells = []
if mode == PositionMode.ONEWAY:
# in one-way mode, only one active position is expected at a time
if len(active_positions) > 1:
self.logger().error(f"More than one open position in {mode.name} position mode. "
"Kindly ensure you do not interact with the exchange through "
"other platforms and restart this strategy.")
else:
# Cancel open order that could potentially close position before reaching take_profit_limit
for order in unwanted_exit_orders:
if ((active_positions[0].amount < 0 and order.is_buy)
or (active_positions[0].amount > 0 and not order.is_buy)):
self.cancel_order(self._market_info, order.client_order_id)
self.logger().info(f"Initiated cancellation of {'buy' if order.is_buy else 'sell'} order "
f"{order.client_order_id} in favour of take profit order.")
for position in active_positions:
if (ask_price > position.entry_price and position.amount > 0) or (
bid_price < position.entry_price and position.amount < 0):
# check if there is an active order to take profit, and create if none exists
profit_spread = self._long_profit_taking_spread if position.amount > 0 else self._short_profit_taking_spread
take_profit_price = position.entry_price * (Decimal("1") + profit_spread) if position.amount > 0 \
else position.entry_price * (Decimal("1") - profit_spread)
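# e.g. entry_price = 100 with a 2% profit-taking spread gives a
# take-profit price of 102 for a long position and 98 for a short one.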
price = market.quantize_order_price(self.trading_pair, take_profit_price)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
old_exit_orders = [
o for o in self.active_orders
if ((o.price != price or o.quantity != size)
and o.client_order_id in self._exit_orders.keys()
and ((position.amount < 0 and o.is_buy) or (position.amount > 0 and not o.is_buy)))]
for old_order in old_exit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
self.logger().info(
f"Initiated cancellation of previous take profit order {old_order.client_order_id} in favour of new take profit order.")
exit_order_exists = [o for o in self.active_orders if o.price == price]
if len(exit_order_exists) == 0:
if size > 0 and price > 0:
if position.amount < 0:
buys.append(PriceSize(price, size))
else:
sells.append(PriceSize(price, size))
return Proposal(buys, sells)
def _should_renew_stop_loss(self, stop_loss_order: LimitOrder) -> bool:
stop_loss_creation_timestamp = self._exit_orders.get(stop_loss_order.client_order_id)
time_since_stop_loss = self.current_timestamp - stop_loss_creation_timestamp
return time_since_stop_loss >= self._time_between_stop_loss_orders
def stop_loss_proposal(self, mode: PositionMode, active_positions: List[Position]) -> Proposal:
market: ExchangeBase = self._market_info.market
top_ask = market.get_price(self.trading_pair, False)
top_bid = market.get_price(self.trading_pair, True)
buys = []
sells = []
for position in active_positions:
# check if stop loss order needs to be placed
stop_loss_price = position.entry_price * (Decimal("1") + self._stop_loss_spread) if position.amount < 0 \
else position.entry_price * (Decimal("1") - self._stop_loss_spread)
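# e.g. entry_price = 100 with stop_loss_spread = 0.05 gives a stop-loss
# price of 95 for a long position and 105 for a short position.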
existent_stop_loss_orders = [order for order in self.active_orders
if order.client_order_id in self._exit_orders.keys()
and ((position.amount > 0 and not order.is_buy)
or (position.amount < 0 and order.is_buy))]
if (not existent_stop_loss_orders
or (self._should_renew_stop_loss(existent_stop_loss_orders[0]))):
previous_stop_loss_price = None
for order in existent_stop_loss_orders:
previous_stop_loss_price = order.price
self.cancel_order(self._market_info, order.client_order_id)
new_price = previous_stop_loss_price or stop_loss_price
if (top_ask <= stop_loss_price and position.amount > 0):
price = market.quantize_order_price(
self.trading_pair,
new_price * (Decimal(1) - self._stop_loss_slippage_buffer))
take_profit_orders = [o for o in self.active_orders
if (not o.is_buy and o.price > price
and o.client_order_id in self._exit_orders.keys())]
# cancel take profit orders if they exist
for old_order in take_profit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
if size > 0 and price > 0:
self.logger().info("Creating stop loss sell order to close long position.")
sells.append(PriceSize(price, size))
elif (top_bid >= stop_loss_price and position.amount < 0):
price = market.quantize_order_price(
self.trading_pair,
new_price * (Decimal(1) + self._stop_loss_slippage_buffer))
take_profit_orders = [o for o in self.active_orders
if (o.is_buy and o.price < price
and o.client_order_id in self._exit_orders.keys())]
# cancel take profit orders if they exist
for old_order in take_profit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
if size > 0 and price > 0:
self.logger().info("Creating stop loss buy order to close short position.")
buys.append(PriceSize(price, size))
return Proposal(buys, sells)
def create_base_proposal(self):
market: ExchangeBase = self._market_info.market
buys = []
sells = []
# First check whether a customized order override is configured; otherwise the proposal is created
# according to the order spread, amount, and levels settings.
order_override = self._order_override
if order_override is not None and len(order_override) > 0:
for key, value in order_override.items():
if str(value[0]) in ["buy", "sell"]:
if str(value[0]) == "buy":
price = self.get_price() * (Decimal("1") - Decimal(str(value[1])) / Decimal("100"))
price = market.quantize_order_price(self.trading_pair, price)
size = Decimal(str(value[2]))
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0 and price > 0:
buys.append(PriceSize(price, size))
elif str(value[0]) == "sell":
price = self.get_price() * (Decimal("1") + Decimal(str(value[1])) / Decimal("100"))
price = market.quantize_order_price(self.trading_pair, price)
size = Decimal(str(value[2]))
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0 and price > 0:
sells.append(PriceSize(price, size))
else:
for level in range(0, self._buy_levels):
price = self.get_price() * (Decimal("1") - self._bid_spread - (level * self._order_level_spread))
price = market.quantize_order_price(self.trading_pair, price)
size = self._order_amount + (self._order_level_amount * level)
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0:
buys.append(PriceSize(price, size))
for level in range(0, self._sell_levels):
price = self.get_price() * (Decimal("1") + self._ask_spread + (level * self._order_level_spread))
price = market.quantize_order_price(self.trading_pair, price)
size = self._order_amount + (self._order_level_amount * level)
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0:
sells.append(PriceSize(price, size))
return Proposal(buys, sells)
def apply_order_levels_modifiers(self, proposal: Proposal):
self.apply_price_band(proposal)
def apply_price_band(self, proposal: Proposal):
if self._price_ceiling > 0 and self.get_price() >= self._price_ceiling:
proposal.buys = []
if self._price_floor > 0 and self.get_price() <= self._price_floor:
proposal.sells = []
def apply_order_price_modifiers(self, proposal: Proposal):
if self._order_optimization_enabled:
self.apply_order_optimization(proposal)
def apply_budget_constraint(self, proposal: Proposal):
checker = self._market_info.market.budget_checker
order_candidates = self.create_order_candidates_for_budget_check(proposal)
adjusted_candidates = checker.adjust_candidates(order_candidates, all_or_none=True)
self.apply_adjusted_order_candidates_to_proposal(adjusted_candidates, proposal)
def create_order_candidates_for_budget_check(self, proposal: Proposal):
order_candidates = []
is_maker = True
order_candidates.extend(
[
PerpetualOrderCandidate(
self.trading_pair,
is_maker,
OrderType.LIMIT,
TradeType.BUY,
buy.size,
buy.price,
leverage=Decimal(self._leverage),
)
for buy in proposal.buys
]
)
order_candidates.extend(
[
PerpetualOrderCandidate(
self.trading_pair,
is_maker,
OrderType.LIMIT,
TradeType.SELL,
sell.size,
sell.price,
leverage=Decimal(self._leverage),
)
for sell in proposal.sells
]
)
return order_candidates
def apply_adjusted_order_candidates_to_proposal(self,
adjusted_candidates: List[PerpetualOrderCandidate],
proposal: Proposal):
for order in chain(proposal.buys, proposal.sells):
adjusted_candidate = adjusted_candidates.pop(0)
if adjusted_candidate.amount == s_decimal_zero:
self.logger().info(
f"Insufficient balance: {adjusted_candidate.order_side.name} order (price: {order.price},"
f" size: {order.size}) is omitted."
)
self.logger().warning(
"You are also at a possible risk of being liquidated if there happens to be an open loss.")
order.size = s_decimal_zero
proposal.buys = [o for o in proposal.buys if o.size > 0]
proposal.sells = [o for o in proposal.sells if o.size > 0]
def filter_out_takers(self, proposal: Proposal):
market: ExchangeBase = self._market_info.market
top_ask = market.get_price(self.trading_pair, True)
if not top_ask.is_nan():
proposal.buys = [buy for buy in proposal.buys if buy.price < top_ask]
top_bid = market.get_price(self.trading_pair, False)
if not top_bid.is_nan():
proposal.sells = [sell for sell in proposal.sells if sell.price > top_bid]
# Compare the market price with the top bid and top ask price
def apply_order_optimization(self, proposal: Proposal):
market: ExchangeBase = self._market_info.market
own_buy_size = s_decimal_zero
own_sell_size = s_decimal_zero
# If there are multiple orders, do not jump prices
if self._order_levels > 1:
return
for order in self.active_orders:
if order.is_buy:
own_buy_size = order.quantity
else:
own_sell_size = order.quantity
if len(proposal.buys) == 1:
# Get the top bid price in the market using order_optimization_depth and your buy order volume
top_bid_price = self._market_info.get_price_for_volume(
False, self._bid_order_optimization_depth + own_buy_size).result_price
price_quantum = market.get_order_price_quantum(
self.trading_pair,
top_bid_price
)
# Get the price above the top bid
price_above_bid = (ceil(top_bid_price / price_quantum) + 1) * price_quantum
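# e.g. top_bid_price = 100.003 with price_quantum = 0.01:
# ceil(10000.3) + 1 = 10002, so price_above_bid = 100.02
# (one tick above the rounded-up top bid).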
# If the price_above_bid is lower than the price suggested by the pricing proposal,
# lower your price to this
lower_buy_price = min(proposal.buys[0].price, price_above_bid)
proposal.buys[0].price = market.quantize_order_price(self.trading_pair, lower_buy_price)
if len(proposal.sells) == 1:
# Get the top ask price in the market using order_optimization_depth and your sell order volume
top_ask_price = self._market_info.get_price_for_volume(
True, self._ask_order_optimization_depth + own_sell_size).result_price
price_quantum = market.get_order_price_quantum(
self.trading_pair,
top_ask_price
)
# Get the price below the top ask
price_below_ask = (floor(top_ask_price / price_quantum) - 1) * price_quantum
# If the price_below_ask is higher than the price suggested by the pricing proposal,
# increase your price to this
higher_sell_price = max(proposal.sells[0].price, price_below_ask)
proposal.sells[0].price = market.quantize_order_price(self.trading_pair, higher_sell_price)
def did_fill_order(self, order_filled_event: OrderFilledEvent):
order_id = order_filled_event.order_id
market_info = self._sb_order_tracker.get_shadow_market_pair_from_order_id(order_id)
if market_info is not None:
if self._logging_options & self.OPTION_LOG_MAKER_ORDER_FILLED:
self.log_with_clock(
logging.INFO,
f"({market_info.trading_pair}) Maker "
f"{'buy' if order_filled_event.trade_type is TradeType.BUY else 'sell'} order of "
f"{order_filled_event.amount} {market_info.base_asset} filled."
)
def did_complete_buy_order(self, order_completed_event: BuyOrderCompletedEvent):
order_id = order_completed_event.order_id
limit_order_record = self._sb_order_tracker.get_limit_order(self._market_info, order_id)
if limit_order_record is None:
return
# delay order creation by filled_order_delay (in seconds)
self._create_timestamp = self.current_timestamp + self._filled_order_delay
self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)
self._last_own_trade_price = limit_order_record.price
self.log_with_clock(
logging.INFO,
f"({self.trading_pair}) Maker buy order {order_id} "
f"({limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled."
)
self.notify_hb_app_with_timestamp(
f"Maker BUY order {limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency} is filled."
)
def did_complete_sell_order(self, order_completed_event: SellOrderCompletedEvent):
order_id = order_completed_event.order_id
limit_order_record: LimitOrder = self._sb_order_tracker.get_limit_order(self._market_info, order_id)
if limit_order_record is None:
return
# delay order creation by filled_order_delay (in seconds)
self._create_timestamp = self.current_timestamp + self._filled_order_delay
self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)
self._last_own_trade_price = limit_order_record.price
self.log_with_clock(
logging.INFO,
f"({self.trading_pair}) Maker sell order {order_id} "
f"({limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled."
)
self.notify_hb_app_with_timestamp(
f"Maker SELL order {limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency} is filled."
)
def is_within_tolerance(self, current_prices: List[Decimal], proposal_prices: List[Decimal]) -> bool:
if len(current_prices) != len(proposal_prices):
return False
current_prices = sorted(current_prices)
proposal_prices = sorted(proposal_prices)
for current, proposal in zip(current_prices, proposal_prices):
# if spread diff is more than the tolerance or order quantities are different, return false.
if abs(proposal - current) / current > self._order_refresh_tolerance_pct:
return False
return True
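# is_within_tolerance example: with order_refresh_tolerance_pct = 0.002 (0.2%),
# current prices [100.00, 99.50] vs proposal prices [100.01, 99.51] differ by
# about 0.01% each, so the proposal is considered within tolerance.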
# Cancels active orders; if the new proposal prices are all within order_refresh_tolerance_pct of the current ones, cancellation is deferred instead.
def cancel_active_orders(self, proposal: Proposal):
if self._cancel_timestamp > self.current_timestamp:
return
to_defer_canceling = False
if len(self.active_orders) == 0:
return
if proposal is not None and self._order_refresh_tolerance_pct >= 0:
active_buy_prices = [Decimal(str(o.price)) for o in self.active_orders if o.is_buy]
active_sell_prices = [Decimal(str(o.price)) for o in self.active_orders if not o.is_buy]
proposal_buys = [buy.price for buy in proposal.buys]
proposal_sells = [sell.price for sell in proposal.sells]
if self.is_within_tolerance(active_buy_prices, proposal_buys) and \
self.is_within_tolerance(active_sell_prices, proposal_sells):
to_defer_canceling = True
if not to_defer_canceling:
for order in self.active_orders:
self.cancel_order(self._market_info, order.client_order_id)
else:
self.logger().info(f"Not cancelling active orders since difference between new order prices "
f"and current order prices is within "
f"{self._order_refresh_tolerance_pct:.2%} order_refresh_tolerance_pct")
self.set_timers()
def cancel_orders_below_min_spread(self):
price = self.get_price()
for order in self.active_orders:
negation = -1 if order.is_buy else 1
if (negation * (order.price - price) / price) < self._minimum_spread:
self.logger().info(f"Order is below minimum spread ({self._minimum_spread})."
f" Cancelling Order: ({'Buy' if order.is_buy else 'Sell'}) "
f"ID - {order.client_order_id}")
self.cancel_order(self._market_info, order.client_order_id)
def to_create_orders(self, proposal: Proposal) -> bool:
return (self._create_timestamp < self.current_timestamp and
proposal is not None and
len(self.active_orders) == 0)
def execute_orders_proposal(self, proposal: Proposal, position_action: PositionAction):
orders_created = False
if len(proposal.buys) > 0:
if position_action == PositionAction.CLOSE:
if self.current_timestamp < self._next_buy_exit_order_timestamp:
return
else:
self._next_buy_exit_order_timestamp = self.current_timestamp + self.filled_order_delay
if self._logging_options & self.OPTION_LOG_CREATE_ORDER:
price_quote_str = [f"{buy.size.normalize()} {self.base_asset}, "
f"{buy.price.normalize()} {self.quote_asset}"
for buy in proposal.buys]
self.logger().info(
f"({self.trading_pair}) Creating {len(proposal.buys)} {self._close_order_type.name} bid orders "
f"at (Size, Price): {price_quote_str} to {position_action.name} position."
)
for buy in proposal.buys:
bid_order_id = self.buy_with_specific_market(
self._market_info,
buy.size,
order_type=self._close_order_type,
price=buy.price,
position_action=position_action
)
if position_action == PositionAction.CLOSE:
self._exit_orders[bid_order_id] = self.current_timestamp
orders_created = True
if len(proposal.sells) > 0:
if position_action == PositionAction.CLOSE:
if self.current_timestamp < self._next_sell_exit_order_timestamp:
return
else:
self._next_sell_exit_order_timestamp = self.current_timestamp + self.filled_order_delay
if self._logging_options & self.OPTION_LOG_CREATE_ORDER:
price_quote_str = [f"{sell.size.normalize()} {self.base_asset}, "
f"{sell.price.normalize()} {self.quote_asset}"
for sell in proposal.sells]
self.logger().info(
f"({self.trading_pair}) Creating {len(proposal.sells)} {self._close_order_type.name} ask "
f"orders at (Size, Price): {price_quote_str} to {position_action.name} position."
)
for sell in proposal.sells:
ask_order_id = self.sell_with_specific_market(
self._market_info,
sell.size,
order_type=self._close_order_type,
price=sell.price,
position_action=position_action
)
if position_action == PositionAction.CLOSE:
self._exit_orders[ask_order_id] = self.current_timestamp
orders_created = True
if orders_created:
self.set_timers()
def set_timers(self):
next_cycle = self.current_timestamp + self._order_refresh_time
if self._create_timestamp <= self.current_timestamp:
self._create_timestamp = next_cycle
if self._cancel_timestamp <= self.current_timestamp:
self._cancel_timestamp = min(self._create_timestamp, next_cycle)
def notify_hb_app(self, msg: str):
if self._hb_app_notification:
super().notify_hb_app(msg)
def get_price_type(self, price_type_str: str) -> PriceType:
if price_type_str == "mid_price":
return PriceType.MidPrice
elif price_type_str == "best_bid":
return PriceType.BestBid
elif price_type_str == "best_ask":
return PriceType.BestAsk
elif price_type_str == "last_price":
return PriceType.LastTrade
elif price_type_str == 'last_own_trade_price':
return PriceType.LastOwnTrade
elif price_type_str == "custom":
return PriceType.Custom
else:
raise ValueError(f"Unrecognized price type string {price_type_str}.")
| 46.570281
| 144
| 0.623491
|
import logging
import time
from decimal import Decimal
from itertools import chain
from math import ceil, floor
from typing import Dict, List
import numpy as np
import pandas as pd
from hummingbot.connector.derivative.position import Position
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.core.clock import Clock
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_candidate import PerpetualOrderCandidate
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
OrderFilledEvent,
OrderType,
PositionAction,
PositionMode,
PriceType,
SellOrderCompletedEvent,
TradeType
)
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils import map_df_to_str
from hummingbot.strategy.asset_price_delegate import AssetPriceDelegate
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.strategy.order_book_asset_price_delegate import OrderBookAssetPriceDelegate
from hummingbot.strategy.perpetual_market_making.data_types import PriceSize, Proposal
from hummingbot.strategy.perpetual_market_making.perpetual_market_making_order_tracker import (
PerpetualMarketMakingOrderTracker
)
from hummingbot.strategy.strategy_py_base import StrategyPyBase
NaN = float("nan")
s_decimal_zero = Decimal(0)
s_decimal_neg_one = Decimal(-1)
class PerpetualMarketMakingStrategy(StrategyPyBase):
OPTION_LOG_CREATE_ORDER = 1 << 3
OPTION_LOG_MAKER_ORDER_FILLED = 1 << 4
OPTION_LOG_STATUS_REPORT = 1 << 5
OPTION_LOG_ALL = 0x7fffffffffffffff
_logger = None
@classmethod
def logger(cls):
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def init_params(self,
market_info: MarketTradingPairTuple,
leverage: int,
position_mode: str,
bid_spread: Decimal,
ask_spread: Decimal,
order_amount: Decimal,
long_profit_taking_spread: Decimal,
short_profit_taking_spread: Decimal,
stop_loss_spread: Decimal,
time_between_stop_loss_orders: float,
stop_loss_slippage_buffer: Decimal,
order_levels: int = 1,
order_level_spread: Decimal = s_decimal_zero,
order_level_amount: Decimal = s_decimal_zero,
order_refresh_time: float = 30.0,
order_refresh_tolerance_pct: Decimal = s_decimal_neg_one,
filled_order_delay: float = 60.0,
order_optimization_enabled: bool = False,
ask_order_optimization_depth: Decimal = s_decimal_zero,
bid_order_optimization_depth: Decimal = s_decimal_zero,
asset_price_delegate: AssetPriceDelegate = None,
price_type: str = "mid_price",
price_ceiling: Decimal = s_decimal_neg_one,
price_floor: Decimal = s_decimal_neg_one,
logging_options: int = OPTION_LOG_ALL,
status_report_interval: float = 900,
minimum_spread: Decimal = Decimal(0),
hb_app_notification: bool = False,
order_override: Dict[str, List[str]] = {},
):
if price_ceiling != s_decimal_neg_one and price_ceiling < price_floor:
raise ValueError("Parameter price_ceiling cannot be lower than price_floor.")
self._sb_order_tracker = PerpetualMarketMakingOrderTracker()
self._market_info = market_info
self._leverage = leverage
self._position_mode = PositionMode.HEDGE if position_mode == "Hedge" else PositionMode.ONEWAY
self._bid_spread = bid_spread
self._ask_spread = ask_spread
self._minimum_spread = minimum_spread
self._order_amount = order_amount
self._long_profit_taking_spread = long_profit_taking_spread
self._short_profit_taking_spread = short_profit_taking_spread
self._stop_loss_spread = stop_loss_spread
self._order_levels = order_levels
self._buy_levels = order_levels
self._sell_levels = order_levels
self._order_level_spread = order_level_spread
self._order_level_amount = order_level_amount
self._order_refresh_time = order_refresh_time
self._order_refresh_tolerance_pct = order_refresh_tolerance_pct
self._filled_order_delay = filled_order_delay
self._order_optimization_enabled = order_optimization_enabled
self._ask_order_optimization_depth = ask_order_optimization_depth
self._bid_order_optimization_depth = bid_order_optimization_depth
self._asset_price_delegate = asset_price_delegate
self._price_type = self.get_price_type(price_type)
self._price_ceiling = price_ceiling
self._price_floor = price_floor
self._hb_app_notification = hb_app_notification
self._order_override = order_override
self._cancel_timestamp = 0
self._create_timestamp = 0
self._all_markets_ready = False
self._logging_options = logging_options
self._last_timestamp = 0
self._status_report_interval = status_report_interval
self._last_own_trade_price = Decimal('nan')
self._ts_peak_bid_price = Decimal('0')
self._ts_peak_ask_price = Decimal('0')
self._exit_orders = dict()
self._next_buy_exit_order_timestamp = 0
self._next_sell_exit_order_timestamp = 0
self.add_markets([market_info.market])
self._close_order_type = OrderType.LIMIT
self._time_between_stop_loss_orders = time_between_stop_loss_orders
self._stop_loss_slippage_buffer = stop_loss_slippage_buffer
def all_markets_ready(self):
return all([market.ready for market in self.active_markets])
@property
def order_refresh_tolerance_pct(self) -> Decimal:
return self._order_refresh_tolerance_pct
@order_refresh_tolerance_pct.setter
def order_refresh_tolerance_pct(self, value: Decimal):
self._order_refresh_tolerance_pct = value
@property
def order_amount(self) -> Decimal:
return self._order_amount
@order_amount.setter
def order_amount(self, value: Decimal):
self._order_amount = value
@property
def order_levels(self) -> int:
return self._order_levels
@order_levels.setter
def order_levels(self, value: int):
self._order_levels = value
self._buy_levels = value
self._sell_levels = value
@property
def buy_levels(self) -> int:
return self._buy_levels
@buy_levels.setter
def buy_levels(self, value: int):
self._buy_levels = value
@property
def sell_levels(self) -> int:
return self._sell_levels
@sell_levels.setter
def sell_levels(self, value: int):
self._sell_levels = value
@property
def order_level_amount(self) -> Decimal:
return self._order_level_amount
@order_level_amount.setter
def order_level_amount(self, value: Decimal):
self._order_level_amount = value
@property
def order_level_spread(self) -> Decimal:
return self._order_level_spread
@order_level_spread.setter
def order_level_spread(self, value: Decimal):
self._order_level_spread = value
@property
def bid_spread(self) -> Decimal:
return self._bid_spread
@bid_spread.setter
def bid_spread(self, value: Decimal):
self._bid_spread = value
@property
def ask_spread(self) -> Decimal:
return self._ask_spread
@ask_spread.setter
def ask_spread(self, value: Decimal):
self._ask_spread = value
@property
def order_optimization_enabled(self) -> bool:
return self._order_optimization_enabled
@order_optimization_enabled.setter
def order_optimization_enabled(self, value: bool):
self._order_optimization_enabled = value
@property
def order_refresh_time(self) -> float:
return self._order_refresh_time
@order_refresh_time.setter
def order_refresh_time(self, value: float):
self._order_refresh_time = value
@property
def filled_order_delay(self) -> float:
return self._filled_order_delay
@filled_order_delay.setter
def filled_order_delay(self, value: float):
self._filled_order_delay = value
@property
def price_ceiling(self) -> Decimal:
return self._price_ceiling
@price_ceiling.setter
def price_ceiling(self, value: Decimal):
self._price_ceiling = value
@property
def price_floor(self) -> Decimal:
return self._price_floor
@price_floor.setter
def price_floor(self, value: Decimal):
self._price_floor = value
@property
def base_asset(self):
return self._market_info.base_asset
@property
def quote_asset(self):
return self._market_info.quote_asset
@property
def trading_pair(self):
return self._market_info.trading_pair
def get_price(self) -> float:
if self._asset_price_delegate is not None:
price_provider = self._asset_price_delegate
else:
price_provider = self._market_info
if self._price_type is PriceType.LastOwnTrade:
price = self._last_own_trade_price
else:
price = price_provider.get_price_by_type(self._price_type)
if price.is_nan():
price = price_provider.get_price_by_type(PriceType.MidPrice)
return price
def get_last_price(self) -> float:
return self._market_info.get_last_price()
def get_mid_price(self) -> Decimal:
delegate: AssetPriceDelegate = self._asset_price_delegate
if delegate is not None:
mid_price = delegate.get_mid_price()
else:
mid_price = self._market_info.get_mid_price()
return mid_price
@property
def active_orders(self) -> List[LimitOrder]:
if self._market_info not in self._sb_order_tracker.market_pair_to_active_orders:
return []
return self._sb_order_tracker.market_pair_to_active_orders[self._market_info]
@property
def active_positions(self) -> Dict[str, Position]:
return self._market_info.market.account_positions
@property
def active_buys(self) -> List[LimitOrder]:
return [o for o in self.active_orders if o.is_buy]
@property
def active_sells(self) -> List[LimitOrder]:
return [o for o in self.active_orders if not o.is_buy]
@property
def logging_options(self) -> int:
return self._logging_options
@logging_options.setter
def logging_options(self, logging_options: int):
self._logging_options = logging_options
@property
def asset_price_delegate(self) -> AssetPriceDelegate:
return self._asset_price_delegate
@asset_price_delegate.setter
def asset_price_delegate(self, value):
self._asset_price_delegate = value
def perpetual_mm_assets_df(self) -> pd.DataFrame:
market, trading_pair, base_asset, quote_asset = self._market_info
quote_balance = float(market.get_balance(quote_asset))
available_quote_balance = float(market.get_available_balance(quote_asset))
data = [
["", quote_asset],
["Total Balance", round(quote_balance, 4)],
["Available Balance", round(available_quote_balance, 4)]
]
df = pd.DataFrame(data=data)
return df
def active_orders_df(self) -> pd.DataFrame:
price = self.get_price()
active_orders = self.active_orders
no_sells = len([o for o in active_orders if not o.is_buy])
active_orders.sort(key=lambda x: x.price, reverse=True)
columns = ["Level", "Type", "Price", "Spread", "Amount (Orig)", "Amount (Adj)", "Age"]
data = []
lvl_buy, lvl_sell = 0, 0
for idx in range(0, len(active_orders)):
order = active_orders[idx]
level = None
if order.is_buy:
level = lvl_buy + 1
lvl_buy += 1
else:
level = no_sells - lvl_sell
lvl_sell += 1
spread = 0 if price == 0 else abs(order.price - price) / price
age = "n/a"
if "//" not in order.client_order_id:
age = pd.Timestamp(int(time.time()) - int(order.client_order_id[-16:]) / 1e6,
unit='s').strftime('%H:%M:%S')
amount_orig = "" if level is None else self._order_amount + ((level - 1) * self._order_level_amount)
data.append([
level,
"buy" if order.is_buy else "sell",
float(order.price),
f"{spread:.2%}",
amount_orig,
float(order.quantity),
age
])
return pd.DataFrame(data=data, columns=columns)
def active_positions_df(self) -> pd.DataFrame:
columns = ["Symbol", "Type", "Entry Price", "Amount", "Leverage", "Unrealized PnL"]
data = []
market, trading_pair = self._market_info.market, self._market_info.trading_pair
for idx in self.active_positions.values():
is_buy = True if idx.amount > 0 else False
unrealized_profit = ((market.get_price(trading_pair, is_buy) - idx.entry_price) * idx.amount)
data.append([
idx.trading_pair,
idx.position_side.name,
idx.entry_price,
idx.amount,
idx.leverage,
unrealized_profit
])
return pd.DataFrame(data=data, columns=columns)
def market_status_data_frame(self) -> pd.DataFrame:
markets_data = []
markets_columns = ["Exchange", "Market", "Best Bid", "Best Ask", f"Ref Price ({self._price_type.name})"]
if self._price_type is PriceType.LastOwnTrade and self._last_own_trade_price.is_nan():
markets_columns[-1] = "Ref Price (MidPrice)"
market_books = [(self._market_info.market, self._market_info.trading_pair)]
if type(self._asset_price_delegate) is OrderBookAssetPriceDelegate:
market_books.append((self._asset_price_delegate.market, self._asset_price_delegate.trading_pair))
for market, trading_pair in market_books:
bid_price = market.get_price(trading_pair, False)
ask_price = market.get_price(trading_pair, True)
ref_price = float("nan")
if market == self._market_info.market and self._asset_price_delegate is None:
ref_price = self.get_price()
elif market == self._asset_price_delegate.market and self._price_type is not PriceType.LastOwnTrade:
ref_price = self._asset_price_delegate.get_price_by_type(self._price_type)
markets_data.append([
market.display_name,
trading_pair,
float(bid_price),
float(ask_price),
float(ref_price)
])
return pd.DataFrame(data=markets_data, columns=markets_columns).replace(np.nan, '', regex=True)
def format_status(self) -> str:
if not self._all_markets_ready:
return "Market connectors are not ready."
lines = []
warning_lines = []
markets_df = self.market_status_data_frame()
lines.extend(["", " Markets:"] + [" " + line for line in markets_df.to_string(index=False).split("\n")])
assets_df = map_df_to_str(self.perpetual_mm_assets_df())
first_col_length = max(*assets_df[0].apply(len))
df_lines = assets_df.to_string(index=False, header=False,
formatters={0: ("{:<" + str(first_col_length) + "}").format}).split("\n")
lines.extend(["", " Assets:"] + [" " + line for line in df_lines])
if len(self.active_orders) > 0:
df = self.active_orders_df()
lines.extend(["", " Orders:"] + [" " + line for line in df.to_string(index=False).split("\n")])
else:
lines.extend(["", " No active maker orders."])
# See if there're any active positions.
if len(self.active_positions) > 0:
df = self.active_positions_df()
lines.extend(["", " Positions:"] + [" " + line for line in df.to_string(index=False).split("\n")])
else:
lines.extend(["", " No active positions."])
if len(warning_lines) > 0:
lines.extend(["", "*** WARNINGS ***"] + warning_lines)
return "\n".join(lines)
def start(self, clock: Clock, timestamp: float):
super().start(clock, timestamp)
self._last_timestamp = timestamp
self.apply_initial_settings(self.trading_pair, self._position_mode, self._leverage)
def apply_initial_settings(self, trading_pair: str, position: Position, leverage: int):
market: ExchangeBase = self._market_info.market
market.set_leverage(trading_pair, leverage)
market.set_position_mode(position)
def tick(self, timestamp: float):
market: ExchangeBase = self._market_info.market
session_positions = [s for s in self.active_positions.values() if s.trading_pair == self.trading_pair]
current_tick = timestamp // self._status_report_interval
last_tick = self._last_timestamp // self._status_report_interval
should_report_warnings = ((current_tick > last_tick) and
(self._logging_options & self.OPTION_LOG_STATUS_REPORT))
try:
if not self._all_markets_ready:
self._all_markets_ready = all([market.ready for market in self.active_markets])
if self._asset_price_delegate is not None and self._all_markets_ready:
self._all_markets_ready = self._asset_price_delegate.ready
if not self._all_markets_ready:
if should_report_warnings:
self.logger().warning("Markets are not ready. No market making trades are permitted.")
return
if should_report_warnings:
if not all([market.network_status is NetworkStatus.CONNECTED for market in self.active_markets]):
self.logger().warning("WARNING: Some markets are not connected or are down at the moment. Market "
"making may be dangerous when markets or networks are unstable.")
if len(session_positions) == 0:
self._exit_orders = dict() # Empty list of exit order at this point to reduce size
proposal = None
if self._create_timestamp <= self.current_timestamp:
# 1. Create base order proposals
proposal = self.create_base_proposal()
# 2. Apply functions that limit numbers of buys and sells proposal
self.apply_order_levels_modifiers(proposal)
# 3. Apply functions that modify orders price
self.apply_order_price_modifiers(proposal)
# 4. Apply budget constraint, i.e. can't buy/sell more than what you have.
self.apply_budget_constraint(proposal)
self.filter_out_takers(proposal)
self.cancel_active_orders(proposal)
self.cancel_orders_below_min_spread()
if self.to_create_orders(proposal):
self.execute_orders_proposal(proposal, PositionAction.OPEN)
self._ts_peak_ask_price = market.get_price(self.trading_pair, False)
self._ts_peak_bid_price = market.get_price(self.trading_pair, True)
else:
self.manage_positions(session_positions)
finally:
self._last_timestamp = timestamp
def manage_positions(self, session_positions: List[Position]):
mode = self._position_mode
proposals = self.profit_taking_proposal(mode, session_positions)
if proposals is not None:
self.execute_orders_proposal(proposals, PositionAction.CLOSE)
proposals = self.stop_loss_proposal(mode, session_positions)
if proposals is not None:
self.execute_orders_proposal(proposals, PositionAction.CLOSE)
def profit_taking_proposal(self, mode: PositionMode, active_positions: List) -> Proposal:
market: ExchangeBase = self._market_info.market
unwanted_exit_orders = [o for o in self.active_orders
if o.client_order_id not in self._exit_orders.keys()]
ask_price = market.get_price(self.trading_pair, True)
bid_price = market.get_price(self.trading_pair, False)
buys = []
sells = []
if mode == PositionMode.ONEWAY:
if len(active_positions) > 1:
self.logger().error(f"More than one open position in {mode.name} position mode. "
"Kindly ensure you do not interact with the exchange through "
"other platforms and restart this strategy.")
else:
for order in unwanted_exit_orders:
if ((active_positions[0].amount < 0 and order.is_buy)
or (active_positions[0].amount > 0 and not order.is_buy)):
self.cancel_order(self._market_info, order.client_order_id)
self.logger().info(f"Initiated cancellation of {'buy' if order.is_buy else 'sell'} order "
f"{order.client_order_id} in favour of take profit order.")
for position in active_positions:
if (ask_price > position.entry_price and position.amount > 0) or (
bid_price < position.entry_price and position.amount < 0):
profit_spread = self._long_profit_taking_spread if position.amount > 0 else self._short_profit_taking_spread
take_profit_price = position.entry_price * (Decimal("1") + profit_spread) if position.amount > 0 \
else position.entry_price * (Decimal("1") - profit_spread)
price = market.quantize_order_price(self.trading_pair, take_profit_price)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
old_exit_orders = [
o for o in self.active_orders
if ((o.price != price or o.quantity != size)
and o.client_order_id in self._exit_orders.keys()
and ((position.amount < 0 and o.is_buy) or (position.amount > 0 and not o.is_buy)))]
for old_order in old_exit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
self.logger().info(
f"Initiated cancellation of previous take profit order {old_order.client_order_id} in favour of new take profit order.")
exit_order_exists = [o for o in self.active_orders if o.price == price]
if len(exit_order_exists) == 0:
if size > 0 and price > 0:
if position.amount < 0:
buys.append(PriceSize(price, size))
else:
sells.append(PriceSize(price, size))
return Proposal(buys, sells)
def _should_renew_stop_loss(self, stop_loss_order: LimitOrder) -> bool:
stop_loss_creation_timestamp = self._exit_orders.get(stop_loss_order.client_order_id)
time_since_stop_loss = self.current_timestamp - stop_loss_creation_timestamp
return time_since_stop_loss >= self._time_between_stop_loss_orders
def stop_loss_proposal(self, mode: PositionMode, active_positions: List[Position]) -> Proposal:
market: ExchangeBase = self._market_info.market
top_ask = market.get_price(self.trading_pair, False)
top_bid = market.get_price(self.trading_pair, True)
buys = []
sells = []
for position in active_positions:
stop_loss_price = position.entry_price * (Decimal("1") + self._stop_loss_spread) if position.amount < 0 \
else position.entry_price * (Decimal("1") - self._stop_loss_spread)
existent_stop_loss_orders = [order for order in self.active_orders
if order.client_order_id in self._exit_orders.keys()
and ((position.amount > 0 and not order.is_buy)
or (position.amount < 0 and order.is_buy))]
if (not existent_stop_loss_orders
or (self._should_renew_stop_loss(existent_stop_loss_orders[0]))):
previous_stop_loss_price = None
for order in existent_stop_loss_orders:
previous_stop_loss_price = order.price
self.cancel_order(self._market_info, order.client_order_id)
new_price = previous_stop_loss_price or stop_loss_price
if (top_ask <= stop_loss_price and position.amount > 0):
price = market.quantize_order_price(
self.trading_pair,
new_price * (Decimal(1) - self._stop_loss_slippage_buffer))
take_profit_orders = [o for o in self.active_orders
if (not o.is_buy and o.price > price
and o.client_order_id in self._exit_orders.keys())]
for old_order in take_profit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
if size > 0 and price > 0:
self.logger().info("Creating stop loss sell order to close long position.")
sells.append(PriceSize(price, size))
elif (top_bid >= stop_loss_price and position.amount < 0):
price = market.quantize_order_price(
self.trading_pair,
new_price * (Decimal(1) + self._stop_loss_slippage_buffer))
take_profit_orders = [o for o in self.active_orders
if (o.is_buy and o.price < price
and o.client_order_id in self._exit_orders.keys())]
for old_order in take_profit_orders:
self.cancel_order(self._market_info, old_order.client_order_id)
size = market.quantize_order_amount(self.trading_pair, abs(position.amount))
if size > 0 and price > 0:
self.logger().info("Creating stop loss buy order to close short position.")
buys.append(PriceSize(price, size))
return Proposal(buys, sells)
def create_base_proposal(self):
market: ExchangeBase = self._market_info.market
buys = []
sells = []
order_override = self._order_override
if order_override is not None and len(order_override) > 0:
for key, value in order_override.items():
if str(value[0]) in ["buy", "sell"]:
if str(value[0]) == "buy":
price = self.get_price() * (Decimal("1") - Decimal(str(value[1])) / Decimal("100"))
price = market.quantize_order_price(self.trading_pair, price)
size = Decimal(str(value[2]))
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0 and price > 0:
buys.append(PriceSize(price, size))
elif str(value[0]) == "sell":
price = self.get_price() * (Decimal("1") + Decimal(str(value[1])) / Decimal("100"))
price = market.quantize_order_price(self.trading_pair, price)
size = Decimal(str(value[2]))
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0 and price > 0:
sells.append(PriceSize(price, size))
else:
for level in range(0, self._buy_levels):
price = self.get_price() * (Decimal("1") - self._bid_spread - (level * self._order_level_spread))
price = market.quantize_order_price(self.trading_pair, price)
size = self._order_amount + (self._order_level_amount * level)
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0:
buys.append(PriceSize(price, size))
for level in range(0, self._sell_levels):
price = self.get_price() * (Decimal("1") + self._ask_spread + (level * self._order_level_spread))
price = market.quantize_order_price(self.trading_pair, price)
size = self._order_amount + (self._order_level_amount * level)
size = market.quantize_order_amount(self.trading_pair, size)
if size > 0:
sells.append(PriceSize(price, size))
return Proposal(buys, sells)
def apply_order_levels_modifiers(self, proposal: Proposal):
self.apply_price_band(proposal)
def apply_price_band(self, proposal: Proposal):
if self._price_ceiling > 0 and self.get_price() >= self._price_ceiling:
proposal.buys = []
if self._price_floor > 0 and self.get_price() <= self._price_floor:
proposal.sells = []
def apply_order_price_modifiers(self, proposal: Proposal):
if self._order_optimization_enabled:
self.apply_order_optimization(proposal)
def apply_budget_constraint(self, proposal: Proposal):
checker = self._market_info.market.budget_checker
order_candidates = self.create_order_candidates_for_budget_check(proposal)
adjusted_candidates = checker.adjust_candidates(order_candidates, all_or_none=True)
self.apply_adjusted_order_candidates_to_proposal(adjusted_candidates, proposal)
def create_order_candidates_for_budget_check(self, proposal: Proposal):
order_candidates = []
is_maker = True
order_candidates.extend(
[
PerpetualOrderCandidate(
self.trading_pair,
is_maker,
OrderType.LIMIT,
TradeType.BUY,
buy.size,
buy.price,
leverage=Decimal(self._leverage),
)
for buy in proposal.buys
]
)
order_candidates.extend(
[
PerpetualOrderCandidate(
self.trading_pair,
is_maker,
OrderType.LIMIT,
TradeType.SELL,
sell.size,
sell.price,
leverage=Decimal(self._leverage),
)
for sell in proposal.sells
]
)
return order_candidates
def apply_adjusted_order_candidates_to_proposal(self,
adjusted_candidates: List[PerpetualOrderCandidate],
proposal: Proposal):
for order in chain(proposal.buys, proposal.sells):
adjusted_candidate = adjusted_candidates.pop(0)
if adjusted_candidate.amount == s_decimal_zero:
self.logger().info(
f"Insufficient balance: {adjusted_candidate.order_side.name} order (price: {order.price},"
f" size: {order.size}) is omitted."
)
self.logger().warning(
"You are also at a possible risk of being liquidated if there happens to be an open loss.")
order.size = s_decimal_zero
proposal.buys = [o for o in proposal.buys if o.size > 0]
proposal.sells = [o for o in proposal.sells if o.size > 0]
def filter_out_takers(self, proposal: Proposal):
market: ExchangeBase = self._market_info.market
top_ask = market.get_price(self.trading_pair, True)
if not top_ask.is_nan():
proposal.buys = [buy for buy in proposal.buys if buy.price < top_ask]
top_bid = market.get_price(self.trading_pair, False)
if not top_bid.is_nan():
proposal.sells = [sell for sell in proposal.sells if sell.price > top_bid]
def apply_order_optimization(self, proposal: Proposal):
market: ExchangeBase = self._market_info.market
own_buy_size = s_decimal_zero
own_sell_size = s_decimal_zero
if self._order_levels > 1:
return
for order in self.active_orders:
if order.is_buy:
own_buy_size = order.quantity
else:
own_sell_size = order.quantity
if len(proposal.buys) == 1:
top_bid_price = self._market_info.get_price_for_volume(
False, self._bid_order_optimization_depth + own_buy_size).result_price
price_quantum = market.get_order_price_quantum(
self.trading_pair,
top_bid_price
)
price_above_bid = (ceil(top_bid_price / price_quantum) + 1) * price_quantum
lower_buy_price = min(proposal.buys[0].price, price_above_bid)
proposal.buys[0].price = market.quantize_order_price(self.trading_pair, lower_buy_price)
if len(proposal.sells) == 1:
top_ask_price = self._market_info.get_price_for_volume(
True, self._ask_order_optimization_depth + own_sell_size).result_price
price_quantum = market.get_order_price_quantum(
self.trading_pair,
top_ask_price
)
price_below_ask = (floor(top_ask_price / price_quantum) - 1) * price_quantum
higher_sell_price = max(proposal.sells[0].price, price_below_ask)
proposal.sells[0].price = market.quantize_order_price(self.trading_pair, higher_sell_price)
def did_fill_order(self, order_filled_event: OrderFilledEvent):
order_id = order_filled_event.order_id
market_info = self._sb_order_tracker.get_shadow_market_pair_from_order_id(order_id)
if market_info is not None:
if self._logging_options & self.OPTION_LOG_MAKER_ORDER_FILLED:
self.log_with_clock(
logging.INFO,
f"({market_info.trading_pair}) Maker "
f"{'buy' if order_filled_event.trade_type is TradeType.BUY else 'sell'} order of "
f"{order_filled_event.amount} {market_info.base_asset} filled."
)
def did_complete_buy_order(self, order_completed_event: BuyOrderCompletedEvent):
order_id = order_completed_event.order_id
limit_order_record = self._sb_order_tracker.get_limit_order(self._market_info, order_id)
if limit_order_record is None:
return
self._create_timestamp = self.current_timestamp + self._filled_order_delay
self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)
self._last_own_trade_price = limit_order_record.price
self.log_with_clock(
logging.INFO,
f"({self.trading_pair}) Maker buy order {order_id} "
f"({limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled."
)
self.notify_hb_app_with_timestamp(
f"Maker BUY order {limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency} is filled."
)
def did_complete_sell_order(self, order_completed_event: SellOrderCompletedEvent):
order_id = order_completed_event.order_id
limit_order_record: LimitOrder = self._sb_order_tracker.get_limit_order(self._market_info, order_id)
if limit_order_record is None:
return
self._create_timestamp = self.current_timestamp + self._filled_order_delay
self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)
self._last_own_trade_price = limit_order_record.price
self.log_with_clock(
logging.INFO,
f"({self.trading_pair}) Maker sell order {order_id} "
f"({limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled."
)
self.notify_hb_app_with_timestamp(
f"Maker SELL order {limit_order_record.quantity} {limit_order_record.base_currency} @ "
f"{limit_order_record.price} {limit_order_record.quote_currency} is filled."
)
def is_within_tolerance(self, current_prices: List[Decimal], proposal_prices: List[Decimal]) -> bool:
if len(current_prices) != len(proposal_prices):
return False
current_prices = sorted(current_prices)
proposal_prices = sorted(proposal_prices)
for current, proposal in zip(current_prices, proposal_prices):
if abs(proposal - current) / current > self._order_refresh_tolerance_pct:
return False
return True
def cancel_active_orders(self, proposal: Proposal):
if self._cancel_timestamp > self.current_timestamp:
return
to_defer_canceling = False
if len(self.active_orders) == 0:
return
if proposal is not None and self._order_refresh_tolerance_pct >= 0:
active_buy_prices = [Decimal(str(o.price)) for o in self.active_orders if o.is_buy]
active_sell_prices = [Decimal(str(o.price)) for o in self.active_orders if not o.is_buy]
proposal_buys = [buy.price for buy in proposal.buys]
proposal_sells = [sell.price for sell in proposal.sells]
if self.is_within_tolerance(active_buy_prices, proposal_buys) and \
self.is_within_tolerance(active_sell_prices, proposal_sells):
to_defer_canceling = True
if not to_defer_canceling:
for order in self.active_orders:
self.cancel_order(self._market_info, order.client_order_id)
else:
self.logger().info(f"Not cancelling active orders since difference between new order prices "
f"and current order prices is within "
f"{self._order_refresh_tolerance_pct:.2%} order_refresh_tolerance_pct")
self.set_timers()
def cancel_orders_below_min_spread(self):
price = self.get_price()
for order in self.active_orders:
negation = -1 if order.is_buy else 1
if (negation * (order.price - price) / price) < self._minimum_spread:
self.logger().info(f"Order is below minimum spread ({self._minimum_spread})."
f" Cancelling Order: ({'Buy' if order.is_buy else 'Sell'}) "
f"ID - {order.client_order_id}")
self.cancel_order(self._market_info, order.client_order_id)
def to_create_orders(self, proposal: Proposal) -> bool:
return (self._create_timestamp < self.current_timestamp and
proposal is not None and
len(self.active_orders) == 0)
def execute_orders_proposal(self, proposal: Proposal, position_action: PositionAction):
orders_created = False
if len(proposal.buys) > 0:
if position_action == PositionAction.CLOSE:
if self.current_timestamp < self._next_buy_exit_order_timestamp:
return
else:
self._next_buy_exit_order_timestamp = self.current_timestamp + self.filled_order_delay
if self._logging_options & self.OPTION_LOG_CREATE_ORDER:
price_quote_str = [f"{buy.size.normalize()} {self.base_asset}, "
f"{buy.price.normalize()} {self.quote_asset}"
for buy in proposal.buys]
self.logger().info(
f"({self.trading_pair}) Creating {len(proposal.buys)} {self._close_order_type.name} bid orders "
f"at (Size, Price): {price_quote_str} to {position_action.name} position."
)
for buy in proposal.buys:
bid_order_id = self.buy_with_specific_market(
self._market_info,
buy.size,
order_type=self._close_order_type,
price=buy.price,
position_action=position_action
)
if position_action == PositionAction.CLOSE:
self._exit_orders[bid_order_id] = self.current_timestamp
orders_created = True
if len(proposal.sells) > 0:
if position_action == PositionAction.CLOSE:
if self.current_timestamp < self._next_sell_exit_order_timestamp:
return
else:
self._next_sell_exit_order_timestamp = self.current_timestamp + self.filled_order_delay
if self._logging_options & self.OPTION_LOG_CREATE_ORDER:
price_quote_str = [f"{sell.size.normalize()} {self.base_asset}, "
f"{sell.price.normalize()} {self.quote_asset}"
for sell in proposal.sells]
self.logger().info(
f"({self.trading_pair}) Creating {len(proposal.sells)} {self._close_order_type.name} ask "
f"orders at (Size, Price): {price_quote_str} to {position_action.name} position."
)
for sell in proposal.sells:
ask_order_id = self.sell_with_specific_market(
self._market_info,
sell.size,
order_type=self._close_order_type,
price=sell.price,
position_action=position_action
)
if position_action == PositionAction.CLOSE:
self._exit_orders[ask_order_id] = self.current_timestamp
orders_created = True
if orders_created:
self.set_timers()
def set_timers(self):
next_cycle = self.current_timestamp + self._order_refresh_time
if self._create_timestamp <= self.current_timestamp:
self._create_timestamp = next_cycle
if self._cancel_timestamp <= self.current_timestamp:
self._cancel_timestamp = min(self._create_timestamp, next_cycle)
def notify_hb_app(self, msg: str):
if self._hb_app_notification:
super().notify_hb_app(msg)
def get_price_type(self, price_type_str: str) -> PriceType:
if price_type_str == "mid_price":
return PriceType.MidPrice
elif price_type_str == "best_bid":
return PriceType.BestBid
elif price_type_str == "best_ask":
return PriceType.BestAsk
elif price_type_str == "last_price":
return PriceType.LastTrade
elif price_type_str == 'last_own_trade_price':
return PriceType.LastOwnTrade
elif price_type_str == "custom":
return PriceType.Custom
else:
raise ValueError(f"Unrecognized price type string {price_type_str}.")
| true
| true
|
1c40aeff2a010938c435dea825268ce34f9171c9
| 3,317
|
py
|
Python
|
tensorflow_datasets/scripts/documentation/document_datasets_test.py
|
harsh020/datasets
|
b4ad3617b279ec65356e696c4c860458621976f6
|
[
"Apache-2.0"
] | 1
|
2020-12-10T06:37:27.000Z
|
2020-12-10T06:37:27.000Z
|
tensorflow_datasets/scripts/documentation/document_datasets_test.py
|
Jinwook-shim/datasets
|
815037e87150e3c8a557d91a68b07e8ffb6a2a86
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/scripts/documentation/document_datasets_test.py
|
Jinwook-shim/datasets
|
815037e87150e3c8a557d91a68b07e8ffb6a2a86
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of `document_datasets.py`."""
import functools
import pytest
import tensorflow_datasets as tfds
from tensorflow_datasets.scripts.documentation import doc_utils
from tensorflow_datasets.scripts.documentation import document_datasets
class DummyDatasetConfigs(tfds.testing.DummyDataset):
"""Builder with config and manual instructions."""
MANUAL_DOWNLOAD_INSTRUCTIONS = """Some manual instructions."""
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name='config_name',
version=tfds.core.Version('0.0.1'),
description='Config description.',
),
]
class DummyDatasetConfigsSharedVersion(tfds.testing.DummyDataset):
"""Builder with config ."""
# No BuilderConfig description, and version shared across configs.
VERSION = tfds.core.Version('1.0.0')
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(name='config_name'),
]
@pytest.fixture
def document_single_builder_fn(tmp_path):
yield functools.partial(
document_datasets._document_single_builder,
visu_doc_util=doc_utils.VisualizationDocUtil(
base_path=tmp_path,
base_url=doc_utils.DocUtilPaths.fig_base_url,
),
df_doc_util=doc_utils.DataframeDocUtil(
base_path=tmp_path,
base_url=doc_utils.DocUtilPaths.df_base_url,
),
nightly_doc_util=None,
)
def test_document_datasets():
all_docs = list(document_datasets.iter_documentation_builders(
datasets=['mnist', 'coco'], # Builder with and without config
doc_util_paths=doc_utils.DocUtilPaths(
fig_base_path=None,
df_base_path=None,
nightly_path=None,
),
))
assert {d.name for d in all_docs} == {'mnist', 'coco'}
def test_with_config(document_single_builder_fn): # pylint: disable=redefined-outer-name
"""Test that builder with configs are correctly generated."""
doc = document_single_builder_fn(DummyDatasetConfigs.name)
assert 'Some manual instructions.' in doc.content
assert 'Minimal DatasetBuilder.' in doc.content # Shared description.
# Config-description
assert '**Config description**: Config description.' in doc.content
assert (
'<meta itemprop="url" content="'
f'https://www.tensorflow.org/datasets/catalog/{DummyDatasetConfigs.name}"'
' />'
) in doc.content
def test_with_config_shared_version(document_single_builder_fn): # pylint: disable=redefined-outer-name
"""Test that builder with configs are correctly generated."""
doc = document_single_builder_fn(DummyDatasetConfigsSharedVersion.name)
assert 'Minimal DatasetBuilder.' in doc.content # Shared description.
assert 'Config description:' not in doc.content # No config description
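# --- Illustrative sketch, not part of the test file above ---
# The document_single_builder_fn fixture above pre-binds the doc utils with
# functools.partial so each test only supplies the builder name.  A tiny,
# hypothetical stand-alone version of the same pattern:
import functools

def render(template, name):
    return template.format(name=name)

render_catalog_entry = functools.partial(render, "Catalog entry for {name}")
assert render_catalog_entry("mnist") == "Catalog entry for mnist"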
| 34.915789
| 104
| 0.737112
|
import functools
import pytest
import tensorflow_datasets as tfds
from tensorflow_datasets.scripts.documentation import doc_utils
from tensorflow_datasets.scripts.documentation import document_datasets
class DummyDatasetConfigs(tfds.testing.DummyDataset):
MANUAL_DOWNLOAD_INSTRUCTIONS = """Some manual instructions."""
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name='config_name',
version=tfds.core.Version('0.0.1'),
description='Config description.',
),
]
class DummyDatasetConfigsSharedVersion(tfds.testing.DummyDataset):
VERSION = tfds.core.Version('1.0.0')
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(name='config_name'),
]
@pytest.fixture
def document_single_builder_fn(tmp_path):
yield functools.partial(
document_datasets._document_single_builder,
visu_doc_util=doc_utils.VisualizationDocUtil(
base_path=tmp_path,
base_url=doc_utils.DocUtilPaths.fig_base_url,
),
df_doc_util=doc_utils.DataframeDocUtil(
base_path=tmp_path,
base_url=doc_utils.DocUtilPaths.df_base_url,
),
nightly_doc_util=None,
)
def test_document_datasets():
all_docs = list(document_datasets.iter_documentation_builders(
datasets=['mnist', 'coco'],
doc_util_paths=doc_utils.DocUtilPaths(
fig_base_path=None,
df_base_path=None,
nightly_path=None,
),
))
assert {d.name for d in all_docs} == {'mnist', 'coco'}
def test_with_config(document_single_builder_fn):
doc = document_single_builder_fn(DummyDatasetConfigs.name)
assert 'Some manual instructions.' in doc.content
assert 'Minimal DatasetBuilder.' in doc.content
assert '**Config description**: Config description.' in doc.content
assert (
'<meta itemprop="url" content="'
f'https://www.tensorflow.org/datasets/catalog/{DummyDatasetConfigs.name}"'
' />'
) in doc.content
def test_with_config_shared_version(document_single_builder_fn):
doc = document_single_builder_fn(DummyDatasetConfigsSharedVersion.name)
assert 'Minimal DatasetBuilder.' in doc.content
assert 'Config description:' not in doc.content
| true
| true
|
1c40af161c7b42ead2f0f09971dbedcc1442595c
| 6,044
|
py
|
Python
|
pysnmp-with-texts/Chromatis-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/Chromatis-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/Chromatis-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module Chromatis-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Chromatis-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:34:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Counter64, ObjectIdentity, Counter32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, ModuleIdentity, MibIdentifier, Integer32, Gauge32, Bits, NotificationType, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Counter64", "ObjectIdentity", "Counter32", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "ModuleIdentity", "MibIdentifier", "Integer32", "Gauge32", "Bits", "NotificationType", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
chromatis = ModuleIdentity((1, 3, 6, 1, 4, 1, 3695))
chromatis.setRevisions(('1999-05-17 18:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: chromatis.setRevisionsDescriptions(('Compiled for the first time by Zvika',))
if mibBuilder.loadTexts: chromatis.setLastUpdated('9905170000Z')
if mibBuilder.loadTexts: chromatis.setOrganization('Chromatis Networks Inc.')
if mibBuilder.loadTexts: chromatis.setContactInfo('Chromatis Networks 21 c Yagea Kapaim , Kiryat Arye, Petach Tikva, Israel Phone: 972-3-9231030 Fax: 972-3-9231050 emil@chromatis.com')
if mibBuilder.loadTexts: chromatis.setDescription("This MIB module is the SNMP version of Chromatis Networks' Metropolis")
chrCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1))
chrProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2))
chrComHW = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1))
chrComIf = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2))
chrComConfigVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 3))
chrComSwVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 4))
chrComAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 5))
chrComTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 6))
chrComActions = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 7))
chrComCompressData = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 8))
chrComAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9))
chrComPM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10))
chrComFM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 11))
chrComProtection = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12))
chrComNetwork = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13))
chrComHwNe = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1, 1))
chrComIfSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 1))
chrComIfAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 2))
chrComIfOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 3))
chrComIfDS3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 4))
chrComIfEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 5))
chrComAtmVpl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1))
chrComAtmVcl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 2))
chrComPmOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 1))
chrComPmSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 2))
chrComPmDs3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 3))
chrComPmAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 4))
chrComPmEth = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 5))
chrComProtectionGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 1))
chrComProtectionVp = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 2))
chrComProtectionVc = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 3))
chrComProtectSinglePath = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 4))
chrComProtectEquip = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 5))
chrComNetClockSync = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13, 1))
chrProductsMetropolis2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 1))
chrProductsMetropolis2500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 2))
chrProductsMetropolis4000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 3))
chrProductsMetropolis4500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 4))
mibBuilder.exportSymbols("Chromatis-MIB", chrComFM=chrComFM, chrProductsMetropolis4500=chrProductsMetropolis4500, chrComActions=chrComActions, chrComIfSonet=chrComIfSonet, chrComHW=chrComHW, chrComPmEth=chrComPmEth, chrComIf=chrComIf, PYSNMP_MODULE_ID=chromatis, chrComProtectionGroup=chrComProtectionGroup, chrComTrap=chrComTrap, chrComCompressData=chrComCompressData, chrComNetwork=chrComNetwork, chrCommon=chrCommon, chrComConfigVersion=chrComConfigVersion, chrComNetClockSync=chrComNetClockSync, chrComProtectSinglePath=chrComProtectSinglePath, chrComIfAtm=chrComIfAtm, chrComPmOptics=chrComPmOptics, chrComProtectionVc=chrComProtectionVc, chrComAtmVpl=chrComAtmVpl, chrComPM=chrComPM, chrComAtmVcl=chrComAtmVcl, chrComIfOptics=chrComIfOptics, chrComProtectionVp=chrComProtectionVp, chrProductsMetropolis2000=chrProductsMetropolis2000, chromatis=chromatis, chrComPmSonet=chrComPmSonet, chrComSwVersion=chrComSwVersion, chrComProtectEquip=chrComProtectEquip, chrComHwNe=chrComHwNe, chrComIfEthernet=chrComIfEthernet, chrComAccess=chrComAccess, chrProductsMetropolis2500=chrProductsMetropolis2500, chrComProtection=chrComProtection, chrProducts=chrProducts, chrComIfDS3=chrComIfDS3, chrComPmAtm=chrComPmAtm, chrProductsMetropolis4000=chrProductsMetropolis4000, chrComPmDs3=chrComPmDs3, chrComAtm=chrComAtm)
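# --- Illustrative sketch, not part of the MIB module above ---
# Each MibIdentifier above is a tuple of sub-identifiers under the Chromatis
# enterprise arc 1.3.6.1.4.1.3695.  A hypothetical helper showing how such a
# tuple maps to dotted OID notation:
chromatis_arc = (1, 3, 6, 1, 4, 1, 3695)
chr_com_hw_oid = chromatis_arc + (1, 1)

def to_dotted(oid):
    return ".".join(str(sub_id) for sub_id in oid)

assert to_dotted(chr_com_hw_oid) == "1.3.6.1.4.1.3695.1.1"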
| 97.483871
| 1,308
| 0.741727
|
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Counter64, ObjectIdentity, Counter32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, ModuleIdentity, MibIdentifier, Integer32, Gauge32, Bits, NotificationType, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Counter64", "ObjectIdentity", "Counter32", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "ModuleIdentity", "MibIdentifier", "Integer32", "Gauge32", "Bits", "NotificationType", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
chromatis = ModuleIdentity((1, 3, 6, 1, 4, 1, 3695))
chromatis.setRevisions(('1999-05-17 18:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: chromatis.setRevisionsDescriptions(('Compiled for the first time by Zvika',))
if mibBuilder.loadTexts: chromatis.setLastUpdated('9905170000Z')
if mibBuilder.loadTexts: chromatis.setOrganization('Chromatis Networks Inc.')
if mibBuilder.loadTexts: chromatis.setContactInfo('Chromatis Networks 21 c Yagea Kapaim , Kiryat Arye, Petach Tikva, Israel Phone: 972-3-9231030 Fax: 972-3-9231050 emil@chromatis.com')
if mibBuilder.loadTexts: chromatis.setDescription("This MIB module is the SNMP version of Chromatis Networks' Metropolis")
chrCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1))
chrProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2))
chrComHW = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1))
chrComIf = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2))
chrComConfigVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 3))
chrComSwVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 4))
chrComAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 5))
chrComTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 6))
chrComActions = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 7))
chrComCompressData = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 8))
chrComAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9))
chrComPM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10))
chrComFM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 11))
chrComProtection = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12))
chrComNetwork = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13))
chrComHwNe = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1, 1))
chrComIfSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 1))
chrComIfAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 2))
chrComIfOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 3))
chrComIfDS3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 4))
chrComIfEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 5))
chrComAtmVpl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1))
chrComAtmVcl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 2))
chrComPmOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 1))
chrComPmSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 2))
chrComPmDs3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 3))
chrComPmAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 4))
chrComPmEth = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 5))
chrComProtectionGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 1))
chrComProtectionVp = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 2))
chrComProtectionVc = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 3))
chrComProtectSinglePath = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 4))
chrComProtectEquip = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 5))
chrComNetClockSync = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13, 1))
chrProductsMetropolis2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 1))
chrProductsMetropolis2500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 2))
chrProductsMetropolis4000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 3))
chrProductsMetropolis4500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 4))
mibBuilder.exportSymbols("Chromatis-MIB", chrComFM=chrComFM, chrProductsMetropolis4500=chrProductsMetropolis4500, chrComActions=chrComActions, chrComIfSonet=chrComIfSonet, chrComHW=chrComHW, chrComPmEth=chrComPmEth, chrComIf=chrComIf, PYSNMP_MODULE_ID=chromatis, chrComProtectionGroup=chrComProtectionGroup, chrComTrap=chrComTrap, chrComCompressData=chrComCompressData, chrComNetwork=chrComNetwork, chrCommon=chrCommon, chrComConfigVersion=chrComConfigVersion, chrComNetClockSync=chrComNetClockSync, chrComProtectSinglePath=chrComProtectSinglePath, chrComIfAtm=chrComIfAtm, chrComPmOptics=chrComPmOptics, chrComProtectionVc=chrComProtectionVc, chrComAtmVpl=chrComAtmVpl, chrComPM=chrComPM, chrComAtmVcl=chrComAtmVcl, chrComIfOptics=chrComIfOptics, chrComProtectionVp=chrComProtectionVp, chrProductsMetropolis2000=chrProductsMetropolis2000, chromatis=chromatis, chrComPmSonet=chrComPmSonet, chrComSwVersion=chrComSwVersion, chrComProtectEquip=chrComProtectEquip, chrComHwNe=chrComHwNe, chrComIfEthernet=chrComIfEthernet, chrComAccess=chrComAccess, chrProductsMetropolis2500=chrProductsMetropolis2500, chrComProtection=chrComProtection, chrProducts=chrProducts, chrComIfDS3=chrComIfDS3, chrComPmAtm=chrComPmAtm, chrProductsMetropolis4000=chrProductsMetropolis4000, chrComPmDs3=chrComPmDs3, chrComAtm=chrComAtm)
| true
| true
|
1c40afb51f9b020b46acfa7b2254c38faade74bb
| 660
|
py
|
Python
|
app/src/short_urls/tasks.py
|
gustavodsf/blue-code-be-test
|
26b14639ab12fdccb840b8cdaf2f4386ec965bc6
|
[
"Apache-2.0"
] | 1
|
2022-02-10T01:57:31.000Z
|
2022-02-10T01:57:31.000Z
|
app/src/short_urls/tasks.py
|
gustavodsf/blue-code-be-test
|
26b14639ab12fdccb840b8cdaf2f4386ec965bc6
|
[
"Apache-2.0"
] | null | null | null |
app/src/short_urls/tasks.py
|
gustavodsf/blue-code-be-test
|
26b14639ab12fdccb840b8cdaf2f4386ec965bc6
|
[
"Apache-2.0"
] | null | null | null |
import time
import sqlite3
import requests
from bs4 import BeautifulSoup
def threaded_task(shorterObj):
title = get_title_from_page(shorterObj.original_url)
add_title_to_database(shorterObj.id, title)
def add_title_to_database(id, title):
con = sqlite3.connect('app/src/core/flask_boilerplate_main.db')
cur = con.cursor()
cur.execute("UPDATE short_urls SET page_title = :title WHERE id = :id", {'id': id, 'title': title})
con.commit()
con.close()
def get_title_from_page(url):
try:
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
title = soup.find('title').text
return title
    except Exception as exc:
        print(f'error fetching title from {url}: {exc}')
        return None
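# --- Illustrative sketch, not part of the module above ---
# add_title_to_database() above uses sqlite3 named placeholders (:id, :title),
# letting the driver bind values instead of formatting them into the SQL
# string.  A hypothetical in-memory round trip of the same pattern:
import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE short_urls (id INTEGER PRIMARY KEY, page_title TEXT)")
con.execute("INSERT INTO short_urls (id, page_title) VALUES (1, NULL)")
con.execute("UPDATE short_urls SET page_title = :title WHERE id = :id",
            {'id': 1, 'title': 'Example Domain'})
assert con.execute("SELECT page_title FROM short_urls WHERE id = 1").fetchone()[0] == 'Example Domain'
con.close()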
| 27.5
| 101
| 0.719697
|
import time
import sqlite3
import requests
from bs4 import BeautifulSoup
def threaded_task(shorterObj):
title = get_title_from_page(shorterObj.original_url)
add_title_to_database(shorterObj.id, title)
def add_title_to_database(id, title):
con = sqlite3.connect('app/src/core/flask_boilerplate_main.db')
cur = con.cursor()
cur.execute("UPDATE short_urls SET page_title = :title WHERE id = :id", {'id': id, 'title': title})
con.commit()
con.close()
def get_title_from_page(url):
try:
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
title = soup.find('title').text
return title
    except Exception as exc:
        print(f'error fetching title from {url}: {exc}')
        return None
| true
| true
|
1c40afca3bc43ba6bb32c9ef1a343d7db2bc9737
| 4,390
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
genteshare-project/genteshare
|
b1407e7977c52bac52326cec9c7243877d0b273d
|
[
"MIT"
] | 3
|
2018-05-04T01:33:30.000Z
|
2018-08-08T14:54:21.000Z
|
contrib/seeds/generate-seeds.py
|
genteshare-project/genteshare
|
b1407e7977c52bac52326cec9c7243877d0b273d
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
genteshare-project/genteshare
|
b1407e7977c52bac52326cec9c7243877d0b273d
|
[
"MIT"
] | 1
|
2019-08-18T00:42:19.000Z
|
2019-08-18T00:42:19.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef GENTESHARE_CHAINPARAMSSEEDS_H\n')
g.write('#define GENTESHARE_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the genteshare network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // GENTESHARE_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
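# --- Illustrative sketch, not part of the script above ---
# A worked example of the IPv4 packing done by name_to_ipv6(): the four
# address bytes are appended to the 12-byte IPv4-mapped IPv6 prefix
# (::ffff:0:0/96).  The helper below is hypothetical and mirrors only the
# IPv4 branch of the script.
def ipv4_to_seed_bytes(addr):
    prefix = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
    return prefix + bytearray(int(octet) for octet in addr.split('.'))

packed = ipv4_to_seed_bytes('1.2.3.4')
hoststr = ','.join('0x%02x' % b for b in packed)
# process_nodes() would then emit a line like:  {{<hoststr>}, 9999}
assert len(packed) == 16
assert hoststr.endswith('0xff,0xff,0x01,0x02,0x03,0x04')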
| 31.582734
| 98
| 0.583599
|
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef GENTESHARE_CHAINPARAMSSEEDS_H\n')
g.write('#define GENTESHARE_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the genteshare network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // GENTESHARE_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
1c40b03a78b4dd52d0c701603722180fc0d2466c
| 568
|
py
|
Python
|
esphomeflasher/const.py
|
mozzwald/esphome-flasher
|
419b180845352cce92b94766c8af777d0ddf9d0b
|
[
"MIT"
] | null | null | null |
esphomeflasher/const.py
|
mozzwald/esphome-flasher
|
419b180845352cce92b94766c8af777d0ddf9d0b
|
[
"MIT"
] | null | null | null |
esphomeflasher/const.py
|
mozzwald/esphome-flasher
|
419b180845352cce92b94766c8af777d0ddf9d0b
|
[
"MIT"
] | 1
|
2020-07-05T13:40:52.000Z
|
2020-07-05T13:40:52.000Z
|
import re
__version__ = "1.2.0"
ESP32_DEFAULT_BOOTLOADER_FORMAT = 'https://fujinet.online/firmware/bootloader.bin'
ESP32_DEFAULT_OTA_DATA = 'https://fujinet.online/firmware/boot_app0.bin'
ESP32_DEFAULT_PARTITIONS = 'https://fujinet.online/firmware/partitions.bin'
ESP32_DEFAULT_FIRMWARE = 'https://fujinet.online/firmware/firmware.bin'
ESP32_DEFAULT_SPIFFS = 'https://fujinet.online/firmware/spiffs.bin'
# https://stackoverflow.com/a/3809435/8924614
HTTP_REGEX = re.compile(r'https?://(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)')
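# --- Illustrative sketch, not part of the module above ---
# Hypothetical usage of the HTTP_REGEX defined above: it distinguishes a
# remote firmware URL (to be downloaded) from a local file path.
import re

HTTP_REGEX = re.compile(r'https?://(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)')

assert HTTP_REGEX.match('https://fujinet.online/firmware/firmware.bin')
assert HTTP_REGEX.match('C:/local/firmware.bin') is None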
| 43.692308
| 114
| 0.732394
|
import re
__version__ = "1.2.0"
ESP32_DEFAULT_BOOTLOADER_FORMAT = 'https://fujinet.online/firmware/bootloader.bin'
ESP32_DEFAULT_OTA_DATA = 'https://fujinet.online/firmware/boot_app0.bin'
ESP32_DEFAULT_PARTITIONS = 'https://fujinet.online/firmware/partitions.bin'
ESP32_DEFAULT_FIRMWARE = 'https://fujinet.online/firmware/firmware.bin'
ESP32_DEFAULT_SPIFFS = 'https://fujinet.online/firmware/spiffs.bin'
HTTP_REGEX = re.compile(r'https?://(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)')
| true
| true
|