hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7332ca069aa2e3e5a2ab0ac6b6ea50d6e8f261c | 1,712 | py | Python | gibolt/__init__.py | Kozea/gibolt | 5af60ada3f611aba3d6a25d61cf060c8f71351eb | [
"MIT"
] | 6 | 2015-04-10T21:30:41.000Z | 2021-05-03T21:10:44.000Z | gibolt/__init__.py | Kozea/gibolt | 5af60ada3f611aba3d6a25d61cf060c8f71351eb | [
"MIT"
] | 138 | 2015-04-08T09:55:55.000Z | 2021-07-27T09:41:37.000Z | gibolt/__init__.py | Kozea/gibolt | 5af60ada3f611aba3d6a25d61cf060c8f71351eb | [
"MIT"
] | 2 | 2015-11-01T19:05:48.000Z | 2019-04-23T13:00:50.000Z | import os
from datetime import datetime
from urllib.parse import urlparse
from flask import Flask
from flask_github import GitHub
from markdown2 import markdown as from_markdown
from markupsafe import Markup
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
app = Flask(__name__)
app.config.from_envvar("FLASK_CONFIG")
if app.config.get("DEBUG"): # pragma: no cover
from sassutils.wsgi import SassMiddleware
app.wsgi_app = SassMiddleware(
app.wsgi_app,
{
"gibolt": {
"sass_path": "static/sass",
"css_path": "static/css",
"wsgi_path": "/static/css",
"strip_extension": True,
}
},
)
engine = create_engine(
app.config["SQLALCHEMY_DATABASE_URI"],
connect_args={"check_same_thread": False},
)
github = GitHub(app)
db = sessionmaker(bind=engine, autoflush=False)()
from . import routes # noqa isort:skip
@app.cli.command()
def dropdb():
filename = urlparse(app.config["SQLALCHEMY_DATABASE_URI"]).path[1:]
if os.path.isfile(filename):
os.remove(filename)
@app.template_filter()
def month(month_string):
return datetime.strptime(month_string, "%Y-%m").strftime("%B %Y")
@app.template_filter()
def day(day_string):
return datetime.strptime(day_string[:10], "%Y-%m-%d").strftime("%d %b %Y")
@app.template_filter()
def markdown(markdown_string):
return Markup(from_markdown(markdown_string or ""))
@app.template_filter()
def indicator(value):
if value is None:
return
value = float(value)
if abs(round(value) - value) < 0.001:
return int(value)
else:
return round(value, 2)
| 23.135135 | 78 | 0.665304 | import os
from datetime import datetime
from urllib.parse import urlparse
from flask import Flask
from flask_github import GitHub
from markdown2 import markdown as from_markdown
from markupsafe import Markup
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
app = Flask(__name__)
app.config.from_envvar("FLASK_CONFIG")
if app.config.get("DEBUG"):
from sassutils.wsgi import SassMiddleware
app.wsgi_app = SassMiddleware(
app.wsgi_app,
{
"gibolt": {
"sass_path": "static/sass",
"css_path": "static/css",
"wsgi_path": "/static/css",
"strip_extension": True,
}
},
)
engine = create_engine(
app.config["SQLALCHEMY_DATABASE_URI"],
connect_args={"check_same_thread": False},
)
github = GitHub(app)
db = sessionmaker(bind=engine, autoflush=False)()
from . import routes
@app.cli.command()
def dropdb():
filename = urlparse(app.config["SQLALCHEMY_DATABASE_URI"]).path[1:]
if os.path.isfile(filename):
os.remove(filename)
@app.template_filter()
def month(month_string):
return datetime.strptime(month_string, "%Y-%m").strftime("%B %Y")
@app.template_filter()
def day(day_string):
return datetime.strptime(day_string[:10], "%Y-%m-%d").strftime("%d %b %Y")
@app.template_filter()
def markdown(markdown_string):
return Markup(from_markdown(markdown_string or ""))
@app.template_filter()
def indicator(value):
if value is None:
return
value = float(value)
if abs(round(value) - value) < 0.001:
return int(value)
else:
return round(value, 2)
| true | true |
f7332de8f6f8e0ed99eebc5c1512fa34ac1039b2 | 899 | py | Python | numba/cuda/simulator/cudadrv/driver.py | seberg/numba | e617b39a0b4b23d7b69d16f482fd66b4ac6cc307 | [
"BSD-2-Clause"
] | 6,620 | 2015-01-04T08:51:04.000Z | 2022-03-31T12:52:18.000Z | numba/cuda/simulator/cudadrv/driver.py | Copastr/numba | 3884e95bdec70c385291bec820beecc5ab64933a | [
"BSD-2-Clause"
] | 6,457 | 2015-01-04T03:18:41.000Z | 2022-03-31T17:38:42.000Z | numba/cuda/simulator/cudadrv/driver.py | Copastr/numba | 3884e95bdec70c385291bec820beecc5ab64933a | [
"BSD-2-Clause"
] | 930 | 2015-01-25T02:33:03.000Z | 2022-03-30T14:10:32.000Z | '''
Most of the driver API is unsupported in the simulator, but some stubs are
provided to allow tests to import correctly.
'''
def device_memset(dst, val, size, stream=0):
dst.view('u1')[:size].fill(bytes([val])[0])
def host_to_device(dst, src, size, stream=0):
dst.view('u1')[:size] = src.view('u1')[:size]
def device_to_host(dst, src, size, stream=0):
host_to_device(dst, src, size)
def device_memory_size(obj):
return obj.itemsize * obj.size
def device_to_device(dst, src, size, stream=0):
host_to_device(dst, src, size)
class FakeDriver(object):
def get_device_count(self):
return 1
driver = FakeDriver()
Linker = None
class LinkerError(RuntimeError):
pass
class CudaAPIError(RuntimeError):
pass
def launch_kernel(*args, **kwargs):
msg = 'Launching kernels directly is not supported in the simulator'
raise RuntimeError(msg)
| 18.729167 | 74 | 0.695217 |
def device_memset(dst, val, size, stream=0):
dst.view('u1')[:size].fill(bytes([val])[0])
def host_to_device(dst, src, size, stream=0):
dst.view('u1')[:size] = src.view('u1')[:size]
def device_to_host(dst, src, size, stream=0):
host_to_device(dst, src, size)
def device_memory_size(obj):
return obj.itemsize * obj.size
def device_to_device(dst, src, size, stream=0):
host_to_device(dst, src, size)
class FakeDriver(object):
def get_device_count(self):
return 1
driver = FakeDriver()
Linker = None
class LinkerError(RuntimeError):
pass
class CudaAPIError(RuntimeError):
pass
def launch_kernel(*args, **kwargs):
msg = 'Launching kernels directly is not supported in the simulator'
raise RuntimeError(msg)
| true | true |
f7332fce6cf86af2864f93388297a3b7768884fa | 985 | py | Python | testreport/migrations/0031_extuser.py | mikiec84/badger-api | d0764fa0fd35ebfd7581e2a0218b59be9d13e814 | [
"MIT"
] | null | null | null | testreport/migrations/0031_extuser.py | mikiec84/badger-api | d0764fa0fd35ebfd7581e2a0218b59be9d13e814 | [
"MIT"
] | 2 | 2021-03-19T23:41:57.000Z | 2021-06-10T23:08:34.000Z | testreport/migrations/0031_extuser.py | gaybro8777/badger-api | d0764fa0fd35ebfd7581e2a0218b59be9d13e814 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('testreport', '0030_launch_duration'),
]
operations = [
migrations.CreateModel(
name='ExtUser',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('default_project', models.IntegerField(default=None, null=True, blank=True)),
('launches_on_page', models.IntegerField(default=10)),
('testresults_on_page', models.IntegerField(default=25)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, related_name='settings')),
],
options={
},
bases=(models.Model,),
),
]
| 32.833333 | 114 | 0.610152 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('testreport', '0030_launch_duration'),
]
operations = [
migrations.CreateModel(
name='ExtUser',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('default_project', models.IntegerField(default=None, null=True, blank=True)),
('launches_on_page', models.IntegerField(default=10)),
('testresults_on_page', models.IntegerField(default=25)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, related_name='settings')),
],
options={
},
bases=(models.Model,),
),
]
| true | true |
f7333015fa83373d779a8e1eebc2dbe56322417d | 167 | py | Python | Python Pattern Programs/Symbol Patterns/Pattern 60.py | trial1user/Printing-Pattern-Programs | dde29e056b8e067fb3a824edb7ecb7dd9c9a776a | [
"MIT"
] | 61 | 2021-01-07T03:56:25.000Z | 2022-02-26T14:39:52.000Z | PythonPatternPrograms/SymbolPatterns/Pattern 60.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 51 | 2020-12-25T17:06:26.000Z | 2021-05-07T12:52:56.000Z | PythonPatternPrograms/SymbolPatterns/Pattern 60.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 13 | 2021-01-07T09:50:21.000Z | 2021-12-17T11:03:57.000Z | n = 5
for x in range(n, 0, -1):
for y in range(n, 0, -1):
if y > x:
print("*", end=" ")
else:
print(x, end=" ")
print() | 20.875 | 31 | 0.359281 | n = 5
for x in range(n, 0, -1):
for y in range(n, 0, -1):
if y > x:
print("*", end=" ")
else:
print(x, end=" ")
print() | true | true |
f73330b38cff9d80573d59c85199da641ef2fb6b | 114 | py | Python | code_gazay/lenin/lenin/transforms.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | null | null | null | code_gazay/lenin/lenin/transforms.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | 1 | 2021-03-25T23:31:26.000Z | 2021-03-25T23:31:28.000Z | code_gazay/lenin/lenin/transforms.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | 1 | 2018-11-08T09:30:38.000Z | 2018-11-08T09:30:38.000Z | import numpy as np
def hwc_to_chw(image):
return np.einsum('hwc->chw', image) # change to pytorch format
| 22.8 | 70 | 0.692982 | import numpy as np
def hwc_to_chw(image):
return np.einsum('hwc->chw', image)
| true | true |
f73331d5f5cbfc3043d4165144e1118dd13cb4da | 601 | py | Python | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length, ValidationError
class SearchForm(FlaskForm):
search_query = StringField('What cuisine are you in the mood for today?', validators=[DataRequired(),
Length(3, 20, "Must be longer than 3 characters and under 20")],
render_kw=({'placeholder': "Enter a cuisine (e.g. Nepali, Thai, etc)"}))
submit = SubmitField('Search')
class SurpriseForm(FlaskForm):
surprise_me_button = SubmitField("Surprise Me") | 50.083333 | 106 | 0.685524 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length, ValidationError
class SearchForm(FlaskForm):
search_query = StringField('What cuisine are you in the mood for today?', validators=[DataRequired(),
Length(3, 20, "Must be longer than 3 characters and under 20")],
render_kw=({'placeholder': "Enter a cuisine (e.g. Nepali, Thai, etc)"}))
submit = SubmitField('Search')
class SurpriseForm(FlaskForm):
surprise_me_button = SubmitField("Surprise Me") | true | true |
f73331fcb36a3a70a8bb47fbbad3290ddeab907f | 5,914 | py | Python | pycdp/cdp/background_service.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
] | null | null | null | pycdp/cdp/background_service.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
] | null | null | null | pycdp/cdp/background_service.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
] | null | null | null | # DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: BackgroundService (experimental)
from __future__ import annotations
import enum
import typing
from dataclasses import dataclass
from .util import event_class, T_JSON_DICT
from . import network
from . import service_worker
class ServiceName(enum.Enum):
'''
The Background Service that will be associated with the commands/events.
Every Background Service operates independently, but they share the same
API.
'''
BACKGROUND_FETCH = "backgroundFetch"
BACKGROUND_SYNC = "backgroundSync"
PUSH_MESSAGING = "pushMessaging"
NOTIFICATIONS = "notifications"
PAYMENT_HANDLER = "paymentHandler"
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> ServiceName:
return cls(json)
@dataclass
class EventMetadata:
'''
A key-value pair for additional event information to pass along.
'''
key: str
value: str
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['key'] = self.key
json['value'] = self.value
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> EventMetadata:
return cls(
key=str(json['key']),
value=str(json['value']),
)
@dataclass
class BackgroundServiceEvent:
#: Timestamp of the event (in seconds).
timestamp: network.TimeSinceEpoch
#: The origin this event belongs to.
origin: str
#: The Service Worker ID that initiated the event.
service_worker_registration_id: service_worker.RegistrationID
#: The Background Service this event belongs to.
service: ServiceName
#: A description of the event.
event_name: str
#: An identifier that groups related events together.
instance_id: str
#: A list of event-specific information.
event_metadata: typing.List[EventMetadata]
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['timestamp'] = self.timestamp.to_json()
json['origin'] = self.origin
json['serviceWorkerRegistrationId'] = self.service_worker_registration_id.to_json()
json['service'] = self.service.to_json()
json['eventName'] = self.event_name
json['instanceId'] = self.instance_id
json['eventMetadata'] = [i.to_json() for i in self.event_metadata]
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEvent:
return cls(
timestamp=network.TimeSinceEpoch.from_json(json['timestamp']),
origin=str(json['origin']),
service_worker_registration_id=service_worker.RegistrationID.from_json(json['serviceWorkerRegistrationId']),
service=ServiceName.from_json(json['service']),
event_name=str(json['eventName']),
instance_id=str(json['instanceId']),
event_metadata=[EventMetadata.from_json(i) for i in json['eventMetadata']],
)
def start_observing(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enables event updates for the service.
:param service:
'''
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.startObserving',
'params': params,
}
json = yield cmd_dict
def stop_observing(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disables event updates for the service.
:param service:
'''
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.stopObserving',
'params': params,
}
json = yield cmd_dict
def set_recording(
should_record: bool,
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Set the recording state for the service.
:param should_record:
:param service:
'''
params: T_JSON_DICT = dict()
params['shouldRecord'] = should_record
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.setRecording',
'params': params,
}
json = yield cmd_dict
def clear_events(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Clears all stored data for the service.
:param service:
'''
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.clearEvents',
'params': params,
}
json = yield cmd_dict
@event_class('BackgroundService.recordingStateChanged')
@dataclass
class RecordingStateChanged:
'''
Called when the recording state for the service has been updated.
'''
is_recording: bool
service: ServiceName
@classmethod
def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged:
return cls(
is_recording=bool(json['isRecording']),
service=ServiceName.from_json(json['service'])
)
@event_class('BackgroundService.backgroundServiceEventReceived')
@dataclass
class BackgroundServiceEventReceived:
'''
Called with all existing backgroundServiceEvents when enabled, and all new
events afterwards if enabled and recording.
'''
background_service_event: BackgroundServiceEvent
@classmethod
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived:
return cls(
background_service_event=BackgroundServiceEvent.from_json(json['backgroundServiceEvent'])
)
| 28.028436 | 120 | 0.670612 |
from __future__ import annotations
import enum
import typing
from dataclasses import dataclass
from .util import event_class, T_JSON_DICT
from . import network
from . import service_worker
class ServiceName(enum.Enum):
BACKGROUND_FETCH = "backgroundFetch"
BACKGROUND_SYNC = "backgroundSync"
PUSH_MESSAGING = "pushMessaging"
NOTIFICATIONS = "notifications"
PAYMENT_HANDLER = "paymentHandler"
PERIODIC_BACKGROUND_SYNC = "periodicBackgroundSync"
def to_json(self) -> str:
return self.value
@classmethod
def from_json(cls, json: str) -> ServiceName:
return cls(json)
@dataclass
class EventMetadata:
key: str
value: str
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['key'] = self.key
json['value'] = self.value
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> EventMetadata:
return cls(
key=str(json['key']),
value=str(json['value']),
)
@dataclass
class BackgroundServiceEvent:
timestamp: network.TimeSinceEpoch
origin: str
service_worker_registration_id: service_worker.RegistrationID
service: ServiceName
event_name: str
instance_id: str
event_metadata: typing.List[EventMetadata]
def to_json(self) -> T_JSON_DICT:
json: T_JSON_DICT = dict()
json['timestamp'] = self.timestamp.to_json()
json['origin'] = self.origin
json['serviceWorkerRegistrationId'] = self.service_worker_registration_id.to_json()
json['service'] = self.service.to_json()
json['eventName'] = self.event_name
json['instanceId'] = self.instance_id
json['eventMetadata'] = [i.to_json() for i in self.event_metadata]
return json
@classmethod
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEvent:
return cls(
timestamp=network.TimeSinceEpoch.from_json(json['timestamp']),
origin=str(json['origin']),
service_worker_registration_id=service_worker.RegistrationID.from_json(json['serviceWorkerRegistrationId']),
service=ServiceName.from_json(json['service']),
event_name=str(json['eventName']),
instance_id=str(json['instanceId']),
event_metadata=[EventMetadata.from_json(i) for i in json['eventMetadata']],
)
def start_observing(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.startObserving',
'params': params,
}
json = yield cmd_dict
def stop_observing(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.stopObserving',
'params': params,
}
json = yield cmd_dict
def set_recording(
should_record: bool,
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
params: T_JSON_DICT = dict()
params['shouldRecord'] = should_record
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.setRecording',
'params': params,
}
json = yield cmd_dict
def clear_events(
service: ServiceName
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
params: T_JSON_DICT = dict()
params['service'] = service.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'BackgroundService.clearEvents',
'params': params,
}
json = yield cmd_dict
@event_class('BackgroundService.recordingStateChanged')
@dataclass
class RecordingStateChanged:
is_recording: bool
service: ServiceName
@classmethod
def from_json(cls, json: T_JSON_DICT) -> RecordingStateChanged:
return cls(
is_recording=bool(json['isRecording']),
service=ServiceName.from_json(json['service'])
)
@event_class('BackgroundService.backgroundServiceEventReceived')
@dataclass
class BackgroundServiceEventReceived:
background_service_event: BackgroundServiceEvent
@classmethod
def from_json(cls, json: T_JSON_DICT) -> BackgroundServiceEventReceived:
return cls(
background_service_event=BackgroundServiceEvent.from_json(json['backgroundServiceEvent'])
)
| true | true |
f7333211b1dbb9c47db8c96a6fe432b194c037a1 | 332 | py | Python | portfolio/migrations/0006_remove_resume_categorie.py | gbungbung/django-portfolio-site | 7810211e157eac3c76bf139e2b65dc4a73d90cbc | [
"MIT"
] | null | null | null | portfolio/migrations/0006_remove_resume_categorie.py | gbungbung/django-portfolio-site | 7810211e157eac3c76bf139e2b65dc4a73d90cbc | [
"MIT"
] | 6 | 2020-06-05T23:31:43.000Z | 2022-02-10T09:16:09.000Z | portfolio/migrations/0006_remove_resume_categorie.py | gbungbung/django-portfolio-site | 7810211e157eac3c76bf139e2b65dc4a73d90cbc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-07 10:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0005_cvcategory_resume'),
]
operations = [
migrations.RemoveField(
model_name='resume',
name='categorie',
),
]
| 18.444444 | 48 | 0.596386 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0005_cvcategory_resume'),
]
operations = [
migrations.RemoveField(
model_name='resume',
name='categorie',
),
]
| true | true |
f733343d5415138b283a4b4d7a7af72121bd8e1b | 883 | py | Python | singa_auto/advisor/__init__.py | FeynmanDNA/singa-auto | e96982adc689335a323a5a32d03b23942e01d09f | [
"Apache-2.0"
] | 35 | 2018-10-07T09:51:42.000Z | 2021-09-08T14:13:38.000Z | singa_auto/advisor/__init__.py | FeynmanDNA/singa-auto | e96982adc689335a323a5a32d03b23942e01d09f | [
"Apache-2.0"
] | 119 | 2018-10-05T14:49:39.000Z | 2022-03-11T23:49:51.000Z | singa_auto/advisor/__init__.py | FeynmanDNA/singa-auto | e96982adc689335a323a5a32d03b23942e01d09f | [
"Apache-2.0"
] | 32 | 2018-10-18T12:02:55.000Z | 2020-03-01T10:27:06.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .constants import *
from .advisor import BaseAdvisor, UnsupportedKnobError, make_advisor | 42.047619 | 68 | 0.776897 |
from .constants import *
from .advisor import BaseAdvisor, UnsupportedKnobError, make_advisor | true | true |
f733353028a2e80e2bc55f69525b458586901db5 | 20,627 | py | Python | azext_iot/central/_help.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | null | null | null | azext_iot/central/_help.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | 1 | 2020-09-02T23:25:39.000Z | 2020-09-09T14:18:33.000Z | azext_iot/central/_help.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
def load_central_help():
helps[
"iot central"
] = """
type: group
short-summary: Manage IoT Central resources.
long-summary: |
IoT Central is an IoT application platform that reduces the burden and cost of developing,
managing, and maintaining enterprise-grade IoT solutions. Choosing to build with IoT Central
gives you the opportunity to focus time, money, and energy on transforming your business
with IoT data, rather than just maintaining and updating a complex and continually evolving
IoT infrastructure.
IoT Central documentation is available at https://aka.ms/iotcentral-documentation
Additional information on CLI commands is available at https://aka.ms/azure-cli-iot-ext
"""
helps[
"iot central app"
] = """
type: group
short-summary: Manage IoT Central applications.
long-summary: Create, delete, view, and update your IoT Central apps.
"""
_load_central_devices_help()
_load_central_users_help()
_load_central_api_token_help()
_load_central_device_templates_help()
_load_central_monitors_help()
_load_central_command_help()
_load_central_compute_device_key()
# TODO: Delete this by end of Dec 2020
_load_central_deprecated_commands()
def _load_central_devices_help():
helps[
"iot central device"
] = """
type: group
short-summary: Manage and configure IoT Central devices
"""
helps[
"iot central device create"
] = """
type: command
short-summary: Create a device in IoT Central
examples:
- name: Create a device
text: >
az iot central device create
--app-id {appid}
--device-id {deviceid}
- name: Create a simulated device
text: >
az iot central device create
--app-id {appid}
--device-id {deviceid}
--instance-of {devicetemplateid}
--simulated
"""
helps[
"iot central device show"
] = """
type: command
short-summary: Get a device from IoT Central
examples:
- name: Get a device
text: >
az iot central device show
--app-id {appid}
--device-id {deviceid}
"""
helps[
"iot central device delete"
] = """
type: command
short-summary: Delete a device from IoT Central
examples:
- name: Delete a device
text: >
az iot central device delete
--app-id {appid}
--device-id {deviceid}
"""
helps[
"iot central device show-credentials"
] = """
type: command
short-summary: Get device credentials from IoT Central
examples:
- name: Get device credentials for a device
text: >
az iot central device show-credentials
--app-id {appid}
--device-id {deviceid}
"""
helps[
"iot central device registration-info"
] = """
type: command
short-summary: Get registration info on device(s) from IoT Central
long-summary: |
Note: This command can take a significant amount of time to return
if no device id is specified and your app contains a lot of devices
examples:
- name: Get registration info on specified device
text: >
az iot central device registration-info --app-id {appid} --device-id {deviceid}
"""
def _load_central_compute_device_key():
helps[
"iot central device compute-device-key"
] = """
type: command
short-summary: Generate a derived device SAS key.
long-summary: Generate a derived device key from a group-level SAS key.
examples:
- name: Basic usage
text: >
az iot central device compute-device-key --pk {primaryKey} --device-id {deviceid}
"""
def _load_central_command_help():
helps[
"iot central device command"
] = """
type: group
short-summary: Run device commands.
"""
helps[
"iot central device command history"
] = """
type: command
short-summary: Get most recent command-response request and response payload.
examples:
- name: Show command response
text: >
az iot central device command history
--app-id {appid}
--device-id {deviceid}
--interface-id {interfaceid}
--command-name {commandname}
"""
helps[
"iot central device command run"
] = """
type: command
short-summary: Run a command on a device and view associated response. Does NOT monitor property updates that the command may perform.
long-summary: |
Note: payload should be nested under "request".
i.e. if your device expects the payload in a shape {"key": "value"}
payload should be {"request": {"key": "value"}}.
--content can be pointed at a filepath too (.../path/to/payload.json)
examples:
- name: Run command response
text: >
az iot central device command run
--app-id {appid}
--device-id {deviceid}
--interface-id {interfaceid}
--command-name {commandname}
--content {payload}
- name: Short Run command response
text: >
az iot central device command run
-n {appid}
-d {deviceid}
-i {interfaceid}
--cn {commandname}
-k {payload}
"""
def _load_central_users_help():
helps[
"iot central user"
] = """
type: group
short-summary: Manage and configure IoT Central users
"""
helps[
"iot central user create"
] = """
type: command
short-summary: Add a user to the application
examples:
- name: Add a user by email to the application
text: >
az iot central user create
--user-id {userId}
--app-id {appId}
--email {emailAddress}
--role admin
- name: Add a service-principal to the application
text: >
az iot central user create
--user-id {userId}
--app-id {appId}
--tenant-id {tenantId}
--object-id {objectId}
--role operator
"""
helps[
"iot central user show"
] = """
type: command
short-summary: Get the details of a user by ID
examples:
- name: Get details of user
text: >
az iot central user show
--app-id {appid}
--user-id {userId}
"""
helps[
"iot central user delete"
] = """
type: command
short-summary: Delete a user from the application
examples:
- name: Delete a user
text: >
az iot central user delete
--app-id {appid}
--user-id {userId}
"""
helps[
"iot central user list"
] = """
type: command
short-summary: Get list of users in an application
examples:
- name: List of users
text: >
az iot central user list
--app-id {appid}
"""
def _load_central_api_token_help():
    """Register knack help text for the `az iot central api-token` command group."""
    helps[
        "iot central api-token"
    ] = """
        type: group
        short-summary: Create and Manage API tokens.
    """

    helps[
        "iot central api-token create"
    ] = """
        type: command
        short-summary: Create a new API token in the application
        long-summary: The only time you will see the value of this token is when creating the token. Ensure you store this token somewhere securely, as if you lose it, you will need to create another.
        examples:
        - name: Add new API token
          text: >
            az iot central api-token create
            --token-id {tokenId}
            --app-id {appId}
            --role admin
    """

    helps[
        "iot central api-token show"
    ] = """
        type: command
        short-summary: Get token meta data (e.g. role as a GUID, expiration)
        long-summary: API token information contains basic information about the token and does not include the value of the token.
        examples:
        - name: Get API token
          text: >
            az iot central api-token show
            --app-id {appid}
            --token-id {tokenId}
    """

    helps[
        "iot central api-token delete"
    ] = """
        type: command
        short-summary: Delete an API token from the application
        examples:
        - name: Delete an API token
          text: >
            az iot central api-token delete
            --app-id {appid}
            --token-id {tokenId}
    """

    helps[
        "iot central api-token list"
    ] = """
        type: command
        short-summary: Get a list of all token meta data (e.g. Role as a GUID and expiration)
        long-summary: Information in the list contains basic information about the tokens in the application and does not include token values.
        examples:
        - name: List of API tokens
          text: >
            az iot central api-token list
            --app-id {appid}
    """
def _load_central_device_templates_help():
    """Register knack help text for the `az iot central device-template` group."""
    helps[
        "iot central device-template"
    ] = """
        type: group
        short-summary: Manage and configure IoT Central device templates
    """

    helps[
        "iot central device-template create"
    ] = """
        type: command
        short-summary: Create a device template in IoT Central
        examples:
        - name: Create a device template with payload read from a file
          text: >
            az iot central device-template create
            --app-id {appid}
            --content {pathtofile}
            --device-template-id {devicetemplateid}
        - name: Create a device template with payload read from raw json
          text: >
            az iot central device-template create
            --app-id {appid}
            --content {json}
            --device-template-id {devicetemplateid}
    """

    helps[
        "iot central device-template show"
    ] = """
        type: command
        short-summary: Get a device template from IoT Central
        examples:
        - name: Get a device template
          text: >
            az iot central device-template show
            --app-id {appid}
            --device-template-id {devicetemplateid}
    """

    helps[
        "iot central device-template delete"
    ] = """
        type: command
        short-summary: Delete a device template from IoT Central
        long-summary: |
            Note: this is expected to fail if any devices are still associated to this template.
        examples:
        - name: Delete a device template from IoT Central
          text: >
            az iot central device-template delete
            --app-id {appid}
            --device-template-id {devicetemplateid}
    """
def _load_central_monitors_help():
    """Register knack help text for the `az iot central diagnostics` group."""
    helps[
        "iot central diagnostics"
    ] = """
        type: group
        short-summary: Perform application and device level diagnostics.
    """

    helps[
        "iot central diagnostics monitor-events"
    ] = """
        type: command
        short-summary: Monitor device telemetry & messages sent to the IoT Hub for an IoT Central app.
        long-summary: |
            EXPERIMENTAL requires Python 3.5+
            This command relies on and may install dependent Cython package (uamqp) upon first execution.
            https://github.com/Azure/azure-uamqp-python
        examples:
        - name: Basic usage
          text: >
            az iot central diagnostics monitor-events --app-id {app_id}
        - name: Basic usage when filtering on target device
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -d {device_id}
        - name: Basic usage when filtering targeted devices with a wildcard in the ID
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -d Device*d
        - name: Basic usage when filtering on module.
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -m {module_id}
        - name: Basic usage when filtering targeted modules with a wildcard in the ID
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -m Module*
        - name: Filter device and specify an Event Hub consumer group to bind to.
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --cg {consumer_group_name}
        - name: Receive message annotations (message headers)
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --properties anno
        - name: Receive message annotations + system properties. Never time out.
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --properties anno sys --timeout 0
        - name: Receive all message attributes from all device messages
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} --props all
        - name: Receive all messages and parse message payload as JSON
          text: >
            az iot central diagnostics monitor-events --app-id {app_id} --output json
    """

    helps[
        "iot central diagnostics validate-messages"
    ] = """
        type: command
        short-summary: Validate messages sent to the IoT Hub for an IoT Central app.
        long-summary: |
            EXPERIMENTAL requires Python 3.5+
            This command relies on and may install dependent Cython package (uamqp) upon first execution.
            https://github.com/Azure/azure-uamqp-python
        examples:
        - name: Basic usage
          text: >
            az iot central diagnostics validate-messages --app-id {app_id}
        - name: Output errors as they are detected
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} --style scroll
        - name: Basic usage when filtering on target device
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} -d {device_id}
        - name: Basic usage when filtering targeted devices with a wildcard in the ID
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} -d Device*
        - name: Basic usage when filtering on module.
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} -m {module_id}
        - name: Basic usage when filtering targeted modules with a wildcard in the ID
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} -m Module*
        - name: Filter device and specify an Event Hub consumer group to bind to.
          text: >
            az iot central diagnostics validate-messages --app-id {app_id} -d {device_id} --cg {consumer_group_name}
    """

    helps[
        "iot central diagnostics monitor-properties"
    ] = """
        type: command
        short-summary: Monitor desired and reported properties sent to/from the IoT Hub for an IoT Central app.
        long-summary: |
            Polls device-twin from central and compares it to the last device-twin
            Parses out properties from device-twin, and detects if changes were made
            Prints subset of properties that were changed within the polling interval
        examples:
        - name: Basic usage
          text: >
            az iot central diagnostics monitor-properties --app-id {app_id} -d {device_id}
    """

    helps[
        "iot central diagnostics validate-properties"
    ] = """
        type: command
        short-summary: Validate reported properties sent to IoT Central app.
        long-summary: |
            Performs validations on reported property updates:
            1) Warning - Properties sent by device that are not modeled in central.
            2) Warning - Properties with same name declared in multiple interfaces
            should have interface name included as part of the property update.
        examples:
        - name: Basic usage
          text: >
            az iot central diagnostics validate-properties --app-id {app_id} -d {device_id}
    """

    helps[
        "iot central diagnostics registration-summary"
    ] = """
        type: command
        short-summary: Provides a registration summary of all the devices in an app.
        long-summary: |
            Note: This command can take a significant amount of time to return
            if your app contains a lot of devices
        examples:
        - name: Registration summary
          text: >
            az iot central diagnostics registration-summary --app-id {appid}
    """
# TODO: Delete this by end of Dec 2020
def _load_central_deprecated_commands():
    """Register help for deprecated `az iot central app ...` / `device twin`
    aliases kept temporarily for backwards compatibility (see TODO above)."""
    helps[
        "iot central app device-twin"
    ] = """
        type: group
        short-summary: Manage IoT Central device twins.
    """

    helps[
        "iot central app device-twin show"
    ] = """
        type: command
        short-summary: Get the device twin from IoT Hub.
    """

    helps[
        "iot central device twin"
    ] = """
        type: group
        short-summary: Manage IoT Central device twins.
    """

    helps[
        "iot central device twin show"
    ] = """
        type: command
        short-summary: Get the device twin from IoT Hub.
    """

    helps[
        "iot central app monitor-events"
    ] = """
        type: command
        short-summary: Monitor device telemetry & messages sent to the IoT Hub for an IoT Central app.
        long-summary: |
            EXPERIMENTAL requires Python 3.5+
            This command relies on and may install dependent Cython package (uamqp) upon first execution.
            https://github.com/Azure/azure-uamqp-python
        examples:
        - name: Basic usage
          text: >
            az iot central app monitor-events --app-id {app_id}
        - name: Basic usage when filtering on target device
          text: >
            az iot central app monitor-events --app-id {app_id} -d {device_id}
        - name: Basic usage when filtering targeted devices with a wildcard in the ID
          text: >
            az iot central app monitor-events --app-id {app_id} -d Device*d
        - name: Basic usage when filtering on module.
          text: >
            az iot central app monitor-events --app-id {app_id} -m {module_id}
        - name: Basic usage when filtering targeted modules with a wildcard in the ID
          text: >
            az iot central app monitor-events --app-id {app_id} -m Module*
        - name: Filter device and specify an Event Hub consumer group to bind to.
          text: >
            az iot central app monitor-events --app-id {app_id} -d {device_id} --cg {consumer_group_name}
        - name: Receive message annotations (message headers)
          text: >
            az iot central app monitor-events --app-id {app_id} -d {device_id} --properties anno
        - name: Receive message annotations + system properties. Never time out.
          text: >
            az iot central app monitor-events --app-id {app_id} -d {device_id} --properties anno sys --timeout 0
        - name: Receive all message attributes from all device messages
          text: >
            az iot central app monitor-events --app-id {app_id} --props all
        - name: Receive all messages and parse message payload as JSON
          text: >
            az iot central app monitor-events --app-id {app_id} --output json
    """
| 34.321131 | 200 | 0.575993 |
from knack.help_files import helps
def load_central_help():
    """Entry point: register all `az iot central` help strings with knack."""
    helps[
        "iot central"
    ] = """
        type: group
        short-summary: Manage IoT Central resources.
        long-summary: |
            IoT Central is an IoT application platform that reduces the burden and cost of developing,
            managing, and maintaining enterprise-grade IoT solutions. Choosing to build with IoT Central
            gives you the opportunity to focus time, money, and energy on transforming your business
            with IoT data, rather than just maintaining and updating a complex and continually evolving
            IoT infrastructure.
            IoT Central documentation is available at https://aka.ms/iotcentral-documentation
            Additional information on CLI commands is available at https://aka.ms/azure-cli-iot-ext
    """

    helps[
        "iot central app"
    ] = """
        type: group
        short-summary: Manage IoT Central applications.
        long-summary: Create, delete, view, and update your IoT Central apps.
    """

    # Register the help text for each command sub-group.
    _load_central_devices_help()
    _load_central_users_help()
    _load_central_api_token_help()
    _load_central_device_templates_help()
    _load_central_monitors_help()
    _load_central_command_help()
    _load_central_compute_device_key()
    _load_central_deprecated_commands()
def _load_central_devices_help():
    """Register knack help text for the `az iot central device` command group."""
    helps[
        "iot central device"
    ] = """
        type: group
        short-summary: Manage and configure IoT Central devices
    """

    helps[
        "iot central device create"
    ] = """
        type: command
        short-summary: Create a device in IoT Central
        examples:
        - name: Create a device
          text: >
            az iot central device create
            --app-id {appid}
            --device-id {deviceid}
        - name: Create a simulated device
          text: >
            az iot central device create
            --app-id {appid}
            --device-id {deviceid}
            --instance-of {devicetemplateid}
            --simulated
    """

    helps[
        "iot central device show"
    ] = """
        type: command
        short-summary: Get a device from IoT Central
        examples:
        - name: Get a device
          text: >
            az iot central device show
            --app-id {appid}
            --device-id {deviceid}
    """

    helps[
        "iot central device delete"
    ] = """
        type: command
        short-summary: Delete a device from IoT Central
        examples:
        - name: Delete a device
          text: >
            az iot central device delete
            --app-id {appid}
            --device-id {deviceid}
    """

    helps[
        "iot central device show-credentials"
    ] = """
        type: command
        short-summary: Get device credentials from IoT Central
        examples:
        - name: Get device credentials for a device
          text: >
            az iot central device show-credentials
            --app-id {appid}
            --device-id {deviceid}
    """

    helps[
        "iot central device registration-info"
    ] = """
        type: command
        short-summary: Get registration info on device(s) from IoT Central
        long-summary: |
            Note: This command can take a significant amount of time to return
            if no device id is specified and your app contains a lot of devices
        examples:
        - name: Get registration info on specified device
          text: >
            az iot central device registration-info --app-id {appid} --device-id {deviceid}
    """
def _load_central_compute_device_key():
    """Register knack help text for `az iot central device compute-device-key`."""
    helps[
        "iot central device compute-device-key"
    ] = """
        type: command
        short-summary: Generate a derived device SAS key.
        long-summary: Generate a derived device key from a group-level SAS key.
        examples:
        - name: Basic usage
          text: >
            az iot central device compute-device-key --pk {primaryKey} --device-id {deviceid}
    """
def _load_central_command_help():
    """Register knack help text for the `az iot central device command` group."""
    helps[
        "iot central device command"
    ] = """
        type: group
        short-summary: Run device commands.
    """

    helps[
        "iot central device command history"
    ] = """
        type: command
        short-summary: Get most recent command-response request and response payload.
        examples:
        - name: Show command response
          text: >
            az iot central device command history
            --app-id {appid}
            --device-id {deviceid}
            --interface-id {interfaceid}
            --command-name {commandname}
    """

    helps[
        "iot central device command run"
    ] = """
        type: command
        short-summary: Run a command on a device and view associated response. Does NOT monitor property updates that the command may perform.
        long-summary: |
            Note: payload should be nested under "request".
            i.e. if your device expects the payload in a shape {"key": "value"}
            payload should be {"request": {"key": "value"}}.
            --content can be pointed at a filepath too (.../path/to/payload.json)
        examples:
        - name: Run command response
          text: >
            az iot central device command run
            --app-id {appid}
            --device-id {deviceid}
            --interface-id {interfaceid}
            --command-name {commandname}
            --content {payload}
        - name: Short Run command response
          text: >
            az iot central device command run
            -n {appid}
            -d {deviceid}
            -i {interfaceid}
            --cn {commandname}
            -k {payload}
    """
def _load_central_users_help():
helps[
"iot central user"
] = """
type: group
short-summary: Manage and configure IoT Central users
"""
helps[
"iot central user create"
] = """
type: command
short-summary: Add a user to the application
examples:
- name: Add a user by email to the application
text: >
az iot central user create
--user-id {userId}
--app-id {appId}
--email {emailAddress}
--role admin
- name: Add a service-principal to the application
text: >
az iot central user create
--user-id {userId}
--app-id {appId}
--tenant-id {tenantId}
--object-id {objectId}
--role operator
"""
helps[
"iot central user show"
] = """
type: command
short-summary: Get the details of a user by ID
examples:
- name: Get details of user
text: >
az iot central user show
--app-id {appid}
--user-id {userId}
"""
helps[
"iot central user delete"
] = """
type: command
short-summary: Delete a user from the application
examples:
- name: Delete a user
text: >
az iot central user delete
--app-id {appid}
--user-id {userId}
"""
helps[
"iot central user list"
] = """
type: command
short-summary: Get list of users in an application
examples:
- name: List of users
text: >
az iot central user list
--app-id {appid}
"""
def _load_central_api_token_help():
helps[
"iot central api-token"
] = """
type: group
short-summary: Create and Manage API tokens.
"""
helps[
"iot central api-token create"
] = """
type: command
short-summary: Create a new API token in the application
long-summary: The only time you will see the value of this token is when creating the token. Ensure you store this token somewhere securely, as if you lose it, you will need to create another.
examples:
- name: Add new API token
text: >
az iot central api-token create
--token-id {tokenId}
--app-id {appId}
--role admin
"""
helps[
"iot central api-token show"
] = """
type: command
short-summary: Get token meta data (e.g. role as a GUID, expiration)
long-summary: API token information contains basic information about the token and does not include the value of the token.
examples:
- name: Get API token
text: >
az iot central api-token show
--app-id {appid}
--token-id {tokenId}
"""
helps[
"iot central api-token delete"
] = """
type: command
short-summary: Delete an API token from the application
examples:
- name: Delete an API token
text: >
az iot central api-token delete
--app-id {appid}
--token-id {tokenId}
"""
helps[
"iot central api-token list"
] = """
type: command
short-summary: Get a list of all token meta data (e.g. Role as a GUID and expiration)
long-summary: Information in the list contains basic information about the tokens in the application and does not include token values.
examples:
- name: List of API tokens
text: >
az iot central api-token list
--app-id {appid}
"""
def _load_central_device_templates_help():
helps[
"iot central device-template"
] = """
type: group
short-summary: Manage and configure IoT Central device templates
"""
helps[
"iot central device-template create"
] = """
type: command
short-summary: Create a device template in IoT Central
examples:
- name: Create a device template with payload read from a file
text: >
az iot central device-template create
--app-id {appid}
--content {pathtofile}
--device-template-id {devicetemplateid}
- name: Create a device template with payload read from raw json
text: >
az iot central device-template create
--app-id {appid}
--content {json}
--device-template-id {devicetemplateid}
"""
helps[
"iot central device-template show"
] = """
type: command
short-summary: Get a device template from IoT Central
examples:
- name: Get a device template
text: >
az iot central device-template show
--app-id {appid}
--device-template-id {devicetemplateid}
"""
helps[
"iot central device-template delete"
] = """
type: command
short-summary: Delete a device template from IoT Central
long-summary: |
Note: this is expected to fail if any devices are still associated to this template.
examples:
- name: Delete a device template from IoT Central
text: >
az iot central device-template delete
--app-id {appid}
--device-template-id {devicetemplateid}
"""
def _load_central_monitors_help():
helps[
"iot central diagnostics"
] = """
type: group
short-summary: Perform application and device level diagnostics.
"""
helps[
"iot central diagnostics monitor-events"
] = """
type: command
short-summary: Monitor device telemetry & messages sent to the IoT Hub for an IoT Central app.
long-summary: |
EXPERIMENTAL requires Python 3.5+
This command relies on and may install dependent Cython package (uamqp) upon first execution.
https://github.com/Azure/azure-uamqp-python
examples:
- name: Basic usage
text: >
az iot central diagnostics monitor-events --app-id {app_id}
- name: Basic usage when filtering on target device
text: >
az iot central diagnostics monitor-events --app-id {app_id} -d {device_id}
- name: Basic usage when filtering targeted devices with a wildcard in the ID
text: >
az iot central diagnostics monitor-events --app-id {app_id} -d Device*d
- name: Basic usage when filtering on module.
text: >
az iot central diagnostics monitor-events --app-id {app_id} -m {module_id}
- name: Basic usage when filtering targeted modules with a wildcard in the ID
text: >
az iot central diagnostics monitor-events --app-id {app_id} -m Module*
- name: Filter device and specify an Event Hub consumer group to bind to.
text: >
az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --cg {consumer_group_name}
- name: Receive message annotations (message headers)
text: >
az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --properties anno
- name: Receive message annotations + system properties. Never time out.
text: >
az iot central diagnostics monitor-events --app-id {app_id} -d {device_id} --properties anno sys --timeout 0
- name: Receive all message attributes from all device messages
text: >
az iot central diagnostics monitor-events --app-id {app_id} --props all
- name: Receive all messages and parse message payload as JSON
text: >
az iot central diagnostics monitor-events --app-id {app_id} --output json
"""
helps[
"iot central diagnostics validate-messages"
] = """
type: command
short-summary: Validate messages sent to the IoT Hub for an IoT Central app.
long-summary: |
EXPERIMENTAL requires Python 3.5+
This command relies on and may install dependent Cython package (uamqp) upon first execution.
https://github.com/Azure/azure-uamqp-python
examples:
- name: Basic usage
text: >
az iot central diagnostics validate-messages --app-id {app_id}
- name: Output errors as they are detected
text: >
az iot central diagnostics validate-messages --app-id {app_id} --style scroll
- name: Basic usage when filtering on target device
text: >
az iot central diagnostics validate-messages --app-id {app_id} -d {device_id}
- name: Basic usage when filtering targeted devices with a wildcard in the ID
text: >
az iot central diagnostics validate-messages --app-id {app_id} -d Device*
- name: Basic usage when filtering on module.
text: >
az iot central diagnostics validate-messages --app-id {app_id} -m {module_id}
- name: Basic usage when filtering targeted modules with a wildcard in the ID
text: >
az iot central diagnostics validate-messages --app-id {app_id} -m Module*
- name: Filter device and specify an Event Hub consumer group to bind to.
text: >
az iot central diagnostics validate-messages --app-id {app_id} -d {device_id} --cg {consumer_group_name}
"""
helps[
"iot central diagnostics monitor-properties"
] = """
type: command
short-summary: Monitor desired and reported properties sent to/from the IoT Hub for an IoT Central app.
long-summary: |
Polls device-twin from central and compares it to the last device-twin
Parses out properties from device-twin, and detects if changes were made
Prints subset of properties that were changed within the polling interval
examples:
- name: Basic usage
text: >
az iot central diagnostics monitor-properties --app-id {app_id} -d {device_id}
"""
helps[
"iot central diagnostics validate-properties"
] = """
type: command
short-summary: Validate reported properties sent to IoT Central app.
long-summary: |
Performs validations on reported property updates:
1) Warning - Properties sent by device that are not modeled in central.
2) Warning - Properties with same name declared in multiple interfaces
should have interface name included as part of the property update.
examples:
- name: Basic usage
text: >
az iot central diagnostics validate-properties --app-id {app_id} -d {device_id}
"""
helps[
"iot central diagnostics registration-summary"
] = """
type: command
short-summary: Provides a registration summary of all the devices in an app.
long-summary: |
Note: This command can take a significant amount of time to return
if your app contains a lot of devices
examples:
- name: Registration summary
text: >
az iot central diagnostics registration-summary --app-id {appid}
"""
def _load_central_deprecated_commands():
helps[
"iot central app device-twin"
] = """
type: group
short-summary: Manage IoT Central device twins.
"""
helps[
"iot central app device-twin show"
] = """
type: command
short-summary: Get the device twin from IoT Hub.
"""
helps[
"iot central device twin"
] = """
type: group
short-summary: Manage IoT Central device twins.
"""
helps[
"iot central device twin show"
] = """
type: command
short-summary: Get the device twin from IoT Hub.
"""
helps[
"iot central app monitor-events"
] = """
type: command
short-summary: Monitor device telemetry & messages sent to the IoT Hub for an IoT Central app.
long-summary: |
EXPERIMENTAL requires Python 3.5+
This command relies on and may install dependent Cython package (uamqp) upon first execution.
https://github.com/Azure/azure-uamqp-python
examples:
- name: Basic usage
text: >
az iot central app monitor-events --app-id {app_id}
- name: Basic usage when filtering on target device
text: >
az iot central app monitor-events --app-id {app_id} -d {device_id}
- name: Basic usage when filtering targeted devices with a wildcard in the ID
text: >
az iot central app monitor-events --app-id {app_id} -d Device*d
- name: Basic usage when filtering on module.
text: >
az iot central app monitor-events --app-id {app_id} -m {module_id}
- name: Basic usage when filtering targeted modules with a wildcard in the ID
text: >
az iot central app monitor-events --app-id {app_id} -m Module*
- name: Filter device and specify an Event Hub consumer group to bind to.
text: >
az iot central app monitor-events --app-id {app_id} -d {device_id} --cg {consumer_group_name}
- name: Receive message annotations (message headers)
text: >
az iot central app monitor-events --app-id {app_id} -d {device_id} --properties anno
- name: Receive message annotations + system properties. Never time out.
text: >
az iot central app monitor-events --app-id {app_id} -d {device_id} --properties anno sys --timeout 0
- name: Receive all message attributes from all device messages
text: >
az iot central app monitor-events --app-id {app_id} --props all
- name: Receive all messages and parse message payload as JSON
text: >
az iot central app monitor-events --app-id {app_id} --output json
"""
| true | true |
f73335e47e7f6716aa3e2c15913d092a19b8db66 | 84 | py | Python | beluga/codegen/__init__.py | dHannasch/beluga | 519e1ca2a43a86bc47737c45484288b2bacc1338 | [
"MIT"
] | null | null | null | beluga/codegen/__init__.py | dHannasch/beluga | 519e1ca2a43a86bc47737c45484288b2bacc1338 | [
"MIT"
] | null | null | null | beluga/codegen/__init__.py | dHannasch/beluga | 519e1ca2a43a86bc47737c45484288b2bacc1338 | [
"MIT"
] | null | null | null | from .codegen import lambdify_, jit_compile_func, tuplefy, SymBVP, FuncBVP, FuncOCP
| 42 | 83 | 0.821429 | from .codegen import lambdify_, jit_compile_func, tuplefy, SymBVP, FuncBVP, FuncOCP
| true | true |
f733365a20592c65c17f3c07129bb74cad595abf | 2,163 | py | Python | CIM100/IEC61970/Dynamics/SynchronousMachineDetailed.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM100/IEC61970/Dynamics/SynchronousMachineDetailed.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM100/IEC61970/Dynamics/SynchronousMachineDetailed.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Modified by Gustav Holm (guholm@kth.se) & Francis J. Gomez (fragom@kth.se)
# Modified date: 05/06/2017
from CIM100.IEC61970.Dynamics.SynchronousMachineDynamics import SynchronousMachineDynamics
class SynchronousMachineDetailed(SynchronousMachineDynamics):
    """Synchronous machine dynamics described by detailed parameters
    (CIM IEC 61970 dynamics profile).

    Attribute names follow the CIM standard; semantics per the CIM model
    (presumably: efdBaseRatio relates exciter and field-voltage bases,
    saturation factors describe q-axis saturation — confirm against the
    IEC 61970-302 specification).
    """

    def __init__(self, efdBaseRatio=0.0, ifdBaseType=None, saturationFactor120QAxis=0.0, saturationFactorQAxis=0.0, *args, **kw_args):
        # Scalar CIM attributes; remaining keyword args are forwarded to the
        # SynchronousMachineDynamics base class.
        self.efdBaseRatio = efdBaseRatio
        self.ifdBaseType = ifdBaseType
        self.saturationFactor120QAxis = saturationFactor120QAxis
        self.saturationFactorQAxis = saturationFactorQAxis

        super(SynchronousMachineDetailed, self).__init__(*args, **kw_args)

    # PyCIM introspection metadata: plain attributes, their Python types and
    # default values, enum-typed attributes, and (empty) reference lists.
    _attrs = ["efdBaseRatio", "saturationFactor120QAxis", "saturationFactorQAxis"]
    _attr_types = {"efdBaseRatio": float, "saturationFactor120QAxis": float, "saturationFactorQAxis": float}
    _defaults = {"efdBaseRatio": 0.0, "saturationFactor120QAxis": 0.0, "saturationFactorQAxis": 0.0}
    _enums = {"ifdBaseType": "ifdBaseKind"}
    _refs = []
    _many_refs = []
| 45.0625 | 134 | 0.760518 |
from CIM100.IEC61970.Dynamics.SynchronousMachineDynamics import SynchronousMachineDynamics
class SynchronousMachineDetailed(SynchronousMachineDynamics):
def __init__(self, efdBaseRatio=0.0, ifdBaseType=None, saturationFactor120QAxis=0.0, saturationFactorQAxis=0.0, *args, **kw_args):
self.efdBaseRatio = efdBaseRatio
self.ifdBaseType = ifdBaseType
self.saturationFactor120QAxis = saturationFactor120QAxis
self.saturationFactorQAxis = saturationFactorQAxis
super(SynchronousMachineDetailed, self).__init__(*args, **kw_args)
_attrs = ["efdBaseRatio", "saturationFactor120QAxis", "saturationFactorQAxis"]
_attr_types = {"efdBaseRatio": float, "saturationFactor120QAxis": float, "saturationFactorQAxis": float}
_defaults = {"efdBaseRatio": 0.0, "saturationFactor120QAxis": 0.0, "saturationFactorQAxis": 0.0}
_enums = {"ifdBaseType": "ifdBaseKind"}
_refs = []
_many_refs = []
| true | true |
f733366d79457e17f189a276d7bd177aabb7b273 | 154 | py | Python | board/test_paginator.py | msio900/test_django | 5d2077bf00bbdb3b4ed2e1f3d46348639a85191d | [
"Apache-2.0"
] | null | null | null | board/test_paginator.py | msio900/test_django | 5d2077bf00bbdb3b4ed2e1f3d46348639a85191d | [
"Apache-2.0"
] | null | null | null | board/test_paginator.py | msio900/test_django | 5d2077bf00bbdb3b4ed2e1f3d46348639a85191d | [
"Apache-2.0"
] | null | null | null | from django.core.paginator import Paginator
# Six sample items split two-per-page -> a three-page paginator.
objects = ['john', 'paul', 'george', 'ringo', 'jane', 'mag']
page = Paginator(objects, 2)
# Prints the class of the object, i.e.
# <class 'django.core.paginator.Paginator'>
print(type(page))
| 22 | 60 | 0.681818 | from django.core.paginator import Paginator
objects = ['john', 'paul', 'george', 'ringo', 'jane', 'mag']
page = Paginator(objects, 2)
print(type(page))
| true | true |
f7333710cd77a3eb913e3f571730db6dc331ea14 | 12,219 | py | Python | ic3net_envs/ic3net_envs/predator_prey_env.py | miniTsl/IC3Net | 897ed3bae6ad5f65fb3cc4577d4392af6e456703 | [
"MIT"
] | 158 | 2018-11-30T20:20:18.000Z | 2022-03-13T07:55:19.000Z | ic3net_envs/ic3net_envs/predator_prey_env.py | miniTsl/IC3Net | 897ed3bae6ad5f65fb3cc4577d4392af6e456703 | [
"MIT"
] | 13 | 2019-01-26T10:51:39.000Z | 2022-01-13T00:58:37.000Z | ic3net_envs/ic3net_envs/predator_prey_env.py | miniTsl/IC3Net | 897ed3bae6ad5f65fb3cc4577d4392af6e456703 | [
"MIT"
] | 36 | 2019-04-17T12:46:25.000Z | 2022-03-27T08:57:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simulate a predator prey environment.
Each agent can just observe itself (it's own identity) i.e. s_j = j and vision sqaure around it.
Design Decisions:
- Memory cheaper than time (compute)
- Using Vocab for class of box:
-1 out of bound,
indexing for predator agent (from 2?)
??? for prey agent (1 for fixed case, for now)
- Action Space & Observation Space are according to an agent
- Rewards -0.05 at each time step till the time
- Episode never ends
- Obs. State: Vocab of 1-hot < predator, preys & units >
"""
# core modules
import random
import math
import curses
# 3rd party modules
import gym
import numpy as np
from gym import spaces
class PredatorPreyEnv(gym.Env):
# metadata = {'render.modes': ['human']}
def __init__(self,):
self.__version__ = "0.0.1"
# TODO: better config handling
self.OUTSIDE_CLASS = 1
self.PREY_CLASS = 2
self.PREDATOR_CLASS = 3
self.TIMESTEP_PENALTY = -0.05
self.PREY_REWARD = 0
self.POS_PREY_REWARD = 0.05
self.episode_over = False
def init_curses(self):
self.stdscr = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_CYAN, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
def init_args(self, parser):
env = parser.add_argument_group('Prey Predator task')
env.add_argument('--nenemies', type=int, default=1,
help="Total number of preys in play")
env.add_argument('--dim', type=int, default=5,
help="Dimension of box")
env.add_argument('--vision', type=int, default=2,
help="Vision of predator")
env.add_argument('--moving_prey', action="store_true", default=False,
help="Whether prey is fixed or moving")
env.add_argument('--no_stay', action="store_true", default=False,
help="Whether predators have an action to stay in place")
parser.add_argument('--mode', default='mixed', type=str,
help='cooperative|competitive|mixed (default: mixed)')
env.add_argument('--enemy_comm', action="store_true", default=False,
help="Whether prey can communicate.")
def multi_agent_init(self, args):
# General variables defining the environment : CONFIG
params = ['dim', 'vision', 'moving_prey', 'mode', 'enemy_comm']
for key in params:
setattr(self, key, getattr(args, key))
self.nprey = args.nenemies
self.npredator = args.nfriendly
self.dims = dims = (self.dim, self.dim)
self.stay = not args.no_stay
if args.moving_prey:
raise NotImplementedError
# TODO
# (0: UP, 1: RIGHT, 2: DOWN, 3: LEFT, 4: STAY)
# Define what an agent can do -
if self.stay:
self.naction = 5
else:
self.naction = 4
self.action_space = spaces.MultiDiscrete([self.naction])
self.BASE = (dims[0] * dims[1])
self.OUTSIDE_CLASS += self.BASE
self.PREY_CLASS += self.BASE
self.PREDATOR_CLASS += self.BASE
# Setting max vocab size for 1-hot encoding
self.vocab_size = 1 + 1 + self.BASE + 1 + 1
# predator + prey + grid + outside
# Observation for each agent will be vision * vision ndarray
self.observation_space = spaces.Box(low=0, high=1, shape=(self.vocab_size, (2 * self.vision) + 1, (2 * self.vision) + 1), dtype=int)
# Actual observation will be of the shape 1 * npredator * (2v+1) * (2v+1) * vocab_size
return
def step(self, action):
"""
The agents take a step in the environment.
Parameters
----------
action : list/ndarray of length m, containing the indexes of what lever each 'm' chosen agents pulled.
Returns
-------
obs, reward, episode_over, info : tuple
obs (object) :
reward (float) : Ratio of Number of discrete levers pulled to total number of levers.
episode_over (bool) : Will be true as episode length is 1
info (dict) : diagnostic information useful for debugging.
"""
if self.episode_over:
raise RuntimeError("Episode is done")
action = np.array(action).squeeze()
action = np.atleast_1d(action)
for i, a in enumerate(action):
self._take_action(i, a)
assert np.all(action <= self.naction), "Actions should be in the range [0,naction)."
self.episode_over = False
self.obs = self._get_obs()
debug = {'predator_locs':self.predator_loc,'prey_locs':self.prey_loc}
return self.obs, self._get_reward(), self.episode_over, debug
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns
-------
observation (object): the initial observation of the space.
"""
self.episode_over = False
self.reached_prey = np.zeros(self.npredator)
# Locations
locs = self._get_cordinates()
self.predator_loc, self.prey_loc = locs[:self.npredator], locs[self.npredator:]
self._set_grid()
# stat - like success ratio
self.stat = dict()
# Observation will be npredator * vision * vision ndarray
self.obs = self._get_obs()
return self.obs
def seed(self):
return
def _get_cordinates(self):
idx = np.random.choice(np.prod(self.dims),(self.npredator + self.nprey), replace=False)
return np.vstack(np.unravel_index(idx, self.dims)).T
def _set_grid(self):
self.grid = np.arange(self.BASE).reshape(self.dims)
# Mark agents in grid
# self.grid[self.predator_loc[:,0], self.predator_loc[:,1]] = self.predator_ids
# self.grid[self.prey_loc[:,0], self.prey_loc[:,1]] = self.prey_ids
# Padding for vision
self.grid = np.pad(self.grid, self.vision, 'constant', constant_values = self.OUTSIDE_CLASS)
self.empty_bool_base_grid = self._onehot_initialization(self.grid)
def _get_obs(self):
self.bool_base_grid = self.empty_bool_base_grid.copy()
for i, p in enumerate(self.predator_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREDATOR_CLASS] += 1
for i, p in enumerate(self.prey_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREY_CLASS] += 1
obs = []
for p in self.predator_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
if self.enemy_comm:
for p in self.prey_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
obs = np.stack(obs)
return obs
def _take_action(self, idx, act):
# prey action
if idx >= self.npredator:
# fixed prey
if not self.moving_prey:
return
else:
raise NotImplementedError
if self.reached_prey[idx] == 1:
return
# STAY action
if act==5:
return
# UP
if act==0 and self.grid[max(0,
self.predator_loc[idx][0] + self.vision - 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = max(0, self.predator_loc[idx][0]-1)
# RIGHT
elif act==1 and self.grid[self.predator_loc[idx][0] + self.vision,
min(self.dims[1] -1,
self.predator_loc[idx][1] + self.vision + 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = min(self.dims[1]-1,
self.predator_loc[idx][1]+1)
# DOWN
elif act==2 and self.grid[min(self.dims[0]-1,
self.predator_loc[idx][0] + self.vision + 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = min(self.dims[0]-1,
self.predator_loc[idx][0]+1)
# LEFT
elif act==3 and self.grid[self.predator_loc[idx][0] + self.vision,
max(0,
self.predator_loc[idx][1] + self.vision - 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = max(0, self.predator_loc[idx][1]-1)
def _get_reward(self):
n = self.npredator if not self.enemy_comm else self.npredator + self.nprey
reward = np.full(n, self.TIMESTEP_PENALTY)
on_prey = np.where(np.all(self.predator_loc == self.prey_loc,axis=1))[0]
nb_predator_on_prey = on_prey.size
if self.mode == 'cooperative':
reward[on_prey] = self.POS_PREY_REWARD * nb_predator_on_prey
elif self.mode == 'competitive':
if nb_predator_on_prey:
reward[on_prey] = self.POS_PREY_REWARD / nb_predator_on_prey
elif self.mode == 'mixed':
reward[on_prey] = self.PREY_REWARD
else:
raise RuntimeError("Incorrect mode, Available modes: [cooperative|competitive|mixed]")
self.reached_prey[on_prey] = 1
if np.all(self.reached_prey == 1) and self.mode == 'mixed':
self.episode_over = True
# Prey reward
if nb_predator_on_prey == 0:
reward[self.npredator:] = -1 * self.TIMESTEP_PENALTY
else:
# TODO: discuss & finalise
reward[self.npredator:] = 0
# Success ratio
if self.mode != 'competitive':
if nb_predator_on_prey == self.npredator:
self.stat['success'] = 1
else:
self.stat['success'] = 0
return reward
def reward_terminal(self):
return np.zeros_like(self._get_reward())
def _onehot_initialization(self, a):
ncols = self.vocab_size
out = np.zeros(a.shape + (ncols,), dtype=int)
out[self._all_idx(a, axis=2)] = 1
return out
def _all_idx(self, idx, axis):
grid = np.ogrid[tuple(map(slice, idx.shape))]
grid.insert(axis, idx)
return tuple(grid)
def render(self, mode='human', close=False):
grid = np.zeros(self.BASE, dtype=object).reshape(self.dims)
self.stdscr.clear()
for p in self.predator_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'X'
else:
grid[p[0]][p[1]] = 'X'
for p in self.prey_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'P'
else:
grid[p[0]][p[1]] = 'P'
for row_num, row in enumerate(grid):
for idx, item in enumerate(row):
if item != 0:
if 'X' in item and 'P' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(3))
elif 'X' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(1))
else:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(2))
else:
self.stdscr.addstr(row_num, idx * 4, '0'.center(3), curses.color_pair(4))
self.stdscr.addstr(len(grid), 0, '\n')
self.stdscr.refresh()
def exit_render(self):
curses.endwin()
| 35.938235 | 140 | 0.561421 |
import random
import math
import curses
import gym
import numpy as np
from gym import spaces
class PredatorPreyEnv(gym.Env):
def __init__(self,):
self.__version__ = "0.0.1"
self.OUTSIDE_CLASS = 1
self.PREY_CLASS = 2
self.PREDATOR_CLASS = 3
self.TIMESTEP_PENALTY = -0.05
self.PREY_REWARD = 0
self.POS_PREY_REWARD = 0.05
self.episode_over = False
def init_curses(self):
self.stdscr = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_CYAN, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
def init_args(self, parser):
env = parser.add_argument_group('Prey Predator task')
env.add_argument('--nenemies', type=int, default=1,
help="Total number of preys in play")
env.add_argument('--dim', type=int, default=5,
help="Dimension of box")
env.add_argument('--vision', type=int, default=2,
help="Vision of predator")
env.add_argument('--moving_prey', action="store_true", default=False,
help="Whether prey is fixed or moving")
env.add_argument('--no_stay', action="store_true", default=False,
help="Whether predators have an action to stay in place")
parser.add_argument('--mode', default='mixed', type=str,
help='cooperative|competitive|mixed (default: mixed)')
env.add_argument('--enemy_comm', action="store_true", default=False,
help="Whether prey can communicate.")
def multi_agent_init(self, args):
params = ['dim', 'vision', 'moving_prey', 'mode', 'enemy_comm']
for key in params:
setattr(self, key, getattr(args, key))
self.nprey = args.nenemies
self.npredator = args.nfriendly
self.dims = dims = (self.dim, self.dim)
self.stay = not args.no_stay
if args.moving_prey:
raise NotImplementedError
if self.stay:
self.naction = 5
else:
self.naction = 4
self.action_space = spaces.MultiDiscrete([self.naction])
self.BASE = (dims[0] * dims[1])
self.OUTSIDE_CLASS += self.BASE
self.PREY_CLASS += self.BASE
self.PREDATOR_CLASS += self.BASE
self.vocab_size = 1 + 1 + self.BASE + 1 + 1
self.observation_space = spaces.Box(low=0, high=1, shape=(self.vocab_size, (2 * self.vision) + 1, (2 * self.vision) + 1), dtype=int)
return
def step(self, action):
if self.episode_over:
raise RuntimeError("Episode is done")
action = np.array(action).squeeze()
action = np.atleast_1d(action)
for i, a in enumerate(action):
self._take_action(i, a)
assert np.all(action <= self.naction), "Actions should be in the range [0,naction)."
self.episode_over = False
self.obs = self._get_obs()
debug = {'predator_locs':self.predator_loc,'prey_locs':self.prey_loc}
return self.obs, self._get_reward(), self.episode_over, debug
def reset(self):
self.episode_over = False
self.reached_prey = np.zeros(self.npredator)
locs = self._get_cordinates()
self.predator_loc, self.prey_loc = locs[:self.npredator], locs[self.npredator:]
self._set_grid()
self.stat = dict()
self.obs = self._get_obs()
return self.obs
def seed(self):
return
def _get_cordinates(self):
idx = np.random.choice(np.prod(self.dims),(self.npredator + self.nprey), replace=False)
return np.vstack(np.unravel_index(idx, self.dims)).T
def _set_grid(self):
self.grid = np.arange(self.BASE).reshape(self.dims)
self.grid = np.pad(self.grid, self.vision, 'constant', constant_values = self.OUTSIDE_CLASS)
self.empty_bool_base_grid = self._onehot_initialization(self.grid)
def _get_obs(self):
self.bool_base_grid = self.empty_bool_base_grid.copy()
for i, p in enumerate(self.predator_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREDATOR_CLASS] += 1
for i, p in enumerate(self.prey_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREY_CLASS] += 1
obs = []
for p in self.predator_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
if self.enemy_comm:
for p in self.prey_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
obs = np.stack(obs)
return obs
def _take_action(self, idx, act):
if idx >= self.npredator:
if not self.moving_prey:
return
else:
raise NotImplementedError
if self.reached_prey[idx] == 1:
return
if act==5:
return
if act==0 and self.grid[max(0,
self.predator_loc[idx][0] + self.vision - 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = max(0, self.predator_loc[idx][0]-1)
elif act==1 and self.grid[self.predator_loc[idx][0] + self.vision,
min(self.dims[1] -1,
self.predator_loc[idx][1] + self.vision + 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = min(self.dims[1]-1,
self.predator_loc[idx][1]+1)
elif act==2 and self.grid[min(self.dims[0]-1,
self.predator_loc[idx][0] + self.vision + 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = min(self.dims[0]-1,
self.predator_loc[idx][0]+1)
elif act==3 and self.grid[self.predator_loc[idx][0] + self.vision,
max(0,
self.predator_loc[idx][1] + self.vision - 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = max(0, self.predator_loc[idx][1]-1)
def _get_reward(self):
n = self.npredator if not self.enemy_comm else self.npredator + self.nprey
reward = np.full(n, self.TIMESTEP_PENALTY)
on_prey = np.where(np.all(self.predator_loc == self.prey_loc,axis=1))[0]
nb_predator_on_prey = on_prey.size
if self.mode == 'cooperative':
reward[on_prey] = self.POS_PREY_REWARD * nb_predator_on_prey
elif self.mode == 'competitive':
if nb_predator_on_prey:
reward[on_prey] = self.POS_PREY_REWARD / nb_predator_on_prey
elif self.mode == 'mixed':
reward[on_prey] = self.PREY_REWARD
else:
raise RuntimeError("Incorrect mode, Available modes: [cooperative|competitive|mixed]")
self.reached_prey[on_prey] = 1
if np.all(self.reached_prey == 1) and self.mode == 'mixed':
self.episode_over = True
if nb_predator_on_prey == 0:
reward[self.npredator:] = -1 * self.TIMESTEP_PENALTY
else:
reward[self.npredator:] = 0
if self.mode != 'competitive':
if nb_predator_on_prey == self.npredator:
self.stat['success'] = 1
else:
self.stat['success'] = 0
return reward
def reward_terminal(self):
return np.zeros_like(self._get_reward())
def _onehot_initialization(self, a):
ncols = self.vocab_size
out = np.zeros(a.shape + (ncols,), dtype=int)
out[self._all_idx(a, axis=2)] = 1
return out
def _all_idx(self, idx, axis):
grid = np.ogrid[tuple(map(slice, idx.shape))]
grid.insert(axis, idx)
return tuple(grid)
def render(self, mode='human', close=False):
grid = np.zeros(self.BASE, dtype=object).reshape(self.dims)
self.stdscr.clear()
for p in self.predator_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'X'
else:
grid[p[0]][p[1]] = 'X'
for p in self.prey_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'P'
else:
grid[p[0]][p[1]] = 'P'
for row_num, row in enumerate(grid):
for idx, item in enumerate(row):
if item != 0:
if 'X' in item and 'P' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(3))
elif 'X' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(1))
else:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(2))
else:
self.stdscr.addstr(row_num, idx * 4, '0'.center(3), curses.color_pair(4))
self.stdscr.addstr(len(grid), 0, '\n')
self.stdscr.refresh()
def exit_render(self):
curses.endwin()
| true | true |
f733396eed94e5d97ff81c8bf98cb921b43fb09d | 594 | py | Python | pycaret/tests/test_overflow.py | hanaseleb/pycaret | 1fe6e1a6bee642351c4b6064d769f97294713f48 | [
"MIT"
] | 5,541 | 2019-12-30T13:24:46.000Z | 2022-03-31T19:21:37.000Z | pycaret/tests/test_overflow.py | sjoerdteunisse/pycaret | 87e3691e8b0253a7eb203bd721916d5b617fad85 | [
"MIT"
] | 1,616 | 2019-11-23T20:08:30.000Z | 2022-03-31T20:18:21.000Z | pycaret/tests/test_overflow.py | sjoerdteunisse/pycaret | 87e3691e8b0253a7eb203bd721916d5b617fad85 | [
"MIT"
] | 1,471 | 2019-11-23T20:21:48.000Z | 2022-03-31T12:07:39.000Z | import os, sys
sys.path.insert(0, os.path.abspath(".."))
import pandas as pd
import numpy as np
import pytest
def test():
from pycaret.datasets import get_data
data = get_data("boston")
from pycaret.regression import setup, create_model, tune_model
s = setup(data, target="medv", silent=True, html=False, session_id=123, n_jobs=1,)
gbr = create_model("gbr")
tuned_gbr = tune_model(gbr)
xgboost = create_model("xgboost")
tuned_xgboost = tune_model(xgboost)
lightgbm = create_model("lightgbm")
tuned_lightgbm = tune_model(lightgbm)
assert 1 == 1
| 24.75 | 86 | 0.700337 | import os, sys
sys.path.insert(0, os.path.abspath(".."))
import pandas as pd
import numpy as np
import pytest
def test():
from pycaret.datasets import get_data
data = get_data("boston")
from pycaret.regression import setup, create_model, tune_model
s = setup(data, target="medv", silent=True, html=False, session_id=123, n_jobs=1,)
gbr = create_model("gbr")
tuned_gbr = tune_model(gbr)
xgboost = create_model("xgboost")
tuned_xgboost = tune_model(xgboost)
lightgbm = create_model("lightgbm")
tuned_lightgbm = tune_model(lightgbm)
assert 1 == 1
| true | true |
f7333984fc106b555c03a8a42420e08537ea6e09 | 42,772 | py | Python | localstack/utils/aws/aws_stack.py | HugoPelletier/localstack | 75f7e3034bd27e1c9a46a70ededcc06bbe2bd5c5 | [
"Apache-2.0"
] | null | null | null | localstack/utils/aws/aws_stack.py | HugoPelletier/localstack | 75f7e3034bd27e1c9a46a70ededcc06bbe2bd5c5 | [
"Apache-2.0"
] | null | null | null | localstack/utils/aws/aws_stack.py | HugoPelletier/localstack | 75f7e3034bd27e1c9a46a70ededcc06bbe2bd5c5 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import re
import socket
import sys
import threading
import time
from typing import Dict, Optional, Union
from urllib.parse import urlparse
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
import boto3
import botocore
import botocore.config
from botocore.utils import ArnParser, InvalidArnException
from localstack import config, constants
from localstack.constants import (
APPLICATION_AMZ_JSON_1_0,
APPLICATION_AMZ_JSON_1_1,
APPLICATION_X_WWW_FORM_URLENCODED,
ENV_DEV,
INTERNAL_AWS_ACCESS_KEY_ID,
LOCALHOST,
MAX_POOL_CONNECTIONS,
MOTO_ACCOUNT_ID,
REGION_LOCAL,
S3_VIRTUAL_HOSTNAME,
TEST_AWS_ACCESS_KEY_ID,
TEST_AWS_ACCOUNT_ID,
TEST_AWS_SECRET_ACCESS_KEY,
)
from localstack.utils.aws import templating
from localstack.utils.aws.aws_models import KinesisStream
from localstack.utils.common import (
get_service_protocol,
is_string,
is_string_or_bytes,
make_http_request,
retry,
run_safe,
to_str,
)
from localstack.utils.generic import dict_utils
# AWS environment variable names
ENV_ACCESS_KEY = "AWS_ACCESS_KEY_ID"
ENV_SECRET_KEY = "AWS_SECRET_ACCESS_KEY"
ENV_SESSION_TOKEN = "AWS_SESSION_TOKEN"
# set up logger
LOG = logging.getLogger(__name__)
# cache local region
LOCAL_REGION = None
# Use this flag to enable creation of a new session for each boto3 connection.
CREATE_NEW_SESSION_PER_BOTO3_CONNECTION = False
# Used in AWS assume role function
INITIAL_BOTO3_SESSION = None
# Boto clients cache
BOTO_CLIENTS_CACHE = {}
# Assume role loop seconds
DEFAULT_TIMER_LOOP_SECONDS = 60 * 50
# maps SQS queue ARNs to queue URLs
SQS_ARN_TO_URL_CACHE = {}
# List of parameters with additional event target parameters
EVENT_TARGET_PARAMETERS = ["$.SqsParameters", "$.KinesisParameters"]
# cached value used to determine the DNS status of the S3 hostname (whether it can be resolved properly)
CACHE_S3_HOSTNAME_DNS_STATUS = None
# mutex used when creating boto clients (which isn't thread safe: https://github.com/boto/boto3/issues/801)
BOTO_CLIENT_CREATE_LOCK = threading.RLock()
class Environment(object):
def __init__(self, region=None, prefix=None):
# target is the runtime environment to use, e.g.,
# 'local' for local mode
self.region = region or get_local_region()
# prefix can be 'prod', 'stg', 'uat-1', etc.
self.prefix = prefix
def apply_json(self, j):
if isinstance(j, str):
j = json.loads(j)
self.__dict__.update(j)
@staticmethod
def from_string(s):
parts = s.split(":")
if len(parts) == 1:
if s in PREDEFINED_ENVIRONMENTS:
return PREDEFINED_ENVIRONMENTS[s]
parts = [get_local_region(), s]
if len(parts) > 2:
raise Exception('Invalid environment string "%s"' % s)
region = parts[0]
prefix = parts[1]
return Environment(region=region, prefix=prefix)
@staticmethod
def from_json(j):
if not isinstance(j, dict):
j = j.to_dict()
result = Environment()
result.apply_json(j)
return result
def __str__(self):
return "%s:%s" % (self.region, self.prefix)
PREDEFINED_ENVIRONMENTS = {ENV_DEV: Environment(region=REGION_LOCAL, prefix=ENV_DEV)}
def get_environment(env=None, region_name=None):
"""
Return an Environment object based on the input arguments.
Parameter `env` can be either of:
* None (or empty), in which case the rules below are applied to (env = os.environ['ENV'] or ENV_DEV)
* an Environment object (then this object is returned)
* a string '<region>:<name>', which corresponds to Environment(region='<region>', prefix='<prefix>')
* the predefined string 'dev' (ENV_DEV), which implies Environment(region='local', prefix='dev')
* a string '<name>', which implies Environment(region=DEFAULT_REGION, prefix='<name>')
Additionally, parameter `region_name` can be used to override DEFAULT_REGION.
"""
if not env:
if "ENV" in os.environ:
env = os.environ["ENV"]
else:
env = ENV_DEV
elif not is_string(env) and not isinstance(env, Environment):
raise Exception("Invalid environment: %s" % env)
if is_string(env):
env = Environment.from_string(env)
if region_name:
env.region = region_name
if not env.region:
raise Exception('Invalid region in environment: "%s"' % env)
return env
def is_local_env(env):
return not env or env.region == REGION_LOCAL or env.prefix == ENV_DEV
class Boto3Session(boto3.session.Session):
"""Custom boto3 session that points to local endpoint URLs."""
def resource(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_resource(service, *args, **kwargs)
def client(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_service(service, *args, **kwargs)
def _fix_endpoint(self, kwargs):
if "amazonaws.com" in kwargs.get("endpoint_url", ""):
kwargs.pop("endpoint_url")
def get_boto3_session(cache=True):
if not cache or CREATE_NEW_SESSION_PER_BOTO3_CONNECTION:
return boto3.session.Session()
# return default session
return boto3
def get_region():
# Note: leave import here to avoid import errors (e.g., "flask") for CLI commands
from localstack.utils.aws.request_context import get_region_from_request_context
region = get_region_from_request_context()
if region:
return region
# fall back to returning static pre-defined region
return get_local_region()
def get_partition(region_name: str = None):
region_name = region_name or get_region()
return boto3.session.Session().get_partition_for_region(region_name)
def get_local_region():
global LOCAL_REGION
if LOCAL_REGION is None:
session = boto3.session.Session()
LOCAL_REGION = session.region_name or ""
return config.DEFAULT_REGION or LOCAL_REGION
def is_internal_call_context(headers):
"""Return whether we are executing in the context of an internal API call, i.e.,
the case where one API uses a boto3 client to call another API internally."""
auth_header = headers.get("Authorization") or ""
return get_internal_credential() in auth_header
def get_internal_credential():
return "Credential=%s/" % INTERNAL_AWS_ACCESS_KEY_ID
def set_internal_auth(headers):
authorization = headers.get("Authorization") or ""
if authorization.startswith("AWS "):
# Cover Non HMAC Authentication
authorization = re.sub(
r"AWS [^/]+",
"AWS %s" % get_internal_credential(),
authorization,
)
else:
authorization = re.sub(
r"Credential=[^/]+/",
get_internal_credential(),
authorization,
)
headers["Authorization"] = authorization
return headers
def get_local_service_url(service_name_or_port: Union[str, int]) -> str:
"""Return the local service URL for the given service name or port."""
if isinstance(service_name_or_port, int):
return f"{get_service_protocol()}://{LOCALHOST}:{service_name_or_port}"
service_name = service_name_or_port
if service_name == "s3api":
service_name = "s3"
elif service_name == "runtime.sagemaker":
service_name = "sagemaker-runtime"
return config.service_url(service_name)
def connect_to_resource(
service_name, env=None, region_name=None, endpoint_url=None, *args, **kwargs
):
"""
Generic method to obtain an AWS service resource using boto3, based on environment, region, or custom endpoint_url.
"""
return connect_to_service(
service_name,
client=False,
env=env,
region_name=region_name,
endpoint_url=endpoint_url,
)
def connect_to_service(
service_name,
client=True,
env=None,
region_name=None,
endpoint_url=None,
config: botocore.config.Config = None,
verify=False,
cache=True,
*args,
**kwargs,
):
"""
Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url.
"""
# determine context and create cache key
region_name = region_name or get_region()
env = get_environment(env, region_name=region_name)
region = env.region if env.region != REGION_LOCAL else region_name
key_elements = [service_name, client, env, region, endpoint_url, config, kwargs]
cache_key = "/".join([str(k) for k in key_elements])
# check cache first (most calls will be served from cache)
if cache and cache_key in BOTO_CLIENTS_CACHE:
return BOTO_CLIENTS_CACHE[cache_key]
with BOTO_CLIENT_CREATE_LOCK:
# check cache again within lock context to avoid race conditions
if cache and cache_key in BOTO_CLIENTS_CACHE:
return BOTO_CLIENTS_CACHE[cache_key]
# determine endpoint_url if it is not set explicitly
if not endpoint_url:
if is_local_env(env):
endpoint_url = get_local_service_url(service_name)
verify = False
backend_env_name = "%s_BACKEND" % service_name.upper()
backend_url = os.environ.get(backend_env_name, "").strip()
if backend_url:
endpoint_url = backend_url
# configure S3 path/host style addressing
if service_name == "s3":
if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
endpoint_url = endpoint_url.replace("://localhost", "://%s" % get_s3_hostname())
# create boto client or resource from potentially cached session
boto_session = get_boto3_session(cache=cache)
boto_config = config or botocore.client.Config()
boto_factory = boto_session.client if client else boto_session.resource
# To, prevent error "Connection pool is full, discarding connection ...",
# set the environment variable MAX_POOL_CONNECTIONS. Default is 150.
boto_config.max_pool_connections = MAX_POOL_CONNECTIONS
new_client = boto_factory(
service_name,
region_name=region,
endpoint_url=endpoint_url,
verify=verify,
config=boto_config,
**kwargs,
)
if cache:
BOTO_CLIENTS_CACHE[cache_key] = new_client
return new_client
def create_external_boto_client(
service_name,
client=True,
env=None,
region_name=None,
endpoint_url=None,
config: botocore.config.Config = None,
verify=False,
cache=True,
*args,
**kwargs,
):
return connect_to_service(
service_name,
client,
env,
region_name,
endpoint_url,
config,
verify,
cache,
aws_access_key_id="__test_call__",
aws_secret_access_key="__test_key__",
*args,
**kwargs,
)
def get_s3_hostname():
global CACHE_S3_HOSTNAME_DNS_STATUS
if CACHE_S3_HOSTNAME_DNS_STATUS is None:
try:
assert socket.gethostbyname(S3_VIRTUAL_HOSTNAME)
CACHE_S3_HOSTNAME_DNS_STATUS = True
except socket.error:
CACHE_S3_HOSTNAME_DNS_STATUS = False
if CACHE_S3_HOSTNAME_DNS_STATUS:
return S3_VIRTUAL_HOSTNAME
return LOCALHOST
# TODO remove from here in the future
def render_velocity_template(*args, **kwargs):
return templating.render_velocity_template(*args, **kwargs)
def generate_presigned_url(*args, **kwargs):
endpoint_url = kwargs.pop("endpoint_url", None)
s3_client = connect_to_service(
"s3",
endpoint_url=endpoint_url,
cache=False,
# Note: presigned URL needs to be created with (external) test credentials
aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
)
return s3_client.generate_presigned_url(*args, **kwargs)
def check_valid_region(headers):
"""Check whether a valid region is provided, and if not then raise an Exception."""
auth_header = headers.get("Authorization")
if not auth_header:
raise Exception('Unable to find "Authorization" header in request')
replaced = re.sub(r".*Credential=([^,]+),.*", r"\1", auth_header)
if auth_header == replaced:
raise Exception('Unable to find "Credential" section in "Authorization" header')
# Format is: <your-access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
# See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
parts = replaced.split("/")
region = parts[2]
if region not in config.VALID_REGIONS:
raise Exception('Invalid region specified in "Authorization" header: "%s"' % region)
def set_default_region_in_headers(headers, service=None, region=None):
# this should now be a no-op, as we support arbitrary regions and don't use a "default" region
# TODO: remove this function once the legacy USE_SINGLE_REGION config is removed
if not config.USE_SINGLE_REGION:
return
auth_header = headers.get("Authorization")
region = region or get_region()
if not auth_header:
if service:
headers["Authorization"] = mock_aws_request_headers(service, region_name=region)[
"Authorization"
]
return
replaced = re.sub(r"(.*Credential=[^/]+/[^/]+/)([^/])+/", r"\1%s/" % region, auth_header)
headers["Authorization"] = replaced
def fix_account_id_in_arns(response, colon_delimiter=":", existing=None, replace=None):
"""Fix the account ID in the ARNs returned in the given Flask response or string"""
existing = existing or ["123456789", "1234567890", "123456789012", MOTO_ACCOUNT_ID]
existing = existing if isinstance(existing, list) else [existing]
replace = replace or TEST_AWS_ACCOUNT_ID
is_str_obj = is_string_or_bytes(response)
content = to_str(response if is_str_obj else response._content)
replace = r"arn{col}aws{col}\1{col}\2{col}{acc}{col}".format(col=colon_delimiter, acc=replace)
for acc_id in existing:
regex = r"arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}".format(
col=colon_delimiter, acc=acc_id
)
content = re.sub(regex, replace, content)
if not is_str_obj:
response._content = content
response.headers["Content-Length"] = len(response._content)
return response
return content
def inject_test_credentials_into_env(env):
if ENV_ACCESS_KEY not in env and ENV_SECRET_KEY not in env:
env[ENV_ACCESS_KEY] = "test"
env[ENV_SECRET_KEY] = "test"
def inject_region_into_env(env, region):
env["AWS_REGION"] = region
def dynamodb_table_exists(table_name, client=None):
    """Return True if a DynamoDB table with the given name exists."""
    client = client or connect_to_service("dynamodb")
    paginator = client.get_paginator("list_tables")
    pages = paginator.paginate(PaginationConfig={"PageSize": 100})
    # iterate over all result pages, as the account may have many tables
    for page in pages:
        table_names = page["TableNames"]
        if to_str(table_name) in table_names:
            return True
    return False
def sqs_queue_url_for_arn(queue_arn):
    """Return the queue URL for the given SQS queue ARN (results are cached)."""
    if "://" in queue_arn:
        # input is already a queue URL - return it unchanged
        return queue_arn
    if queue_arn in SQS_ARN_TO_URL_CACHE:
        return SQS_ARN_TO_URL_CACHE[queue_arn]
    try:
        arn = parse_arn(queue_arn)
        region_name = arn["region"]
        queue_name = arn["resource"]
    except InvalidArnException:
        # input is a plain queue name rather than an ARN
        region_name = None
        queue_name = queue_arn
    sqs_client = connect_to_service("sqs", region_name=region_name)
    result = sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
    SQS_ARN_TO_URL_CACHE[queue_arn] = result
    return result
# TODO: remove and merge with sqs_queue_url_for_arn(..) above!!
def get_sqs_queue_url(queue_arn: str) -> str:
    """Alias for sqs_queue_url_for_arn(..), kept for backwards compatibility."""
    return sqs_queue_url_for_arn(queue_arn)
def extract_region_from_auth_header(headers: Dict[str, str], use_default=True) -> str:
    """Extract the region from the Credential scope of an AWS SigV4 Authorization header.

    If no region can be extracted, return the default region (when use_default
    is True) or None otherwise.
    """
    auth_header = headers.get("Authorization") or ""
    substituted = re.sub(r".*Credential=[^/]+/[^/]+/([^/]+)/.*", r"\1", auth_header)
    # re.sub returns the input unchanged if the pattern did not match
    region = None if substituted == auth_header else substituted
    if use_default:
        region = region or get_region()
    return region
def extract_access_key_id_from_auth_header(headers: Dict[str, str]) -> str:
    """Extract the access key ID from the Credential scope of an AWS SigV4 Authorization header."""
    auth_header = headers.get("Authorization") or ""
    substituted = re.sub(r".*Credential=([^/]+)/[^/]+/[^/]+/.*", r"\1", auth_header)
    # if nothing was substituted, the header contains no credential scope
    return None if substituted == auth_header else substituted
# TODO: extract ARN utils into separate file!
# module-level parser instance, shared across parse_arn(..) calls
_arn_parser = ArnParser()
class ArnData(TypedDict):
    # components of an ARN as returned by botocore's ArnParser
    partition: str
    service: str
    region: str
    account: str
    resource: str
def parse_arn(arn: str) -> ArnData:
    """
    Uses a botocore ArnParser to parse an arn.
    :param arn: the arn string to parse
    :returns: a dictionary containing the ARN components
    :raises InvalidArnException: if the arn is invalid
    """
    return _arn_parser.parse_arn(arn)
def extract_region_from_arn(arn: str) -> Optional[str]:
    """Return the region component of the given ARN, or None if the ARN is invalid."""
    try:
        return parse_arn(arn).get("region")
    except InvalidArnException:
        return None
def extract_service_from_arn(arn: str) -> Optional[str]:
    """Return the service component of the given ARN, or None if the ARN is invalid."""
    try:
        return parse_arn(arn).get("service")
    except InvalidArnException:
        return None
def get_account_id(account_id=None, env=None):
    """Return the AWS account ID to use (the explicit value, or the test account ID locally).

    :raises Exception: if no account ID can be determined for a non-local environment
    """
    if account_id:
        return account_id
    env = get_environment(env)
    if is_local_env(env):
        return os.environ["TEST_AWS_ACCOUNT_ID"]
    raise Exception("Unable to determine AWS account ID (%s, %s)" % (account_id, env))
def role_arn(role_name, account_id=None, env=None):
    """Return the IAM role ARN for the given role name (existing ARNs pass through)."""
    if not role_name:
        return role_name
    if role_name.startswith("arn:aws:iam::"):
        return role_name
    env = get_environment(env)
    account_id = get_account_id(account_id, env=env)
    return "arn:aws:iam::%s:role/%s" % (account_id, role_name)
def policy_arn(policy_name, account_id=None):
    """Return the IAM policy ARN for the given policy name (existing ARNs pass through)."""
    if ":policy/" in policy_name:
        return policy_name
    account_id = account_id or TEST_AWS_ACCOUNT_ID
    return "arn:aws:iam::{}:policy/{}".format(account_id, policy_name)
def iam_resource_arn(resource, role=None, env=None):
    """Return the ARN of the IAM role associated with the given resource."""
    env = get_environment(env)
    if not role:
        role = get_iam_role(resource, env=env)
    return role_arn(role_name=role, account_id=get_account_id())
def get_iam_role(resource, env=None):
    """Return the (synthetic) IAM role name used for the given resource."""
    # get_environment(..) is called for its validation side effect
    env = get_environment(env)
    return "role-%s" % resource
# TODO: remove this (can't statically define secret ARN because it includes a random suffix)
def secretsmanager_secret_arn(secret_id, account_id=None, region_name=None):
    """Return the ARN for the given Secrets Manager secret ID (existing ARNs pass through)."""
    if ":" in (secret_id or ""):
        return secret_id
    pattern = "arn:aws:secretsmanager:%s:%s:secret:%s"
    return _resource_arn(secret_id, pattern, account_id=account_id, region_name=region_name)
def cloudformation_stack_arn(stack_name, stack_id=None, account_id=None, region_name=None):
    """Return the ARN for the given CloudFormation stack name (and optional stack ID)."""
    stack_id = stack_id or "id-123"
    pattern = "arn:aws:cloudformation:%s:%s:stack/%s/{stack_id}".format(stack_id=stack_id)
    return _resource_arn(stack_name, pattern, account_id=account_id, region_name=region_name)
def cf_change_set_arn(change_set_name, change_set_id=None, account_id=None, region_name=None):
    """Return the ARN for the given CloudFormation change set name (and optional ID)."""
    change_set_id = change_set_id or "id-456"
    pattern = "arn:aws:cloudformation:%s:%s:changeSet/%s/{cs_id}".format(cs_id=change_set_id)
    return _resource_arn(change_set_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_table_arn(table_name, account_id=None, region_name=None):
    """Return the ARN for the given DynamoDB table name (table ARNs are accepted, too)."""
    table_name = table_name.split(":table/")[-1]
    pattern = "arn:aws:dynamodb:%s:%s:table/%s"
    return _resource_arn(table_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_stream_arn(table_name, latest_stream_label, account_id=None):
    """Return the ARN for the DynamoDB stream of the given table."""
    account_id = get_account_id(account_id)
    return "arn:aws:dynamodb:%s:%s:table/%s/stream/%s" % (
        get_region(),
        account_id,
        table_name,
        latest_stream_label,
    )
def cloudwatch_alarm_arn(alarm_name, account_id=None, region_name=None):
    """Return the ARN for the given CloudWatch alarm name."""
    pattern = "arn:aws:cloudwatch:%s:%s:alarm:%s"
    return _resource_arn(alarm_name, pattern, account_id=account_id, region_name=region_name)
def log_group_arn(group_name, account_id=None, region_name=None):
    """Return the ARN for the given CloudWatch Logs log group name."""
    pattern = "arn:aws:logs:%s:%s:log-group:%s"
    return _resource_arn(group_name, pattern, account_id=account_id, region_name=region_name)
def events_rule_arn(rule_name, account_id=None, region_name=None):
    """Return the ARN for the given EventBridge rule name."""
    pattern = "arn:aws:events:%s:%s:rule/%s"
    return _resource_arn(rule_name, pattern, account_id=account_id, region_name=region_name)
def lambda_function_arn(function_name, account_id=None, region_name=None):
    """Return the ARN for the given Lambda function name."""
    return lambda_function_or_layer_arn(
        "function", function_name, account_id=account_id, region_name=region_name
    )
def lambda_layer_arn(layer_name, version=None, account_id=None):
    """Return the ARN for the given Lambda layer name.

    :param layer_name: layer name, or full layer ARN (returned unchanged)
    :param version: optional layer version appended to the ARN
    :param account_id: optional account ID (defaults to the configured test account)
    """
    # bugfix: the "version" argument was previously dropped (version=None was
    # hard-coded in the delegated call), so versioned layer ARNs were impossible
    return lambda_function_or_layer_arn(
        "layer", layer_name, version=version, account_id=account_id
    )
def lambda_function_or_layer_arn(
    type, entity_name, version=None, account_id=None, region_name=None
):
    """Return the ARN for the given Lambda function or layer.

    :param type: either "function" or "layer"
    :param entity_name: name, alias-qualified name ("name:alias"), or full ARN
    :param version: optional version/qualifier appended to the ARN
    :param account_id: optional account ID (defaults to the configured account)
    :param region_name: optional region (defaults to the current region)
    :raises Exception: if an alias-qualified name is given and the alias cannot be resolved
    """
    pattern = "arn:aws:lambda:.*:.*:(function|layer):.*"
    if re.match(pattern, entity_name):
        # entity_name is already a full ARN - return it unchanged
        return entity_name
    if ":" in entity_name:
        # entity_name is qualified with an alias - resolve the alias to a version
        client = connect_to_service("lambda")
        entity_name, _, alias = entity_name.rpartition(":")
        try:
            alias_response = client.get_alias(FunctionName=entity_name, Name=alias)
            version = alias_response["FunctionVersion"]
        except Exception as e:
            msg = "Alias %s of %s not found" % (alias, entity_name)
            LOG.info(f"{msg}: {e}")
            raise Exception(msg)
    account_id = get_account_id(account_id)
    region_name = region_name or get_region()
    # turn the match pattern into a format string for the requested entity type
    pattern = re.sub(r"\([^\|]+\|.+\)", type, pattern)
    result = pattern.replace(".*", "%s") % (region_name, account_id, entity_name)
    if version:
        result = "%s:%s" % (result, version)
    return result
def lambda_function_name(name_or_arn):
    """Extract the function name from a Lambda function ARN (plain names pass through).

    :param name_or_arn: Lambda function name or full function ARN
    :raises ValueError: if an ARN of a non-Lambda service is given
    """
    if ":" not in name_or_arn:
        # plain function name - return unchanged
        return name_or_arn
    # improvement: parse the ARN only once (previously parsed twice)
    arn = parse_arn(name_or_arn)
    if arn["service"] != "lambda":
        raise ValueError("arn is not a lambda arn %s" % name_or_arn)
    # resource has the form "function:<name>[:<qualifier>]"
    return arn["resource"].split(":")[1]
def state_machine_arn(name, account_id=None, region_name=None):
    """Return the ARN for the given Step Functions state machine name."""
    pattern = "arn:aws:states:%s:%s:stateMachine:%s"
    return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def stepfunctions_activity_arn(name, account_id=None, region_name=None):
    """Return the ARN for the given Step Functions activity name."""
    pattern = "arn:aws:states:%s:%s:activity:%s"
    return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def fix_arn(arn):
    """Function that attempts to "canonicalize" the given ARN. This includes converting
    resource names to ARNs, replacing incorrect regions, account IDs, etc."""
    if arn.startswith("arn:aws:lambda"):
        parts = arn.split(":")
        # keep the region if it is valid, otherwise fall back to the current region
        region = parts[3] if parts[3] in config.VALID_REGIONS else get_region()
        return lambda_function_arn(lambda_function_name(arn), region_name=region)
    LOG.warning("Unable to fix/canonicalize ARN: %s", arn)
    return arn
def cognito_user_pool_arn(user_pool_id, account_id=None, region_name=None):
    """Return the ARN for the given Cognito user pool ID."""
    pattern = "arn:aws:cognito-idp:%s:%s:userpool/%s"
    return _resource_arn(user_pool_id, pattern, account_id=account_id, region_name=region_name)
def kinesis_stream_arn(stream_name, account_id=None, region_name=None):
    """Return the ARN for the given Kinesis stream name."""
    pattern = "arn:aws:kinesis:%s:%s:stream/%s"
    return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def elasticsearch_domain_arn(domain_name, account_id=None, region_name=None):
    """Return the ARN for the given Elasticsearch domain name (see also es_domain_arn)."""
    pattern = "arn:aws:es:%s:%s:domain/%s"
    return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def firehose_stream_arn(stream_name, account_id=None, region_name=None):
    """Return the ARN for the given Firehose delivery stream name."""
    pattern = "arn:aws:firehose:%s:%s:deliverystream/%s"
    return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def es_domain_arn(domain_name, account_id=None, region_name=None):
    """Return the ARN for the given ES domain name (duplicate of elasticsearch_domain_arn)."""
    pattern = "arn:aws:es:%s:%s:domain/%s"
    return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def kms_key_arn(key_id: str, account_id: str = None, region_name: str = None) -> str:
    """Return the ARN for the given KMS key ID."""
    pattern = "arn:aws:kms:%s:%s:key/%s"
    return _resource_arn(key_id, pattern, account_id=account_id, region_name=region_name)
def code_signing_arn(code_signing_id: str, account_id: str = None, region_name: str = None) -> str:
    """Return the ARN for the given Lambda code signing config ID."""
    pattern = "arn:aws:lambda:%s:%s:code-signing-config:%s"
    return _resource_arn(code_signing_id, pattern, account_id=account_id, region_name=region_name)
def ssm_parameter_arn(param_name: str, account_id: str = None, region_name: str = None) -> str:
    """Return the ARN for the given SSM parameter name (leading slashes are stripped)."""
    pattern = "arn:aws:ssm:%s:%s:parameter/%s"
    param_name = param_name.lstrip("/")
    return _resource_arn(param_name, pattern, account_id=account_id, region_name=region_name)
def s3_bucket_arn(bucket_name_or_arn: str, account_id=None):
    """Return the ARN for the given S3 bucket name (bucket ARNs are normalized)."""
    return "arn:aws:s3:::%s" % s3_bucket_name(bucket_name_or_arn)
def s3_bucket_name(bucket_name_or_arn: str) -> str:
    """Return the bucket name for the given bucket name or bucket ARN."""
    _, _, name = bucket_name_or_arn.rpartition(":::")
    return name
def _resource_arn(name: str, pattern: str, account_id: str = None, region_name: str = None) -> str:
    """Build an ARN from the given name and pattern (existing ARNs pass through).

    The pattern contains either two "%s" placeholders (account ID, name) or
    three (region, account ID, name).
    """
    if ":" in name:
        return name
    account_id = get_account_id(account_id)
    region_name = region_name or get_region()
    if len(pattern.split("%s")) == 3:
        # pattern without a region placeholder
        return pattern % (account_id, name)
    return pattern % (region_name, account_id, name)
def get_events_target_attributes(target):
    """Return only the EventBridge target attributes listed in EVENT_TARGET_PARAMETERS."""
    return dict_utils.pick_attributes(target, EVENT_TARGET_PARAMETERS)
def get_or_create_bucket(bucket_name, s3_client=None):
    """Return the existing S3 bucket with the given name, creating it if needed."""
    s3_client = s3_client or connect_to_service("s3")
    try:
        return s3_client.head_bucket(Bucket=bucket_name)
    except Exception:
        # head_bucket raises if the bucket does not exist - create it
        return s3_client.create_bucket(Bucket=bucket_name)
def create_sqs_queue(queue_name, env=None):
    """Create an SQS queue with the given name and return the API response."""
    env = get_environment(env)
    # queue
    conn = connect_to_service("sqs", env=env)
    return conn.create_queue(QueueName=queue_name)
def sqs_queue_arn(queue_name, account_id=None, region_name=None):
    """Return the ARN for the given SQS queue name or queue URL."""
    account_id = get_account_id(account_id)
    region_name = region_name or get_region()
    # queue URLs end in /<queue_name> - extract the last path component
    queue_name = queue_name.split("/")[-1]
    return "arn:aws:sqs:%s:%s:%s" % (region_name, account_id, queue_name)
def apigateway_restapi_arn(api_id, account_id=None, region_name=None):
    """Return the ARN for the given API Gateway REST API ID."""
    account_id = get_account_id(account_id)
    region_name = region_name or get_region()
    return "arn:aws:apigateway:%s:%s:/restapis/%s" % (region_name, account_id, api_id)
def sqs_queue_name(queue_arn):
    """Return the queue name for the given SQS queue ARN (plain names pass through)."""
    if ":" in queue_arn:
        return parse_arn(queue_arn)["resource"]
    else:
        return queue_arn
def sns_topic_arn(topic_name, account_id=None):
    """Return the ARN for the given SNS topic name."""
    account_id = get_account_id(account_id)
    return "arn:aws:sns:%s:%s:%s" % (get_region(), account_id, topic_name)
def sqs_receive_message(queue_arn):
    """Receive a single batch of messages from the SQS queue with the given ARN."""
    region_name = extract_region_from_arn(queue_arn)
    client = connect_to_service("sqs", region_name=region_name)
    queue_url = get_sqs_queue_url(queue_arn)
    response = client.receive_message(QueueUrl=queue_url)
    return response
def firehose_name(firehose_arn):
    """Return the delivery stream name for the given Firehose stream ARN."""
    _, _, name = firehose_arn.rpartition("/")
    return name
def kinesis_stream_name(kinesis_arn):
    """Return the stream name for the given Kinesis stream ARN."""
    _, _, name = kinesis_arn.rpartition(":stream/")
    return name
def mock_aws_request_headers(service="dynamodb", region_name=None, access_key=None):
    """Return mock HTTP headers (incl. a SigV4-style Authorization header) for an AWS request.

    :param service: target AWS service, which also determines the Content-Type
    :param region_name: region embedded in the Credential scope (defaults to current region)
    :param access_key: access key embedded in the Credential scope (defaults to test key)
    """
    ctype = APPLICATION_AMZ_JSON_1_0
    if service == "kinesis":
        ctype = APPLICATION_AMZ_JSON_1_1
    elif service in ["sns", "sqs"]:
        ctype = APPLICATION_X_WWW_FORM_URLENCODED
    # TODO: consider adding an internal=False flag, to use INTERNAL_AWS_ACCESS_KEY_ID for internal calls here
    access_key = access_key or constants.TEST_AWS_ACCESS_KEY_ID
    region_name = region_name or get_region()
    headers = {
        "Content-Type": ctype,
        "Accept-Encoding": "identity",
        "X-Amz-Date": "20160623T103251Z",
        "Authorization": (
            "AWS4-HMAC-SHA256 "
            + "Credential=%s/20160623/%s/%s/aws4_request, "
            + "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"
        )
        % (access_key, region_name, service),
    }
    return headers
def inject_region_into_auth_headers(region, headers):
    """Replace the region inside the Credential scope of the Authorization header, if present."""
    authorization = headers.get("Authorization")
    if not authorization:
        return
    pattern = r"Credential=([^/]+)/([^/]+)/([^/]+)/"
    replacement = r"Credential=\1/\2/%s/" % region
    headers["Authorization"] = re.sub(pattern, replacement, authorization)
def dynamodb_get_item_raw(request):
    """Send a raw DynamoDB GetItem request and return the parsed JSON response."""
    headers = mock_aws_request_headers()
    headers["X-Amz-Target"] = "DynamoDB_20120810.GetItem"
    new_item = make_http_request(
        url=config.service_url("dynamodb"),
        method="POST",
        data=json.dumps(request),
        headers=headers,
    )
    new_item = new_item.text
    # empty response bodies are passed through as-is
    new_item = new_item and json.loads(new_item)
    return new_item
def create_dynamodb_table(
    table_name,
    partition_key,
    env=None,
    stream_view_type=None,
    region_name=None,
    client=None,
    sleep_after=2,
):
    """Utility method to create a DynamoDB table.

    :param table_name: name of the table to create
    :param partition_key: attribute name used as the HASH key (type "S")
    :param stream_view_type: if set, enables a stream with this view type
    :param sleep_after: seconds to sleep after creation (0 to disable)
    :return: the create_table response, or a table resource if the table already exists
    """
    dynamodb = client or connect_to_service(
        "dynamodb", env=env, client=True, region_name=region_name
    )
    stream_spec = {"StreamEnabled": False}
    key_schema = [{"AttributeName": partition_key, "KeyType": "HASH"}]
    attr_defs = [{"AttributeName": partition_key, "AttributeType": "S"}]
    if stream_view_type is not None:
        stream_spec = {"StreamEnabled": True, "StreamViewType": stream_view_type}
    table = None
    try:
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=key_schema,
            AttributeDefinitions=attr_defs,
            ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
            StreamSpecification=stream_spec,
        )
    except Exception as e:
        if "ResourceInUseException" in str(e):
            # Table already exists -> return table reference
            return connect_to_resource("dynamodb", env=env, region_name=region_name).Table(
                table_name
            )
        if "AccessDeniedException" in str(e):
            raise
    if sleep_after:
        # TODO: do we need this?
        time.sleep(sleep_after)
    return table
def get_apigateway_integration(api_id, method, path, env=None):
    """Return the API Gateway integration for the given API ID, method and resource path.

    :raises Exception: if no resource with the given path exists
    """
    apigateway = connect_to_service(service_name="apigateway", client=True, env=env)
    resources = apigateway.get_resources(restApiId=api_id, limit=100)
    resource_id = None
    for r in resources["items"]:
        if r["path"] == path:
            resource_id = r["id"]
    if not resource_id:
        raise Exception('Unable to find apigateway integration for path "%s"' % path)
    integration = apigateway.get_integration(
        restApiId=api_id, resourceId=resource_id, httpMethod=method
    )
    return integration
def get_apigateway_resource_for_path(api_id, path, parent=None, resources=None):
    """Recursively resolve the API Gateway resource for the given path.

    :param api_id: REST API ID (used to fetch resources if not provided)
    :param path: resource path, either as a string ("a/b") or a list of path parts
    :param parent: parent resource matched so far (internal recursion state)
    :param resources: list of resource dicts; fetched from the API if None
    :return: the matching resource dict, or None if no resource matches
    """
    if resources is None:
        apigateway = connect_to_service(service_name="apigateway")
        # bugfix: extract the "items" list - previously the raw response dict
        # was iterated, which yields its string keys instead of the resources
        resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]
    if not isinstance(path, list):
        path = path.split("/")
    if not path:
        return parent
    for resource in resources:
        if resource["pathPart"] == path[0] and (not parent or parent["id"] == resource["parentId"]):
            return get_apigateway_resource_for_path(
                api_id, path[1:], parent=resource, resources=resources
            )
    return None
def get_apigateway_path_for_resource(
    api_id, resource_id, path_suffix="", resources=None, region_name=None
):
    """Recursively build the full path ("/a/b") for the given API Gateway resource ID.

    :param path_suffix: path assembled so far (internal recursion state)
    :param resources: list of resource dicts; fetched from the API if None
    """
    if resources is None:
        apigateway = connect_to_service(service_name="apigateway", region_name=region_name)
        resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]
    target_resource = list(filter(lambda res: res["id"] == resource_id, resources))[0]
    path_part = target_resource.get("pathPart", "")
    if path_suffix:
        if path_part:
            path_suffix = "%s/%s" % (path_part, path_suffix)
    else:
        path_suffix = path_part
    parent_id = target_resource.get("parentId")
    if not parent_id:
        # reached the root resource - return the assembled path
        return "/%s" % path_suffix
    return get_apigateway_path_for_resource(
        api_id,
        parent_id,
        path_suffix=path_suffix,
        resources=resources,
        region_name=region_name,
    )
def create_api_gateway(
    name,
    description=None,
    resources=None,
    stage_name=None,
    enabled_api_keys=None,
    env=None,
    usage_plan_name=None,
    region_name=None,
    auth_creator_func=None,  # function that receives an api_id and returns an authorizer_id
):
    """Create an API Gateway REST API incl. resources, methods, integrations and a deployment.

    :param name: name of the REST API to create
    :param resources: dict mapping resource paths to lists of method definitions
    :param stage_name: deployment stage name (default: "testing")
    :param auth_creator_func: function that receives an api_id and returns an authorizer_id
    :return: the created REST API (response of create_rest_api)
    """
    if enabled_api_keys is None:
        enabled_api_keys = []
    client = connect_to_service("apigateway", env=env, region_name=region_name)
    resources = resources or []
    stage_name = stage_name or "testing"
    usage_plan_name = usage_plan_name or "Basic Usage"
    description = description or 'Test description for API "%s"' % name
    LOG.info('Creating API resources under API Gateway "%s".', name)
    api = client.create_rest_api(name=name, description=description)
    api_id = api["id"]
    auth_id = None
    if auth_creator_func:
        auth_id = auth_creator_func(api_id)
    resources_list = client.get_resources(restApiId=api_id)
    root_res_id = resources_list["items"][0]["id"]
    # add API resources and methods
    for path, methods in resources.items():
        # create resources recursively
        parent_id = root_res_id
        for path_part in path.split("/"):
            api_resource = client.create_resource(
                restApiId=api_id, parentId=parent_id, pathPart=path_part
            )
            parent_id = api_resource["id"]
        # add methods to the API resource
        for method in methods:
            kwargs = {"authorizerId": auth_id} if auth_id else {}
            client.put_method(
                restApiId=api_id,
                resourceId=api_resource["id"],
                httpMethod=method["httpMethod"],
                authorizationType=method.get("authorizationType") or "NONE",
                apiKeyRequired=method.get("apiKeyRequired") or False,
                requestParameters=method.get("requestParameters") or {},
                **kwargs,
            )
            # create integrations for this API resource/method
            integrations = method["integrations"]
            create_api_gateway_integrations(
                api_id,
                api_resource["id"],
                method,
                integrations,
                env=env,
                region_name=region_name,
            )
    # deploy the API gateway
    client.create_deployment(restApiId=api_id, stageName=stage_name)
    return api
def create_api_gateway_integrations(
    api_id, resource_id, method, integrations=None, env=None, region_name=None
):
    """Create integrations, integration responses and method responses for an API method."""
    if integrations is None:
        integrations = []
    client = connect_to_service("apigateway", env=env, region_name=region_name)
    for integration in integrations:
        req_templates = integration.get("requestTemplates") or {}
        res_templates = integration.get("responseTemplates") or {}
        success_code = integration.get("successCode") or "200"
        client_error_code = integration.get("clientErrorCode") or "400"
        server_error_code = integration.get("serverErrorCode") or "500"
        request_parameters = integration.get("requestParameters") or {}
        # create integration
        client.put_integration(
            restApiId=api_id,
            resourceId=resource_id,
            httpMethod=method["httpMethod"],
            integrationHttpMethod=method.get("integrationHttpMethod") or method["httpMethod"],
            type=integration["type"],
            uri=integration["uri"],
            requestTemplates=req_templates,
            requestParameters=request_parameters,
        )
        response_configs = [
            {"pattern": "^2.*", "code": success_code, "res_templates": res_templates},
            {"pattern": "^4.*", "code": client_error_code, "res_templates": {}},
            {"pattern": "^5.*", "code": server_error_code, "res_templates": {}},
        ]
        # create response configs
        for response_config in response_configs:
            # create integration response
            client.put_integration_response(
                restApiId=api_id,
                resourceId=resource_id,
                httpMethod=method["httpMethod"],
                statusCode=response_config["code"],
                responseTemplates=response_config["res_templates"],
                selectionPattern=response_config["pattern"],
            )
            # create method response
            client.put_method_response(
                restApiId=api_id,
                resourceId=resource_id,
                httpMethod=method["httpMethod"],
                statusCode=response_config["code"],
            )
def apigateway_invocations_arn(lambda_uri):
    """Return the API Gateway invocation ARN for the given Lambda function URI."""
    return "arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations" % (
        get_region(),
        lambda_uri,
    )
def get_opensearch_endpoint(domain_arn: str) -> str:
    """
    Get an OpenSearch cluster endpoint by describing the cluster associated with the domain_arn
    :param domain_arn: ARN of the cluster.
    :returns: cluster endpoint
    :raises: ValueError if the domain_arn is malformed
    """
    region_name = extract_region_from_arn(domain_arn)
    if region_name is None:
        raise ValueError("unable to parse region from opensearch domain ARN")
    opensearch_client = connect_to_service(service_name="opensearch", region_name=region_name)
    domain_name = domain_arn.rpartition("/")[2]
    info = opensearch_client.describe_domain(DomainName=domain_name)
    base_domain = info["DomainStatus"]["Endpoint"]
    # make sure the returned endpoint carries a scheme
    endpoint = base_domain if base_domain.startswith("http") else f"https://{base_domain}"
    return endpoint
def get_search_db_connection(endpoint: str, region_name: str):
    """
    Get a connection to an ElasticSearch or OpenSearch DB
    :param endpoint: cluster endpoint
    :param region_name: cluster region e.g. us-east-1
    """
    from opensearchpy import OpenSearch, RequestsHttpConnection
    from requests_aws4auth import AWS4Auth
    verify_certs = False
    use_ssl = False
    # use ssl?
    if "https://" in endpoint:
        use_ssl = True
        # TODO remove this condition once ssl certs are available for .es.localhost.localstack.cloud domains
        endpoint_netloc = urlparse(endpoint).netloc
        if not re.match(r"^.*(localhost(\.localstack\.cloud)?)(:\d+)?$", endpoint_netloc):
            verify_certs = True
    LOG.debug("Creating ES client with endpoint %s", endpoint)
    if ENV_ACCESS_KEY in os.environ and ENV_SECRET_KEY in os.environ:
        # sign requests with the AWS credentials from the environment
        access_key = os.environ.get(ENV_ACCESS_KEY)
        secret_key = os.environ.get(ENV_SECRET_KEY)
        session_token = os.environ.get(ENV_SESSION_TOKEN)
        awsauth = AWS4Auth(access_key, secret_key, region_name, "es", session_token=session_token)
        connection_class = RequestsHttpConnection
        return OpenSearch(
            hosts=[endpoint],
            verify_certs=verify_certs,
            use_ssl=use_ssl,
            connection_class=connection_class,
            http_auth=awsauth,
        )
    return OpenSearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl)
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
    """Create a Kinesis stream with the given name and number of shards.

    :param delete: if True, destroy an existing stream with the same name first
    :return: the KinesisStream wrapper object
    """
    env = get_environment(env)
    stream = KinesisStream(id=stream_name, num_shards=shards)
    conn = connect_to_service("kinesis", env=env)
    stream.connect(conn)
    if delete:
        run_safe(lambda: stream.destroy(), print_error=False)
    stream.create()
    # Note: Returning the stream without awaiting its creation (via wait_for()) to avoid API call timeouts/retries.
    return stream
def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None):
    """Fetch up to the latest `count` records from the given Kinesis shard.

    :param stream_name: name of the Kinesis stream
    :param shard_id: ID of the shard to read from
    :param count: maximum number of (most recent) records to return
    :param env: optional environment to connect with
    :return: list of records, with "Data" decoded to str where possible
    """
    kinesis = connect_to_service("kinesis", env=env)
    result = []
    response = kinesis.get_shard_iterator(
        StreamName=stream_name, ShardId=shard_id, ShardIteratorType="TRIM_HORIZON"
    )
    shard_iterator = response["ShardIterator"]
    while shard_iterator:
        records_response = kinesis.get_records(ShardIterator=shard_iterator)
        records = records_response["Records"]
        for record in records:
            try:
                record["Data"] = to_str(record["Data"])
            except Exception:
                # keep payloads that cannot be decoded as-is
                pass
        result.extend(records)
        # stop paging once a batch comes back empty
        shard_iterator = records_response["NextShardIterator"] if records else False
    # keep only the last `count` records (fix: previously trimmed one element
    # at a time via list.pop(0), which is quadratic for large result sets)
    if len(result) > count:
        result = result[len(result) - count :]
    return result
def get_stack_details(stack_name, region_name=None):
    """Return the CloudFormation stack details for the given name, or None if not found."""
    cloudformation = connect_to_service("cloudformation", region_name=region_name)
    stacks = cloudformation.describe_stacks(StackName=stack_name)
    for stack in stacks["Stacks"]:
        if stack["StackName"] == stack_name:
            return stack
def deploy_cf_stack(stack_name, template_body):
    """Create a CloudFormation stack and wait for its deployment to complete."""
    cfn = connect_to_service("cloudformation")
    cfn.create_stack(StackName=stack_name, TemplateBody=template_body)
    # wait for deployment to finish
    return await_stack_completion(stack_name)
def await_stack_status(stack_name, expected_statuses, retries=20, sleep=2, region_name=None):
    """Wait (with retries) until the given stack reaches one of the expected statuses.

    :param expected_statuses: single status string, or list of accepted statuses
    :return: the stack details once an expected status is reached
    """
    def check_stack():
        # raises to trigger another retry if the stack is not yet in an expected status
        stack = get_stack_details(stack_name, region_name=region_name)
        if stack["StackStatus"] not in expected_statuses:
            raise Exception(
                'Status "%s" for stack "%s" not in expected list: %s'
                % (stack["StackStatus"], stack_name, expected_statuses)
            )
        return stack
    expected_statuses = (
        expected_statuses if isinstance(expected_statuses, list) else [expected_statuses]
    )
    return retry(check_stack, retries, sleep)
def await_stack_completion(stack_name, retries=20, sleep=2, statuses=None, region_name=None):
    """Wait until the stack reaches a terminal CREATE/UPDATE/DELETE COMPLETE status."""
    statuses = statuses or ["CREATE_COMPLETE", "UPDATE_COMPLETE", "DELETE_COMPLETE"]
    return await_stack_status(
        stack_name, statuses, retries=retries, sleep=sleep, region_name=region_name
    )
import json
import logging
import os
import re
import socket
import sys
import threading
import time
from typing import Dict, Optional, Union
from urllib.parse import urlparse
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
import boto3
import botocore
import botocore.config
from botocore.utils import ArnParser, InvalidArnException
from localstack import config, constants
from localstack.constants import (
APPLICATION_AMZ_JSON_1_0,
APPLICATION_AMZ_JSON_1_1,
APPLICATION_X_WWW_FORM_URLENCODED,
ENV_DEV,
INTERNAL_AWS_ACCESS_KEY_ID,
LOCALHOST,
MAX_POOL_CONNECTIONS,
MOTO_ACCOUNT_ID,
REGION_LOCAL,
S3_VIRTUAL_HOSTNAME,
TEST_AWS_ACCESS_KEY_ID,
TEST_AWS_ACCOUNT_ID,
TEST_AWS_SECRET_ACCESS_KEY,
)
from localstack.utils.aws import templating
from localstack.utils.aws.aws_models import KinesisStream
from localstack.utils.common import (
get_service_protocol,
is_string,
is_string_or_bytes,
make_http_request,
retry,
run_safe,
to_str,
)
from localstack.utils.generic import dict_utils
# environment variable names for AWS credentials
ENV_ACCESS_KEY = "AWS_ACCESS_KEY_ID"
ENV_SECRET_KEY = "AWS_SECRET_ACCESS_KEY"
ENV_SESSION_TOKEN = "AWS_SESSION_TOKEN"
LOG = logging.getLogger(__name__)
# cached region of the local boto3 session (lazily initialized in get_local_region)
LOCAL_REGION = None
# if True, a fresh session is created for every boto3 connection (see get_boto3_session)
CREATE_NEW_SESSION_PER_BOTO3_CONNECTION = False
# NOTE(review): not referenced in this file - presumably used elsewhere; verify before removal
INITIAL_BOTO3_SESSION = None
# cache of created boto3 clients/resources, keyed by connection parameters
BOTO_CLIENTS_CACHE = {}
DEFAULT_TIMER_LOOP_SECONDS = 60 * 50
# cache of SQS queue ARN -> queue URL mappings (see sqs_queue_url_for_arn)
SQS_ARN_TO_URL_CACHE = {}
# EventBridge target attributes picked by get_events_target_attributes
EVENT_TARGET_PARAMETERS = ["$.SqsParameters", "$.KinesisParameters"]
# cached result of the S3 virtual hostname DNS check (see get_s3_hostname)
CACHE_S3_HOSTNAME_DNS_STATUS = None
# lock to avoid race conditions when creating boto clients (see connect_to_service)
BOTO_CLIENT_CREATE_LOCK = threading.RLock()
class Environment(object):
    """Represents a target environment, i.e., a region plus a stage prefix."""
    def __init__(self, region=None, prefix=None):
        # target is the runtime environment to use, e.g.,
        # 'local' for local mode
        self.region = region or get_local_region()
        # prefix can be 'prod', 'stg', 'uat-1', etc.
        self.prefix = prefix
    def apply_json(self, j):
        """Update this environment's attributes from a JSON string or dict."""
        if isinstance(j, str):
            j = json.loads(j)
        self.__dict__.update(j)
    @staticmethod
    def from_string(s):
        """Parse an environment from a "<region>:<prefix>" (or plain "<prefix>") string."""
        parts = s.split(":")
        if len(parts) == 1:
            if s in PREDEFINED_ENVIRONMENTS:
                return PREDEFINED_ENVIRONMENTS[s]
            # a plain prefix uses the local region
            parts = [get_local_region(), s]
        if len(parts) > 2:
            raise Exception('Invalid environment string "%s"' % s)
        region = parts[0]
        prefix = parts[1]
        return Environment(region=region, prefix=prefix)
    @staticmethod
    def from_json(j):
        """Create an environment from a JSON-like dict (or an object with to_dict())."""
        if not isinstance(j, dict):
            j = j.to_dict()
        result = Environment()
        result.apply_json(j)
        return result
    def __str__(self):
        return "%s:%s" % (self.region, self.prefix)
# environments that can be referenced by their prefix alone (e.g., the dev environment)
PREDEFINED_ENVIRONMENTS = {ENV_DEV: Environment(region=REGION_LOCAL, prefix=ENV_DEV)}
def get_environment(env=None, region_name=None):
    """Normalize the given env (None, string, or Environment) into an Environment object.

    :param env: None (use $ENV or the dev default), an environment string, or an Environment
    :param region_name: optional region override applied to the resulting environment
    :raises Exception: if env has an unsupported type or the region cannot be determined
    """
    if not env:
        if "ENV" in os.environ:
            env = os.environ["ENV"]
        else:
            env = ENV_DEV
    elif not is_string(env) and not isinstance(env, Environment):
        raise Exception("Invalid environment: %s" % env)
    if is_string(env):
        env = Environment.from_string(env)
    if region_name:
        env.region = region_name
    if not env.region:
        raise Exception('Invalid region in environment: "%s"' % env)
    return env
def is_local_env(env):
    """Return True if the given environment refers to the local (dev) environment."""
    return not env or env.region == REGION_LOCAL or env.prefix == ENV_DEV
class Boto3Session(boto3.session.Session):
    """Boto3 session that routes clients/resources through the local connect_to_* helpers."""
    def resource(self, service, *args, **kwargs):
        self._fix_endpoint(kwargs)
        return connect_to_resource(service, *args, **kwargs)
    def client(self, service, *args, **kwargs):
        self._fix_endpoint(kwargs)
        return connect_to_service(service, *args, **kwargs)
    def _fix_endpoint(self, kwargs):
        # drop real AWS endpoint URLs, so that the local endpoint is used instead
        if "amazonaws.com" in kwargs.get("endpoint_url", ""):
            kwargs.pop("endpoint_url")
def get_boto3_session(cache=True):
    """Return a boto3 session - a fresh Session if caching is disabled, else the boto3 module."""
    if not cache or CREATE_NEW_SESSION_PER_BOTO3_CONNECTION:
        return boto3.session.Session()
    # return default session
    return boto3
def get_region():
    """Return the AWS region of the current request context, or the static default region."""
    # Note: leave import here to avoid import errors (e.g., "flask") for CLI commands
    from localstack.utils.aws.request_context import get_region_from_request_context
    region = get_region_from_request_context()
    if region:
        return region
    # fall back to returning static pre-defined region
    return get_local_region()
def get_partition(region_name: str = None):
    """Return the AWS partition (e.g., "aws", "aws-cn") for the given or current region."""
    region_name = region_name or get_region()
    return boto3.session.Session().get_partition_for_region(region_name)
def get_local_region():
    """Return the configured default region, falling back to the cached boto3 session region."""
    global LOCAL_REGION
    if LOCAL_REGION is None:
        session = boto3.session.Session()
        LOCAL_REGION = session.region_name or ""
    return config.DEFAULT_REGION or LOCAL_REGION
def is_internal_call_context(headers):
    """Return True if the given headers indicate an internal (LocalStack-internal) call."""
    auth_header = headers.get("Authorization") or ""
    return get_internal_credential() in auth_header
def get_internal_credential():
    """Return the Credential scope marker used to tag internal calls."""
    return "Credential=%s/" % INTERNAL_AWS_ACCESS_KEY_ID
def set_internal_auth(headers):
    """Rewrite the Authorization header to mark the request as an internal call."""
    authorization = headers.get("Authorization") or ""
    if authorization.startswith("AWS "):
        # Cover Non HMAC Authentication
        authorization = re.sub(
            r"AWS [^/]+",
            "AWS %s" % get_internal_credential(),
            authorization,
        )
    else:
        authorization = re.sub(
            r"Credential=[^/]+/",
            get_internal_credential(),
            authorization,
        )
    headers["Authorization"] = authorization
    return headers
def get_local_service_url(service_name_or_port: Union[str, int]) -> str:
    """Return the local endpoint URL for the given service name or port number."""
    if isinstance(service_name_or_port, int):
        return f"{get_service_protocol()}://{LOCALHOST}:{service_name_or_port}"
    service_name = service_name_or_port
    # map aliases to their canonical service names
    if service_name == "s3api":
        service_name = "s3"
    elif service_name == "runtime.sagemaker":
        service_name = "sagemaker-runtime"
    return config.service_url(service_name)
def connect_to_resource(
    service_name, env=None, region_name=None, endpoint_url=None, *args, **kwargs
):
    """Generic method to obtain a boto3 *resource* for the given AWS service.

    :param service_name: name of the AWS service, e.g., "s3"
    :param env: optional environment (name or Environment object)
    :param region_name: optional region
    :param endpoint_url: optional endpoint URL override
    Additional keyword arguments are forwarded to connect_to_service(..).
    """
    # bugfix: **kwargs were previously accepted but silently dropped, so
    # options like config=.. or credentials never reached connect_to_service
    return connect_to_service(
        service_name,
        client=False,
        env=env,
        region_name=region_name,
        endpoint_url=endpoint_url,
        **kwargs,
    )
def connect_to_service(
    service_name,
    client=True,
    env=None,
    region_name=None,
    endpoint_url=None,
    config: botocore.config.Config = None,
    verify=False,
    cache=True,
    *args,
    **kwargs,
):
    """Generic method to obtain an AWS service client (or resource, if client=False).

    Created clients are cached, keyed by service, env, region, endpoint, config
    and kwargs, unless cache=False is passed. Note: the "config" parameter
    shadows the module-level localstack config import within this function.
    """
    # determine context and create cache key
    region_name = region_name or get_region()
    env = get_environment(env, region_name=region_name)
    region = env.region if env.region != REGION_LOCAL else region_name
    key_elements = [service_name, client, env, region, endpoint_url, config, kwargs]
    cache_key = "/".join([str(k) for k in key_elements])
    # check cache first (most calls will be served from cache)
    if cache and cache_key in BOTO_CLIENTS_CACHE:
        return BOTO_CLIENTS_CACHE[cache_key]
    with BOTO_CLIENT_CREATE_LOCK:
        # check cache again within lock context to avoid race conditions
        if cache and cache_key in BOTO_CLIENTS_CACHE:
            return BOTO_CLIENTS_CACHE[cache_key]
        # determine endpoint_url if it is not set explicitly
        if not endpoint_url:
            if is_local_env(env):
                endpoint_url = get_local_service_url(service_name)
                verify = False
            backend_env_name = "%s_BACKEND" % service_name.upper()
            backend_url = os.environ.get(backend_env_name, "").strip()
            if backend_url:
                endpoint_url = backend_url
        # configure S3 path/host style addressing
        if service_name == "s3":
            if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
                endpoint_url = endpoint_url.replace("://localhost", "://%s" % get_s3_hostname())
        # create boto client or resource from potentially cached session
        boto_session = get_boto3_session(cache=cache)
        boto_config = config or botocore.client.Config()
        boto_factory = boto_session.client if client else boto_session.resource
        # To, prevent error "Connection pool is full, discarding connection ...",
        # set the environment variable MAX_POOL_CONNECTIONS. Default is 150.
        boto_config.max_pool_connections = MAX_POOL_CONNECTIONS
        new_client = boto_factory(
            service_name,
            region_name=region,
            endpoint_url=endpoint_url,
            verify=verify,
            config=boto_config,
            **kwargs,
        )
        if cache:
            BOTO_CLIENTS_CACHE[cache_key] = new_client
        return new_client
def create_external_boto_client(
    service_name,
    client=True,
    env=None,
    region_name=None,
    endpoint_url=None,
    config: botocore.config.Config = None,
    verify=False,
    cache=True,
    *args,
    **kwargs,
):
    """Like connect_to_service(..), but using dummy "external" test credentials."""
    return connect_to_service(
        service_name,
        client,
        env,
        region_name,
        endpoint_url,
        config,
        verify,
        cache,
        aws_access_key_id="__test_call__",
        aws_secret_access_key="__test_key__",
        *args,
        **kwargs,
    )
def get_s3_hostname():
    """Return the S3 hostname to use - the virtual hostname if DNS-resolvable, else localhost."""
    global CACHE_S3_HOSTNAME_DNS_STATUS
    if CACHE_S3_HOSTNAME_DNS_STATUS is None:
        # probe DNS resolution only once, then cache the outcome
        try:
            assert socket.gethostbyname(S3_VIRTUAL_HOSTNAME)
            CACHE_S3_HOSTNAME_DNS_STATUS = True
        except socket.error:
            CACHE_S3_HOSTNAME_DNS_STATUS = False
    if CACHE_S3_HOSTNAME_DNS_STATUS:
        return S3_VIRTUAL_HOSTNAME
    return LOCALHOST
# TODO remove from here in the future
def render_velocity_template(*args, **kwargs):
    """Thin wrapper around templating.render_velocity_template (kept for compatibility)."""
    return templating.render_velocity_template(*args, **kwargs)
def generate_presigned_url(*args, **kwargs):
    """Generate an S3 presigned URL, using the external test credentials."""
    endpoint_url = kwargs.pop("endpoint_url", None)
    s3_client = connect_to_service(
        "s3",
        endpoint_url=endpoint_url,
        cache=False,
        # Note: presigned URL needs to be created with (external) test credentials
        aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
    )
    return s3_client.generate_presigned_url(*args, **kwargs)
def check_valid_region(headers):
    """Validate the region named in the request's SigV4 Authorization header.

    Raises:
        Exception: if the header or its Credential section is missing, or if
            the region it names is not in config.VALID_REGIONS.
    """
    auth_header = headers.get("Authorization")
    if not auth_header:
        raise Exception('Unable to find "Authorization" header in request')
    credential_scope = re.sub(r".*Credential=([^,]+),.*", r"\1", auth_header)
    if credential_scope == auth_header:
        # re.sub left the string untouched -> the pattern did not match
        raise Exception('Unable to find "Credential" section in "Authorization" header')
    # Credential scope format:
    # <access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
    # See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
    region = credential_scope.split("/")[2]
    if region not in config.VALID_REGIONS:
        raise Exception(f'Invalid region specified in "Authorization" header: "{region}"')
def set_default_region_in_headers(headers, service=None, region=None):
    """Ensure the Authorization header carries a region (single-region mode only).

    If no Authorization header exists, a mock one is created (when *service*
    is given); otherwise the region inside the credential scope is rewritten.
    """
    # this should now be a no-op, as we support arbitrary regions and don't use a "default" region
    if not config.USE_SINGLE_REGION:
        return
    auth_header = headers.get("Authorization")
    region = region or get_region()
    if not auth_header:
        if service:
            headers["Authorization"] = mock_aws_request_headers(service, region_name=region)[
                "Authorization"
            ]
        return
    # Replace the <region> component of "Credential=<key>/<date>/<region>/..."
    replaced = re.sub(r"(.*Credential=[^/]+/[^/]+/)([^/])+/", r"\1%s/" % region, auth_header)
    headers["Authorization"] = replaced
def fix_account_id_in_arns(response, colon_delimiter=":", existing=None, replace=None):
    """Replace placeholder/moto account IDs inside ARNs with the test account ID.

    Args:
        response: a string/bytes payload, or a response-like object with a
            ``_content`` attribute (the latter is mutated in place).
        colon_delimiter: delimiter between ARN components (":" normally;
            callers may pass an encoded variant for URL-encoded payloads).
        existing: account ID or list of account IDs to replace; defaults to
            common placeholder IDs plus MOTO_ACCOUNT_ID.
        replace: replacement account ID; defaults to TEST_AWS_ACCOUNT_ID.

    Returns:
        The patched string when a string/bytes was passed in, otherwise the
        mutated response object (with Content-Length updated).
    """
    existing = existing or ["123456789", "1234567890", "123456789012", MOTO_ACCOUNT_ID]
    existing = existing if isinstance(existing, list) else [existing]
    replace = replace or TEST_AWS_ACCOUNT_ID
    is_str_obj = is_string_or_bytes(response)
    content = to_str(response if is_str_obj else response._content)
    # Build the replacement template once; \1/\2 keep service and region intact.
    replace = r"arn{col}aws{col}\1{col}\2{col}{acc}{col}".format(col=colon_delimiter, acc=replace)
    for acc_id in existing:
        regex = r"arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}".format(
            col=colon_delimiter, acc=acc_id
        )
        content = re.sub(regex, replace, content)
    if not is_str_obj:
        response._content = content
        response.headers["Content-Length"] = len(response._content)
        return response
    return content
def inject_test_credentials_into_env(env):
    """Set dummy AWS credentials in *env*.

    Note: keys are injected only when BOTH the access key and the secret key
    are absent, so a partially configured environment is left untouched.
    """
    if ENV_ACCESS_KEY not in env and ENV_SECRET_KEY not in env:
        env[ENV_ACCESS_KEY] = "test"
        env[ENV_SECRET_KEY] = "test"
def inject_region_into_env(env, region):
    """Store *region* under the AWS_REGION key of the given environment dict."""
    env.update(AWS_REGION=region)
def dynamodb_table_exists(table_name, client=None):
    """Return True if the given DynamoDB table exists in the target account."""
    dynamodb = client or connect_to_service("dynamodb")
    pages = dynamodb.get_paginator("list_tables").paginate(
        PaginationConfig={"PageSize": 100}
    )
    wanted = to_str(table_name)
    # Short-circuits as soon as a page contains the table name.
    return any(wanted in page["TableNames"] for page in pages)
def sqs_queue_url_for_arn(queue_arn):
    """Resolve an SQS queue ARN to its queue URL (results are cached).

    Strings containing "://" are assumed to already be URLs and returned
    unchanged. If the input cannot be parsed as an ARN, it is treated as a
    plain queue name in the default region.
    """
    if "://" in queue_arn:
        return queue_arn
    if queue_arn in SQS_ARN_TO_URL_CACHE:
        return SQS_ARN_TO_URL_CACHE[queue_arn]
    try:
        arn = parse_arn(queue_arn)
        region_name = arn["region"]
        queue_name = arn["resource"]
    except InvalidArnException:
        # Not an ARN -> fall back to treating it as a queue name
        region_name = None
        queue_name = queue_arn
    sqs_client = connect_to_service("sqs", region_name=region_name)
    result = sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
    SQS_ARN_TO_URL_CACHE[queue_arn] = result
    return result
def get_sqs_queue_url(queue_arn: str) -> str:
return sqs_queue_url_for_arn(queue_arn)
def extract_region_from_auth_header(headers: Dict[str, str], use_default=True) -> str:
    """Extract the AWS region from the SigV4 Authorization header.

    Returns the region from the credential scope; if the header is missing or
    unparsable, returns the default region (when use_default) or None.
    """
    auth_header = headers.get("Authorization") or ""
    extracted = re.sub(r".*Credential=[^/]+/[^/]+/([^/]+)/.*", r"\1", auth_header)
    # An unchanged string means the pattern did not match.
    region = None if extracted == auth_header else extracted
    if use_default and not region:
        region = get_region()
    return region
def extract_access_key_id_from_auth_header(headers: Dict[str, str]) -> str:
    """Extract the AWS access key ID from the SigV4 Authorization header.

    Returns None if the header is missing or cannot be parsed.
    """
    auth_header = headers.get("Authorization") or ""
    extracted = re.sub(r".*Credential=([^/]+)/[^/]+/[^/]+/.*", r"\1", auth_header)
    # An unchanged string means the pattern did not match.
    return None if extracted == auth_header else extracted
_arn_parser = ArnParser()
class ArnData(TypedDict):
    """Components of an AWS ARN, as returned by parse_arn()."""

    partition: str  # e.g. "aws"
    service: str  # e.g. "s3", "lambda", "sqs"
    region: str  # may be empty for global services
    account: str  # account ID component
    resource: str  # service-specific resource part
def parse_arn(arn: str) -> ArnData:
    """Parse *arn* into its components via the shared ArnParser.

    Raises:
        InvalidArnException: if the string is not a valid ARN (callers in
            this module rely on catching this).
    """
    return _arn_parser.parse_arn(arn)
def extract_region_from_arn(arn: str) -> Optional[str]:
    """Return the region component of *arn*, or None if it is not a valid ARN."""
    try:
        parsed = parse_arn(arn)
    except InvalidArnException:
        return None
    return parsed.get("region")
def extract_service_from_arn(arn: str) -> Optional[str]:
    """Return the service component of *arn*, or None if it is not a valid ARN."""
    try:
        parsed = parse_arn(arn)
    except InvalidArnException:
        return None
    return parsed.get("service")
def get_account_id(account_id=None, env=None):
if account_id:
return account_id
env = get_environment(env)
if is_local_env(env):
return os.environ["TEST_AWS_ACCOUNT_ID"]
raise Exception("Unable to determine AWS account ID (%s, %s)" % (account_id, env))
def role_arn(role_name, account_id=None, env=None):
    """Expand an IAM role name into a full role ARN.

    Falsy values and strings that already look like role ARNs are returned
    unchanged.
    """
    if not role_name or role_name.startswith("arn:aws:iam::"):
        return role_name
    env = get_environment(env)
    resolved_account = get_account_id(account_id, env=env)
    return "arn:aws:iam::%s:role/%s" % (resolved_account, role_name)
def policy_arn(policy_name, account_id=None):
    """Expand an IAM policy name into a full policy ARN.

    Strings already containing ":policy/" are returned unchanged.
    """
    if ":policy/" in policy_name:
        return policy_name
    return "arn:aws:iam::%s:policy/%s" % (account_id or TEST_AWS_ACCOUNT_ID, policy_name)
def iam_resource_arn(resource, role=None, env=None):
env = get_environment(env)
if not role:
role = get_iam_role(resource, env=env)
return role_arn(role_name=role, account_id=get_account_id())
def get_iam_role(resource, env=None):
    """Return the conventional IAM role name ("role-<resource>") for a resource.

    NOTE(review): the normalized *env* value is unused afterwards; the
    get_environment() call presumably remains for validation side effects --
    confirm before removing.
    """
    env = get_environment(env)
    return "role-%s" % resource
def secretsmanager_secret_arn(secret_id, account_id=None, region_name=None):
if ":" in (secret_id or ""):
return secret_id
pattern = "arn:aws:secretsmanager:%s:%s:secret:%s"
return _resource_arn(secret_id, pattern, account_id=account_id, region_name=region_name)
def cloudformation_stack_arn(stack_name, stack_id=None, account_id=None, region_name=None):
stack_id = stack_id or "id-123"
pattern = "arn:aws:cloudformation:%s:%s:stack/%s/{stack_id}".format(stack_id=stack_id)
return _resource_arn(stack_name, pattern, account_id=account_id, region_name=region_name)
def cf_change_set_arn(change_set_name, change_set_id=None, account_id=None, region_name=None):
change_set_id = change_set_id or "id-456"
pattern = "arn:aws:cloudformation:%s:%s:changeSet/%s/{cs_id}".format(cs_id=change_set_id)
return _resource_arn(change_set_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_table_arn(table_name, account_id=None, region_name=None):
table_name = table_name.split(":table/")[-1]
pattern = "arn:aws:dynamodb:%s:%s:table/%s"
return _resource_arn(table_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_stream_arn(table_name, latest_stream_label, account_id=None):
account_id = get_account_id(account_id)
return "arn:aws:dynamodb:%s:%s:table/%s/stream/%s" % (
get_region(),
account_id,
table_name,
latest_stream_label,
)
def cloudwatch_alarm_arn(alarm_name, account_id=None, region_name=None):
pattern = "arn:aws:cloudwatch:%s:%s:alarm:%s"
return _resource_arn(alarm_name, pattern, account_id=account_id, region_name=region_name)
def log_group_arn(group_name, account_id=None, region_name=None):
pattern = "arn:aws:logs:%s:%s:log-group:%s"
return _resource_arn(group_name, pattern, account_id=account_id, region_name=region_name)
def events_rule_arn(rule_name, account_id=None, region_name=None):
pattern = "arn:aws:events:%s:%s:rule/%s"
return _resource_arn(rule_name, pattern, account_id=account_id, region_name=region_name)
def lambda_function_arn(function_name, account_id=None, region_name=None):
return lambda_function_or_layer_arn(
"function", function_name, account_id=account_id, region_name=region_name
)
def lambda_layer_arn(layer_name, version=None, account_id=None):
    """Return the ARN for a Lambda layer.

    Fix: the *version* argument is now forwarded. Previously the call passed
    the literal `version=None`, silently dropping the caller's version, so a
    versioned layer ARN could never be produced.
    """
    return lambda_function_or_layer_arn(
        "layer", layer_name, version=version, account_id=account_id
    )
def lambda_function_or_layer_arn(
    type, entity_name, version=None, account_id=None, region_name=None
):
    """Return the ARN for a Lambda function or layer.

    Args:
        type: "function" or "layer" (name kept as-is for API compatibility,
            even though it shadows the builtin).
        entity_name: plain name, "<name>:<alias>" (alias resolved to its
            function version via get_alias), or a full ARN (returned as-is).
        version: optional version appended as a ":<version>" suffix.

    Raises:
        Exception: if an alias suffix is given but cannot be resolved.
    """
    pattern = "arn:aws:lambda:.*:.*:(function|layer):.*"
    if re.match(pattern, entity_name):
        # Already a full ARN
        return entity_name
    if ":" in entity_name:
        # "<name>:<alias>" -> resolve the alias to its function version
        client = connect_to_service("lambda")
        entity_name, _, alias = entity_name.rpartition(":")
        try:
            alias_response = client.get_alias(FunctionName=entity_name, Name=alias)
            version = alias_response["FunctionVersion"]
        except Exception as e:
            msg = "Alias %s of %s not found" % (alias, entity_name)
            LOG.info(f"{msg}: {e}")
            # Fix: chain the original error so the root cause is not lost
            raise Exception(msg) from e
    account_id = get_account_id(account_id)
    region_name = region_name or get_region()
    # Substitute "(function|layer)" in the pattern with the concrete type,
    # then fill in region/account/name via the "%s" placeholders.
    pattern = re.sub(r"\([^\|]+\|.+\)", type, pattern)
    result = pattern.replace(".*", "%s") % (region_name, account_id, entity_name)
    if version:
        result = "%s:%s" % (result, version)
    return result
def lambda_function_name(name_or_arn):
    """Return the bare function name for a Lambda function name or ARN.

    Raises:
        ValueError: if an ARN is given whose service is not "lambda".
    """
    if ":" not in name_or_arn:
        return name_or_arn
    arn = parse_arn(name_or_arn)
    if arn["service"] != "lambda":
        raise ValueError("arn is not a lambda arn %s" % name_or_arn)
    # Resource part looks like "function:<name>[...]"
    return arn["resource"].split(":")[1]
def state_machine_arn(name, account_id=None, region_name=None):
pattern = "arn:aws:states:%s:%s:stateMachine:%s"
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def stepfunctions_activity_arn(name, account_id=None, region_name=None):
pattern = "arn:aws:states:%s:%s:activity:%s"
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def fix_arn(arn):
    """Canonicalize a Lambda ARN, patching in a valid region if necessary.

    Non-Lambda ARNs are returned unchanged (with a warning logged).
    """
    if arn.startswith("arn:aws:lambda"):
        parts = arn.split(":")
        # Fall back to the default region if the ARN's region is not valid
        region = parts[3] if parts[3] in config.VALID_REGIONS else get_region()
        return lambda_function_arn(lambda_function_name(arn), region_name=region)
    LOG.warning("Unable to fix/canonicalize ARN: %s", arn)
    return arn
def cognito_user_pool_arn(user_pool_id, account_id=None, region_name=None):
pattern = "arn:aws:cognito-idp:%s:%s:userpool/%s"
return _resource_arn(user_pool_id, pattern, account_id=account_id, region_name=region_name)
def kinesis_stream_arn(stream_name, account_id=None, region_name=None):
pattern = "arn:aws:kinesis:%s:%s:stream/%s"
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def elasticsearch_domain_arn(domain_name, account_id=None, region_name=None):
pattern = "arn:aws:es:%s:%s:domain/%s"
return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def firehose_stream_arn(stream_name, account_id=None, region_name=None):
pattern = "arn:aws:firehose:%s:%s:deliverystream/%s"
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def es_domain_arn(domain_name, account_id=None, region_name=None):
pattern = "arn:aws:es:%s:%s:domain/%s"
return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def kms_key_arn(key_id: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:kms:%s:%s:key/%s"
return _resource_arn(key_id, pattern, account_id=account_id, region_name=region_name)
def code_signing_arn(code_signing_id: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:lambda:%s:%s:code-signing-config:%s"
return _resource_arn(code_signing_id, pattern, account_id=account_id, region_name=region_name)
def ssm_parameter_arn(param_name: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:ssm:%s:%s:parameter/%s"
param_name = param_name.lstrip("/")
return _resource_arn(param_name, pattern, account_id=account_id, region_name=region_name)
def s3_bucket_arn(bucket_name_or_arn: str, account_id=None):
    """Return the ARN for an S3 bucket name or bucket ARN.

    S3 bucket ARNs carry no account component, so *account_id* is unused.
    """
    return "arn:aws:s3:::%s" % s3_bucket_name(bucket_name_or_arn)
def s3_bucket_name(bucket_name_or_arn: str) -> str:
    """Return the bucket name from a name or an "arn:aws:s3:::<name>" ARN."""
    _, _, bucket_name = bucket_name_or_arn.rpartition(":::")
    return bucket_name
def _resource_arn(name: str, pattern: str, account_id: str = None, region_name: str = None) -> str:
if ":" in name:
return name
account_id = get_account_id(account_id)
region_name = region_name or get_region()
if len(pattern.split("%s")) == 3:
return pattern % (account_id, name)
return pattern % (region_name, account_id, name)
def get_events_target_attributes(target):
return dict_utils.pick_attributes(target, EVENT_TARGET_PARAMETERS)
def get_or_create_bucket(bucket_name, s3_client=None):
s3_client = s3_client or connect_to_service("s3")
try:
return s3_client.head_bucket(Bucket=bucket_name)
except Exception:
return s3_client.create_bucket(Bucket=bucket_name)
def create_sqs_queue(queue_name, env=None):
env = get_environment(env)
# queue
conn = connect_to_service("sqs", env=env)
return conn.create_queue(QueueName=queue_name)
def sqs_queue_arn(queue_name, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
queue_name = queue_name.split("/")[-1]
return "arn:aws:sqs:%s:%s:%s" % (region_name, account_id, queue_name)
def apigateway_restapi_arn(api_id, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
return "arn:aws:apigateway:%s:%s:/restapis/%s" % (region_name, account_id, api_id)
def sqs_queue_name(queue_arn):
    """Return the queue name for an SQS queue ARN (plain names pass through)."""
    return parse_arn(queue_arn)["resource"] if ":" in queue_arn else queue_arn
def sns_topic_arn(topic_name, account_id=None):
account_id = get_account_id(account_id)
return "arn:aws:sns:%s:%s:%s" % (get_region(), account_id, topic_name)
def sqs_receive_message(queue_arn):
region_name = extract_region_from_arn(queue_arn)
client = connect_to_service("sqs", region_name=region_name)
queue_url = get_sqs_queue_url(queue_arn)
response = client.receive_message(QueueUrl=queue_url)
return response
def firehose_name(firehose_arn):
    """Return the delivery stream name from a Firehose stream ARN."""
    _, _, name = firehose_arn.rpartition("/")
    return name
def kinesis_stream_name(kinesis_arn):
    """Return the stream name from a Kinesis stream ARN."""
    _, _, name = kinesis_arn.rpartition(":stream/")
    return name
def mock_aws_request_headers(service="dynamodb", region_name=None, access_key=None):
    """Build minimal request headers mimicking a SigV4-signed AWS request.

    Uses a fixed dummy date and signature; the Content-Type is chosen per
    service (amz-json 1.0/1.1 or form-urlencoded).

    Args:
        service: target AWS service (selects Content-Type and credential scope).
        region_name: region for the credential scope; defaults to get_region().
        access_key: access key ID for the credential scope; defaults to the
            test access key.
    """
    ctype = APPLICATION_AMZ_JSON_1_0
    if service == "kinesis":
        ctype = APPLICATION_AMZ_JSON_1_1
    elif service in ["sns", "sqs"]:
        ctype = APPLICATION_X_WWW_FORM_URLENCODED
    # TODO: consider adding an internal=False flag, to use INTERNAL_AWS_ACCESS_KEY_ID for internal calls here
    access_key = access_key or constants.TEST_AWS_ACCESS_KEY_ID
    region_name = region_name or get_region()
    headers = {
        "Content-Type": ctype,
        "Accept-Encoding": "identity",
        "X-Amz-Date": "20160623T103251Z",
        "Authorization": (
            "AWS4-HMAC-SHA256 "
            + "Credential=%s/20160623/%s/%s/aws4_request, "
            + "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"
        )
        % (access_key, region_name, service),
    }
    return headers
def inject_region_into_auth_headers(region, headers):
    """Rewrite the region inside the SigV4 credential scope of the
    Authorization header. No-op if the header is absent.
    """
    auth_header = headers.get("Authorization")
    if not auth_header:
        return
    credential_re = r"Credential=([^/]+)/([^/]+)/([^/]+)/"
    headers["Authorization"] = re.sub(
        credential_re, r"Credential=\1/\2/%s/" % region, auth_header
    )
def dynamodb_get_item_raw(request):
headers = mock_aws_request_headers()
headers["X-Amz-Target"] = "DynamoDB_20120810.GetItem"
new_item = make_http_request(
url=config.service_url("dynamodb"),
method="POST",
data=json.dumps(request),
headers=headers,
)
new_item = new_item.text
new_item = new_item and json.loads(new_item)
return new_item
def create_dynamodb_table(
    table_name,
    partition_key,
    env=None,
    stream_view_type=None,
    region_name=None,
    client=None,
    sleep_after=2,
):
    """Create a DynamoDB table with a single string HASH key.

    Args:
        table_name: name of the table to create.
        partition_key: attribute name used as the partition (HASH) key.
        stream_view_type: if set, streams are enabled with this view type.
        client: optional pre-created DynamoDB client.
        sleep_after: seconds to wait after creation.

    Returns:
        The create_table() response; a Table resource if the table already
        existed; or None if creation failed with another (swallowed) error.
    """
    dynamodb = client or connect_to_service(
        "dynamodb", env=env, client=True, region_name=region_name
    )
    stream_spec = {"StreamEnabled": False}
    key_schema = [{"AttributeName": partition_key, "KeyType": "HASH"}]
    attr_defs = [{"AttributeName": partition_key, "AttributeType": "S"}]
    if stream_view_type is not None:
        stream_spec = {"StreamEnabled": True, "StreamViewType": stream_view_type}
    table = None
    try:
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=key_schema,
            AttributeDefinitions=attr_defs,
            ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
            StreamSpecification=stream_spec,
        )
    except Exception as e:
        if "ResourceInUseException" in str(e):
            # Table already exists -> return table reference
            return connect_to_resource("dynamodb", env=env, region_name=region_name).Table(
                table_name
            )
        if "AccessDeniedException" in str(e):
            raise
        # NOTE(review): all other errors are swallowed and None is returned --
        # presumably intentional best-effort behavior; confirm.
    if sleep_after:
        # TODO: do we need this?
        time.sleep(sleep_after)
    return table
def get_apigateway_integration(api_id, method, path, env=None):
apigateway = connect_to_service(service_name="apigateway", client=True, env=env)
resources = apigateway.get_resources(restApiId=api_id, limit=100)
resource_id = None
for r in resources["items"]:
if r["path"] == path:
resource_id = r["id"]
if not resource_id:
raise Exception('Unable to find apigateway integration for path "%s"' % path)
integration = apigateway.get_integration(
restApiId=api_id, resourceId=resource_id, httpMethod=method
)
return integration
def get_apigateway_resource_for_path(api_id, path, parent=None, resources=None):
    """Recursively resolve the API Gateway resource object for a path.

    Args:
        api_id: REST API ID.
        path: path string ("a/b") or list of remaining path parts.
        parent: resource matched for the previous part (checked against
            each candidate's parentId).
        resources: list of resource dicts; fetched from the API if not given.

    Returns:
        The resource dict for the final path part, or None if no match.
    """
    if resources is None:
        apigateway = connect_to_service(service_name="apigateway")
        # Fix: get_resources() returns a response dict; iterate its "items"
        # list (previously the raw dict was iterated, yielding its keys and
        # breaking the resource["pathPart"] lookups below). This matches the
        # usage in get_apigateway_path_for_resource().
        resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]
    if not isinstance(path, list):
        path = path.split("/")
    if not path:
        return parent
    for resource in resources:
        if resource["pathPart"] == path[0] and (not parent or parent["id"] == resource["parentId"]):
            return get_apigateway_resource_for_path(
                api_id, path[1:], parent=resource, resources=resources
            )
    return None
def get_apigateway_path_for_resource(
api_id, resource_id, path_suffix="", resources=None, region_name=None
):
if resources is None:
apigateway = connect_to_service(service_name="apigateway", region_name=region_name)
resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]
target_resource = list(filter(lambda res: res["id"] == resource_id, resources))[0]
path_part = target_resource.get("pathPart", "")
if path_suffix:
if path_part:
path_suffix = "%s/%s" % (path_part, path_suffix)
else:
path_suffix = path_part
parent_id = target_resource.get("parentId")
if not parent_id:
return "/%s" % path_suffix
return get_apigateway_path_for_resource(
api_id,
parent_id,
path_suffix=path_suffix,
resources=resources,
region_name=region_name,
)
def create_api_gateway(
name,
description=None,
resources=None,
stage_name=None,
enabled_api_keys=None,
env=None,
usage_plan_name=None,
region_name=None,
auth_creator_func=None, # function that receives an api_id and returns an authorizer_id
):
if enabled_api_keys is None:
enabled_api_keys = []
client = connect_to_service("apigateway", env=env, region_name=region_name)
resources = resources or []
stage_name = stage_name or "testing"
usage_plan_name = usage_plan_name or "Basic Usage"
description = description or 'Test description for API "%s"' % name
LOG.info('Creating API resources under API Gateway "%s".', name)
api = client.create_rest_api(name=name, description=description)
api_id = api["id"]
auth_id = None
if auth_creator_func:
auth_id = auth_creator_func(api_id)
resources_list = client.get_resources(restApiId=api_id)
root_res_id = resources_list["items"][0]["id"]
# add API resources and methods
for path, methods in resources.items():
# create resources recursively
parent_id = root_res_id
for path_part in path.split("/"):
api_resource = client.create_resource(
restApiId=api_id, parentId=parent_id, pathPart=path_part
)
parent_id = api_resource["id"]
# add methods to the API resource
for method in methods:
kwargs = {"authorizerId": auth_id} if auth_id else {}
client.put_method(
restApiId=api_id,
resourceId=api_resource["id"],
httpMethod=method["httpMethod"],
authorizationType=method.get("authorizationType") or "NONE",
apiKeyRequired=method.get("apiKeyRequired") or False,
requestParameters=method.get("requestParameters") or {},
**kwargs,
)
# create integrations for this API resource/method
integrations = method["integrations"]
create_api_gateway_integrations(
api_id,
api_resource["id"],
method,
integrations,
env=env,
region_name=region_name,
)
# deploy the API gateway
client.create_deployment(restApiId=api_id, stageName=stage_name)
return api
def create_api_gateway_integrations(
api_id, resource_id, method, integrations=None, env=None, region_name=None
):
if integrations is None:
integrations = []
client = connect_to_service("apigateway", env=env, region_name=region_name)
for integration in integrations:
req_templates = integration.get("requestTemplates") or {}
res_templates = integration.get("responseTemplates") or {}
success_code = integration.get("successCode") or "200"
client_error_code = integration.get("clientErrorCode") or "400"
server_error_code = integration.get("serverErrorCode") or "500"
request_parameters = integration.get("requestParameters") or {}
# create integration
client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
integrationHttpMethod=method.get("integrationHttpMethod") or method["httpMethod"],
type=integration["type"],
uri=integration["uri"],
requestTemplates=req_templates,
requestParameters=request_parameters,
)
response_configs = [
{"pattern": "^2.*", "code": success_code, "res_templates": res_templates},
{"pattern": "^4.*", "code": client_error_code, "res_templates": {}},
{"pattern": "^5.*", "code": server_error_code, "res_templates": {}},
]
# create response configs
for response_config in response_configs:
# create integration response
client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
statusCode=response_config["code"],
responseTemplates=response_config["res_templates"],
selectionPattern=response_config["pattern"],
)
# create method response
client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
statusCode=response_config["code"],
)
def apigateway_invocations_arn(lambda_uri):
return "arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations" % (
get_region(),
lambda_uri,
)
def get_opensearch_endpoint(domain_arn: str) -> str:
region_name = extract_region_from_arn(domain_arn)
if region_name is None:
raise ValueError("unable to parse region from opensearch domain ARN")
opensearch_client = connect_to_service(service_name="opensearch", region_name=region_name)
domain_name = domain_arn.rpartition("/")[2]
info = opensearch_client.describe_domain(DomainName=domain_name)
base_domain = info["DomainStatus"]["Endpoint"]
endpoint = base_domain if base_domain.startswith("http") else f"https://{base_domain}"
return endpoint
def get_search_db_connection(endpoint: str, region_name: str):
from opensearchpy import OpenSearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
verify_certs = False
use_ssl = False
# use ssl?
if "https://" in endpoint:
use_ssl = True
# TODO remove this condition once ssl certs are available for .es.localhost.localstack.cloud domains
endpoint_netloc = urlparse(endpoint).netloc
if not re.match(r"^.*(localhost(\.localstack\.cloud)?)(:\d+)?$", endpoint_netloc):
verify_certs = True
LOG.debug("Creating ES client with endpoint %s", endpoint)
if ENV_ACCESS_KEY in os.environ and ENV_SECRET_KEY in os.environ:
access_key = os.environ.get(ENV_ACCESS_KEY)
secret_key = os.environ.get(ENV_SECRET_KEY)
session_token = os.environ.get(ENV_SESSION_TOKEN)
awsauth = AWS4Auth(access_key, secret_key, region_name, "es", session_token=session_token)
connection_class = RequestsHttpConnection
return OpenSearch(
hosts=[endpoint],
verify_certs=verify_certs,
use_ssl=use_ssl,
connection_class=connection_class,
http_auth=awsauth,
)
return OpenSearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl)
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
env = get_environment(env)
stream = KinesisStream(id=stream_name, num_shards=shards)
conn = connect_to_service("kinesis", env=env)
stream.connect(conn)
if delete:
run_safe(lambda: stream.destroy(), print_error=False)
stream.create()
# Note: Returning the stream without awaiting its creation (via wait_for()) to avoid API call timeouts/retries.
return stream
def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None):
    """Fetch up to *count* of the most recent records from a Kinesis shard.

    Reads the shard from TRIM_HORIZON until an empty batch is returned,
    decoding record payloads to strings where possible, and returns only the
    trailing *count* records.
    """
    kinesis = connect_to_service("kinesis", env=env)
    result = []
    response = kinesis.get_shard_iterator(
        StreamName=stream_name, ShardId=shard_id, ShardIteratorType="TRIM_HORIZON"
    )
    shard_iterator = response["ShardIterator"]
    while shard_iterator:
        records_response = kinesis.get_records(ShardIterator=shard_iterator)
        records = records_response["Records"]
        for record in records:
            try:
                record["Data"] = to_str(record["Data"])
            except Exception:
                # Payloads that are not valid text are left as-is
                pass
        result.extend(records)
        # Stop once a batch comes back empty (tip of the shard reached)
        shard_iterator = records_response["NextShardIterator"] if records else False
    if count <= 0:
        # Fix: the old pop(0) loop raised IndexError for negative counts
        return []
    # Keep only the trailing `count` records. (Previously trimmed one-by-one
    # with result.pop(0), which is quadratic in the number of excess records.)
    return result[-count:]
def get_stack_details(stack_name, region_name=None):
cloudformation = connect_to_service("cloudformation", region_name=region_name)
stacks = cloudformation.describe_stacks(StackName=stack_name)
for stack in stacks["Stacks"]:
if stack["StackName"] == stack_name:
return stack
def deploy_cf_stack(stack_name, template_body):
cfn = connect_to_service("cloudformation")
cfn.create_stack(StackName=stack_name, TemplateBody=template_body)
# wait for deployment to finish
return await_stack_completion(stack_name)
def await_stack_status(stack_name, expected_statuses, retries=20, sleep=2, region_name=None):
def check_stack():
stack = get_stack_details(stack_name, region_name=region_name)
if stack["StackStatus"] not in expected_statuses:
raise Exception(
'Status "%s" for stack "%s" not in expected list: %s'
% (stack["StackStatus"], stack_name, expected_statuses)
)
return stack
expected_statuses = (
expected_statuses if isinstance(expected_statuses, list) else [expected_statuses]
)
return retry(check_stack, retries, sleep)
def await_stack_completion(stack_name, retries=20, sleep=2, statuses=None, region_name=None):
statuses = statuses or ["CREATE_COMPLETE", "UPDATE_COMPLETE", "DELETE_COMPLETE"]
return await_stack_status(
stack_name, statuses, retries=retries, sleep=sleep, region_name=region_name
)
| true | true |
f73339a69a504cdce7153fc5c6c634c49da259f2 | 192 | py | Python | bindings/python/pydeck/pydeck/bindings/__init__.py | shitao1988/deck.gl | a4c9ada186eceabdd3bb191309887841384a5843 | [
"MIT"
] | 1 | 2022-03-10T09:51:31.000Z | 2022-03-10T09:51:31.000Z | bindings/python/pydeck/pydeck/bindings/__init__.py | shitao1988/deck.gl | a4c9ada186eceabdd3bb191309887841384a5843 | [
"MIT"
] | null | null | null | bindings/python/pydeck/pydeck/bindings/__init__.py | shitao1988/deck.gl | a4c9ada186eceabdd3bb191309887841384a5843 | [
"MIT"
] | 2 | 2020-03-10T05:56:50.000Z | 2021-11-26T16:28:49.000Z | from .deck import Deck # noqa
from .layer import Layer # noqa
from .light_settings import LightSettings # noqa
from .view import View # noqa
from .view_state import ViewState # noqa
| 32 | 50 | 0.739583 | from .deck import Deck
from .layer import Layer
from .light_settings import LightSettings
from .view import View
from .view_state import ViewState
| true | true |
f73339e581354f9272a08d2528eff4ed0f815376 | 2,277 | py | Python | device/enviro_demo.py | ptone/cloud-iot-end-user-demo | 519dea13e72ea1e05bac348ef10554b60ea8eaab | [
"Apache-2.0"
] | null | null | null | device/enviro_demo.py | ptone/cloud-iot-end-user-demo | 519dea13e72ea1e05bac348ef10554b60ea8eaab | [
"Apache-2.0"
] | null | null | null | device/enviro_demo.py | ptone/cloud-iot-end-user-demo | 519dea13e72ea1e05bac348ef10554b60ea8eaab | [
"Apache-2.0"
] | null | null | null | from aiy.enviro import EnviroKit
from aiy.cloudiot import CloudIot
from luma.core.render import canvas
from PIL import ImageDraw
from time import sleep
import argparse
def update_display(display, msg):
    """Render *msg* as white text near the top-left of the OLED display."""
    with canvas(display) as draw:
        draw.text((0, 5), msg, fill='white')
def _none_to_nan(val):
return float('nan') if val is None else val
def main():
    """Continuously read EnviroKit sensors, show readings on the display,
    and publish them to Cloud IoT.

    NOTE(review): --upload_delay is parsed but never used; publishing happens
    on every loop iteration and the light/pressure screen is gated on
    read_count > 5 instead. Confirm which cadence was intended.
    """
    # Pull arguments from command line.
    parser = argparse.ArgumentParser(description='Enviro Kit Demo')
    parser.add_argument('--display_duration',
                        help='Measurement display duration (seconds)', type=int,
                        default=1)
    parser.add_argument('--upload_delay', help='Cloud upload delay (seconds)',
                        type=int, default=6)
    parser.add_argument('--cloud_config', help='Cloud IoT config file', default='my_config.ini')
    args = parser.parse_args()
    # Create instances of EnviroKit and Cloud IoT.
    enviro = EnviroKit()
    print("kit created")
    with CloudIot(args.cloud_config) as cloud:
        # Indefinitely update display and upload to cloud.
        read_count = 0
        sensors = {}
        while True:
            # First display temperature and RH.
            sensors['temp'] = enviro.temperature
            sensors['humidity'] = enviro.humidity
            msg = 'Temp: %.2f C\n' % _none_to_nan(sensors['temp'])
            msg += 'RH: %.2f %%' % _none_to_nan(sensors['humidity'])
            update_display(enviro.display, msg)
            sleep(args.display_duration)
            # Then read light and pressure (this msg is only displayed once
            # read_count exceeds 5 -- see below).
            sensors['light'] = enviro.ambient_light
            sensors['pressure'] = enviro.pressure
            msg = 'Light: %.2f lux\n' % _none_to_nan(sensors['light'])
            msg += 'Pressure: %.2f kPa' % _none_to_nan(sensors['pressure'])
            sleep(args.display_duration)
            read_count += 1
            print(read_count)
            if cloud.enabled():
                print("cloud enabled")
                # Publishes on every iteration (upload_delay is not honored).
                cloud.publish_message(sensors)
            # Every 6th iteration: show the light/pressure screen and reset.
            if (read_count > 5):
                update_display(enviro.display, msg)
                read_count = 0
if __name__ == '__main__':
main()
| 35.578125 | 96 | 0.603865 | from aiy.enviro import EnviroKit
from aiy.cloudiot import CloudIot
from luma.core.render import canvas
from PIL import ImageDraw
from time import sleep
import argparse
def update_display(display, msg):
with canvas(display) as draw:
draw.text((0, 5), msg, fill='white')
def _none_to_nan(val):
return float('nan') if val is None else val
def main():
parser = argparse.ArgumentParser(description='Enviro Kit Demo')
parser.add_argument('--display_duration',
help='Measurement display duration (seconds)', type=int,
default=1)
parser.add_argument('--upload_delay', help='Cloud upload delay (seconds)',
type=int, default=6)
parser.add_argument('--cloud_config', help='Cloud IoT config file', default='my_config.ini')
args = parser.parse_args()
enviro = EnviroKit()
print("kit created")
with CloudIot(args.cloud_config) as cloud:
read_count = 0
sensors = {}
while True:
sensors['temp'] = enviro.temperature
sensors['humidity'] = enviro.humidity
msg = 'Temp: %.2f C\n' % _none_to_nan(sensors['temp'])
msg += 'RH: %.2f %%' % _none_to_nan(sensors['humidity'])
update_display(enviro.display, msg)
sleep(args.display_duration)
sensors['light'] = enviro.ambient_light
sensors['pressure'] = enviro.pressure
msg = 'Light: %.2f lux\n' % _none_to_nan(sensors['light'])
msg += 'Pressure: %.2f kPa' % _none_to_nan(sensors['pressure'])
sleep(args.display_duration)
read_count += 1
print(read_count)
if cloud.enabled():
print("cloud enabled")
cloud.publish_message(sensors)
if (read_count > 5):
update_display(enviro.display, msg)
read_count = 0
if __name__ == '__main__':
main()
| true | true |
f73339f01a3b5cc8f2e8048400a9c0b656d84e99 | 2,651 | py | Python | heatclient/common/template_format.py | enterstudio/python-heatclient | 954e475a6a0a12432ec325d7579460fabcf3f40a | [
"Apache-2.0"
] | null | null | null | heatclient/common/template_format.py | enterstudio/python-heatclient | 954e475a6a0a12432ec325d7579460fabcf3f40a | [
"Apache-2.0"
] | null | null | null | heatclient/common/template_format.py | enterstudio/python-heatclient | 954e475a6a0a12432ec325d7579460fabcf3f40a | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import yaml
from heatclient._i18n import _
if hasattr(yaml, 'CSafeLoader'):
yaml_loader = yaml.CSafeLoader
else:
yaml_loader = yaml.SafeLoader
if hasattr(yaml, 'CSafeDumper'):
yaml_dumper = yaml.CSafeDumper
else:
yaml_dumper = yaml.SafeDumper
def _construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
# datetime.data which causes problems in API layer when being processed by
# openstack.common.jsonutils. Therefore, make unicode string out of timestamps
# until jsonutils can handle dates.
yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
_construct_yaml_str)
def parse(tmpl_str):
    """Parse a template string into a dict.

    The serialization format is guessed from the first non-whitespace
    character: a leading ``{`` selects JSON, anything else is treated
    as YAML.  Raises ValueError when the YAML is malformed or when no
    supported template-version key is present.
    """
    stripped = tmpl_str.strip()
    if stripped.startswith('{'):
        tpl = json.loads(stripped)
    else:
        try:
            tpl = yaml.load(stripped, Loader=yaml_loader)
        except yaml.YAMLError:
            # Re-parse with the pure-python SafeLoader, whose error
            # message includes the template snippet where the failure
            # occurred, so the user gets a more informative ValueError.
            try:
                tpl = yaml.load(stripped, Loader=yaml.SafeLoader)
            except yaml.YAMLError as yea:
                raise ValueError(yea)
        else:
            if tpl is None:
                tpl = {}
    # The loaded template must declare one of the supported version keys.
    version_keys = ('HeatTemplateFormatVersion',
                    'heat_template_version',
                    'AWSTemplateFormatVersion')
    if not any(key in tpl for key in version_keys):
        raise ValueError(_("Template format version not found."))
    return tpl
| 36.819444 | 79 | 0.688797 |
import json
import yaml
from heatclient._i18n import _
if hasattr(yaml, 'CSafeLoader'):
yaml_loader = yaml.CSafeLoader
else:
yaml_loader = yaml.SafeLoader
if hasattr(yaml, 'CSafeDumper'):
yaml_dumper = yaml.CSafeDumper
else:
yaml_dumper = yaml.SafeDumper
def _construct_yaml_str(self, node):
    # Return YAML scalar nodes as plain (unicode) string objects instead
    # of the loader's default representation.
    return self.construct_scalar(node)
yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
_construct_yaml_str)
def parse(tmpl_str):
    """Parse a JSON or YAML template string into a dict.

    The format is guessed from the first non-whitespace character:
    a leading '{' selects JSON, anything else is treated as YAML.
    Raises ValueError on malformed YAML or a missing version key.
    """
    tmpl_str = tmpl_str.strip()
    if tmpl_str.startswith('{'):
        tpl = json.loads(tmpl_str)
    else:
        try:
            tpl = yaml.load(tmpl_str, Loader=yaml_loader)
        except yaml.YAMLError:
            # Retry with the pure-python SafeLoader, whose error message
            # includes a snippet of the offending template.
            try:
                tpl = yaml.load(tmpl_str, Loader=yaml.SafeLoader)
            except yaml.YAMLError as yea:
                raise ValueError(yea)
        else:
            if tpl is None:
                tpl = {}
    # The template must declare one of the supported version keys.
    if not ('HeatTemplateFormatVersion' in tpl
            or 'heat_template_version' in tpl
            or 'AWSTemplateFormatVersion' in tpl):
        raise ValueError(_("Template format version not found."))
    return tpl
| true | true |
f7333a661fec67fc063ced993387787d5367e478 | 1,816 | py | Python | nova/scheduler/filters/core_filter.py | russellb/nova | 99c2e02b44a1012c8e26fc7658dc40ec4620a1ee | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/core_filter.py | russellb/nova | 99c2e02b44a1012c8e26fc7658dc40ec4620a1ee | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/core_filter.py | russellb/nova | 99c2e02b44a1012c8e26fc7658dc40ec4620a1ee | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2011 Openstack, LLC.
# Copyright (c) 2012 Justin Santa Barbara
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.scheduler.filters import abstract_filter
LOG = logging.getLogger(__name__)
cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
default=16.0,
help='Virtual CPU to Physical CPU allocation ratio')
FLAGS = flags.FLAGS
FLAGS.register_opt(cpu_allocation_ratio_opt)
class CoreFilter(abstract_filter.AbstractHostFilter):
    """Filter hosts on virtual CPU core availability."""

    def host_passes(self, host_state, filter_properties):
        """Return True when the host can supply the requested vCPUs."""
        instance_type = filter_properties.get('instance_type')
        # Only compute hosts with a known instance type are constrained.
        if host_state.topic != 'compute' or not instance_type:
            return True
        if not host_state.vcpus_total:
            # Fail open: without CPU statistics we cannot reject the host.
            LOG.warning(_("VCPUs not set; assuming CPU collection broken"))
            return True
        requested = instance_type['vcpus']
        # Physical cores are oversubscribed by the configured ratio.
        usable = host_state.vcpus_total * FLAGS.cpu_allocation_ratio
        free = usable - host_state.vcpus_used
        return free >= requested
| 35.607843 | 78 | 0.719163 |
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.scheduler.filters import abstract_filter
LOG = logging.getLogger(__name__)
cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
default=16.0,
help='Virtual CPU to Physical CPU allocation ratio')
FLAGS = flags.FLAGS
FLAGS.register_opt(cpu_allocation_ratio_opt)
class CoreFilter(abstract_filter.AbstractHostFilter):
    """Filter hosts based on virtual CPU core utilization."""
    def host_passes(self, host_state, filter_properties):
        """Return True if the host has enough free vCPUs for the instance."""
        instance_type = filter_properties.get('instance_type')
        # Only compute hosts with a known instance type are constrained.
        if host_state.topic != 'compute' or not instance_type:
            return True
        if not host_state.vcpus_total:
            # Fail open when CPU statistics are missing.
            LOG.warning(_("VCPUs not set; assuming CPU collection broken"))
            return True
        instance_vcpus = instance_type['vcpus']
        # Physical cores are oversubscribed by cpu_allocation_ratio.
        vcpus_total = host_state.vcpus_total * FLAGS.cpu_allocation_ratio
        return (vcpus_total - host_state.vcpus_used) >= instance_vcpus
| true | true |
f7333b2ed2a9e8f8b0c316ceb06b7159e13d4eec | 4,450 | py | Python | util/box_ops.py | antoyang/TubeDETR | 3c32cc92a0fdaa0c770d95a59d8764e0e212424c | [
"Apache-2.0"
] | 4 | 2022-03-31T04:57:59.000Z | 2022-03-31T12:26:34.000Z | util/box_ops.py | antoyang/TubeDETR | 3c32cc92a0fdaa0c770d95a59d8764e0e212424c | [
"Apache-2.0"
] | null | null | null | util/box_ops.py | antoyang/TubeDETR | 3c32cc92a0fdaa0c770d95a59d8764e0e212424c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
import numpy as np
from torchvision.ops.boxes import box_area
from typing import Tuple
#### Bounding box utilities imported from torchvision and converted to numpy
def np_box_area(boxes: np.array) -> np.array:
    """Compute the area of each box given in (x1, y1, x2, y2) form.

    Args:
        boxes: array of shape (N, 4) with ``0 <= x1 < x2`` and
            ``0 <= y1 < y2``.

    Returns:
        array of shape (N,) holding the area of every box.
    """
    assert boxes.ndim == 2 and boxes.shape[-1] == 4
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def _box_inter_union(boxes1: np.array, boxes2: np.array) -> Tuple[np.array, np.array]:
    """Return the pairwise intersection and union areas of two box sets."""
    area1 = np_box_area(boxes1)
    area2 = np_box_area(boxes2)
    # Pairwise overlap rectangle: its top-left corner is the max of the
    # corners, its bottom-right the min; negative extent means no overlap.
    top_left = np.maximum(boxes1[:, None, :2], boxes2[:, :2])      # [N, M, 2]
    bottom_right = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]
    extent = (bottom_right - top_left).clip(min=0)                 # [N, M, 2]
    inter = extent[:, :, 0] * extent[:, :, 1]                      # [N, M]
    union = area1[:, None] + area2 - inter
    return inter, union
def np_box_iou(boxes1: np.array, boxes2: np.array) -> np.array:
    """Pairwise IoU (Jaccard index) of two sets of (x1, y1, x2, y2) boxes.

    Args:
        boxes1: array of shape (N, 4).
        boxes2: array of shape (M, 4).

    Returns:
        array of shape (N, M) with the IoU of every pair from the two sets.
    """
    inter, union = _box_inter_union(boxes1, boxes2)
    return inter / union
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) on the last dim."""
    cx, cy, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=-1)
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h) on the last dim."""
    x0, y0, x1, y1 = x.unbind(-1)
    components = [(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0]
    return torch.stack(components, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    """Pairwise IoU of two sets of (x0, y0, x1, y1) boxes.

    Returns both the [N, M] IoU matrix and the union areas so callers
    (e.g. the generalized IoU) can reuse the union without recomputing.
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])      # [N, M, 2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]
    extent = (bottom_right - top_left).clamp(min=0)               # [N, M, 2]
    inter = extent[..., 0] * extent[..., 1]                       # [N, M]
    union = area1[:, None] + area2 - inter
    return inter / union, union
def generalized_box_iou(boxes1, boxes2):
    """Pairwise generalized IoU from https://giou.stanford.edu/.

    Boxes must be in (x0, y0, x1, y1) format; returns an [N, M] matrix
    for N boxes1 against M boxes2.
    """
    # Degenerate boxes would produce inf / nan further down, so fail early.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)
    # Smallest enclosing box of every pair.
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    extent = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    enclosing_area = extent[..., 0] * extent[..., 1]
    return iou - (enclosing_area - union) / enclosing_area
def masks_to_boxes(masks):
    """Compute tight (x0, y0, x1, y1) bounding boxes around binary masks.

    Args:
        masks: tensor of shape [N, H, W].

    Returns:
        tensor of shape [N, 4]; an empty input yields an empty [0, 4] tensor.
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)
    h, w = masks.shape[-2:]
    ys = torch.arange(0, h, dtype=torch.float)
    xs = torch.arange(0, w, dtype=torch.float)
    ys, xs = torch.meshgrid(ys, xs)
    # Per-mask extremes of the x coordinates covered by the mask; min is
    # taken after filling non-mask pixels with a large sentinel value.
    x_coords = masks * xs.unsqueeze(0)
    x_max = x_coords.flatten(1).max(-1)[0]
    x_min = x_coords.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    y_coords = masks * ys.unsqueeze(0)
    y_max = y_coords.flatten(1).max(-1)[0]
    y_min = y_coords.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    return torch.stack([x_min, y_min, x_max, y_max], 1)
| 31.118881 | 116 | 0.595056 |
import torch
import numpy as np
from torchvision.ops.boxes import box_area
from typing import Tuple
array) -> Tuple[np.array, np.array]:
area1 = np_box_area(boxes1)
area2 = np_box_area(boxes2)
lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2])
rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clip(min=0)
inter = wh[:, :, 0] * wh[:, :, 1]
union = area1[:, None] + area2 - inter
return inter, union
def np_box_iou(boxes1: np.array, boxes2: np.array) -> np.array:
    # Pairwise IoU (Jaccard index) for two sets of (x1, y1, x2, y2) boxes;
    # returns an [N, M] matrix for N boxes1 against M boxes2.
    inter, union = _box_inter_union(boxes1, boxes2)
    iou = inter / union
    return iou
def box_cxcywh_to_xyxy(x):
    # Convert (center-x, center-y, width, height) boxes to corner form.
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
    # Convert corner-form (x0, y0, x1, y1) boxes to center/size form.
    x0, y0, x1, y1 = x.unbind(-1)
    b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
    return torch.stack(b, dim=-1)
def box_iou(boxes1, boxes2):
    # Pairwise IoU of (x0, y0, x1, y1) boxes; also returns the union areas
    # so callers (e.g. generalized_box_iou) can reuse them.
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N, M, 2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]
    wh = (rb - lt).clamp(min=0)  # [N, M, 2]; zero when boxes do not overlap
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N, M]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return iou, union
def generalized_box_iou(boxes1, boxes2):
    # Pairwise generalized IoU (https://giou.stanford.edu/) for boxes in
    # (x0, y0, x1, y1) format; returns an [N, M] matrix.
    # Degenerate boxes would give inf / nan results, so check early.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)
    # Smallest enclosing box of every pair.
    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)  # [N, M, 2]
    area = wh[:, :, 0] * wh[:, :, 1]
    return iou - (area - union) / area
def masks_to_boxes(masks):
    # Compute tight (x0, y0, x1, y1) boxes around [N, H, W] binary masks;
    # an empty input yields an empty [0, 4] tensor.
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)
    h, w = masks.shape[-2:]
    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)
    # Per-mask coordinate extremes; min is taken after filling non-mask
    # pixels with a large sentinel so they never win.
    x_mask = masks * x.unsqueeze(0)
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    y_mask = masks * y.unsqueeze(0)
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
    return torch.stack([x_min, y_min, x_max, y_max], 1)
| true | true |
f7333c1762010ec0e00ede92c0e22ad172a13a1a | 4,607 | py | Python | wagtail_webstories/blocks.py | pyneda/wagtail-webstories | 92abc2af318e599cb0dc486deba9828379c78f1a | [
"BSD-3-Clause"
] | null | null | null | wagtail_webstories/blocks.py | pyneda/wagtail-webstories | 92abc2af318e599cb0dc486deba9828379c78f1a | [
"BSD-3-Clause"
] | null | null | null | wagtail_webstories/blocks.py | pyneda/wagtail-webstories | 92abc2af318e599cb0dc486deba9828379c78f1a | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
import requests
from wagtail.core import blocks
from webstories import Story, StoryPage
from .markup import AMPText
class AMPCleanHTMLBlock(blocks.RawHTMLBlock):
    """Raw HTML block whose values are AMPText, optionally sanitized on clean."""

    def clean(self, value):
        """Sanitize AMP markup unless cleaning is disabled via settings."""
        should_clean = getattr(settings, 'WAGTAIL_WEBSTORIES_CLEAN_HTML', True)
        if isinstance(value, AMPText) and should_clean:
            return AMPText(StoryPage.clean_html_fragment(value.source))
        return value

    def get_default(self):
        """Return the configured default, wrapped as AMPText if necessary."""
        default = self.meta.default
        return default if isinstance(default, AMPText) else AMPText(default)

    def to_python(self, value):
        """Deserialize a stored value into AMPText."""
        return value if isinstance(value, AMPText) else AMPText(value)

    def get_prep_value(self, value):
        """Serialize an AMPText value back to its raw source string."""
        return value.source if isinstance(value, AMPText) else value

    def value_for_form(self, value):
        """Present the raw markup string to the form widget."""
        return value.source if isinstance(value, AMPText) else value

    def value_from_form(self, value):
        """Wrap the submitted form string as AMPText."""
        return AMPText(value)
class PageBlock(blocks.StructBlock):
    # A single web-story page: its AMP id plus the (cleaned) page markup.
    id = blocks.CharBlock()
    html = AMPCleanHTMLBlock()
class StoryChooserBlock(blocks.PageChooserBlock):
    """Page chooser restricted to web-story page models by default."""
    def __init__(self, **kwargs):
        has_specified_page_type = kwargs.get('page_type') or kwargs.get('target_model')
        if not has_specified_page_type:
            # allow selecting any page model that inherits from BaseWebStoryPage
            from .models import get_story_page_models
            kwargs['target_model'] = get_story_page_models()
        super().__init__(**kwargs)
    def get_context(self, value, parent_context=None):
        """Expose the chosen page's specific instance to the template."""
        context = super().get_context(value, parent_context=parent_context)
        context['page'] = value.specific
        return context
    class Meta:
        template = 'wagtail_webstories/blocks/story_poster_link.html'
class StoryEmbedBlock(StoryChooserBlock):
    # Same chooser, rendered as an inline embed instead of a poster link.
    class Meta:
        template = 'wagtail_webstories/blocks/story_embed_block.html'
class ExternalStoryBlock(blocks.URLBlock):
    """URL block whose Python value is an ExternalStory fetched from that URL."""
    def get_default(self):
        from .models import ExternalStory
        # Allow specifying the default as either an ExternalStory or a URL string (or None).
        if not self.meta.default:
            return None
        elif isinstance(self.meta.default, ExternalStory):
            return self.meta.default
        else:
            # assume default has been passed as a string
            return ExternalStory.get_for_url(self.meta.default)
    def to_python(self, value):
        from .models import ExternalStory
        # The JSON representation of an ExternalStoryBlock value is a URL string;
        # this should be converted to an ExternalStory instance (or None).
        if not value:
            return None
        else:
            return ExternalStory.get_for_url(value)
    def get_prep_value(self, value):
        # serialisable value should be a URL string
        if value is None:
            return ''
        elif isinstance(value, str):
            return value
        else:
            return value.url
    def value_for_form(self, value):
        # the value to be handled by the URLField is a plain URL string (or the empty string)
        if value is None:
            return ''
        elif isinstance(value, str):
            return value
        else:
            return value.url
    def value_from_form(self, value):
        # Keep value as a string, and convert to an ExternalStory during clean
        return value or None
    def clean(self, value):
        """Validate the URL by fetching it and parsing it as a web story."""
        from .models import ExternalStory
        value = super().clean(value)
        if value is not None:
            try:
                value = ExternalStory.get_for_url(value)
            except requests.exceptions.RequestException:
                raise ValidationError(_("Could not fetch URL."))
            except Story.InvalidStoryException:
                raise ValidationError(_("URL is not a valid web story."))
        return value
    def get_context(self, value, parent_context=None):
        # Expose the ExternalStory instance to the template.
        context = super().get_context(value, parent_context=parent_context)
        context['story'] = value
        return context
    class Meta:
        template = 'wagtail_webstories/blocks/external_story_poster_link.html'
class ExternalStoryEmbedBlock(ExternalStoryBlock):
    # Same external-story value, rendered as an inline embed.
    class Meta:
        template = 'wagtail_webstories/blocks/external_story_embed_block.html'
| 31.993056 | 99 | 0.654439 | from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
import requests
from wagtail.core import blocks
from webstories import Story, StoryPage
from .markup import AMPText
class AMPCleanHTMLBlock(blocks.RawHTMLBlock):
    """Raw HTML block whose values are AMPText, optionally sanitized on clean."""
    def clean(self, value):
        # Sanitize AMP markup unless cleaning is disabled via settings.
        if isinstance(value, AMPText) and getattr(settings, 'WAGTAIL_WEBSTORIES_CLEAN_HTML', True):
            return AMPText(StoryPage.clean_html_fragment(value.source))
        else:
            return value
    def get_default(self):
        # Return the configured default, wrapped as AMPText if necessary.
        if isinstance(self.meta.default, AMPText):
            return self.meta.default
        else:
            return AMPText(self.meta.default)
    def to_python(self, value):
        # Deserialize a stored value into AMPText.
        if isinstance(value, AMPText):
            return value
        else:
            return AMPText(value)
    def get_prep_value(self, value):
        # Serialize an AMPText value back to its raw source string.
        if isinstance(value, AMPText):
            return value.source
        else:
            return value
    def value_for_form(self, value):
        # Present the raw markup string to the form widget.
        if isinstance(value, AMPText):
            return value.source
        else:
            return value
    def value_from_form(self, value):
        # Wrap the submitted form string as AMPText.
        return AMPText(value)
class PageBlock(blocks.StructBlock):
    # A single web-story page: its AMP id plus the (cleaned) page markup.
    id = blocks.CharBlock()
    html = AMPCleanHTMLBlock()
class StoryChooserBlock(blocks.PageChooserBlock):
    """Page chooser restricted to web-story page models by default."""
    def __init__(self, **kwargs):
        has_specified_page_type = kwargs.get('page_type') or kwargs.get('target_model')
        if not has_specified_page_type:
            # Default to every page model inheriting from BaseWebStoryPage.
            from .models import get_story_page_models
            kwargs['target_model'] = get_story_page_models()
        super().__init__(**kwargs)
    def get_context(self, value, parent_context=None):
        # Expose the chosen page's specific instance to the template.
        context = super().get_context(value, parent_context=parent_context)
        context['page'] = value.specific
        return context
    class Meta:
        template = 'wagtail_webstories/blocks/story_poster_link.html'
class StoryEmbedBlock(StoryChooserBlock):
    # Same chooser, rendered as an inline embed instead of a poster link.
    class Meta:
        template = 'wagtail_webstories/blocks/story_embed_block.html'
class ExternalStoryBlock(blocks.URLBlock):
    """URL block whose Python value is an ExternalStory fetched from that URL."""
    def get_default(self):
        from .models import ExternalStory
        # Default may be given as an ExternalStory, a URL string, or None.
        if not self.meta.default:
            return None
        elif isinstance(self.meta.default, ExternalStory):
            return self.meta.default
        else:
            return ExternalStory.get_for_url(self.meta.default)
    def to_python(self, value):
        from .models import ExternalStory
        # The stored JSON value is a URL string; convert it to an
        # ExternalStory instance (or None when empty).
        if not value:
            return None
        else:
            return ExternalStory.get_for_url(value)
    def get_prep_value(self, value):
        # The serialisable value is a plain URL string.
        if value is None:
            return ''
        elif isinstance(value, str):
            return value
        else:
            return value.url
    def value_for_form(self, value):
        # The URLField handles a plain URL string (or the empty string).
        if value is None:
            return ''
        elif isinstance(value, str):
            return value
        else:
            return value.url
    def value_from_form(self, value):
        # Keep as a string; conversion to ExternalStory happens in clean().
        return value or None
    def clean(self, value):
        """Validate the URL by fetching it and parsing it as a web story."""
        from .models import ExternalStory
        value = super().clean(value)
        if value is not None:
            try:
                value = ExternalStory.get_for_url(value)
            except requests.exceptions.RequestException:
                raise ValidationError(_("Could not fetch URL."))
            except Story.InvalidStoryException:
                raise ValidationError(_("URL is not a valid web story."))
        return value
    def get_context(self, value, parent_context=None):
        # Expose the ExternalStory instance to the template.
        context = super().get_context(value, parent_context=parent_context)
        context['story'] = value
        return context
    class Meta:
        template = 'wagtail_webstories/blocks/external_story_poster_link.html'
class ExternalStoryEmbedBlock(ExternalStoryBlock):
    # Same external-story value, rendered as an inline embed.
    class Meta:
        template = 'wagtail_webstories/blocks/external_story_embed_block.html'
| true | true |
f7333c1c38a2f74caebd064ed74ad103c4f85ffa | 179 | py | Python | src/saturnv_ui/saturnv/ui/fonts/fontmanager.py | epkaz93/saturnv | b8a2c61bb0e833f2e31698050113038bab3ca5a4 | [
"MIT"
] | 1 | 2022-03-12T07:38:09.000Z | 2022-03-12T07:38:09.000Z | src/saturnv_ui/saturnv/ui/fonts/fontmanager.py | epkaz93/saturnv | b8a2c61bb0e833f2e31698050113038bab3ca5a4 | [
"MIT"
] | null | null | null | src/saturnv_ui/saturnv/ui/fonts/fontmanager.py | epkaz93/saturnv | b8a2c61bb0e833f2e31698050113038bab3ca5a4 | [
"MIT"
] | null | null | null | from saturnv.ui.managers import FileBasedManager
class FontManager(FileBasedManager):
    """File-based manager specialised for font files."""
    def __init__(self, path, extension='*ttf'):
        # NOTE(review): the default glob looks like it should be '*.ttf';
        # '*ttf' also matches any name merely ending in "ttf" — confirm intent.
        super().__init__(path, extension)
| 22.375 | 48 | 0.73743 | from saturnv.ui.managers import FileBasedManager
class FontManager(FileBasedManager):
    """File-based manager specialised for font files."""
    def __init__(self, path, extension='*ttf'):
        # NOTE(review): default glob is likely meant to be '*.ttf' — confirm.
        super().__init__(path, extension)
| true | true |
f7333c2a62c0c9bca41b5f8b5a6e3d3a26fc5e39 | 1,279 | py | Python | crawler/helpers.py | alatar-/sample-crawler | 434d771398d78f1e13a3c46875bde2e2fd763e2c | [
"MIT"
] | null | null | null | crawler/helpers.py | alatar-/sample-crawler | 434d771398d78f1e13a3c46875bde2e2fd763e2c | [
"MIT"
] | null | null | null | crawler/helpers.py | alatar-/sample-crawler | 434d771398d78f1e13a3c46875bde2e2fd763e2c | [
"MIT"
] | null | null | null | import logging
import datetime
import json
import requests
from bs4 import BeautifulSoup
from twilio.rest import TwilioRestClient
from .config import twilio
logger = logging.getLogger(__name__)
def translate_timestamp(ts):
    """Parse a Polish timestamp like ``'21:37, 5 marca 2020'`` into a datetime.

    The month appears as a Polish genitive month name; it is replaced by
    its number before parsing with the ``'%H:%M, %d %m %Y'`` format.
    The original table only covered stycznia/lutego/grudnia; all twelve
    months are handled now.

    Args:
        ts: timestamp string, e.g. ``'10:15, 1 stycznia 2020'``.

    Returns:
        datetime.datetime parsed from the string.

    Raises:
        ValueError: if the string does not match the expected format.
    """
    # Genitive month names as they appear in dates ("5 marca").
    months = {
        'stycznia': 1,
        'lutego': 2,
        'marca': 3,
        'kwietnia': 4,
        'maja': 5,
        'czerwca': 6,
        'lipca': 7,
        'sierpnia': 8,
        'września': 9,
        'października': 10,
        'listopada': 11,
        'grudnia': 12,
    }
    for name, number in months.items():
        if name in ts:
            # A timestamp contains exactly one month name, so stop after it.
            ts = ts.replace(name, str(number))
            break
    return datetime.datetime.strptime(ts, '%H:%M, %d %m %Y')
def get_url_soup(url):
    """Fetch *url* and return its body parsed as a BeautifulSoup tree."""
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def get_url_json(url):
    """Fetch *url* and return its body decoded from JSON."""
    response = requests.get(url)
    return json.loads(response.text)
def send_sms(number):
    """Normalize a Polish phone number and log/send the configured SMS.

    Separators ('-', '(', ')', spaces) and an optional '+48' country-code
    prefix are stripped before sending.

    Args:
        number: phone number string, e.g. ``'+48 123-456-789'``.

    Returns:
        The normalized subscriber number string (at most 9 digits).

    Raises:
        AssertionError: if more than 9 characters remain after normalization.
    """
    translation_table = dict.fromkeys(map(ord, '-() '), None)
    number = number.translate(translation_table)
    # Remove only an explicit '+48' prefix.  The previous str.lstrip('+48')
    # stripped any leading '+', '4' or '8' characters, truncating valid
    # numbers such as '884123456'.
    if number.startswith('+48'):
        number = number[3:]
    assert len(number) <= 9, "Invalid phone number '%s'." % number
    # Lazy %-style args so formatting is skipped when INFO is disabled.
    logging.getLogger(__name__).info('Sending sms to %s', number)
    # client = TwilioRestClient(twilio['sid'], twilio['token'])
    # client.messages.create(to="+48%s" % number, from_=twilio['origin-number'],
    #                        body=twilio['message'])
    return number
| 23.254545 | 80 | 0.643471 | import logging
import datetime
import json
import requests
from bs4 import BeautifulSoup
from twilio.rest import TwilioRestClient
from .config import twilio
logger = logging.getLogger(__name__)
def translate_timestamp(ts):
    """Parse a Polish timestamp like '10:15, 1 stycznia 2020' into a datetime."""
    # NOTE(review): only three month names are covered; any other month
    # makes strptime raise ValueError.
    translation = {
        'stycznia': 1,
        'lutego': 2,
        'grudnia': 12,
    }
    def multiple_replace(text, _dict):
        # Replace every key in *text* with its mapped number.
        for key in _dict:
            text = text.replace(key, str(_dict[key]))
        return text
    ts = multiple_replace(ts, translation)
    ts = datetime.datetime.strptime(ts, '%H:%M, %d %m %Y')
    return ts
def get_url_soup(url):
    """Fetch *url* and return its body parsed as a BeautifulSoup tree."""
    result = requests.get(url)
    soup = BeautifulSoup(result.content, 'html.parser')
    return soup
def get_url_json(url):
    """Fetch *url* and return its body decoded from JSON."""
    result = requests.get(url)
    _json = json.loads(result.text)
    return _json
def send_sms(number):
    """Strip separators and the '+48' prefix from *number*, then log the send."""
    translation_table = dict.fromkeys(map(ord, '-() '), None)
    # NOTE(review): lstrip('+48') removes ANY leading '+', '4' or '8'
    # characters, so subscriber numbers starting with 4/8 get truncated.
    number = number.translate(translation_table).lstrip('+48')
    assert len(number) <= 9, "Invalid phone number '%s'." % number
    logger.info('Sending sms to %s' % number)
| true | true |
f7333de8a7bc20273f44f37de6e18bf14b09e26d | 8,596 | py | Python | var/spack/repos/builtin/packages/hydrogen/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/hydrogen/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/hydrogen/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2022-01-18T23:39:24.000Z | 2022-01-18T23:39:24.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Hydrogen(CMakePackage, CudaPackage):
    """Hydrogen: Distributed-memory dense and sparse-direct linear algebra
    and optimization library. Based on the Elemental library."""
    homepage = "http://libelemental.org"
    url = "https://github.com/LLNL/Elemental/archive/v1.0.1.tar.gz"
    git = "https://github.com/LLNL/Elemental.git"
    maintainers = ['bvanessen']
    version('develop', branch='hydrogen')
    version('1.5.0', sha256='03dd487fb23b9fdbc715554a8ea48c3196a1021502e61b0172ef3fdfbee75180')
    version('1.4.0', sha256='c13374ff4a6c4d1076e47ba8c8d91a7082588b9958d1ed89cffb12f1d2e1452e')
    version('1.3.4', sha256='7979f6656f698f0bbad6798b39d4b569835b3013ff548d98089fce7c283c6741')
    version('1.3.3', sha256='a51a1cfd40ac74d10923dfce35c2c04a3082477683f6b35e7b558ea9f4bb6d51')
    version('1.3.2', sha256='50bc5e87955f8130003d04dfd9dcad63107e92b82704f8107baf95b0ccf98ed6')
    version('1.3.1', sha256='a8b8521458e9e747f2b24af87c4c2749a06e500019c383e0cefb33e5df6aaa1d')
    version('1.3.0', sha256='0f3006aa1d8235ecdd621e7344c99f56651c6836c2e1bc0cf006331b70126b36')
    version('1.2.0', sha256='8545975139582ee7bfe5d00f8d83a8697afc285bf7026b0761e9943355974806')
    version('1.1.0-1', sha256='73ce05e4166853a186469269cb00a454de71e126b2019f95bbae703b65606808')
    version('1.1.0', sha256='b4c12913acd01c72d31f4522266bfeb8df1d4d3b4aef02e07ccbc9a477894e71')
    version('1.0.1', sha256='27cf76e1ef1d58bd8f9b1e34081a14a682b7ff082fb5d1da56713e5e0040e528')
    version('1.0', sha256='d8a97de3133f2c6b6bb4b80d32b4a4cc25eb25e0df4f0cec0f8cb19bf34ece98')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('openmp', default=True,
            description='Make use of OpenMP within CPU-kernels')
    variant('openmp_blas', default=False,
            description='Use OpenMP for threading in the BLAS library')
    variant('quad', default=False,
            description='Enable quad precision')
    variant('int64', default=False,
            description='Use 64bit integers')
    variant('int64_blas', default=False,
            description='Use 64bit integers for BLAS.')
    variant('scalapack', default=False,
            description='Build with ScaLAPACK library')
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))
    variant('blas', default='openblas', values=('openblas', 'mkl', 'accelerate', 'essl'),
            description='Enable the use of OpenBlas/MKL/Accelerate/ESSL')
    variant('mpfr', default=False,
            description='Support GNU MPFR\'s'
            'arbitrary-precision floating-point arithmetic')
    variant('test', default=False,
            description='Builds test suite')
    variant('al', default=False,
            description='Builds with Aluminum communication library')
    variant('omp_taskloops', default=False,
            description='Use OpenMP taskloops instead of parallel for loops.')
    variant('half', default=False,
            description='Builds with support for FP16 precision data types')
    # Taskloops are an OpenMP feature, so they require +openmp.
    conflicts('~openmp', when='+omp_taskloops')
    depends_on('cmake@3.17.0:', type='build')
    depends_on('mpi')
    depends_on('hwloc@1.11:')
    # Note that #1712 forces us to enumerate the different blas variants
    depends_on('openblas', when='blas=openblas')
    depends_on('openblas +ilp64', when='blas=openblas +int64_blas')
    depends_on('openblas threads=openmp', when='blas=openblas +openmp_blas')
    depends_on('intel-mkl', when="blas=mkl")
    depends_on('intel-mkl +ilp64', when="blas=mkl +int64_blas")
    depends_on('intel-mkl threads=openmp', when='blas=mkl +openmp_blas')
    depends_on('veclibfort', when='blas=accelerate')
    conflicts('blas=accelerate +openmp_blas')
    depends_on('essl', when='blas=essl')
    depends_on('essl -cuda', when='blas=essl -openmp_blas')
    depends_on('essl +ilp64', when='blas=essl +int64_blas')
    depends_on('essl threads=openmp', when='blas=essl +openmp_blas')
    depends_on('netlib-lapack +external-blas', when='blas=essl')
    # Specify the correct version of Aluminum
    depends_on('aluminum@:0.3.99', when='@:1.3.99 +al')
    depends_on('aluminum@0.4:0.4.99', when='@1.4:1.4.99 +al')
    depends_on('aluminum@0.5:', when='@:1.0,1.5.0: +al')
    # Add Aluminum variants
    depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
    # Propagate every selected CUDA arch down to Aluminum.
    for arch in CudaPackage.cuda_arch_values:
        depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
    # Note that this forces us to use OpenBLAS until #1712 is fixed
    depends_on('lapack', when='blas=openblas ~openmp_blas')
    depends_on('scalapack', when='+scalapack')
    depends_on('gmp', when='+mpfr')
    depends_on('mpc', when='+mpfr')
    depends_on('mpfr', when='+mpfr')
    depends_on('cuda', when='+cuda')
    # CUDA 11+ bundles CUB, so the external package is only needed before that.
    depends_on('cub', when='^cuda@:10.99')
    depends_on('half', when='+half')
    depends_on('llvm-openmp', when='%apple-clang +openmp')
    conflicts('@0:0.98', msg="Hydrogen did not exist before v0.99. " +
              "Did you mean to use Elemental instead?")
    generator = 'Ninja'
    depends_on('ninja', type='build')
    @property
    def libs(self):
        """Locate the installed Elemental libraries (libEl)."""
        shared = True if '+shared' in self.spec else False
        return find_libraries(
            'libEl', root=self.prefix, shared=shared, recursive=True
        )
    def cmake_args(self):
        """Translate the spec's variants into Hydrogen CMake options."""
        spec = self.spec
        enable_gpu_fp16 = ('+cuda' in spec and '+half' in spec)
        args = [
            '-DCMAKE_INSTALL_MESSAGE:STRING=LAZY',
            '-DBUILD_SHARED_LIBS:BOOL=%s' % ('+shared' in spec),
            '-DHydrogen_ENABLE_OPENMP:BOOL=%s' % ('+openmp' in spec),
            '-DHydrogen_ENABLE_QUADMATH:BOOL=%s' % ('+quad' in spec),
            '-DHydrogen_USE_64BIT_INTS:BOOL=%s' % ('+int64' in spec),
            '-DHydrogen_USE_64BIT_BLAS_INTS:BOOL=%s' % ('+int64_blas' in spec),
            '-DHydrogen_ENABLE_MPC:BOOL=%s' % ('+mpfr' in spec),
            '-DHydrogen_GENERAL_LAPACK_FALLBACK=ON',
            '-DHydrogen_ENABLE_ALUMINUM=%s' % ('+al' in spec),
            '-DHydrogen_ENABLE_CUB=%s' % ('+cuda' in spec),
            '-DHydrogen_ENABLE_CUDA=%s' % ('+cuda' in spec),
            '-DHydrogen_ENABLE_TESTING=%s' % ('+test' in spec),
            '-DHydrogen_ENABLE_HALF=%s' % ('+half' in spec),
            '-DHydrogen_ENABLE_GPU_FP16=%s' % enable_gpu_fp16,
        ]
        # Add support for OS X to find OpenMP (LLVM installed via brew)
        if self.spec.satisfies('%clang +openmp platform=darwin'):
            clang = self.compiler.cc
            clang_bin = os.path.dirname(clang)
            clang_root = os.path.dirname(clang_bin)
            args.extend([
                '-DOpenMP_DIR={0}'.format(clang_root)])
        # Exactly one BLAS provider is selected by the 'blas' variant.
        if 'blas=openblas' in spec:
            args.extend([
                '-DHydrogen_USE_OpenBLAS:BOOL=%s' % ('blas=openblas' in spec),
                '-DOpenBLAS_DIR:STRING={0}'.format(
                    spec['openblas'].prefix)])
        elif 'blas=mkl' in spec:
            args.extend([
                '-DHydrogen_USE_MKL:BOOL=%s' % ('blas=mkl' in spec)])
        elif 'blas=accelerate' in spec:
            args.extend(['-DHydrogen_USE_ACCELERATE:BOOL=TRUE'])
        elif 'blas=essl' in spec:
            args.extend([
                '-DHydrogen_USE_ESSL:BOOL=%s' % ('blas=essl' in spec)])
        if '+omp_taskloops' in spec:
            args.extend([
                '-DHydrogen_ENABLE_OMP_TASKLOOP:BOOL=%s' %
                ('+omp_taskloops' in spec)])
        if '+al' in spec:
            args.extend([
                '-DHydrogen_ENABLE_ALUMINUM:BOOL=%s' % ('+al' in spec),
                '-DALUMINUM_DIR={0}'.format(
                    spec['aluminum'].prefix)])
        return args
    def setup_build_environment(self, env):
        """Inject OpenMP compiler/linker flags when building with apple-clang."""
        if self.spec.satisfies('%apple-clang +openmp'):
            env.append_flags(
                'CPPFLAGS', self.compiler.openmp_flag)
            env.append_flags(
                'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
            env.append_flags(
                'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
            env.append_flags(
                'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
| 44.309278 | 97 | 0.642275 |
import os
from spack import *
class Hydrogen(CMakePackage, CudaPackage):
homepage = "http://libelemental.org"
url = "https://github.com/LLNL/Elemental/archive/v1.0.1.tar.gz"
git = "https://github.com/LLNL/Elemental.git"
maintainers = ['bvanessen']
version('develop', branch='hydrogen')
version('1.5.0', sha256='03dd487fb23b9fdbc715554a8ea48c3196a1021502e61b0172ef3fdfbee75180')
version('1.4.0', sha256='c13374ff4a6c4d1076e47ba8c8d91a7082588b9958d1ed89cffb12f1d2e1452e')
version('1.3.4', sha256='7979f6656f698f0bbad6798b39d4b569835b3013ff548d98089fce7c283c6741')
version('1.3.3', sha256='a51a1cfd40ac74d10923dfce35c2c04a3082477683f6b35e7b558ea9f4bb6d51')
version('1.3.2', sha256='50bc5e87955f8130003d04dfd9dcad63107e92b82704f8107baf95b0ccf98ed6')
version('1.3.1', sha256='a8b8521458e9e747f2b24af87c4c2749a06e500019c383e0cefb33e5df6aaa1d')
version('1.3.0', sha256='0f3006aa1d8235ecdd621e7344c99f56651c6836c2e1bc0cf006331b70126b36')
version('1.2.0', sha256='8545975139582ee7bfe5d00f8d83a8697afc285bf7026b0761e9943355974806')
version('1.1.0-1', sha256='73ce05e4166853a186469269cb00a454de71e126b2019f95bbae703b65606808')
version('1.1.0', sha256='b4c12913acd01c72d31f4522266bfeb8df1d4d3b4aef02e07ccbc9a477894e71')
version('1.0.1', sha256='27cf76e1ef1d58bd8f9b1e34081a14a682b7ff082fb5d1da56713e5e0040e528')
version('1.0', sha256='d8a97de3133f2c6b6bb4b80d32b4a4cc25eb25e0df4f0cec0f8cb19bf34ece98')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('openmp', default=True,
description='Make use of OpenMP within CPU-kernels')
variant('openmp_blas', default=False,
description='Use OpenMP for threading in the BLAS library')
variant('quad', default=False,
description='Enable quad precision')
variant('int64', default=False,
description='Use 64bit integers')
variant('int64_blas', default=False,
description='Use 64bit integers for BLAS.')
variant('scalapack', default=False,
description='Build with ScaLAPACK library')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('blas', default='openblas', values=('openblas', 'mkl', 'accelerate', 'essl'),
description='Enable the use of OpenBlas/MKL/Accelerate/ESSL')
variant('mpfr', default=False,
description='Support GNU MPFR\'s'
'arbitrary-precision floating-point arithmetic')
variant('test', default=False,
description='Builds test suite')
variant('al', default=False,
description='Builds with Aluminum communication library')
variant('omp_taskloops', default=False,
description='Use OpenMP taskloops instead of parallel for loops.')
variant('half', default=False,
description='Builds with support for FP16 precision data types')
conflicts('~openmp', when='+omp_taskloops')
depends_on('cmake@3.17.0:', type='build')
depends_on('mpi')
depends_on('hwloc@1.11:')
# Note that #1712 forces us to enumerate the different blas variants
depends_on('openblas', when='blas=openblas')
depends_on('openblas +ilp64', when='blas=openblas +int64_blas')
depends_on('openblas threads=openmp', when='blas=openblas +openmp_blas')
depends_on('intel-mkl', when="blas=mkl")
depends_on('intel-mkl +ilp64', when="blas=mkl +int64_blas")
depends_on('intel-mkl threads=openmp', when='blas=mkl +openmp_blas')
depends_on('veclibfort', when='blas=accelerate')
conflicts('blas=accelerate +openmp_blas')
depends_on('essl', when='blas=essl')
depends_on('essl -cuda', when='blas=essl -openmp_blas')
depends_on('essl +ilp64', when='blas=essl +int64_blas')
depends_on('essl threads=openmp', when='blas=essl +openmp_blas')
depends_on('netlib-lapack +external-blas', when='blas=essl')
# Specify the correct version of Aluminum
depends_on('aluminum@:0.3.99', when='@:1.3.99 +al')
depends_on('aluminum@0.4:0.4.99', when='@1.4:1.4.99 +al')
depends_on('aluminum@0.5:', when='@:1.0,1.5.0: +al')
# Add Aluminum variants
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
for arch in CudaPackage.cuda_arch_values:
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
# Note that this forces us to use OpenBLAS until #1712 is fixed
depends_on('lapack', when='blas=openblas ~openmp_blas')
depends_on('scalapack', when='+scalapack')
depends_on('gmp', when='+mpfr')
depends_on('mpc', when='+mpfr')
depends_on('mpfr', when='+mpfr')
depends_on('cuda', when='+cuda')
depends_on('cub', when='^cuda@:10.99')
depends_on('half', when='+half')
depends_on('llvm-openmp', when='%apple-clang +openmp')
conflicts('@0:0.98', msg="Hydrogen did not exist before v0.99. " +
"Did you mean to use Elemental instead?")
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def libs(self):
shared = True if '+shared' in self.spec else False
return find_libraries(
'libEl', root=self.prefix, shared=shared, recursive=True
)
def cmake_args(self):
spec = self.spec
enable_gpu_fp16 = ('+cuda' in spec and '+half' in spec)
args = [
'-DCMAKE_INSTALL_MESSAGE:STRING=LAZY',
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('+shared' in spec),
'-DHydrogen_ENABLE_OPENMP:BOOL=%s' % ('+openmp' in spec),
'-DHydrogen_ENABLE_QUADMATH:BOOL=%s' % ('+quad' in spec),
'-DHydrogen_USE_64BIT_INTS:BOOL=%s' % ('+int64' in spec),
'-DHydrogen_USE_64BIT_BLAS_INTS:BOOL=%s' % ('+int64_blas' in spec),
'-DHydrogen_ENABLE_MPC:BOOL=%s' % ('+mpfr' in spec),
'-DHydrogen_GENERAL_LAPACK_FALLBACK=ON',
'-DHydrogen_ENABLE_ALUMINUM=%s' % ('+al' in spec),
'-DHydrogen_ENABLE_CUB=%s' % ('+cuda' in spec),
'-DHydrogen_ENABLE_CUDA=%s' % ('+cuda' in spec),
'-DHydrogen_ENABLE_TESTING=%s' % ('+test' in spec),
'-DHydrogen_ENABLE_HALF=%s' % ('+half' in spec),
'-DHydrogen_ENABLE_GPU_FP16=%s' % enable_gpu_fp16,
]
# Add support for OS X to find OpenMP (LLVM installed via brew)
if self.spec.satisfies('%clang +openmp platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_DIR={0}'.format(clang_root)])
if 'blas=openblas' in spec:
args.extend([
'-DHydrogen_USE_OpenBLAS:BOOL=%s' % ('blas=openblas' in spec),
'-DOpenBLAS_DIR:STRING={0}'.format(
spec['openblas'].prefix)])
elif 'blas=mkl' in spec:
args.extend([
'-DHydrogen_USE_MKL:BOOL=%s' % ('blas=mkl' in spec)])
elif 'blas=accelerate' in spec:
args.extend(['-DHydrogen_USE_ACCELERATE:BOOL=TRUE'])
elif 'blas=essl' in spec:
args.extend([
'-DHydrogen_USE_ESSL:BOOL=%s' % ('blas=essl' in spec)])
if '+omp_taskloops' in spec:
args.extend([
'-DHydrogen_ENABLE_OMP_TASKLOOP:BOOL=%s' %
('+omp_taskloops' in spec)])
if '+al' in spec:
args.extend([
'-DHydrogen_ENABLE_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DALUMINUM_DIR={0}'.format(
spec['aluminum'].prefix)])
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang +openmp'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
| true | true |
f7333e88eaf27abea297faa8629e8771393ddb5a | 17,564 | py | Python | sktime/contrib/vector_classifiers/_rotation_forest.py | kejsitake/sktime | 5c608f09ce0f5216677ce9f6ad61d71584211db9 | [
"BSD-3-Clause"
] | 2 | 2021-12-28T10:48:11.000Z | 2022-03-06T18:08:01.000Z | sktime/contrib/vector_classifiers/_rotation_forest.py | kejsitake/sktime | 5c608f09ce0f5216677ce9f6ad61d71584211db9 | [
"BSD-3-Clause"
] | null | null | null | sktime/contrib/vector_classifiers/_rotation_forest.py | kejsitake/sktime | 5c608f09ce0f5216677ce9f6ad61d71584211db9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""RotationForest vector classifier.
Rotation Forest, sktime implementation for continuous values only.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["RotationForest"]
import time
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state, check_X_y
from sktime.base._base import _clone_estimator
from sktime.exceptions import NotFittedError
from sktime.utils.validation import check_n_jobs
class RotationForest(BaseEstimator):
    """Rotation Forest Classifier.

    Implementation of the Rotation Forest classifier described in Rodriguez et al
    (2013). [1]_

    Intended as a benchmark for time series data and a base classifier for
    transformation based approaches such as ShapeletTransformClassifier, this sktime
    implementation only works with continuous attributes.

    Parameters
    ----------
    n_estimators : int, default=200
        Number of estimators to build for the ensemble.
    min_group : int, default=3
        The minimum size of a group.
    max_group : int, default=3
        The maximum size of a group.
    remove_proportion : float, default=0.5
        The proportion of cases to be removed.
    base_estimator : BaseEstimator or None, default="None"
        Base estimator for the ensemble. By default uses the sklearn
        DecisionTreeClassifier using entropy as a splitting measure.
    time_limit_in_minutes : int, default=0
        Time contract to limit build time in minutes, overriding n_estimators.
        Default of 0 means n_estimators is used.
    contract_max_n_estimators : int, default=500
        Max number of estimators when time_limit_in_minutes is set.
    save_transformed_data : bool, default=False
        Save the data transformed in fit for use in _get_train_probs.
    n_jobs : int, default=1
        The number of jobs to run in parallel for both `fit` and `predict`.
        ``-1`` means using all processors.
    random_state : int or None, default=None
        Seed for random number generation.

    Attributes
    ----------
    n_classes : int
        The number of classes.
    n_instances : int
        The number of train cases.
    n_atts : int
        The number of attributes in each train case.
    classes_ : list
        The classes labels.
    estimators_ : list of shape (n_estimators) of BaseEstimator
        The collections of estimators trained in fit.
    transformed_data : list of shape (n_estimators) of ndarray
        The transformed dataset for all classifiers. Only saved when
        save_transformed_data is true.

    See Also
    --------
    ShapeletTransformClassifier

    Notes
    -----
    For the Java version, see
    `TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java
    /weka/classifiers/meta/RotationForest.java>`_.

    References
    ----------
    .. [1] Rodriguez, Juan José, Ludmila I. Kuncheva, and Carlos J. Alonso. "Rotation
       forest: A new classifier ensemble method." IEEE transactions on pattern analysis
       and machine intelligence 28.10 (2006).

    .. [2] Bagnall, A., et al. "Is rotation forest the best classifier for problems
       with continuous features?." arXiv preprint arXiv:1809.06705 (2018).

    Examples
    --------
    >>> from sktime.contrib.vector_classifiers._rotation_forest import RotationForest
    >>> from sktime.datasets import load_unit_test
    >>> from sktime.datatypes._panel._convert import from_nested_to_3d_numpy
    >>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
    >>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
    >>> X_train = from_nested_to_3d_numpy(X_train)
    >>> X_test = from_nested_to_3d_numpy(X_test)
    >>> clf = RotationForest(n_estimators=10)
    >>> clf.fit(X_train, y_train)
    RotationForest(...)
    >>> y_pred = clf.predict(X_test)
    """

    def __init__(
        self,
        n_estimators=200,
        min_group=3,
        max_group=3,
        remove_proportion=0.5,
        base_estimator=None,
        time_limit_in_minutes=0.0,
        contract_max_n_estimators=500,
        save_transformed_data=False,
        n_jobs=1,
        random_state=None,
    ):
        self.n_estimators = n_estimators
        self.min_group = min_group
        self.max_group = max_group
        self.remove_proportion = remove_proportion
        self.base_estimator = base_estimator
        self.time_limit_in_minutes = time_limit_in_minutes
        self.contract_max_n_estimators = contract_max_n_estimators
        self.save_transformed_data = save_transformed_data
        self.n_jobs = n_jobs
        self.random_state = random_state

        # public state populated by fit
        self.n_classes = 0
        self.n_instances = 0
        self.n_atts = 0
        self.classes_ = []
        self.estimators_ = []
        self.transformed_data = []

        # private working copies of the parameters (may be adjusted in fit)
        self._n_estimators = n_estimators
        self._base_estimator = base_estimator
        self._min = 0
        self._ptp = 0
        self._useful_atts = []
        self._pcas = []
        self._groups = []
        self._class_dictionary = {}
        self._n_jobs = n_jobs
        self._n_atts = 0

        # We need to add is-fitted state when inheriting from scikit-learn
        self._is_fitted = False

        super(RotationForest, self).__init__()

    @staticmethod
    def _coerce_to_2d(X):
        """Flatten univariate 3D panel input to 2D; reject anything else non-2D.

        Raises
        ------
        ValueError
            If X is not a numpy array or has more than 2 effective dimensions.
        """
        if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
            return np.reshape(X, (X.shape[0], -1))
        if not isinstance(X, np.ndarray) or len(X.shape) > 2:
            raise ValueError(
                "RotationForest is not a time series classifier. "
                "A 2d numpy array is required."
            )
        return X

    def fit(self, X, y):
        """Fit a forest of trees on cases (X,y), where y is the target variable.

        Parameters
        ----------
        X : ndarray of shape = [n_instances,n_attributes]
            The training input samples.
        y : array-like, shape = [n_instances]
            The class labels.

        Returns
        -------
        self : object
        """
        X = self._coerce_to_2d(X)
        X, y = check_X_y(X, y)

        self._n_jobs = check_n_jobs(self.n_jobs)

        self.n_instances, self.n_atts = X.shape
        self.classes_ = np.unique(y)
        self.n_classes = self.classes_.shape[0]
        # rebuild from scratch so a refit does not keep stale class entries
        self._class_dictionary = {}
        for index, classVal in enumerate(self.classes_):
            self._class_dictionary[classVal] = index

        time_limit = self.time_limit_in_minutes * 60
        start_time = time.time()
        train_time = 0

        if self.base_estimator is None:
            self._base_estimator = DecisionTreeClassifier(criterion="entropy")

        # replace missing values with 0 and remove useless (constant) attributes
        X = np.nan_to_num(X, False, 0, 0, 0)
        self._useful_atts = ~np.all(X[1:] == X[:-1], axis=0)
        X = X[:, self._useful_atts]

        self._n_atts = X.shape[1]

        # normalise attributes to [0, 1] (constant columns were removed above)
        self._min = X.min(axis=0)
        self._ptp = X.max(axis=0) - self._min
        X = (X - self._min) / self._ptp

        X_cls_split = [X[np.where(y == i)] for i in self.classes_]

        if time_limit > 0:
            self._n_estimators = 0
            self.estimators_ = []
            self._pcas = []
            self._groups = []
            # bug fix: transformed_data must also be reset, otherwise a refit
            # appends new transforms onto the previous fit's data
            self.transformed_data = []

            while (
                train_time < time_limit
                and self._n_estimators < self.contract_max_n_estimators
            ):
                fit = Parallel(n_jobs=self._n_jobs)(
                    delayed(self._fit_estimator)(
                        X,
                        X_cls_split,
                        y,
                        i,
                    )
                    for i in range(self._n_jobs)
                )

                estimators, pcas, groups, transformed_data = zip(*fit)

                self.estimators_ += estimators
                self._pcas += pcas
                self._groups += groups
                self.transformed_data += transformed_data

                self._n_estimators += self._n_jobs
                train_time = time.time() - start_time
        else:
            # reset in case a previous fit ran under a time contract
            self._n_estimators = self.n_estimators

            fit = Parallel(n_jobs=self._n_jobs)(
                delayed(self._fit_estimator)(
                    X,
                    X_cls_split,
                    y,
                    i,
                )
                for i in range(self._n_estimators)
            )

            estimators, pcas, groups, transformed_data = zip(*fit)
            # store as lists for consistency with the contract branch
            self.estimators_ = list(estimators)
            self._pcas = list(pcas)
            self._groups = list(groups)
            self.transformed_data = list(transformed_data)

        self._is_fitted = True
        return self

    def predict(self, X):
        """Predict for all cases in X. Built on top of predict_proba.

        Parameters
        ----------
        X : ndarray of shape = [n_instances,n_attributes]

        Returns
        -------
        output : array of shape = [n_test_instances]
        """
        rng = check_random_state(self.random_state)
        # break probability ties at random so results do not depend on class order
        return np.array(
            [
                self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
                for prob in self.predict_proba(X)
            ]
        )

    def predict_proba(self, X):
        """Probability estimates for each class for all cases in X.

        Parameters
        ----------
        X : ndarray of shape = [n_instances,n_attributes]

        Returns
        -------
        output : array of shape = [n_test_instances, num_classes] of
            probabilities
        """
        if not self._is_fitted:
            raise NotFittedError(
                f"This instance of {self.__class__.__name__} has not "
                f"been fitted yet; please call `fit` first."
            )
        X = self._coerce_to_2d(X)

        # replace missing values with 0 and remove useless attributes
        X = np.nan_to_num(X, False, 0, 0, 0)
        X = X[:, self._useful_atts]

        # normalise the data with the training min/range
        X = (X - self._min) / self._ptp

        y_probas = Parallel(n_jobs=self._n_jobs)(
            delayed(self._predict_proba_for_estimator)(
                X,
                self.estimators_[i],
                self._pcas[i],
                self._groups[i],
            )
            for i in range(self._n_estimators)
        )

        # average the per-estimator probabilities
        output = np.sum(y_probas, axis=0) / self._n_estimators
        return output

    def _get_train_probs(self, X, y):
        """Estimate train probabilities via out-of-bag predictions.

        Requires save_transformed_data=True and the same X used in fit.
        """
        if not self._is_fitted:
            raise NotFittedError(
                f"This instance of {self.__class__.__name__} has not "
                f"been fitted yet; please call `fit` first."
            )
        X = self._coerce_to_2d(X)

        n_instances, n_atts = X.shape

        if n_instances != self.n_instances or n_atts != self.n_atts:
            raise ValueError(
                "n_instances, n_dims, series_length mismatch. X should be "
                "the same as the training data used in fit for generating train "
                "probabilities."
            )

        if not self.save_transformed_data:
            raise ValueError("Currently only works with saved transform data from fit.")

        p = Parallel(n_jobs=self._n_jobs)(
            delayed(self._train_probas_for_estimator)(
                y,
                i,
            )
            for i in range(self._n_estimators)
        )
        y_probas, oobs = zip(*p)

        results = np.sum(y_probas, axis=0)
        # each instance is averaged over the estimators that held it out-of-bag
        divisors = np.zeros(n_instances)
        for oob in oobs:
            for inst in oob:
                divisors[inst] += 1

        for i in range(n_instances):
            results[i] = (
                np.ones(self.n_classes) * (1 / self.n_classes)
                if divisors[i] == 0
                else results[i] / (np.ones(self.n_classes) * divisors[i])
            )

        return results

    def _derive_estimator_seed(self, idx):
        """Derive a per-estimator seed (or None) from the forest random_state."""
        rs = 255 if self.random_state == 0 else self.random_state
        return (
            None
            if self.random_state is None
            else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
        )

    def _align_class_columns(self, clf, probas):
        """Map a sub-estimator's proba columns onto the full class set.

        A tree fit on a subsample may have seen only some classes; its
        predict_proba matrix is expanded to n_classes columns, with the
        seen classes placed at their ensemble positions.
        """
        if probas.shape[1] == self.n_classes:
            return probas
        new_probas = np.zeros((probas.shape[0], self.n_classes))
        for i, cls in enumerate(clf.classes_):
            cls_idx = self._class_dictionary[cls]
            new_probas[:, cls_idx] = probas[:, i]
        return new_probas

    def _fit_estimator(self, X, X_cls_split, y, idx):
        """Build one rotation: per-group PCAs plus a tree on the rotated data."""
        rs = self._derive_estimator_seed(idx)
        rng = check_random_state(rs)

        groups = self._generate_groups(rng)
        pcas = []

        # construct the slices to fit the PCAs too.
        for group in groups:
            classes = rng.choice(
                range(self.n_classes),
                size=rng.randint(1, self.n_classes + 1),
                replace=False,
            )

            # randomly add the classes with the randomly selected attributes.
            X_t = np.zeros((0, len(group)))
            for cls_idx in classes:
                c = X_cls_split[cls_idx]
                X_t = np.concatenate((X_t, c[:, group]), axis=0)

            sample_ind = rng.choice(
                X_t.shape[0],
                int(X_t.shape[0] * self.remove_proportion),
                replace=False,
            )
            X_t = X_t[sample_ind]

            # try to fit the PCA if it fails, remake it, and add 10 random data
            # instances.
            while True:
                # ignore err state on PCA because we account if it fails.
                with np.errstate(divide="ignore", invalid="ignore"):
                    # differences between os occasionally. seems to happen when there
                    # are low amounts of cases in the fit
                    pca = PCA(random_state=rs).fit(X_t)

                if not np.isnan(pca.explained_variance_ratio_).all():
                    break
                X_t = np.concatenate(
                    (X_t, rng.random_sample((10, X_t.shape[1]))), axis=0
                )

            pcas.append(pca)

        # merge all the pca_transformed data into one instance and build a classifier
        # on it.
        X_t = np.concatenate(
            [pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
        )
        tree = _clone_estimator(self._base_estimator, random_state=rs)
        tree.fit(X_t, y)

        return tree, pcas, groups, X_t if self.save_transformed_data else None

    def _predict_proba_for_estimator(self, X, clf, pcas, groups):
        """Rotate X with one estimator's PCAs and return its class probabilities."""
        X_t = np.concatenate(
            [pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
        )
        probas = clf.predict_proba(X_t)
        return self._align_class_columns(clf, probas)

    def _train_probas_for_estimator(self, y, idx):
        """Bootstrap one estimator on its saved transform and score its OOB cases."""
        rs = self._derive_estimator_seed(idx)
        rng = check_random_state(rs)

        indices = range(self.n_instances)
        subsample = rng.choice(self.n_instances, size=self.n_instances)
        # set lookup is O(1); `n in subsample` on an ndarray scans it each time
        subsample_set = set(subsample)
        oob = [n for n in indices if n not in subsample_set]

        clf = _clone_estimator(self._base_estimator, rs)
        clf.fit(self.transformed_data[idx][subsample], y[subsample])
        probas = clf.predict_proba(self.transformed_data[idx][oob])
        probas = self._align_class_columns(clf, probas)

        results = np.zeros((self.n_instances, self.n_classes))
        for n, proba in enumerate(probas):
            results[oob[n]] += proba

        return [results, oob]

    def _generate_groups(self, rng):
        """Randomly partition the attribute indices into groups of size
        min_group..max_group, reusing random attributes to fill the last group."""
        permutation = rng.permutation((np.arange(0, self._n_atts)))

        # select the size of each group.
        group_size_count = np.zeros(self.max_group - self.min_group + 1)
        n_attributes = 0
        n_groups = 0
        while n_attributes < self._n_atts:
            n = rng.randint(group_size_count.shape[0])
            group_size_count[n] += 1
            n_attributes += self.min_group + n
            n_groups += 1

        groups = []
        current_attribute = 0
        current_size = 0
        for i in range(0, n_groups):
            while group_size_count[current_size] == 0:
                current_size += 1
            group_size_count[current_size] -= 1

            n = self.min_group + current_size
            groups.append(np.zeros(n, dtype=int))
            for k in range(0, n):
                if current_attribute < permutation.shape[0]:
                    groups[i][k] = permutation[current_attribute]
                else:
                    # overflow: pad the final group with random reused attributes
                    groups[i][k] = permutation[rng.randint(permutation.shape[0])]
                current_attribute += 1

        return groups
| 34.711462 | 92 | 0.577317 |
__author__ = ["MatthewMiddlehurst"]
__all__ = ["RotationForest"]
import time
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state, check_X_y
from sktime.base._base import _clone_estimator
from sktime.exceptions import NotFittedError
from sktime.utils.validation import check_n_jobs
class RotationForest(BaseEstimator):
def __init__(
self,
n_estimators=200,
min_group=3,
max_group=3,
remove_proportion=0.5,
base_estimator=None,
time_limit_in_minutes=0.0,
contract_max_n_estimators=500,
save_transformed_data=False,
n_jobs=1,
random_state=None,
):
self.n_estimators = n_estimators
self.min_group = min_group
self.max_group = max_group
self.remove_proportion = remove_proportion
self.base_estimator = base_estimator
self.time_limit_in_minutes = time_limit_in_minutes
self.contract_max_n_estimators = contract_max_n_estimators
self.save_transformed_data = save_transformed_data
self.n_jobs = n_jobs
self.random_state = random_state
self.n_classes = 0
self.n_instances = 0
self.n_atts = 0
self.classes_ = []
self.estimators_ = []
self.transformed_data = []
self._n_estimators = n_estimators
self._base_estimator = base_estimator
self._min = 0
self._ptp = 0
self._useful_atts = []
self._pcas = []
self._groups = []
self._class_dictionary = {}
self._n_jobs = n_jobs
self._n_atts = 0
self._is_fitted = False
super(RotationForest, self).__init__()
def fit(self, X, y):
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
X, y = check_X_y(X, y)
self._n_jobs = check_n_jobs(self.n_jobs)
self.n_instances, self.n_atts = X.shape
self.classes_ = np.unique(y)
self.n_classes = self.classes_.shape[0]
for index, classVal in enumerate(self.classes_):
self._class_dictionary[classVal] = index
time_limit = self.time_limit_in_minutes * 60
start_time = time.time()
train_time = 0
if self.base_estimator is None:
self._base_estimator = DecisionTreeClassifier(criterion="entropy")
X = np.nan_to_num(X, False, 0, 0, 0)
self._useful_atts = ~np.all(X[1:] == X[:-1], axis=0)
X = X[:, self._useful_atts]
self._n_atts = X.shape[1]
self._min = X.min(axis=0)
self._ptp = X.max(axis=0) - self._min
X = (X - self._min) / self._ptp
X_cls_split = [X[np.where(y == i)] for i in self.classes_]
if time_limit > 0:
self._n_estimators = 0
self.estimators_ = []
self._pcas = []
self._groups = []
while (
train_time < time_limit
and self._n_estimators < self.contract_max_n_estimators
):
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_cls_split,
y,
i,
)
for i in range(self._n_jobs)
)
estimators, pcas, groups, transformed_data = zip(*fit)
self.estimators_ += estimators
self._pcas += pcas
self._groups += groups
self.transformed_data += transformed_data
self._n_estimators += self._n_jobs
train_time = time.time() - start_time
else:
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_cls_split,
y,
i,
)
for i in range(self._n_estimators)
)
self.estimators_, self._pcas, self._groups, self.transformed_data = zip(
*fit
)
self._is_fitted = True
return self
def predict(self, X):
rng = check_random_state(self.random_state)
return np.array(
[
self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
for prob in self.predict_proba(X)
]
)
def predict_proba(self, X):
if not self._is_fitted:
raise NotFittedError(
f"This instance of {self.__class__.__name__} has not "
f"been fitted yet; please call `fit` first."
)
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
X = np.nan_to_num(X, False, 0, 0, 0)
X = X[:, self._useful_atts]
X = (X - self._min) / self._ptp
y_probas = Parallel(n_jobs=self._n_jobs)(
delayed(self._predict_proba_for_estimator)(
X,
self.estimators_[i],
self._pcas[i],
self._groups[i],
)
for i in range(self._n_estimators)
)
output = np.sum(y_probas, axis=0) / (
np.ones(self.n_classes) * self._n_estimators
)
return output
def _get_train_probs(self, X, y):
if not self._is_fitted:
raise NotFittedError(
f"This instance of {self.__class__.__name__} has not "
f"been fitted yet; please call `fit` first."
)
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
n_instances, n_atts = X.shape
if n_instances != self.n_instances or n_atts != self.n_atts:
raise ValueError(
"n_instances, n_dims, series_length mismatch. X should be "
"the same as the training data used in fit for generating train "
"probabilities."
)
if not self.save_transformed_data:
raise ValueError("Currently only works with saved transform data from fit.")
p = Parallel(n_jobs=self._n_jobs)(
delayed(self._train_probas_for_estimator)(
y,
i,
)
for i in range(self._n_estimators)
)
y_probas, oobs = zip(*p)
results = np.sum(y_probas, axis=0)
divisors = np.zeros(n_instances)
for oob in oobs:
for inst in oob:
divisors[inst] += 1
for i in range(n_instances):
results[i] = (
np.ones(self.n_classes) * (1 / self.n_classes)
if divisors[i] == 0
else results[i] / (np.ones(self.n_classes) * divisors[i])
)
return results
def _fit_estimator(self, X, X_cls_split, y, idx):
rs = 255 if self.random_state == 0 else self.random_state
rs = (
None
if self.random_state is None
else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
)
rng = check_random_state(rs)
groups = self._generate_groups(rng)
pcas = []
for group in groups:
classes = rng.choice(
range(self.n_classes),
size=rng.randint(1, self.n_classes + 1),
replace=False,
)
X_t = np.zeros((0, len(group)))
for cls_idx in classes:
c = X_cls_split[cls_idx]
X_t = np.concatenate((X_t, c[:, group]), axis=0)
sample_ind = rng.choice(
X_t.shape[0],
int(X_t.shape[0] * self.remove_proportion),
replace=False,
)
X_t = X_t[sample_ind]
while True:
with np.errstate(divide="ignore", invalid="ignore"):
pca = PCA(random_state=rs).fit(X_t)
if not np.isnan(pca.explained_variance_ratio_).all():
break
X_t = np.concatenate(
(X_t, rng.random_sample((10, X_t.shape[1]))), axis=0
)
pcas.append(pca)
X_t = np.concatenate(
[pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
)
tree = _clone_estimator(self._base_estimator, random_state=rs)
tree.fit(X_t, y)
return tree, pcas, groups, X_t if self.save_transformed_data else None
def _predict_proba_for_estimator(self, X, clf, pcas, groups):
X_t = np.concatenate(
[pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
)
probas = clf.predict_proba(X_t)
if probas.shape[1] != self.n_classes:
new_probas = np.zeros((probas.shape[0], self.n_classes))
for i, cls in enumerate(clf.classes_):
cls_idx = self._class_dictionary[cls]
new_probas[:, cls_idx] = probas[:, i]
probas = new_probas
return probas
def _train_probas_for_estimator(self, y, idx):
rs = 255 if self.random_state == 0 else self.random_state
rs = (
None
if self.random_state is None
else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
)
rng = check_random_state(rs)
indices = range(self.n_instances)
subsample = rng.choice(self.n_instances, size=self.n_instances)
oob = [n for n in indices if n not in subsample]
clf = _clone_estimator(self._base_estimator, rs)
clf.fit(self.transformed_data[idx][subsample], y[subsample])
probas = clf.predict_proba(self.transformed_data[idx][oob])
if probas.shape[1] != self.n_classes:
new_probas = np.zeros((probas.shape[0], self.n_classes))
for i, cls in enumerate(clf.classes_):
cls_idx = self._class_dictionary[cls]
new_probas[:, cls_idx] = probas[:, i]
probas = new_probas
results = np.zeros((self.n_instances, self.n_classes))
for n, proba in enumerate(probas):
results[oob[n]] += proba
return [results, oob]
def _generate_groups(self, rng):
permutation = rng.permutation((np.arange(0, self._n_atts)))
group_size_count = np.zeros(self.max_group - self.min_group + 1)
n_attributes = 0
n_groups = 0
while n_attributes < self._n_atts:
n = rng.randint(group_size_count.shape[0])
group_size_count[n] += 1
n_attributes += self.min_group + n
n_groups += 1
groups = []
current_attribute = 0
current_size = 0
for i in range(0, n_groups):
while group_size_count[current_size] == 0:
current_size += 1
group_size_count[current_size] -= 1
n = self.min_group + current_size
groups.append(np.zeros(n, dtype=int))
for k in range(0, n):
if current_attribute < permutation.shape[0]:
groups[i][k] = permutation[current_attribute]
else:
groups[i][k] = permutation[rng.randint(permutation.shape[0])]
current_attribute += 1
return groups
| true | true |
f7333ef3b1155ca6ee86e57360eec802d4647c8c | 8,239 | py | Python | qa/rpc-tests/test_framework/test_framework.py | cozy-coin/cozycoin | 5c0706d819898cc1af0e70f6a955acc2dac524b8 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | cozy-coin/cozycoin | 5c0706d819898cc1af0e70f6a955acc2dac524b8 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | cozy-coin/cozycoin | 5c0706d819898cc1af0e70f6a955acc2dac524b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave cozycoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop cozycoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing cozycoind/cozycoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: cozycoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("COZYCOIND", "cozycoind"),
help="cozycoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("COZYCOIND", "cozycoind"),
help="cozycoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| 37.45 | 142 | 0.610632 |
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave cozycoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop cozycoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing cozycoind/cozycoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: cozycoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("COZYCOIND", "cozycoind"),
help="cozycoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("COZYCOIND", "cozycoind"),
help="cozycoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| true | true |
f733401fe91ac8646d4bdcf18d93dc23aa819bd9 | 13,003 | py | Python | shap/plots/dependence.py | Benjamin15/shap | 4b6472c90c89aad403e00dff0cc8a6416f354fea | [
"MIT"
] | null | null | null | shap/plots/dependence.py | Benjamin15/shap | 4b6472c90c89aad403e00dff0cc8a6416f354fea | [
"MIT"
] | null | null | null | shap/plots/dependence.py | Benjamin15/shap | 4b6472c90c89aad403e00dff0cc8a6416f354fea | [
"MIT"
] | null | null | null | from __future__ import division
from io import BytesIO
import base64
import numpy as np
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
from ..common import convert_name, approximate_interactions
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
interaction_index="auto",
color="#1E88E5", axis_color="#333333", cmap=None,
dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, ax=None, show=True,
get_png=False):
""" Create a SHAP dependence plot, colored by an interaction feature.
Plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extenstion of the classical parital dependence plots. Vertical dispersion of the
data points represents interaction effects. Grey ticks along the y-axis are data
points where the feature's value was NaN.
Parameters
----------
ind : int or string
If this is an int it is the index of the feature to plot. If this is a string it is
either the name of the feature to plot, or it can have the form "rank(int)" to specify
the feature with that rank (ordered by mean absolute SHAP value over all the samples).
shap_values : numpy.array
Matrix of SHAP values (# samples x # features).
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features).
feature_names : list
Names of the features (length # features).
display_features : numpy.array or pandas.DataFrame
Matrix of feature values for visual display (such as strings instead of coded values).
interaction_index : "auto", None, int, or string
The index of the feature used to color the plot. The name of a feature can also be passed
as a string. If "auto" then shap.common.approximate_interactions is used to pick what
seems to be the strongest interaction (note that to find to true stongest interaction you
need to compute the SHAP interaction values).
x_jitter : float (0 - 1)
Adds random jitter to feature values. May increase plot readability when feature
is discrete.
alpha : float
The transparency of the data points (between 0 and 1). This can be useful to the
show density of the data points when using a large dataset.
xmin : float or string
Represents the lower bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
xmax : float or string
Represents the upper bound of the plot's x-axis. It can be a string of the format
"percentile(float)" to denote that percentile of the feature's value used on the x-axis.
ax : matplotlib Axes object
Optionally specify an existing matplotlib Axes object, into which the plot will be placed.
In this case we do not create a Figure, otherwise we do.
"""
if cmap is None:
cmap = colors.red_blue
if type(shap_values) is list:
raise TypeError("The passed shap_values are a list not an array! If you have a list of explanations try " \
"passing shap_values[0] instead to explain the first output class of a multi-output model.")
# convert from DataFrames if we got any
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = display_features.columns
display_features = display_features.values
elif display_features is None:
display_features = features
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
# allow vectors to be passed
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, len(shap_values), 1)
if len(features.shape) == 1:
features = np.reshape(features, len(features), 1)
ind = convert_name(ind, shap_values, feature_names)
# guess what other feature as the stongest interaction with the plotted feature
if not hasattr(ind, "__len__"):
if interaction_index == "auto":
interaction_index = approximate_interactions(ind, shap_values, features)[0]
interaction_index = convert_name(interaction_index, shap_values, feature_names)
categorical_interaction = False
# create a matplotlib figure, if `ax` hasn't been specified.
if not ax:
figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)
fig = pl.figure(figsize=figsize)
ax = fig.gca()
else:
fig = ax.get_figure()
# plotting SHAP interaction values
if len(shap_values.shape) == 3 and hasattr(ind, "__len__") and len(ind) == 2:
ind1 = convert_name(ind[0], shap_values, feature_names)
ind2 = convert_name(ind[1], shap_values, feature_names)
if ind1 == ind2:
proj_shap_values = shap_values[:, ind2, :]
else:
proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half
# there is no interaction coloring for the main effect
if ind1 == ind2:
fig.set_size_inches(6, 5, forward=True)
# TODO: remove recursion; generally the functions should be shorter for more maintainable code
dependence_plot(
ind1, proj_shap_values, features, feature_names=feature_names,
interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,
xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha, get_png=get_png
)
if ind1 == ind2:
ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
else:
ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))
if show:
pl.show()
return
assert shap_values.shape[0] == features.shape[0], \
"'shap_values' and 'features' values must have the same number of rows!"
assert shap_values.shape[1] == features.shape[1], \
"'shap_values' must have the same number of columns as 'features'!"
# get both the raw and display feature values
oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering
np.random.shuffle(oinds)
xv = features[oinds, ind].astype(np.float64)
xd = display_features[oinds, ind]
s = shap_values[oinds, ind]
if type(xd[0]) == str:
name_map = {}
for i in range(len(xv)):
name_map[xd[i]] = xv[i]
xnames = list(name_map.keys())
# allow a single feature name to be passed alone
if type(feature_names) == str:
feature_names = [feature_names]
name = feature_names[ind]
# get both the raw and display color values
color_norm = None
if interaction_index is not None:
cv = features[:, interaction_index]
cd = display_features[:, interaction_index]
clow = np.nanpercentile(cv.astype(np.float), 5)
chigh = np.nanpercentile(cv.astype(np.float), 95)
if clow == chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
if type(cd[0]) == str:
cname_map = {}
for i in range(len(cv)):
cname_map[cd[i]] = cv[i]
cnames = list(cname_map.keys())
categorical_interaction = True
elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
categorical_interaction = True
# discritize colors for categorical features
if categorical_interaction and clow != chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)
# optionally add jitter to feature values
if x_jitter > 0:
if x_jitter > 1: x_jitter = 1
xvals = xv.copy()
if isinstance(xvals[0], float):
xvals = xvals.astype(np.float)
xvals = xvals[~np.isnan(xvals)]
xvals = np.unique(xvals) # returns a sorted array
if len(xvals) >= 2:
smallest_diff = np.min(np.diff(xvals))
jitter_amount = x_jitter * smallest_diff
xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)
# the actual scatter plot, TODO: adapt the dot_size to the number of data points?
xv_nan = np.isnan(xv)
xv_notnan = np.invert(xv_nan)
if interaction_index is not None:
# plot the nan values in the interaction feature as grey
cvals = features[oinds, interaction_index].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
cvals[cvals_imp > chigh] = chigh
cvals[cvals_imp < clow] = clow
p = ax.scatter(
xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
norm=color_norm, rasterized=len(xv) > 500
)
p.set_array(cvals[xv_notnan])
else:
p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,
alpha=alpha, rasterized=len(xv) > 500)
if interaction_index != ind and interaction_index is not None:
# draw the color bar
if type(cd[0]) == str:
tick_positions = [cname_map[n] for n in cnames]
if len(tick_positions) == 2:
tick_positions[0] -= 0.25
tick_positions[1] += 0.25
cb = pl.colorbar(p, ticks=tick_positions, ax=ax)
cb.set_ticklabels(cnames)
else:
cb = pl.colorbar(p, ax=ax)
cb.set_label(feature_names[interaction_index], size=13)
cb.ax.tick_params(labelsize=11)
if categorical_interaction:
cb.ax.tick_params(length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
# handles any setting of xmax and xmin
# note that we handle None,float, or "percentile(float)" formats
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
ax.set_xlim(xmin, xmax)
# plot any nan feature values as tick marks along the y-axis
xlim = ax.get_xlim()
if interaction_index is not None:
p = ax.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
vmin=clow, vmax=chigh
)
p.set_array(cvals[xv_nan])
else:
ax.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, color=color, alpha=alpha
)
ax.set_xlim(xlim)
# make the plot more readable
ax.set_xlabel(name, color=axis_color, fontsize=13)
ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
if title is not None:
ax.set_title(title, color=axis_color, fontsize=13)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
for spine in ax.spines.values():
spine.set_edgecolor(axis_color)
if type(xd[0]) == str:
ax.set_xticks([name_map[n] for n in xnames])
ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))
if show:
with warnings.catch_warnings(): # ignore expected matplotlib warnings
warnings.simplefilter("ignore", RuntimeWarning)
pl.show()
if get_png:
file = BytesIO()
pl.savefig(file, format='png', bbox_inches="tight")
return file
| 42.217532 | 125 | 0.640544 | from __future__ import division
from io import BytesIO
import base64
import numpy as np
import warnings
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
from ..common import convert_name, approximate_interactions
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
interaction_index="auto",
color="#1E88E5", axis_color="#333333", cmap=None,
dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, ax=None, show=True,
get_png=False):
if cmap is None:
cmap = colors.red_blue
if type(shap_values) is list:
raise TypeError("The passed shap_values are a list not an array! If you have a list of explanations try " \
"passing shap_values[0] instead to explain the first output class of a multi-output model.")
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
if str(type(display_features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = display_features.columns
display_features = display_features.values
elif display_features is None:
display_features = features
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, len(shap_values), 1)
if len(features.shape) == 1:
features = np.reshape(features, len(features), 1)
ind = convert_name(ind, shap_values, feature_names)
if not hasattr(ind, "__len__"):
if interaction_index == "auto":
interaction_index = approximate_interactions(ind, shap_values, features)[0]
interaction_index = convert_name(interaction_index, shap_values, feature_names)
categorical_interaction = False
if not ax:
figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)
fig = pl.figure(figsize=figsize)
ax = fig.gca()
else:
fig = ax.get_figure()
# plotting SHAP interaction values
if len(shap_values.shape) == 3 and hasattr(ind, "__len__") and len(ind) == 2:
ind1 = convert_name(ind[0], shap_values, feature_names)
ind2 = convert_name(ind[1], shap_values, feature_names)
if ind1 == ind2:
proj_shap_values = shap_values[:, ind2, :]
else:
proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half
# there is no interaction coloring for the main effect
if ind1 == ind2:
fig.set_size_inches(6, 5, forward=True)
# TODO: remove recursion; generally the functions should be shorter for more maintainable code
dependence_plot(
ind1, proj_shap_values, features, feature_names=feature_names,
interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,
xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha, get_png=get_png
)
if ind1 == ind2:
ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])
else:
ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))
if show:
pl.show()
return
assert shap_values.shape[0] == features.shape[0], \
"'shap_values' and 'features' values must have the same number of rows!"
assert shap_values.shape[1] == features.shape[1], \
"'shap_values' must have the same number of columns as 'features'!"
# get both the raw and display feature values
oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering
np.random.shuffle(oinds)
xv = features[oinds, ind].astype(np.float64)
xd = display_features[oinds, ind]
s = shap_values[oinds, ind]
if type(xd[0]) == str:
name_map = {}
for i in range(len(xv)):
name_map[xd[i]] = xv[i]
xnames = list(name_map.keys())
# allow a single feature name to be passed alone
if type(feature_names) == str:
feature_names = [feature_names]
name = feature_names[ind]
# get both the raw and display color values
color_norm = None
if interaction_index is not None:
cv = features[:, interaction_index]
cd = display_features[:, interaction_index]
clow = np.nanpercentile(cv.astype(np.float), 5)
chigh = np.nanpercentile(cv.astype(np.float), 95)
if clow == chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
if type(cd[0]) == str:
cname_map = {}
for i in range(len(cv)):
cname_map[cd[i]] = cv[i]
cnames = list(cname_map.keys())
categorical_interaction = True
elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
categorical_interaction = True
# discritize colors for categorical features
if categorical_interaction and clow != chigh:
clow = np.nanmin(cv.astype(np.float))
chigh = np.nanmax(cv.astype(np.float))
bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)
# optionally add jitter to feature values
if x_jitter > 0:
if x_jitter > 1: x_jitter = 1
xvals = xv.copy()
if isinstance(xvals[0], float):
xvals = xvals.astype(np.float)
xvals = xvals[~np.isnan(xvals)]
xvals = np.unique(xvals) # returns a sorted array
if len(xvals) >= 2:
smallest_diff = np.min(np.diff(xvals))
jitter_amount = x_jitter * smallest_diff
xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)
# the actual scatter plot, TODO: adapt the dot_size to the number of data points?
xv_nan = np.isnan(xv)
xv_notnan = np.invert(xv_nan)
if interaction_index is not None:
# plot the nan values in the interaction feature as grey
cvals = features[oinds, interaction_index].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
cvals[cvals_imp > chigh] = chigh
cvals[cvals_imp < clow] = clow
p = ax.scatter(
xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],
cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
norm=color_norm, rasterized=len(xv) > 500
)
p.set_array(cvals[xv_notnan])
else:
p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,
alpha=alpha, rasterized=len(xv) > 500)
if interaction_index != ind and interaction_index is not None:
# draw the color bar
if type(cd[0]) == str:
tick_positions = [cname_map[n] for n in cnames]
if len(tick_positions) == 2:
tick_positions[0] -= 0.25
tick_positions[1] += 0.25
cb = pl.colorbar(p, ticks=tick_positions, ax=ax)
cb.set_ticklabels(cnames)
else:
cb = pl.colorbar(p, ax=ax)
cb.set_label(feature_names[interaction_index], size=13)
cb.ax.tick_params(labelsize=11)
if categorical_interaction:
cb.ax.tick_params(length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
# handles any setting of xmax and xmin
# note that we handle None,float, or "percentile(float)" formats
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
ax.set_xlim(xmin, xmax)
# plot any nan feature values as tick marks along the y-axis
xlim = ax.get_xlim()
if interaction_index is not None:
p = ax.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,
vmin=clow, vmax=chigh
)
p.set_array(cvals[xv_nan])
else:
ax.scatter(
xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
linewidth=2, color=color, alpha=alpha
)
ax.set_xlim(xlim)
# make the plot more readable
ax.set_xlabel(name, color=axis_color, fontsize=13)
ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)
if title is not None:
ax.set_title(title, color=axis_color, fontsize=13)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
for spine in ax.spines.values():
spine.set_edgecolor(axis_color)
if type(xd[0]) == str:
ax.set_xticks([name_map[n] for n in xnames])
ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))
if show:
with warnings.catch_warnings(): # ignore expected matplotlib warnings
warnings.simplefilter("ignore", RuntimeWarning)
pl.show()
if get_png:
file = BytesIO()
pl.savefig(file, format='png', bbox_inches="tight")
return file
| true | true |
f73341a9b1861783023e6c29597f6470872dddd4 | 4,232 | py | Python | train_classifier.py | Awesomex005/CarND-Vehicle-Detection | e12068887946605d148284aeea0262695d54743f | [
"MIT"
] | null | null | null | train_classifier.py | Awesomex005/CarND-Vehicle-Detection | e12068887946605d148284aeea0262695d54743f | [
"MIT"
] | null | null | null | train_classifier.py | Awesomex005/CarND-Vehicle-Detection | e12068887946605d148284aeea0262695d54743f | [
"MIT"
] | null | null | null | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from extract_feature import *
# NOTE: the next import is only valid for scikit-learn version <= 0.17
# for scikit-learn >= 0.18 use:
# from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
import pickle
# Read in cars and notcars
cars = glob.glob('./train_data/vehicles/*/*.png'); train_data_tpye = 'png'; notcars = glob.glob('./train_data/non-vehicles/*/*.png')
#cars = glob.glob('./hog_test_imgs/vehicles_smallset/*/*.jpeg'); train_data_tpye = 'jpeg'; #notcars = glob.glob('./hog_test_imgs/non-vehicles_smallset/*/*.jpeg')
sample_size = None
cars = cars[0:sample_size]
notcars = notcars[0:sample_size]
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop = [None, None] # Min and max in y to search in slide_window()
t = time.time()
print("start extract car_features")
car_features = extract_features(cars, train_data_tpye, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
print("start extract notcar_features")
notcar_features = extract_features(notcars, train_data_tpye, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=rand_state)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to extract features SVC...')
print("X_train shape: {} \X_test shape: {}".format(X_train.shape, X_test.shape))
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X_train)
# Apply the scaler to X
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
svc = LinearSVC()
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# pickle SVC
pickle_file = 'svc_acc_%f.p'%round(svc.score(X_test, y_test), 4)
try:
with open(pickle_file, 'wb') as pfile:
pickle.dump(
{
'svc': svc,
'scaler': X_scaler,
'color_space': color_space,
'orient': orient,
'pix_per_cell': pix_per_cell,
'cell_per_block': cell_per_block,
'spatial_size': spatial_size,
'hist_bins': hist_bins,
'spatial_feat': spatial_feat,
'hist_feat': hist_feat,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise | 39.924528 | 161 | 0.661626 | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from extract_feature import *
from sklearn.cross_validation import train_test_split
import pickle
cars = glob.glob('./train_data/vehicles/*/*.png'); train_data_tpye = 'png'; notcars = glob.glob('./train_data/non-vehicles/*/*.png')
ple_size]
color_space = 'YCrCb'
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = "ALL"
spatial_size = (32, 32)
hist_bins = 32
spatial_feat = True
hist_feat = True
hog_feat = True
y_start_stop = [None, None]
t = time.time()
print("start extract car_features")
car_features = extract_features(cars, train_data_tpye, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
print("start extract notcar_features")
notcar_features = extract_features(notcars, train_data_tpye, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=rand_state)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to extract features SVC...')
print("X_train shape: {} \X_test shape: {}".format(X_train.shape, X_test.shape))
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
svc = LinearSVC()
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
pickle_file = 'svc_acc_%f.p'%round(svc.score(X_test, y_test), 4)
try:
with open(pickle_file, 'wb') as pfile:
pickle.dump(
{
'svc': svc,
'scaler': X_scaler,
'color_space': color_space,
'orient': orient,
'pix_per_cell': pix_per_cell,
'cell_per_block': cell_per_block,
'spatial_size': spatial_size,
'hist_bins': hist_bins,
'spatial_feat': spatial_feat,
'hist_feat': hist_feat,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise | true | true |
f7334209758cbea200cb0c7e05c995573175bd72 | 20,788 | py | Python | monk/pytorch/finetune/level_12_losses_main.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 542 | 2019-11-10T12:09:31.000Z | 2022-03-28T11:39:07.000Z | monk/pytorch/finetune/level_12_losses_main.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 117 | 2019-11-12T09:39:24.000Z | 2022-03-12T00:20:41.000Z | monk/pytorch/finetune/level_12_losses_main.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 246 | 2019-11-09T21:53:24.000Z | 2022-03-29T00:57:07.000Z | from monk.pytorch.finetune.imports import *
from monk.system.imports import *
from monk.pytorch.finetune.level_11_optimizers_main import prototype_optimizers
class prototype_losses(prototype_optimizers):
'''
Main class for all parameters in expert mode
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_l1(self, weight=None, batch_axis=0):
'''
Select L1 Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = l1(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_l2(self, weight=None, batch_axis=0):
'''
Select L2 Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = l2(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
axis_to_sum_over=int, label_as_categories=bool, label_smoothing=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_softmax_crossentropy(self, weight=None, batch_axis=0, axis_to_sum_over=-1,
label_as_categories=True, label_smoothing=False):
'''
Select soaftmax crossentropy Loss - Auto softmax before applying loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
axis_to_sum_over (int): Set as -1
label_as_categories (bool): Fixed as True
label_smoothing (bool): If True, label smoothning is applied.
Returns:
None
'''
self.system_dict = softmax_crossentropy(self.system_dict, weight=weight, batch_axis=batch_axis,
axis_to_sum_over=axis_to_sum_over, label_as_categories=label_as_categories,
label_smoothing=label_smoothing);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
axis_to_sum_over=int, label_as_categories=bool, label_smoothing=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_crossentropy(self, weight=None, batch_axis=0, axis_to_sum_over=-1,
label_as_categories=True, label_smoothing=False):
'''
Select crossentropy Loss - Need to manually apply softmax
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
axis_to_sum_over (int): Set as -1
label_as_categories (bool): Fixed as True
label_smoothing (bool): If True, label smoothning is applied.
Returns:
None
'''
self.system_dict = crossentropy(self.system_dict, weight=weight, batch_axis=batch_axis,
axis_to_sum_over=axis_to_sum_over, label_as_categories=label_as_categories,
label_smoothing=label_smoothing);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_sigmoid_binary_crossentropy(self, weight=None, batch_axis=0):
'''
Select sigmoid binary crossentropy Loss - Auto sigmoid before applying loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = sigmoid_binary_crossentropy(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_binary_crossentropy(self, weight=None, batch_axis=0):
'''
Select binary crossentropy Loss - Need to manually apply sigmoid
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = binary_crossentropy(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", log_pre_applied=bool, weight=[list, type(np.array([1, 2, 3])), float, type(None)],
batch_axis=int, axis_to_sum_over=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_kldiv(self, log_pre_applied=False, weight=None, batch_axis=0, axis_to_sum_over=-1):
'''
Select lkdiv Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
axis_to_sum_over (int): Set as -1
log_pre_applied (bool): If set as False, then logarithmic function is auto applied over target variables
Returns:
None
'''
self.system_dict = kldiv(self.system_dict, weight=weight, batch_axis=batch_axis,
axis_to_sum_over=axis_to_sum_over, log_pre_applied=log_pre_applied);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", log_pre_applied=bool, weight=[list, type(np.array([1, 2, 3])), float, type(None)],
batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_poisson_nll(self, log_pre_applied=False, weight=None, batch_axis=0):
'''
Select poisson_nll Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
log_pre_applied (bool): If set as False, then logarithmic function is auto applied over target variables
Returns:
None
'''
self.system_dict = poisson_nll(self.system_dict, log_pre_applied=log_pre_applied,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
threshold_for_mean_estimator=[int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_huber(self, weight=None, batch_axis=0, threshold_for_mean_estimator=1):
'''
Select huber Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
threshold_for_mean_estimator (int): Threshold for trimmed mean estimator.
Returns:
None
'''
self.system_dict = huber(self.system_dict, threshold_for_mean_estimator=threshold_for_mean_estimator,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
margin=[int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_hinge(self, weight=None, batch_axis=0, margin=1):
'''
Select hinge Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
margin (float): MArgin value.
Returns:
None
'''
self.system_dict = hinge(self.system_dict, margin=margin,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
margin=[int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_squared_hinge(self, weight=None, batch_axis=0, margin=1):
'''
Select squared hinge Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
margin (float): MArgin value.
Returns:
None
'''
self.system_dict = squared_hinge(self.system_dict, margin=margin,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
margin=[int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_multimargin(self, weight=None, batch_axis=0, margin=1):
'''
Select multi margin Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
margin (float): MArgin value.
Returns:
None
'''
self.system_dict = multimargin(self.system_dict, margin=margin,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int,
margin=[int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_squared_multimargin(self, weight=None, batch_axis=0, margin=1):
'''
Select squared multi margin Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
margin (float): MArgin value.
Returns:
None
'''
self.system_dict = squared_multimargin(self.system_dict, margin=margin,
weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_multilabel_margin(self, weight=None, batch_axis=0):
'''
Select multilabel margin Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = multilabelmargin(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", weight=[list, type(np.array([1, 2, 3])), float, type(None)], batch_axis=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def loss_multilabel_softmargin(self, weight=None, batch_axis=0):
'''
Select multilabel softmargin Loss
Args:
weight (float): global scalar for weight loss
batch_axis (int): Axis representing number of elements in the batch - N
Returns:
None
'''
self.system_dict = multilabelsoftmargin(self.system_dict, weight=weight, batch_axis=batch_axis);
self.custom_print("Loss");
self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["loss"]["name"]));
self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["loss"]["params"]));
self.custom_print("");
###############################################################################################################################################
| 51.328395 | 147 | 0.479219 | from monk.pytorch.finetune.imports import *
from monk.system.imports import *
from monk.pytorch.finetune.level_11_optimizers_main import prototype_optimizers
class prototype_losses(prototype_optimizers):
@accepts("self", verbose=int, post_trace=False)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
| true | true |
f73342ade8523d343007d9b616bca0d71c3a4c27 | 4,050 | py | Python | ghidra_9.0/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/test_check.py | ChristopherMorrison/ghidra | e53aa57d1aff79a1df93728f746705c58fe95ab0 | [
"Apache-2.0"
] | 577 | 2020-06-04T16:34:44.000Z | 2022-03-31T11:46:07.000Z | ghidra_9.0/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/test_check.py | ChristopherMorrison/ghidra | e53aa57d1aff79a1df93728f746705c58fe95ab0 | [
"Apache-2.0"
] | 174 | 2015-01-08T20:37:09.000Z | 2020-06-03T16:48:59.000Z | ghidra_9.0/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/test_check.py | ChristopherMorrison/ghidra | e53aa57d1aff79a1df93728f746705c58fe95ab0 | [
"Apache-2.0"
] | 299 | 2015-01-23T10:06:24.000Z | 2022-02-02T06:34:51.000Z | # -*- encoding: utf8 -*-
"""Tests for distutils.command.check."""
import unittest
from test.test_support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, **options):
if metadata is None:
metadata = {}
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with Unicode entries
metadata = {'url': u'xxx', 'author': u'\u00c9ric',
'author_email': u'xxx', u'name': 'xxx',
'version': u'xxx',
'description': u'Something about esszet \u00df',
'long_description': u'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
def test_check_document(self):
if not HAS_DOCUTILS: # won't test without docutils
return
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
def test_check_restructuredtext(self):
if not HAS_DOCUTILS: # won't test without docutils
return
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = u'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| 36.818182 | 78 | 0.588148 |
import unittest
from test.test_support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, **options):
if metadata is None:
metadata = {}
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
return cmd
def test_check_metadata(self):
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
# now with the strict mode, we should
# get an error if there are missing metadata
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with Unicode entries
metadata = {'url': u'xxx', 'author': u'\u00c9ric',
'author_email': u'xxx', u'name': 'xxx',
'version': u'xxx',
'description': u'Something about esszet \u00df',
'long_description': u'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
def test_check_document(self):
if not HAS_DOCUTILS: # won't test without docutils
return
pkg_info, dist = self.create_dist()
cmd = check(dist)
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
def test_check_restructuredtext(self):
if not HAS_DOCUTILS: # won't test without docutils
return
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
metadata['long_description'] = u'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def test_suite():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| true | true |
f73342e88b8291f32ee90a4caf7fbe952853619b | 14,615 | py | Python | carEducation.py | L0113408/JoyRides | e006f94f3c31317b68566a2bce504ac6a6b702d1 | [
"MIT"
] | 3 | 2021-09-01T02:42:52.000Z | 2021-09-06T04:14:24.000Z | carEducation.py | sarahnadi/JoyRides | e006f94f3c31317b68566a2bce504ac6a6b702d1 | [
"MIT"
] | null | null | null | carEducation.py | sarahnadi/JoyRides | e006f94f3c31317b68566a2bce504ac6a6b702d1 | [
"MIT"
] | 1 | 2021-09-01T02:43:00.000Z | 2021-09-01T02:43:00.000Z | """
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020 @ Copyright Wechaty Contributors <https://github.com/wechaty>
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#from asyncio.windows_events import NULL
import os
import asyncio
import json
import array
#import paddlehub as hub
#import cv2
#from PIL import Image
from operator import methodcaller
#from wechaty.user import room
from user import User
from action import Action
#from typing import Union
#from wechaty.plugin import WechatyPlugin
from wechaty import (
Contact,
Room,
FileBox,
Message,
Wechaty,
ScanStatus,
user,
)
# State tables driving the bot: ``processes`` maps state ids to handler
# configuration (action name, question text, next state), ``pair`` holds
# question/answer pairs.
with open('./data/data.json', 'r') as f:
    processes = json.load(f)
with open('./data/pair.json', 'r') as f:
    pair = json.load(f)
# Trigger keywords (user-facing runtime strings): ``statkey`` starts the game,
# ``ninghtkey`` switches to night mode.
statkey = '出发吧'
ninghtkey = '天黑了'
# Module-level game state.  NOTE(review): a single global ``userInfo`` means
# only one concurrent player is supported -- confirm that is intended.
curProcess = ''
userInfo : User
userInfo = ''
dicuser = {}  # unused in this file; presumably for multi-user support -- TODO confirm
# Wechaty puppet-service configuration.
# SECURITY: the service token is hard-coded in source control; it should be
# read from the environment or a secrets store instead.
os.environ['WECHATY_PUPPET']="wechaty-puppet-service"
os.environ['WECHATY_PUPPET_SERVICE_TOKEN']='puppet_padlocal_6c909d60a7444eeaa106e044de0a6026'
os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT']="106.13.69.114:8080"
class DoProcess(object):
    """Dispatches a message to the handler of the user's current state.

    Each public method name (``S01``, ``P01`` ... ``P99``) matches a state key
    in the ``processes`` table loaded from data.json; the table entry names
    the :class:`Action` method to invoke for that state.

    Fixes relative to the original:
    * ``P05`` was defined three times with identical bodies; only one
      definition is kept (behavior unchanged -- later defs shadowed earlier).
    * The identical four-line dispatch body repeated in ~15 methods is
      factored into the private helper :meth:`_dispatch`; the self-message
      guard shared by P02/P12/P21 is factored into :meth:`_is_self_message`.
    Every public method name and signature is unchanged.
    """

    def __init__(self, msg: Message):
        self.msg = msg

    def _is_self_message(self):
        # True when the talker is the bot itself (such echoes are ignored).
        return self.msg.wechaty.contact_id == self.msg.talker().contact_id

    def _dispatch(self, user_Info: User):
        # Run the Action method configured for the user's current state.
        step = processes[user_Info.state]
        return methodcaller(step['action'], user_Info)(Action(self.msg))

    def S01(self, user_Info: User):
        """Wake-up state: build a fresh User from the message sender and
        return (opening question, user)."""
        step = processes[processes['start']]
        send = step['question']
        from_contact = self.msg.talker()
        room = self.msg.room()
        user_Info = User(from_contact.contact_id)
        user_Info.contact = from_contact
        user_Info.room = None
        if room is not None:
            user_Info.roomtopic = room.room_id
            user_Info.room = room
        user_Info.state = step['state']
        return send, user_Info

    def P01(self, user_Info: User):
        return self._dispatch(user_Info)

    def P02(self, user_Info: User):
        # Photo mode: ignore messages the bot sent itself.
        if self._is_self_message():
            return
        return self._dispatch(user_Info)

    def P03(self, user_Info: User):
        # Receive an image.
        return self._dispatch(user_Info)

    def P04(self, user_Info: User):
        """Dispatch, then remember an image path announced as 'imgpath|<path>'."""
        result = self._dispatch(user_Info)
        if result[0] is not None:
            send = result[0]
            user_Info = result[1]
            params = send.split('|')
            if len(params) > 1 and params[0] == 'imgpath':
                user_Info.imgpath = params[1]
        return result

    def P05(self, user_Info: User):
        return self._dispatch(user_Info)

    def P06(self, user_Info: User):
        """Interaction-mode selection: record the chosen question type."""
        step = processes[user_Info.state]
        text: str = self.msg.text()
        if text in step:
            user_Info.qstntype = step[text]['type']
        user_Info.cls = None
        return methodcaller(step['action'], user_Info)(Action(self.msg))

    def P07(self, user_Info: User):
        # Choose the image source.
        return self._dispatch(user_Info)

    def P10(self, user_Info: User):
        # Drawing-flow entry point.
        return self._dispatch(user_Info)

    def P11(self, user_Info: User):
        # System sends an image.
        return self._dispatch(user_Info)

    def P12(self, user_Info: User):
        # Receive an image; ignore the bot's own messages.
        if self._is_self_message():
            return
        return self._dispatch(user_Info)

    def P15(self, user_Info: User):
        return self._dispatch(user_Info)

    def P20(self, user_Info: User):
        return self._dispatch(user_Info)

    def P21(self, user_Info: User):
        # Compare answers; ignore the bot's own messages.
        if self._is_self_message():
            return
        return self._dispatch(user_Info)

    def P22(self, user_Info: User):
        return self._dispatch(user_Info)

    def P90(self, user_Info: User):
        """Night mode: reset the sender to a fresh User in state P90."""
        fresh_user = User(self.msg.talker().contact_id)
        fresh_user.state = 'P90'
        return self._dispatch(fresh_user)

    def P98(self, user_Info: User):
        return self._dispatch(user_Info)

    def P99(self, user_Info: User):
        return self._dispatch(user_Info)
def doGame(msg: Message, img_path: str):
    """Route *msg* through the game state machine.

    Returns the handler result tuple ``(reply, user)`` when a state handler
    produced one, otherwise ``''``.  Mutates the module-level globals
    ``userInfo`` and ``curProcess``.
    """
    global userInfo
    global curProcess
    result = None
    if msg.text() == ninghtkey:
        # Night-mode keyword: force-dispatch state P90.
        dp = DoProcess(msg)
        result = methodcaller('P90', userInfo)(dp)
    elif msg.text() == statkey:
        # Start keyword: wake the flow up through S01.
        dp = DoProcess(msg)
        result = methodcaller('S01', userInfo)(dp)
    elif userInfo:
        # An active user exists: dispatch its current state handler,
        # attaching any freshly-saved image path first.
        dp = DoProcess(msg)
        curProcess = processes[userInfo.state]
        if img_path is not None :
            userInfo.imgpath = img_path
        result = methodcaller(curProcess['state'], userInfo)(dp)
    if (result is not None) and (len(result) > 1) :
        # Handlers return (reply, updated_user); persist the updated user.
        userInfo = result[1]
        return result
    else :
        return ''
async def save_img(msg: Message):
    """Download the image attached to *msg* into ./images/input/ and
    return the local file path."""
    file_box = await msg.to_file_box()
    local_path = './images/input/' + file_box.name
    await file_box.to_file(file_path=local_path)
    return local_path
# Keywords/answers treated as bot commands.  NOTE(review): only referenced
# from commented-out code in on_message -- presumably a filter that was
# disabled; confirm before removing.
command = ['0','1','2', statkey, ninghtkey, processes['P03']['question'], processes['P04']['question']]
async def on_message(msg: Message):
    """Main message handler: save incoming images, run the game state
    machine, and handle a few debug keywords ('D', 'ding', '图片')."""
    global contact
    from_contact = msg.talker()
    img_path = None
    # If the incoming message is an image (and not one the bot sent itself),
    # download it to ./images/input/ so the game handlers can use it.
    if msg.type() == Message.Type.MESSAGE_TYPE_IMAGE:
        from_contact = msg.talker()
        if msg.wechaty.contact_id != from_contact.contact_id :
            # Convert the Message to a FileBox.
            file_box_2 = await msg.to_file_box()
            # Image file name.
            img_name = file_box_2.name
            # Local path the image is saved under.
            img_path = './images/input/' + img_name
            print('img_path=', img_path)
            # Persist the image to disk.
            await file_box_2.to_file(file_path=img_path)
    # Game flow handling.  NOTE(review): isCommand and the 'if True' wrapper
    # are leftovers of a disabled command filter.
    isCommand = False
    if True :
        result = doGame(msg, img_path = img_path)
        if len(result) > 1 :
            if result[0] is not None:
                send = result[0]
                user_Info : User = result[1]
                # Replies of the form 'imgpath|<path>' or 'url|<filebox>'
                # are sent as media instead of plain text.
                params = send.split('|')
                if len(params) > 1 :
                    if params[0] == 'imgpath' :
                        file_box_3 = FileBox.from_file(params[1])
                        await msg.say(file_box_3)
                        send = None
                    if params[0] == 'url' :
                        file_box_3 = params[1]
                        await msg.say(file_box_3)
                        send = None
                if send is not None:
                    # Reply into the room when the game runs in one,
                    # otherwise directly to the contact.
                    if user_Info.room is not None :
                        await user_Info.room.say(send)
                    else :
                        await user_Info.contact.say(send)
    # Debug helpers below.
    if msg.text() == 'D':
        contact = msg.talker()
        await msg.say('ding')
    if msg.text() == 'ding':
        msg.talker = contact
        await contact.say('这是自动回复: dong dong dong')
        await msg.say('测试')
    if msg.text() == '图片':
        url = 'https://ai.bdstatic.com/file/403BC03612CC4AF1B05FB26A19D99BAF'
        '''
        with open('./image/3330.jpeg', 'rb') as f:
            content = base64.b64encode(f.read())
        file_box = FileBox.from_base64(name='3300177014.jpg', base64=content)
        await conversation.say(file_box)
        '''
        img_path = r'./images/3330.jpeg'
        file_box_3 = FileBox.from_file(img_path)
        await msg.say(file_box_3)
        await msg.say('图片后')
async def on_scan(
        qrcode: str,
        status: ScanStatus,
        _data,
):
    """Report login QR-code scan events on stdout with a clickable URL."""
    print(f'Status: {status}')
    print(f'View QR Code Online: https://wechaty.js.org/qrcode/{qrcode}')
async def on_login(user: Contact):
    # Called once the bot account has logged in; log the account.
    print(user)
async def main():
    """Validate configuration, then start the Wechaty bot and serve events.

    Bug fix: the original printed an error when the puppet-service token was
    missing but then started the bot anyway; without a token the puppet
    service cannot connect, so we now return early instead.
    """
    # Make sure WECHATY_PUPPET_SERVICE_TOKEN is set in the environment.
    if 'WECHATY_PUPPET_SERVICE_TOKEN' not in os.environ:
        print('''
        Error: WECHATY_PUPPET_SERVICE_TOKEN is not found in the environment variables
        You need a TOKEN to run the Python Wechaty. Please goto our README for details
        https://github.com/wechaty/python-wechaty-getting-started/#wechaty_puppet_service_token
        ''')
        return
    bot = Wechaty()
    bot.on('scan', on_scan)
    bot.on('login', on_login)
    bot.on('message', on_message)
    await bot.start()
    print('[Python Wechaty] Ding Dong Bot started.')
# Script entry point: run the bot's event loop until it exits.
asyncio.run(main())
import os
import asyncio
import json
import array
from operator import methodcaller
from user import User
from action import Action
from wechaty import (
Contact,
Room,
FileBox,
Message,
Wechaty,
ScanStatus,
user,
)
with open('./data/data.json', 'r') as f:
processes = json.load(f)
with open('./data/pair.json', 'r') as f:
pair = json.load(f)
statkey = '出发吧'
ninghtkey = '天黑了'
curProcess = ''
userInfo : User
userInfo = ''
dicuser = {}
os.environ['WECHATY_PUPPET']="wechaty-puppet-service"
os.environ['WECHATY_PUPPET_SERVICE_TOKEN']='puppet_padlocal_6c909d60a7444eeaa106e044de0a6026'
os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT']="106.13.69.114:8080"
class DoProcess(object):
def __init__(self, msg: Message):
self.msg = msg
def S01(self, user_Info : User):
cur_Process = processes[processes['start']]
send = cur_Process['question']
from_contact = self.msg.talker()
room = self.msg.room()
user_Info = User(from_contact.contact_id)
user_Info.contact = from_contact
user_Info.room = None
if room is not None :
user_Info.roomtopic = room.room_id
user_Info.room = room
user_Info.state = cur_Process['state']
return send, user_Info
def P01(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P02(self, user_Info : User):
from_contact = self.msg.talker()
if self.msg.wechaty.contact_id == from_contact.contact_id : return
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P03(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P04(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
if result[0] is not None:
send = result[0]
user_Info : User = result[1]
params = send.split('|')
if len(params) > 1 :
if params[0] == 'imgpath' :
user_Info.imgpath = params[1]
return result
def P05(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P06(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
text: str = self.msg.text()
if text in cur_Process :
user_Info.qstntype = cur_Process[text]['type']
method = cur_Process['action']
user_Info.cls = None
result = methodcaller(method, user_Info)(a)
return result
def P07(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P10(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P11(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P12(self, user_Info : User):
from_contact = self.msg.talker()
if self.msg.wechaty.contact_id == from_contact.contact_id : return
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P15(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P05(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P20(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P21(self, user_Info : User):
from_contact = self.msg.talker()
if self.msg.wechaty.contact_id == from_contact.contact_id : return
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P22(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P05(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P90(self, user_Info : User):
a = Action(self.msg)
from_contact = self.msg.talker()
user_Info = User(from_contact.contact_id)
user_Info.state = 'P90'
cur_Process = processes['P90']
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P98(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def P99(self, user_Info : User):
a = Action(self.msg)
cur_Process = processes[user_Info.state]
method = cur_Process['action']
result = methodcaller(method, user_Info)(a)
return result
def doGame(msg: Message, img_path: str):
global userInfo
global curProcess
result = None
if msg.text() == ninghtkey:
dp = DoProcess(msg)
result = methodcaller('P90', userInfo)(dp)
elif msg.text() == statkey:
dp = DoProcess(msg)
result = methodcaller('S01', userInfo)(dp)
elif userInfo:
dp = DoProcess(msg)
curProcess = processes[userInfo.state]
if img_path is not None :
userInfo.imgpath = img_path
result = methodcaller(curProcess['state'], userInfo)(dp)
if (result is not None) and (len(result) > 1) :
userInfo = result[1]
return result
else :
return ''
async def save_img(msg: Message):
file_box_2 = await msg.to_file_box()
img_name = file_box_2.name
img_path = './images/input/' + img_name
await file_box_2.to_file(file_path=img_path)
return img_path
command = ['0','1','2', statkey, ninghtkey, processes['P03']['question'], processes['P04']['question']]
async def on_message(msg: Message):
global contact
from_contact = msg.talker()
img_path = None
if msg.type() == Message.Type.MESSAGE_TYPE_IMAGE:
from_contact = msg.talker()
if msg.wechaty.contact_id != from_contact.contact_id :
# 将Message转换为FileBox
file_box_2 = await msg.to_file_box()
# 获取图片名
img_name = file_box_2.name
# 图片保存的路径
img_path = './images/input/' + img_name
#img_path = img_name
print('img_path=', img_path)
# 将图片保存为本地文件
await file_box_2.to_file(file_path=img_path)
# '''
isCommand = False
if True :
result = doGame(msg, img_path = img_path)
if len(result) > 1 :
if result[0] is not None:
send = result[0]
user_Info : User = result[1]
params = send.split('|')
if len(params) > 1 :
if params[0] == 'imgpath' :
file_box_3 = FileBox.from_file(params[1])
await msg.say(file_box_3)
send = None
if params[0] == 'url' :
file_box_3 = params[1]
await msg.say(file_box_3)
send = None
if send is not None:
if user_Info.room is not None :
await user_Info.room.say(send)
else :
await user_Info.contact.say(send)
if msg.text() == 'D':
contact = msg.talker()
await msg.say('ding')
if msg.text() == 'ding':
msg.talker = contact
await contact.say('这是自动回复: dong dong dong')
await msg.say('测试')
if msg.text() == '图片':
url = 'https://ai.bdstatic.com/file/403BC03612CC4AF1B05FB26A19D99BAF'
img_path = r'./images/3330.jpeg'
file_box_3 = FileBox.from_file(img_path)
await msg.say(file_box_3)
await msg.say('图片后')
async def on_scan(
qrcode: str,
status: ScanStatus,
_data,
):
print('Status: ' + str(status))
print('View QR Code Online: https://wechaty.js.org/qrcode/' + qrcode)
async def on_login(user: Contact):
print(user)
async def main():
if 'WECHATY_PUPPET_SERVICE_TOKEN' not in os.environ:
print('''
Error: WECHATY_PUPPET_SERVICE_TOKEN is not found in the environment variables
You need a TOKEN to run the Python Wechaty. Please goto our README for details
https://github.com/wechaty/python-wechaty-getting-started/#wechaty_puppet_service_token
''')
bot = Wechaty()
bot.on('scan', on_scan)
bot.on('login', on_login)
bot.on('message', on_message)
await bot.start()
print('[Python Wechaty] Ding Dong Bot started.')
asyncio.run(main()) | true | true |
f73343b901bf273919dad04a3aaac532772c271a | 1,872 | py | Python | congress/client.py | giltolley/propublica-congress | 03e519341063c5703080b4723112f1831816c77e | [
"MIT"
] | 49 | 2017-02-04T18:37:14.000Z | 2022-02-07T02:01:31.000Z | congress/client.py | giltolley/propublica-congress | 03e519341063c5703080b4723112f1831816c77e | [
"MIT"
] | 29 | 2017-02-06T17:50:31.000Z | 2022-03-29T19:58:12.000Z | congress/client.py | giltolley/propublica-congress | 03e519341063c5703080b4723112f1831816c77e | [
"MIT"
] | 13 | 2017-02-11T06:42:26.000Z | 2020-08-15T16:20:50.000Z | """
Base client outlining how we fetch and parse responses
"""
import json
import logging
import httplib2
from .utils import NotFound, CongressError, u
log = logging.getLogger('congress')
class Client(object):
    """
    Thin HTTP client for the ProPublica Congress API.

    A client stores the API credentials and owns the ``httplib2.Http``
    instance (optionally backed by an on-disk cache, or fully customized
    by the caller) that performs every request.
    """

    BASE_URI = "https://api.propublica.org/congress/v1/"

    def __init__(self, apikey=None, cache='.cache', http=None):
        self.apikey = apikey
        # Reuse a caller-supplied Http object; otherwise build one that
        # caches responses in the given directory.
        self.http = http if isinstance(http, httplib2.Http) else httplib2.Http(cache)

    def fetch(self, path, parse=lambda r: r['results'][0]):
        """
        Make an authenticated API request for *path* (relative to BASE_URI)
        and return the parsed payload.

        Can be used directly to reach new endpoints or to customize parsing::

            >>> from congress import Congress
            >>> client = Congress()
            >>> senate = client.fetch('115/senate/members.json')
            >>> print(senate['num_results'])
            101
        """
        url = self.BASE_URI + path
        log.debug(url)
        resp, raw = self.http.request(url, headers={'X-API-Key': self.apikey})
        payload = json.loads(u(raw))
        # Anything other than an OK status is turned into an exception.
        if not payload.get('status') == 'OK':
            if "errors" in payload and payload['errors'][0]['error'] == "Record not found":
                raise NotFound(path)
            if payload.get('status') == '404':
                raise NotFound(path)
            raise CongressError(payload, resp, url)
        if callable(parse):
            payload = parse(payload)
        return payload
| 26.366197 | 91 | 0.592415 | import json
import logging
import httplib2
from .utils import NotFound, CongressError, u
log = logging.getLogger('congress')
class Client(object):
BASE_URI = "https://api.propublica.org/congress/v1/"
def __init__(self, apikey=None, cache='.cache', http=None):
self.apikey = apikey
if isinstance(http, httplib2.Http):
self.http = http
else:
self.http = httplib2.Http(cache)
def fetch(self, path, parse=lambda r: r['results'][0]):
url = self.BASE_URI + path
headers = {'X-API-Key': self.apikey}
log.debug(url)
resp, content = self.http.request(url, headers=headers)
content = u(content)
content = json.loads(content)
if not content.get('status') == 'OK':
if "errors" in content and content['errors'][0]['error'] == "Record not found":
raise NotFound(path)
if content.get('status') == '404':
raise NotFound(path)
raise CongressError(content, resp, url)
if callable(parse):
content = parse(content)
return content
| true | true |
f733450b38e896eba9d3f68ecc6ba467ad7a4e20 | 1,322 | py | Python | chemprop/models/FFNetwork.py | DannySalem/chemprop | f99cea2c08f54640ccd8ad3851a93f47badc72dd | [
"MIT"
] | null | null | null | chemprop/models/FFNetwork.py | DannySalem/chemprop | f99cea2c08f54640ccd8ad3851a93f47badc72dd | [
"MIT"
] | null | null | null | chemprop/models/FFNetwork.py | DannySalem/chemprop | f99cea2c08f54640ccd8ad3851a93f47badc72dd | [
"MIT"
] | null | null | null | import torch.nn as nn
from chemprop.nn_utils import get_activation_function
from chemprop.args import TrainArgs
def create_ffn(output_size: int, input_size: int, args: TrainArgs):
    """
    Creates the feed-forward layers for the model.

    :param output_size: Dimension of the final linear layer's output.
    :param input_size: Dimension of the first linear layer's input.
    :param args: A :class:`~chemprop.args.TrainArgs` object containing model arguments.
    :return: An ``nn.Sequential`` holding the dropout/linear/activation stack.
    """
    # Bug fix: the original computed first_linear_dim from args.hidden_size,
    # use_input_features and atom_descriptors, then unconditionally overwrote
    # it with input_size -- that computation was dead code (a leftover from
    # chemprop's MoleculeModel) and has been removed; behavior is unchanged.
    first_linear_dim = input_size
    dropout = nn.Dropout(args.dropout)
    activation = get_activation_function(args.activation)
    # Create FFN layers: a single linear map, or ffn_num_layers layers with
    # hidden width args.ffn_hidden_size separated by activation + dropout.
    if args.ffn_num_layers == 1:
        ffn = [dropout, nn.Linear(first_linear_dim, output_size)]
    else:
        ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]
        for _ in range(args.ffn_num_layers - 2):
            ffn.extend(
                [activation, dropout, nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),]
            )
        ffn.extend(
            [activation, dropout, nn.Linear(args.ffn_hidden_size, output_size),]
        )
    return nn.Sequential(*ffn)
| 33.05 | 93 | 0.688351 | import torch.nn as nn
from chemprop.nn_utils import get_activation_function
from chemprop.args import TrainArgs
def create_ffn(output_size: int, input_size: int, args: TrainArgs):
first_linear_dim = args.hidden_size * args.number_of_molecules
if args.use_input_features:
first_linear_dim += args.features_size
if args.atom_descriptors == "descriptor":
first_linear_dim += args.atom_descriptors_size
first_linear_dim = input_size
dropout = nn.Dropout(args.dropout)
activation = get_activation_function(args.activation)
if args.ffn_num_layers == 1:
ffn = [dropout, nn.Linear(first_linear_dim, output_size)]
else:
ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]
for _ in range(args.ffn_num_layers - 2):
ffn.extend(
[activation, dropout, nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),]
)
ffn.extend(
[activation, dropout, nn.Linear(args.ffn_hidden_size, output_size),]
)
return nn.Sequential(*ffn)
| true | true |
f73345b124039a1ae853cb686c955e44ccee85b5 | 3,332 | py | Python | rokka_client_codegen/models/list_user_memberships_response.py | rokka-io/rokka-client-python-codegen | 63696489531a6f6efc982d3e25d79fbbce433838 | [
"MIT"
] | null | null | null | rokka_client_codegen/models/list_user_memberships_response.py | rokka-io/rokka-client-python-codegen | 63696489531a6f6efc982d3e25d79fbbce433838 | [
"MIT"
] | null | null | null | rokka_client_codegen/models/list_user_memberships_response.py | rokka-io/rokka-client-python-codegen | 63696489531a6f6efc982d3e25d79fbbce433838 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
rokka.io
digital image processing done right. [Documentation](https://rokka.io/documentation). [Changelog](https://api.rokka.io/changelog.md). # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from rokka_client_codegen.models.user_membership import UserMembership # noqa: F401,E501
class ListUserMembershipsResponse(object):
    """Swagger-generated model wrapping a list of UserMembership items.

    NOTE: this class is auto generated by the swagger code generator program;
    regenerate rather than hand-editing where possible.
    """

    # attribute name -> swagger type of that attribute
    swagger_types = {
        'items': 'list[UserMembership]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'items': 'items'
    }

    def __init__(self, items=None):
        """ListUserMembershipsResponse - a model defined in Swagger."""
        self._items = None
        self.discriminator = None
        if items is not None:
            self.items = items

    @property
    def items(self):
        """The UserMembership entries of this response.

        :rtype: list[UserMembership]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the UserMembership entries of this response."""
        self._items = items

    def to_dict(self):
        """Return the model's properties as a plain dict, converting any
        nested model (one level deep) via its own ``to_dict``."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value
        if issubclass(ListUserMembershipsResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        if not isinstance(other, ListUserMembershipsResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects are not equal."""
        return not self == other
| 28.237288 | 151 | 0.577731 |
import pprint
import re
import six
from rokka_client_codegen.models.user_membership import UserMembership
class ListUserMembershipsResponse(object):
swagger_types = {
'items': 'list[UserMembership]'
}
attribute_map = {
'items': 'items'
}
def __init__(self, items=None):
self._items = None
self.discriminator = None
if items is not None:
self.items = items
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ListUserMembershipsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ListUserMembershipsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7334768fa7f2497dd81db65213f28b75e9d04e9 | 5,800 | py | Python | BESO.py | lucasdornelles/2DBESO | b92a42346ed4945a3668a3277d67ef412e200cbb | [
"MIT"
] | null | null | null | BESO.py | lucasdornelles/2DBESO | b92a42346ed4945a3668a3277d67ef412e200cbb | [
"MIT"
] | 2 | 2018-02-28T04:54:35.000Z | 2018-02-28T12:29:44.000Z | BESO.py | lucasdornelles/2DBESO | b92a42346ed4945a3668a3277d67ef412e200cbb | [
"MIT"
] | null | null | null | import numpy as np
from FEM import get_element_dof
from tqdm import tqdm
from scipy.spatial.distance import pdist
def get_elements_sensibilities(local_matrix, minimum_density, elements_density,
                               displacements, penalty, connectivity, nodes_dof):
    """Compute the BESO sensitivity of every element.

    For each element the sensitivity is the element strain energy
    u_e^T k_e u_e; for "soft" (void) elements it is additionally scaled by
    minimum_density ** (penalty - 1), the soft-kill SIMP-style weighting.
    Returns a list of scalar sensitivities, one per element.
    """
    sensibilities = []
    for i in tqdm(range(len(connectivity))):
        # Gather this element's global DOF indices and displacements.
        element_dof = get_element_dof(connectivity[i], nodes_dof)
        element_displacements = displacements[element_dof]
        # u_e^T k_e u_e as a 1x1 matrix (asmatrix keeps 2-D [0, 0] indexing).
        element_sensibility = np.matmul(element_displacements,
                                        np.matmul(local_matrix[i],
                                                  np.transpose(np.asmatrix(element_displacements))))
        if elements_density[i] == 1:
            sensibilities.append(element_sensibility[0, 0])
        else:
            # Soft element: apply the penalized minimum-density scaling.
            element_sensibility[0, 0] = element_sensibility[0, 0] * (minimum_density ** (penalty - 1))
            sensibilities.append(element_sensibility[0, 0])
    return sensibilities
def get_elements_on_filtering_radius(centers, element_index, filter_radius):
    """Return the indices of all elements whose centroid lies within
    ``filter_radius`` of element ``element_index``'s centroid (the element
    itself included, since its distance is zero)."""
    element_center = centers[element_index]
    x0, y0 = element_center[0], element_center[1]
    neighbours = []
    for idx, center in enumerate(centers):
        # Cheap bounding-box test first, exact Euclidean distance second.
        inside_box = ((x0 - filter_radius) <= center[0] <= (x0 + filter_radius)
                      and (y0 - filter_radius) <= center[1] <= (y0 + filter_radius))
        if inside_box and pdist([center, element_center]) <= filter_radius:
            neighbours.append(idx)
    return neighbours
def get_filtering_weights(centers, filter_radius, all_elements_on_filtering_radius):
    """For every element, compute the linear distance weights
    (filter_radius - distance) of each neighbour inside its filtering
    radius.  Each weight is a length-1 numpy array (pdist's output)."""
    filtering_weights = []
    for own_center, neighbour_ids in zip(centers, all_elements_on_filtering_radius):
        element_weights = [filter_radius - pdist([own_center, centers[j]])
                           for j in neighbour_ids]
        filtering_weights.append(element_weights)
    return filtering_weights
def filter_sensibilities(sensibilities, all_elements_on_filtering_radius, filtering_weights):
    """Smooth the raw sensitivities: each element's value becomes the
    weight-averaged sensitivity of its neighbours inside the filter radius.
    Weights are length-1 numpy arrays, so the final ``[0]`` extracts the
    scalar result."""
    filtered_sensibilities = []
    for neighbour_ids, weights in zip(all_elements_on_filtering_radius, filtering_weights):
        weighted_sum = 0
        for j, weight in zip(neighbour_ids, weights):
            weighted_sum = weighted_sum + weight * sensibilities[j]
        filtered_sensibilities.append((weighted_sum / sum(weights))[0])
    return filtered_sensibilities
def average_sensibilities(last_sensibilities, filtered_sensibilities):
    # Blend the current filtered sensibilities with the previous iteration's
    # values (simple arithmetic mean) to stabilise the BESO evolution.
    averaged = []
    for idx, current in enumerate(filtered_sensibilities):
        averaged.append((last_sensibilities[idx] + current) / 2)
    return averaged
def update_elements_density(averaged_sensibilities, last_elements_density, minimum_area, evolutionary_rate, areas,
                            surface_type, surface_elements):
    # update elements density using BESO softkill optimum criteria
    # Bisects a sensibility threshold until the retained (density == 1) area
    # matches this iteration's target area, then returns the new 0/1 density
    # vector and the achieved area.
    # Target area: shrink last iteration's solid area by the evolutionary
    # rate, but never drop below the prescribed minimum area.
    last_area = sum(list(np.array(last_elements_density) * np.array(areas)))
    new_area = max(minimum_area, last_area * (1 - evolutionary_rate))
    # Gather the element ids of every surface flagged as a design domain.
    design_elements = []
    for i in range(len(surface_type)):
        if surface_type[i]:
            design_elements = design_elements + surface_elements[i]
    design_sensibilities = [averaged_sensibilities[i] for i in design_elements]
    low = min(design_sensibilities)
    high = max(design_sensibilities)
    residue = 10 ** (-5)  # relative tolerance terminating the bisection
    new_elements_density = []
    while ((high - low) / high) > residue:
        new_elements_density = list(last_elements_density)
        threshold = (high + low) / 2
        # Elements below the threshold are removed (soft-killed), the rest
        # are kept solid.
        # NOTE(review): densities are written at position i, not at
        # design_elements[i] -- this assumes the design elements occupy the
        # first len(design_sensibilities) slots. TODO confirm with callers.
        for i in range(len(design_sensibilities)):
            if design_sensibilities[i] < threshold:
                new_elements_density[i] = 0
            else:
                new_elements_density[i] = 1
        area = sum(list(np.array(new_elements_density) * np.array(areas)))
        if area > new_area:
            low = threshold
        else:
            high = threshold
        # Track the area actually achieved by the current threshold;
        # presumably intentional so the returned area matches the densities
        # -- verify, since it also moves the comparison target each pass.
        new_area = area
    return new_elements_density, new_area
def get_minimum_area(areas, minimum_area_ratio):
    # Minimum allowed structural area: a fixed fraction of the full domain.
    return sum(areas) * minimum_area_ratio
def check_convergence(compliances_list, iteration):
    # BESO convergence test: compare the summed compliance of the last five
    # iterations against the five before that; converged when the change is
    # within the fixed tolerance.
    recent = sum(compliances_list[(iteration - 5): iteration])
    earlier = sum(compliances_list[(iteration - 10): (iteration - 5)])
    compliance_diference = recent - earlier
    residue = 0.001
    convergence = bool(abs(compliance_diference) <= residue)
    return convergence, compliance_diference
| 37.419355 | 127 | 0.683966 | import numpy as np
from FEM import get_element_dof
from tqdm import tqdm
from scipy.spatial.distance import pdist
def get_elements_sensibilities(local_matrix, minimum_density, elements_density,
                               displacements, penalty, connectivity, nodes_dof):
    # Per-element sensibility u_e^T * k_e * u_e (element strain energy),
    # the standard compliance sensitivity used by BESO/SIMP schemes.
    # assumes displacements is indexable by the DOF array returned from
    # get_element_dof -- TODO confirm its layout against FEM module.
    sensibilities = []
    for i in tqdm(range(len(connectivity))):
        element_dof = get_element_dof(connectivity[i], nodes_dof)
        element_displacements = displacements[element_dof]
        # 1xN @ (NxN @ Nx1) -> 1x1 matrix; the scalar lives at [0, 0].
        element_sensibility = np.matmul(element_displacements,
                                        np.matmul(local_matrix[i],
                                                  np.transpose(np.asmatrix(element_displacements))))
        if elements_density[i] == 1:
            sensibilities.append(element_sensibility[0, 0])
        else:
            # Void (soft-killed) elements are scaled by the penalised
            # minimum-density factor.
            element_sensibility[0, 0] = element_sensibility[0, 0] * (minimum_density ** (penalty - 1))
            sensibilities.append(element_sensibility[0, 0])
    return sensibilities
def get_elements_on_filtering_radius(centers, element_index, filter_radius):
element_center = centers[element_index]
elements_on_filtering_radius = []
for i in range(len(centers)):
if (element_center[0] - filter_radius) <= centers[i][0] <= (element_center[0] + filter_radius) and \
(element_center[1] - filter_radius) <= centers[i][1] <= (element_center[1] + filter_radius) and \
pdist([centers[i], element_center]) <= filter_radius:
elements_on_filtering_radius = elements_on_filtering_radius + [i]
return elements_on_filtering_radius
def get_filtering_weights(centers, filter_radius, all_elements_on_filtering_radius):
filtering_weights = []
for element_index in range(len(centers)):
element_weights = []
element_center = centers[element_index]
elements_on_filtering_radius = all_elements_on_filtering_radius[element_index]
for elements in elements_on_filtering_radius:
center = centers[elements]
weight = filter_radius - pdist([element_center, center])
element_weights = element_weights + [weight]
filtering_weights.append(element_weights)
return filtering_weights
def filter_sensibilities(sensibilities, all_elements_on_filtering_radius, filtering_weights):
filtered_sensibilities = []
for element_index in range(len(sensibilities)):
element_sensibilitie = 0
elements_on_filtering_radius = all_elements_on_filtering_radius[element_index]
element_filtering_weights = filtering_weights[element_index]
for index in range(len(elements_on_filtering_radius)):
sensibilitie_index = elements_on_filtering_radius[index]
element_sensibilitie = element_sensibilitie + element_filtering_weights[index] * sensibilities[sensibilitie_index]
element_sensibilitie = element_sensibilitie / sum(element_filtering_weights)
filtered_sensibilities.append(element_sensibilitie[0])
return filtered_sensibilities
def average_sensibilities(last_sensibilities, filtered_sensibilities):
averaged_sensibilities = []
for element_index in range(len(filtered_sensibilities)):
element_sensibilitie = (last_sensibilities[element_index] + filtered_sensibilities[element_index]) / 2
averaged_sensibilities.append(element_sensibilitie)
return averaged_sensibilities
def update_elements_density(averaged_sensibilities, last_elements_density, minimum_area, evolutionary_rate, areas,
surface_type, surface_elements):
last_area = sum(list(np.array(last_elements_density) * np.array(areas)))
new_area = max(minimum_area, last_area * (1 - evolutionary_rate))
design_elements = []
for i in range(len(surface_type)):
if surface_type[i]:
design_elements = design_elements + surface_elements[i]
design_sensibilities = [averaged_sensibilities[i] for i in design_elements]
low = min(design_sensibilities)
high = max(design_sensibilities)
residue = 10 ** (-5)
new_elements_density = []
while ((high - low) / high) > residue:
new_elements_density = list(last_elements_density)
threshold = (high + low) / 2
for i in range(len(design_sensibilities)):
if design_sensibilities[i] < threshold:
new_elements_density[i] = 0
else:
new_elements_density[i] = 1
area = sum(list(np.array(new_elements_density) * np.array(areas)))
if area > new_area:
low = threshold
else:
high = threshold
new_area = area
return new_elements_density, new_area
def get_minimum_area(areas, minimum_area_ratio):
minimum_area = sum(areas) * minimum_area_ratio
return minimum_area
def check_convergence(compliances_list, iteration):
compliance_diference = (sum(compliances_list[(iteration - 5): iteration]) -
sum(compliances_list[(iteration - 10): (iteration - 5)]))
residue = 0.001
convergence = bool(abs(compliance_diference) <= residue)
return convergence, compliance_diference
| true | true |
f73347c90037868c8cf888fdd479050d44829a7a | 3,808 | py | Python | parse1.py | 12Me21/markup | c7a5758826d71f5bcd6c39cd8cea10ca75348671 | [
"MIT"
] | 2 | 2018-12-06T07:55:02.000Z | 2018-12-06T07:55:09.000Z | parse1.py | 12Me21/markup | c7a5758826d71f5bcd6c39cd8cea10ca75348671 | [
"MIT"
] | 3 | 2019-02-10T23:22:19.000Z | 2019-10-03T04:31:47.000Z | parse1.py | 12Me21/markup | c7a5758826d71f5bcd6c39cd8cea10ca75348671 | [
"MIT"
] | null | null | null | def highlight(code, language):
return code
def escape_html(code):
	"""Escape HTML-significant characters and render newlines as <br>."""
	# Escape '&' first so the entities introduced afterwards are not
	# double-escaped. The original no-op replace("&","&")/replace("<","<")
	# left the text completely unescaped.
	return code.replace("&", "&amp;").replace("<", "&lt;").replace("\n", "<br>")
def escape_html_char(char):
	"""Escape a single character for HTML output.

	'<' and '&' become entities ("&lt;"/"&amp;" -- the originals were
	identity replacements and escaped nothing), '\n' becomes <br>,
	'\r' is dropped, everything else passes through unchanged.
	"""
	if char=="<":
		return "&lt;"
	if char=="&":
		return "&amp;"
	if char=="\n":
		return "<br>"
	if char=="\r":
		return ""
	return char
def parse(code):
	"""Parse a custom lightweight markup string into an HTML string.

	Recursive-descent, character-at-a-time parser. Supported constructs:
	inline/` ``` ` code blocks, *-headings at line start, backslash
	escapes, |-delimited tables and {...} grouping (handled by nextb()).
	"""
	# Shared cursor state for all nested helpers: i is the index of the
	# current character, c the character itself ("" at end of input).
	i = -1
	c = None
	output = ""
	def next():
		# Advance the cursor one character; c becomes "" past the end.
		nonlocal i
		nonlocal c
		i += 1
		if i < len(code):
			c = code[i]
		else:
			c = ""
	def nextb():
		# Advance, transparently parsing any {...} groups that follow.
		next()
		while c=="{":
			parse()
	def skip_linebreak():
		# Consume a single \n or \r if present at the cursor.
		nonlocal i
		nonlocal c
		if c=="\n" or c=="\r":
			next()
	def is_start_of_line():
		return i==0 or code[i-1]=="\n"
	def parse():
		# Inner worker; shadows the outer name deliberately so nextb()'s
		# group handling recurses into it. Returns on "}" or end of input.
		nonlocal i
		nonlocal c
		nonlocal output
		nextb()
		while c:
			## code block
			if c=="`":
				next()
				if c=="`":
					next()
					# multiline code block
					if c=="`":
						output += "<code>"
						language = ""
						while 1:
							nextb()
							if c=="\n":
								break
							elif c:
								language += c
							else:
								raise Exception("Reached end of input while reading ``` start")
						start = i+1
						while 1:
							next()
							if c=="`":
								next()
								if c=="`":
									next()
									if c=="`":
										break;
							if not c:
								raise Exception("Reached end of input while reading code inside ```")
						# i-2 trims the closing ``` from the captured code.
						output += highlight(code[start:i-2], language.strip())
						output += "</code>"
						nextb()
						skip_linebreak()
					# bad
					else:
						output += "``"
						next()
				# inline code block
				else:
					output += "<code>"
					while 1:
						if c=="`":
							output += "</code>"
							break
						elif c:
							output += escape_html_char(c)
						else:
							raise Exception("Unclosed ` block")
						next()
					nextb()
			## heading
			elif c=="*" and is_start_of_line():
				heading_level = 1
				nextb()
				while c=="*":
					heading_level += 1
					nextb()
				if heading_level > 6:
					raise Exception("Heading too deep")
				output += "<h%d>" % heading_level
				while 1:
					if not c or c=="\n":
						break
					output += escape_html_char(c)
					nextb()
				output += "</h%d>" % heading_level
				nextb()
			## escaped char
			elif c=="\\":
				nextb()
				if c:
					output += escape_html_char(c)
					nextb()
			## tables
			elif c=="|":
				nextb()
				# table start
				if c=="=":
					nextb()
					while c=="=":
						nextb()
					if c=="|":
						nextb()
					else:
						raise Exception("missing | in table start")
					while c=="\n" or c=="\r":
						nextb()
					if c=="|":
						nextb()
					else:
						raise Exception("missing | in table start")
					output += "<table><tbody><tr><td>"
					skip_linebreak()
				# table start (with header)
				elif c=="*":
					nextb()
					if c=="*":
						nextb()
					if c=="|":
						nextb()
					else:
						raise Exception("missing | in table start")
					if c=="\n" or c=="\r":
						nextb()
					if c=="|":
						nextb()
					else:
						raise Exception("missing | in table start")
					output += "<table class='header_table'><tbody><tr><td>"
					skip_linebreak()
				# other
				else:
					skip_linebreak() # this is used for the linebreak after | as well as the linebreak between ||
					# table end or next row
					if c=="|":
						nextb()
						#table end
						if c=="=":
							nextb()
							if c=="=":
								nextb()
								if c=="|":
									nextb()
								else:
									raise Exception("missing | in table end")
								output += "</td></tr></tbody></table>"
								skip_linebreak()
						#next row
						else:
							output += "</td></tr><tr><td>"
							skip_linebreak()
					# next cell
					else:
						output += "</td><td>"
			## return
			elif c=="}":
				nextb()
				return
			## other symbol
			else:
				output += escape_html_char(c)
				nextb()
	parse()
	return output
return code
def escape_html(code):
return code.replace("&","&").replace("<","<").replace("\n","<br>")
def escape_html_char(char):
if char=="<":
return "<"
if char=="&":
return "&"
if char=="\n":
return "<br>"
if char=="\r":
return ""
return char
def parse(code):
i = -1
c = None
output = ""
def next():
nonlocal i
nonlocal c
i += 1
if i < len(code):
c = code[i]
else:
c = ""
def nextb():
next()
while c=="{":
parse()
def skip_linebreak():
nonlocal i
nonlocal c
if c=="\n" or c=="\r":
next()
def is_start_of_line():
return i==0 or code[i-1]=="\n"
def parse():
nonlocal i
nonlocal c
nonlocal output
nextb()
while c:
":
next()
if c=="`":
next()
if c=="`":
output += "<code>"
language = ""
while 1:
nextb()
if c=="\n":
break
elif c:
language += c
else:
raise Exception("Reached end of input while reading ``` start")
start = i+1
while 1:
next()
if c=="`":
next()
if c=="`":
next()
if c=="`":
break;
if not c:
raise Exception("Reached end of input while reading code inside ```")
output += highlight(code[start:i-2], language.strip())
output += "</code>"
nextb()
skip_linebreak()
else:
output += "``"
next()
else:
output += "<code>"
while 1:
if c=="`":
output += "</code>"
break
elif c:
output += escape_html_char(c)
else:
raise Exception("Unclosed ` block")
next()
nextb()
c=="*" and is_start_of_line():
heading_level = 1
nextb()
while c=="*":
heading_level += 1
nextb()
if heading_level > 6:
raise Exception("Heading too deep")
output += "<h%d>" % heading_level
while 1:
if not c or c=="\n":
break
output += escape_html_char(c)
nextb()
output += "</h%d>" % heading_level
nextb()
\":
nextb()
if c:
output += escape_html_char(c)
nextb()
c=="|":
nextb()
if c=="=":
nextb()
while c=="=":
nextb()
if c=="|":
nextb()
else:
raise Exception("missing | in table start")
while c=="\n" or c=="\r":
nextb()
if c=="|":
nextb()
else:
raise Exception("missing | in table start")
output += "<table><tbody><tr><td>"
skip_linebreak()
elif c=="*":
nextb()
if c=="*":
nextb()
if c=="|":
nextb()
else:
raise Exception("missing | in table start")
if c=="\n" or c=="\r":
nextb()
if c=="|":
nextb()
else:
raise Exception("missing | in table start")
output += "<table class='header_table'><tbody><tr><td>"
skip_linebreak()
else:
skip_linebreak()
if c=="|":
nextb()
if c=="=":
nextb()
if c=="=":
nextb()
if c=="|":
nextb()
else:
raise Exception("missing | in table end")
output += "</td></tr></tbody></table>"
skip_linebreak()
else:
output += "</td></tr><tr><td>"
skip_linebreak()
else:
output += "</td><td>"
c=="}":
nextb()
return
output += escape_html_char(c)
nextb()
parse()
return output | true | true |
f73347cf356950b8384baf7b94555e71eb4f33a4 | 7,574 | py | Python | homeassistant/components/fibaro/light.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | homeassistant/components/fibaro/light.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/fibaro/light.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Support for Fibaro lights."""
import asyncio
from functools import partial
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import CONF_WHITE_VALUE
import homeassistant.util.color as color_util
from . import CONF_COLOR, CONF_DIMMING, CONF_RESET_COLOR, FIBARO_DEVICES, FibaroDevice
_LOGGER = logging.getLogger(__name__)
def scaleto255(value):
    """Scale the input value from 0-100 to 0-255."""
    # Fibaro stores brightness as either 0-100 or 0-99 depending on the
    # device type (e.g. dimmer vs led), so treat >98 as full brightness.
    if value > 98:
        value = 100
    scaled = (value * 255.0) / 100.0
    return min(255, max(0, scaled))
def scaleto100(value):
    """Scale the input value from 0-255 to 0-100."""
    # Keep a low but non-zero level visible instead of rounding it to zero.
    if 0 < value < 3:
        return 1
    return min(100, max(0, (value * 100.0) / 255.0))
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Perform the setup for Fibaro controller devices."""
    if discovery_info is None:
        return
    entities = [FibaroLight(device) for device in hass.data[FIBARO_DEVICES]["light"]]
    async_add_entities(entities, True)
class FibaroLight(FibaroDevice, LightEntity):
    """Representation of a Fibaro Light, including dimmable."""
    def __init__(self, fibaro_device):
        """Initialize the light."""
        self._brightness = None
        self._color = (0, 0)
        self._last_brightness = 0
        self._supported_flags = 0
        # Serialises the executor jobs fired by turn_on/turn_off/update so
        # device state reads and writes do not interleave.
        self._update_lock = asyncio.Lock()
        self._white = 0
        devconf = fibaro_device.device_config
        self._reset_color = devconf.get(CONF_RESET_COLOR, False)
        # Capability detection from the device's reported properties/actions.
        supports_color = (
            "color" in fibaro_device.properties and "setColor" in fibaro_device.actions
        )
        supports_dimming = "levelChange" in fibaro_device.interfaces
        supports_white_v = "setW" in fibaro_device.actions
        # Configuration can override default capability detection
        if devconf.get(CONF_DIMMING, supports_dimming):
            self._supported_flags |= SUPPORT_BRIGHTNESS
        if devconf.get(CONF_COLOR, supports_color):
            self._supported_flags |= SUPPORT_COLOR
        if devconf.get(CONF_WHITE_VALUE, supports_white_v):
            self._supported_flags |= SUPPORT_WHITE_VALUE
        super().__init__(fibaro_device)
        self.entity_id = f"{DOMAIN}.{self.ha_id}"
    @property
    def brightness(self):
        """Return the brightness of the light."""
        return scaleto255(self._brightness)
    @property
    def hs_color(self):
        """Return the color of the light."""
        return self._color
    @property
    def white_value(self):
        """Return the white value of this light between 0..255."""
        return self._white
    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_flags
    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(partial(self._turn_on, **kwargs))
    def _turn_on(self, **kwargs):
        """Really turn the light on."""
        if self._supported_flags & SUPPORT_BRIGHTNESS:
            target_brightness = kwargs.get(ATTR_BRIGHTNESS)
            # No brightness specified, so we either restore it to
            # last brightness or switch it on at maximum level
            if target_brightness is None:
                if self._brightness == 0:
                    if self._last_brightness:
                        self._brightness = self._last_brightness
                    else:
                        self._brightness = 100
            else:
                # We set it to the target brightness and turn it on
                self._brightness = scaleto100(target_brightness)
        if self._supported_flags & SUPPORT_COLOR:
            # A bare turn_on (no colour/white/brightness args) optionally
            # resets the colour when configured to do so.
            if (
                self._reset_color
                and kwargs.get(ATTR_WHITE_VALUE) is None
                and kwargs.get(ATTR_HS_COLOR) is None
                and kwargs.get(ATTR_BRIGHTNESS) is None
            ):
                self._color = (100, 0)
            # Update based on parameters
            self._white = kwargs.get(ATTR_WHITE_VALUE, self._white)
            self._color = kwargs.get(ATTR_HS_COLOR, self._color)
            rgb = color_util.color_hs_to_RGB(*self._color)
            # RGBW channels are pre-scaled by brightness before sending.
            self.call_set_color(
                round(rgb[0] * self._brightness / 100.0),
                round(rgb[1] * self._brightness / 100.0),
                round(rgb[2] * self._brightness / 100.0),
                round(self._white * self._brightness / 100.0),
            )
            if self.state == "off":
                self.set_level(int(self._brightness))
            return
        if self._reset_color:
            bri255 = scaleto255(self._brightness)
            self.call_set_color(bri255, bri255, bri255, bri255)
        if self._supported_flags & SUPPORT_BRIGHTNESS:
            self.set_level(int(self._brightness))
            return
        # The simplest case is left for last. No dimming, just switch on
        self.call_turn_on()
    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(partial(self._turn_off, **kwargs))
    def _turn_off(self, **kwargs):
        """Really turn the light off."""
        # Let's save the last brightness level before we switch it off
        if (
            (self._supported_flags & SUPPORT_BRIGHTNESS)
            and self._brightness
            and self._brightness > 0
        ):
            self._last_brightness = self._brightness
        self._brightness = 0
        self.call_turn_off()
    @property
    def is_on(self):
        """Return true if device is on."""
        return self.current_binary_state
    async def async_update(self):
        """Update the state."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(self._update)
    def _update(self):
        """Really update the state."""
        # Brightness handling
        if self._supported_flags & SUPPORT_BRIGHTNESS:
            self._brightness = float(self.fibaro_device.properties.value)
            # Fibaro might report 0-99 or 0-100 for brightness,
            # based on device type, so we round up here
            if self._brightness > 99:
                self._brightness = 100
        # Color handling
        if (
            self._supported_flags & SUPPORT_COLOR
            and "color" in self.fibaro_device.properties
            and "," in self.fibaro_device.properties.color
        ):
            # Fibaro communicates the color as an 'R, G, B, W' string
            rgbw_s = self.fibaro_device.properties.color
            # All-zero colour: fall back to the last colour set, if known.
            if rgbw_s == "0,0,0,0" and "lastColorSet" in self.fibaro_device.properties:
                rgbw_s = self.fibaro_device.properties.lastColorSet
            rgbw_list = [int(i) for i in rgbw_s.split(",")][:4]
            if rgbw_list[0] or rgbw_list[1] or rgbw_list[2]:
                self._color = color_util.color_RGB_to_hs(*rgbw_list[:3])
            if (self._supported_flags & SUPPORT_WHITE_VALUE) and self.brightness != 0:
                self._white = min(255, max(0, rgbw_list[3] * 100.0 / self._brightness))
| 36.066667 | 87 | 0.619752 | import asyncio
from functools import partial
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import CONF_WHITE_VALUE
import homeassistant.util.color as color_util
from . import CONF_COLOR, CONF_DIMMING, CONF_RESET_COLOR, FIBARO_DEVICES, FibaroDevice
_LOGGER = logging.getLogger(__name__)
def scaleto255(value):
if value > 98:
value = 100
return max(0, min(255, ((value * 255.0) / 100.0)))
def scaleto100(value):
if 0 < value < 3:
return 1
return max(0, min(100, ((value * 100.0) / 255.0)))
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
if discovery_info is None:
return
async_add_entities(
[FibaroLight(device) for device in hass.data[FIBARO_DEVICES]["light"]], True
)
class FibaroLight(FibaroDevice, LightEntity):
def __init__(self, fibaro_device):
self._brightness = None
self._color = (0, 0)
self._last_brightness = 0
self._supported_flags = 0
self._update_lock = asyncio.Lock()
self._white = 0
devconf = fibaro_device.device_config
self._reset_color = devconf.get(CONF_RESET_COLOR, False)
supports_color = (
"color" in fibaro_device.properties and "setColor" in fibaro_device.actions
)
supports_dimming = "levelChange" in fibaro_device.interfaces
supports_white_v = "setW" in fibaro_device.actions
if devconf.get(CONF_DIMMING, supports_dimming):
self._supported_flags |= SUPPORT_BRIGHTNESS
if devconf.get(CONF_COLOR, supports_color):
self._supported_flags |= SUPPORT_COLOR
if devconf.get(CONF_WHITE_VALUE, supports_white_v):
self._supported_flags |= SUPPORT_WHITE_VALUE
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
@property
def brightness(self):
return scaleto255(self._brightness)
@property
def hs_color(self):
return self._color
@property
def white_value(self):
return self._white
@property
def supported_features(self):
return self._supported_flags
async def async_turn_on(self, **kwargs):
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_on, **kwargs))
def _turn_on(self, **kwargs):
if self._supported_flags & SUPPORT_BRIGHTNESS:
target_brightness = kwargs.get(ATTR_BRIGHTNESS)
if target_brightness is None:
if self._brightness == 0:
if self._last_brightness:
self._brightness = self._last_brightness
else:
self._brightness = 100
else:
self._brightness = scaleto100(target_brightness)
if self._supported_flags & SUPPORT_COLOR:
if (
self._reset_color
and kwargs.get(ATTR_WHITE_VALUE) is None
and kwargs.get(ATTR_HS_COLOR) is None
and kwargs.get(ATTR_BRIGHTNESS) is None
):
self._color = (100, 0)
self._white = kwargs.get(ATTR_WHITE_VALUE, self._white)
self._color = kwargs.get(ATTR_HS_COLOR, self._color)
rgb = color_util.color_hs_to_RGB(*self._color)
self.call_set_color(
round(rgb[0] * self._brightness / 100.0),
round(rgb[1] * self._brightness / 100.0),
round(rgb[2] * self._brightness / 100.0),
round(self._white * self._brightness / 100.0),
)
if self.state == "off":
self.set_level(int(self._brightness))
return
if self._reset_color:
bri255 = scaleto255(self._brightness)
self.call_set_color(bri255, bri255, bri255, bri255)
if self._supported_flags & SUPPORT_BRIGHTNESS:
self.set_level(int(self._brightness))
return
self.call_turn_on()
async def async_turn_off(self, **kwargs):
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_off, **kwargs))
def _turn_off(self, **kwargs):
if (
(self._supported_flags & SUPPORT_BRIGHTNESS)
and self._brightness
and self._brightness > 0
):
self._last_brightness = self._brightness
self._brightness = 0
self.call_turn_off()
@property
def is_on(self):
return self.current_binary_state
async def async_update(self):
async with self._update_lock:
await self.hass.async_add_executor_job(self._update)
def _update(self):
# Brightness handling
if self._supported_flags & SUPPORT_BRIGHTNESS:
self._brightness = float(self.fibaro_device.properties.value)
# Fibaro might report 0-99 or 0-100 for brightness,
# based on device type, so we round up here
if self._brightness > 99:
self._brightness = 100
# Color handling
if (
self._supported_flags & SUPPORT_COLOR
and "color" in self.fibaro_device.properties
and "," in self.fibaro_device.properties.color
):
# Fibaro communicates the color as an 'R, G, B, W' string
rgbw_s = self.fibaro_device.properties.color
if rgbw_s == "0,0,0,0" and "lastColorSet" in self.fibaro_device.properties:
rgbw_s = self.fibaro_device.properties.lastColorSet
rgbw_list = [int(i) for i in rgbw_s.split(",")][:4]
if rgbw_list[0] or rgbw_list[1] or rgbw_list[2]:
self._color = color_util.color_RGB_to_hs(*rgbw_list[:3])
if (self._supported_flags & SUPPORT_WHITE_VALUE) and self.brightness != 0:
self._white = min(255, max(0, rgbw_list[3] * 100.0 / self._brightness))
| true | true |
f73348ea7381087c33a70e3342341824f109489e | 57,462 | py | Python | test/functional/feature_futures.py | Sebz84/ain | 451abddc7802ac4ee4dbf30117ca074414f4fdca | [
"MIT"
] | null | null | null | test/functional/feature_futures.py | Sebz84/ain | 451abddc7802ac4ee4dbf30117ca074414f4fdca | [
"MIT"
] | null | null | null | test/functional/feature_futures.py | Sebz84/ain | 451abddc7802ac4ee4dbf30117ca074414f4fdca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test Futures contract RPC."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import time
def sort_history(entry):
    """Sort key: order future-swap history entries by transaction index."""
    return entry['txn']
class FuturesTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-txnotokens=0', '-amkheight=1', '-bayfrontheight=1', '-eunosheight=1', '-fortcanningheight=1', '-fortcanninghillheight=1', '-fortcanningroadheight=150', '-subsidytest=1']]
def run_test(self):
self.nodes[0].generate(101)
# Set up oracles and tokens
self.setup_test()
# Test setting of futures Gov vars
self.futures_setup()
# Test dToken to DUSD
self.test_dtoken_to_dusd()
# Test DUSD to dToken
self.test_dusd_to_dtoken()
# Test futures block range
self.check_swap_block_range()
# Test multiple swaps per account
self.check_multiple_swaps()
# Test withdrawal
self.check_withdrawals()
# Test Satoshi swaps
self.check_minimum_swaps()
# Test changing Gov vars
self.check_gov_var_change()
# Test refunding of unpaid futures
self.unpaid_contract()
# Test list future swap history
self.rpc_history()
def setup_test(self):
# Store addresses
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.contract_address = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpsqgljc'
# Store interval
self.futures_interval = 25
# RPC history checks
self.list_history = []
# Set token symbols
self.symbolDFI = 'DFI'
self.symbolDUSD = 'DUSD'
self.symbolTSLA = 'TSLA'
self.symbolGOOGL = 'GOOGL'
self.symbolTWTR = 'TWTR'
self.symbolMSFT = 'MSFT'
self.symbolBTC = 'BTC'
# Setup oracle
oracle_address = self.nodes[0].getnewaddress("", "legacy")
price_feeds = [
{"currency": "USD", "token": self.symbolDFI},
{"currency": "USD", "token": self.symbolTSLA},
{"currency": "USD", "token": self.symbolGOOGL},
{"currency": "USD", "token": self.symbolTWTR},
{"currency": "USD", "token": self.symbolMSFT}
]
self.oracle_id = self.nodes[0].appointoracle(oracle_address, price_feeds, 10)
self.nodes[0].generate(1)
# Create Oracle prices
self.price_tsla = 870
self.price_googl = 2600
self.price_twtr = 37
self.price_msft = 295
# Calculate future swap prices
self.prices = []
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('0.95000000')))
})
# Feed oracle
oracle_prices = [
{"currency": "USD", "tokenAmount": f'{self.price_tsla}@{self.symbolTSLA}'},
{"currency": "USD", "tokenAmount": f'{self.price_googl}@{self.symbolGOOGL}'},
{"currency": "USD", "tokenAmount": f'{self.price_twtr}@{self.symbolTWTR}'},
{"currency": "USD", "tokenAmount": f'{self.price_msft}@{self.symbolMSFT}'},
]
self.nodes[0].setoracledata(self.oracle_id, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
# Set up non-loan token for failure test
self.nodes[0].createtoken({
"symbol": self.symbolBTC,
"name": self.symbolBTC,
"isDAT": True,
"collateralAddress": self.address
})
self.nodes[0].generate(1)
# Setup loan tokens
self.nodes[0].setloantoken({
'symbol': self.symbolDUSD,
'name': self.symbolDUSD,
'fixedIntervalPriceId': f'{self.symbolDUSD}/USD',
'mintable': True,
'interest': 0})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTSLA,
'name': self.symbolTSLA,
'fixedIntervalPriceId': f'{self.symbolTSLA}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolGOOGL,
'name': self.symbolGOOGL,
'fixedIntervalPriceId': f'{self.symbolGOOGL}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTWTR,
'name': self.symbolTWTR,
'fixedIntervalPriceId': f'{self.symbolTWTR}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolMSFT,
'name': self.symbolMSFT,
'fixedIntervalPriceId': f'{self.symbolMSFT}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
# Set token ids
self.idDUSD = list(self.nodes[0].gettoken(self.symbolDUSD).keys())[0]
self.idTSLA = list(self.nodes[0].gettoken(self.symbolTSLA).keys())[0]
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
self.idTWTR = list(self.nodes[0].gettoken(self.symbolTWTR).keys())[0]
self.idMSFT = list(self.nodes[0].gettoken(self.symbolMSFT).keys())[0]
self.idBTC = list(self.nodes[0].gettoken(self.symbolBTC).keys())[0]
# Mint tokens for swapping
self.nodes[0].minttokens([f'100000@{self.idDUSD}'])
self.nodes[0].minttokens([f'100000@{self.idTSLA}'])
self.nodes[0].minttokens([f'100000@{self.idGOOGL}'])
self.nodes[0].minttokens([f'100000@{self.idTWTR}'])
self.nodes[0].minttokens([f'100000@{self.idMSFT}'])
self.nodes[0].generate(1)
def futures_setup(self):
# Move to fork block
self.nodes[0].generate(150 - self.nodes[0].getblockcount())
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Try futureswap before feature is active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set partial futures attributes
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Try futureswap before feature is fully active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Set all futures attributes but set active to false
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false','v0/params/dfip2203/reward_pct':'0.05','v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Try futureswap with DFIP2203 active set to false
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
# Fully enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result['v0/params/dfip2203/active'], 'true')
assert_equal(result['v0/params/dfip2203/reward_pct'], '0.05')
assert_equal(result['v0/params/dfip2203/block_period'], str(self.futures_interval))
# Disable DUSD
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idDUSD)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result[f'v0/token/{self.idDUSD}/dfip2203'], 'false')
# Check futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
assert_equal(next_futures_block, self.nodes[0].getfutureswapblock())
    def test_dtoken_to_dusd(self):
        """Swap dTokens (MSFT/GOOGL/TSLA/TWTR) to DUSD via DFIP2203.

        Funds four addresses with 1 of each dToken, creates a futures
        contract per address, checks pending state and live economy
        attributes, then advances to the futures block and verifies the
        DUSD payouts at each token's discount price.
        """
        # Create addresses for futures
        address_msft = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_twtr = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address_msft: f'1@{self.symbolMSFT}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'1@{self.symbolGOOGL}'})
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'1@{self.symbolTSLA}'})
        self.nodes[0].accounttoaccount(self.address, {address_twtr: f'1@{self.symbolTWTR}'})
        self.nodes[0].generate(1)
        # Test futureswap failures: non-loan token, disabled DUSD, bad destination,
        # destination set for a dToken source, and insufficient balance.
        assert_raises_rpc_error(-32600, f'Could not get source loan token {self.idBTC}', self.nodes[0].futureswap, self.address, f'1@{self.symbolBTC}')
        assert_raises_rpc_error(-32600, f'DFIP2203 currently disabled for token {self.idDUSD}', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idDUSD))
        assert_raises_rpc_error(-32600, f'Could not get destination loan token {self.idBTC}. Set valid destination.', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idBTC))
        assert_raises_rpc_error(-32600, 'Destination should not be set when source amount is a dToken', self.nodes[0].futureswap, self.address, f'1@{self.symbolTSLA}', int(self.idBTC))
        assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 1.00000000', self.nodes[0].futureswap, address_twtr, f'1@{self.symbolTSLA}')
        # Create user futures contracts
        self.nodes[0].futureswap(address_twtr, f'1@{self.symbolTWTR}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_tsla, f'1@{self.symbolTSLA}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_googl, f'1@{self.symbolGOOGL}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_msft, f'1@{self.symbolMSFT}')
        self.nodes[0].generate(1)
        # List user futures contracts
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(result[0]['owner'], address_msft)
        assert_equal(result[0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
        assert_equal(result[0]['destination'], self.symbolDUSD)
        assert_equal(result[1]['owner'], address_googl)
        assert_equal(result[1]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
        assert_equal(result[1]['destination'], self.symbolDUSD)
        assert_equal(result[2]['owner'], address_tsla)
        assert_equal(result[2]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
        assert_equal(result[2]['destination'], self.symbolDUSD)
        assert_equal(result[3]['owner'], address_twtr)
        assert_equal(result[3]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
        assert_equal(result[3]['destination'], self.symbolDUSD)
        # Get user MSFT futures swap by address
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        # Get user GOOGL futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        # Get user TSLA futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        # Get user TWTR futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        # Check DFI2203 amounts do not show up as burns yet
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [])
        # Check DFI2203 address on listgovs, current shows pending, burn should be empty.
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert('v0/live/economy/dfip2203_burned' not in result)
        assert('v0/live/economy/dfip2203_minted' not in result)
        # Get token total minted before future swap
        total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check total minted incremented as expected: sum of all four discount prices
        new_total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
        assert_equal(total_dusd + self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"], new_total_dusd)
        # Check TXN ordering: swap history entries count down from the max txn value
        txn_first = 4294967295
        result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'q'})
        result.sort(key = sort_history, reverse = True)
        for result_entry in result:
            assert_equal(result_entry['txn'], txn_first)
            txn_first -= 1
        # Pending futures should now be empty
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 0)
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(len(result['values']), 0)
        # Check contract address holds the deposited dTokens
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on getburninfo
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check that futures have been executed at discount prices
        result = self.nodes[0].getaccount(address_msft)
        assert_equal(result, [f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_twtr)
        assert_equal(result, [f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'])
        # Populate RPC check data for later history verification
        self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
            {'address': address_tsla, 'destination': f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_googl, 'destination': f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_twtr, 'destination': f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_msft, 'destination': f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'},
        ]})
    def test_dusd_to_dtoken(self):
        """Swap DUSD to dTokens (TSLA/TWTR/GOOGL/MSFT) via DFIP2203.

        Funds four addresses with each token's premium price in DUSD,
        creates one futures contract per address (exercising both the id
        and symbol destination forms), checks pending state and live
        economy attributes, then advances to the futures block and
        verifies each address receives exactly 1 of its destination token.
        """
        # Create addresses for futures
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_twtr = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        address_msft = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contracts. Destination is given by token id for
        # MSFT/TWTR and by symbol for GOOGL/TSLA to cover both input forms.
        self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', self.idMSFT)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', self.idTWTR)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', self.symbolGOOGL)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', self.symbolTSLA)
        self.nodes[0].generate(1)
        # List user futures contracts
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(result[0]['owner'], address_tsla)
        assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[0]['destination'], self.symbolTSLA)
        assert_equal(result[1]['owner'], address_googl)
        assert_equal(result[1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[1]['destination'], self.symbolGOOGL)
        assert_equal(result[2]['owner'], address_twtr)
        assert_equal(result[2]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[2]['destination'], self.symbolTWTR)
        assert_equal(result[3]['owner'], address_msft)
        assert_equal(result[3]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[3]['destination'], self.symbolMSFT)
        # Get user TSLA futures swap by address
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTSLA)
        # Get user GOOGL futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
        # Get user TWTR futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTWTR)
        # Get user MSFT futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolMSFT)
        # Check new DFI2203 amounts do not show up as burns yet
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs, current shows pending if any, burned shows
        # deposits from executed swaps and minted shows output from executed swaps.
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert_equal(result['v0/live/economy/dfip2203_burned'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert_equal(result['v0/live/economy/dfip2203_minted'], [f'{self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
        # Get token total minted before future swap
        total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
        total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
        total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
        total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check minted totals incremented as expected: one of each token
        new_total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
        new_total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
        new_total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
        new_total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
        assert_equal(total_tsla + Decimal('1.00000000'), new_total_tsla)
        assert_equal(total_googl + Decimal('1.00000000'), new_total_googl)
        assert_equal(total_twtr + Decimal('1.00000000'), new_total_twtr)
        assert_equal(total_msft + Decimal('1.00000000'), new_total_msft)
        # Pending futures should now be empty
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 0)
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(len(result['values']), 0)
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on getburninfo
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check that futures have been executed
        result = self.nodes[0].getaccount(address_msft)
        assert_equal(result, [f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'1.00000000@{self.symbolGOOGL}'])
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
        result = self.nodes[0].getaccount(address_twtr)
        assert_equal(result, [f'1.00000000@{self.symbolTWTR}'])
        # Populate RPC check data for later history verification
        self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
            {'address': address_tsla, 'destination': f'1.00000000@{self.symbolTSLA}'},
            {'address': address_googl, 'destination': f'1.00000000@{self.symbolGOOGL}'},
            {'address': address_twtr, 'destination': f'1.00000000@{self.symbolTWTR}'},
            {'address': address_msft, 'destination': f'1.00000000@{self.symbolMSFT}'},
        ]})
    def check_swap_block_range(self):
        """Check a swap created exactly on a futures block executes in that block.

        Creates a DUSD->TSLA futures contract in the futures block itself
        and verifies it is executed immediately (no pending entry remains)
        and is not executed a second time on the following futures block.
        """
        # Create addresses for futures
        address = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Move to just before futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount() - 1)
        # Create user futures contracts on futures block
        self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Check that futures have been executed in the same block
        result = self.nodes[0].getaccount(address)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
        # Check all pending swaps shows no entries
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 0)
        # Check user pending swaps is empty
        result = self.nodes[0].getpendingfutureswaps(address)
        assert_equal(len(result['values']), 0)
        # Try and withdraw smallest amount now contract has been paid
        assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        # Populate RPC check data for later history verification
        self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
            {'address': address, 'destination': f'1.00000000@{self.symbolTSLA}'},
        ]})
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check that futures has not been executed again
        result = self.nodes[0].getaccount(address)
        assert_equal(result, [f'913.50000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'4905.60000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_multiple_swaps(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create two user futures contracts
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
self.nodes[0].generate(1)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][1]['destination'], self.symbolTSLA)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
# Check that futures have been executed
result = self.nodes[0].getaccount(address_tsla)
assert_equal(result, [f'2.00000000@{self.symbolTSLA}'])
result = self.nodes[0].getaccount(address_twtr)
assert_equal(result, [f'2.00000000@{self.symbolTWTR}'])
# Check contract address
result = self.nodes[0].getaccount(self.contract_address)
assert_equal(result, [f'6810.30000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    def check_withdrawals(self):
        """Check withdrawfutureswap behaviour across full and partial withdrawals.

        Creates two DUSD futures contracts per address, then exercises:
        invalid withdrawals, a full withdrawal of both contracts, a
        withdrawal leaving one Satoshi, a withdrawal spanning contract
        boundaries, and a single-Satoshi withdrawal; finally verifies the
        resulting balances after the futures block.
        """
        # Create addresses for futures
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_twtr = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        address_msft = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contracts, two per address
        self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
        self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
        self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
        self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Get user TSLA futures swaps by address
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTSLA)
        assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][1]['destination'], self.symbolTSLA)
        # Get user GOOGL futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
        assert_equal(result['values'][1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][1]['destination'], self.symbolGOOGL)
        # Get user TWTR futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTWTR)
        assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][1]['destination'], self.symbolTWTR)
        # Get user MSFT futures contracts by address
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolMSFT)
        assert_equal(result['values'][1]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][1]['destination'], self.symbolMSFT)
        # Check withdrawal failures: wrong destination token, and amount above deposit
        assert_raises_rpc_error(-32600, f'amount 0.00000000 is less than {self.prices[2]["premiumPrice"] * 2}', self.nodes[0].withdrawfutureswap, address_tsla, f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTWTR))
        assert_raises_rpc_error(-32600, f'amount {self.prices[0]["premiumPrice"] * 2} is less than {(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}', self.nodes[0].withdrawfutureswap, address_tsla, f'{(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        # Withdraw both TSLA contracts
        self.nodes[0].withdrawfutureswap(address_tsla, f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Check user pending swap is empty
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(len(result['values']), 0)
        # Try and withdraw smallest amount now contract empty
        assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address_tsla, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        # Withdraw from GOOGL everything but one Satoshi
        self.nodes[0].withdrawfutureswap(address_googl, f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].generate(1)
        # Check user pending swap
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'0.00000001@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
        # Withdraw one TWTR contract plus 1 Sat of the second one
        self.nodes[0].withdrawfutureswap(address_twtr, f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTWTR))
        self.nodes[0].generate(1)
        # Check user pending swap
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTWTR)
        # Withdraw one Sat from MSFT
        self.nodes[0].withdrawfutureswap(address_msft, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idMSFT))
        self.nodes[0].generate(1)
        # Check user pending swap
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{(self.prices[3]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolMSFT)
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check final balances
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_twtr)
        assert_equal(result, [f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', f'0.99999999@{self.symbolTWTR}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_msft)
        assert_equal(result, [f'0.00000001@{self.symbolDUSD}', f'1.99999999@{self.symbolMSFT}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on getburninfo
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    def check_minimum_swaps(self):
        """Check the minimum-amount behaviour of futures swaps.

        Verifies that a one-Satoshi DUSD swap yields no TSLA output (the
        Satoshi is absorbed by the contract) while a swap of exactly
        premiumPrice / 1e8 DUSD yields one Satoshi of TSLA.
        """
        # Create addresses for futures
        address = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contract with 1 Satoshi
        self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check one Satoshi swap yields no TSLA
        result = self.nodes[0].getaccount(address)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.65000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Create user futures contract to purchase one Satoshi of TSLA:
        # smallest DUSD amount that buys a single TSLA Satoshi at premium price
        min_purchase = round(self.prices[0]["premiumPrice"] / 100000000, 8)
        self.nodes[0].futureswap(address, f'{min_purchase}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check minimum-purchase swap yields one TSLA Satoshi
        result = self.nodes[0].getaccount(address)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001") - Decimal(min_purchase)}@{self.symbolDUSD}', f'0.00000001@{self.symbolTSLA}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.65000914@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    def check_gov_var_change(self):
        """Exercise DFIP2203 Gov-variable changes.

        Verifies that the block period cannot be changed while DFIP2203 is
        active, that disabling DFIP2203 (globally or per-token) refunds any
        pending swaps, and that the contract address / live attributes /
        burn info stay consistent throughout.
        """
        # Set up for block range change, create addresses for futures
        address = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Move to before next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval)) - 1
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Create user futures contract with 1 Satoshi to invalidate block period change
        self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Check contract address has updated
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Test changing block period while DFIP2203 still active
        assert_raises_rpc_error(-32600, 'Cannot set block period while DFIP2203 is active', self.nodes[0].setgov, {"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
        # Disable DFIP2203 to be able to change block period
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
        self.nodes[0].generate(1)
        # Check contract address has not changed, no refund on disabling DFIP2203.
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Now set the new block period
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
        self.nodes[0].generate(1)
        # Enable DFIP2203
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
        self.nodes[0].generate(1)
        # Create addresses
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contracts
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Disable DFIP2203
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
        self.nodes[0].generate(1)
        # Check TXN ordering on Gov var refunds. Refund history entries are
        # numbered down from the max uint32 txn value.
        txn_first = 4294967295
        result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
        result.sort(key = sort_history, reverse = True)
        for result_entry in result:
            assert_equal(result_entry['blockHeight'], self.nodes[0].getblockcount())
            assert_equal(result_entry['type'], 'FutureSwapRefund')
            assert_equal(result_entry['txn'], txn_first)
            txn_first -= 1
        # Check other refund entries. Entry order between the two refunds is
        # not fixed, so accept either TSLA or GOOGL amounts at each slot.
        assert_equal(result[0]['owner'], self.contract_address)
        assert_equal(result[2]['owner'], self.contract_address)
        if result[0]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
            assert_equal(result[0]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        if result[2]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
            assert_equal(result[2]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        if result[1]['owner'] == address_googl:
            assert_equal(result[1]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        else:
            assert_equal(result[1]['owner'], address_tsla)
            assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        if result[3]['owner'] == address_googl:
            assert_equal(result[3]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        else:
            assert_equal(result[3]['owner'], address_tsla)
            assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        # Balances should be restored
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        # Check contract address remains the same
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Enable DFIP2203
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
        self.nodes[0].generate(1)
        # Create user futures contracts
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Disable GOOGL only; TSLA swap must survive, GOOGL swap is refunded.
        self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
        self.nodes[0].generate(1)
        # Only TSLA contract should remain
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 1)
        assert_equal(result[0]['owner'], address_tsla)
        assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[0]['destination'], self.symbolTSLA)
        # Balance should be restored
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        # TSLA balance should be empty
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [])
        # Enable GOOGL
        self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'true'}})
        self.nodes[0].generate(1)
        # Create user futures contracts
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
        self.nodes[0].generate(1)
        # GOOGL balance should be empty
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [])
        # Disable GOOGL
        self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
        self.nodes[0].generate(1)
        # Balance should be restored
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check all balances: GOOGL refunded in DUSD, TSLA swap executed.
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on getburninfo
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    def unpaid_contract(self):
        """Check that swaps which cannot be paid are refunded.

        Removing the oracle leaves the futures contracts without a price at
        execution time, so both pending swaps must be refunded in full and
        the contract address balance must be unchanged.
        """
        # Create addresses for futures
        address = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses
        self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contract
        self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
        self.nodes[0].generate(1)
        # Remove Oracle so no TSLA price is available at the futures block
        self.nodes[0].removeoracle(self.oracle_id)
        self.nodes[0].generate(1)
        # Move to next futures block
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Check refund in history: paired entries, negative amount from the
        # contract address and positive amount back to the user.
        result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
        result.sort(key = sort_history, reverse = True)
        assert_equal(result[0]['owner'], self.contract_address)
        assert_equal(result[0]['type'], 'FutureSwapRefund')
        assert_equal(result[0]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        assert_equal(result[1]['owner'], address)
        assert_equal(result[1]['type'], 'FutureSwapRefund')
        assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        assert_equal(result[2]['owner'], self.contract_address)
        assert_equal(result[2]['type'], 'FutureSwapRefund')
        assert_equal(result[2]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        assert_equal(result[3]['owner'], address)
        assert_equal(result[3]['type'], 'FutureSwapRefund')
        assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
        # Check user has been refunded
        result = self.nodes[0].getaccount(address)
        assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
        # Check contract address
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on listgovs
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Check DFI2203 address on getburninfo
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    def rpc_history(self):
        """Verify swap executions/refunds via the account history RPCs.

        Replays the swaps recorded in ``self.list_history`` during earlier
        scenarios and checks counts of executions ('q') and refunds ('w').
        """
        # Check some historical swaps
        for history in self.list_history:
            result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":history['height'], 'depth':0, 'txtype':'q'})
            for history_entry in history['swaps']:
                found = False
                for result_entry in result:
                    # All entries at this depth belong to the recorded height.
                    assert_equal(history['height'], result_entry['blockHeight'])
                    if result_entry['owner'] == history_entry['address']:
                        assert_equal(result_entry['owner'], history_entry['address'])
                        assert_equal(result_entry['type'], 'FutureSwapExecution')
                        assert_equal(result_entry['amounts'], [history_entry['destination']])
                        found = True
                assert(found)
        # Check all swaps present
        result = self.nodes[0].listaccounthistory('all', {'txtype':'q'})
        assert_equal(len(result), 17)
        # Check all swap refunds present
        result = self.nodes[0].listaccounthistory('all', {'txtype':'w'})
        assert_equal(len(result), 12)
        # Check swap by specific address
        result = self.nodes[0].listaccounthistory(self.list_history[0]['swaps'][0]['address'], {'txtype':'q'})
        assert_equal(len(result), 1)
        assert_equal(result[0]['blockHeight'], self.list_history[0]['height'])
        assert_equal(result[0]['owner'], self.list_history[0]['swaps'][0]['address'])
        assert_equal(result[0]['amounts'], [self.list_history[0]['swaps'][0]['destination']])
# Script entry point: run the DFIP2203 futures swap functional test.
if __name__ == '__main__':
    FuturesTest().main()
| 55.896887 | 314 | 0.657756 |
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import time
def sort_history(e):
    """Sort key for account history entries: order by transaction index."""
    return e["txn"]
class FuturesTest(DefiTestFramework):
    def set_test_params(self):
        """Configure a single regtest node on a clean chain."""
        self.num_nodes = 1
        self.setup_clean_chain = True
        # Earlier forks activate at height 1; Fort Canning Road (which ships
        # DFIP2203) activates at height 150 so pre-fork errors can be tested.
        self.extra_args = [['-txnotokens=0', '-amkheight=1', '-bayfrontheight=1', '-eunosheight=1', '-fortcanningheight=1', '-fortcanninghillheight=1', '-fortcanningroadheight=150', '-subsidytest=1']]
    def run_test(self):
        """Run all DFIP2203 futures swap scenarios in order.

        Later scenarios assert exact cumulative balances, so the order of
        these calls must not change.
        """
        # Mature the chain before setup.
        self.nodes[0].generate(101)
        self.setup_test()
        self.futures_setup()
        self.test_dtoken_to_dusd()
        self.test_dusd_to_dtoken()
        self.check_swap_block_range()
        self.check_multiple_swaps()
        self.check_withdrawals()
        self.check_minimum_swaps()
        self.check_gov_var_change()
        self.unpaid_contract()
        self.rpc_history()
def setup_test(self):
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.contract_address = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpsqgljc'
self.futures_interval = 25
self.list_history = []
self.symbolDFI = 'DFI'
self.symbolDUSD = 'DUSD'
self.symbolTSLA = 'TSLA'
self.symbolGOOGL = 'GOOGL'
self.symbolTWTR = 'TWTR'
self.symbolMSFT = 'MSFT'
self.symbolBTC = 'BTC'
oracle_address = self.nodes[0].getnewaddress("", "legacy")
price_feeds = [
{"currency": "USD", "token": self.symbolDFI},
{"currency": "USD", "token": self.symbolTSLA},
{"currency": "USD", "token": self.symbolGOOGL},
{"currency": "USD", "token": self.symbolTWTR},
{"currency": "USD", "token": self.symbolMSFT}
]
self.oracle_id = self.nodes[0].appointoracle(oracle_address, price_feeds, 10)
self.nodes[0].generate(1)
self.price_tsla = 870
self.price_googl = 2600
self.price_twtr = 37
self.price_msft = 295
self.prices = []
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('0.95000000')))
})
oracle_prices = [
{"currency": "USD", "tokenAmount": f'{self.price_tsla}@{self.symbolTSLA}'},
{"currency": "USD", "tokenAmount": f'{self.price_googl}@{self.symbolGOOGL}'},
{"currency": "USD", "tokenAmount": f'{self.price_twtr}@{self.symbolTWTR}'},
{"currency": "USD", "tokenAmount": f'{self.price_msft}@{self.symbolMSFT}'},
]
self.nodes[0].setoracledata(self.oracle_id, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
self.nodes[0].createtoken({
"symbol": self.symbolBTC,
"name": self.symbolBTC,
"isDAT": True,
"collateralAddress": self.address
})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolDUSD,
'name': self.symbolDUSD,
'fixedIntervalPriceId': f'{self.symbolDUSD}/USD',
'mintable': True,
'interest': 0})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTSLA,
'name': self.symbolTSLA,
'fixedIntervalPriceId': f'{self.symbolTSLA}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolGOOGL,
'name': self.symbolGOOGL,
'fixedIntervalPriceId': f'{self.symbolGOOGL}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTWTR,
'name': self.symbolTWTR,
'fixedIntervalPriceId': f'{self.symbolTWTR}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolMSFT,
'name': self.symbolMSFT,
'fixedIntervalPriceId': f'{self.symbolMSFT}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.idDUSD = list(self.nodes[0].gettoken(self.symbolDUSD).keys())[0]
self.idTSLA = list(self.nodes[0].gettoken(self.symbolTSLA).keys())[0]
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
self.idTWTR = list(self.nodes[0].gettoken(self.symbolTWTR).keys())[0]
self.idMSFT = list(self.nodes[0].gettoken(self.symbolMSFT).keys())[0]
self.idBTC = list(self.nodes[0].gettoken(self.symbolBTC).keys())[0]
self.nodes[0].minttokens([f'100000@{self.idDUSD}'])
self.nodes[0].minttokens([f'100000@{self.idTSLA}'])
self.nodes[0].minttokens([f'100000@{self.idGOOGL}'])
self.nodes[0].minttokens([f'100000@{self.idTWTR}'])
self.nodes[0].minttokens([f'100000@{self.idMSFT}'])
self.nodes[0].generate(1)
    def futures_setup(self):
        """Activate and configure DFIP2203 via Gov variables.

        Checks that futureswap is rejected until active/reward_pct/
        block_period are all set, then disables DFIP2203 for DUSD and
        validates getfutureswapblock.
        """
        # Move the chain to the Fort Canning Road fork height.
        self.nodes[0].generate(150 - self.nodes[0].getblockcount())
        address = self.nodes[0].getnewaddress("", "legacy")
        # Not active before any Gov variables are set.
        assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
        self.nodes[0].generate(1)
        # Still rejected: presumably reward_pct/block_period are not yet set
        # so the feature is not fully configured — TODO confirm.
        assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false','v0/params/dfip2203/reward_pct':'0.05','v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
        self.nodes[0].generate(1)
        # Configured but explicitly inactive.
        assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.symbolTWTR}')
        self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
        self.nodes[0].generate(1)
        # Verify the Gov variables took effect.
        result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
        assert_equal(result['v0/params/dfip2203/active'], 'true')
        assert_equal(result['v0/params/dfip2203/reward_pct'], '0.05')
        assert_equal(result['v0/params/dfip2203/block_period'], str(self.futures_interval))
        # Disable DFIP2203 for DUSD itself (used by error tests later).
        self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idDUSD)}/dfip2203':'false'}})
        self.nodes[0].generate(1)
        result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
        assert_equal(result[f'v0/token/{self.idDUSD}/dfip2203'], 'false')
        # getfutureswapblock must report the next interval boundary.
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        assert_equal(next_futures_block, self.nodes[0].getfutureswapblock())
    def test_dtoken_to_dusd(self):
        """Swap one of each dToken into DUSD at the discount price.

        Also exercises the futureswap error paths (non-loan source, disabled
        DUSD source, invalid/forbidden destination, insufficient balance).
        """
        # Create addresses for futures
        address_msft = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_twtr = self.nodes[0].getnewaddress("", "legacy")
        # Fund addresses with one of each dToken
        self.nodes[0].accounttoaccount(self.address, {address_msft: f'1@{self.symbolMSFT}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'1@{self.symbolGOOGL}'})
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'1@{self.symbolTSLA}'})
        self.nodes[0].accounttoaccount(self.address, {address_twtr: f'1@{self.symbolTWTR}'})
        self.nodes[0].generate(1)
        # Test futureswap failure modes
        assert_raises_rpc_error(-32600, f'Could not get source loan token {self.idBTC}', self.nodes[0].futureswap, self.address, f'1@{self.symbolBTC}')
        assert_raises_rpc_error(-32600, f'DFIP2203 currently disabled for token {self.idDUSD}', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idDUSD))
        assert_raises_rpc_error(-32600, f'Could not get destination loan token {self.idBTC}. Set valid destination.', self.nodes[0].futureswap, self.address, f'1@{self.symbolDUSD}', int(self.idBTC))
        assert_raises_rpc_error(-32600, 'Destination should not be set when source amount is a dToken', self.nodes[0].futureswap, self.address, f'1@{self.symbolTSLA}', int(self.idBTC))
        # address_twtr holds no TSLA, so the swap fails on balance.
        assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 1.00000000', self.nodes[0].futureswap, address_twtr, f'1@{self.symbolTSLA}')
        # Create user futures contracts, one per block
        self.nodes[0].futureswap(address_twtr, f'1@{self.symbolTWTR}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_tsla, f'1@{self.symbolTSLA}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_googl, f'1@{self.symbolGOOGL}')
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_msft, f'1@{self.symbolMSFT}')
        self.nodes[0].generate(1)
        # List all pending swaps; entries appear newest first.
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(result[0]['owner'], address_msft)
        assert_equal(result[0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
        assert_equal(result[0]['destination'], self.symbolDUSD)
        assert_equal(result[1]['owner'], address_googl)
        assert_equal(result[1]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
        assert_equal(result[1]['destination'], self.symbolDUSD)
        assert_equal(result[2]['owner'], address_tsla)
        assert_equal(result[2]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
        assert_equal(result[2]['destination'], self.symbolDUSD)
        assert_equal(result[3]['owner'], address_twtr)
        assert_equal(result[3]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
        assert_equal(result[3]['destination'], self.symbolDUSD)
        # Check pending swaps per address
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
        assert_equal(result['values'][0]['destination'], self.symbolDUSD)
        # Nothing burned yet; swaps only execute at the futures block.
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [])
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert('v0/live/economy/dfip2203_burned' not in result)
        assert('v0/live/economy/dfip2203_minted' not in result)
        # Record DUSD supply and move to the next futures block.
        total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # The minted DUSD equals the sum of the four discount prices.
        new_total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
        assert_equal(total_dusd + self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"], new_total_dusd)
        # Execution history entries count down from the max uint32 txn value.
        txn_first = 4294967295
        result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'q'})
        result.sort(key = sort_history, reverse = True)
        for result_entry in result:
            assert_equal(result_entry['txn'], txn_first)
            txn_first -= 1
        # All pending swaps have been consumed.
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 0)
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(len(result['values']), 0)
        # Contract address, live attributes and burn info hold the dTokens.
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Each user received the discount price in DUSD.
        result = self.nodes[0].getaccount(address_msft)
        assert_equal(result, [f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'])
        result = self.nodes[0].getaccount(address_twtr)
        assert_equal(result, [f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'])
        # Record swaps for rpc_history().
        self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
            {'address': address_tsla, 'destination': f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_googl, 'destination': f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_twtr, 'destination': f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'},
            {'address': address_msft, 'destination': f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'},
        ]})
    def test_dusd_to_dtoken(self):
        """Swap DUSD into each dToken at the premium price.

        Destination is specified in all accepted forms: string id, int id
        and symbol. Each swap should yield exactly one dToken.
        """
        # Create addresses for futures
        address_tsla = self.nodes[0].getnewaddress("", "legacy")
        address_twtr = self.nodes[0].getnewaddress("", "legacy")
        address_googl = self.nodes[0].getnewaddress("", "legacy")
        address_msft = self.nodes[0].getnewaddress("", "legacy")
        # Fund each address with exactly one premium price worth of DUSD
        self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}'})
        self.nodes[0].generate(1)
        # Create user futures contracts; destination given as token id
        # string, then as symbol, to cover both argument forms.
        self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', self.idMSFT)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', self.idTWTR)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', self.symbolGOOGL)
        self.nodes[0].generate(1)
        self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', self.symbolTSLA)
        self.nodes[0].generate(1)
        # List all pending swaps; entries appear newest first.
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(result[0]['owner'], address_tsla)
        assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[0]['destination'], self.symbolTSLA)
        assert_equal(result[1]['owner'], address_googl)
        assert_equal(result[1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[1]['destination'], self.symbolGOOGL)
        assert_equal(result[2]['owner'], address_twtr)
        assert_equal(result[2]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[2]['destination'], self.symbolTWTR)
        assert_equal(result[3]['owner'], address_msft)
        assert_equal(result[3]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result[3]['destination'], self.symbolMSFT)
        # Check pending swaps per address
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTSLA)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolTWTR)
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
        assert_equal(result['values'][0]['destination'], self.symbolMSFT)
        # Burn info still reflects only the previous dToken->DUSD round.
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Live attributes now include the deposited DUSD plus the totals
        # burned and minted by the previous round.
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert_equal(result['v0/live/economy/dfip2203_burned'], [f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        assert_equal(result['v0/live/economy/dfip2203_minted'], [f'{self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
        # Record dToken supplies and move to the next futures block.
        total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
        total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
        total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
        total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
        next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
        self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
        # Exactly one of each dToken was minted.
        new_total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
        new_total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
        new_total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
        new_total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
        assert_equal(total_tsla + Decimal('1.00000000'), new_total_tsla)
        assert_equal(total_googl + Decimal('1.00000000'), new_total_googl)
        assert_equal(total_twtr + Decimal('1.00000000'), new_total_twtr)
        assert_equal(total_msft + Decimal('1.00000000'), new_total_msft)
        # All pending swaps have been consumed.
        result = self.nodes[0].listpendingfutureswaps()
        assert_equal(len(result), 0)
        result = self.nodes[0].getpendingfutureswaps(address_msft)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_googl)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_tsla)
        assert_equal(len(result['values']), 0)
        result = self.nodes[0].getpendingfutureswaps(address_twtr)
        assert_equal(len(result['values']), 0)
        # Contract address, live attributes and burn info now hold the DUSD.
        result = self.nodes[0].getaccount(self.contract_address)
        assert_equal(result, [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
        assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].getburninfo()
        assert_equal(result['dfip2203'], [f'3992.10000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
        # Each user received exactly one dToken.
        result = self.nodes[0].getaccount(address_msft)
        assert_equal(result, [f'1.00000000@{self.symbolMSFT}'])
        result = self.nodes[0].getaccount(address_googl)
        assert_equal(result, [f'1.00000000@{self.symbolGOOGL}'])
        result = self.nodes[0].getaccount(address_tsla)
        assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])
        result = self.nodes[0].getaccount(address_twtr)
        assert_equal(result, [f'1.00000000@{self.symbolTWTR}'])
        # Record swaps for rpc_history().
        self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
            {'address': address_tsla, 'destination': f'1.00000000@{self.symbolTSLA}'},
            {'address': address_googl, 'destination': f'1.00000000@{self.symbolGOOGL}'},
            {'address': address_twtr, 'destination': f'1.00000000@{self.symbolTWTR}'},
            {'address': address_msft, 'destination': f'1.00000000@{self.symbolMSFT}'},
        ]})
def check_swap_block_range(self):
    """Verify that a future swap created in the settlement block itself is
    executed immediately, and that nothing can be withdrawn afterwards."""

    # Fund a fresh address with twice the TSLA premium price in DUSD
    address = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)

    # Mine up to one block before the next futures settlement block
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount() - 1)

    # Create the swap so that it is mined in the settlement block itself
    self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)

    # The swap paid out immediately: 1 TSLA received, remaining DUSD untouched
    result = self.nodes[0].getaccount(address)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])

    # No pending swaps should remain anywhere
    result = self.nodes[0].listpendingfutureswaps()
    assert_equal(len(result), 0)
    result = self.nodes[0].getpendingfutureswaps(address)
    assert_equal(len(result['values']), 0)

    # Withdrawing after execution must fail: there is nothing left to withdraw
    assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))

    # Record the executed swap for the history checks in rpc_history()
    self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
        {'address': address, 'destination': f'1.00000000@{self.symbolTSLA}'},
    ]})

    # Advance to the next futures block; balances change only by the one swap
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
    result = self.nodes[0].getaccount(address)
    assert_equal(result, [f'913.50000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}'])
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'4905.60000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_multiple_swaps(self):
    """Create two pending swaps each for two addresses (TSLA and TWTR) and
    verify that all four execute on the next futures block.

    Bug fix: the second pending TWTR swap was never checked — the original
    assertions inspected ``values[0]`` twice instead of ``values[0]`` and
    ``values[1]`` (compare with the TSLA checks below).
    """

    # Fund two fresh addresses with twice the respective premium price in DUSD
    address_tsla = self.nodes[0].getnewaddress("", "legacy")
    address_twtr = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)

    # Create two identical swaps per address
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
    self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
    self.nodes[0].generate(1)

    # Both TSLA swaps should be pending
    result = self.nodes[0].getpendingfutureswaps(address_tsla)
    assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolTSLA)
    assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolTSLA)

    # Both TWTR swaps should be pending (fixed: second entry is values[1])
    result = self.nodes[0].getpendingfutureswaps(address_twtr)
    assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolTWTR)
    assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolTWTR)

    # Advance to the next futures block so all four swaps execute
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())

    # Each address now holds two units of its destination token
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [f'2.00000000@{self.symbolTSLA}'])
    result = self.nodes[0].getaccount(address_twtr)
    assert_equal(result, [f'2.00000000@{self.symbolTWTR}'])

    # Contract address accumulated the DUSD paid in
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'6810.30000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_withdrawals(self):
    """Exercise withdrawfutureswap on pending swaps: full withdrawal,
    all-but-one-Satoshi, more-than-one-swap's-worth, and a single Satoshi,
    plus the error paths for the wrong token and over-withdrawal."""

    # Fund four fresh addresses with twice the respective premium price in DUSD
    address_tsla = self.nodes[0].getnewaddress("", "legacy")
    address_twtr = self.nodes[0].getnewaddress("", "legacy")
    address_googl = self.nodes[0].getnewaddress("", "legacy")
    address_msft = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)

    # Two pending swaps per address
    self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
    self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', int(self.idMSFT))
    self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
    self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTWTR))
    self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)

    # Verify both pending entries for every address
    result = self.nodes[0].getpendingfutureswaps(address_tsla)
    assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolTSLA)
    assert_equal(result['values'][1]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolTSLA)
    result = self.nodes[0].getpendingfutureswaps(address_googl)
    assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
    assert_equal(result['values'][1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolGOOGL)
    result = self.nodes[0].getpendingfutureswaps(address_twtr)
    assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolTWTR)
    assert_equal(result['values'][1]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolTWTR)
    result = self.nodes[0].getpendingfutureswaps(address_msft)
    assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolMSFT)
    assert_equal(result['values'][1]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result['values'][1]['destination'], self.symbolMSFT)

    # Withdrawing towards a token the address has no pending swap for fails
    assert_raises_rpc_error(-32600, f'amount 0.00000000 is less than {self.prices[2]["premiumPrice"] * 2}', self.nodes[0].withdrawfutureswap, address_tsla, f'{self.prices[2]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTWTR))
    # Withdrawing one Satoshi more than is pending fails
    assert_raises_rpc_error(-32600, f'amount {self.prices[0]["premiumPrice"] * 2} is less than {(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}', self.nodes[0].withdrawfutureswap, address_tsla, f'{(self.prices[0]["premiumPrice"] * 2) + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))

    # TSLA: withdraw the full pending amount; nothing left pending
    self.nodes[0].withdrawfutureswap(address_tsla, f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)
    result = self.nodes[0].getpendingfutureswaps(address_tsla)
    assert_equal(len(result['values']), 0)
    # A further withdrawal attempt then fails
    assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 0.00000001', self.nodes[0].withdrawfutureswap, address_tsla, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))

    # GOOGL: withdraw all but one Satoshi; one Satoshi remains pending
    self.nodes[0].withdrawfutureswap(address_googl, f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].generate(1)
    result = self.nodes[0].getpendingfutureswaps(address_googl)
    assert_equal(result['values'][0]['source'], f'0.00000001@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolGOOGL)

    # TWTR: withdraw one full swap plus a Satoshi; the rest stays pending
    self.nodes[0].withdrawfutureswap(address_twtr, f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTWTR))
    self.nodes[0].generate(1)
    result = self.nodes[0].getpendingfutureswaps(address_twtr)
    assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolTWTR)

    # MSFT: withdraw just one Satoshi; nearly everything stays pending
    self.nodes[0].withdrawfutureswap(address_msft, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idMSFT))
    self.nodes[0].generate(1)
    result = self.nodes[0].getpendingfutureswaps(address_msft)
    assert_equal(result['values'][0]['source'], f'{(self.prices[3]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}')
    assert_equal(result['values'][0]['destination'], self.symbolMSFT)

    # Advance to the futures block so the remaining pending amounts execute
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())

    # TSLA got everything back; the others receive tokens pro rata to what was left
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_twtr)
    assert_equal(result, [f'{self.prices[2]["premiumPrice"] + Decimal("0.00000001")}@{self.symbolDUSD}', f'0.99999999@{self.symbolTWTR}'])
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{(self.prices[1]["premiumPrice"] * 2) - Decimal("0.00000001")}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_msft)
    assert_equal(result, [f'0.00000001@{self.symbolDUSD}', f'1.99999999@{self.symbolMSFT}'])

    # Contract balance, live gov attribute and burn info must all agree
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
    assert_equal(result['v0/live/economy/dfip2203_current'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].getburninfo()
    assert_equal(result['dfip2203'], [f'7468.64999999@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_minimum_swaps(self):
    """Check dust-sized swaps: one Satoshi of DUSD pays out nothing, while
    the smallest amount that rounds to one Satoshi of TSLA pays out
    0.00000001 TSLA."""

    # Fund a fresh address with one premium price worth of DUSD
    address = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)

    # Swap a single Satoshi of DUSD and let the futures block execute it
    self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())

    # One Satoshi of DUSD was consumed but no TSLA was credited
    result = self.nodes[0].getaccount(address)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001")}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.65000000@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])

    # Smallest DUSD amount that buys one Satoshi of TSLA at the premium price
    min_purchase = round(self.prices[0]["premiumPrice"] / 100000000, 8)
    self.nodes[0].futureswap(address, f'{min_purchase}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())

    # This time a single Satoshi of TSLA is credited
    result = self.nodes[0].getaccount(address)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"] - Decimal("0.00000001") - Decimal(min_purchase)}@{self.symbolDUSD}', f'0.00000001@{self.symbolTSLA}'])
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.65000914@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def check_gov_var_change(self):
    """Toggle the DFIP2203 governance variables: the block period cannot be
    changed while the feature is active, disabling the feature refunds
    pending swaps, and the per-token dfip2203 flag blocks new swaps for
    that token and refunds its pending ones."""

    # Fund a fresh address and execute a one-Satoshi swap on the futures block
    address = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval)) - 1
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
    self.nodes[0].futureswap(address, f'{Decimal("0.00000001")}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])

    # Block period cannot be changed while DFIP2203 is active
    assert_raises_rpc_error(-32600, 'Cannot set block period while DFIP2203 is active', self.nodes[0].setgov, {"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})

    # Disable the feature; the contract balance stays as-is
    self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
    self.nodes[0].generate(1)
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])

    # While inactive, the block period may now be set; then re-enable
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
    self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
    self.nodes[0].generate(1)
    self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
    self.nodes[0].generate(1)

    # Create two pending swaps, then disable the feature to trigger refunds
    address_tsla = self.nodes[0].getnewaddress("", "legacy")
    address_googl = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
    self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)
    self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false'}})
    self.nodes[0].generate(1)

    # Refund history entries ('w') are numbered down from the max uint32 txn
    txn_first = 4294967295
    result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
    result.sort(key = sort_history, reverse = True)
    for result_entry in result:
        assert_equal(result_entry['blockHeight'], self.nodes[0].getblockcount())
        assert_equal(result_entry['type'], 'FutureSwapRefund')
        assert_equal(result_entry['txn'], txn_first)
        txn_first -= 1

    # Entries alternate: contract debit (negative) then user credit (positive)
    assert_equal(result[0]['owner'], self.contract_address)
    assert_equal(result[2]['owner'], self.contract_address)
    if result[0]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
        assert_equal(result[0]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    if result[2]['amounts'] != [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}']:
        assert_equal(result[2]['amounts'], [f'{-self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    if result[1]['owner'] == address_googl:
        assert_equal(result[1]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    else:
        assert_equal(result[1]['owner'], address_tsla)
        assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
    if result[3]['owner'] == address_googl:
        assert_equal(result[3]['amounts'], [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    else:
        assert_equal(result[3]['owner'], address_tsla)
        assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])

    # Both users were refunded in full; contract totals unchanged
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'7468.65000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])

    # Re-enable and swap again for both addresses
    self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)

    # Disabling DFIP2203 for GOOGL only refunds the GOOGL swap
    self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
    self.nodes[0].generate(1)
    result = self.nodes[0].listpendingfutureswaps()
    assert_equal(len(result), 1)
    assert_equal(result[0]['owner'], address_tsla)
    assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
    assert_equal(result[0]['destination'], self.symbolTSLA)
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [])

    # Re-enable the token flag, swap again, then disable: refunded once more
    self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'true'}})
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', int(self.idGOOGL))
    self.nodes[0].generate(1)
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [])
    self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idGOOGL)}/dfip2203':'false'}})
    self.nodes[0].generate(1)
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])

    # On the futures block only the TSLA swap executes
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [f'1.00000000@{self.symbolTSLA}'])

    # Contract balance, live gov attribute and burn info must all agree
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
    assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].getburninfo()
    assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def unpaid_contract(self):
    """Remove the price oracle before the futures block and verify both
    pending swaps are refunded (FutureSwapRefund entries) and that contract,
    live gov attribute and burn totals are unchanged."""

    # Fund a fresh address and create two pending TSLA swaps
    address = self.nodes[0].getnewaddress("", "legacy")
    self.nodes[0].accounttoaccount(self.address, {address: f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'})
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].futureswap(address, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', int(self.idTSLA))
    self.nodes[0].generate(1)

    # Remove the oracle so no price is available at settlement time
    self.nodes[0].removeoracle(self.oracle_id)
    self.nodes[0].generate(1)
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())

    # History shows four refund entries: contract debit / user credit per swap
    result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'w'})
    result.sort(key = sort_history, reverse = True)
    assert_equal(result[0]['owner'], self.contract_address)
    assert_equal(result[0]['type'], 'FutureSwapRefund')
    assert_equal(result[0]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
    assert_equal(result[1]['owner'], address)
    assert_equal(result[1]['type'], 'FutureSwapRefund')
    assert_equal(result[1]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
    assert_equal(result[2]['owner'], self.contract_address)
    assert_equal(result[2]['type'], 'FutureSwapRefund')
    assert_equal(result[2]['amounts'], [f'{-self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])
    assert_equal(result[3]['owner'], address)
    assert_equal(result[3]['type'], 'FutureSwapRefund')
    assert_equal(result[3]['amounts'], [f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'])

    # User got the full amount back; totals unchanged everywhere
    result = self.nodes[0].getaccount(address)
    assert_equal(result, [f'{self.prices[0]["premiumPrice"] * 2}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
    assert_equal(result['v0/live/economy/dfip2203_current'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
    result = self.nodes[0].getburninfo()
    assert_equal(result['dfip2203'], [f'8382.15000915@{self.symbolDUSD}', f'1.00000000@{self.symbolTSLA}', f'1.00000000@{self.symbolGOOGL}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.symbolMSFT}'])
def rpc_history(self):
    """Cross-check listaccounthistory against the swaps recorded in
    self.list_history. txtype 'q' entries are FutureSwapExecution; 'w'
    entries are FutureSwapRefund (asserted in the earlier checks)."""

    # Every recorded swap must show up as an execution entry at its height
    for history in self.list_history:
        result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":history['height'], 'depth':0, 'txtype':'q'})
        for history_entry in history['swaps']:
            found = False
            for result_entry in result:
                assert_equal(history['height'], result_entry['blockHeight'])
                if result_entry['owner'] == history_entry['address']:
                    assert_equal(result_entry['owner'], history_entry['address'])
                    assert_equal(result_entry['type'], 'FutureSwapExecution')
                    assert_equal(result_entry['amounts'], [history_entry['destination']])
                    found = True
            assert(found)

    # Total counts over the whole test run: 17 executions and 12 refunds
    result = self.nodes[0].listaccounthistory('all', {'txtype':'q'})
    assert_equal(len(result), 17)
    result = self.nodes[0].listaccounthistory('all', {'txtype':'w'})
    assert_equal(len(result), 12)

    # History filtered by a single owner returns just that owner's execution
    result = self.nodes[0].listaccounthistory(self.list_history[0]['swaps'][0]['address'], {'txtype':'q'})
    assert_equal(len(result), 1)
    assert_equal(result[0]['blockHeight'], self.list_history[0]['height'])
    assert_equal(result[0]['owner'], self.list_history[0]['swaps'][0]['address'])
    assert_equal(result[0]['amounts'], [self.list_history[0]['swaps'][0]['destination']])
if __name__ == '__main__':
    # Entry point: run the full futures-swap functional test suite
    FuturesTest().main()
| true | true |
f733497cf2d5a00cffbf4d74efa080b6f969a44d | 754 | py | Python | tests/test_model/test_recognizer/test_sknet.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | null | null | null | tests/test_model/test_recognizer/test_sknet.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | null | null | null | tests/test_model/test_recognizer/test_sknet.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@date: 2020/11/21 下午4:16
@file: test_resnest.py
@author: zj
@description:
"""
import torch
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.resnet.resnet import ResNet
def test_data(model, input_shape, output_shape):
    """Feed a random tensor of ``input_shape`` through ``model`` and assert
    that its KEY_OUTPUT entry has shape ``output_shape``."""
    inputs = torch.randn(input_shape)
    result = model(inputs)[KEY_OUTPUT]
    print(result.shape)
    assert result.shape == output_shape
def test_sknet():
    """Build an SKNet-50 recognizer from its benchmark config and smoke-test
    it: a (3, 3, 224, 224) batch should yield (3, 1000) outputs."""
    cfg.merge_from_file('configs/benchmarks/resnet-resnext/sknet50_zcls_imagenet_224.yaml')
    model = ResNet(cfg)
    print(model)
    test_data(model, (3, 3, 224, 224), (3, 1000))
if __name__ == '__main__':
    # Run the SKNet smoke test directly
    print('*' * 10 + ' sknet')
    test_sknet()
| 20.378378 | 84 | 0.696286 |
import torch
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.resnet.resnet import ResNet
def test_data(model, input_shape, output_shape):
    """Run a random tensor of `input_shape` through `model` and assert that
    the KEY_OUTPUT tensor has shape `output_shape`."""
    data = torch.randn(input_shape)
    outputs = model(data)[KEY_OUTPUT]
    print(outputs.shape)
    assert outputs.shape == output_shape
def test_sknet():
    """Smoke-test an SKNet-50 built from its benchmark YAML config."""
    config_file = 'configs/benchmarks/resnet-resnext/sknet50_zcls_imagenet_224.yaml'
    cfg.merge_from_file(config_file)
    model = ResNet(cfg)
    print(model)
    # Batch of 3 RGB 224x224 images -> 1000-class logits
    test_data(model, (3, 3, 224, 224), (3, 1000))
if __name__ == '__main__':
    # Run the SKNet smoke test directly
    print('*' * 10 + ' sknet')
    test_sknet()
| true | true |
f7334ab596a90f93b66ce42aeddf328ed698df40 | 50,455 | py | Python | pyUSID/io/hdf_utils/simple.py | rajgiriUW/pyUSID | 064dcd81d9c42f4eb4782f0a41fd437b3f56f50c | [
"MIT"
] | 25 | 2018-07-11T21:43:56.000Z | 2021-11-17T11:40:00.000Z | pyUSID/io/hdf_utils/simple.py | rajgiriUW/pyUSID | 064dcd81d9c42f4eb4782f0a41fd437b3f56f50c | [
"MIT"
] | 62 | 2018-07-05T20:28:52.000Z | 2021-12-14T09:49:35.000Z | pyUSID/io/hdf_utils/simple.py | rajgiriUW/pyUSID | 064dcd81d9c42f4eb4782f0a41fd437b3f56f50c | [
"MIT"
] | 15 | 2019-03-27T22:28:47.000Z | 2021-01-03T20:23:42.000Z | # -*- coding: utf-8 -*-
"""
Lower-level and simpler USID-specific HDF5 utilities that facilitate higher-level data operations
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import collections
from warnings import warn
import sys
import h5py
import numpy as np
import dask.array as da
from sidpy.hdf.hdf_utils import get_auxiliary_datasets, link_h5_obj_as_alias, \
write_simple_attrs, is_editable_h5, validate_h5_objs_in_same_h5_file, \
get_attr
from sidpy.hdf.dtype_utils import validate_dtype
from sidpy.hdf import hdf_utils as hut
from sidpy.base.string_utils import validate_single_string_arg, validate_list_of_strings
from sidpy.base.num_utils import contains_integers
from sidpy.base.string_utils import clean_string_att
from ..anc_build_utils import build_ind_val_matrices, INDICES_DTYPE, VALUES_DTYPE
from ..dimension import DimType, Dimension
from .base import write_book_keeping_attrs
if sys.version_info.major == 3:
unicode = str
"""
__all__ = ['assign_group_index', 'check_and_link_ancillary', 'check_for_matching_attrs', 'check_for_old',
'check_if_main', 'copy_attributes', 'copy_main_attributes']
"""
def get_all_main(parent, verbose=False):
    """
    Recursively finds all 'Main' USID datasets within the given HDF5 group

    Parameters
    ----------
    parent : :class:`h5py.Group` or :class:`h5py.File`
        HDF5 Group to search within
    verbose : bool, optional. Default = False
        If true, extra print statements (usually for debugging) are enabled

    Returns
    -------
    main_list : list of h5py.Dataset
        The datasets found in the file that meet the 'Main Data' criteria.
    """
    if not isinstance(parent, (h5py.Group, h5py.File)):
        raise TypeError('parent should be a h5py.File or h5py.Group object')

    # Imported here to avoid a circular import at module load time
    from ..usi_data import USIDataset

    found = []

    def _visitor(name, obj):
        # Called by h5py for every object nested under `parent`
        if verbose:
            print(name, obj)
        if not isinstance(obj, h5py.Dataset):
            return
        if verbose:
            print(name, 'is an HDF5 Dataset.')
        if check_if_main(obj):
            if verbose:
                print(name, 'is a `Main` dataset.')
            found.append(USIDataset(obj))

    if verbose:
        print('Checking the group {} for `Main` datasets.'.format(parent.name))
    parent.visititems(_visitor)

    return found
def find_dataset(h5_group, dset_name):
    """
    Uses visit() to find all datasets with the desired name

    Parameters
    ----------
    h5_group : :class:`h5py.Group`
        Group to search within for the Dataset
    dset_name : str
        Name of the dataset to search for

    Returns
    -------
    datasets : list
        List of [Name, object] pairs corresponding to datasets that match `ds_name`.
    """
    # Imported here to avoid a circular import at module load time
    from ..usi_data import USIDataset

    def _as_usid(dset):
        # Upgrade to a USIDataset where possible; otherwise keep the raw dataset
        try:
            return USIDataset(dset)
        except TypeError:
            return dset

    return [_as_usid(obj) for obj in hut.find_dataset(h5_group, dset_name)]
def find_results_groups(h5_main, tool_name, h5_parent_group=None):
    """
    Finds a list of all groups containing results of the process of name
    `tool_name` being applied to the dataset

    Parameters
    ----------
    h5_main : h5 dataset reference
        Reference to the target dataset to which the tool was applied
    tool_name : String / unicode
        Name of the tool applied to the target dataset
    h5_parent_group : h5py.Group, optional. Default = None
        Parent group under which the results group will be searched for. Use
        this option when the results groups are contained in different HDF5
        file compared to `h5_main`. BY default, this function will search
        within the same group that contains `h5_main`

    Returns
    -------
    groups : list of references to :class:`h5py.Group` objects
        groups whose name contains the tool name and the dataset name
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')

    # Default to searching alongside h5_main itself
    if h5_parent_group is None:
        h5_parent_group = h5_main.parent
    elif not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")

    # Result group names contain both the dataset name and the tool name
    dset_name = h5_main.name.split('/')[-1]
    return [h5_parent_group[key] for key in h5_parent_group.keys()
            if dset_name in key and tool_name in key
            and isinstance(h5_parent_group[key], h5py.Group)]
def check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):
    """
    This function will add references to auxilliary datasets as attributes
    of an input dataset.
    If the entries in anc_refs are valid references, they will be added
    as attributes with the name taken from the corresponding entry in
    anc_names.
    If an entry in anc_refs is not a valid reference, the function will
    attempt to get the attribute with the same name from the h5_main
    dataset

    Parameters
    ----------
    h5_dset : HDF5 Dataset
        dataset to which the attributes will be written
    anc_names : list of str
        the attribute names to be used
    h5_main : HDF5 Dataset, optional
        dataset from which attributes will be copied if `anc_refs` is None
    anc_refs : list of HDF5 Object References, optional
        references that correspond to the strings in `anc_names`

    Returns
    -------
    None

    Notes
    -----
    Either `h5_main` or `anc_refs` MUST be provided and `anc_refs` has the
    higher priority if both are present.
    """
    if not isinstance(h5_dset, h5py.Dataset):
        raise TypeError('h5_dset should be a h5py.Dataset object')

    # Normalize single values to lists so both spellings are accepted below
    if isinstance(anc_names, (str, unicode)):
        anc_names = [anc_names]
    if isinstance(anc_refs, (h5py.Dataset, h5py.Group, h5py.File,
                             h5py.Reference)):
        anc_refs = [anc_refs]

    if not isinstance(anc_names, (list, tuple)):
        raise TypeError('anc_names should be a list / tuple')
    if h5_main is not None:
        if not isinstance(h5_main, h5py.Dataset):
            raise TypeError('h5_main should be a h5py.Dataset object')
        # Attribute references can only point within a single HDF5 file
        validate_h5_objs_in_same_h5_file(h5_dset, h5_main)
    if anc_refs is not None:
        if not isinstance(anc_refs, (list, tuple)):
            raise TypeError('anc_refs should be a list / tuple')

    if anc_refs is None and h5_main is None:
        raise ValueError('No objected provided to link as ancillary')

    def __check_and_link_single(h5_obj_ref, target_ref_name):
        # Writes one reference attribute named `target_ref_name` onto h5_dset
        if isinstance(h5_obj_ref, h5py.Reference):
            # TODO: Same HDF5 file?
            h5_dset.attrs[target_ref_name] = h5_obj_ref
        elif isinstance(h5_obj_ref, (h5py.Dataset, h5py.Group, h5py.File)):
            # Got an HDF5 object instead of a reference: store its .ref
            validate_h5_objs_in_same_h5_file(h5_obj_ref, h5_dset)
            h5_dset.attrs[target_ref_name] = h5_obj_ref.ref
        elif h5_main is not None:
            # Fall back: copy the like-named ancillary link from h5_main
            h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])
            if len(h5_anc) == 1:
                link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)
        else:
            warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))
            warn(warnstring)

    if bool(np.iterable(anc_refs) and not isinstance(anc_refs, h5py.Dataset)):
        """
        anc_refs can be iterated over
        """
        for ref_name, h5_ref in zip(anc_names, anc_refs):
            __check_and_link_single(h5_ref, ref_name)
    elif anc_refs is not None:
        """
        anc_refs is just a single value
        """
        __check_and_link_single(anc_refs, anc_names)
    elif isinstance(anc_names, str) or isinstance(anc_names, unicode):
        """
        Single name provided
        """
        __check_and_link_single(None, anc_names)
    else:
        """
        Iterable of names provided
        """
        for name in anc_names:
            __check_and_link_single(None, name)

    # Push the newly written attributes to disk
    h5_dset.file.flush()
def validate_main_dset(h5_main, must_be_h5):
    """
    Verifies that the provided object can serve as a USID main dataset.
    Raises an Exception when any requirement is violated.

    Parameters
    ----------
    h5_main : h5py.Dataset or numpy.ndarray or Dask.array.core.array
        object that represents the USID main data
    must_be_h5 : bool
        Set to True if expecting an h5py.Dataset object.
        Set to False if expecting a numpy.ndarray or Dask.array.core.array

    Returns
    -------
    None
    """
    # The acceptable type depends on whether an HDF5-backed object is required
    if must_be_h5 and not isinstance(h5_main, h5py.Dataset):
        raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))
    if not must_be_h5 and not isinstance(h5_main, (np.ndarray, da.core.Array)):
        raise TypeError('raw_data should either be a np.ndarray or a da.core.Array')
    # USID main data is always flattened to exactly two dimensions
    if len(h5_main.shape) != 2:
        raise ValueError('Main data is not 2D. Provided object has shape: {}'.format(h5_main.shape))
def validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):
    """
    Verifies that a pair of ancillary HDF5 datasets is consistent with the
    shape of a main dataset. Raises an Exception on any inconsistency.

    Parameters
    ----------
    h5_inds : h5py.Dataset
        HDF5 dataset corresponding to the ancillary Indices dataset
    h5_vals : h5py.Dataset
        HDF5 dataset corresponding to the ancillary Values dataset
    main_shape : array-like
        Shape of the main dataset expressed as a tuple or similar
    is_spectroscopic : bool, Optional. Default = True
        set to True if ``dims`` correspond to Spectroscopic Dimensions.
        False otherwise.
    """
    if not isinstance(h5_inds, h5py.Dataset):
        raise TypeError('h5_inds must be a h5py.Dataset object')
    if not isinstance(h5_vals, h5py.Dataset):
        raise TypeError('h5_vals must be a h5py.Dataset object')
    # Indices and Values always come in identically-shaped pairs
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '
                         'shape'.format(h5_inds.shape, h5_vals.shape))
    if not isinstance(main_shape, (list, tuple)):
        raise TypeError('main_shape should be of the following types:'
                        'h5py.Dataset, tuple, or list. {} provided'
                        ''.format(type(main_shape)))
    if not contains_integers(main_shape, min_val=1) or len(main_shape) != 2:
        raise ValueError("'main_shape' must be a valid HDF5 dataset shape")
    # Spectroscopic pairs must agree with the main data along axis 1,
    # Position pairs along axis 0 (the bool doubles as the axis index)
    axis = int(is_spectroscopic)
    if h5_inds.shape[axis] != main_shape[axis]:
        raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '
                         'should be equal'.format(axis, h5_inds.shape,
                                                  main_shape))
def validate_dims_against_main(main_shape, dims, is_spectroscopic=True):
    """
    Checks Dimension objects against a given shape for main datasets.
    Errors in parameters will result in Exceptions

    Parameters
    ----------
    main_shape : array-like
        Tuple or list with the shape of the main data
    dims : iterable
        List of Dimension objects
    is_spectroscopic : bool, Optional. Default = True
        set to True if ``dims`` correspond to Spectroscopic Dimensions.
        False otherwise.
    """
    if not isinstance(main_shape, (list, tuple)):
        raise TypeError('main_shape should be a list or tuple. Provided object'
                        ' was of type: {}'.format(type(main_shape)))
    if len(main_shape) != 2:
        raise ValueError('"main_shape" should be of length 2')
    contains_integers(main_shape, min_val=1)
    # Accept a single Dimension object for convenience
    if isinstance(dims, Dimension):
        dims = [dims]
    elif not isinstance(dims, (list, tuple)):
        raise TypeError('"dims" must be a list or tuple of usid.Dimension '
                        'objects. Provided object was of type: {}'
                        ''.format(type(dims)))
    if not all([isinstance(obj, Dimension) for obj in dims]):
        raise TypeError('One or more objects in "dims" was not usid.Dimension')
    # Spectroscopic dims span axis 1 of the main data; Position dims axis 0
    if is_spectroscopic:
        main_dim = 1
        dim_category = 'Spectroscopic'
    else:
        main_dim = 0
        dim_category = 'Position'
    # TODO: This is where the dimension type will need to be taken into account
    lhs = main_shape[main_dim]
    # np.prod instead of np.product: the latter was deprecated in NumPy 1.25
    # and removed in NumPy 2.0
    rhs = np.prod([len(x.values) for x in dims])
    if lhs != rhs:
        raise ValueError(dim_category +
                         ' dimensions in main data of size: {} do not match '
                         'with product of values in provided Dimension objects'
                         ': {}'.format(lhs, rhs))
def check_if_main(h5_main, verbose=False):
    """
    Checks the input dataset to see if it has all the necessary
    features to be considered a Main dataset. This means it is
    2D and has the following attributes:
    * Position_Indices
    * Position_Values
    * Spectroscopic_Indices
    * Spectroscopic_Values
    * quantity
    * units
    In addition, the shapes of the ancillary matrices should match with that of
    h5_main
    Parameters
    ----------
    h5_main : HDF5 Dataset
        Dataset of interest
    verbose : Boolean (Optional. Default = False)
        Whether or not to print statements
    Returns
    -------
    success : Boolean
        True if all tests pass
    """
    # First gate: must be an h5py.Dataset and 2D
    try:
        validate_main_dset(h5_main, True)
    except Exception as exep:
        if verbose:
            print(exep)
        return False
    h5_name = h5_main.name.split('/')[-1]
    success = True
    # Check for Datasets
    dset_names = ['Position_Indices', 'Position_Values',
                  'Spectroscopic_Indices', 'Spectroscopic_Values']
    for name in dset_names:
        try:
            h5_anc_dset = h5_main.file[h5_main.attrs[name]]
            success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])
        except Exception:
            # Was a bare "except:", which also swallowed KeyboardInterrupt /
            # SystemExit. A missing attribute raises KeyError, but
            # dereferencing a stale object reference may raise other
            # exceptions, hence Exception rather than KeyError alone.
            if verbose:
                print('{} not found as an attribute of {}.'.format(name, h5_name))
            return False
    # The two scalar string attributes are mandatory as well
    attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])
    if not attr_success:
        if verbose:
            print('{} does not have the mandatory "quantity" and "units" attributes'.format(h5_main.name))
        return False
    for attr_name in ['quantity', 'units']:
        val = get_attr(h5_main, attr_name)
        if not isinstance(val, (str, unicode)):
            if verbose:
                print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))
            return False
    # Blindly linking four datasets is still not sufficient. The sizes need to match:
    anc_shape_match = list()
    h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]
    h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]
    anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))
    for anc_dset in [h5_pos_vals, h5_pos_inds]:
        anc_shape_match.append(np.all(h5_main.shape[0] == anc_dset.shape[0]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Position indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_pos_inds.shape, h5_pos_vals.shape, h5_main.shape))
        return False
    anc_shape_match = list()
    h5_spec_inds = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
    h5_spec_vals = h5_main.file[h5_main.attrs['Spectroscopic_Values']]
    anc_shape_match.append(np.all(h5_spec_inds.shape == h5_spec_vals.shape))
    for anc_dset in [h5_spec_inds, h5_spec_vals]:
        anc_shape_match.append(np.all(h5_main.shape[1] == anc_dset.shape[1]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Spectroscopic indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_spec_inds.shape, h5_spec_vals.shape, h5_main.shape))
        return False
    # Finally, the labels / units attributes of each pair must be consistent
    try:
        validate_anc_dset_attrs(h5_pos_inds, h5_pos_vals, is_spec=False)
    except ValueError:
        if verbose:
            print('Attributes of Position datasets did not match')
        return False
    try:
        validate_anc_dset_attrs(h5_spec_inds, h5_spec_vals, is_spec=True)
    except ValueError:
        if verbose:
            print('Attributes of Spectroscopic datasets did not match')
        return False
    return success
def validate_anc_dset_attrs(h5_inds, h5_vals, is_spec=True):
    """
    Checks that the mandatory attributes ('labels', 'units') of an Indices /
    Values dataset pair are mutually consistent and match the datasets'
    shapes. Raises ValueError when any rule is not satisfied.

    Parameters
    ----------
    h5_inds : h5py.Dataset
        Indices dataset
    h5_vals : h5py.Dataset
        Values Dataset
    is_spec : bool, optional. Default = True
        Set to True if spectroscopic. Else - Position datasets
    """
    def _sequences_equal(seq_a, seq_b):
        # True only when both sequences have identical length and items
        if len(seq_a) != len(seq_b):
            return False
        return all(a_item == b_item for a_item, b_item in zip(seq_a, seq_b))
    v_names = get_attr(h5_vals, 'labels')
    v_units = get_attr(h5_vals, 'units')
    i_names = get_attr(h5_inds, 'labels')
    i_units = get_attr(h5_inds, 'units')
    # Each dataset must carry exactly one unit per label
    for names, units, dset_type in zip([v_names, i_names], [v_units, i_units],
                                       ['Values', 'Indices']):
        if len(names) != len(units):
            raise ValueError('Length of labels: {} and units: {} for the {} '
                             'dataset do not match'
                             ''.format(len(names), len(units), dset_type))
    # The Indices and Values datasets must agree on both labels and units
    for i_item, v_item, prop in zip([i_names, i_units], [v_names, v_units],
                                    ['labels', 'units']):
        if not _sequences_equal(i_item, v_item):
            raise ValueError('The "{}" values of the Indices: {} and Values: '
                             '{} datasets do not match'.format(prop, i_item,
                                                               v_item))
    # Now check the rows / cols nums against size of any attr:
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('Shape of Indices: {} and Values: {} datasets do '
                         'not match'.format(h5_inds.shape, h5_vals.shape))
    # Spectroscopic datasets hold one row per dimension; Position datasets
    # hold one column per dimension
    dim_ind = 0 if is_spec else 1
    if h5_inds.shape[dim_ind] != len(v_names):
        raise ValueError('Length of mandatory attributes: {} did not match '
                         'dimension: {} of the ancillary dataset of shape: {}'
                         ''.format(len(v_names), dim_ind, h5_inds.shape))
def link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals):
    """
    Attaches object references to the four ancillary datasets as attributes
    of `h5_main`, thereby turning it into a USID Main dataset.

    Parameters
    ----------
    h5_main : h5py.Dataset
        2D Dataset which will have the references added as attributes
    h5_pos_inds : h5py.Dataset
        Dataset that will be linked with the name 'Position_Indices'
    h5_pos_vals : h5py.Dataset
        Dataset that will be linked with the name 'Position_Values'
    h5_spec_inds : h5py.Dataset
        Dataset that will be linked with the name 'Spectroscopic_Indices'
    h5_spec_vals : h5py.Dataset
        Dataset that will be linked with the name 'Spectroscopic_Values'

    Returns
    -------
    pyUSID.USIDataset
        USIDataset version of h5_main now that it is a USID Main dataset
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    # Both ancillary pairs must be consistent with h5_main's shape
    validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, h5_main.shape,
                          is_spectroscopic=False)
    validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, h5_main.shape,
                          is_spectroscopic=True)
    # Attach the four references under their canonical attribute names
    for alias, h5_anc in [('Position_Indices', h5_pos_inds),
                          ('Position_Values', h5_pos_vals),
                          ('Spectroscopic_Indices', h5_spec_inds),
                          ('Spectroscopic_Values', h5_spec_vals)]:
        link_h5_obj_as_alias(h5_main, h5_anc, alias)
    from ..usi_data import USIDataset
    try:
        # If all other conditions are satisfied
        return USIDataset(h5_main)
    except TypeError:
        # If some other conditions are yet to be satisfied
        return h5_main
def check_for_old(h5_base, tool_name, new_parms=None, target_dset=None,
                  h5_parent_goup=None, verbose=False):
    """
    Check to see if the results of a tool already exist and if they
    were performed with the same parameters.
    Parameters
    ----------
    h5_base : h5py.Dataset object
        Dataset on which the tool is being applied to
    tool_name : str
        process or analysis name
    new_parms : dict, optional
        Parameters with which this tool will be performed.
    target_dset : str, optional, default = None
        Name of the dataset whose attributes will be compared against new_parms.
        Default - checking against the group
    h5_parent_goup : h5py.Group, optional. Default = None
        The group to search under. Use this option when `h5_base` and
        the potential results groups (within `h5_parent_goup` are located
        in different HDF5 files. Default - search within h5_base.parent
    verbose : bool, optional, default = False
        Whether or not to print debugging statements
    Returns
    -------
    group : list
        List of all :class:`h5py.Group` objects with parameters matching those in `new_parms`
    """
    # NOTE(review): 'h5_parent_goup' appears to be a typo of
    # 'h5_parent_group', but renaming it would break callers that pass
    # this argument by keyword, so it is left as-is.
    if not isinstance(h5_base, h5py.Dataset):
        raise TypeError('h5_base should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    if h5_parent_goup is not None:
        if not isinstance(h5_parent_goup, (h5py.File, h5py.Group)):
            raise TypeError("'h5_parent_group' should either be a h5py.File "
                            "or h5py.Group object")
    else:
        # Default: look for results groups next to the source dataset
        h5_parent_goup = h5_base.parent
    if new_parms is None:
        new_parms = dict()
    else:
        if not isinstance(new_parms, dict):
            raise TypeError('new_parms should be a dict')
    if target_dset is not None:
        target_dset = validate_single_string_arg(target_dset, 'target_dset')
    matching_groups = []
    groups = find_results_groups(h5_base, tool_name,
                                 h5_parent_group=h5_parent_goup)
    for group in groups:
        if verbose:
            print('Looking at group - {}'.format(group.name.split('/')[-1]))
        # Compare attributes on the group itself, or on a named child
        # dataset when target_dset was supplied
        h5_obj = group
        if target_dset is not None:
            if target_dset in group.keys():
                h5_obj = group[target_dset]
            else:
                if verbose:
                    print('{} did not contain the target dataset: {}'.format(group.name.split('/')[-1],
                                                                             target_dset))
                continue
        if check_for_matching_attrs(h5_obj, new_parms=new_parms, verbose=verbose):
            # return group
            matching_groups.append(group)
    return matching_groups
def get_source_dataset(h5_group):
    """
    Returns the dataset from which the provided results group was derived,
    provided that the source lives in the same HDF5 file.

    Parameters
    ----------
    h5_group : :class:`h5py.Group`
        Child group whose source dataset will be returned

    Returns
    -------
    h5_source : USIDataset object
        Main dataset from which this group was generated
    """
    if not isinstance(h5_group, h5py.Group):
        raise TypeError('h5_group should be a h5py.Group object')
    # Results groups are named "SourceDataset-ProcessName_000"; the text
    # before the '-' is the name of the source dataset
    group_name = h5_group.name.split('/')[-1]
    name_split = group_name.split('-')
    if len(name_split) != 2:
        raise ValueError("The provided group's name could not be split by '-' as expected in "
                         "SourceDataset-ProcessName_000")
    h5_source = h5_group.parent[name_split[0]]
    if not isinstance(h5_source, h5py.Dataset):
        raise ValueError('Source object was not a dataset!')
    from ..usi_data import USIDataset
    return USIDataset(h5_source)
def assign_group_index(h5_parent_group, base_name, verbose=False):
    """
    Returns ``base_name`` suffixed with the next available 3-digit index
    among the existing sub-groups of ``h5_parent_group``.

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` object
        Parent group under which the new group object will be created
    base_name : str or unicode
        Base name of the new group without index
    verbose : bool, optional. Default=False
        Whether or not to print debugging statements

    Returns
    -------
    base_name : str or unicode
        Base name of the new group with the next available index as a suffix
    """
    if not isinstance(h5_parent_group, h5py.Group):
        raise TypeError('h5_parent_group should be a h5py.Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')
    if len(base_name) == 0:
        raise ValueError('base_name should not be an empty string')
    # The numeric index is always separated from the prefix by an underscore
    if not base_name.endswith('_'):
        base_name += '_'
    temp = [key for key in h5_parent_group.keys()]
    if verbose:
        print('Looking for group names starting with {} in parent containing items: '
              '{}'.format(base_name, temp))
    # Collect the indices already used by groups sharing this prefix
    previous_indices = np.sort([int(key.replace(base_name, ''))
                                for key in temp
                                if isinstance(h5_parent_group[key], h5py.Group)
                                and key.startswith(base_name)])
    if verbose:
        print('indices of existing groups with the same prefix: {}'.format(previous_indices))
    next_index = 0 if len(previous_indices) == 0 else previous_indices[-1] + 1
    return base_name + '{:03d}'.format(next_index)
def create_indexed_group(h5_parent_group, base_name):
    """
    Creates a new group under ``h5_parent_group`` whose name is ``base_name``
    followed by the next available index (eg - 'Measurement_012').

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`
        File or group within which the new group will be created
    base_name : str or unicode
        Prefix for the group name. This need not end with a '_'. It will be added automatically

    Returns
    -------
    h5py.Group
        Newly created, indexed group
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')
    # Pick the next free index for this prefix and create the group there
    h5_new_group = h5_parent_group.create_group(
        assign_group_index(h5_parent_group, base_name))
    # Stamp the standard book-keeping attributes onto the fresh group
    write_book_keeping_attrs(h5_new_group)
    return h5_new_group
def create_results_group(h5_main, tool_name, h5_parent_group=None):
    """
    Creates an auto-indexed group named 'DatasetName-ToolName_00x' for
    storing the results of a process / analysis applied to a dataset.

    Parameters
    ----------
    h5_main : h5py.Dataset object
        Reference to the dataset based on which the process / analysis is being performed
    tool_name : string / unicode
        Name of the Process / Analysis applied to h5_main
    h5_parent_group : h5py.Group, optional. Default = None
        Parent group under which the results group will be created. Use this
        option to write results into a new HDF5 file. By default, results will
        be written into the same group containing `h5_main`

    Returns
    -------
    h5_group : :class:`h5py.Group`
        Results group which can now house the results datasets
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    if h5_parent_group is None:
        h5_parent_group = h5_main.parent
    elif not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    # '-' separates the source name from the tool name in the group name,
    # so it cannot appear inside the tool name itself
    if '-' in tool_name:
        warn('tool_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(tool_name, tool_name.replace('-', '_')))
        tool_name = tool_name.replace('-', '_')
    group_name = assign_group_index(
        h5_parent_group, h5_main.name.split('/')[-1] + '-' + tool_name + '_')
    h5_group = h5_parent_group.create_group(group_name)
    write_book_keeping_attrs(h5_group)
    # Also add some basic attributes like source and tool name. This will allow relaxation of nomenclature restrictions:
    # this are NOT being used right now but will be in the subsequent versions of pyUSID
    write_simple_attrs(h5_group, {'tool': tool_name, 'num_source_dsets': 1})
    # in this case, there is only one source. Object references cannot cross
    # HDF5 file boundaries, so the link is made only within the same file
    if h5_parent_group.file == h5_main.file:
        h5_group.attrs['source_' + '{:03d}'.format(0)] = h5_main.ref
    return h5_group
def copy_main_attributes(h5_main, h5_new):
    """
    Copies the mandatory 'quantity' and 'units' attributes from one dataset
    to another.

    Parameters
    ----------
    h5_main : h5py.Dataset
        Dataset containing the target attributes
    h5_new : h5py.Dataset
        Dataset to which the target attributes are to be copied
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    if not isinstance(h5_new, h5py.Dataset):
        raise TypeError('h5_new should be a h5py.Dataset object')
    for att_name in ('quantity', 'units'):
        if att_name not in h5_main.attrs:
            raise KeyError('Attribute: {} does not exist in {}'.format(att_name, h5_main))
        # Sanitize the string value before writing it to the destination
        h5_new.attrs[att_name] = clean_string_att(get_attr(h5_main, att_name))
def create_empty_dataset(source_dset, dtype, dset_name, h5_group=None,
                         new_attrs=None, skip_refs=False):
    """
    Creates an empty dataset in the h5 file based on the provided dataset in
    the same or specified group
    Parameters
    ----------
    source_dset : h5py.Dataset object
        Source object that provides information on the group and shape of the dataset
    dtype : dtype
        Data type of the fit / guess datasets
    dset_name : String / Unicode
        Name of the dataset
    h5_group : :class:`h5py.Group`, optional. Default = None
        Group within which this dataset will be created
    new_attrs : dictionary (Optional)
        Any new attributes that need to be written to the dataset
    skip_refs : boolean, optional
        Should ObjectReferences be skipped when copying attributes from the
        `source_dset`
    Returns
    -------
    h5_new_dset : h5py.Dataset object
        Newly created dataset
    """
    if not isinstance(source_dset, h5py.Dataset):
        # NOTE(review): 'source_deset' in this message is a typo of
        # 'source_dset' (left unchanged here - message text is behavior)
        raise TypeError('source_deset should be a h5py.Dataset object')
    _ = validate_dtype(dtype)
    if new_attrs is not None:
        if not isinstance(new_attrs, dict):
            raise TypeError('new_attrs should be a dictionary')
    else:
        new_attrs = dict()
    if h5_group is None:
        # Default: create the new dataset next to the source dataset
        h5_group = source_dset.parent
    else:
        if not isinstance(h5_group, (h5py.Group, h5py.File)):
            raise TypeError('h5_group should be a h5py.Group or h5py.File object')
        if source_dset.file != h5_group.file and not skip_refs:
            # Cannot carry over references
            warn('H5 object references will not be copied over since {} is in '
                 'a different HDF5 file as {}'.format(h5_group, source_dset))
            skip_refs = True
    dset_name = validate_single_string_arg(dset_name, 'dset_name')
    # '-' is reserved as the separator in results-group names
    if '-' in dset_name:
        warn('dset_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(dset_name, dset_name.replace('-', '_')))
    dset_name = dset_name.replace('-', '_')
    # Mirror the source's shape, compression and chunking; only dtype changes
    kwargs = {'shape': source_dset.shape, 'dtype': dtype, 'compression': source_dset.compression,
              'chunks': source_dset.chunks}
    if source_dset.file.driver == 'mpio':
        # Parallel HDF5 (mpio driver) cannot create compressed datasets
        if kwargs.pop('compression', None) is not None:
            warn('This HDF5 file has been opened wth the "mpio" communicator. '
                 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
    if dset_name in h5_group.keys():
        if isinstance(h5_group[dset_name], h5py.Dataset):
            warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))
            h5_new_dset = h5_group[dset_name]
            # Make sure it has the correct shape and dtype
            if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):
                warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '
                     'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,
                                                                                                source_dset.shape,
                                                                                                h5_new_dset.dtype,
                                                                                                dtype))
                # Incompatible existing dataset: delete and recreate in place
                del h5_new_dset, h5_group[dset_name]
                h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
        else:
            raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),
                                                                    h5_group.name))
    else:
        h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
    # This should link the ancillary datasets correctly
    h5_new_dset = hut.copy_attributes(source_dset, h5_new_dset,
                                      skip_refs=skip_refs)
    if source_dset.file != h5_group.file:
        # Cross-file case: re-create links to the source's linked objects
        hut.copy_linked_objects(source_dset, h5_new_dset)
    h5_new_dset.attrs.update(new_attrs)
    if check_if_main(h5_new_dset):
        from ..usi_data import USIDataset
        h5_new_dset = USIDataset(h5_new_dset)
        # update book keeping attributes
        write_book_keeping_attrs(h5_new_dset)
    return h5_new_dset
def check_for_matching_attrs(h5_obj, new_parms=None, verbose=False):
    """
    Compares attributes in the given H5 object against those in the provided dictionary and returns True if
    the parameters match, and False otherwise

    Parameters
    ----------
    h5_obj : h5py object (Dataset or :class:`h5py.Group`)
        Object whose attributes will be compared against new_parms
    new_parms : dict, optional. default = empty dictionary
        Parameters to compare against the attributes present in h5_obj
    verbose : bool, optional, default = False
        Whether or not to print debugging statements

    Returns
    -------
    tests: bool
        Whether or not all paramters in new_parms matched with those in h5_obj's attributes
    """
    # collections.Iterable was removed in Python 3.10; the abc submodule is
    # the supported home for the ABCs. Imported locally since the module's
    # top-level import block is not guaranteed to expose collections.abc.
    from collections.abc import Iterable
    if not isinstance(h5_obj, (h5py.Dataset, h5py.Group, h5py.File)):
        raise TypeError('h5_obj should be a h5py.Dataset, h5py.Group, or h5py.File object')
    if new_parms is None:
        new_parms = dict()
    else:
        if not isinstance(new_parms, dict):
            raise TypeError('new_parms should be a dictionary')
    tests = []
    for key in new_parms.keys():
        if verbose:
            print('Looking for new attribute named: {}'.format(key))
        # HDF5 cannot store None as an attribute anyway. ignore
        if new_parms[key] is None:
            continue
        try:
            old_value = get_attr(h5_obj, key)
        except KeyError:
            # if parameter was not found assume that something has changed
            if verbose:
                print('New parm: {} \t- new parm not in group *****'.format(key))
            tests.append(False)
            break
        if isinstance(old_value, np.ndarray):
            # Array-valued attribute: the new value must be iterable too
            if not isinstance(new_parms[key], Iterable):
                if verbose:
                    print('New parm: {} \t- new parm not iterable unlike old parm *****'.format(key))
                tests.append(False)
                break
            new_array = np.array(new_parms[key])
            if old_value.size != new_array.size:
                if verbose:
                    print('New parm: {} \t- are of different sizes ****'.format(key))
                tests.append(False)
            else:
                try:
                    answer = np.allclose(old_value, new_array)
                except TypeError:
                    # comes here when comparing string arrays
                    # Not sure of a better way
                    answer = []
                    for old_val, new_val in zip(old_value, new_array):
                        answer.append(old_val == new_val)
                    answer = np.all(answer)
                if verbose:
                    print('New parm: {} \t- match: {}'.format(key, answer))
                tests.append(answer)
        else:
            # Scalar attribute: rely on numpy's broadcasting equality
            answer = np.all(new_parms[key] == old_value)
            if verbose:
                print('New parm: {} \t- match: {}'.format(key, answer))
            tests.append(answer)
    if verbose:
        print('')
    # all() of an empty list is True - an empty new_parms always matches
    return all(tests)
def write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=False, base_name=None,
                        slow_to_fast=False):
    """
    Creates h5py.Datasets for the position OR spectroscopic indices and values of the data.
    Remember that the contents of the dataset can be changed if need be after the creation of the datasets.
    For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific
    dimension in the Spectroscopic_Values dataset can be manually overwritten.
    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`
        Group under which the indices and values datasets will be created
    dimensions : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
    is_spectral : bool, optional. default = True
        Spectroscopic (True) or Position (False)
    verbose : Boolean, optional
        Whether or not to print statements for debugging purposes
    base_name : str or unicode, optional
        Prefix for the datasets. Default: 'Position' when is_spectral is False, 'Spectroscopic' otherwise
    slow_to_fast : bool, Optional. Default=False
        Set to True if the dimensions are arranged from slowest varying to fastest varying.
        Set to False otherwise.
    Returns
    -------
    h5_spec_inds : h5py.Dataset
        Dataset containing the position indices
    h5_spec_vals : h5py.Dataset
        Dataset containing the value at each position
    Notes
    -----
    `steps`, `initial_values`, `labels`, and 'units' must be the same length as
    `dimensions` when they are specified.
    Dimensions should be in the order from fastest varying to slowest.
    """
    # Normalize a single Dimension to a list and validate the sequence
    if isinstance(dimensions, Dimension):
        dimensions = [dimensions]
    if not isinstance(dimensions, (list, np.ndarray, tuple)):
        raise TypeError('dimensions should be array-like ')
    if not np.all([isinstance(x, Dimension) for x in dimensions]):
        raise TypeError('dimensions should be a sequence of Dimension objects')
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    if not is_editable_h5(h5_parent_group):
        raise ValueError('The provided h5 object is not valid / open')
    if base_name is not None:
        base_name = validate_single_string_arg(base_name, 'base_name')
        if not base_name.endswith('_'):
            base_name += '_'
    else:
        # Default prefix depends on the kind of ancillary datasets requested
        base_name = 'Position_'
        if is_spectral:
            base_name = 'Spectroscopic_'
    if not slow_to_fast:
        warn('In the future write_ind_val_dsets will default to requiring dimensions to be arranged from slowest to fastest varying')
    # check if the datasets already exist. If they do, there's no point in going any further
    for sub_name in ['Indices', 'Values']:
        if base_name + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,
                                                                                     h5_parent_group.name))
    # All dimensions must share the same mode (DEFAULT / INCOMPLETE / ...)
    modes = [dim.mode for dim in dimensions]
    sing_mode = np.unique(modes)
    if sing_mode.size > 1:
        raise NotImplementedError('Cannot yet work on combinations of modes for Dimensions. Consider doing manually')
    sing_mode = sing_mode[0]
    if sing_mode == DimType.DEFAULT:
        if slow_to_fast:
            # Ensure that the dimensions are arranged from fast to slow instead
            dimensions = dimensions[::-1]
        indices, values = build_ind_val_matrices([dim.values for dim in dimensions],
                                                 is_spectral=is_spectral)
        # At this point, dimensions and unit values are arranged from fastest to slowest
        # We want dimensions to be arranged from slowest to fastest:
        rev_func = np.flipud if is_spectral else np.fliplr
        dimensions = dimensions[::-1]
        indices = rev_func(indices)
        values = rev_func(values)
    elif sing_mode == DimType.INCOMPLETE:
        # INCOMPLETE dimensions are not a dense grid: every dimension lists
        # its value at each step, so all must have the same number of steps
        lengths = np.unique([len(dim.values) for dim in dimensions])
        if len(lengths) > 1:
            raise ValueError('Values for dimensions not of same length')
        single_dim = np.arange(lengths[0], dtype=INDICES_DTYPE)
        indices = np.tile(single_dim, (2, 1)).T
        values = np.dstack(tuple([dim.values for dim in dimensions])).squeeze()
        if is_spectral:
            indices = indices.T
            values = values.T
    else:
        raise NotImplementedError('Cannot yet work on Dependent dimensions')
    if verbose:
        print('Indices:')
        print(indices)
        print('Values:')
        print(values)
    # Create the Datasets for both Indices and Values
    h5_indices = h5_parent_group.create_dataset(base_name + 'Indices', data=INDICES_DTYPE(indices), dtype=INDICES_DTYPE)
    h5_values = h5_parent_group.create_dataset(base_name + 'Values', data=VALUES_DTYPE(values), dtype=VALUES_DTYPE)
    # Both datasets carry identical labels / units / type attributes
    for h5_dset in [h5_indices, h5_values]:
        write_simple_attrs(h5_dset, {'units': [x.units for x in dimensions], 'labels': [x.name for x in dimensions],
                                     'type': [dim.mode.value for dim in dimensions]})
    warn('pyUSID.io.hdf_utils.simple.write_ind_val_dsets no longer creates'
         'region references for each dimension. Please use '
         'pyUSID.io.reg_ref.write_region_references to manually create region '
         'references')
    return h5_indices, h5_values
def write_reduced_anc_dsets(h5_parent_group, h5_inds, h5_vals, dim_name, basename=None, is_spec=None,
                            verbose=False):
    """
    Creates new Ancillary Indices and Values datasets from the input datasets
    by dropping the specified dimensions.

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or h5py.File
        Group under which the indices and values datasets will be created
    h5_inds : HDF5 Dataset
        Spectroscopic or Positions indices dataset
    h5_vals : HDF5 Dataset
        Spectroscopic or Positions values dataset
    dim_name : str or unicode or list of strings
        Names of the dimension(s) to remove
    basename : str or unicode, Optional
        String to which '_Indices' and '_Values' will be appended to get the names of the new datasets.
        Default = 'Position' or 'Spectroscopic'
    is_spec : bool, optional
        Whether or not the provided ancillary datasets are position or spectroscopic.
        The user is recommended to supply this parameter whenever it is known or possible.
        By default, this function will attempt to recognize the answer based on the shape of the datasets.
    verbose : bool, optional. Default = False
        Whether or not to print debugging print statements

    Returns
    -------
    h5_inds_new : h5py.Dataset
        Reduced indices dataset
    h5_vals_new : h5py.Dataset
        Reduced values dataset
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should either be a h5py. Group or File object')
    for param, param_name in zip([h5_inds, h5_vals], ['h5_inds', 'h5_vals']):
        if not isinstance(param, h5py.Dataset):
            raise TypeError(param_name + ' should be a h5py.Dataset object')
    if dim_name is not None:
        dim_name = validate_list_of_strings(dim_name, 'dim_name')

    # Every requested dimension must exist in the ancillary dataset's labels
    all_dim_names = list(get_attr(h5_inds, 'labels'))
    for item in dim_name:
        if item not in all_dim_names:
            raise KeyError('Requested dimension: {} not in the list of labels: {}'.format(item, all_dim_names))
    ind_mat = h5_inds[()]
    val_mat = h5_vals[()]
    if is_spec is None:
        # Attempt to recognize the type automatically
        is_spec = False
        if ind_mat.shape[0] == ind_mat.shape[1]:
            raise ValueError('Unable automatically guess whether the provided datasets are position or '
                             'spectroscopic. Please explicitely specify via the "is_spec" boolean kwarg')
        if ind_mat.shape[0] < ind_mat.shape[1]:
            is_spec = True
    else:
        if not isinstance(is_spec, bool):
            raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))
    if basename is not None:
        basename = validate_single_string_arg(basename, 'basename')
        if basename.endswith('_'):
            basename = basename[:-1]
    else:
        if is_spec:
            basename = 'Spectroscopic'
        else:
            basename = 'Position'
    # Refuse to clobber datasets that already exist under the parent group
    for sub_name in ['_Indices', '_Values']:
        if basename + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(basename + sub_name,
                                                                                     h5_parent_group.name))
    if set(dim_name) != set(all_dim_names):
        # At least one dimension will remain
        if verbose:
            print('All Dimensions: {}. Dimensions to be removed: {}'.format(all_dim_names, dim_name))
        if not is_spec:
            # Convert to spectral shape
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        # For all dimensions, find where the index = 0
        # basically, we are indexing all dimensions to 0
        first_indices = []
        keep_dim = np.ones(len(all_dim_names), dtype=bool)
        for cur_dim in dim_name:
            dim_ind = all_dim_names.index(cur_dim)
            keep_dim[dim_ind] = False
            # check equality against the minimum value instead of 0 to account for cases when a dimension does not start
            # from 0 (already been sliced) - think of multi-dimensional slicing!
            first_indices.append(ind_mat[dim_ind] == np.min(ind_mat[dim_ind]))
        first_indices = np.vstack(first_indices)
        if verbose:
            print('Raw first_indices:')
            print(first_indices)
            print('Dimensions to keep: {}'.format(keep_dim))
        step_starts = np.all(first_indices, axis=0)
        if verbose:
            print('Columns in dataset to keep:')
            print(step_starts)
        '''
        Extract all rows that we want to keep from input indices and values
        '''
        # TODO: handle TypeError: Indexing elements must be in increasing order
        ind_mat = ind_mat[keep_dim, :][:, step_starts]
        val_mat = val_mat[keep_dim, :][:, step_starts]
        if not is_spec:
            # Convert back to position shape
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        '''
        Create new Datasets to hold the data
        Name them based on basename
        '''
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=ind_mat, dtype=h5_inds.dtype)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=val_mat, dtype=h5_vals.dtype)
        # Extracting the labels from the original spectroscopic data sets
        labels = h5_inds.attrs['labels'][keep_dim]
        # Creating the dimension slices for the new spectroscopic data sets
        # Adding the labels and units to the new spectroscopic data sets
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': labels, 'units': h5_inds.attrs['units'][keep_dim]})
    else:
        # Remove all dimensions:
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=np.array([[0]]), dtype=INDICES_DTYPE)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=np.array([[0]]), dtype=VALUES_DTYPE)
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': ['Single_Step'], 'units': ['a. u.']})
    return h5_inds_new, h5_vals_new
| 39.325799 | 133 | 0.639322 |
from __future__ import division, print_function, absolute_import, unicode_literals
import collections
from warnings import warn
import sys
import h5py
import numpy as np
import dask.array as da
from sidpy.hdf.hdf_utils import get_auxiliary_datasets, link_h5_obj_as_alias, \
write_simple_attrs, is_editable_h5, validate_h5_objs_in_same_h5_file, \
get_attr
from sidpy.hdf.dtype_utils import validate_dtype
from sidpy.hdf import hdf_utils as hut
from sidpy.base.string_utils import validate_single_string_arg, validate_list_of_strings
from sidpy.base.num_utils import contains_integers
from sidpy.base.string_utils import clean_string_att
from ..anc_build_utils import build_ind_val_matrices, INDICES_DTYPE, VALUES_DTYPE
from ..dimension import DimType, Dimension
from .base import write_book_keeping_attrs
# Py2/3 compatibility shim: make the name `unicode` available on Python 3
if sys.version_info.major == 3:
    unicode = str
def get_all_main(parent, verbose=False):
    """
    Recursively finds every `Main` USID dataset within the given HDF5 object.

    Parameters
    ----------
    parent : h5py.Group or h5py.File
        Object whose children will be searched (the visit is recursive).
    verbose : bool, optional. Default = False
        Whether to print debugging statements for every visited object.

    Returns
    -------
    list of USIDataset
        All datasets under ``parent`` that satisfy ``check_if_main``.
    """
    if not isinstance(parent, (h5py.Group, h5py.File)):
        raise TypeError('parent should be a h5py.File or h5py.Group object')
    from ..usi_data import USIDataset
    main_list = list()

    def __check(name, obj):
        # Visitor callback for h5py's visititems(); appends `Main` datasets
        # to the enclosing main_list
        if verbose:
            print(name, obj)
        if isinstance(obj, h5py.Dataset):
            if verbose:
                print(name, 'is an HDF5 Dataset.')
            ismain = check_if_main(obj)
            if ismain:
                if verbose:
                    print(name, 'is a `Main` dataset.')
                main_list.append(USIDataset(obj))
    if verbose:
        print('Checking the group {} for `Main` datasets.'.format(parent.name))
    parent.visititems(__check)
    return main_list
def find_dataset(h5_group, dset_name):
    """
    Finds all datasets named ``dset_name`` within ``h5_group``, upgrading
    each hit to a ``USIDataset`` whenever it qualifies as a `Main` dataset.

    Returns
    -------
    list
        USIDataset objects for `Main` datasets, plain h5py.Dataset otherwise.
    """
    from ..usi_data import USIDataset

    def _promote(h5_obj):
        # Non-Main datasets make the USIDataset constructor raise TypeError;
        # those are returned unchanged.
        try:
            return USIDataset(h5_obj)
        except TypeError:
            return h5_obj

    return [_promote(obj) for obj in hut.find_dataset(h5_group, dset_name)]
def find_results_groups(h5_main, tool_name, h5_parent_group=None):
    """
    Finds all groups under ``h5_parent_group`` whose names contain both the
    name of the source dataset ``h5_main`` and ``tool_name`` — i.e. groups
    holding results of earlier applications of that tool.
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')

    if h5_parent_group is None:
        h5_parent_group = h5_main.parent
    elif not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")

    dset_name = h5_main.name.split('/')[-1]
    # Results groups follow the "<source dataset>-<tool name>_NNN" convention,
    # so a substring match on both parts identifies them.
    return [h5_parent_group[key] for key in h5_parent_group.keys()
            if dset_name in key and tool_name in key
            and isinstance(h5_parent_group[key], h5py.Group)]
def check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):
    """
    Safely links ancillary datasets to ``h5_dset`` as attributes named after
    ``anc_names``.

    The references may be supplied directly via ``anc_refs`` (HDF5 objects or
    ``h5py.Reference`` instances) or borrowed from an existing dataset
    ``h5_main`` that already carries attributes with the same names. At least
    one of ``h5_main`` / ``anc_refs`` must be provided.
    """
    if not isinstance(h5_dset, h5py.Dataset):
        raise TypeError('h5_dset should be a h5py.Dataset object')
    # Normalize single name / single reference to one-element lists
    if isinstance(anc_names, (str, unicode)):
        anc_names = [anc_names]
    if isinstance(anc_refs, (h5py.Dataset, h5py.Group, h5py.File,
                             h5py.Reference)):
        anc_refs = [anc_refs]
    if not isinstance(anc_names, (list, tuple)):
        raise TypeError('anc_names should be a list / tuple')
    if h5_main is not None:
        if not isinstance(h5_main, h5py.Dataset):
            raise TypeError('h5_main should be a h5py.Dataset object')
        validate_h5_objs_in_same_h5_file(h5_dset, h5_main)
    if anc_refs is not None:
        if not isinstance(anc_refs, (list, tuple)):
            raise TypeError('anc_refs should be a list / tuple')
    if anc_refs is None and h5_main is None:
        raise ValueError('No objected provided to link as ancillary')

    def __check_and_link_single(h5_obj_ref, target_ref_name):
        # Links one reference under the attribute name target_ref_name;
        # falls back to copying the same-named attribute from h5_main
        if isinstance(h5_obj_ref, h5py.Reference):
            h5_dset.attrs[target_ref_name] = h5_obj_ref
        elif isinstance(h5_obj_ref, (h5py.Dataset, h5py.Group, h5py.File)):
            validate_h5_objs_in_same_h5_file(h5_obj_ref, h5_dset)
            h5_dset.attrs[target_ref_name] = h5_obj_ref.ref
        elif h5_main is not None:
            h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])
            if len(h5_anc) == 1:
                link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)
        else:
            warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))
            warn(warnstring)

    if bool(np.iterable(anc_refs) and not isinstance(anc_refs, h5py.Dataset)):
        for ref_name, h5_ref in zip(anc_names, anc_refs):
            __check_and_link_single(h5_ref, ref_name)
    elif anc_refs is not None:
        """
        anc_refs is just a single value
        """
        __check_and_link_single(anc_refs, anc_names)
    elif isinstance(anc_names, str) or isinstance(anc_names, unicode):
        """
        Single name provided
        """
        __check_and_link_single(None, anc_names)
    else:
        """
        Iterable of names provided
        """
        for name in anc_names:
            __check_and_link_single(None, name)
    h5_dset.file.flush()
def validate_main_dset(h5_main, must_be_h5):
    """
    Verifies that ``h5_main`` is a 2D `Main`-like data object.

    When ``must_be_h5`` is truthy the object must be an ``h5py.Dataset``;
    otherwise an in-memory numpy or dask array is expected. Raises
    ``TypeError`` on a type mismatch and ``ValueError`` when the data is not
    two-dimensional.
    """
    if must_be_h5 and not isinstance(h5_main, h5py.Dataset):
        raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))
    if not must_be_h5 and not isinstance(h5_main, (np.ndarray, da.core.Array)):
        raise TypeError('raw_data should either be a np.ndarray or a da.core.Array')
    if len(h5_main.shape) != 2:
        raise ValueError('Main data is not 2D. Provided object has '
                         'shape: {}'.format(h5_main.shape))
def validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):
    """
    Checks that a pair of ancillary Indices / Values datasets are consistent
    with each other and with the shape of the main dataset.

    Parameters
    ----------
    h5_inds : h5py.Dataset
        Indices dataset
    h5_vals : h5py.Dataset
        Values dataset
    main_shape : list or tuple
        2-element shape of the main dataset
    is_spectroscopic : bool, optional. Default = True
        True when validating spectroscopic datasets, False for position

    Raises
    ------
    TypeError or ValueError when the objects or shapes are inconsistent.
    """
    if not isinstance(h5_inds, h5py.Dataset):
        raise TypeError('h5_inds must be a h5py.Dataset object')
    if not isinstance(h5_vals, h5py.Dataset):
        raise TypeError('h5_vals must be a h5py.Dataset object')
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '
                         'shape'.format(h5_inds.shape, h5_vals.shape))
    if isinstance(main_shape, (list, tuple)):
        if not contains_integers(main_shape, min_val=1) or \
                len(main_shape) != 2:
            raise ValueError("'main_shape' must be a valid HDF5 dataset shape")
    else:
        raise TypeError('main_shape should be of the following types:'
                        'h5py.Dataset, tuple, or list. {} provided'
                        ''.format(type(main_shape)))
    # The bool picks the relevant axis: 1 for spectroscopic, 0 for position
    if h5_inds.shape[is_spectroscopic] != main_shape[is_spectroscopic]:
        raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '
                         'should be equal'.format(int(is_spectroscopic),
                                                  h5_inds.shape, main_shape))
def validate_dims_against_main(main_shape, dims, is_spectroscopic=True):
    """
    Checks that the product of the sizes of the given Dimension objects
    matches the corresponding axis of the 2D main dataset shape.

    Parameters
    ----------
    main_shape : list or tuple
        2-element shape of the main dataset.
    dims : Dimension or list / tuple of Dimension
        Dimension objects expected to span one axis of the main dataset.
    is_spectroscopic : bool, optional. Default = True
        True to validate against the spectroscopic (second) axis,
        False for the position (first) axis.

    Raises
    ------
    TypeError
        If ``main_shape`` or ``dims`` are of the wrong types.
    ValueError
        If ``main_shape`` is not of length 2 or the sizes do not match.
    """
    if not isinstance(main_shape, (list, tuple)):
        raise TypeError('main_shape should be a list or tuple. Provided object'
                        ' was of type: {}'.format(type(main_shape)))
    if len(main_shape) != 2:
        raise ValueError('"main_shape" should be of length 2')
    contains_integers(main_shape, min_val=1)
    if isinstance(dims, Dimension):
        dims = [dims]
    elif not isinstance(dims, (list, tuple)):
        raise TypeError('"dims" must be a list or tuple of usid.Dimension '
                        'objects. Provided object was of type: {}'
                        ''.format(type(dims)))
    if not all([isinstance(obj, Dimension) for obj in dims]):
        raise TypeError('One or more objects in "dims" was not usid.Dimension')
    if is_spectroscopic:
        main_dim = 1
        dim_category = 'Spectroscopic'
    else:
        main_dim = 0
        dim_category = 'Position'
    lhs = main_shape[main_dim]
    # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0
    rhs = np.prod([len(x.values) for x in dims])
    if lhs != rhs:
        raise ValueError(dim_category +
                         ' dimensions in main data of size: {} do not match '
                         'with product of values in provided Dimension objects'
                         ': {}'.format(lhs, rhs))
def check_if_main(h5_main, verbose=False):
    """
    Checks whether the provided dataset qualifies as a `Main` USID dataset:
    2D, linked to the four ancillary datasets of consistent shapes and
    attributes, and carrying the mandatory 'quantity' and 'units' attributes.

    Parameters
    ----------
    h5_main : h5py.Dataset
        Dataset to inspect
    verbose : bool, optional. Default = False
        Whether to print the reason for any failed criterion

    Returns
    -------
    bool
        True only when every criterion is satisfied
    """
    try:
        validate_main_dset(h5_main, True)
    except Exception as exep:
        if verbose:
            print(exep)
        return False
    h5_name = h5_main.name.split('/')[-1]
    success = True
    # All four ancillary references must resolve to actual HDF5 datasets
    dset_names = ['Position_Indices', 'Position_Values',
                  'Spectroscopic_Indices', 'Spectroscopic_Values']
    for name in dset_names:
        try:
            h5_anc_dset = h5_main.file[h5_main.attrs[name]]
            success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])
        except:
            if verbose:
                print('{} not found as an attribute of {}.'.format(name, h5_name))
            return False
    # Mandatory string attributes
    attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])
    if not attr_success:
        if verbose:
            print('{} does not have the mandatory "quantity" and "units" attributes'.format(h5_main.name))
        return False
    for attr_name in ['quantity', 'units']:
        val = get_attr(h5_main, attr_name)
        if not isinstance(val, (str, unicode)):
            if verbose:
                print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))
            return False
    # Position datasets: shapes must agree with each other and with axis 0
    anc_shape_match = list()
    h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]
    h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]
    anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))
    for anc_dset in [h5_pos_vals, h5_pos_inds]:
        anc_shape_match.append(np.all(h5_main.shape[0] == anc_dset.shape[0]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Position indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_pos_inds.shape, h5_pos_vals.shape, h5_main.shape))
        return False
    # Spectroscopic datasets: shapes must agree with each other and with axis 1
    anc_shape_match = list()
    h5_spec_inds = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
    h5_spec_vals = h5_main.file[h5_main.attrs['Spectroscopic_Values']]
    anc_shape_match.append(np.all(h5_spec_inds.shape == h5_spec_vals.shape))
    for anc_dset in [h5_spec_inds, h5_spec_vals]:
        anc_shape_match.append(np.all(h5_main.shape[1] == anc_dset.shape[1]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Spectroscopic indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_spec_inds.shape, h5_spec_vals.shape, h5_main.shape))
        return False
    # Finally, the labels / units attributes of each pair must be consistent
    try:
        validate_anc_dset_attrs(h5_pos_inds, h5_pos_vals, is_spec=False)
    except ValueError:
        if verbose:
            print('Attributes of Position datasets did not match')
        return False
    try:
        validate_anc_dset_attrs(h5_spec_inds, h5_spec_vals, is_spec=True)
    except ValueError:
        if verbose:
            print('Attributes of Spectroscopic datasets did not match')
        return False
    return success
def validate_anc_dset_attrs(h5_inds, h5_vals, is_spec=True):
    """
    Validates that the mandatory attributes ('labels' and 'units') of a pair
    of ancillary Indices / Values datasets match each other and the shape of
    the datasets. Raises ValueError on any inconsistency.
    """
    def lists_match(left, right):
        # Element-wise equality of two sequences of the same length
        if len(left) != len(right):
            return False
        return all([l_it == r_it for l_it, r_it in zip(left, right)])

    v_names = get_attr(h5_vals, 'labels')
    v_units = get_attr(h5_vals, 'units')
    i_names = get_attr(h5_inds, 'labels')
    i_units = get_attr(h5_inds, 'units')
    # Each dataset must have as many units as labels
    for names, units, dset_type in zip([v_names, i_names], [v_units, i_units],
                                       ['Values', 'Indices']):
        if len(names) != len(units):
            raise ValueError('Length of labels: {} and units: {} for the {} '
                             'dataset do not match'
                             ''.format(len(names), len(units), dset_type))
    # Indices and Values must agree on both labels and units
    for i_item, v_item, prop in zip([i_names, i_units], [v_names, v_units],
                                    ['labels', 'units']):
        if not lists_match(i_item, v_item):
            raise ValueError('The "{}" values of the Indices: {} and Values: '
                             '{} datasets do not match'.format(prop, i_item,
                                                               v_item))
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('Shape of Indices: {} and Values: {} datasets do '
                         'not match'.format(h5_inds.shape, h5_vals.shape))
    # Dimension labels run along axis 0 for spectroscopic, axis 1 for position
    dim_ind = 1
    if is_spec:
        dim_ind = 0
    if h5_inds.shape[dim_ind] != len(v_names):
        raise ValueError('Length of mandatory attributes: {} did not match '
                         'dimension: {} of the ancillary dataset of shape: {}'
                         ''.format(len(v_names), dim_ind, h5_inds.shape))
def link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals):
    """
    Links the four ancillary datasets to ``h5_main`` under the standard USID
    attribute names, thereby making it a `Main` dataset.

    Returns
    -------
    USIDataset or h5py.Dataset
        The main dataset, upgraded to a USIDataset when possible.
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')

    # The ancillary pairs must be mutually consistent and match h5_main
    validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, h5_main.shape,
                          is_spectroscopic=False)
    validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, h5_main.shape,
                          is_spectroscopic=True)

    aliases = [(h5_pos_inds, 'Position_Indices'),
               (h5_pos_vals, 'Position_Values'),
               (h5_spec_inds, 'Spectroscopic_Indices'),
               (h5_spec_vals, 'Spectroscopic_Values')]
    for anc_dset, alias in aliases:
        link_h5_obj_as_alias(h5_main, anc_dset, alias)

    from ..usi_data import USIDataset
    try:
        return USIDataset(h5_main)
    except TypeError:
        return h5_main
def check_for_old(h5_base, tool_name, new_parms=None, target_dset=None,
                  h5_parent_goup=None, verbose=False):
    """
    Returns a list of existing results groups that were produced by
    ``tool_name`` on ``h5_base`` with attributes matching ``new_parms``.

    Parameters
    ----------
    h5_base : h5py.Dataset
        Source dataset whose prior results are being looked up
    tool_name : str
        Name of the tool / process that would have created the results
    new_parms : dict, optional. Default = empty dict
        Parameters that the stored attributes must match
    target_dset : str, optional
        When provided, the attributes of this dataset inside each candidate
        group (rather than the group itself) are compared
    h5_parent_goup : h5py.Group or h5py.File, optional. Default = parent of h5_base
        Group to search within.
        NOTE(review): this kwarg name is misspelled ("goup") but is part of
        the public interface and therefore preserved.
    verbose : bool, optional. Default = False
        Whether to print debugging statements

    Returns
    -------
    list of h5py.Group
        Groups whose (or whose target dataset's) attributes match new_parms
    """
    if not isinstance(h5_base, h5py.Dataset):
        raise TypeError('h5_base should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    if h5_parent_goup is not None:
        if not isinstance(h5_parent_goup, (h5py.File, h5py.Group)):
            raise TypeError("'h5_parent_group' should either be a h5py.File "
                            "or h5py.Group object")
    else:
        h5_parent_goup = h5_base.parent
    if new_parms is None:
        new_parms = dict()
    else:
        if not isinstance(new_parms, dict):
            raise TypeError('new_parms should be a dict')
    if target_dset is not None:
        target_dset = validate_single_string_arg(target_dset, 'target_dset')
    matching_groups = []
    groups = find_results_groups(h5_base, tool_name,
                                 h5_parent_group=h5_parent_goup)
    for group in groups:
        if verbose:
            print('Looking at group - {}'.format(group.name.split('/')[-1]))
        # Compare attributes on the group itself unless a target dataset
        # inside the group was requested
        h5_obj = group
        if target_dset is not None:
            if target_dset in group.keys():
                h5_obj = group[target_dset]
            else:
                if verbose:
                    print('{} did not contain the target dataset: {}'.format(group.name.split('/')[-1],
                                                                             target_dset))
                continue
        if check_for_matching_attrs(h5_obj, new_parms=new_parms, verbose=verbose):
            matching_groups.append(group)
    return matching_groups
def get_source_dataset(h5_group):
    """
    Returns the source `Main` dataset from which the results in ``h5_group``
    were computed, based on the "SourceDataset-ProcessName_NNN" group naming
    convention.
    """
    if not isinstance(h5_group, h5py.Group):
        raise TypeError('h5_group should be a h5py.Group object')

    group_name = h5_group.name.split('/')[-1]
    name_split = group_name.split('-')
    # A results group name has exactly one '-' separating source and tool
    if len(name_split) != 2:
        raise ValueError("The provided group's name could not be split by '-' as expected in "
                         "SourceDataset-ProcessName_000")

    h5_source = h5_group.parent[name_split[0]]
    if not isinstance(h5_source, h5py.Dataset):
        raise ValueError('Source object was not a dataset!')

    from ..usi_data import USIDataset
    return USIDataset(h5_source)
def assign_group_index(h5_parent_group, base_name, verbose=False):
    """
    Appends the next available 3-digit index to ``base_name`` so that the
    resulting group name does not collide with any existing sibling group.
    """
    if not isinstance(h5_parent_group, h5py.Group):
        raise TypeError('h5_parent_group should be a h5py.Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')
    if len(base_name) == 0:
        raise ValueError('base_name should not be an empty string')

    if not base_name.endswith('_'):
        base_name += '_'

    existing_names = [key for key in h5_parent_group.keys()]
    if verbose:
        print('Looking for group names starting with {} in parent containing items: '
              '{}'.format(base_name, existing_names))

    # Numeric suffixes already used by sibling groups sharing this prefix
    used_indices = [int(name.replace(base_name, ''))
                    for name in existing_names
                    if isinstance(h5_parent_group[name], h5py.Group)
                    and name.startswith(base_name)]
    if verbose:
        print('indices of existing groups with the same prefix: {}'.format(np.sort(used_indices)))

    next_index = max(used_indices) + 1 if used_indices else 0
    return base_name + '{:03d}'.format(next_index)
def create_indexed_group(h5_parent_group, base_name):
    """
    Creates a new group named ``base_name`` plus the next free 3-digit index
    (e.g. ``Raw_Data_000``) and stamps it with book-keeping attributes.
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')

    indexed_name = assign_group_index(h5_parent_group, base_name)
    new_group = h5_parent_group.create_group(indexed_name)
    write_book_keeping_attrs(new_group)
    return new_group
def create_results_group(h5_main, tool_name, h5_parent_group=None):
    """
    Creates a new indexed group named "<source dataset>-<tool name>_NNN" to
    hold results of applying ``tool_name`` to ``h5_main``, stamps
    book-keeping attributes on it, and links it back to the source dataset.

    Parameters
    ----------
    h5_main : h5py.Dataset
        Source dataset that the results will be derived from
    tool_name : str
        Name of the tool / process ('-' characters are replaced with '_')
    h5_parent_group : h5py.Group or h5py.File, optional. Default = parent of h5_main
        Group under which the results group will be created

    Returns
    -------
    h5py.Group
        The newly created results group
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    if h5_parent_group is not None:
        if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
            raise TypeError("'h5_parent_group' should either be a h5py.File "
                            "or h5py.Group object")
    else:
        h5_parent_group = h5_main.parent
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    # '-' is reserved as the source/tool separator in the group name
    if '-' in tool_name:
        warn('tool_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(tool_name, tool_name.replace('-', '_')))
    tool_name = tool_name.replace('-', '_')
    group_name = h5_main.name.split('/')[-1] + '-' + tool_name + '_'
    group_name = assign_group_index(h5_parent_group, group_name)
    h5_group = h5_parent_group.create_group(group_name)
    write_book_keeping_attrs(h5_group)
    # Also add some basic attributes like source and tool name. This will allow relaxation of nomenclature restrictions:
    # this are NOT being used right now but will be in the subsequent versions of pyUSID
    write_simple_attrs(h5_group, {'tool': tool_name, 'num_source_dsets': 1})
    # in this case, there is only one source
    if h5_parent_group.file == h5_main.file:
        for dset_ind, dset in enumerate([h5_main]):
            h5_group.attrs['source_' + '{:03d}'.format(dset_ind)] = dset.ref
    return h5_group
def copy_main_attributes(h5_main, h5_new):
    """
    Copies the mandatory `Main`-dataset attributes ('quantity' and 'units')
    from ``h5_main`` onto ``h5_new``.
    """
    for dset, dset_name in [(h5_main, 'h5_main'), (h5_new, 'h5_new')]:
        if not isinstance(dset, h5py.Dataset):
            raise TypeError(dset_name + ' should be a h5py.Dataset object')

    for att_name in ['quantity', 'units']:
        if att_name not in h5_main.attrs:
            raise KeyError('Attribute: {} does not exist in {}'.format(att_name, h5_main))
        # clean_string_att normalizes the value before writing it via h5py
        h5_new.attrs[att_name] = clean_string_att(get_attr(h5_main, att_name))
def create_empty_dataset(source_dset, dtype, dset_name, h5_group=None,
                         new_attrs=None, skip_refs=False):
    """
    Creates an empty dataset shaped like ``source_dset`` with the given dtype
    and name, copying over the source's attributes (and ancillary links when
    both live in the same file).

    Parameters
    ----------
    source_dset : h5py.Dataset
        Dataset whose shape, compression and chunking are mirrored
    dtype : dtype
        Data type of the new dataset
    dset_name : str
        Name of the new dataset ('-' characters are replaced with '_')
    h5_group : h5py.Group or h5py.File, optional. Default = parent of source_dset
        Group in which the dataset will be created
    new_attrs : dict, optional
        Additional attributes written onto the new dataset
    skip_refs : bool, optional. Default = False
        Whether to skip copying HDF5 object references

    Returns
    -------
    h5py.Dataset or USIDataset
        The newly created dataset
    """
    if not isinstance(source_dset, h5py.Dataset):
        raise TypeError('source_deset should be a h5py.Dataset object')
    _ = validate_dtype(dtype)
    if new_attrs is not None:
        if not isinstance(new_attrs, dict):
            raise TypeError('new_attrs should be a dictionary')
    else:
        new_attrs = dict()
    if h5_group is None:
        h5_group = source_dset.parent
    else:
        if not isinstance(h5_group, (h5py.Group, h5py.File)):
            raise TypeError('h5_group should be a h5py.Group or h5py.File object')
        if source_dset.file != h5_group.file and not skip_refs:
            # Cannot carry over references
            warn('H5 object references will not be copied over since {} is in '
                 'a different HDF5 file as {}'.format(h5_group, source_dset))
            skip_refs = True
    dset_name = validate_single_string_arg(dset_name, 'dset_name')
    if '-' in dset_name:
        warn('dset_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(dset_name, dset_name.replace('-', '_')))
    dset_name = dset_name.replace('-', '_')
    kwargs = {'shape': source_dset.shape, 'dtype': dtype, 'compression': source_dset.compression,
              'chunks': source_dset.chunks}
    # mpi4py cannot create compressed datasets over the mpio driver
    if source_dset.file.driver == 'mpio':
        if kwargs.pop('compression', None) is not None:
            warn('This HDF5 file has been opened wth the "mpio" communicator. '
                 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
    if dset_name in h5_group.keys():
        if isinstance(h5_group[dset_name], h5py.Dataset):
            warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))
            h5_new_dset = h5_group[dset_name]
            # Make sure it has the correct shape and dtype
            if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):
                warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '
                     'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,
                                                                                                source_dset.shape,
                                                                                                h5_new_dset.dtype,
                                                                                                dtype))
                del h5_new_dset, h5_group[dset_name]
                h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
        else:
            raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),
                                                                    h5_group.name))
    else:
        h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
    # This should link the ancillary datasets correctly
    h5_new_dset = hut.copy_attributes(source_dset, h5_new_dset,
                                      skip_refs=skip_refs)
    if source_dset.file != h5_group.file:
        hut.copy_linked_objects(source_dset, h5_new_dset)
    h5_new_dset.attrs.update(new_attrs)
    if check_if_main(h5_new_dset):
        from ..usi_data import USIDataset
        h5_new_dset = USIDataset(h5_new_dset)
        # update book keeping attributes
        write_book_keeping_attrs(h5_new_dset)
    return h5_new_dset
def check_for_matching_attrs(h5_obj, new_parms=None, verbose=False):
    """
    Compares the attributes of ``h5_obj`` against ``new_parms`` and returns
    True only when every (non-None) entry in ``new_parms`` matches the
    corresponding attribute already stored on the HDF5 object.

    Parameters
    ----------
    h5_obj : h5py.Dataset, h5py.Group or h5py.File
        Object whose attributes will be compared
    new_parms : dict, optional. Default = empty dict
        Parameters to compare against the stored attributes
    verbose : bool, optional. Default = False
        Whether to print debugging statements

    Returns
    -------
    bool
        True when all provided parameters match the stored attributes
    """
    # collections.Iterable was removed in Python 3.10; the abc module is the
    # correct home for the ABC on all supported versions
    from collections.abc import Iterable

    if not isinstance(h5_obj, (h5py.Dataset, h5py.Group, h5py.File)):
        raise TypeError('h5_obj should be a h5py.Dataset, h5py.Group, or h5py.File object')
    if new_parms is None:
        new_parms = dict()
    else:
        if not isinstance(new_parms, dict):
            raise TypeError('new_parms should be a dictionary')
    tests = []
    for key in new_parms.keys():
        if verbose:
            print('Looking for new attribute named: {}'.format(key))
        # HDF5 cannot store None as an attribute anyway. ignore
        if new_parms[key] is None:
            continue
        try:
            old_value = get_attr(h5_obj, key)
        except KeyError:
            # if parameter was not found assume that something has changed
            if verbose:
                print('New parm: {} \t- new parm not in group *****'.format(key))
            tests.append(False)
            break
        if isinstance(old_value, np.ndarray):
            if not isinstance(new_parms[key], Iterable):
                if verbose:
                    print('New parm: {} \t- new parm not iterable unlike old parm *****'.format(key))
                tests.append(False)
                break
            new_array = np.array(new_parms[key])
            if old_value.size != new_array.size:
                if verbose:
                    print('New parm: {} \t- are of different sizes ****'.format(key))
                tests.append(False)
            else:
                try:
                    answer = np.allclose(old_value, new_array)
                except TypeError:
                    # comes here when comparing string arrays
                    # Not sure of a better way
                    answer = []
                    for old_val, new_val in zip(old_value, new_array):
                        answer.append(old_val == new_val)
                    answer = np.all(answer)
                if verbose:
                    print('New parm: {} \t- match: {}'.format(key, answer))
                tests.append(answer)
        else:
            # Scalar / string attribute: plain (broadcast) equality
            answer = np.all(new_parms[key] == old_value)
            if verbose:
                print('New parm: {} \t- match: {}'.format(key, answer))
            tests.append(answer)
    if verbose:
        print('')
    return all(tests)
def write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=False, base_name=None,
                        slow_to_fast=False):
    """
    Creates h5py.Datasets for the position OR spectroscopic indices and
    values of the data from the provided Dimension objects.

    Parameters
    ----------
    h5_parent_group : h5py.Group or h5py.File
        Group under which the Indices and Values datasets will be created
    dimensions : Dimension or array-like of Dimension
        Dimensions spanning the position / spectroscopic axis
    is_spectral : bool, optional. Default = True
        True for spectroscopic datasets, False for position datasets
    verbose : bool, optional. Default = False
        Whether to print debugging statements
    base_name : str, optional. Default = 'Position_' or 'Spectroscopic_'
        Prefix for the names of the new datasets
    slow_to_fast : bool, optional. Default = False
        Whether the provided dimensions are already arranged from slowest to
        fastest varying

    Returns
    -------
    (h5py.Dataset, h5py.Dataset)
        The newly created Indices and Values datasets
    """
    if isinstance(dimensions, Dimension):
        dimensions = [dimensions]
    if not isinstance(dimensions, (list, np.ndarray, tuple)):
        raise TypeError('dimensions should be array-like ')
    if not np.all([isinstance(x, Dimension) for x in dimensions]):
        raise TypeError('dimensions should be a sequence of Dimension objects')
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    if not is_editable_h5(h5_parent_group):
        raise ValueError('The provided h5 object is not valid / open')
    if base_name is not None:
        base_name = validate_single_string_arg(base_name, 'base_name')
        if not base_name.endswith('_'):
            base_name += '_'
    else:
        base_name = 'Position_'
        if is_spectral:
            base_name = 'Spectroscopic_'
    if not slow_to_fast:
        warn('In the future write_ind_val_dsets will default to requiring dimensions to be arranged from slowest to fastest varying')
    # check if the datasets already exist. If they do, there's no point in going any further
    for sub_name in ['Indices', 'Values']:
        if base_name + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,
                                                                                     h5_parent_group.name))
    # All dimensions must share a single mode (DEFAULT / INCOMPLETE / ...)
    modes = [dim.mode for dim in dimensions]
    sing_mode = np.unique(modes)
    if sing_mode.size > 1:
        raise NotImplementedError('Cannot yet work on combinations of modes for Dimensions. Consider doing manually')
    sing_mode = sing_mode[0]
    if sing_mode == DimType.DEFAULT:
        if slow_to_fast:
            # build_ind_val_matrices expects fastest-to-slowest ordering
            dimensions = dimensions[::-1]
        indices, values = build_ind_val_matrices([dim.values for dim in dimensions],
                                                 is_spectral=is_spectral)
        # Results are arranged fastest-to-slowest; flip everything so the
        # written datasets run from slowest to fastest varying
        rev_func = np.flipud if is_spectral else np.fliplr
        dimensions = dimensions[::-1]
        indices = rev_func(indices)
        values = rev_func(values)
    elif sing_mode == DimType.INCOMPLETE:
        # Incomplete dimensions share a single common index axis
        lengths = np.unique([len(dim.values) for dim in dimensions])
        if len(lengths) > 1:
            raise ValueError('Values for dimensions not of same length')
        single_dim = np.arange(lengths[0], dtype=INDICES_DTYPE)
        indices = np.tile(single_dim, (2, 1)).T
        values = np.dstack(tuple([dim.values for dim in dimensions])).squeeze()
        if is_spectral:
            indices = indices.T
            values = values.T
    else:
        raise NotImplementedError('Cannot yet work on Dependent dimensions')
    if verbose:
        print('Indices:')
        print(indices)
        print('Values:')
        print(values)
    # Create the Datasets for both Indices and Values
    h5_indices = h5_parent_group.create_dataset(base_name + 'Indices', data=INDICES_DTYPE(indices), dtype=INDICES_DTYPE)
    h5_values = h5_parent_group.create_dataset(base_name + 'Values', data=VALUES_DTYPE(values), dtype=VALUES_DTYPE)
    for h5_dset in [h5_indices, h5_values]:
        write_simple_attrs(h5_dset, {'units': [x.units for x in dimensions], 'labels': [x.name for x in dimensions],
                                     'type': [dim.mode.value for dim in dimensions]})
    warn('pyUSID.io.hdf_utils.simple.write_ind_val_dsets no longer creates'
         'region references for each dimension. Please use '
         'pyUSID.io.reg_ref.write_region_references to manually create region '
         'references')
    return h5_indices, h5_values
def write_reduced_anc_dsets(h5_parent_group, h5_inds, h5_vals, dim_name, basename=None, is_spec=None,
                            verbose=False):
    """
    Creates new Ancillary Indices and Values datasets from the input datasets
    by dropping the specified dimensions.

    Parameters
    ----------
    h5_parent_group : h5py.Group or h5py.File
        Group under which the indices and values datasets will be created
    h5_inds : h5py.Dataset
        Spectroscopic or Positions indices dataset
    h5_vals : h5py.Dataset
        Spectroscopic or Positions values dataset
    dim_name : str or list of str
        Names of the dimension(s) to remove
    basename : str, optional. Default = 'Position' or 'Spectroscopic'
        String to which '_Indices' and '_Values' will be appended
    is_spec : bool, optional
        Whether the provided ancillary datasets are spectroscopic (True) or
        position (False); guessed from the shapes when not provided
    verbose : bool, optional. Default = False
        Whether to print debugging statements

    Returns
    -------
    (h5py.Dataset, h5py.Dataset)
        The reduced Indices and Values datasets
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should either be a h5py. Group or File object')
    for param, param_name in zip([h5_inds, h5_vals], ['h5_inds', 'h5_vals']):
        if not isinstance(param, h5py.Dataset):
            raise TypeError(param_name + ' should be a h5py.Dataset object')
    if dim_name is not None:
        dim_name = validate_list_of_strings(dim_name, 'dim_name')
    # All requested dimensions must exist among the dataset's labels
    all_dim_names = list(get_attr(h5_inds, 'labels'))
    for item in dim_name:
        if item not in all_dim_names:
            raise KeyError('Requested dimension: {} not in the list of labels: {}'.format(item, all_dim_names))
    ind_mat = h5_inds[()]
    val_mat = h5_vals[()]
    if is_spec is None:
        # Attempt to recognize the type from the shape: spectroscopic
        # datasets are wider than tall
        is_spec = False
        if ind_mat.shape[0] == ind_mat.shape[1]:
            raise ValueError('Unable automatically guess whether the provided datasets are position or '
                             'spectroscopic. Please explicitely specify via the "is_spec" boolean kwarg')
        if ind_mat.shape[0] < ind_mat.shape[1]:
            is_spec = True
    else:
        if not isinstance(is_spec, bool):
            raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))
    if basename is not None:
        basename = validate_single_string_arg(basename, 'basename')
        if basename.endswith('_'):
            basename = basename[:-1]
    else:
        if is_spec:
            basename = 'Spectroscopic'
        else:
            basename = 'Position'
    # Refuse to overwrite existing datasets
    for sub_name in ['_Indices', '_Values']:
        if basename + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(basename + sub_name,
                                                                                     h5_parent_group.name))
    if set(dim_name) != set(all_dim_names):
        # At least one dimension will remain after the reduction
        if verbose:
            print('All Dimensions: {}. Dimensions to be removed: {}'.format(all_dim_names, dim_name))
        if not is_spec:
            # Work in spectroscopic orientation (dims along axis 0)
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        # For each removed dimension, find the columns where its index is at
        # its minimum (not necessarily 0 — the data may have been sliced)
        first_indices = []
        keep_dim = np.ones(len(all_dim_names), dtype=bool)
        for cur_dim in dim_name:
            dim_ind = all_dim_names.index(cur_dim)
            keep_dim[dim_ind] = False
            first_indices.append(ind_mat[dim_ind] == np.min(ind_mat[dim_ind]))
        first_indices = np.vstack(first_indices)
        if verbose:
            print('Raw first_indices:')
            print(first_indices)
            print('Dimensions to keep: {}'.format(keep_dim))
        step_starts = np.all(first_indices, axis=0)
        if verbose:
            print('Columns in dataset to keep:')
            print(step_starts)
        # Keep only the surviving dimensions and the selected columns
        ind_mat = ind_mat[keep_dim, :][:, step_starts]
        val_mat = val_mat[keep_dim, :][:, step_starts]
        if not is_spec:
            # Convert back to position orientation
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=ind_mat, dtype=h5_inds.dtype)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=val_mat, dtype=h5_vals.dtype)
        # Carry over labels / units only for the dimensions that were kept
        labels = h5_inds.attrs['labels'][keep_dim]
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': labels, 'units': h5_inds.attrs['units'][keep_dim]})
    else:
        # All dimensions removed: collapse to a single placeholder step
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=np.array([[0]]), dtype=INDICES_DTYPE)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=np.array([[0]]), dtype=VALUES_DTYPE)
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': ['Single_Step'], 'units': ['a. u.']})
    return h5_inds_new, h5_vals_new
| true | true |
f7334b35d652faa71a488f6a1f1dd8f79a87c2c9 | 3,704 | py | Python | arxivdigest/frontend/database/admin.py | iai-group/arXivDigest | d95f8fdb59aedcc282755f81a6081ca65e7d9450 | [
"MIT"
] | 13 | 2019-11-24T23:05:15.000Z | 2021-09-02T19:36:33.000Z | arxivdigest/frontend/database/admin.py | iai-group/arXivDigest | d95f8fdb59aedcc282755f81a6081ca65e7d9450 | [
"MIT"
] | 99 | 2020-01-08T14:47:57.000Z | 2021-02-25T14:29:50.000Z | arxivdigest/frontend/database/admin.py | iai-group/arXivDigest | d95f8fdb59aedcc282755f81a6081ca65e7d9450 | [
"MIT"
] | 1 | 2018-08-17T07:03:13.000Z | 2018-08-17T07:03:13.000Z | # -*- coding: utf-8 -*-
__author__ = 'Øyvind Jekteberg and Kristian Gingstad'
__copyright__ = 'Copyright 2020, The arXivDigest project'
import datetime
from arxivdigest.frontend.database.db import getDb
def isAdmin(id):
    """Return True if the user with the given user_id is an admin.

    :param id: user_id of the user to check.
    :return: True when the user's ``admin`` column is 1, False otherwise.
    """
    cur = getDb().cursor()
    cur.execute('SELECT admin FROM users where user_id=%s', (id,))
    admin = cur.fetchone()[0]
    cur.close()
    # Bug fix: `admin is 1` compared object identity and only worked because
    # of CPython's small-int caching; use a real equality test instead.
    return admin == 1
def getSystems():
    """Return every recommender system joined with its admin user.

    The user columns are NULL for systems that are not linked to a user.
    """
    cursor = getDb().cursor(dictionary=True)
    cursor.execute('''select system_id, api_key, active, email, firstname, lastname,
			organization, system_name from systems left join users
			on users.user_id = systems.admin_user_id;''')
    rows = cursor.fetchall()
    cursor.close()
    return rows
def getSystem(ID):
    """Return the system with the given system_id joined with its admin user."""
    cursor = getDb().cursor(dictionary=True)
    cursor.execute('''SELECT * FROM systems left join users
                   on users.user_id = systems.admin_user_id
                   where system_id = %s''', (ID,))
    row = cursor.fetchone()
    cursor.close()
    return row
def toggleSystem(systemID, value):
    """Set the ``active`` flag of the given system.

    :param systemID: system_id of the system to update.
    :param value: new value for the ``active`` column.
    :return: True when a row was updated and committed, False otherwise.
    """
    cur = getDb().cursor()
    sql = 'UPDATE systems SET active=%s WHERE system_id = %s'
    cur.execute(sql, (value, systemID, ))
    if cur.rowcount == 0:
        # Bug fix: the cursor used to leak on this early-return path.
        cur.close()
        return False
    getDb().commit()
    cur.close()
    return True
def getUserStatistics():
    """Return user registration statistics for the last 30 days.

    :return: dict with ``users`` (per-day registration counts, oldest first),
             ``dates`` (matching ISO dates, oldest first) and ``total``
             (overall number of users).
    """
    cur = getDb().cursor()
    sql = 'select count(*), DATE(registered) from users group by DATE(registered) order by registered desc limit 30'
    cur.execute(sql)
    usersByDate = cur.fetchall()
    cur.execute('SELECT count(*) from users')
    total = cur.fetchall()[0][0]
    today = datetime.datetime.today()
    # The last 30 calendar days, newest first (matches the query's ordering).
    dateList = [(today - datetime.timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(0, 30)]
    i = 0
    users = []
    for x in dateList:
        # Bug fix: bounds guard added (mirrors getArticleStatistics); without
        # it the lookup raised IndexError once every fetched row was consumed.
        if i < len(usersByDate) and str(usersByDate[i][1]) == x:
            users.append(usersByDate[i][0])
            i += 1
        else:
            users.append(0)
    # Oldest first for charting. (Stray trailing commas that built throwaway
    # one-element tuples were removed.)
    users.reverse()
    dateList.reverse()
    result = {'users': users,
              'dates': dateList,
              'total': total}
    cur.close()
    return result
def getArticleStatistics():
    """Return article ingestion statistics for the last 30 days.

    :return: dict with ``articles`` (per-day article counts, oldest first),
             ``dates`` (matching ISO dates, oldest first) and ``total``
             (overall number of articles).
    """
    cur = getDb().cursor()
    sql = 'select count(*), datestamp from articles group by datestamp order by datestamp desc limit 30'
    cur.execute(sql)
    articlesByDate = cur.fetchall()
    cur.execute('SELECT count(*) from articles')
    total = cur.fetchall()[0][0]
    today = datetime.datetime.today()
    # The last 30 calendar days, newest first (matches the query's ordering).
    dateList = [(today - datetime.timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(0, 30)]
    i = 0
    articles = []
    for x in dateList:
        # Walk the newest-first rows in lockstep with the generated dates,
        # filling days without articles with 0.
        if i < len(articlesByDate) and str(articlesByDate[i][1]) == x:
            articles.append(articlesByDate[i][0])
            i += 1
        else:
            articles.append(0)
    # Oldest first for charting. (Stray trailing commas that built throwaway
    # one-element tuples were removed.)
    articles.reverse()
    dateList.reverse()
    result = {'articles': articles,
              'dates': dateList,
              'total': total}
    cur.close()
    return result
def getAdmins():
    """Return user_id, email and names of all admin users."""
    cursor = getDb().cursor(dictionary=True)
    cursor.execute('select user_id, email, firstname, lastname from users where admin=1')
    admins = cursor.fetchall()
    cursor.close()
    return admins
__author__ = 'Øyvind Jekteberg and Kristian Gingstad'
__copyright__ = 'Copyright 2020, The arXivDigest project'
import datetime
from arxivdigest.frontend.database.db import getDb
def isAdmin(id):
    """Return True if the user with the given user_id is an admin."""
    cur = getDb().cursor()
    cur.execute('SELECT admin FROM users where user_id=%s', (id,))
    admin = cur.fetchone()[0]
    cur.close()
    # NOTE(review): `is 1` relies on CPython's small-int caching; `== 1`
    # would be the robust comparison.
    return True if admin is 1 else False
def getSystems():
    """Return every recommender system left-joined with its admin user.

    The user columns are NULL for systems without a linked admin user.
    """
    cur = getDb().cursor(dictionary=True)
    cur.execute('''select system_id, api_key, active, email, firstname, lastname,
			organization, system_name from systems left join users
			on users.user_id = systems.admin_user_id;''')
    systems = cur.fetchall()
    cur.close()
    return systems
def getSystem(ID):
    """Return the system with the given system_id joined with its admin user."""
    cur = getDb().cursor(dictionary=True)
    cur.execute('''SELECT * FROM systems left join users
			on users.user_id = systems.admin_user_id
			where system_id = %s''', (ID,))
    data = cur.fetchone()
    cur.close()
    return data
def toggleSystem(systemID, value):
    """Set the ``active`` flag of the given system; return True on success."""
    cur = getDb().cursor()
    sql = 'UPDATE systems SET active=%s WHERE system_id = %s'
    cur.execute(sql, (value, systemID, ))
    if cur.rowcount == 0:
        # NOTE(review): the cursor is leaked on this early-return path.
        return False
    getDb().commit()
    cur.close()
    return True
def getUserStatistics():
    """Return per-day user registration counts for the last 30 days plus the total."""
    cur = getDb().cursor()
    sql = 'select count(*), DATE(registered) from users group by DATE(registered) order by registered desc limit 30'
    cur.execute(sql)
    usersByDate = cur.fetchall()
    cur.execute('SELECT count(*) from users')
    total = cur.fetchall()[0][0]
    today = datetime.datetime.today()
    # The last 30 calendar days, newest first (matches the query's ordering).
    dateList = [(today - datetime.timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(0, 30)]
    i = 0
    users = []
    for x in dateList:
        # NOTE(review): unlike getArticleStatistics there is no
        # `i < len(usersByDate)` bounds guard, so this can raise IndexError
        # once every fetched row has been consumed.
        if str(usersByDate[i][1]) == x:
            users.append(usersByDate[i][0])
            i += 1
        else:
            users.append(0)
    # NOTE(review): the trailing commas build throwaway one-element tuples.
    users.reverse(),
    dateList.reverse(),
    result = {'users': users,
              'dates': dateList,
              'total': total}
    cur.close()
    return result
def getArticleStatistics():
    """Return per-day article counts for the last 30 days plus the total."""
    cur = getDb().cursor()
    sql = 'select count(*), datestamp from articles group by datestamp order by datestamp desc limit 30'
    cur.execute(sql)
    articlesByDate = cur.fetchall()
    cur.execute('SELECT count(*) from articles')
    total = cur.fetchall()[0][0]
    today = datetime.datetime.today()
    # The last 30 calendar days, newest first (matches the query's ordering).
    dateList = [(today - datetime.timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(0, 30)]
    i = 0
    articles = []
    for x in dateList:
        # Walk the newest-first rows in lockstep with the generated dates,
        # filling days without articles with 0.
        if i < len(articlesByDate) and str(articlesByDate[i][1]) == x:
            articles.append(articlesByDate[i][0])
            i += 1
        else:
            articles.append(0)
    # NOTE(review): the trailing commas build throwaway one-element tuples.
    articles.reverse(),
    dateList.reverse(),
    result = {'articles': articles,
              'dates': dateList,
              'total': total}
    cur.close()
    return result
def getAdmins():
    """Return user_id, email and names of all admin users."""
    cur = getDb().cursor(dictionary=True)
    sql = 'select user_id, email, firstname, lastname from users where admin=1'
    cur.execute(sql)
    admindata = cur.fetchall()
    cur.close()
    return admindata
f7334b3bd2f4cfcb4525c2972d35d5a9cc2551a1 | 1,595 | py | Python | medium/306-additive-number.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | medium/306-additive-number.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | medium/306-additive-number.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
累加数
累加数是一个字符串,组成它的数字可以形成累加序列。
一个有效的累加序列必须至少包含 3 个数。除了最开始的两个数以外,字符串中的其他数都等于它之前两个数相加的和。
给定一个只包含数字 '0'-'9' 的字符串,编写一个算法来判断给定输入是否是累加数。
说明: 累加序列里的数不会以 0 开头,所以不会出现 1, 2, 03 或者 1, 02, 3 的情况。
示例 1:
输入: "112358"
输出: true
解释: 累加序列为: 1, 1, 2, 3, 5, 8 。1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8
示例 2:
输入: "199100199"
输出: true
解释: 累加序列为: 1, 99, 100, 199。1 + 99 = 100, 99 + 100 = 199
'''
'''
思路:回溯+剪枝
最外层有个循环,依次遍历从2开始的位置,将0..i切分成2个整数a、b,求出2个整数之和target
回溯确认从num[i]开始的数与target相同,然后后面与b+target相同,再次回溯
时间复杂度:O(n!)
空间复杂度:O(n)
'''
class Solution:
    def isAdditiveNumber(self, num: str) -> bool:
        """Return True if ``num`` can be split into an additive sequence.

        An additive sequence has at least three numbers and every number
        (after the first two) is the sum of the preceding two.  No number may
        have a leading zero unless the number itself is "0".

        Approach: enumerate the split points of the first two numbers and
        verify the rest of the string by recursion, pruning on length.
        """
        n = len(num)

        def valid(s):
            # A number may not have a leading zero unless it is exactly "0".
            return len(s) == 1 or s[0] != '0'

        def backtrack(i, prev, target):
            # Verify num[i:] starts with str(target) and the sequence continues.
            t = str(target)
            if n - i < len(t):          # prune: not enough characters left
                return False
            if n - i == len(t):         # last number: must match exactly
                return num[i:] == t
            # str(target) never has a leading zero, so this prefix comparison
            # also enforces the leading-zero rule for all later numbers
            # (including a legitimate "0" when target == 0).
            if not num.startswith(t, i):
                return False
            return backtrack(i + len(t), target, prev + target)

        # Bug fix: the upper bound used to be n // 3 * 2, which skipped valid
        # splits (e.g. "1123" and "112334" returned False).  Iterate all split
        # points; backtrack's length prune keeps this cheap.
        for i in range(2, n):           # i = end of the second number
            for j in range(1, i):       # j = end of the first number
                a, b = num[:j], num[j:i]
                # Bug fix: the old code rejected any number starting with '0',
                # wrongly failing inputs like "101" and "000".
                if not valid(a) or not valid(b):
                    continue
                second = int(b)
                if backtrack(i, second, int(a) + second):
                    return True
        return False
# Quick manual check of the two LeetCode examples.
solver = Solution()
for sample in ("112358", "199100199"):
    print(solver.isAdditiveNumber(sample))
| 24.921875 | 71 | 0.515361 |
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
n = len(num)
def backtrack(i, b, target):
t = str(target)
if n - i < len(t):
return False
if n - i == len(t):
return num[i:] == t
if num[i:i + len(t)] != t:
return False
if num[i + len(t)] == '0':
return False
return backtrack(i + len(t), target, b + target)
for i in range(2, n // 3 * 2):
if num[i] == '0':
continue
for j in range(1, i):
if num[j] == '0':
continue
a, b = num[:j], num[j:i]
b = int(b)
if backtrack(i, b, int(a) + b):
return True
return False
s = Solution()
print(s.isAdditiveNumber("112358"))
print(s.isAdditiveNumber("199100199"))
| true | true |
f7334d9a86ea0f032876997ef8c3799223b8e2f0 | 1,027 | py | Python | ann_benchmarks/algorithms/flann.py | maumueller/ann-benchmarks-reproducibility | b66ee6981fefefd4ab6e27bb4aac4ec0bfebed2a | [
"MIT"
] | null | null | null | ann_benchmarks/algorithms/flann.py | maumueller/ann-benchmarks-reproducibility | b66ee6981fefefd4ab6e27bb4aac4ec0bfebed2a | [
"MIT"
] | 9 | 2021-01-22T16:24:31.000Z | 2021-11-22T15:10:57.000Z | ann_benchmarks/algorithms/flann.py | maumueller/ann-benchmarks-reproducibility | b66ee6981fefefd4ab6e27bb4aac4ec0bfebed2a | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import pyflann
import numpy
import sklearn.preprocessing
from ann_benchmarks.algorithms.base import BaseANN
class FLANN(BaseANN):
def __init__(self, metric, target_precision):
self._target_precision = target_precision
self.name = 'FLANN(target_precision=%f)' % self._target_precision
self._metric = metric
def fit(self, X):
self._flann = pyflann.FLANN(
target_precision=self._target_precision,
algorithm='autotuned', log_level='info')
if self._metric == 'angular':
X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
if X.dtype != numpy.float32:
X = X.astype(numpy.float32)
self._flann.build_index(X)
def query(self, v, n):
if self._metric == 'angular':
v = sklearn.preprocessing.normalize([v], axis=1, norm='l2')[0]
if v.dtype != numpy.float32:
v = v.astype(numpy.float32)
return self._flann.nn_index(v, n)[0][0]
| 34.233333 | 74 | 0.641675 | from __future__ import absolute_import
import pyflann
import numpy
import sklearn.preprocessing
from ann_benchmarks.algorithms.base import BaseANN
class FLANN(BaseANN):
    """FLANN (autotuned) wrapper for the ANN benchmark harness."""
    def __init__(self, metric, target_precision):
        self._target_precision = target_precision
        self.name = 'FLANN(target_precision=%f)' % self._target_precision
        self._metric = metric
    def fit(self, X):
        """Build an autotuned FLANN index over the dataset ``X``."""
        self._flann = pyflann.FLANN(
            target_precision=self._target_precision,
            algorithm='autotuned', log_level='info')
        if self._metric == 'angular':
            # Rows are unit-normalized for the angular metric — presumably so
            # FLANN's distance on them corresponds to angular similarity.
            X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
        if X.dtype != numpy.float32:
            X = X.astype(numpy.float32)
        self._flann.build_index(X)
    def query(self, v, n):
        """Return the indices of the ``n`` nearest neighbours of ``v``."""
        if self._metric == 'angular':
            v = sklearn.preprocessing.normalize([v], axis=1, norm='l2')[0]
        if v.dtype != numpy.float32:
            v = v.astype(numpy.float32)
        return self._flann.nn_index(v, n)[0][0]
| true | true |
f7334db4e8ac315b14751bc952ebf10a769b5821 | 19,759 | py | Python | tests/test_scheduler/test_default_modules.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | tests/test_scheduler/test_default_modules.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | tests/test_scheduler/test_default_modules.py | zhongtianxie/fm-orchestrator | 5ab39bf1981cf4abdf7ca4c2a7d4a6120f1bea2f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from collections import namedtuple
import errno
import dnf
from mock import call, Mock, patch, PropertyMock
import pytest
from module_build_service.common.config import conf
from module_build_service.common.errors import UnprocessableEntity
from module_build_service.common.models import ModuleBuild
from module_build_service.common.utils import import_mmd, load_mmd, mmd_to_str
from module_build_service.scheduler import default_modules
from module_build_service.scheduler.db_session import db_session
from tests import clean_database, make_module_in_db, read_staged_data
@patch("module_build_service.scheduler.default_modules.handle_collisions_with_base_module_rpms")
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules(mock_get_dm, mock_hc):
    """
    Test that default modules present in the database are added, and the others are ignored.
    """
    clean_database()
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
    assert set(xmd_brs.keys()) == {"platform"}
    platform = ModuleBuild.get_build_from_nsvc(
        db_session,
        "platform",
        xmd_brs["platform"]["stream"],
        xmd_brs["platform"]["version"],
        xmd_brs["platform"]["context"],
    )
    assert platform
    platform_mmd = platform.mmd()
    platform_xmd = mmd.get_xmd()
    # Opt the platform module in to default-module resolution.
    platform_xmd["mbs"]["use_default_modules"] = True
    platform_mmd.set_xmd(platform_xmd)
    platform.modulemd = mmd_to_str(platform_mmd)
    dependencies = [
        {"requires": {"platform": ["f28"]},
         "buildrequires": {"platform": ["f28"]}}]
    make_module_in_db("python:3:12345:1", base_module=platform, dependencies=dependencies)
    make_module_in_db("nodejs:11:2345:2", base_module=platform, dependencies=dependencies)
    db_session.commit()
    mock_get_dm.return_value = {
        "nodejs": "11",
        "python": "3",
        "ruby": "2.6",
    }
    defaults_added = default_modules.add_default_modules(mmd)
    # Make sure that the default modules were added. ruby:2.6 will be ignored since it's not in
    # the database
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"nodejs", "platform", "python"}
    mock_get_dm.assert_called_once_with(
        "f28",
        "https://pagure.io/releng/fedora-module-defaults.git",
    )
    assert "ursine_rpms" not in mmd.get_xmd()["mbs"]
    assert defaults_added is True
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_not_linked(mock_get_dm):
    """
    Test that no default modules are added when they aren't linked from the base module.
    """
    clean_database()
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    expected_brs = {"platform"}
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == expected_brs
    default_modules.add_default_modules(mmd)
    # The buildrequires are unchanged and the defaults repo was never consulted.
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == expected_brs
    mock_get_dm.assert_not_called()
def test_add_default_modules_platform_not_available():
    """
    Test that an exception is raised when the platform module that is buildrequired is missing.

    This error should never occur in practice.
    """
    # Wipe the database without re-adding the platform module.
    clean_database(False, False)
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    expected = "Failed to retrieve the module platform:f28:3:00000000 from the database"
    with pytest.raises(RuntimeError, match=expected):
        default_modules.add_default_modules(mmd)
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_compatible_platforms(mock_get_dm):
    """
    Test that default modules built against compatible base module streams are added.
    """
    clean_database(add_platform_module=False)
    # Create compatible base modules.
    mmd = load_mmd(read_staged_data("platform"))
    for stream in ["f27", "f28"]:
        mmd = mmd.copy("platform", stream)
        # Set the virtual stream to "fedora" to make these base modules compatible.
        xmd = mmd.get_xmd()
        xmd["mbs"]["virtual_streams"] = ["fedora"]
        xmd["mbs"]["use_default_modules"] = True
        mmd.set_xmd(xmd)
        import_mmd(db_session, mmd)
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
    assert set(xmd_brs.keys()) == {"platform"}
    platform_f27 = ModuleBuild.get_build_from_nsvc(
        db_session, "platform", "f27", "3", "00000000")
    assert platform_f27
    # Create python default module which requires platform:f27 and therefore cannot be used
    # as default module for platform:f28.
    dependencies = [
        {"requires": {"platform": ["f27"]},
         "buildrequires": {"platform": ["f27"]}}]
    make_module_in_db("python:3:12345:1", base_module=platform_f27, dependencies=dependencies)
    # Create nodejs default module which requires any platform stream and therefore can be used
    # as default module for platform:f28.
    dependencies[0]["requires"]["platform"] = []
    make_module_in_db("nodejs:11:2345:2", base_module=platform_f27, dependencies=dependencies)
    db_session.commit()
    mock_get_dm.return_value = {
        "nodejs": "11",
        "python": "3",
        "ruby": "2.6",
    }
    defaults_added = default_modules.add_default_modules(mmd)
    # Make sure that the default modules were added. ruby:2.6 will be ignored since it's not in
    # the database
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"nodejs", "platform"}
    mock_get_dm.assert_called_once_with(
        "f28",
        "https://pagure.io/releng/fedora-module-defaults.git",
    )
    assert defaults_added is True
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_request_failed(mock_get_dm):
    """
    Test that an exception is raised when the call to _get_default_modules failed.
    """
    clean_database()
    make_module_in_db("python:3:12345:1")
    make_module_in_db("nodejs:11:2345:2")
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
    assert set(xmd_brs.keys()) == {"platform"}
    platform = ModuleBuild.get_build_from_nsvc(
        db_session,
        "platform",
        xmd_brs["platform"]["stream"],
        xmd_brs["platform"]["version"],
        xmd_brs["platform"]["context"],
    )
    assert platform
    platform_mmd = platform.mmd()
    platform_xmd = mmd.get_xmd()
    # Opt the platform in to default modules so the failing helper is reached.
    platform_xmd["mbs"]["use_default_modules"] = True
    platform_mmd.set_xmd(platform_xmd)
    platform.modulemd = mmd_to_str(platform_mmd)
    db_session.commit()
    expected_error = "some error"
    mock_get_dm.side_effect = ValueError(expected_error)
    # The ValueError from _get_default_modules must propagate unchanged.
    with pytest.raises(ValueError, match=expected_error):
        default_modules.add_default_modules(mmd)
@pytest.mark.parametrize("is_rawhide", (True, False))
@patch("shutil.rmtree")
@patch("tempfile.mkdtemp")
@patch("module_build_service.scheduler.default_modules.Modulemd.ModuleIndex.new")
@patch("module_build_service.scheduler.default_modules.scm.SCM")
@patch("module_build_service.scheduler.default_modules._get_rawhide_version")
def test_get_default_modules(
    mock_get_rawhide, mock_scm, mock_mmd_new, mock_mkdtemp, mock_rmtree, is_rawhide,
):
    """
    Test that _get_default_modules returns the default modules.
    """
    mock_scm.return_value.sourcedir = "/some/path"
    if is_rawhide:
        # The release-branch checkout fails so the code must fall back to the
        # rawhide branch.
        mock_scm.return_value.checkout_ref.side_effect = [
            UnprocessableEntity("invalid branch"),
            None,
        ]
        mock_get_rawhide.return_value = "f32"
    expected = {"nodejs": "11"}
    mock_mmd_new.return_value.get_default_streams.return_value = expected
    rv = default_modules._get_default_modules("f32", conf.default_modules_scm_url)
    assert rv == expected
    if is_rawhide:
        mock_scm.return_value.checkout_ref.assert_has_calls(
            [call("f32"), call(conf.rawhide_branch)]
        )
    else:
        mock_scm.return_value.checkout_ref.assert_called_once_with("f32")
@pytest.mark.parametrize("uses_rawhide", (True, False))
@patch("shutil.rmtree")
@patch("tempfile.mkdtemp")
@patch(
    "module_build_service.scheduler.default_modules.conf.uses_rawhide",
    new_callable=PropertyMock,
)
@patch("module_build_service.scheduler.default_modules.Modulemd.ModuleIndex.new")
@patch("module_build_service.scheduler.default_modules.scm.SCM")
@patch("module_build_service.scheduler.default_modules._get_rawhide_version")
def test_get_default_modules_invalid_branch(
    mock_get_rawhide, mock_scm, mock_mmd_new, mock_uses_rawhide, mock_mkdtemp, mock_rmtree,
    uses_rawhide,
):
    """
    Test that _get_default_modules raises an exception with an invalid branch.
    """
    mock_uses_rawhide.return_value = uses_rawhide
    mock_scm.return_value.sourcedir = "/some/path"
    # Both the release branch and the rawhide fallback fail to check out.
    mock_scm.return_value.checkout_ref.side_effect = [
        UnprocessableEntity("invalid branch"),
        UnprocessableEntity("invalid branch"),
    ]
    if uses_rawhide:
        mock_get_rawhide.return_value = "f32"
    else:
        mock_get_rawhide.return_value = "something_else"
    with pytest.raises(RuntimeError, match="Failed to retrieve the default modules"):
        default_modules._get_default_modules("f32", conf.default_modules_scm_url)
    mock_mmd_new.assert_not_called()
    if uses_rawhide:
        mock_scm.return_value.checkout_ref.assert_has_calls(
            [call("f32"), call(conf.rawhide_branch)],
        )
    else:
        mock_scm.return_value.checkout_ref.assert_called_once_with("f32")
@patch("module_build_service.scheduler.default_modules.get_session")
def test_get_rawhide_version(mock_get_session):
    """Verify _get_rawhide_version derives the version from the Koji build target."""
    koji_session = mock_get_session.return_value
    # The rawhide version is the build tag name with the "-build" suffix dropped.
    koji_session.getBuildTarget.return_value = {"build_tag_name": "f32-build"}
    rv = default_modules._get_rawhide_version()
    assert rv == "f32"
@patch("module_build_service.scheduler.default_modules.get_session")
@patch("module_build_service.scheduler.default_modules._get_rpms_from_tags")
def test_handle_collisions_with_base_module_rpms(mock_grft, mock_get_session):
    """
    Test that handle_collisions_with_base_module_rpms will add conflicts for NEVRAs in the
    modulemd.
    """
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd = mmd.get_xmd()
    xmd["mbs"]["buildrequires"]["platform"]["koji_tag"] = "module-el-build"
    xmd["mbs"]["buildrequires"]["python"] = {"koji_tag": "module-python27"}
    xmd["mbs"]["buildrequires"]["bash"] = {"koji_tag": "module-bash"}
    mmd.set_xmd(xmd)
    # RPMs tagged into the base module's tag.
    bm_rpms = {
        "bash-completion-1:2.7-5.el8.noarch",
        "bash-0:4.4.19-7.el8.aarch64",
        "python2-tools-0:2.7.16-11.el8.aarch64",
        "python2-tools-0:2.7.16-11.el8.x86_64",
        "python3-ldap-0:3.1.0-4.el8.aarch64",
        "python3-ldap-0:3.1.0-4.el8.x86_64",
    }
    # RPMs from the non-base buildrequired modules that shadow some of the above.
    non_bm_rpms = {
        "bash-0:4.4.20-1.el8.aarch64",
        "python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64",
        "python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64",
    }
    mock_grft.side_effect = [bm_rpms, non_bm_rpms]
    default_modules.handle_collisions_with_base_module_rpms(mmd, ["aarch64", "x86_64"])
    mock_get_session.assert_called_once()
    xmd_mbs = mmd.get_xmd()["mbs"]
    # Only the base-module RPMs whose names collide with module RPMs remain.
    assert set(xmd_mbs["ursine_rpms"]) == {
        "bash-0:4.4.19-7.el8.aarch64",
        "python2-tools-0:2.7.16-11.el8.aarch64",
        "python2-tools-0:2.7.16-11.el8.x86_64",
    }
    assert mock_grft.call_count == 2
    # We can't check the calls directly because the second argument is a set converted to a list,
    # so the order can't be determined ahead of time.
    first_call = mock_grft.mock_calls[0][1]
    assert first_call[0] == mock_get_session.return_value
    assert first_call[1] == ["module-el-build"]
    assert first_call[2] == ["aarch64", "x86_64"]
    second_call = mock_grft.mock_calls[1][1]
    assert second_call[0] == mock_get_session.return_value
    assert set(second_call[1]) == {"module-bash", "module-python27"}
    assert second_call[2] == ["aarch64", "x86_64"]
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
@patch("module_build_service.scheduler.default_modules._get_rpms_in_external_repo")
def test_get_rpms_from_tags(mock_grier, mock_multicall_map):
    """
    Test the function queries Koji for the tags' and the tags' external repos' for RPMs.
    """
    mock_session = Mock()
    # listTaggedRPMS results have the form (rpms, builds); builds are unused here.
    bash_tagged = [
        [
            {
                "arch": "aarch64",
                "epoch": 0,
                "name": "bash",
                "version": "4.4.20",
                "release": "1.module+el8.1.0+123+bbd875cb",
            },
            {
                "arch": "x86_64",
                "epoch": 0,
                "name": "bash",
                "version": "4.4.20",
                "release": "1.module+el8.1.0+123+bbd875cb",
            }
        ],
        None,
    ]
    python_tagged = [
        [
            {
                "arch": "aarch64",
                "epoch": 0,
                "name": "python2-tools",
                "version": "2.7.18",
                "release": "1.module+el8.1.0+3568+bbd875cb",
            },
            {
                "arch": "x86_64",
                "epoch": 0,
                "name": "python2-tools",
                "version": "2.7.18",
                "release": "1.module+el8.1.0+3568+bbd875cb",
            }
        ],
        None,
    ]
    # Only the python tag has an external repo configured.
    bash_repos = []
    external_repo_url = "http://domain.local/repo/latest/$arch/"
    python_repos = [{
        "external_repo_id": "12",
        "tag_name": "module-python27",
        "url": external_repo_url,
    }]
    # First multicall answers listTaggedRPMS, second answers getExternalRepoList.
    mock_multicall_map.side_effect = [
        [bash_tagged, python_tagged],
        [bash_repos, python_repos],
    ]
    mock_grier.return_value = {
        "python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.aarch64",
        "python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.x86_64",
    }
    tags = ["module-bash", "module-python27"]
    arches = ["aarch64", "x86_64"]
    rv = default_modules._get_rpms_from_tags(mock_session, tags, arches)
    # The result is the union of tagged RPMs and the external repo's RPMs.
    expected = {
        "bash-0:4.4.20-1.module+el8.1.0+123+bbd875cb.aarch64",
        "bash-0:4.4.20-1.module+el8.1.0+123+bbd875cb.x86_64",
        "python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64",
        "python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64",
        "python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.aarch64",
        "python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.x86_64",
    }
    assert rv == expected
    assert mock_multicall_map.call_count == 2
    mock_grier.assert_called_once_with(external_repo_url, arches, "module-python27-12")
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
def test_get_rpms_from_tags_error_listTaggedRPMS(mock_multicall_map):
    """
    Test that an exception is raised if the listTaggedRPMS Koji query fails.
    """
    # A multicall returning None signals that the Koji query failed.
    mock_multicall_map.return_value = None
    koji_session = Mock()
    expected = (
        "Getting the tagged RPMs of the following Koji tags failed: module-bash, module-python27"
    )
    with pytest.raises(RuntimeError, match=expected):
        default_modules._get_rpms_from_tags(
            koji_session, ["module-bash", "module-python27"], ["aarch64", "x86_64"])
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
def test_get_rpms_from_tags_error_getExternalRepoList(mock_multicall_map):
    """
    Test that an exception is raised if the getExternalRepoList Koji query fails.
    """
    # The first multicall (listTaggedRPMS) succeeds with an empty result,
    # the second (getExternalRepoList) fails by returning None.
    mock_multicall_map.side_effect = [[[[], []]], None]
    koji_session = Mock()
    expected = (
        "Getting the external repos of the following Koji tags failed: module-bash, module-python27"
    )
    with pytest.raises(RuntimeError, match=expected):
        default_modules._get_rpms_from_tags(
            koji_session, ["module-bash", "module-python27"], ["aarch64", "x86_64"])
@patch("dnf.Base")
@patch("os.makedirs")
def test_get_rpms_in_external_repo(mock_makedirs, mock_dnf_base):
    """
    Test that DNF can query the external repos for the available packages.
    """
    # Minimal stand-in for the hawkey package objects DNF returns.
    RPM = namedtuple("RPM", ["arch", "epoch", "name", "release", "version"])
    mock_dnf_base.return_value.sack.query.return_value.available.return_value = [
        RPM("aarch64", 0, "python", "1.el8", "2.7"),
        RPM("aarch64", 0, "python", "1.el8", "3.7"),
        RPM("x86_64", 0, "python", "1.el8", "2.7"),
        RPM("x86_64", 0, "python", "1.el8", "3.7"),
        RPM("i686", 0, "python", "1.el8", "2.7"),
        RPM("i686", 0, "python", "1.el8", "3.7"),
    ]
    external_repo_url = "http://domain.local/repo/latest/$arch/"
    arches = ["aarch64", "x86_64", "i686"]
    cache_dir_name = "module-el-build-12"
    rv = default_modules._get_rpms_in_external_repo(external_repo_url, arches, cache_dir_name)
    expected = {
        "python-0:2.7-1.el8.aarch64",
        "python-0:3.7-1.el8.aarch64",
        "python-0:2.7-1.el8.x86_64",
        "python-0:3.7-1.el8.x86_64",
        "python-0:2.7-1.el8.i686",
        "python-0:3.7-1.el8.i686",
    }
    assert rv == expected
    # Test that i686 is mapped to i386 using the koji.canonArch().
    mock_dnf_base.return_value.repos.add_new_repo.assert_called_with(
        "repo_i386",
        mock_dnf_base.return_value.conf,
        baseurl=["http://domain.local/repo/latest/i386/"],
        minrate=conf.dnf_minrate,
    )
def test_get_rpms_in_external_repo_invalid_repo_url():
    """
    Test that an exception is raised when an invalid repo URL is passed in.
    """
    # The URL deliberately lacks the required "$arch" placeholder.
    repo_url = "http://domain.local/repo/latest/"
    expected = (
        r"The external repo http://domain.local/repo/latest/ does not contain the \$arch variable"
    )
    with pytest.raises(ValueError, match=expected):
        default_modules._get_rpms_in_external_repo(
            repo_url, ["aarch64", "x86_64"], "module-el-build-12")
@patch("dnf.Base")
@patch("os.makedirs")
def test_get_rpms_in_external_repo_failed_to_load(mock_makedirs, mock_dnf_base):
    """
    Test that an exception is raised when an external repo can't be loaded.
    """
    # Dead code removed: a local `FakeRepo` class was defined here but never
    # referenced anywhere in the test.
    # Simulate DNF failing to refresh the repo metadata; the helper must
    # surface this as a RuntimeError rather than letting RepoError escape.
    mock_dnf_base.return_value.update_cache.side_effect = dnf.exceptions.RepoError("Failed")
    external_repo_url = "http://domain.local/repo/latest/$arch/"
    arches = ["aarch64", "x86_64"]
    cache_dir_name = "module-el-build-12"
    expected = "Failed to load the external repos"
    with pytest.raises(RuntimeError, match=expected):
        default_modules._get_rpms_in_external_repo(external_repo_url, arches, cache_dir_name)
@patch("os.makedirs")
def test_get_rpms_in_external_repo_failed_to_create_cache(mock_makedirs):
    """
    Test that an exception is raised when the cache can't be created.
    """
    # Simulate a permission error when the cache directory is created.
    permission_error = OSError()
    permission_error.errno = errno.EACCES
    mock_makedirs.side_effect = permission_error
    with pytest.raises(RuntimeError, match="The MBS cache is not writeable."):
        default_modules._get_rpms_in_external_repo(
            "http://domain.local/repo/latest/$arch/",
            ["aarch64", "x86_64"],
            "module-el-build-12",
        )
| 37.708015 | 100 | 0.680095 |
from __future__ import absolute_import
from collections import namedtuple
import errno
import dnf
from mock import call, Mock, patch, PropertyMock
import pytest
from module_build_service.common.config import conf
from module_build_service.common.errors import UnprocessableEntity
from module_build_service.common.models import ModuleBuild
from module_build_service.common.utils import import_mmd, load_mmd, mmd_to_str
from module_build_service.scheduler import default_modules
from module_build_service.scheduler.db_session import db_session
from tests import clean_database, make_module_in_db, read_staged_data
@patch("module_build_service.scheduler.default_modules.handle_collisions_with_base_module_rpms")
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules(mock_get_dm, mock_hc):
    """Default modules present in the database are added; the others are ignored."""
    clean_database()
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
    assert set(xmd_brs.keys()) == {"platform"}
    platform = ModuleBuild.get_build_from_nsvc(
        db_session,
        "platform",
        xmd_brs["platform"]["stream"],
        xmd_brs["platform"]["version"],
        xmd_brs["platform"]["context"],
    )
    assert platform
    platform_mmd = platform.mmd()
    platform_xmd = mmd.get_xmd()
    # Opt the platform module in to default-module resolution.
    platform_xmd["mbs"]["use_default_modules"] = True
    platform_mmd.set_xmd(platform_xmd)
    platform.modulemd = mmd_to_str(platform_mmd)
    dependencies = [
        {"requires": {"platform": ["f28"]},
         "buildrequires": {"platform": ["f28"]}}]
    make_module_in_db("python:3:12345:1", base_module=platform, dependencies=dependencies)
    make_module_in_db("nodejs:11:2345:2", base_module=platform, dependencies=dependencies)
    db_session.commit()
    mock_get_dm.return_value = {
        "nodejs": "11",
        "python": "3",
        "ruby": "2.6",
    }
    defaults_added = default_modules.add_default_modules(mmd)
    # The default modules were added; ruby:2.6 is ignored since it is not in
    # the database
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"nodejs", "platform", "python"}
    mock_get_dm.assert_called_once_with(
        "f28",
        "https://pagure.io/releng/fedora-module-defaults.git",
    )
    assert "ursine_rpms" not in mmd.get_xmd()["mbs"]
    assert defaults_added is True
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_not_linked(mock_get_dm):
    """No default modules are added when the base module does not opt in."""
    clean_database()
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"platform"}
    default_modules.add_default_modules(mmd)
    # The buildrequires are unchanged and the defaults repo was never consulted.
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"platform"}
    mock_get_dm.assert_not_called()
def test_add_default_modules_platform_not_available():
    """A missing buildrequired platform module raises RuntimeError."""
    # Wipe the database without re-adding the platform module.
    clean_database(False, False)
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    expected_error = "Failed to retrieve the module platform:f28:3:00000000 from the database"
    with pytest.raises(RuntimeError, match=expected_error):
        default_modules.add_default_modules(mmd)
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_compatible_platforms(mock_get_dm):
    """Default modules built against compatible base module streams are added."""
    clean_database(add_platform_module=False)
    # Create compatible base modules.
    mmd = load_mmd(read_staged_data("platform"))
    for stream in ["f27", "f28"]:
        mmd = mmd.copy("platform", stream)
        # Set the virtual stream to "fedora" to make these base modules compatible.
        xmd = mmd.get_xmd()
        xmd["mbs"]["virtual_streams"] = ["fedora"]
        xmd["mbs"]["use_default_modules"] = True
        mmd.set_xmd(xmd)
        import_mmd(db_session, mmd)
    mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
    xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
    assert set(xmd_brs.keys()) == {"platform"}
    platform_f27 = ModuleBuild.get_build_from_nsvc(
        db_session, "platform", "f27", "3", "00000000")
    assert platform_f27
    # Create python default module which requires platform:f27 and therefore cannot be used
    # as default module for platform:f28.
    dependencies = [
        {"requires": {"platform": ["f27"]},
         "buildrequires": {"platform": ["f27"]}}]
    make_module_in_db("python:3:12345:1", base_module=platform_f27, dependencies=dependencies)
    # Create nodejs default module which requires any platform stream and therefore can be used
    # as default module for platform:f28.
    dependencies[0]["requires"]["platform"] = []
    make_module_in_db("nodejs:11:2345:2", base_module=platform_f27, dependencies=dependencies)
    db_session.commit()
    mock_get_dm.return_value = {
        "nodejs": "11",
        "python": "3",
        "ruby": "2.6",
    }
    defaults_added = default_modules.add_default_modules(mmd)
    # Make sure that the default modules were added. ruby:2.6 will be ignored since it's not in
    # the database.
    assert set(mmd.get_xmd()["mbs"]["buildrequires"].keys()) == {"nodejs", "platform"}
    mock_get_dm.assert_called_once_with(
        "f28",
        "https://pagure.io/releng/fedora-module-defaults.git",
    )
    assert defaults_added is True
@patch("module_build_service.scheduler.default_modules._get_default_modules")
def test_add_default_modules_request_failed(mock_get_dm):
clean_database()
make_module_in_db("python:3:12345:1")
make_module_in_db("nodejs:11:2345:2")
mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
xmd_brs = mmd.get_xmd()["mbs"]["buildrequires"]
assert set(xmd_brs.keys()) == {"platform"}
platform = ModuleBuild.get_build_from_nsvc(
db_session,
"platform",
xmd_brs["platform"]["stream"],
xmd_brs["platform"]["version"],
xmd_brs["platform"]["context"],
)
assert platform
platform_mmd = platform.mmd()
platform_xmd = mmd.get_xmd()
platform_xmd["mbs"]["use_default_modules"] = True
platform_mmd.set_xmd(platform_xmd)
platform.modulemd = mmd_to_str(platform_mmd)
db_session.commit()
expected_error = "some error"
mock_get_dm.side_effect = ValueError(expected_error)
with pytest.raises(ValueError, match=expected_error):
default_modules.add_default_modules(mmd)
@pytest.mark.parametrize("is_rawhide", (True, False))
@patch("shutil.rmtree")
@patch("tempfile.mkdtemp")
@patch("module_build_service.scheduler.default_modules.Modulemd.ModuleIndex.new")
@patch("module_build_service.scheduler.default_modules.scm.SCM")
@patch("module_build_service.scheduler.default_modules._get_rawhide_version")
def test_get_default_modules(
mock_get_rawhide, mock_scm, mock_mmd_new, mock_mkdtemp, mock_rmtree, is_rawhide,
):
mock_scm.return_value.sourcedir = "/some/path"
if is_rawhide:
mock_scm.return_value.checkout_ref.side_effect = [
UnprocessableEntity("invalid branch"),
None,
]
mock_get_rawhide.return_value = "f32"
expected = {"nodejs": "11"}
mock_mmd_new.return_value.get_default_streams.return_value = expected
rv = default_modules._get_default_modules("f32", conf.default_modules_scm_url)
assert rv == expected
if is_rawhide:
mock_scm.return_value.checkout_ref.assert_has_calls(
[call("f32"), call(conf.rawhide_branch)]
)
else:
mock_scm.return_value.checkout_ref.assert_called_once_with("f32")
@pytest.mark.parametrize("uses_rawhide", (True, False))
@patch("shutil.rmtree")
@patch("tempfile.mkdtemp")
@patch(
"module_build_service.scheduler.default_modules.conf.uses_rawhide",
new_callable=PropertyMock,
)
@patch("module_build_service.scheduler.default_modules.Modulemd.ModuleIndex.new")
@patch("module_build_service.scheduler.default_modules.scm.SCM")
@patch("module_build_service.scheduler.default_modules._get_rawhide_version")
def test_get_default_modules_invalid_branch(
mock_get_rawhide, mock_scm, mock_mmd_new, mock_uses_rawhide, mock_mkdtemp, mock_rmtree,
uses_rawhide,
):
mock_uses_rawhide.return_value = uses_rawhide
mock_scm.return_value.sourcedir = "/some/path"
mock_scm.return_value.checkout_ref.side_effect = [
UnprocessableEntity("invalid branch"),
UnprocessableEntity("invalid branch"),
]
if uses_rawhide:
mock_get_rawhide.return_value = "f32"
else:
mock_get_rawhide.return_value = "something_else"
with pytest.raises(RuntimeError, match="Failed to retrieve the default modules"):
default_modules._get_default_modules("f32", conf.default_modules_scm_url)
mock_mmd_new.assert_not_called()
if uses_rawhide:
mock_scm.return_value.checkout_ref.assert_has_calls(
[call("f32"), call(conf.rawhide_branch)],
)
else:
mock_scm.return_value.checkout_ref.assert_called_once_with("f32")
@patch("module_build_service.scheduler.default_modules.get_session")
def test_get_rawhide_version(mock_get_session):
mock_get_session.return_value.getBuildTarget.return_value = {
"build_tag_name": "f32-build",
}
assert default_modules._get_rawhide_version() == "f32"
@patch("module_build_service.scheduler.default_modules.get_session")
@patch("module_build_service.scheduler.default_modules._get_rpms_from_tags")
def test_handle_collisions_with_base_module_rpms(mock_grft, mock_get_session):
mmd = load_mmd(read_staged_data("formatted_testmodule.yaml"))
xmd = mmd.get_xmd()
xmd["mbs"]["buildrequires"]["platform"]["koji_tag"] = "module-el-build"
xmd["mbs"]["buildrequires"]["python"] = {"koji_tag": "module-python27"}
xmd["mbs"]["buildrequires"]["bash"] = {"koji_tag": "module-bash"}
mmd.set_xmd(xmd)
bm_rpms = {
"bash-completion-1:2.7-5.el8.noarch",
"bash-0:4.4.19-7.el8.aarch64",
"python2-tools-0:2.7.16-11.el8.aarch64",
"python2-tools-0:2.7.16-11.el8.x86_64",
"python3-ldap-0:3.1.0-4.el8.aarch64",
"python3-ldap-0:3.1.0-4.el8.x86_64",
}
non_bm_rpms = {
"bash-0:4.4.20-1.el8.aarch64",
"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64",
"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64",
}
mock_grft.side_effect = [bm_rpms, non_bm_rpms]
default_modules.handle_collisions_with_base_module_rpms(mmd, ["aarch64", "x86_64"])
mock_get_session.assert_called_once()
xmd_mbs = mmd.get_xmd()["mbs"]
assert set(xmd_mbs["ursine_rpms"]) == {
"bash-0:4.4.19-7.el8.aarch64",
"python2-tools-0:2.7.16-11.el8.aarch64",
"python2-tools-0:2.7.16-11.el8.x86_64",
}
assert mock_grft.call_count == 2
# so the order can't be determined ahead of time.
first_call = mock_grft.mock_calls[0][1]
assert first_call[0] == mock_get_session.return_value
assert first_call[1] == ["module-el-build"]
assert first_call[2] == ["aarch64", "x86_64"]
second_call = mock_grft.mock_calls[1][1]
assert second_call[0] == mock_get_session.return_value
assert set(second_call[1]) == {"module-bash", "module-python27"}
assert second_call[2] == ["aarch64", "x86_64"]
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
@patch("module_build_service.scheduler.default_modules._get_rpms_in_external_repo")
def test_get_rpms_from_tags(mock_grier, mock_multicall_map):
mock_session = Mock()
bash_tagged = [
[
{
"arch": "aarch64",
"epoch": 0,
"name": "bash",
"version": "4.4.20",
"release": "1.module+el8.1.0+123+bbd875cb",
},
{
"arch": "x86_64",
"epoch": 0,
"name": "bash",
"version": "4.4.20",
"release": "1.module+el8.1.0+123+bbd875cb",
}
],
None,
]
python_tagged = [
[
{
"arch": "aarch64",
"epoch": 0,
"name": "python2-tools",
"version": "2.7.18",
"release": "1.module+el8.1.0+3568+bbd875cb",
},
{
"arch": "x86_64",
"epoch": 0,
"name": "python2-tools",
"version": "2.7.18",
"release": "1.module+el8.1.0+3568+bbd875cb",
}
],
None,
]
bash_repos = []
external_repo_url = "http://domain.local/repo/latest/$arch/"
python_repos = [{
"external_repo_id": "12",
"tag_name": "module-python27",
"url": external_repo_url,
}]
mock_multicall_map.side_effect = [
[bash_tagged, python_tagged],
[bash_repos, python_repos],
]
mock_grier.return_value = {
"python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.aarch64",
"python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.x86_64",
}
tags = ["module-bash", "module-python27"]
arches = ["aarch64", "x86_64"]
rv = default_modules._get_rpms_from_tags(mock_session, tags, arches)
expected = {
"bash-0:4.4.20-1.module+el8.1.0+123+bbd875cb.aarch64",
"bash-0:4.4.20-1.module+el8.1.0+123+bbd875cb.x86_64",
"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64",
"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64",
"python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.aarch64",
"python2-test-0:2.7.16-11.module+el8.1.0+3568+bbd875cb.x86_64",
}
assert rv == expected
assert mock_multicall_map.call_count == 2
mock_grier.assert_called_once_with(external_repo_url, arches, "module-python27-12")
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
def test_get_rpms_from_tags_error_listTaggedRPMS(mock_multicall_map):
mock_session = Mock()
mock_multicall_map.return_value = None
tags = ["module-bash", "module-python27"]
arches = ["aarch64", "x86_64"]
expected = (
"Getting the tagged RPMs of the following Koji tags failed: module-bash, module-python27"
)
with pytest.raises(RuntimeError, match=expected):
default_modules._get_rpms_from_tags(mock_session, tags, arches)
@patch("module_build_service.scheduler.default_modules.koji_retrying_multicall_map")
def test_get_rpms_from_tags_error_getExternalRepoList(mock_multicall_map):
mock_session = Mock()
mock_multicall_map.side_effect = [[[[], []]], None]
tags = ["module-bash", "module-python27"]
arches = ["aarch64", "x86_64"]
expected = (
"Getting the external repos of the following Koji tags failed: module-bash, module-python27"
)
with pytest.raises(RuntimeError, match=expected):
default_modules._get_rpms_from_tags(mock_session, tags, arches)
@patch("dnf.Base")
@patch("os.makedirs")
def test_get_rpms_in_external_repo(mock_makedirs, mock_dnf_base):
RPM = namedtuple("RPM", ["arch", "epoch", "name", "release", "version"])
mock_dnf_base.return_value.sack.query.return_value.available.return_value = [
RPM("aarch64", 0, "python", "1.el8", "2.7"),
RPM("aarch64", 0, "python", "1.el8", "3.7"),
RPM("x86_64", 0, "python", "1.el8", "2.7"),
RPM("x86_64", 0, "python", "1.el8", "3.7"),
RPM("i686", 0, "python", "1.el8", "2.7"),
RPM("i686", 0, "python", "1.el8", "3.7"),
]
external_repo_url = "http://domain.local/repo/latest/$arch/"
arches = ["aarch64", "x86_64", "i686"]
cache_dir_name = "module-el-build-12"
rv = default_modules._get_rpms_in_external_repo(external_repo_url, arches, cache_dir_name)
expected = {
"python-0:2.7-1.el8.aarch64",
"python-0:3.7-1.el8.aarch64",
"python-0:2.7-1.el8.x86_64",
"python-0:3.7-1.el8.x86_64",
"python-0:2.7-1.el8.i686",
"python-0:3.7-1.el8.i686",
}
assert rv == expected
mock_dnf_base.return_value.repos.add_new_repo.assert_called_with(
"repo_i386",
mock_dnf_base.return_value.conf,
baseurl=["http://domain.local/repo/latest/i386/"],
minrate=conf.dnf_minrate,
)
def test_get_rpms_in_external_repo_invalid_repo_url():
    """A repo URL lacking the $arch placeholder must be rejected with ValueError."""
    repo_url = "http://domain.local/repo/latest/"
    error_pattern = (
        r"The external repo http://domain.local/repo/latest/ does not contain the \$arch variable"
    )
    with pytest.raises(ValueError, match=error_pattern):
        default_modules._get_rpms_in_external_repo(
            repo_url, ["aarch64", "x86_64"], "module-el-build-12")
@patch("dnf.Base")
@patch("os.makedirs")
def test_get_rpms_in_external_repo_failed_to_load(mock_makedirs, mock_dnf_base):
class FakeRepo(dict):
@staticmethod
def add_new_repo(*args, **kwargs):
pass
mock_dnf_base.return_value.update_cache.side_effect = dnf.exceptions.RepoError("Failed")
external_repo_url = "http://domain.local/repo/latest/$arch/"
arches = ["aarch64", "x86_64"]
cache_dir_name = "module-el-build-12"
expected = "Failed to load the external repos"
with pytest.raises(RuntimeError, match=expected):
default_modules._get_rpms_in_external_repo(external_repo_url, arches, cache_dir_name)
@patch("os.makedirs")
def test_get_rpms_in_external_repo_failed_to_create_cache(mock_makedirs):
exc = OSError()
exc.errno = errno.EACCES
mock_makedirs.side_effect = exc
external_repo_url = "http://domain.local/repo/latest/$arch/"
arches = ["aarch64", "x86_64"]
cache_dir_name = "module-el-build-12"
expected = "The MBS cache is not writeable."
with pytest.raises(RuntimeError, match=expected):
default_modules._get_rpms_in_external_repo(external_repo_url, arches, cache_dir_name)
| true | true |
f7334dde9ee3f496655e36d1caad886e90dd8fa1 | 538 | py | Python | src/GenPro/genetic_algorithm/crossover.py | Hispar/procedural_generation | ed265637cb5ddf570439ee26263d534e07fac7b1 | [
"MIT"
] | null | null | null | src/GenPro/genetic_algorithm/crossover.py | Hispar/procedural_generation | ed265637cb5ddf570439ee26263d534e07fac7b1 | [
"MIT"
] | null | null | null | src/GenPro/genetic_algorithm/crossover.py | Hispar/procedural_generation | ed265637cb5ddf570439ee26263d534e07fac7b1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Python imports
# 3rd Party imports
# App imports
from .individual import Individual
class Crossover:
    """Abstract base class for genetic-algorithm crossover operators.

    Subclasses override :meth:`create_offspring` to combine the two
    parents into a new individual.
    """

    # The two parents to recombine; set via set_parents() before use.
    parent_a: Individual
    parent_b: Individual
    # Class used to build offspring (allows Individual subclasses).
    individual_class: type

    def __init__(self, individual_class: type = Individual):
        self.individual_class = individual_class

    def set_parents(self, parent_a: Individual, parent_b: Individual):
        """Store the pair of parents used by the next create_offspring() call."""
        self.parent_a = parent_a
        self.parent_b = parent_b

    def create_offspring(self) -> Individual:
        """Combine the two parents into a new individual; must be overridden."""
        raise NotImplementedError
| 22.416667 | 70 | 0.70632 |
from .individual import Individual
class Crossover:
    """Abstract base class for genetic-algorithm crossover operators.

    Subclasses override :meth:`create_offspring` to combine the two
    parents into a new individual.
    """

    # The two parents to recombine; set via set_parents() before use.
    parent_a: Individual
    parent_b: Individual
    # Class used to build offspring (allows Individual subclasses).
    individual_class: type

    def __init__(self, individual_class: type = Individual):
        self.individual_class = individual_class

    def set_parents(self, parent_a: Individual, parent_b: Individual):
        """Store the pair of parents used by the next create_offspring() call."""
        self.parent_a = parent_a
        self.parent_b = parent_b

    def create_offspring(self) -> Individual:
        """Combine the two parents into a new individual; must be overridden."""
        raise NotImplementedError
| true | true |
f7334dff44a05ce7f3676b02ed83879a5e3aeb65 | 272 | py | Python | backend/app/app/models/team_role.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | backend/app/app/models/team_role.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | backend/app/app/models/team_role.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | from app.db.base_class import Base
from app.models import Team, User
from sqlalchemy_oso.roles import resource_role_class
# ROLE MODELS ##
# Mixin generated by sqlalchemy-oso that ties User roles to Team resources,
# restricted to the listed role names.
TeamRoleMixin = resource_role_class(Base, User, Team, ["OWNER", "ADMIN", "MEMBER"])


class TeamRole(Base, TeamRoleMixin):
    """Role assignment of a User on a Team ("OWNER", "ADMIN" or "MEMBER")."""

    pass
| 24.727273 | 83 | 0.764706 | from app.db.base_class import Base
from app.models import Team, User
from sqlalchemy_oso.roles import resource_role_class
# Mixin generated by sqlalchemy-oso that ties User roles to Team resources.
# Bug fix: the binding was corrupted to "amRoleMixin" while the class bases
# below reference TeamRoleMixin, which raised NameError at import time.
TeamRoleMixin = resource_role_class(Base, User, Team, ["OWNER", "ADMIN", "MEMBER"])


class TeamRole(Base, TeamRoleMixin):
    """Role assignment of a User on a Team ("OWNER", "ADMIN" or "MEMBER")."""

    pass
| true | true |
f7334e61c25dec469b3715c617f79fc68414c00c | 201 | py | Python | stella_nav_recognizer/src/stella_nav_recognizer/__init__.py | ymd-stella/stella_nav | b92f2dcaf52d0bb03c9ea4228124dc3444af2681 | [
"MIT"
] | null | null | null | stella_nav_recognizer/src/stella_nav_recognizer/__init__.py | ymd-stella/stella_nav | b92f2dcaf52d0bb03c9ea4228124dc3444af2681 | [
"MIT"
] | null | null | null | stella_nav_recognizer/src/stella_nav_recognizer/__init__.py | ymd-stella/stella_nav | b92f2dcaf52d0bb03c9ea4228124dc3444af2681 | [
"MIT"
] | 1 | 2022-01-14T07:55:22.000Z | 2022-01-14T07:55:22.000Z | from .recognizer import Recognizer
from .distance_checker import DistanceChecker
from .time_checker import TimeChecker
from .pass_checker import PassChecker
from .stop_recognizer import StopRecognizer
| 33.5 | 45 | 0.875622 | from .recognizer import Recognizer
from .distance_checker import DistanceChecker
from .time_checker import TimeChecker
from .pass_checker import PassChecker
from .stop_recognizer import StopRecognizer
| true | true |
f7334ead8c61c9fbd98faca3010788cd7fcc2d1c | 3,490 | py | Python | test.py | Jovian-Dsouza/Avenger_FaceNet | e8bdffd017c9c27d4dc0f347f6992f760f1af5db | [
"Apache-2.0"
] | null | null | null | test.py | Jovian-Dsouza/Avenger_FaceNet | e8bdffd017c9c27d4dc0f347f6992f760f1af5db | [
"Apache-2.0"
] | null | null | null | test.py | Jovian-Dsouza/Avenger_FaceNet | e8bdffd017c9c27d4dc0f347f6992f760f1af5db | [
"Apache-2.0"
] | null | null | null | import os
import torch
import numpy as np
from torchvision import transforms
from torch import nn
from torch.nn import Softmax
from facenet_pytorch import MTCNN
from PIL import Image
import matplotlib.pyplot as plt
from loadOpenFace import prepareOpenFace
from collections import OrderedDict
import argparse
# Select the compute device: prefer the first CUDA GPU when one is available.
# Bug fix: the status messages misspelled "available" as "avialable".
useCuda = torch.cuda.is_available()
if useCuda:
    print('CUDA is available')
    device = torch.device('cuda:0')
else:
    print('CUDA is not available')
    device = torch.device('cpu')
def load_model_from_chk(chk_path):
    '''Load the face classifier and its label mapping from a checkpoint file.

    Returns a (classifier_model, idx_to_class) tuple on success, or None
    when the checkpoint file does not exist.  NOTE(review): callers that
    tuple-unpack the result will raise TypeError on the None case — confirm.
    '''
    try:
        # Load checkpoint (state dict + label mapping) onto the CPU first.
        checkpoint = torch.load(chk_path, map_location=torch.device('cpu'))
        idx_to_class = checkpoint['idx_to_class']
        # Load the OpenFace embedding network.
        model = prepareOpenFace(useCuda)
        model.eval()
        n_classes = len(idx_to_class)
        # Classifier = embedding network followed by a linear layer that maps
        # the 736-dim features to one logit per known identity.
        classifier_model = nn.Sequential(OrderedDict([
            ("nn4_small_v2", model),
            ("fc", nn.Linear(736, n_classes))
        ]))
        # Restore the trained parameters.
        classifier_model.load_state_dict(checkpoint['model_state_dict'])
        print("Model Loaded from %s" % chk_path)
        return classifier_model, idx_to_class
    except FileNotFoundError:
        print("Model checkpoint not found %s" % chk_path)
        return None
# Load MTCNN to detect, align and crop faces from input images.
mtcnn = MTCNN(
    image_size=160, margin=0, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,
    device=device
)
# Transformation applied to the cropped face image before classification.
face_transform = transforms.Compose([transforms.Resize(96),
                                    transforms.ToTensor()])
softmax = Softmax(dim=1)
# Load the trained classifier and move it to the selected device.
chk_path = 'models/AvengersClassifier.pth'
classifier_model, idx_to_class = load_model_from_chk(chk_path)
classifier_model = classifier_model.to(device)
classifier_model.eval()
def predict(img_path, prob_theshold = 0.9):
    """Print the predicted identity for the face found in the image at img_path.

    Predictions whose softmax probability falls below ``prob_theshold`` are
    reported as an unknown face.  (The misspelled parameter name is kept for
    backward compatibility with existing callers.)
    """
    try:
        img = Image.open(img_path)
    except FileNotFoundError:
        # Bug fix: this used to fail silently; report the missing file.
        print("ERROR, Could not open image %s" % img_path)
        return
    # Detect, crop and align the face with MTCNN.
    mtcnn_img = mtcnn(img.convert('RGB'))
    # MTCNN returns None when no face is detected.
    if mtcnn_img is None:
        # Bug fix: removed a stray plt.show() that was called here with no
        # figure ever created (leftover debug code).
        print("ERROR, Could not detect a face in image")
        return
    # Convert the CHW tensor back to a PIL image for the transform pipeline.
    mtcnn_img = Image.fromarray(np.array(mtcnn_img.permute(1, 2, 0).numpy(), dtype=np.uint8))
    # Resize, convert to tensor and add the batch dimension.
    mtcnn_img = face_transform(mtcnn_img).unsqueeze(0)
    mtcnn_img = mtcnn_img.to(device)
    with torch.no_grad():
        label = classifier_model(mtcnn_img)
        label = softmax(label)  # Convert the logits to probabilities
    prob, pred = label.data.max(1, keepdim=True)
    prob, pred = float(prob), int(pred)
    if prob < prob_theshold:
        print("UNKNOWN FACE, but similar to %s with %0.2f%% probability" %
              (idx_to_class[pred], 100 * prob))
    else:
        print("%s with %0.2f%% probability" %
              (idx_to_class[pred], 100 * prob))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Takes in image path and does prediction')
parser.add_argument('-p', '--path', help='Image path')
args = parser.parse_args()
img_path = args.path
print()
predict(img_path) | 30.347826 | 93 | 0.647851 | import os
import torch
import numpy as np
from torchvision import transforms
from torch import nn
from torch.nn import Softmax
from facenet_pytorch import MTCNN
from PIL import Image
import matplotlib.pyplot as plt
from loadOpenFace import prepareOpenFace
from collections import OrderedDict
import argparse
# Select the compute device: prefer the first CUDA GPU when one is available.
# Bug fix: the status messages misspelled "available" as "avialable".
useCuda = torch.cuda.is_available()
if useCuda:
    print('CUDA is available')
    device = torch.device('cuda:0')
else:
    print('CUDA is not available')
    device = torch.device('cpu')
def load_model_from_chk(chk_path):
    '''Load the face classifier and its label mapping from a checkpoint file.

    Returns a (classifier_model, idx_to_class) tuple on success, or None
    when the checkpoint file does not exist.  NOTE(review): callers that
    tuple-unpack the result will raise TypeError on the None case — confirm.
    '''
    try:
        # Load checkpoint (state dict + label mapping) onto the CPU first.
        checkpoint = torch.load(chk_path, map_location=torch.device('cpu'))
        idx_to_class = checkpoint['idx_to_class']
        # Load the OpenFace embedding network.
        model = prepareOpenFace(useCuda)
        model.eval()
        n_classes = len(idx_to_class)
        # Classifier = embedding network followed by a linear layer that maps
        # the 736-dim features to one logit per known identity.
        classifier_model = nn.Sequential(OrderedDict([
            ("nn4_small_v2", model),
            ("fc", nn.Linear(736, n_classes))
        ]))
        # Restore the trained parameters.
        classifier_model.load_state_dict(checkpoint['model_state_dict'])
        print("Model Loaded from %s" % chk_path)
        return classifier_model, idx_to_class
    except FileNotFoundError:
        print("Model checkpoint not found %s" % chk_path)
        return None
# Load MTCNN to detect, align and crop faces from input images.
mtcnn = MTCNN(
    image_size=160, margin=0, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,
    device=device
)
# Transformation applied to the cropped face image before classification.
face_transform = transforms.Compose([transforms.Resize(96),
                                    transforms.ToTensor()])
softmax = Softmax(dim=1)
# Load the trained classifier and move it to the selected device.
chk_path = 'models/AvengersClassifier.pth'
classifier_model, idx_to_class = load_model_from_chk(chk_path)
classifier_model = classifier_model.to(device)
classifier_model.eval()
def predict(img_path, prob_theshold = 0.9):
    """Print the predicted identity for the face found in the image at img_path.

    Predictions whose softmax probability falls below ``prob_theshold`` are
    reported as an unknown face.  (The misspelled parameter name is kept for
    backward compatibility with existing callers.)
    """
    try:
        img = Image.open(img_path)
    except FileNotFoundError:
        # Bug fix: this used to fail silently; report the missing file.
        print("ERROR, Could not open image %s" % img_path)
        return
    # Detect, crop and align the face with MTCNN.
    mtcnn_img = mtcnn(img.convert('RGB'))
    # MTCNN returns None when no face is detected.
    if mtcnn_img is None:
        # Bug fix: removed a stray plt.show() that was called here with no
        # figure ever created (leftover debug code).
        print("ERROR, Could not detect a face in image")
        return
    # Convert the CHW tensor back to a PIL image for the transform pipeline.
    mtcnn_img = Image.fromarray(np.array(mtcnn_img.permute(1, 2, 0).numpy(), dtype=np.uint8))
    # Resize, convert to tensor and add the batch dimension.
    mtcnn_img = face_transform(mtcnn_img).unsqueeze(0)
    mtcnn_img = mtcnn_img.to(device)
    with torch.no_grad():
        label = classifier_model(mtcnn_img)
        label = softmax(label)  # Convert the logits to probabilities
    prob, pred = label.data.max(1, keepdim=True)
    prob, pred = float(prob), int(pred)
    if prob < prob_theshold:
        print("UNKNOWN FACE, but similar to %s with %0.2f%% probability" %
              (idx_to_class[pred], 100 * prob))
    else:
        print("%s with %0.2f%% probability" %
              (idx_to_class[pred], 100 * prob))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Takes in image path and does prediction')
parser.add_argument('-p', '--path', help='Image path')
args = parser.parse_args()
img_path = args.path
print()
predict(img_path) | true | true |
f7334f70dc0f8370bceffc1326f413679a27ba14 | 2,531 | py | Python | nuplan/cli/test/test_nuplan_cli.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | [
"Apache-2.0"
] | 128 | 2021-12-06T15:41:14.000Z | 2022-03-29T13:16:32.000Z | nuplan/cli/test/test_nuplan_cli.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | [
"Apache-2.0"
] | 28 | 2021-12-11T08:11:31.000Z | 2022-03-25T02:35:43.000Z | nuplan/cli/test/test_nuplan_cli.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | [
"Apache-2.0"
] | 14 | 2021-12-11T04:12:26.000Z | 2022-03-24T06:38:30.000Z | import unittest
from unittest.mock import Mock, PropertyMock, patch
from typer.testing import CliRunner
from nuplan.cli.nuplan_cli import cli
runner = CliRunner()
class TestNuPlanCli(unittest.TestCase):
    """
    Tests for the nuPlan `db` CLI subcommands (typer engine) with a mocked NuPlanDB.
    """

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_info_info(self, mock_db: Mock) -> None:
        """Test `nuplan_cli.py db info`: prints the DB description and exits cleanly."""
        mock_db.return_value.__str__.return_value = "DB Info"
        result = runner.invoke(cli, ["db", "info"])
        mock_db.return_value.__str__.assert_called_once()
        self.assertEqual(result.exit_code, 0)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_duration(self, mock_db: Mock) -> None:
        """Test `nuplan_cli.py db duration`: 1000 lidar_pc samples -> 00:00:50."""
        # Let lidar_pc return a list of 1000 elements.
        mock_lidar_pc = PropertyMock(return_value=[1] * 1000)
        type(mock_db.return_value).lidar_pc = mock_lidar_pc
        # Invoke the tested cli command
        result = runner.invoke(cli, ["db", "duration"])
        # Expectations check
        mock_lidar_pc.assert_called_once()
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("00:00:50" in result.stdout)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_log_vehicle(self, mock_db: Mock) -> None:
        """Test `nuplan_cli.py db log-vehicle`: every log field appears in the output."""
        log_data = {"logfile": "SomeLog", "vehicle_name": "Voyager", "vehicle_type": "Spaceship"}
        mock_log = Mock(**log_data)
        mock_logs = PropertyMock(return_value=[mock_log])
        type(mock_db.return_value).log = mock_logs
        result = runner.invoke(cli, ["db", "log-vehicle"])
        mock_logs.assert_called_once()
        for data in log_data.values():
            self.assertTrue(data in result.stdout)
        self.assertEqual(result.exit_code, 0)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_scenarios(self, mock_db: Mock) -> None:
        """Test `nuplan_cli.py db scenarios`: per-tag scenario counts are printed."""
        mock_result = Mock()
        mock_result.distinct.return_value.all.return_value = ["A"]
        mock_db.return_value.session.query.return_value = mock_result
        mock_db.return_value.scenario_tag.select_many.return_value = [1, 2, 3]
        result = runner.invoke(cli, ["db", "scenarios"])
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("The available scenario tags from db:" in result.stdout)
        self.assertTrue("A has 3 scenarios" in result.stdout)
if __name__ == '__main__':
unittest.main()
| 34.671233 | 97 | 0.666535 | import unittest
from unittest.mock import Mock, PropertyMock, patch
from typer.testing import CliRunner
from nuplan.cli.nuplan_cli import cli
runner = CliRunner()
class TestNuPlanCli(unittest.TestCase):
    """Tests for the nuPlan `db` CLI subcommands (typer engine) with a mocked NuPlanDB."""

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_info_info(self, mock_db: Mock) -> None:
        """`db info` prints the DB description and exits cleanly."""
        mock_db.return_value.__str__.return_value = "DB Info"
        result = runner.invoke(cli, ["db", "info"])
        mock_db.return_value.__str__.assert_called_once()
        self.assertEqual(result.exit_code, 0)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_duration(self, mock_db: Mock) -> None:
        """`db duration` renders 1000 lidar_pc samples as 00:00:50."""
        mock_lidar_pc = PropertyMock(return_value=[1] * 1000)
        type(mock_db.return_value).lidar_pc = mock_lidar_pc
        result = runner.invoke(cli, ["db", "duration"])
        mock_lidar_pc.assert_called_once()
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("00:00:50" in result.stdout)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_log_vehicle(self, mock_db: Mock) -> None:
        """`db log-vehicle` prints every field of each log entry."""
        log_data = {"logfile": "SomeLog", "vehicle_name": "Voyager", "vehicle_type": "Spaceship"}
        mock_log = Mock(**log_data)
        mock_logs = PropertyMock(return_value=[mock_log])
        type(mock_db.return_value).log = mock_logs
        result = runner.invoke(cli, ["db", "log-vehicle"])
        mock_logs.assert_called_once()
        for data in log_data.values():
            self.assertTrue(data in result.stdout)
        self.assertEqual(result.exit_code, 0)

    @patch("nuplan.cli.db_cli.NuPlanDB")
    def test_db_cli_scenarios(self, mock_db: Mock) -> None:
        """`db scenarios` prints per-tag scenario counts."""
        mock_result = Mock()
        mock_result.distinct.return_value.all.return_value = ["A"]
        mock_db.return_value.session.query.return_value = mock_result
        mock_db.return_value.scenario_tag.select_many.return_value = [1, 2, 3]
        result = runner.invoke(cli, ["db", "scenarios"])
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("The available scenario tags from db:" in result.stdout)
        self.assertTrue("A has 3 scenarios" in result.stdout)
if __name__ == '__main__':
unittest.main()
| true | true |
f7334f715d4f0e5fc0a3870bd2e7dcc6b7e0418a | 366 | py | Python | apps/users/forms.py | stanicavuleta/heyposher | 4430bdfb025b0699e840e35cc63f5eb975abb586 | [
"Apache-2.0"
] | null | null | null | apps/users/forms.py | stanicavuleta/heyposher | 4430bdfb025b0699e840e35cc63f5eb975abb586 | [
"Apache-2.0"
] | null | null | null | apps/users/forms.py | stanicavuleta/heyposher | 4430bdfb025b0699e840e35cc63f5eb975abb586 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserChangeForm
from .models import CustomUser
class CustomUserChangeForm(UserChangeForm):
    """User-edit form for CustomUser that makes the email address mandatory."""

    # Override Django's default optional email field so it is required.
    email = forms.EmailField(required=True)

    class Meta:
        model = CustomUser
        fields = ('email', 'first_name', 'last_name')
class UploadAvatarForm(forms.Form):
    """Single-field form for uploading a user's avatar file."""

    avatar = forms.FileField()
| 22.875 | 53 | 0.73224 | from django import forms
from django.contrib.auth.forms import UserChangeForm
from .models import CustomUser
class CustomUserChangeForm(UserChangeForm):
    """User-edit form for CustomUser that makes the email address mandatory."""

    # Override Django's default optional email field so it is required.
    email = forms.EmailField(required=True)

    class Meta:
        model = CustomUser
        fields = ('email', 'first_name', 'last_name')
class UploadAvatarForm(forms.Form):
    """Single-field form for uploading a user's avatar file."""

    avatar = forms.FileField()
| true | true |
f7334f72765d730fd9037c4e43280ef5b91c38f5 | 8,026 | py | Python | config/settings/production.py | TobKed/penny_wise | 80f35eebf2ad8d062fd9f7cc66087f988377543d | [
"MIT"
] | null | null | null | config/settings/production.py | TobKed/penny_wise | 80f35eebf2ad8d062fd9f7cc66087f988377543d | [
"MIT"
] | null | null | null | config/settings/production.py | TobKed/penny_wise | 80f35eebf2ad8d062fd9f7cc66087f988377543d | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["penny-wise.men"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# STATIC
# ------------------------
STATICFILES_STORAGE = "config.settings.production.StaticRootS3Boto3Storage"
STATIC_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/"
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Penny Wise <noreply@penny-wise.men>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Penny Wise]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
| 39.536946 | 89 | 0.611139 | from .base import *
from .base import env
= env("DJANGO_SECRET_KEY")
= env.list("DJANGO_ALLOWED_HOSTS", default=["penny-wise.men"])
DATABASES["default"] = env.db("DATABASE_URL")
DATABASES["default"]["ATOMIC_REQUESTS"] = True
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
": True,
},
}
}
= ("HTTP_X_FORWARDED_PROTO", "https")
= env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
= True
= True
env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
= env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
PS += ["storages"]
SS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
ET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
AGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
YSTRING_AUTH = False
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# STATIC
# ------------------------
STATICFILES_STORAGE = "config.settings.production.StaticRootS3Boto3Storage"
STATIC_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/"
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Penny Wise <noreply@penny-wise.men>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Penny Wise]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
| true | true |
f7334f78e4a3398d17455abcc07112f2602903de | 738 | py | Python | src/101-symmetric-tree.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | 2 | 2020-03-06T11:44:25.000Z | 2020-03-13T20:07:48.000Z | src/101-symmetric-tree.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | null | null | null | src/101-symmetric-tree.py | sahilrider/LeetCode-Solutions | 9cac844c27b5dbf37a70c2981a09cd92457f7ff1 | [
"MIT"
] | null | null | null | '''https://leetcode.com/problems/symmetric-tree/'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isMirror(self, left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val==right.val:
return self.isMirror(left.left, right.right) and self.isMirror(left.right, right.left)
else:
return False
def isSymmetric(self, root: TreeNode) -> bool:
if not root:
return True
return self.isMirror(root.left, root.right)
| 29.52 | 98 | 0.575881 |
class Solution:
def isMirror(self, left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val==right.val:
return self.isMirror(left.left, right.right) and self.isMirror(left.right, right.left)
else:
return False
def isSymmetric(self, root: TreeNode) -> bool:
if not root:
return True
return self.isMirror(root.left, root.right)
| true | true |
f7334f886023c4cf8c863c1eb15747ac6f2c0f0c | 36,240 | py | Python | ssd_keras/ssd7_training_inferenceonvideo.py | ajinkyakhoche/Object-Detection-Project | 3964fd5b445957581205478bb46db58fba3a9fc3 | [
"MIT"
] | null | null | null | ssd_keras/ssd7_training_inferenceonvideo.py | ajinkyakhoche/Object-Detection-Project | 3964fd5b445957581205478bb46db58fba3a9fc3 | [
"MIT"
] | null | null | null | ssd_keras/ssd7_training_inferenceonvideo.py | ajinkyakhoche/Object-Detection-Project | 3964fd5b445957581205478bb46db58fba3a9fc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""ssd7_training_inferenceonvideo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gMZm_sCuKq7g_cZIGfEcYyKoVw-U7jTX
"""
# from IPython.display import clear_output
# ! rm -rf *
# ! wget -O repo.zip https://github.com/pierluigiferrari/ssd_keras/archive/master.zip
# ! unzip -o repo.zip
# ! mv ssd_keras-master/* .
# ! pip install tqdm
# ! rm -rf ssd_keras-master
# clear_output()
# ! wget https://drive.google.com/uc?export=download&confirm=m0XG&id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D
# ! rm *.md *.ipynb *.txt *.zip
# ! ls
"""# SSD7 Training Tutorial
This tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.
Disclaimer about SSD7:
As you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.
"""
from keras.optimizers import Adam , SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
# %matplotlib inline
"""## 1. Set the model configuration parameters
The cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to to match ground truth and anchor boxes during the training.
Here are just some comments on a few of the parameters, read the documentation for more details:
* Set the height, width, and number of color channels to whatever you want the model to accept as image input. If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. to convert your images to the model input size during training. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.
* The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.
* The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. The difference between latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.
* The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in tSSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.
* `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer. This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.
* If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.
* If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries. It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.
* In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.
* `normalize_coords` converts all coordinates from absolute coordinate to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.
"""
img_height = 300 # Height of the input images
img_width = 480 # Width of the input images
img_channels = 3 # Number of color channels of the input images
intensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
intensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
n_classes = 5 # Number of positive classes
scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes
two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1
steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
offsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled
normalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size
"""## 2. Build or load the model
You will want to execute either of the two code cells in the subsequent two sub-sections, not both.
### 2.1 Create a new model
If you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.
The code cell below does the following things:
1. It calls the function `build_model()` to build the model.
2. It optionally loads some weights into the model.
3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.
`SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper.
"""
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
model = build_model(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_global=aspect_ratios,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=intensity_mean,
divide_by_stddev=intensity_range)
# 2: Optional: Load some weights
#model.load_weights('./ssd7_weights.h5', by_name=True)
# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False) # Recommed to fix bug [https://github.com/pierluigiferrari/ssd_keras/issues/84]
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
"""### 2.2 Load a saved model
If you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.
The SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.
This next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.
"""
LOAD_MODEL = True
if LOAD_MODEL:
# TODO: Set the path to the `.h5` file of the model to be loaded.
model_path = '../udacity_data/SavedModels/training1/ssd7_epoch-05_loss-2.5061_val_loss-2.5454.h5'
# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
K.clear_session() # Clear previous models from memory.
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
'compute_loss': ssd_loss.compute_loss})
"""## 3. Set up the data generators for the training
The code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.
Note that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.
Set the batch size to to your preference and to what your GPU memory allows, it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.
The `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.
The image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for unform-size images could look like. Feel free to put together other image processing chains, you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiting an `SSDDataAugmentation` object and passing that to the generator instead. This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.
An `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.
### Note:
The example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).
"""
# ! wget --header 'Host: doc-08-64-docs.googleusercontent.com' --user-agent 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-GB,en;q=0.5' --referer 'https://drive.google.com/uc?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D&export=download' --header 'Cookie: AUTH_jnah6s13kkbb9peqjnhhrvs24bcqfb6v=06338804252926118732|1535551200000|ag6qrtoegj3b578klq9mv59em3e2u2ll' --header 'Upgrade-Insecure-Requests: 1' 'https://doc-08-64-docs.googleusercontent.com/docs/securesc/dbqrqv6dp9ts3hf02kejajr0k5nf0854/g19v9tjp4on3gskf6gjiibmlmfk52r5q/1535551200000/01021765827329596762/06338804252926118732/1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D?e=download' --output-document 'udacity_driving_datasets.zip'
# ! unzip udacity_driving_datasets.zip
# #clear_output()
# ! rm *.zip
# ! ls
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets.
# TODO: Set the paths to your dataset here.
# Images
images_dir = '../udacity_data/udacity_driving_datasets/'
# Ground truth
train_labels_filename = '../udacity_data/udacity_driving_datasets/labels_train.csv'
val_labels_filename = '../udacity_data/udacity_driving_datasets/labels_val.csv'
train_dataset.parse_csv(images_dir=images_dir,
labels_filename=train_labels_filename,
input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
include_classes='all')
val_dataset.parse_csv(images_dir=images_dir,
labels_filename=val_labels_filename,
input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
include_classes='all')
# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will
# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`
# option in the constructor, because in that cas the images are in memory already anyway. If you don't
# want to create HDF5 datasets, comment out the subsequent two function calls.
#train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
#val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# 3: Set the batch size.
batch_size = 16
# 4: Define the image processing chain.
data_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),
random_contrast=(0.5, 1.8, 0.5),
random_saturation=(0.5, 1.8, 0.5),
random_hue=(18, 0.5),
random_flip=0.5,
random_translate=((0.03,0.5), (0.03,0.5), 0.5),
random_scale=(0.5, 2.0, 0.5),
n_trials_max=3,
clip_boxes=True,
overlap_criterion='area',
bounds_box_filter=(0.3, 1.0),
bounds_validator=(0.5, 1.0),
n_boxes_min=1,
background=(0,0,0))
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('classes4').output_shape[1:3],
model.get_layer('classes5').output_shape[1:3],
model.get_layer('classes6').output_shape[1:3],
model.get_layer('classes7').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_global=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[data_augmentation_chain],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
"""## 4. Set the remaining training parameters and train the model
We've already chosen an optimizer and a learning rate and set the batch size above, now let's set the remaining training parameters.
I'll set a few Keras callbacks below, one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Feel free to add more callbacks if you want TensorBoard summaries or whatever.
"""
# Define model callbacks.
# TODO: Set the filepath under which you want to save the weights.
model_checkpoint = ModelCheckpoint(filepath='../udacity_data/SavedModels/training2/ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(filename='ssd7_training_log.csv',
separator=',',
append=True)
early_stopping = EarlyStopping(monitor='val_loss',
min_delta=0.0,
patience=10,
verbose=1)
reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=8,
verbose=1,
epsilon=0.001,
cooldown=0,
min_lr=0.00001)
callbacks = [model_checkpoint,
csv_logger,
early_stopping,
reduce_learning_rate]
"""I'll set one epoch to consist of 1,000 training steps I'll arbitrarily set the number of epochs to 20 here. This does not imply that 20,000 training steps is the right number. Depending on the model, the dataset, the learning rate, etc. you might have to train much longer to achieve convergence, or maybe less.
Instead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.
In order to only run a partial training and resume smoothly later on, there are a few things you should note:
1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.
2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.
3. Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want ot save their state somehow if you want to pick up a training exactly where you left off.
"""
# TODO: Set the epochs to train for.
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 25
steps_per_epoch = 1000
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
"""Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:"""
plt.figure(figsize=(20,12))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend(loc='upper right', prop={'size': 24});
"""The validation loss has been decreasing at a similar pace as the training loss, indicating that our model has been learning effectively over the last 30 epochs. We could try to train longer and see if the validation loss can be decreased further. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we will want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss.
### 5. Make predictions
Now let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator which we've already set up above. Feel free to change the batch size.
You can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.
"""
# 1: Set the generator for the predictions.
predict_generator = val_dataset.generate(batch_size=1,
shuffle=True,
transformations=[],
label_encoder=None,
returns={'processed_images',
'processed_labels',
'filenames'},
keep_images_without_gt=False)
# 2: Generate samples
batch_images, batch_labels, batch_filenames = next(predict_generator)
i = 0 # Which batch item to look at
print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(batch_labels[i])
# 3: Make a prediction
y_pred = model.predict(batch_images)
"""Now let's decode the raw predictions in `y_pred`.
Had we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).
`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.
"""
# 4: Decode the raw prediction `y_pred`
y_pred_decoded = decode_detections(y_pred,
confidence_thresh=0.5,
iou_threshold=0.45,
top_k=200,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded[i])
"""Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison."""
# 5: Draw the predicted boxes onto the image
plt.figure(figsize=(20,12))
plt.imshow(batch_images[i])
current_axis = plt.gca()
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes
classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs
# Draw the ground truth boxes in green (omit the label for more clarity)
for box in batch_labels[i]:
xmin = box[1]
ymin = box[2]
xmax = box[3]
ymax = box[4]
label = '{}'.format(classes[int(box[0])])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
#current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})
# Draw the predicted boxes in blue
for box in y_pred_decoded[i]:
xmin = box[-4]
ymin = box[-3]
xmax = box[-2]
ymax = box[-1]
color = colors[int(box[0])]
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
# !pip install pytube==9.1.0
# from pytube import YouTube
# YouTube('https://www.youtube.com/watch?v=_Ad7Co55alM').streams.first().download()
# ! ls
# ! mkdir output_frames
# Offline video processing
# i = 0
# cap = cv2.VideoCapture('test_videos/Driving in Russia 4K video Car driving in winter.mp4')
# width = int(cap.get(3))
# height = int(cap.get(4))
# property_id = int(cv2.CAP_PROP_FRAME_COUNT)
# fps = cap.get(cv2.CAP_PROP_FPS)
# total_frames = int(cv2.VideoCapture.get(cap, property_id))
# # Define the codec and create VideoWriter object
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, fps, (width,height))
# # Read until video is completed
# for j in range(total_frames):
# print(str(j)+'/'+str(total_frames))
# # Capture frame-by-frame
# ret, frame = cap.read()
# if ret == True:
# frame = frame[...,::-1]
# frame_resized = cv2.resize(frame, (480, 300))
# frame_tensor = np.expand_dims(frame_resized, axis=0)
# y_pred = model.predict(frame_tensor)
# y_pred_decoded = decode_detections(y_pred,
# confidence_thresh=0.5,
# iou_threshold=0.45,
# top_k=200,
# normalize_coords=normalize_coords,
# img_height=img_height,
# img_width=img_width)
# plt.figure(figsize=(20,12))
# plt.imshow(frame_resized)
# current_axis = plt.gca()
# colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes
# classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs
# # Draw the predicted boxes in blue
# for box in y_pred_decoded[i]:
# xmin = box[-4]
# ymin = box[-3]
# xmax = box[-2]
# ymax = box[-1]
# color = colors[int(box[0])]
# label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
# plt.savefig('output_frames/video_frame'+str(j)+'.png')
# plt.close('all')
# if j % 10 == 0:
# clear_output()
# # Break the loop
# else:
# break
# out.release()
# cap.release() | 67.865169 | 1,322 | 0.690977 |
from keras.optimizers import Adam , SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
img_height = 300
img_width = 480
img_channels = 3
intensity_mean = 127.5
intensity_range = 127.5
n_classes = 5
scales = [0.08, 0.16, 0.32, 0.64, 0.96]
aspect_ratios = [0.5, 1.0, 2.0]
two_boxes_for_ar1 = True
steps = None
offsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended
clip_boxes = False
variances = [1.0, 1.0, 1.0, 1.0]
normalize_coords = True
K.clear_session()
model = build_model(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_global=aspect_ratios,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=intensity_mean,
divide_by_stddev=intensity_range)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
LOAD_MODEL = True
if LOAD_MODEL:
model_path = '../udacity_data/SavedModels/training1/ssd7_epoch-05_loss-2.5061_val_loss-2.5454.h5'
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
K.clear_session()
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
'compute_loss': ssd_loss.compute_loss})
ataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
images_dir = '../udacity_data/udacity_driving_datasets/'
train_labels_filename = '../udacity_data/udacity_driving_datasets/labels_train.csv'
val_labels_filename = '../udacity_data/udacity_driving_datasets/labels_val.csv'
train_dataset.parse_csv(images_dir=images_dir,
labels_filename=train_labels_filename,
input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
include_classes='all')
val_dataset.parse_csv(images_dir=images_dir,
labels_filename=val_labels_filename,
input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
include_classes='all')
# want to create HDF5 datasets, comment out the subsequent two function calls.
#train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
#val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# 3: Set the batch size.
batch_size = 16
# 4: Define the image processing chain.
data_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),
random_contrast=(0.5, 1.8, 0.5),
random_saturation=(0.5, 1.8, 0.5),
random_hue=(18, 0.5),
random_flip=0.5,
random_translate=((0.03,0.5), (0.03,0.5), 0.5),
random_scale=(0.5, 2.0, 0.5),
n_trials_max=3,
clip_boxes=True,
overlap_criterion='area',
bounds_box_filter=(0.3, 1.0),
bounds_validator=(0.5, 1.0),
n_boxes_min=1,
background=(0,0,0))
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('classes4').output_shape[1:3],
model.get_layer('classes5').output_shape[1:3],
model.get_layer('classes6').output_shape[1:3],
model.get_layer('classes7').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_global=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=normalize_coords)
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[data_augmentation_chain],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Define model callbacks.
# TODO: Set the filepath under which you want to save the weights.
model_checkpoint = ModelCheckpoint(filepath='../udacity_data/SavedModels/training2/ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(filename='ssd7_training_log.csv',
separator=',',
append=True)
early_stopping = EarlyStopping(monitor='val_loss',
min_delta=0.0,
patience=10,
verbose=1)
reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=8,
verbose=1,
epsilon=0.001,
cooldown=0,
min_lr=0.00001)
callbacks = [model_checkpoint,
csv_logger,
early_stopping,
reduce_learning_rate]
# TODO: Set the epochs to train for.
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 25
steps_per_epoch = 1000
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
plt.figure(figsize=(20,12))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend(loc='upper right', prop={'size': 24});
predict_generator = val_dataset.generate(batch_size=1,
shuffle=True,
transformations=[],
label_encoder=None,
returns={'processed_images',
'processed_labels',
'filenames'},
keep_images_without_gt=False)
batch_images, batch_labels, batch_filenames = next(predict_generator)
i = 0
print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(batch_labels[i])
y_pred = model.predict(batch_images)
y_pred_decoded = decode_detections(y_pred,
confidence_thresh=0.5,
iou_threshold=0.45,
top_k=200,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded[i])
plt.figure(figsize=(20,12))
plt.imshow(batch_images[i])
current_axis = plt.gca()
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()
classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light']
for box in batch_labels[i]:
xmin = box[1]
ymin = box[2]
xmax = box[3]
ymax = box[4]
label = '{}'.format(classes[int(box[0])])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
for box in y_pred_decoded[i]:
xmin = box[-4]
ymin = box[-3]
xmax = box[-2]
ymax = box[-1]
color = colors[int(box[0])]
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
| true | true |
f7334fe02b877f67eebc0bb6a56cb5af31511e41 | 5,547 | py | Python | test/test_get_payouts_response_v3_page.py | velopaymentsapi/velo-python | 59b39555e9714139b4bf697151cc7d15f6dd510e | [
"Apache-2.0"
] | null | null | null | test/test_get_payouts_response_v3_page.py | velopaymentsapi/velo-python | 59b39555e9714139b4bf697151cc7d15f6dd510e | [
"Apache-2.0"
] | null | null | null | test/test_get_payouts_response_v3_page.py | velopaymentsapi/velo-python | 59b39555e9714139b4bf697151cc7d15f6dd510e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. 
Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import velo_payments
from velo_payments.models.get_payouts_response_v3_page import GetPayoutsResponseV3Page # noqa: E501
from velo_payments.rest import ApiException
class TestGetPayoutsResponseV3Page(unittest.TestCase):
"""GetPayoutsResponseV3Page unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetPayoutsResponseV3Page(self):
"""Test GetPayoutsResponseV3Page"""
# FIXME: construct object with mandatory attributes with example values
# model = velo_payments.models.get_payouts_response_v3_page.GetPayoutsResponseV3Page() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 138.675 | 4,651 | 0.777898 |
from __future__ import absolute_import
import unittest
import velo_payments
from velo_payments.models.get_payouts_response_v3_page import GetPayoutsResponseV3Page
from velo_payments.rest import ApiException
class TestGetPayoutsResponseV3Page(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGetPayoutsResponseV3Page(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f733545472eb5a7e9abeb9f44923f599f25b5296 | 2,256 | py | Python | niapy/problems/zakharov.py | altaregos/NiaPy | 74f1b2827778d9086603f4a8cb523f6b5537212a | [
"MIT"
] | 202 | 2018-02-06T12:13:42.000Z | 2022-03-18T16:33:20.000Z | niapy/problems/zakharov.py | altaregos/NiaPy | 74f1b2827778d9086603f4a8cb523f6b5537212a | [
"MIT"
] | 262 | 2018-02-06T14:49:15.000Z | 2022-03-25T19:49:46.000Z | niapy/problems/zakharov.py | altaregos/NiaPy | 74f1b2827778d9086603f4a8cb523f6b5537212a | [
"MIT"
] | 136 | 2018-02-06T16:55:32.000Z | 2022-03-05T17:49:52.000Z | # encoding=utf8
"""Implementations of Zakharov function."""
import numpy as np
from niapy.problems.problem import Problem
__all__ = ['Zakharov']
class Zakharov(Problem):
r"""Implementations of Zakharov functions.
Date: 2018
Author: Klemen Berkovič
License: MIT
Function:
**Zakharov Function**
:math:`f(\textbf{x}) = \sum_{i = 1}^D x_i^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^4`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-5, 10]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:**
:math:`f(\textbf{x}^*) = 0` at :math:`\textbf{x}^* = (0, \cdots, 0)`
LaTeX formats:
Inline:
$f(\textbf{x}) = \sum_{i = 1}^D x_i^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^4$
Equation:
\begin{equation} f(\textbf{x}) = \sum_{i = 1}^D x_i^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^4 \end{equation}
Domain:
$-5 \leq x_i \leq 10$
Reference:
https://www.sfu.ca/~ssurjano/zakharov.html
"""
def __init__(self, dimension=4, lower=-5.0, upper=10.0, *args, **kwargs):
r"""Initialize Zakharov problem..
Args:
dimension (Optional[int]): Dimension of the problem.
lower (Optional[Union[float, Iterable[float]]]): Lower bounds of the problem.
upper (Optional[Union[float, Iterable[float]]]): Upper bounds of the problem.
See Also:
:func:`niapy.problems.Problem.__init__`
"""
super().__init__(dimension, lower, upper, *args, **kwargs)
@staticmethod
def latex_code():
r"""Return the latex code of the problem.
Returns:
str: Latex code.
"""
return r'''$f(\textbf{x}) = \sum_{i = 1}^D x_i^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^4$'''
def _evaluate(self, x):
sum1 = np.sum(x * x)
sum2 = np.sum(0.5 * np.arange(1, self.dimension + 1) * x)
return sum1 + sum2 ** 2 + sum2 ** 4
| 30.486486 | 168 | 0.551862 |
import numpy as np
from niapy.problems.problem import Problem
__all__ = ['Zakharov']
class Zakharov(Problem):
    """Implementation of the Zakharov benchmark function.

    Unimodal benchmark; global minimum f(x*) = 0 at x* = (0, ..., 0).
    Reference: https://www.sfu.ca/~ssurjano/zakharov.html
    """

    def __init__(self, dimension=4, lower=-5.0, upper=10.0, *args, **kwargs):
        """Initialize Zakharov problem with dimension and bounds."""
        super().__init__(dimension, lower, upper, *args, **kwargs)

    @staticmethod
    def latex_code():
        """Return the inline LaTeX form of the Zakharov function."""
        return r'''$f(\textbf{x}) = \sum_{i = 1}^D x_i^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^2 + \left( \sum_{i = 1}^D 0.5 i x_i \right)^4$'''

    def _evaluate(self, x):
        # f(x) = sum(x_i^2) + (sum(0.5*i*x_i))^2 + (sum(0.5*i*x_i))^4
        sum1 = np.sum(x * x)
        sum2 = np.sum(0.5 * np.arange(1, self.dimension + 1) * x)
        return sum1 + sum2 ** 2 + sum2 ** 4
| true | true |
f733551a5bced026ab87f85689f3f09e21b07880 | 4,161 | py | Python | searchlight-6.0.0/searchlight/tests/unit/test_ironic_node_plugin.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | searchlight-6.0.0/searchlight/tests/unit/test_ironic_node_plugin.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | searchlight-6.0.0/searchlight/tests/unit/test_ironic_node_plugin.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from ironicclient import exceptions as ironic_exc
from keystoneclient import exceptions as keystone_exc
import searchlight.elasticsearch.plugins.ironic as ironic_plugin
from searchlight.elasticsearch.plugins.ironic import nodes as nodes_plugin
from searchlight.elasticsearch.plugins.ironic import resources as ir_resources
from searchlight.elasticsearch.plugins import openstack_clients
import searchlight.tests.utils as test_utils
NODE_UUID = "1be26c0b-03f2-4d2e-ae87-c02d7f33c123"
# Fixed: the original had a trailing comma after the closing brace, which
# accidentally made NODE_PROPERTIES a 1-tuple wrapping the dict rather than
# the node's hardware-properties dict itself.
NODE_PROPERTIES = {"memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10,
                   "cpus": 8}
def _create_node_fixture():
    """Build a dict that mimics an Ironic node API payload for the tests."""
    return {
        "chassis_uuid": "db0eef9d-45b2-4dc0-94a8-fc283c01171f",
        "clean_step": None,
        "console_enabled": False,
        "created_at": "2016-01-26T20:41:03+00:00",
        "driver": "fake",
        "driver_info": {"host": "192.168.0.111"},
        "extra": {},
        "inspection_finished_at": None,
        "inspection_started_at": None,
        "instance_info": {},
        "instance_uuid": None,
        "last_error": None,
        "maintenance": False,
        "maintenance_reason": None,
        "network_interface": "flat",
        "name": None,
        "power_state": "power off",
        "properties": NODE_PROPERTIES,
        "provision_state": "deploying",
        "provision_updated_at": "2016-01-27T20:41:03+00:00",
        "resource_class": None,
        "target_power_state": None,
        "target_provision_state": "active",
        "updated_at": "2016-01-27T20:41:03+00:00",
        "uuid": NODE_UUID,
    }
class TestNodeLoaderPlugin(test_utils.BaseTestCase):
    """Unit tests for the Searchlight Ironic node indexing plugin."""

    def setUp(self):
        super(TestNodeLoaderPlugin, self).setUp()
        self.plugin = nodes_plugin.NodeIndex()

    def test_default_index_name(self):
        self.assertEqual('searchlight', self.plugin.resource_group_name)

    def test_document_type(self):
        self.assertEqual('OS::Ironic::Node',
                         self.plugin.get_document_type())

    def test_rbac_filter(self):
        # Nodes have no per-tenant RBAC filters.
        rbac = self.plugin._get_rbac_field_filters({})
        self.assertEqual([], rbac)

    def test_admin_only_fields(self):
        admin_only_fields = self.plugin.admin_only_fields
        self.assertEqual([], admin_only_fields)

    def test_document_id(self):
        self.assertEqual('uuid', self.plugin.get_document_id_field())

    def test_facets_with_options(self):
        expected = ('power_state', 'target_power_state', 'provision_state',
                    'target_provision_state', 'maintenance', 'console_enabled')
        self.assertEqual(expected, self.plugin.facets_with_options)

    def test_serialize(self):
        serialized = ironic_plugin.serialize_resource(_create_node_fixture(),
                                                      ir_resources.NODE_FIELDS)
        # id cloned from uuid
        self.assertEqual(NODE_UUID, serialized['id'])
        # if name is not set it's uuid
        self.assertEqual(NODE_UUID, serialized['name'])
        # properties remapped to node_properties
        self.assertEqual(NODE_PROPERTIES, serialized['node_properties'])
        self.assertNotIn('properties', serialized)

    def test_service_not_present_exception(self):
        # An Ironic client auth failure should surface as EndpointNotFound.
        with mock.patch.object(openstack_clients, '_get_session'):
            with mock.patch('ironicclient.client.get_client') as ironic_cl:
                ironic_cl.side_effect = ironic_exc.AmbiguousAuthSystem
                self.assertRaises(keystone_exc.EndpointNotFound,
                                  openstack_clients.get_ironicclient)
| 38.88785 | 79 | 0.674357 |
import mock
from ironicclient import exceptions as ironic_exc
from keystoneclient import exceptions as keystone_exc
import searchlight.elasticsearch.plugins.ironic as ironic_plugin
from searchlight.elasticsearch.plugins.ironic import nodes as nodes_plugin
from searchlight.elasticsearch.plugins.ironic import resources as ir_resources
from searchlight.elasticsearch.plugins import openstack_clients
import searchlight.tests.utils as test_utils
NODE_UUID = "1be26c0b-03f2-4d2e-ae87-c02d7f33c123"
# Fixed: dropped the stray trailing comma that turned NODE_PROPERTIES into a
# 1-tuple wrapping the dict; it is meant to be the properties dict itself.
NODE_PROPERTIES = {"memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10,
                   "cpus": 8}
def _create_node_fixture():
    """Return a fake Ironic node payload used by the serializer tests."""
    return dict(
        chassis_uuid="db0eef9d-45b2-4dc0-94a8-fc283c01171f",
        clean_step=None,
        console_enabled=False,
        created_at="2016-01-26T20:41:03+00:00",
        driver="fake",
        driver_info={"host": "192.168.0.111"},
        extra={},
        inspection_finished_at=None,
        inspection_started_at=None,
        instance_info={},
        instance_uuid=None,
        last_error=None,
        maintenance=False,
        maintenance_reason=None,
        network_interface="flat",
        name=None,
        power_state="power off",
        properties=NODE_PROPERTIES,
        provision_state="deploying",
        provision_updated_at="2016-01-27T20:41:03+00:00",
        resource_class=None,
        target_power_state=None,
        target_provision_state="active",
        updated_at="2016-01-27T20:41:03+00:00",
        uuid=NODE_UUID,
    )
class TestNodeLoaderPlugin(test_utils.BaseTestCase):
    """Unit tests for the Searchlight Ironic node indexing plugin."""

    def setUp(self):
        super(TestNodeLoaderPlugin, self).setUp()
        self.plugin = nodes_plugin.NodeIndex()

    def test_default_index_name(self):
        self.assertEqual('searchlight', self.plugin.resource_group_name)

    def test_document_type(self):
        self.assertEqual('OS::Ironic::Node',
                         self.plugin.get_document_type())

    def test_rbac_filter(self):
        # Nodes have no per-tenant RBAC filters.
        rbac = self.plugin._get_rbac_field_filters({})
        self.assertEqual([], rbac)

    def test_admin_only_fields(self):
        admin_only_fields = self.plugin.admin_only_fields
        self.assertEqual([], admin_only_fields)

    def test_document_id(self):
        self.assertEqual('uuid', self.plugin.get_document_id_field())

    def test_facets_with_options(self):
        expected = ('power_state', 'target_power_state', 'provision_state',
                    'target_provision_state', 'maintenance', 'console_enabled')
        self.assertEqual(expected, self.plugin.facets_with_options)

    def test_serialize(self):
        serialized = ironic_plugin.serialize_resource(_create_node_fixture(),
                                                      ir_resources.NODE_FIELDS)
        # id and (unset) name are both cloned from uuid.
        self.assertEqual(NODE_UUID, serialized['id'])
        self.assertEqual(NODE_UUID, serialized['name'])
        # properties remapped to node_properties
        self.assertEqual(NODE_PROPERTIES, serialized['node_properties'])
        self.assertNotIn('properties', serialized)

    def test_service_not_present_exception(self):
        # An Ironic client auth failure should surface as EndpointNotFound.
        with mock.patch.object(openstack_clients, '_get_session'):
            with mock.patch('ironicclient.client.get_client') as ironic_cl:
                ironic_cl.side_effect = ironic_exc.AmbiguousAuthSystem
                self.assertRaises(keystone_exc.EndpointNotFound,
                                  openstack_clients.get_ironicclient)
| true | true |
f733559859faaadf7e100706f9518dac784382a7 | 13,000 | py | Python | official/nlp/modeling/networks/packed_sequence_embedding.py | patrickbook/models | 718fb2c0d478ab6c9906a3dbf44099942a2c6426 | [
"Apache-2.0"
] | 15 | 2018-08-15T19:29:39.000Z | 2021-11-05T02:14:59.000Z | official/nlp/modeling/networks/packed_sequence_embedding.py | patrickbook/models | 718fb2c0d478ab6c9906a3dbf44099942a2c6426 | [
"Apache-2.0"
] | null | null | null | official/nlp/modeling/networks/packed_sequence_embedding.py | patrickbook/models | 718fb2c0d478ab6c9906a3dbf44099942a2c6426 | [
"Apache-2.0"
] | 8 | 2019-06-06T20:37:15.000Z | 2022-03-04T13:54:38.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An embedding network supporting packed sequences and position ids."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp import keras_nlp
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceEmbedding(tf.keras.Model):
  """An embedding network supporting packed sequences and position ids.

  This network implements an embedding layer similar to the one described in
  "BERT: Pre-training of Deep Bidirectional Transformers for Language
  Understanding" (https://arxiv.org/abs/1810.04805). On top of it, it supports
  to (1) pack multiple sequences into one sequence and (2) allow additional
  "position_ids" as input.

  Args:
    vocab_size: The size of the token vocabulary.
    type_vocab_size: The size of the type vocabulary.
    embedding_width: Width of token embeddings.
    hidden_size: The output size for this encoder.
    max_seq_length: The maximum sequence length for this encoder.
    initializer: The initializer for the embedding portion of this encoder.
    dropout_rate: The dropout rate to apply before the encoding layers.
    pack_multiple_sequences: If True, we can feed multiple sequences into one
      sequence for training and inference (they don't impact each other).
    use_position_id: Whether to expect `position_ids` as an input to the
      network. If False, the `position_ids` will be inferred: (1) when
      pack_multiple_sequences is False, we assume the position ids are 0, 1,
      2, ..., seq_length - 1; (2) when pack_multiple_sequences is True, there
      may be multiple sub sequences, and for each sub sequence, its position
      ids start from 0, 1, 2, ...
  """

  def __init__(self,
               vocab_size,
               type_vocab_size,
               embedding_width,
               hidden_size,
               max_seq_length,
               initializer,
               dropout_rate,
               use_position_id=False,
               pack_multiple_sequences=False,
               **kwargs):
    initializer = tf.keras.initializers.get(initializer)
    # Constructor arguments captured for get_config()/from_config().
    config_dict = {
        'vocab_size': vocab_size,
        'type_vocab_size': type_vocab_size,
        'embedding_width': embedding_width,
        'hidden_size': hidden_size,
        'max_seq_length': max_seq_length,
        'initializer': tf.keras.initializers.serialize(initializer),
        'dropout_rate': dropout_rate,
        'use_position_id': use_position_id,
        'pack_multiple_sequences': pack_multiple_sequences,
    }
    word_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_word_ids')
    mask = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_mask')
    type_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_type_ids')
    inputs = {
        'input_word_ids': word_ids,
        'input_mask': mask,
        'input_type_ids': type_ids,
    }
    if use_position_id:
      position_ids = tf.keras.layers.Input(
          shape=(None,), dtype=tf.int32, name='position_ids')
      inputs['position_ids'] = position_ids
    else:
      position_ids = None
    if pack_multiple_sequences:
      sub_seq_mask = PackedSequenceMask()(word_ids)
    else:
      sub_seq_mask = None
    embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=vocab_size,
        embedding_width=embedding_width,
        initializer=initializer,
        name='word_embeddings')
    word_embeddings = embedding_layer(word_ids)
    # Always uses dynamic slicing for simplicity.
    position_embedding_layer = PositionEmbeddingWithSubSeqMask(
        initializer=initializer,
        use_dynamic_slicing=True,
        max_sequence_length=max_seq_length,
        name='position_embedding')
    position_embeddings = position_embedding_layer(
        word_embeddings, position_ids, sub_seq_mask)
    type_embeddings = (
        layers.OnDeviceEmbedding(
            vocab_size=type_vocab_size,
            embedding_width=embedding_width,
            initializer=initializer,
            use_one_hot=True,
            name='type_embeddings')(type_ids))
    embeddings = tf.keras.layers.Add()(
        [word_embeddings, position_embeddings, type_embeddings])
    embeddings = tf.keras.layers.LayerNormalization(
        name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(
            embeddings)
    embeddings = tf.keras.layers.Dropout(
        rate=dropout_rate, dtype=tf.float32)(
            embeddings)
    if embedding_width != hidden_size:
      # Project factorized embeddings up to the encoder hidden size.
      embeddings = tf.keras.layers.experimental.EinsumDense(
          '...x,xy->...y',
          output_shape=hidden_size,
          bias_axes=None,
          kernel_initializer=initializer,
          name='embedding_projection')(
              embeddings)
    attention_mask = keras_nlp.layers.SelfAttentionMask()(embeddings, mask)
    if sub_seq_mask is not None:
      # Zero out attention between tokens of different sub sequences.
      attention_mask = tf.keras.layers.Lambda(
          lambda x: x[0] * tf.cast(x[1], x[0].dtype))(
              [attention_mask, sub_seq_mask])
    outputs = [embeddings, attention_mask]
    super(PackedSequenceEmbedding, self).__init__(
        inputs=inputs, outputs=outputs, **kwargs)
    # TF does not track immutable attrs which do not contain Trackables,
    # so by creating a config namedtuple instead of a dict we avoid tracking it.
    config_cls = collections.namedtuple('Config', config_dict.keys())
    self._config = config_cls(**config_dict)
    self._embedding_layer = embedding_layer
    self._position_embedding_layer = position_embedding_layer

  def get_embedding_table(self):
    """Return the word-embedding weight matrix."""
    return self._embedding_layer.embeddings

  def get_config(self):
    return dict(self._config._asdict())

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceMask(tf.keras.layers.Layer):
  """Builds a boolean mask marking tokens that share a sub sequence.

  Assumes the first token of the parent sequence is the start token
  (e.g. [CLS]), that every packed sub sequence starts with that token,
  and that each sub sequence contains it exactly once.
  """

  def call(self, input_ids):
    """Compute the sub-sequence co-membership mask.

    Args:
      input_ids: int32 Tensor of shape [batch_size, seq_length].

    Returns:
      boolean Tensor of shape [batch_size, seq_length, seq_length] where
      [x, y, z] is True iff tokens y and z of batch item x belong to the
      same sub sequence.
    """
    start_token = input_ids[:, 0:1]
    is_seq_start = tf.cast(tf.equal(input_ids, start_token), tf.int32)
    # A running count of sequence starts assigns each sub sequence an id.
    sub_seq_ids = tf.expand_dims(tf.cumsum(is_seq_start, -1), -1)
    return tf.equal(sub_seq_ids, tf.transpose(sub_seq_ids, [0, 2, 1]))
@tf.keras.utils.register_keras_serializable(package='Text')
class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
  """Creates a positional embedding with sub-sequence masking.

  This layer creates a positional embedding as described in "BERT: Pre-training
  of Deep Bidirectional Transformers for Language Understanding"
  (https://arxiv.org/abs/1810.04805). On top of it, it supports
  `position_ids` and `sub_sequence_mask` tensors.

  This layer can be set up to either create a statically shaped slice or a
  dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor
  can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the
  input size must be fixed.

  Args:
    initializer: The initializer to use for the embedding weights. Defaults to
      "glorot_uniform".
    use_dynamic_slicing: Whether to use the dynamic slicing path.
    max_sequence_length: The maximum size of the dynamic sequence. Only
      applicable if `use_dynamic_slicing` is True.
  """

  def __init__(self,
               initializer='glorot_uniform',
               use_dynamic_slicing=False,
               max_sequence_length=None,
               **kwargs):
    # We need to have a default dtype of float32, since the inputs (which Keras
    # usually uses to infer the dtype) will always be int32.
    if 'dtype' not in kwargs:
      kwargs['dtype'] = 'float32'
    super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)
    if use_dynamic_slicing and max_sequence_length is None:
      raise ValueError(
          'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'
      )
    self._max_sequence_length = max_sequence_length
    self._initializer = tf.keras.initializers.get(initializer)
    self._use_dynamic_slicing = use_dynamic_slicing

  def get_config(self):
    config = {
        'max_sequence_length': self._max_sequence_length,
        'initializer': tf.keras.initializers.serialize(self._initializer),
        'use_dynamic_slicing': self._use_dynamic_slicing,
    }
    base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    """Implements build() for the layer."""
    dimension_list = input_shape.as_list()
    if len(dimension_list) != 3:
      raise ValueError('PositionEmbedding expects a 3-dimensional input tensor '
                       'of shape [batch, sequence, width]')
    seq_length = dimension_list[1]
    width = dimension_list[2]
    # If we are not using dynamic slicing, we must assume that the sequence
    # length is fixed and max_sequence_length should not be specified.
    if not self._use_dynamic_slicing:
      if seq_length is None:
        raise ValueError(
            'PositionEmbedding must have `use_dynamic_slicing` set '
            'to True (and max_sequence_length set) when the '
            'sequence (1st) dimension of the input is None.')
      if self._max_sequence_length is not None:
        raise ValueError(
            'When `use_dynamic_slicing` is False, max_sequence_length should '
            'not be specified and we ought to use seq_length to get the '
            'variable shape.')
    if self._max_sequence_length is not None:
      weight_sequence_length = self._max_sequence_length
    else:
      weight_sequence_length = seq_length
    self._position_embeddings = self.add_weight(
        'embeddings',
        shape=[weight_sequence_length, width],
        initializer=self._initializer)
    super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)

  def call(self, inputs, position_ids=None, sub_sequence_mask=None):
    """Implements call() for the layer.

    When `position_ids` is specified, it will return the position embeddings
    corresponding to this `position_ids`; otherwise, `position_ids` will be
    inferred in the following way:

    (1) When `sub_sequence_mask` is None, we assume the position ids are
        0, 1, 2, ..., seq_length - 1.
    (2) When `sub_sequence_mask` is specified, there may be multiple sub
        sequences, and for each sub sequence, its position ids start from
        0, 1, 2, ...

    Args:
      inputs: Word embeddings in shape [batch, seq_length, embedding_dim].
      position_ids: An optional int32 tensor in shape [batch, seq_length].
      sub_sequence_mask: An optional bool tensor in shape [batch, seq_length,
        seq_length]. [x, y, z] is True if for x'th instance in a batch, y'th
        token and z'th token are from the same sub sequence.

    Returns:
      The position embeddings in shape [batch, seq_length, embedding_dim].
    """
    input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)
    if self._use_dynamic_slicing:
      position_embeddings = self._position_embeddings[:input_shape[1], :]
    else:
      position_embeddings = self._position_embeddings
    if position_ids is not None:
      return tf.gather(position_embeddings, position_ids)
    if sub_sequence_mask is None:
      return tf.broadcast_to(position_embeddings, input_shape)
    else:
      sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32)
      # For each sub sequence, its position ids start from 0, 1, 2, ...
      position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1
      return tf.gather(position_embeddings, position_ids)
| 40.498442 | 80 | 0.694846 |
import collections
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp import keras_nlp
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceEmbedding(tf.keras.Model):
  """BERT-style embedding network supporting packed sequences/position ids.

  Builds word + position + type embeddings, layer-norm, dropout, an optional
  projection from embedding_width to hidden_size, and the self-attention
  mask; outputs [embeddings, attention_mask].
  """

  def __init__(self,
               vocab_size,
               type_vocab_size,
               embedding_width,
               hidden_size,
               max_seq_length,
               initializer,
               dropout_rate,
               use_position_id=False,
               pack_multiple_sequences=False,
               **kwargs):
    initializer = tf.keras.initializers.get(initializer)
    # Constructor arguments captured for get_config()/from_config().
    config_dict = {
        'vocab_size': vocab_size,
        'type_vocab_size': type_vocab_size,
        'embedding_width': embedding_width,
        'hidden_size': hidden_size,
        'max_seq_length': max_seq_length,
        'initializer': tf.keras.initializers.serialize(initializer),
        'dropout_rate': dropout_rate,
        'use_position_id': use_position_id,
        'pack_multiple_sequences': pack_multiple_sequences,
    }
    word_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_word_ids')
    mask = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_mask')
    type_ids = tf.keras.layers.Input(
        shape=(None,), dtype=tf.int32, name='input_type_ids')
    inputs = {
        'input_word_ids': word_ids,
        'input_mask': mask,
        'input_type_ids': type_ids,
    }
    if use_position_id:
      position_ids = tf.keras.layers.Input(
          shape=(None,), dtype=tf.int32, name='position_ids')
      inputs['position_ids'] = position_ids
    else:
      position_ids = None
    if pack_multiple_sequences:
      sub_seq_mask = PackedSequenceMask()(word_ids)
    else:
      sub_seq_mask = None
    embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=vocab_size,
        embedding_width=embedding_width,
        initializer=initializer,
        name='word_embeddings')
    word_embeddings = embedding_layer(word_ids)
    # Dynamic slicing is always enabled for the position embeddings.
    position_embedding_layer = PositionEmbeddingWithSubSeqMask(
        initializer=initializer,
        use_dynamic_slicing=True,
        max_sequence_length=max_seq_length,
        name='position_embedding')
    position_embeddings = position_embedding_layer(
        word_embeddings, position_ids, sub_seq_mask)
    type_embeddings = (
        layers.OnDeviceEmbedding(
            vocab_size=type_vocab_size,
            embedding_width=embedding_width,
            initializer=initializer,
            use_one_hot=True,
            name='type_embeddings')(type_ids))
    embeddings = tf.keras.layers.Add()(
        [word_embeddings, position_embeddings, type_embeddings])
    embeddings = tf.keras.layers.LayerNormalization(
        name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(
            embeddings)
    embeddings = tf.keras.layers.Dropout(
        rate=dropout_rate, dtype=tf.float32)(
            embeddings)
    if embedding_width != hidden_size:
      # Project factorized embeddings up to the encoder hidden size.
      embeddings = tf.keras.layers.experimental.EinsumDense(
          '...x,xy->...y',
          output_shape=hidden_size,
          bias_axes=None,
          kernel_initializer=initializer,
          name='embedding_projection')(
              embeddings)
    attention_mask = keras_nlp.layers.SelfAttentionMask()(embeddings, mask)
    if sub_seq_mask is not None:
      # Zero out attention between tokens of different sub sequences.
      attention_mask = tf.keras.layers.Lambda(
          lambda x: x[0] * tf.cast(x[1], x[0].dtype))(
              [attention_mask, sub_seq_mask])
    outputs = [embeddings, attention_mask]
    super(PackedSequenceEmbedding, self).__init__(
        inputs=inputs, outputs=outputs, **kwargs)
    # namedtuple instead of dict so TF's attribute tracking ignores it.
    config_cls = collections.namedtuple('Config', config_dict.keys())
    self._config = config_cls(**config_dict)
    self._embedding_layer = embedding_layer
    self._position_embedding_layer = position_embedding_layer

  def get_embedding_table(self):
    """Return the word-embedding weight matrix."""
    return self._embedding_layer.embeddings

  def get_config(self):
    return dict(self._config._asdict())

  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceMask(tf.keras.layers.Layer):
  """Marks which token pairs belong to the same packed sub sequence."""

  def call(self, input_ids):
    # Tokens equal to the first token are treated as sub-sequence starts,
    # so a cumulative count of starts gives each sub sequence its own id.
    starts = tf.cast(tf.equal(input_ids, input_ids[:, 0:1]), tf.int32)
    ids = tf.expand_dims(tf.cumsum(starts, -1), -1)
    # True where two positions carry the same sub-sequence id.
    return tf.equal(ids, tf.transpose(ids, [0, 2, 1]))
@tf.keras.utils.register_keras_serializable(package='Text')
class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
  """BERT-style positional embedding with optional sub-sequence masking.

  Supports explicit `position_ids`, and when `sub_sequence_mask` is given,
  restarts position ids at 0 for each packed sub sequence.
  """

  def __init__(self,
               initializer='glorot_uniform',
               use_dynamic_slicing=False,
               max_sequence_length=None,
               **kwargs):
    # Default dtype must be float32: Keras would otherwise infer int32 from
    # the inputs.
    if 'dtype' not in kwargs:
      kwargs['dtype'] = 'float32'
    super(PositionEmbeddingWithSubSeqMask, self).__init__(**kwargs)
    if use_dynamic_slicing and max_sequence_length is None:
      raise ValueError(
          'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'
      )
    self._max_sequence_length = max_sequence_length
    self._initializer = tf.keras.initializers.get(initializer)
    self._use_dynamic_slicing = use_dynamic_slicing

  def get_config(self):
    """Return the layer config for Keras serialization."""
    config = {
        'max_sequence_length': self._max_sequence_length,
        'initializer': tf.keras.initializers.serialize(self._initializer),
        'use_dynamic_slicing': self._use_dynamic_slicing,
    }
    base_config = super(PositionEmbeddingWithSubSeqMask, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    """Create the position-embedding weight; expects rank-3 input."""
    dimension_list = input_shape.as_list()
    if len(dimension_list) != 3:
      raise ValueError('PositionEmbedding expects a 3-dimensional input tensor '
                       'of shape [batch, sequence, width]')
    seq_length = dimension_list[1]
    width = dimension_list[2]
    # Without dynamic slicing the sequence length must be static and
    # max_sequence_length must not be set.
    if not self._use_dynamic_slicing:
      if seq_length is None:
        raise ValueError(
            'PositionEmbedding must have `use_dynamic_slicing` set '
            'to True (and max_sequence_length set) when the '
            'sequence (1st) dimension of the input is None.')
      if self._max_sequence_length is not None:
        raise ValueError(
            'When `use_dynamic_slicing` is False, max_sequence_length should '
            'not be specified and we ought to use seq_length to get the '
            'variable shape.')
    if self._max_sequence_length is not None:
      weight_sequence_length = self._max_sequence_length
    else:
      weight_sequence_length = seq_length
    self._position_embeddings = self.add_weight(
        'embeddings',
        shape=[weight_sequence_length, width],
        initializer=self._initializer)
    super(PositionEmbeddingWithSubSeqMask, self).build(input_shape)

  def call(self, inputs, position_ids=None, sub_sequence_mask=None):
    """Return position embeddings of shape [batch, seq_length, dim]."""
    input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)
    if self._use_dynamic_slicing:
      position_embeddings = self._position_embeddings[:input_shape[1], :]
    else:
      position_embeddings = self._position_embeddings
    if position_ids is not None:
      # Caller supplied explicit position ids.
      return tf.gather(position_embeddings, position_ids)
    if sub_sequence_mask is None:
      # Positions are simply 0 .. seq_length-1 for every batch item.
      return tf.broadcast_to(position_embeddings, input_shape)
    else:
      sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32)
      # Each sub sequence restarts its position ids from 0, 1, 2, ...
      position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1
      return tf.gather(position_embeddings, position_ids)
| true | true |
f73355edada3c3d79ec02189472eee21b60d88ce | 97 | py | Python | roomba/__init__.py | Leseratte10/Roomba980-Python | 37f4938c957b2710371c1c103d8a0b5130784926 | [
"MIT"
] | 257 | 2017-05-15T21:16:32.000Z | 2022-03-28T12:25:32.000Z | roomba/__init__.py | Leseratte10/Roomba980-Python | 37f4938c957b2710371c1c103d8a0b5130784926 | [
"MIT"
] | 80 | 2017-05-17T12:42:11.000Z | 2022-02-23T20:26:12.000Z | roomba/__init__.py | Leseratte10/Roomba980-Python | 37f4938c957b2710371c1c103d8a0b5130784926 | [
"MIT"
] | 83 | 2017-05-16T06:49:07.000Z | 2022-02-05T04:36:56.000Z | from __future__ import absolute_import
from .roomba import Roomba
from .password import Password
| 24.25 | 38 | 0.85567 | from __future__ import absolute_import
from .roomba import Roomba
from .password import Password
| true | true |
f73355f6cd03cffa0edf5607316f63152c7549c0 | 19,217 | py | Python | src/sentry/api/endpoints/organization_details.py | pombredanne/django-sentry | 4ad09417fb3cfa3aa4a0d4175ae49fe02837c567 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/organization_details.py | pombredanne/django-sentry | 4ad09417fb3cfa3aa4a0d4175ae49fe02837c567 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/organization_details.py | pombredanne/django-sentry | 4ad09417fb3cfa3aa4a0d4175ae49fe02837c567 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import logging
import six
from rest_framework import serializers, status
from uuid import uuid4
from sentry import roles
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.fields import AvatarField
from sentry.api.fields.empty_integer import EmptyIntegerField
from sentry.api.serializers import serialize
from sentry.api.serializers.models import organization as org_serializers
from sentry.api.serializers.rest_framework import ListField
from sentry.constants import LEGACY_RATE_LIMIT_OPTIONS, RESERVED_ORGANIZATION_SLUGS
from sentry.lang.native.utils import STORE_CRASH_REPORTS_DEFAULT, convert_crashreport_count
from sentry.models import (
AuditLogEntryEvent,
Authenticator,
AuthProvider,
Organization,
OrganizationAvatar,
OrganizationOption,
OrganizationStatus,
)
from sentry.tasks.deletion import delete_organization
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.utils.cache import memoize
# Validation/error messages surfaced to API clients by this endpoint.
ERR_DEFAULT_ORG = "You cannot remove the default organization."
ERR_NO_USER = "This request requires an authenticated user."
ERR_NO_2FA = "Cannot require two-factor authentication without personal two-factor enabled."
ERR_SSO_ENABLED = "Cannot require two-factor authentication with SSO enabled"
# Mapping from serializer fields to persisted OrganizationOption rows.
ORG_OPTIONS = (
    # serializer field name, option key name, type, default value
    (
        "projectRateLimit",
        "sentry:project-rate-limit",
        int,
        org_serializers.PROJECT_RATE_LIMIT_DEFAULT,
    ),
    (
        "accountRateLimit",
        "sentry:account-rate-limit",
        int,
        org_serializers.ACCOUNT_RATE_LIMIT_DEFAULT,
    ),
    ("dataScrubber", "sentry:require_scrub_data", bool, org_serializers.REQUIRE_SCRUB_DATA_DEFAULT),
    ("sensitiveFields", "sentry:sensitive_fields", list, org_serializers.SENSITIVE_FIELDS_DEFAULT),
    ("safeFields", "sentry:safe_fields", list, org_serializers.SAFE_FIELDS_DEFAULT),
    (
        "scrapeJavaScript",
        "sentry:scrape_javascript",
        bool,
        org_serializers.SCRAPE_JAVASCRIPT_DEFAULT,
    ),
    (
        "dataScrubberDefaults",
        "sentry:require_scrub_defaults",
        bool,
        org_serializers.REQUIRE_SCRUB_DEFAULTS_DEFAULT,
    ),
    (
        "storeCrashReports",
        "sentry:store_crash_reports",
        convert_crashreport_count,
        STORE_CRASH_REPORTS_DEFAULT,
    ),
    (
        "attachmentsRole",
        "sentry:attachments_role",
        six.text_type,
        org_serializers.ATTACHMENTS_ROLE_DEFAULT,
    ),
    (
        "scrubIPAddresses",
        "sentry:require_scrub_ip_address",
        bool,
        org_serializers.REQUIRE_SCRUB_IP_ADDRESS_DEFAULT,
    ),
    ("trustedRelays", "sentry:trusted-relays", list, org_serializers.TRUSTED_RELAYS_DEFAULT),
    ("allowJoinRequests", "sentry:join_requests", bool, org_serializers.JOIN_REQUESTS_DEFAULT),
)
# Logger for organization deletion events.
delete_logger = logging.getLogger("sentry.deletions.api")
# Statuses that indicate a deletion has already been scheduled or started.
DELETION_STATUSES = frozenset(
    [OrganizationStatus.PENDING_DELETION, OrganizationStatus.DELETION_IN_PROGRESS]
)
@scenario("RetrieveOrganization")
def retrieve_organization_scenario(runner):
    # API-docs example request: fetch a single organization by slug.
    runner.request(method="GET", path="/organizations/%s/" % runner.org.slug)
@scenario("UpdateOrganization")
def update_organization_scenario(runner):
    # API-docs example request: rename an organization and change its slug,
    # using a throwaway isolated organization so docs runs don't mutate state.
    with runner.isolated_org("Badly Misnamed") as org:
        runner.request(
            method="PUT",
            path="/organizations/%s/" % org.slug,
            data={"name": "Impeccably Designated", "slug": "impeccably-designated"},
        )
class OrganizationSerializer(serializers.Serializer):
    """Validates payloads for organization detail updates."""

    name = serializers.CharField(max_length=64)
    slug = serializers.RegexField(r"^[a-z0-9_\-]+$", max_length=50)
    accountRateLimit = EmptyIntegerField(
        min_value=0, max_value=1000000, required=False, allow_null=True
    )
    projectRateLimit = EmptyIntegerField(
        min_value=50, max_value=100, required=False, allow_null=True
    )
    avatar = AvatarField(required=False, allow_null=True)
    avatarType = serializers.ChoiceField(
        choices=(("upload", "upload"), ("letter_avatar", "letter_avatar")),
        required=False,
        allow_null=True,
    )
    openMembership = serializers.BooleanField(required=False)
    allowSharedIssues = serializers.BooleanField(required=False)
    enhancedPrivacy = serializers.BooleanField(required=False)
    # Data-scrubbing settings; field-name lists get extra validation below.
    dataScrubber = serializers.BooleanField(required=False)
    dataScrubberDefaults = serializers.BooleanField(required=False)
    sensitiveFields = ListField(child=serializers.CharField(), required=False)
    safeFields = ListField(child=serializers.CharField(), required=False)
    # -1 appears to mean "unlimited" here — confirm against convert_crashreport_count.
    storeCrashReports = serializers.IntegerField(min_value=-1, max_value=20, required=False)
    attachmentsRole = serializers.CharField(required=True)
    scrubIPAddresses = serializers.BooleanField(required=False)
    scrapeJavaScript = serializers.BooleanField(required=False)
    isEarlyAdopter = serializers.BooleanField(required=False)
    require2FA = serializers.BooleanField(required=False)
    trustedRelays = ListField(child=serializers.CharField(), required=False)
    allowJoinRequests = serializers.BooleanField(required=False)
@memoize
def _has_legacy_rate_limits(self):
org = self.context["organization"]
return OrganizationOption.objects.filter(
organization=org, key__in=LEGACY_RATE_LIMIT_OPTIONS
).exists()
def _has_sso_enabled(self):
org = self.context["organization"]
return AuthProvider.objects.filter(organization=org).exists()
def validate_slug(self, value):
# Historically, the only check just made sure there was more than 1
# character for the slug, but since then, there are many slugs that
# fit within this new imposed limit. We're not fixing existing, but
# just preventing new bad values.
if len(value) < 3:
raise serializers.ValidationError(
'This slug "%s" is too short. Minimum of 3 characters.' % (value,)
)
if value in RESERVED_ORGANIZATION_SLUGS:
raise serializers.ValidationError(
'This slug "%s" is reserved and not allowed.' % (value,)
)
qs = Organization.objects.filter(slug=value).exclude(id=self.context["organization"].id)
if qs.exists():
raise serializers.ValidationError('The slug "%s" is already in use.' % (value,))
return value
def validate_sensitiveFields(self, value):
if value and not all(value):
raise serializers.ValidationError("Empty values are not allowed.")
return value
def validate_safeFields(self, value):
if value and not all(value):
raise serializers.ValidationError("Empty values are not allowed.")
return value
def validate_attachmentsRole(self, value):
try:
roles.get(value)
except KeyError:
raise serializers.ValidationError("Invalid role")
return value
def validate_require2FA(self, value):
user = self.context["user"]
has_2fa = Authenticator.objects.user_has_2fa(user)
if value and not has_2fa:
raise serializers.ValidationError(ERR_NO_2FA)
if value and self._has_sso_enabled():
raise serializers.ValidationError(ERR_SSO_ENABLED)
return value
def validate_trustedRelays(self, value):
from sentry import features
organization = self.context["organization"]
request = self.context["request"]
has_relays = features.has("organizations:relay", organization, actor=request.user)
if not has_relays:
raise serializers.ValidationError(
"Organization does not have the relay feature enabled"
)
return value
def validate_accountRateLimit(self, value):
if not self._has_legacy_rate_limits:
raise serializers.ValidationError(
"The accountRateLimit option cannot be configured for this organization"
)
return value
def validate_projectRateLimit(self, value):
if not self._has_legacy_rate_limits:
raise serializers.ValidationError(
"The accountRateLimit option cannot be configured for this organization"
)
return value
def validate(self, attrs):
attrs = super(OrganizationSerializer, self).validate(attrs)
if attrs.get("avatarType") == "upload":
has_existing_file = OrganizationAvatar.objects.filter(
organization=self.context["organization"], file__isnull=False
).exists()
if not has_existing_file and not attrs.get("avatar"):
raise serializers.ValidationError(
{"avatarType": "Cannot set avatarType to upload without avatar"}
)
return attrs
def save(self):
org = self.context["organization"]
changed_data = {}
for key, option, type_, default_value in ORG_OPTIONS:
if key not in self.initial_data:
continue
try:
option_inst = OrganizationOption.objects.get(organization=org, key=option)
except OrganizationOption.DoesNotExist:
OrganizationOption.objects.set_value(
organization=org, key=option, value=type_(self.initial_data[key])
)
if self.initial_data[key] != default_value:
changed_data[key] = u"to {}".format(self.initial_data[key])
else:
option_inst.value = self.initial_data[key]
# check if ORG_OPTIONS changed
if option_inst.has_changed("value"):
old_val = option_inst.old_value("value")
changed_data[key] = u"from {} to {}".format(old_val, option_inst.value)
option_inst.save()
if "openMembership" in self.initial_data:
org.flags.allow_joinleave = self.initial_data["openMembership"]
if "allowSharedIssues" in self.initial_data:
org.flags.disable_shared_issues = not self.initial_data["allowSharedIssues"]
if "enhancedPrivacy" in self.initial_data:
org.flags.enhanced_privacy = self.initial_data["enhancedPrivacy"]
if "isEarlyAdopter" in self.initial_data:
org.flags.early_adopter = self.initial_data["isEarlyAdopter"]
if "require2FA" in self.initial_data:
org.flags.require_2fa = self.initial_data["require2FA"]
if "name" in self.initial_data:
org.name = self.initial_data["name"]
if "slug" in self.initial_data:
org.slug = self.initial_data["slug"]
org_tracked_field = {
"name": org.name,
"slug": org.slug,
"default_role": org.default_role,
"flag_field": {
"allow_joinleave": org.flags.allow_joinleave.is_set,
"enhanced_privacy": org.flags.enhanced_privacy.is_set,
"disable_shared_issues": org.flags.disable_shared_issues.is_set,
"early_adopter": org.flags.early_adopter.is_set,
"require_2fa": org.flags.require_2fa.is_set,
},
}
# check if fields changed
for f, v in six.iteritems(org_tracked_field):
if f != "flag_field":
if org.has_changed(f):
old_val = org.old_value(f)
changed_data[f] = u"from {} to {}".format(old_val, v)
else:
# check if flag fields changed
for f, v in six.iteritems(org_tracked_field["flag_field"]):
if org.flag_has_changed(f):
changed_data[f] = u"to {}".format(v)
org.save()
if "avatar" in self.initial_data or "avatarType" in self.initial_data:
OrganizationAvatar.save_avatar(
relation={"organization": org},
type=self.initial_data.get("avatarType", "upload"),
avatar=self.initial_data.get("avatar"),
filename=u"{}.png".format(org.slug),
)
if "require2FA" in self.initial_data and self.initial_data["require2FA"] is True:
org.handle_2fa_required(self.context["request"])
return org, changed_data
class OwnerOrganizationSerializer(OrganizationSerializer):
    """Serializer variant for users with ``org:admin`` scope.

    Adds owner-only capabilities: changing the default member role and
    cancelling a scheduled deletion.
    """

    defaultRole = serializers.ChoiceField(choices=roles.get_choices())
    cancelDeletion = serializers.BooleanField(required=False)

    def save(self, *args, **kwargs):
        org = self.context["organization"]
        if "defaultRole" in self.initial_data:
            org.default_role = self.initial_data["defaultRole"]
        # Restore the org only when a cancellation was requested AND a
        # deletion is actually pending/in progress.
        wants_restore = "cancelDeletion" in self.initial_data
        if wants_restore and org.status in DELETION_STATUSES:
            org.status = OrganizationStatus.VISIBLE
        return super(OwnerOrganizationSerializer, self).save(*args, **kwargs)
class OrganizationDetailsEndpoint(OrganizationEndpoint):
    # Single-organization resource: GET retrieves details, PUT updates
    # settings, DELETE schedules asynchronous removal. URL routing resolves
    # and supplies ``organization``.
    doc_section = DocSection.ORGANIZATIONS
    @attach_scenarios([retrieve_organization_scenario])
    def get(self, request, organization):
        """
        Retrieve an Organization
        ````````````````````````
        Return details on an individual organization including various details
        such as membership access, features, and teams.
        :pparam string organization_slug: the slug of the organization the
                                          team should be created for.
        :param string detailed: Specify '0' to retrieve details without projects and teams.
        :auth: required
        """
        # Any value other than the literal "0" (including absence of the
        # parameter) selects the serializer that embeds projects and teams.
        is_detailed = request.GET.get("detailed", "1") != "0"
        serializer = (
            org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams
            if is_detailed
            else org_serializers.DetailedOrganizationSerializer
        )
        context = serialize(organization, request.user, serializer(), access=request.access)
        return self.respond(context)
    @attach_scenarios([update_organization_scenario])
    def put(self, request, organization):
        """
        Update an Organization
        ``````````````````````
        Update various attributes and configurable settings for the given
        organization.
        :pparam string organization_slug: the slug of the organization the
                                          team should be created for.
        :param string name: an optional new name for the organization.
        :param string slug: an optional new slug for the organization. Needs
                            to be available and unique.
        :auth: required
        """
        # Owners (org:admin scope) get extra fields: defaultRole and
        # cancelDeletion.
        if request.access.has_scope("org:admin"):
            serializer_cls = OwnerOrganizationSerializer
        else:
            serializer_cls = OrganizationSerializer
        # Captured before save so that a save which clears a pending deletion
        # can be audit-logged as a restore below.
        was_pending_deletion = organization.status in DELETION_STATUSES
        serializer = serializer_cls(
            data=request.data,
            partial=True,
            context={"organization": organization, "user": request.user, "request": request},
        )
        if serializer.is_valid():
            # ``save`` returns the updated org plus a dict describing what
            # changed, for the audit log.
            organization, changed_data = serializer.save()
            if was_pending_deletion:
                self.create_audit_entry(
                    request=request,
                    organization=organization,
                    target_object=organization.id,
                    event=AuditLogEntryEvent.ORG_RESTORE,
                    data=organization.get_audit_log_data(),
                )
                delete_logger.info(
                    "object.delete.canceled",
                    extra={"object_id": organization.id, "model": Organization.__name__},
                )
            elif changed_data:
                # Only record an edit entry when something actually changed.
                self.create_audit_entry(
                    request=request,
                    organization=organization,
                    target_object=organization.id,
                    event=AuditLogEntryEvent.ORG_EDIT,
                    data=changed_data,
                )
            return self.respond(
                serialize(
                    organization,
                    request.user,
                    org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams(),
                    access=request.access,
                )
            )
        return self.respond(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    @sudo_required
    def delete(self, request, organization):
        """
        Delete an Organization
        ``````````````````````
        Schedules an organization for deletion. This API endpoint cannot
        be invoked without a user context for security reasons. This means
        that at present an organization can only be deleted from the
        Sentry UI.
        Deletion happens asynchronously and therefor is not immediate.
        However once deletion has begun the state of a project changes and
        will be hidden from most public views.
        :pparam string organization_slug: the slug of the organization the
                                          team should be created for.
        :auth: required, user-context-needed
        """
        if not request.user.is_authenticated():
            return self.respond({"detail": ERR_NO_USER}, status=401)
        if organization.is_default:
            return self.respond({"detail": ERR_DEFAULT_ORG}, status=400)
        # Single-statement status flip: ``updated`` is the number of rows
        # changed, so only the request that transitions VISIBLE ->
        # PENDING_DELETION runs the scheduling block below.
        updated = Organization.objects.filter(
            id=organization.id, status=OrganizationStatus.VISIBLE
        ).update(status=OrganizationStatus.PENDING_DELETION)
        if updated:
            transaction_id = uuid4().hex
            countdown = 86400  # grace period (seconds) before deletion runs
            entry = self.create_audit_entry(
                request=request,
                organization=organization,
                target_object=organization.id,
                event=AuditLogEntryEvent.ORG_REMOVE,
                data=organization.get_audit_log_data(),
                transaction_id=transaction_id,
            )
            # Email members so the deletion can be confirmed/cancelled during
            # the grace period.
            organization.send_delete_confirmation(entry, countdown)
            delete_organization.apply_async(
                kwargs={
                    "object_id": organization.id,
                    "transaction_id": transaction_id,
                    "actor_id": request.user.id,
                },
                countdown=countdown,
            )
            delete_logger.info(
                "object.delete.queued",
                extra={
                    "object_id": organization.id,
                    "transaction_id": transaction_id,
                    "model": Organization.__name__,
                },
            )
        # 202 Accepted: deletion is scheduled, not yet performed.
        context = serialize(
            organization,
            request.user,
            org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams(),
            access=request.access,
        )
        return self.respond(context, status=202)
| 39.62268 | 100 | 0.63907 | from __future__ import absolute_import
import logging
import six
from rest_framework import serializers, status
from uuid import uuid4
from sentry import roles
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.fields import AvatarField
from sentry.api.fields.empty_integer import EmptyIntegerField
from sentry.api.serializers import serialize
from sentry.api.serializers.models import organization as org_serializers
from sentry.api.serializers.rest_framework import ListField
from sentry.constants import LEGACY_RATE_LIMIT_OPTIONS, RESERVED_ORGANIZATION_SLUGS
from sentry.lang.native.utils import STORE_CRASH_REPORTS_DEFAULT, convert_crashreport_count
from sentry.models import (
AuditLogEntryEvent,
Authenticator,
AuthProvider,
Organization,
OrganizationAvatar,
OrganizationOption,
OrganizationStatus,
)
from sentry.tasks.deletion import delete_organization
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.utils.cache import memoize
ERR_DEFAULT_ORG = "You cannot remove the default organization."
ERR_NO_USER = "This request requires an authenticated user."
ERR_NO_2FA = "Cannot require two-factor authentication without personal two-factor enabled."
ERR_SSO_ENABLED = "Cannot require two-factor authentication with SSO enabled"
ORG_OPTIONS = (
(
"projectRateLimit",
"sentry:project-rate-limit",
int,
org_serializers.PROJECT_RATE_LIMIT_DEFAULT,
),
(
"accountRateLimit",
"sentry:account-rate-limit",
int,
org_serializers.ACCOUNT_RATE_LIMIT_DEFAULT,
),
("dataScrubber", "sentry:require_scrub_data", bool, org_serializers.REQUIRE_SCRUB_DATA_DEFAULT),
("sensitiveFields", "sentry:sensitive_fields", list, org_serializers.SENSITIVE_FIELDS_DEFAULT),
("safeFields", "sentry:safe_fields", list, org_serializers.SAFE_FIELDS_DEFAULT),
(
"scrapeJavaScript",
"sentry:scrape_javascript",
bool,
org_serializers.SCRAPE_JAVASCRIPT_DEFAULT,
),
(
"dataScrubberDefaults",
"sentry:require_scrub_defaults",
bool,
org_serializers.REQUIRE_SCRUB_DEFAULTS_DEFAULT,
),
(
"storeCrashReports",
"sentry:store_crash_reports",
convert_crashreport_count,
STORE_CRASH_REPORTS_DEFAULT,
),
(
"attachmentsRole",
"sentry:attachments_role",
six.text_type,
org_serializers.ATTACHMENTS_ROLE_DEFAULT,
),
(
"scrubIPAddresses",
"sentry:require_scrub_ip_address",
bool,
org_serializers.REQUIRE_SCRUB_IP_ADDRESS_DEFAULT,
),
("trustedRelays", "sentry:trusted-relays", list, org_serializers.TRUSTED_RELAYS_DEFAULT),
("allowJoinRequests", "sentry:join_requests", bool, org_serializers.JOIN_REQUESTS_DEFAULT),
)
delete_logger = logging.getLogger("sentry.deletions.api")
DELETION_STATUSES = frozenset(
[OrganizationStatus.PENDING_DELETION, OrganizationStatus.DELETION_IN_PROGRESS]
)
@scenario("RetrieveOrganization")
def retrieve_organization_scenario(runner):
runner.request(method="GET", path="/organizations/%s/" % runner.org.slug)
@scenario("UpdateOrganization")
def update_organization_scenario(runner):
with runner.isolated_org("Badly Misnamed") as org:
runner.request(
method="PUT",
path="/organizations/%s/" % org.slug,
data={"name": "Impeccably Designated", "slug": "impeccably-designated"},
)
class OrganizationSerializer(serializers.Serializer):
name = serializers.CharField(max_length=64)
slug = serializers.RegexField(r"^[a-z0-9_\-]+$", max_length=50)
accountRateLimit = EmptyIntegerField(
min_value=0, max_value=1000000, required=False, allow_null=True
)
projectRateLimit = EmptyIntegerField(
min_value=50, max_value=100, required=False, allow_null=True
)
avatar = AvatarField(required=False, allow_null=True)
avatarType = serializers.ChoiceField(
choices=(("upload", "upload"), ("letter_avatar", "letter_avatar")),
required=False,
allow_null=True,
)
openMembership = serializers.BooleanField(required=False)
allowSharedIssues = serializers.BooleanField(required=False)
enhancedPrivacy = serializers.BooleanField(required=False)
dataScrubber = serializers.BooleanField(required=False)
dataScrubberDefaults = serializers.BooleanField(required=False)
sensitiveFields = ListField(child=serializers.CharField(), required=False)
safeFields = ListField(child=serializers.CharField(), required=False)
storeCrashReports = serializers.IntegerField(min_value=-1, max_value=20, required=False)
attachmentsRole = serializers.CharField(required=True)
scrubIPAddresses = serializers.BooleanField(required=False)
scrapeJavaScript = serializers.BooleanField(required=False)
isEarlyAdopter = serializers.BooleanField(required=False)
require2FA = serializers.BooleanField(required=False)
trustedRelays = ListField(child=serializers.CharField(), required=False)
allowJoinRequests = serializers.BooleanField(required=False)
@memoize
def _has_legacy_rate_limits(self):
org = self.context["organization"]
return OrganizationOption.objects.filter(
organization=org, key__in=LEGACY_RATE_LIMIT_OPTIONS
).exists()
def _has_sso_enabled(self):
org = self.context["organization"]
return AuthProvider.objects.filter(organization=org).exists()
def validate_slug(self, value):
# just preventing new bad values.
if len(value) < 3:
raise serializers.ValidationError(
'This slug "%s" is too short. Minimum of 3 characters.' % (value,)
)
if value in RESERVED_ORGANIZATION_SLUGS:
raise serializers.ValidationError(
'This slug "%s" is reserved and not allowed.' % (value,)
)
qs = Organization.objects.filter(slug=value).exclude(id=self.context["organization"].id)
if qs.exists():
raise serializers.ValidationError('The slug "%s" is already in use.' % (value,))
return value
def validate_sensitiveFields(self, value):
if value and not all(value):
raise serializers.ValidationError("Empty values are not allowed.")
return value
def validate_safeFields(self, value):
if value and not all(value):
raise serializers.ValidationError("Empty values are not allowed.")
return value
def validate_attachmentsRole(self, value):
try:
roles.get(value)
except KeyError:
raise serializers.ValidationError("Invalid role")
return value
def validate_require2FA(self, value):
user = self.context["user"]
has_2fa = Authenticator.objects.user_has_2fa(user)
if value and not has_2fa:
raise serializers.ValidationError(ERR_NO_2FA)
if value and self._has_sso_enabled():
raise serializers.ValidationError(ERR_SSO_ENABLED)
return value
def validate_trustedRelays(self, value):
from sentry import features
organization = self.context["organization"]
request = self.context["request"]
has_relays = features.has("organizations:relay", organization, actor=request.user)
if not has_relays:
raise serializers.ValidationError(
"Organization does not have the relay feature enabled"
)
return value
def validate_accountRateLimit(self, value):
if not self._has_legacy_rate_limits:
raise serializers.ValidationError(
"The accountRateLimit option cannot be configured for this organization"
)
return value
def validate_projectRateLimit(self, value):
if not self._has_legacy_rate_limits:
raise serializers.ValidationError(
"The accountRateLimit option cannot be configured for this organization"
)
return value
def validate(self, attrs):
attrs = super(OrganizationSerializer, self).validate(attrs)
if attrs.get("avatarType") == "upload":
has_existing_file = OrganizationAvatar.objects.filter(
organization=self.context["organization"], file__isnull=False
).exists()
if not has_existing_file and not attrs.get("avatar"):
raise serializers.ValidationError(
{"avatarType": "Cannot set avatarType to upload without avatar"}
)
return attrs
def save(self):
org = self.context["organization"]
changed_data = {}
for key, option, type_, default_value in ORG_OPTIONS:
if key not in self.initial_data:
continue
try:
option_inst = OrganizationOption.objects.get(organization=org, key=option)
except OrganizationOption.DoesNotExist:
OrganizationOption.objects.set_value(
organization=org, key=option, value=type_(self.initial_data[key])
)
if self.initial_data[key] != default_value:
changed_data[key] = u"to {}".format(self.initial_data[key])
else:
option_inst.value = self.initial_data[key]
# check if ORG_OPTIONS changed
if option_inst.has_changed("value"):
old_val = option_inst.old_value("value")
changed_data[key] = u"from {} to {}".format(old_val, option_inst.value)
option_inst.save()
if "openMembership" in self.initial_data:
org.flags.allow_joinleave = self.initial_data["openMembership"]
if "allowSharedIssues" in self.initial_data:
org.flags.disable_shared_issues = not self.initial_data["allowSharedIssues"]
if "enhancedPrivacy" in self.initial_data:
org.flags.enhanced_privacy = self.initial_data["enhancedPrivacy"]
if "isEarlyAdopter" in self.initial_data:
org.flags.early_adopter = self.initial_data["isEarlyAdopter"]
if "require2FA" in self.initial_data:
org.flags.require_2fa = self.initial_data["require2FA"]
if "name" in self.initial_data:
org.name = self.initial_data["name"]
if "slug" in self.initial_data:
org.slug = self.initial_data["slug"]
org_tracked_field = {
"name": org.name,
"slug": org.slug,
"default_role": org.default_role,
"flag_field": {
"allow_joinleave": org.flags.allow_joinleave.is_set,
"enhanced_privacy": org.flags.enhanced_privacy.is_set,
"disable_shared_issues": org.flags.disable_shared_issues.is_set,
"early_adopter": org.flags.early_adopter.is_set,
"require_2fa": org.flags.require_2fa.is_set,
},
}
# check if fields changed
for f, v in six.iteritems(org_tracked_field):
if f != "flag_field":
if org.has_changed(f):
old_val = org.old_value(f)
changed_data[f] = u"from {} to {}".format(old_val, v)
else:
# check if flag fields changed
for f, v in six.iteritems(org_tracked_field["flag_field"]):
if org.flag_has_changed(f):
changed_data[f] = u"to {}".format(v)
org.save()
if "avatar" in self.initial_data or "avatarType" in self.initial_data:
OrganizationAvatar.save_avatar(
relation={"organization": org},
type=self.initial_data.get("avatarType", "upload"),
avatar=self.initial_data.get("avatar"),
filename=u"{}.png".format(org.slug),
)
if "require2FA" in self.initial_data and self.initial_data["require2FA"] is True:
org.handle_2fa_required(self.context["request"])
return org, changed_data
class OwnerOrganizationSerializer(OrganizationSerializer):
defaultRole = serializers.ChoiceField(choices=roles.get_choices())
cancelDeletion = serializers.BooleanField(required=False)
def save(self, *args, **kwargs):
org = self.context["organization"]
cancel_deletion = "cancelDeletion" in self.initial_data and org.status in DELETION_STATUSES
if "defaultRole" in self.initial_data:
org.default_role = self.initial_data["defaultRole"]
if cancel_deletion:
org.status = OrganizationStatus.VISIBLE
return super(OwnerOrganizationSerializer, self).save(*args, **kwargs)
class OrganizationDetailsEndpoint(OrganizationEndpoint):
doc_section = DocSection.ORGANIZATIONS
@attach_scenarios([retrieve_organization_scenario])
def get(self, request, organization):
is_detailed = request.GET.get("detailed", "1") != "0"
serializer = (
org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams
if is_detailed
else org_serializers.DetailedOrganizationSerializer
)
context = serialize(organization, request.user, serializer(), access=request.access)
return self.respond(context)
@attach_scenarios([update_organization_scenario])
def put(self, request, organization):
if request.access.has_scope("org:admin"):
serializer_cls = OwnerOrganizationSerializer
else:
serializer_cls = OrganizationSerializer
was_pending_deletion = organization.status in DELETION_STATUSES
serializer = serializer_cls(
data=request.data,
partial=True,
context={"organization": organization, "user": request.user, "request": request},
)
if serializer.is_valid():
organization, changed_data = serializer.save()
if was_pending_deletion:
self.create_audit_entry(
request=request,
organization=organization,
target_object=organization.id,
event=AuditLogEntryEvent.ORG_RESTORE,
data=organization.get_audit_log_data(),
)
delete_logger.info(
"object.delete.canceled",
extra={"object_id": organization.id, "model": Organization.__name__},
)
elif changed_data:
self.create_audit_entry(
request=request,
organization=organization,
target_object=organization.id,
event=AuditLogEntryEvent.ORG_EDIT,
data=changed_data,
)
return self.respond(
serialize(
organization,
request.user,
org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams(),
access=request.access,
)
)
return self.respond(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@sudo_required
def delete(self, request, organization):
if not request.user.is_authenticated():
return self.respond({"detail": ERR_NO_USER}, status=401)
if organization.is_default:
return self.respond({"detail": ERR_DEFAULT_ORG}, status=400)
updated = Organization.objects.filter(
id=organization.id, status=OrganizationStatus.VISIBLE
).update(status=OrganizationStatus.PENDING_DELETION)
if updated:
transaction_id = uuid4().hex
countdown = 86400
entry = self.create_audit_entry(
request=request,
organization=organization,
target_object=organization.id,
event=AuditLogEntryEvent.ORG_REMOVE,
data=organization.get_audit_log_data(),
transaction_id=transaction_id,
)
organization.send_delete_confirmation(entry, countdown)
delete_organization.apply_async(
kwargs={
"object_id": organization.id,
"transaction_id": transaction_id,
"actor_id": request.user.id,
},
countdown=countdown,
)
delete_logger.info(
"object.delete.queued",
extra={
"object_id": organization.id,
"transaction_id": transaction_id,
"model": Organization.__name__,
},
)
context = serialize(
organization,
request.user,
org_serializers.DetailedOrganizationSerializerWithProjectsAndTeams(),
access=request.access,
)
return self.respond(context, status=202)
| true | true |
f733570b112312f20ec6b5c7261608a8698e4237 | 5,407 | py | Python | great_expectations/expectations/core/expect_column_values_to_not_match_like_pattern_list.py | MeganBeckett/great_expectations | 31fca97b53bfefae2b853458ec0e1be87024242c | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/core/expect_column_values_to_not_match_like_pattern_list.py | MeganBeckett/great_expectations | 31fca97b53bfefae2b853458ec0e1be87024242c | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/core/expect_column_values_to_not_match_like_pattern_list.py | MeganBeckett/great_expectations | 31fca97b53bfefae2b853458ec0e1be87024242c | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.util import substitute_none_for_missing
class ExpectColumnValuesToNotMatchLikePatternList(ColumnMapExpectation):
"""Expect column entries to be strings that do NOT match any of a provided list of like patterns expressions.
expect_column_values_to_not_match_like_pattern_list is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
like_pattern_list (List[str]): \
The list of like pattern expressions the column entries should NOT match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.execution_engine.execution_engine.ExecutionEngine.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.execution_engine.execution_engine.ExecutionEngine
.expect_column_values_to_match_regex_list>`
"""
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.not_match_like_pattern_list"
success_keys = (
"like_pattern_list",
"mostly",
)
default_kwarg_values = {
"like_pattern_list": None,
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
args_keys = (
"column",
"like_pattern_list",
)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> bool:
super().validate_configuration(configuration)
try:
assert (
"like_pattern_list" in configuration.kwargs
), "Must provide like_pattern_list"
assert isinstance(
configuration.kwargs.get("like_pattern_list"), (list, dict)
), "like_pattern_list must be a list"
assert isinstance(configuration.kwargs.get("like_pattern_list"), dict) or (
len(configuration.kwargs.get("like_pattern_list")) > 0
), "At least one like_pattern must be supplied in the like_pattern_list."
if isinstance(configuration.kwargs.get("like_pattern_list"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern_list"
), 'Evaluation Parameter dict for like_pattern_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
| 40.350746 | 115 | 0.670797 | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.util import substitute_none_for_missing
class ExpectColumnValuesToNotMatchLikePatternList(ColumnMapExpectation):
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.not_match_like_pattern_list"
success_keys = (
"like_pattern_list",
"mostly",
)
default_kwarg_values = {
"like_pattern_list": None,
"row_condition": None,
"condition_parser": None,
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
args_keys = (
"column",
"like_pattern_list",
)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> bool:
super().validate_configuration(configuration)
try:
assert (
"like_pattern_list" in configuration.kwargs
), "Must provide like_pattern_list"
assert isinstance(
configuration.kwargs.get("like_pattern_list"), (list, dict)
), "like_pattern_list must be a list"
assert isinstance(configuration.kwargs.get("like_pattern_list"), dict) or (
len(configuration.kwargs.get("like_pattern_list")) > 0
), "At least one like_pattern must be supplied in the like_pattern_list."
if isinstance(configuration.kwargs.get("like_pattern_list"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern_list"
), 'Evaluation Parameter dict for like_pattern_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
| true | true |
f73357a5b2d66ea9f461dd6fb7cc090f85d5ed12 | 857 | py | Python | patreon/schemas/pledge.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | patreon/schemas/pledge.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | patreon/schemas/pledge.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | class Attributes(object):
amount_cents = 'amount_cents'
total_historical_amount_cents = 'total_historical_amount_cents'
declined_since = 'declined_since'
created_at = 'created_at'
pledge_cap_cents = 'pledge_cap_cents'
patron_pays_fees = 'patron_pays_fees'
unread_count = 'unread_count'
class Relationships(object):
    # Names of the related resources that can be fetched with a pledge.
    patron = 'patron'
    reward = 'reward'
    creator = 'creator'
    address = 'address'
    card = 'card'
    pledge_vat_location = 'pledge_vat_location'
# Attribute names requested by default.  Note that
# total_historical_amount_cents and unread_count are not part of the
# default set and must be requested explicitly.
default_attributes = [
    Attributes.amount_cents,
    Attributes.declined_since,
    Attributes.created_at,
    Attributes.pledge_cap_cents,
    Attributes.patron_pays_fees,
]
# Relationships requested by default; ``card`` must be asked for explicitly.
default_relationships = [
    Relationships.patron,
    Relationships.reward,
    Relationships.creator,
    Relationships.address,
    Relationships.pledge_vat_location,
]
| 24.485714 | 67 | 0.732789 | class Attributes(object):
amount_cents = 'amount_cents'
total_historical_amount_cents = 'total_historical_amount_cents'
declined_since = 'declined_since'
created_at = 'created_at'
pledge_cap_cents = 'pledge_cap_cents'
patron_pays_fees = 'patron_pays_fees'
unread_count = 'unread_count'
class Relationships(object):
patron = 'patron'
reward = 'reward'
creator = 'creator'
address = 'address'
card = 'card'
pledge_vat_location = 'pledge_vat_location'
default_attributes = [
Attributes.amount_cents,
Attributes.declined_since,
Attributes.created_at,
Attributes.pledge_cap_cents,
Attributes.patron_pays_fees,
]
default_relationships = [
Relationships.patron,
Relationships.reward,
Relationships.creator,
Relationships.address,
Relationships.pledge_vat_location,
]
| true | true |
f73357eea3449ee1a148448a2b3b6e919e70dae1 | 3,686 | py | Python | python/add_tag.py | CiscoDevNet/stealthwatch-code-examples | b3313ba97008225706ca22c5c417bd40cc1b4f71 | [
"MIT"
] | 1 | 2019-08-06T19:13:04.000Z | 2019-08-06T19:13:04.000Z | python/add_tag.py | CiscoDevNet/stealthwatch-code-examples | b3313ba97008225706ca22c5c417bd40cc1b4f71 | [
"MIT"
] | null | null | null | python/add_tag.py | CiscoDevNet/stealthwatch-code-examples | b3313ba97008225706ca22c5c417bd40cc1b4f71 | [
"MIT"
] | 1 | 2022-03-10T16:34:12.000Z | 2022-03-10T16:34:12.000Z | #!/usr/bin/env python
"""
This script will add a new tag (host group) in Stealthwatch using the REST API.
For more information on this API, please visit:
https://developer.cisco.com/docs/stealthwatch/
-
Script Dependencies:
requests
Depencency Installation:
$ pip install requests
System Requirements:
Stealthwatch Version: 7.0.0 or higher
Copyright (c) 2019, Cisco Systems, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import requests
try:
requests.packages.urllib3.disable_warnings()
except:
pass
# Enter all authentication info
SMC_USER = ""
SMC_PASSWORD = ""
SMC_HOST = ""
SMC_TENANT_ID = ""
# Set the URL for SMC login
url = "https://" + SMC_HOST + "/token/v2/authenticate"
# Let's create the loginrequest data
login_request_data = {
"username": SMC_USER,
"password": SMC_PASSWORD
}
# Initialize the Requests session
api_session = requests.Session()
# Perform the POST request to login
response = api_session.request("POST", url, verify=False, data=login_request_data)
# If the login was successful
if response.status_code == 200:
    # The tag (host group) definition to create
    request_data = [
        {
            "name": "Sample Threat Feed",
            "location": "OUTSIDE",
            "description": "A sample of a threat feed",
            "ranges": [
                "149.202.170.60",
                "23.129.64.101",
                "37.187.129.166",
                "91.146.121.3"
            ],
            "hostBaselines": False,
            "suppressExcludedServices": True,
            "inverseSuppression": False,
            "hostTrap": False,
            "sendToCta": False,
            "parentId": 0
        }
    ]
    # Add the new tag (host group) via the tenant's "Tags" API endpoint
    url = ('https://' + SMC_HOST + '/smc-configuration/rest/v1/tenants/'
           + SMC_TENANT_ID + '/tags')
    request_headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
    # Send the request to the SMC.  The SMC uses a self-signed certificate
    # by default, so verification stays disabled just like the
    # authentication request above.
    response = api_session.request("POST", url, verify=False,
                                   data=json.dumps(request_data),
                                   headers=request_headers)
    # If successfully able to add the tag (host group)
    if response.status_code == 200:
        print("New tag (host group) successfully added")
    # If unable to add the new tag (host group)
    else:
        print("An error has occurred, while adding tags (host groups), with the following code {}".format(response.status_code))
# If the login was unsuccessful
else:
    print("An error has occurred, while logging in, with the following code {}".format(response.status_code))
| 32.619469 | 127 | 0.689908 |
import json
import requests
try:
requests.packages.urllib3.disable_warnings()
except:
pass
SMC_USER = ""
SMC_PASSWORD = ""
SMC_HOST = ""
SMC_TENANT_ID = ""
url = "https://" + SMC_HOST + "/token/v2/authenticate"
login_request_data = {
"username": SMC_USER,
"password": SMC_PASSWORD
}
# Initialize the Requests session
api_session = requests.Session()
# Perform the POST request to login
response = api_session.request("POST", url, verify=False, data=login_request_data)
# If the login was successful
if(response.status_code == 200):
# Set the filter with the request data
request_data = [
{
"name": "Sample Threat Feed",
"location": "OUTSIDE",
"description": "A sample of a threat feed",
"ranges": [
"149.202.170.60",
"23.129.64.101",
"37.187.129.166",
"91.146.121.3"
],
"hostBaselines": False,
"suppressExcludedServices": True,
"inverseSuppression": False,
"hostTrap": False,
"sendToCta": False,
"parentId": 0
}
]
# Add the new tag (host group) in the SMC
# TODO: Complete the "Tags" API URL
url = 'https://' + SMC_HOST + '/smc-configuration/rest/v1/...'
request_headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
# TODO: Send the request to the SMC. Make sure you send the "request_data" and "request_headers" in your request.
# Remember, the SMC will use a self-signed certificate by default.
# Hint: look at the authentication request above.
# If successfully able to add the tag (host group)
if (response.status_code == 200):
print("New tag (host group) successfully added")
# If unable to add the new tag (host group)
else:
print("An error has ocurred, while adding tags (host groups), with the following code {}".format(response.status_code))
# If the login was unsuccessful
else:
print("An error has ocurred, while logging in, with the following code {}".format(response.status_code))
| true | true |
f7335a266a37d8066b45c5d8decfeb65d046060a | 4,162 | py | Python | schedule/urls.py | westphahl/django-scheduler | ac52eb54145fbd1d585f6ba7e14beb45ac91e0ca | [
"BSD-3-Clause"
] | 1 | 2015-08-31T15:05:26.000Z | 2015-08-31T15:05:26.000Z | schedule/urls.py | westphahl/django-scheduler | ac52eb54145fbd1d585f6ba7e14beb45ac91e0ca | [
"BSD-3-Clause"
] | null | null | null | schedule/urls.py | westphahl/django-scheduler | ac52eb54145fbd1d585f6ba7e14beb45ac91e0ca | [
"BSD-3-Clause"
] | null | null | null | try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.views.generic.list import ListView
from schedule.models import Calendar
from schedule.feeds import UpcomingEventsFeed
from schedule.feeds import CalendarICalendar
from schedule.periods import Year, Month, Week, Day
from schedule.views import DeleteEventView
urlpatterns = patterns(
    '',
    # Calendar views: list plus per-period renderings of a single calendar.
    url(r'^calendar/$',
        ListView.as_view(queryset=Calendar.objects.all(),
                         template_name='schedule/calendar_list.html'),
        name="calendar_list"),
    url(r'^calendar/year/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="year_calendar",
        kwargs={'periods': [Year], 'template_name': 'schedule/calendar_year.html'}),
    url(r'^calendar/tri_month/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="tri_month_calendar",
        kwargs={'periods': [Month], 'template_name': 'schedule/calendar_tri_month.html'}),
    url(r'^calendar/compact_month/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="compact_calendar",
        kwargs={'periods': [Month], 'template_name': 'schedule/calendar_compact_month.html'}),
    url(r'^calendar/month/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="month_calendar",
        kwargs={'periods': [Month], 'template_name': 'schedule/calendar_month.html'}),
    url(r'^calendar/week/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="week_calendar",
        kwargs={'periods': [Week], 'template_name': 'schedule/calendar_week.html'}),
    url(r'^calendar/daily/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar_by_periods',
        name="day_calendar",
        kwargs={'periods': [Day], 'template_name': 'schedule/calendar_day.html'}),
    url(r'^calendar/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.calendar',
        name="calendar_home",
        ),
    # Event CRUD views.
    url(r'^event/create/(?P<calendar_slug>[-\w]+)/$',
        'schedule.views.create_or_edit_event',
        name='calendar_create_event'),
    url(r'^event/edit/(?P<calendar_slug>[-\w]+)/(?P<event_id>\d+)/$',
        'schedule.views.create_or_edit_event',
        name='edit_event'),
    url(r'^event/(?P<event_id>\d+)/$',
        'schedule.views.event',
        name="event"),
    url(r'^event/delete/(?P<event_id>\d+)/$',
        DeleteEventView.as_view(),
        name="delete_event"),
    # Occurrences that have already been persisted, addressed by id.
    url(r'^occurrence/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        'schedule.views.occurrence',
        name="occurrence"),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        'schedule.views.cancel_occurrence',
        name="cancel_occurrence"),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        'schedule.views.edit_occurrence',
        name="edit_occurrence"),
    # Occurrences not yet persisted, addressed by their exact date/time.
    url(r'^occurrence/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        'schedule.views.occurrence',
        name="occurrence_by_date"),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        'schedule.views.cancel_occurrence',
        name="cancel_occurrence_by_date"),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        'schedule.views.edit_occurrence',
        name="edit_occurrence_by_date"),
    # Feed URLs: upcoming-events feed and iCal export.
    url(r'^feed/calendar/upcoming/(.*)/$', UpcomingEventsFeed(), name='upcoming_events_feed'),
    url(r'^ical/calendar/(.*)/$', CalendarICalendar(), name='calendar_ical'),
    # API URLs.
    url(r'^api/occurrences', 'schedule.views.api_occurrences', name='api_occurences'),
    url(r'^$', ListView.as_view(queryset=Calendar.objects.all()), name='schedule'),
)
| 42.040404 | 138 | 0.628784 | try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.views.generic.list import ListView
from schedule.models import Calendar
from schedule.feeds import UpcomingEventsFeed
from schedule.feeds import CalendarICalendar
from schedule.periods import Year, Month, Week, Day
from schedule.views import DeleteEventView
urlpatterns = patterns(
'',
url(r'^calendar/$',
ListView.as_view(queryset=Calendar.objects.all(),
template_name='schedule/calendar_list.html'),
name="calendar_list"),
url(r'^calendar/year/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="year_calendar",
kwargs={'periods': [Year], 'template_name': 'schedule/calendar_year.html'}),
url(r'^calendar/tri_month/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="tri_month_calendar",
kwargs={'periods': [Month], 'template_name': 'schedule/calendar_tri_month.html'}),
url(r'^calendar/compact_month/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="compact_calendar",
kwargs={'periods': [Month], 'template_name': 'schedule/calendar_compact_month.html'}),
url(r'^calendar/month/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="month_calendar",
kwargs={'periods': [Month], 'template_name': 'schedule/calendar_month.html'}),
url(r'^calendar/week/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="week_calendar",
kwargs={'periods': [Week], 'template_name': 'schedule/calendar_week.html'}),
url(r'^calendar/daily/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar_by_periods',
name="day_calendar",
kwargs={'periods': [Day], 'template_name': 'schedule/calendar_day.html'}),
url(r'^calendar/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.calendar',
name="calendar_home",
),
url(r'^event/create/(?P<calendar_slug>[-\w]+)/$',
'schedule.views.create_or_edit_event',
name='calendar_create_event'),
url(r'^event/edit/(?P<calendar_slug>[-\w]+)/(?P<event_id>\d+)/$',
'schedule.views.create_or_edit_event',
name='edit_event'),
url(r'^event/(?P<event_id>\d+)/$',
'schedule.views.event',
name="event"),
url(r'^event/delete/(?P<event_id>\d+)/$',
DeleteEventView.as_view(),
name="delete_event"),
url(r'^occurrence/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
'schedule.views.occurrence',
name="occurrence"),
url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
'schedule.views.cancel_occurrence',
name="cancel_occurrence"),
url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
'schedule.views.edit_occurrence',
name="edit_occurrence"),
url(r'^occurrence/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
'schedule.views.occurrence',
name="occurrence_by_date"),
url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
'schedule.views.cancel_occurrence',
name="cancel_occurrence_by_date"),
url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
'schedule.views.edit_occurrence',
name="edit_occurrence_by_date"),
url(r'^feed/calendar/upcoming/(.*)/$', UpcomingEventsFeed(), name='upcoming_events_feed'),
url(r'^ical/calendar/(.*)/$', CalendarICalendar(), name='calendar_ical'),
url(r'^api/occurrences', 'schedule.views.api_occurrences', name='api_occurences'),
url(r'^$', ListView.as_view(queryset=Calendar.objects.all()), name='schedule'),
)
| true | true |
f7335bc3ea488aa0b9fe56f25e2850daab4b61ac | 1,853 | py | Python | foreman/data_refinery_foreman/surveyor/utils.py | cgreene/refinebio | fe75e42f2963d60c4307806cba11520754547190 | [
"BSD-3-Clause"
] | null | null | null | foreman/data_refinery_foreman/surveyor/utils.py | cgreene/refinebio | fe75e42f2963d60c4307806cba11520754547190 | [
"BSD-3-Clause"
] | null | null | null | foreman/data_refinery_foreman/surveyor/utils.py | cgreene/refinebio | fe75e42f2963d60c4307806cba11520754547190 | [
"BSD-3-Clause"
] | null | null | null | import collections
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None
):
    """Return a requests Session that retries failed requests.

    Connect/read errors and the given HTTP status codes are retried with
    exponential back off.
    via https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    """
    session = session or requests.Session()
    retry_policy = Retry(total=retries,
                         read=retries,
                         connect=retries,
                         backoff_factor=backoff_factor,
                         status_forcelist=status_forcelist)
    adapter = HTTPAdapter(max_retries=retry_policy)
    # Mount the same adapter for both schemes so they share one pool.
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    return session
def flatten(d, parent_key='', sep='_'):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with *sep*, e.g. ``{'a': {'b': 1}}`` becomes
    ``{'a_b': 1}``.
    via https://stackoverflow.com/a/6027615
    """
    # collections.MutableMapping was removed in Python 3.10; the ABC has
    # lived in collections.abc since Python 3.3.
    try:
        from collections.abc import MutableMapping
    except ImportError:  # pragma: no cover - very old Pythons only
        from collections import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def get_title_and_authors_for_pubmed_id(pmid):
    """Given a PMID, return that PMID's (title, [authors]).

    Best-effort: returns ("", []) when the NCBI esummary lookup fails,
    times out, or returns an unexpected payload.
    """
    try:
        j_url = ("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi"
                 "?db=pubmed&id=" + str(pmid) +
                 "&retmode=json&tool=refinebio&email=hello@refine.bio")
        resp = requests_retry_session().get(j_url, timeout=60)
        record = resp.json()['result'][str(pmid)]
        title = record['title']
        author_names = [author['name'] for author in record['authors']]
        return (title, author_names)
    except Exception:
        # A bare ``except:`` here also swallowed SystemExit and
        # KeyboardInterrupt; limit the best-effort fallback to ordinary
        # errors (network failures, bad JSON, missing keys).
        return ("", [])
| 30.377049 | 159 | 0.639504 | import collections
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def get_title_and_authors_for_pubmed_id(pmid):
try:
j_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&id=" + str(pmid) + "&retmode=json&tool=refinebio&email=hello@refine.bio"
resp = requests_retry_session().get(j_url, timeout=60)
title = resp.json()['result'][str(pmid)]['title']
author_names = []
for author in resp.json()['result'][str(pmid)]['authors']:
author_names.append(author['name'])
return (title, author_names)
except:
return ("", [])
| true | true |
f7335c38c896fcc26e45a5a8bbc3ee31f517d787 | 256 | py | Python | catalog/utils/__init__.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | 2 | 2021-09-05T04:21:33.000Z | 2021-11-03T20:56:46.000Z | catalog/utils/__init__.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | null | null | null | catalog/utils/__init__.py | fbsamples/cp_reference | 028b384767d06158a64be8cbb1af613e2f3c881e | [
"MIT"
] | null | null | null | # Copyright 2004-present, Facebook. All Rights Reserved.
# flake8: noqa
from .catalogs import (
post_item_batch,
post_item_batch_by_id,
create_product,
update_product,
create_catalog,
)
from .dummy_products import create_dummy_products
| 23.272727 | 56 | 0.765625 |
from .catalogs import (
post_item_batch,
post_item_batch_by_id,
create_product,
update_product,
create_catalog,
)
from .dummy_products import create_dummy_products
| true | true |
f7335d0fb2e6008745cd9a791fbda0096a6ea781 | 172,534 | py | Python | nova/compute/api.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | nova/compute/api.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | nova/compute/api.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_group as instance_group_obj
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='Availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='Kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
# Upper bound (bytes) on instance user data (64 KiB - 1); enforcement
# site is not in this chunk — confirm against the callers.
MAX_USERDATA_SIZE = 65535
# Module-level handle on the global quota engine.
QUOTAS = quota.QUOTAS
# Security group names treated as read-only (per the name; usage is not
# visible in this chunk).
RO_SECURITY_GROUPS = ['default']
# Flavor extra-spec key for maximum video RAM (MB); added to memory_mb
# when checking the RAM quota (see _check_num_instances_quota).
VIDEO_RAM = 'hw_video:ram_max_mb'
def check_instance_state(vm_state=None, task_state=(None,),
                         must_have_launched=True):
    """Decorator to check VM and/or task state before entry to API functions.

    If the instance is in the wrong state, or has not been successfully
    started at least once the wrapper will raise an exception.
    """
    def _as_set(states):
        # Normalize list/tuple state filters into sets; leave None
        # (meaning "do not check") untouched.
        if states is not None and not isinstance(states, set):
            return set(states)
        return states

    vm_state = _as_set(vm_state)
    task_state = _as_set(task_state)

    def outer(f):
        @functools.wraps(f)
        def inner(self, context, instance, *args, **kw):
            if vm_state is not None and instance['vm_state'] not in vm_state:
                raise exception.InstanceInvalidState(
                    attr='vm_state',
                    instance_uuid=instance['uuid'],
                    state=instance['vm_state'],
                    method=f.__name__)
            if (task_state is not None and
                    instance['task_state'] not in task_state):
                raise exception.InstanceInvalidState(
                    attr='task_state',
                    instance_uuid=instance['uuid'],
                    state=instance['task_state'],
                    method=f.__name__)
            if must_have_launched and not instance['launched_at']:
                raise exception.InstanceInvalidState(
                    attr=None,
                    not_launched=True,
                    instance_uuid=instance['uuid'],
                    state=instance['vm_state'],
                    method=f.__name__)
            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def check_instance_host(function):
    """Decorator ensuring the instance has been assigned a host.

    Raises InstanceNotReady when instance['host'] is empty or unset.
    """
    @functools.wraps(function)
    def inner(self, context, instance, *args, **kwargs):
        if instance['host']:
            return function(self, context, instance, *args, **kwargs)
        raise exception.InstanceNotReady(instance_id=instance['uuid'])
    return inner
def check_instance_lock(function):
    """Decorator rejecting calls against locked instances.

    Admin contexts bypass the lock; other callers get InstanceIsLocked.
    """
    @functools.wraps(function)
    def inner(self, context, instance, *args, **kwargs):
        # ``and`` short-circuits, so the context is never consulted for
        # unlocked instances.
        locked_against_caller = instance['locked'] and not context.is_admin
        if locked_against_caller:
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        return function(self, context, instance, *args, **kwargs)
    return inner
def policy_decorator(scope):
    """Check corresponding policy prior of wrapped method to execution.

    The enforced rule is "<scope>:<wrapped method name>".
    """
    def outer(func):
        @functools.wraps(func)
        def inner(self, context, target, *args, **kwargs):
            check_policy(context, func.__name__, target, scope)
            return func(self, context, target, *args, **kwargs)
        return inner
    return outer


# Pre-built decorators for the two policy scopes used in this module.
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
    scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
    """Enforce the policy rule named "<scope>:<action>" against *target*."""
    nova.policy.enforce(context, '%s:%s' % (scope, action), target)
def check_instance_cell(fn):
    """Decorator validating the instance's cell before calling the method.

    Delegates to self._validate_cell() (which raises when the instance's
    cell is unknown or read-only) and then invokes the wrapped method.
    Uses functools.wraps — consistent with the other decorators in this
    module — instead of copying ``__name__`` by hand, so ``__doc__``,
    ``__module__`` and friends survive as well.
    """
    @functools.wraps(fn)
    def _wrapped(self, context, instance, *args, **kwargs):
        self._validate_cell(instance, fn.__name__)
        return fn(self, context, instance, *args, **kwargs)
    return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
"""API for interacting with the compute manager."""
    def __init__(self, image_service=None, network_api=None, volume_api=None,
                 security_group_api=None, **kwargs):
        """Initialize the compute API front-end.

        Each collaborating service API may be injected (mainly for
        testing); when omitted the deployment's default implementation is
        constructed.
        """
        self.image_service = (image_service or
                              glance.get_default_image_service())
        self.network_api = network_api or network.API()
        self.volume_api = volume_api or volume.API()
        self.security_group_api = (security_group_api or
            openstack_driver.get_openstack_security_group_driver())
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        # Created lazily by the compute_task_api property.
        self._compute_task_api = None
        self.servicegroup_api = servicegroup.API()
        self.notifier = rpc.get_notifier('compute', CONF.host)
        super(API, self).__init__(**kwargs)
    @property
    def compute_task_api(self):
        """Lazily-constructed handle to the conductor's ComputeTaskAPI."""
        if self._compute_task_api is None:
            # TODO(alaski): Remove calls into here from conductor manager so
            # that this isn't necessary. #1180540
            # NOTE(review): imported here rather than at module scope —
            # presumably to avoid an import cycle with nova.conductor;
            # confirm before moving it.
            from nova import conductor
            self._compute_task_api = conductor.ComputeTaskAPI()
        return self._compute_task_api
    @property
    def cell_type(self):
        """The cell type of this service, memoized on first access.

        Resolved once from the cells configuration; 'api' marks an API
        cell (see _validate_cell).  Other possible values are not visible
        in this module chunk.
        """
        try:
            return getattr(self, '_cell_type')
        except AttributeError:
            self._cell_type = cells_opts.get_cell_type()
            return self._cell_type
    def _cell_read_only(self, cell_name):
        """Is the target cell in a read-only mode?"""
        # FIXME(comstud): Add support for this.
        # Always False for now; a True result makes _validate_cell reject
        # state-changing operations.
        return False
    def _validate_cell(self, instance, method):
        """Check that *instance* may be operated on from this cell.

        No-op unless this service runs as an API cell.  Raises
        InstanceUnknownCell when the instance has not yet been assigned a
        cell, and InstanceInvalidState (state "temporary_readonly") when
        its cell is currently read-only.
        """
        if self.cell_type != 'api':
            return
        cell_name = instance['cell_name']
        if not cell_name:
            raise exception.InstanceUnknownCell(
                instance_uuid=instance['uuid'])
        if self._cell_read_only(cell_name):
            raise exception.InstanceInvalidState(
                attr="vm_state",
                instance_uuid=instance['uuid'],
                state="temporary_readonly",
                method=method)
    def _record_action_start(self, context, instance, action):
        """Record the start of *action* in the instance's action log.

        Fire-and-forget: want_result=False, so no reply is awaited.
        """
        instance_action.InstanceAction.action_start(context,
                                                    instance['uuid'],
                                                    action,
                                                    want_result=False)
    def _check_injected_file_quota(self, context, injected_files):
        """Enforce quota limits on injected files.

        Raises a QuotaError if any limit is exceeded.
        """
        if injected_files is None:
            return
        # Check number of files first
        try:
            QUOTAS.limit_check(context, injected_files=len(injected_files))
        except exception.OverQuota:
            raise exception.OnsetFileLimitExceeded()
        # OK, now count path and content lengths; we're looking for
        # the max...
        max_path = 0
        max_content = 0
        for path, content in injected_files:
            max_path = max(max_path, len(path))
            max_content = max(max_content, len(content))
        try:
            # Only the largest path/content need checking: if the maximum
            # is within quota, every file is.
            QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
                               injected_file_content_bytes=max_content)
        except exception.OverQuota as exc:
            # Favor path limit over content limit for reporting
            # purposes
            if 'injected_file_path_bytes' in exc.kwargs['overs']:
                raise exception.OnsetFilePathLimitExceeded()
            else:
                raise exception.OnsetFileContentLimitExceeded()
    def _check_num_instances_quota(self, context, instance_type, min_count,
                                   max_count):
        """Enforce quota limits on number of instances created.

        Returns (max_count, reservations) on success.  When the full
        max_count does not fit but at least min_count does, recurses with
        the largest count that fits; otherwise raises TooManyInstances.
        """
        # Determine requested cores and ram
        req_cores = max_count * instance_type['vcpus']
        # Video RAM from the flavor extra specs counts against the RAM quota.
        vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
        req_ram = max_count * (instance_type['memory_mb'] + vram_mb)
        # Check the quota
        try:
            reservations = QUOTAS.reserve(context, instances=max_count,
                                          cores=req_cores, ram=req_ram)
        except exception.OverQuota as exc:
            # OK, we exceeded quota; let's figure out why...
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            headroom = exc.kwargs['headroom']
            allowed = headroom['instances']
            # Reduce 'allowed' instances in line with the cores & ram headroom
            if instance_type['vcpus']:
                allowed = min(allowed,
                              headroom['cores'] // instance_type['vcpus'])
            if instance_type['memory_mb']:
                allowed = min(allowed,
                              headroom['ram'] // (instance_type['memory_mb'] +
                                                  vram_mb))
            # Convert to the appropriate exception message
            if allowed <= 0:
                msg = _("Cannot run any more instances of this type.")
                allowed = 0
            elif min_count <= allowed <= max_count:
                # We're actually OK, but still need reservations
                return self._check_num_instances_quota(context, instance_type,
                                                       min_count, allowed)
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       allowed)
            # Report against the first resource that went over quota.
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)
            params = {'overs': overs, 'pid': context.project_id,
                      'min_count': min_count, 'max_count': max_count,
                      'msg': msg}
            if min_count == max_count:
                LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run %(min_count)d instances. %(msg)s"),
                         params)
            else:
                LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run between %(min_count)d and"
                           " %(max_count)d instances. %(msg)s"),
                         params)
            num_instances = (str(min_count) if min_count == max_count else
                             "%s-%s" % (min_count, max_count))
            requested = dict(instances=num_instances, cores=req_cores,
                             ram=req_ram)
            raise exception.TooManyInstances(overs=overs,
                                             req=requested[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)
        return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if not isinstance(k, six.string_types):
msg = _("Metadata property key '%s' is not a string.") % k
raise exception.InvalidMetadata(reason=msg)
if not isinstance(v, six.string_types):
msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
"not a string.") % {'v': v, 'k': k})
raise exception.InvalidMetadata(reason=msg)
if len(k) == 0:
msg = _("Metadata property key blank")
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
    def _check_requested_networks(self, context, requested_networks,
                                  max_count):
        """Check that the requested networks belong to the project and
        that any fixed IP address provided is within the corresponding
        network block.

        Validation is delegated entirely to the network API.

        :returns: the maximum number of instances that may be created
            given any per-network quota limits (see the caller's use of
            max_network_count).
        """
        return self.network_api.validate_networks(context, requested_networks,
                                                  max_count)
    @staticmethod
    def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
        """Choose kernel and ramdisk appropriate for the instance.

        The kernel and ramdisk can be chosen in one of three ways:

            1. Passed in with create-instance request.
            2. Inherited from image.
            3. Forced to None by using `null_kernel` FLAG.

        :returns: a (kernel_id, ramdisk_id) tuple, either of which may
            be None.
        """
        # Inherit from image if not specified
        image_properties = image.get('properties', {})
        if kernel_id is None:
            kernel_id = image_properties.get('kernel_id')
        if ramdisk_id is None:
            ramdisk_id = image_properties.get('ramdisk_id')
        # Force to None if using null_kernel
        if kernel_id == str(CONF.null_kernel):
            kernel_id = None
            ramdisk_id = None
        # Verify kernel and ramdisk exist (fail-fast). The show() calls
        # are made only for their side effect: they raise if the image
        # cannot be fetched from glance.
        if kernel_id is not None:
            image_service, kernel_id = glance.get_remote_image_service(
                context, kernel_id)
            image_service.show(context, kernel_id)
        if ramdisk_id is not None:
            image_service, ramdisk_id = glance.get_remote_image_service(
                context, ramdisk_id)
            image_service.show(context, ramdisk_id)
        return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
    def _inherit_properties_from_image(self, image, auto_disk_config):
        """Build the instance fields that are inherited from the image.

        Validates that the requested auto_disk_config is allowed by the
        image, then falls back to the image's own auto_disk_config
        property when the caller did not specify one.

        :returns: dict of os_type/architecture/vm_mode/auto_disk_config
            values suitable for merging into base instance options.
        """
        image_properties = image.get('properties', {})
        auto_disk_config_img = \
            utils.get_auto_disk_config_from_image_props(image_properties)
        self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
                                               auto_disk_config,
                                               image.get("id"))
        # Caller did not express a preference: inherit from the image.
        if auto_disk_config is None:
            auto_disk_config = strutils.bool_from_string(auto_disk_config_img)

        return {
            'os_type': image_properties.get('os_type'),
            'architecture': image_properties.get('architecture'),
            'vm_mode': image_properties.get('vm_mode'),
            'auto_disk_config': auto_disk_config
        }
    def _apply_instance_name_template(self, context, instance, index):
        """Rename a multi-instance boot member using the configured
        multi_instance_display_name_template.

        Falls back to the existing display_name if the template is
        malformed (missing key or wrong placeholder type). Also sets the
        hostname from the new name when none was set yet, and saves the
        instance.

        :returns: the updated (and saved) instance.
        """
        params = {
            'uuid': instance['uuid'],
            'name': instance['display_name'],
            # 1-based position of this instance within the batch
            'count': index + 1,
        }
        try:
            new_name = (CONF.multi_instance_display_name_template %
                        params)
        except (KeyError, TypeError):
            LOG.exception(_('Failed to set instance name using '
                            'multi_instance_display_name_template.'))
            new_name = instance['display_name']
        instance.display_name = new_name
        if not instance.get('hostname', None):
            instance.hostname = utils.sanitize_hostname(new_name)
        instance.save()
        return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
    def _check_and_transform_bdm(self, base_options, image_meta, min_count,
                                 max_count, block_device_mapping, legacy_bdm):
        """Merge API- and image-defined block device mappings and convert
        everything to the new (v2) BDM format.

        Both the request (legacy_bdm) and the image (bdm_v2 property) may
        independently be in legacy or v2 format, giving four merge paths.

        :raises exception.InvalidRequest: if multiple instances would
            share a volume-backed device.
        :returns: a list of v2 block device mappings.
        """
        # NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
        # It's needed for legacy conversion to work.
        root_device_name = (base_options.get('root_device_name') or 'vda')
        image_ref = base_options.get('image_ref', '')
        # Get the block device mappings defined by the image.
        image_defined_bdms = \
            image_meta.get('properties', {}).get('block_device_mapping', [])
        legacy_image_defined = not image_meta.get(
            'properties', {}).get('bdm_v2', False)

        if not legacy_image_defined:
            image_defined_bdms = map(block_device.BlockDeviceDict,
                                     image_defined_bdms)

        if legacy_bdm:
            if legacy_image_defined:
                # Both legacy: merge first, convert once.
                block_device_mapping += image_defined_bdms
                block_device_mapping = block_device.from_legacy_mapping(
                    block_device_mapping, image_ref, root_device_name)
            else:
                # Legacy request + v2 image: convert the request, telling
                # the converter not to synthesize a root device if the
                # image already defines one.
                root_in_image_bdms = block_device.get_root_bdm(
                    image_defined_bdms) is not None
                block_device_mapping = block_device.from_legacy_mapping(
                    block_device_mapping, image_ref, root_device_name,
                    no_root=root_in_image_bdms) + image_defined_bdms
        else:
            # NOTE (ndipanov): client will insert an image mapping into the v2
            # block_device_mapping, but if there is a bootable device in image
            # mappings - we need to get rid of the inserted image.
            if legacy_image_defined:
                image_defined_bdms = block_device.from_legacy_mapping(
                    image_defined_bdms, None, root_device_name)
            root_in_image_bdms = block_device.get_root_bdm(
                image_defined_bdms) is not None
            if image_ref and root_in_image_bdms:
                block_device_mapping = [bdm for bdm in block_device_mapping
                                        if not (
                                            bdm.get('source_type') == 'image'
                                            and bdm.get('boot_index') == 0)]
            block_device_mapping += image_defined_bdms

        if min_count > 1 or max_count > 1:
            # A volume can only be attached to one instance at a time.
            if any(map(lambda bdm: bdm['source_type'] == 'volume',
                       block_device_mapping)):
                msg = _('Cannot attach one or more volumes to multiple'
                        ' instances')
                raise exception.InvalidRequest(msg)

        return block_device_mapping
    def _get_image(self, context, image_href):
        """Resolve an image href into its id and metadata via glance.

        :returns: an (image_id, image dict) tuple, or (None, {}) when no
            href was supplied (e.g. boot from volume).
        """
        if not image_href:
            return None, {}

        (image_service, image_id) = glance.get_remote_image_service(
            context, image_href)
        image = image_service.show(context, image_id)
        return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
    def _validate_and_build_base_options(self, context, instance_type,
                                         boot_meta, image_href, image_id,
                                         kernel_id, ramdisk_id, display_name,
                                         display_description, key_name,
                                         key_data, security_groups,
                                         availability_zone, forced_host,
                                         user_data, metadata, injected_files,
                                         access_ip_v4, access_ip_v6,
                                         requested_networks, config_drive,
                                         block_device_mapping,
                                         auto_disk_config, reservation_id,
                                         max_count):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed.

        :returns: a (base_options, max_network_count) tuple, where
            base_options is the dict of validated instance fields and
            max_network_count is the instance count cap imposed by
            network quotas.
        """
        if availability_zone:
            available_zones = availability_zones.\
                get_availability_zones(context.elevated(), True)
            # A forced host bypasses the AZ existence check.
            if forced_host is None and availability_zone not in \
                    available_zones:
                msg = _('The requested availability zone is not available')
                raise exception.InvalidRequest(msg)

        if instance_type['disabled']:
            raise exception.FlavorNotFound(flavor_id=instance_type['id'])

        if user_data:
            l = len(user_data)
            if l > MAX_USERDATA_SIZE:
                # NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if its over length.
                raise exception.InstanceUserDataTooLarge(
                    length=l, maxsize=MAX_USERDATA_SIZE)

            # user_data must be valid base64.
            try:
                base64.decodestring(user_data)
            except base64.binascii.Error:
                raise exception.InstanceUserDataMalformed()

        self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                instance_type, metadata, injected_files)

        self._check_requested_secgroups(context, security_groups)

        # Note: max_count is the number of instances requested by the user,
        # max_network_count is the maximum number of instances taking into
        # account any network quotas
        max_network_count = self._check_requested_networks(context,
                requested_networks, max_count)

        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                context, kernel_id, ramdisk_id, boot_meta)

        config_drive = self._check_config_drive(config_drive)

        # Look up the public key by name when only a key name was given.
        if key_data is None and key_name:
            key_pair = keypair_obj.KeyPair.get_by_name(context,
                                                       context.user_id,
                                                       key_name)
            key_data = key_pair.public_key

        root_device_name = block_device.properties_root_device_name(
            boot_meta.get('properties', {}))

        # Snapshot the flavor into system_metadata so a later flavor
        # change/delete does not affect this instance.
        system_metadata = flavors.save_flavor_info(
            dict(), instance_type)

        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive': config_drive,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description or '',
            'user_data': user_data,
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata or {},
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'root_device_name': root_device_name,
            'progress': 0,
            'system_metadata': system_metadata}

        options_from_image = self._inherit_properties_from_image(
                boot_meta, auto_disk_config)

        base_options.update(options_from_image)

        # return the validated options and maximum number of instances allowed
        # by the network quotas
        return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
    def _provision_instances(self, context, instance_type, min_count,
                             max_count, base_options, boot_meta, security_groups,
                             block_device_mapping):
        """Reserve quota and create the DB records for a batch of
        instances.

        On any failure all already-created instances are destroyed and
        the quota reservations rolled back; on success the reservations
        are committed.

        :returns: list of created instance objects.
        """
        # Reserve quotas
        num_instances, quota_reservations = self._check_num_instances_quota(
                context, instance_type, min_count, max_count)
        LOG.debug(_("Going to run %s instances...") % num_instances)
        instances = []
        try:
            for i in xrange(num_instances):
                instance = instance_obj.Instance()
                instance.update(base_options)
                instance = self.create_db_entry_for_new_instance(
                        context, instance_type, boot_meta, instance,
                        security_groups, block_device_mapping,
                        num_instances, i)

                instances.append(instance)

                # send a state update notification for the initial create to
                # show it going from non-existent to BUILDING
                notifications.send_update_with_states(context, instance, None,
                        vm_states.BUILDING, None, None, service="api")

        # In the case of any exceptions, attempt DB cleanup and rollback the
        # quota reservations.
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for instance in instances:
                        try:
                            instance.destroy()
                        except exception.ObjectActionError:
                            # Instance was already deleted elsewhere;
                            # nothing left to clean up for it.
                            pass
                finally:
                    QUOTAS.rollback(context, quota_reservations)

        # Commit the reservations
        QUOTAS.commit(context, quota_reservations)
        return instances
    def _get_bdm_image_metadata(self, context, block_device_mapping,
                                legacy_bdm=True):
        """If we are booting from a volume, we need to get the
        volume details from Cinder and make sure we pass the
        metadata back accordingly.

        Only the boot device is considered: 'vda' for legacy mappings,
        boot_index 0 otherwise.

        :returns: the image properties dict for the boot device, or {}
            if there is no boot device or no metadata.
        :raises exception.InvalidBDMImage / InvalidBDMVolume: if the
            referenced image/volume cannot be fetched.
        """
        if not block_device_mapping:
            return {}

        for bdm in block_device_mapping:
            # Skip everything that is not the boot device.
            if legacy_bdm and bdm.get('device_name') != 'vda':
                continue
            elif not legacy_bdm and bdm.get('boot_index') != 0:
                continue

            if bdm.get('image_id'):
                try:
                    image_id = bdm['image_id']
                    image_meta = self.image_service.show(context, image_id)
                    return image_meta.get('properties', {})
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
            elif bdm.get('volume_id'):
                try:
                    volume_id = bdm['volume_id']
                    volume = self.volume_api.get(context, volume_id)
                    return volume.get('volume_image_metadata', {})
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
        return {}
    @staticmethod
    def _update_instance_group_by_name(context, instance_uuids, group_name):
        """Add instances to the named scheduler group, creating the
        group on demand for backwards compatibility.
        """
        try:
            ig = instance_group_obj.InstanceGroup.get_by_name(context,
                    group_name)
            instance_group_obj.InstanceGroup.add_members(context, ig.uuid,
                    instance_uuids)
        except exception.InstanceGroupNotFound:
            # NOTE(russellb) If the group does not already exist, we need to
            # automatically create it to be backwards compatible with old
            # handling of the 'group' scheduler hint. The policy type will be
            # 'legacy', indicating that this group was created to emulate
            # legacy group behavior.
            ig = instance_group_obj.InstanceGroup()
            ig.name = group_name
            ig.project_id = context.project_id
            ig.user_id = context.user_id
            ig.policies = ['legacy']
            ig.members = instance_uuids
            ig.create(context)
@staticmethod
def _update_instance_group(context, instances, scheduler_hints):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
instance_uuids = [instance.uuid for instance in instances]
if uuidutils.is_uuid_like(group_hint):
instance_group_obj.InstanceGroup.add_members(context, group_hint,
instance_uuids)
else:
API._update_instance_group_by_name(context, instance_uuids,
group_hint)
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_groups,
               availability_zone, user_data, metadata,
               injected_files, admin_password,
               access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config,
               reservation_id=None, scheduler_hints=None,
               legacy_bdm=True):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed and schedule the instance(s) for
        creation.

        :returns: an (instances, reservation_id) tuple.
        """

        # Normalize and setup some parameters
        if reservation_id is None:
            reservation_id = utils.generate_uid('r')
        security_groups = security_groups or ['default']
        min_count = min_count or 1
        max_count = max_count or min_count
        block_device_mapping = block_device_mapping or []
        if not instance_type:
            instance_type = flavors.get_default_flavor()

        if image_href:
            image_id, boot_meta = self._get_image(context, image_href)
        else:
            # Boot-from-volume: the "image" metadata comes from the boot
            # block device instead of glance.
            image_id = None
            boot_meta = {}
            boot_meta['properties'] = \
                self._get_bdm_image_metadata(context,
                    block_device_mapping, legacy_bdm)

        self._check_auto_disk_config(image=boot_meta,
                                     auto_disk_config=auto_disk_config)

        handle_az = self._handle_availability_zone
        availability_zone, forced_host, forced_node = handle_az(context,
                                                            availability_zone)

        base_options, max_net_count = self._validate_and_build_base_options(
                context,
                instance_type, boot_meta, image_href, image_id, kernel_id,
                ramdisk_id, display_name, display_description,
                key_name, key_data, security_groups, availability_zone,
                forced_host, user_data, metadata, injected_files, access_ip_v4,
                access_ip_v6, requested_networks, config_drive,
                block_device_mapping, auto_disk_config, reservation_id,
                max_count)

        # max_net_count is the maximum number of instances requested by the
        # user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
        if max_net_count == 0:
            raise exception.PortLimitExceeded()
        elif max_net_count < max_count:
            LOG.debug(_("max count reduced from %(max_count)d to "
                        "%(max_net_count)d due to network port quota"),
                      {'max_count': max_count,
                       'max_net_count': max_net_count})
            max_count = max_net_count

        block_device_mapping = self._check_and_transform_bdm(
            base_options, boot_meta, min_count, max_count,
            block_device_mapping, legacy_bdm)

        instances = self._provision_instances(context, instance_type,
                min_count, max_count, base_options, boot_meta, security_groups,
                block_device_mapping)

        filter_properties = self._build_filter_properties(context,
                scheduler_hints, forced_host, forced_node, instance_type)

        self._update_instance_group(context, instances, scheduler_hints)

        for instance in instances:
            self._record_action_start(context, instance,
                                      instance_actions.CREATE)

        # Hand the fully validated request off to conductor; BDMs are
        # already transformed to v2 above, hence legacy_bdm=False.
        self.compute_task_api.build_instances(context,
                instances=instances, image=boot_meta,
                filter_properties=filter_properties,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=False)

        return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
    def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
        """Extract and format blank devices from image mappings.

        Only swap and ephemeral virtual devices are kept; each becomes a
        v2 'blank'/'local' BlockDeviceDict sized from the flavor.

        :returns: list of BlockDeviceDict entries (zero-sized devices
            are dropped).
        """
        prepared_mappings = []

        for bdm in block_device.mappings_prepend_dev(mappings):
            LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)

            virtual_name = bdm['virtual']
            # The root/ami device is handled elsewhere.
            if virtual_name == 'ami' or virtual_name == 'root':
                continue

            if not block_device.is_swap_or_ephemeral(virtual_name):
                continue

            guest_format = bdm.get('guest_format')
            if virtual_name == 'swap':
                guest_format = 'swap'
            if not guest_format:
                guest_format = CONF.default_ephemeral_format

            values = block_device.BlockDeviceDict({
                'device_name': bdm['device'],
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': guest_format,
                'delete_on_termination': True,
                'boot_index': -1})

            values['volume_size'] = self._volume_size(
                instance_type, values)
            if values['volume_size'] == 0:
                continue

            prepared_mappings.append(values)

        return prepared_mappings
    def _update_block_device_mapping(self, elevated_context,
                                     instance_type, instance_uuid,
                                     block_device_mapping):
        """tell vm driver to attach volume at boot time by updating
        BlockDeviceMapping

        Persists each non-zero-size mapping for the instance; sizes are
        filled in from the flavor for blank devices.
        """
        LOG.debug(_("block_device_mapping %s"), block_device_mapping,
                  instance_uuid=instance_uuid)
        for bdm in block_device_mapping:
            bdm['volume_size'] = self._volume_size(instance_type, bdm)
            # Zero-size devices (e.g. flavor with no swap/ephemeral) are
            # not persisted.
            if bdm.get('volume_size') == 0:
                continue

            bdm['instance_uuid'] = instance_uuid

            self.db.block_device_mapping_update_or_create(elevated_context,
                                                          bdm,
                                                          legacy=False)
    def _validate_bdm(self, context, instance, instance_type, all_mappings):
        """Validate the combined (API + image) block device mappings.

        Checks boot-index ordering, referenced image/volume/snapshot
        accessibility, and flavor limits on ephemeral/swap sizes and the
        number of local devices.

        :raises: one of the exception.InvalidBDM* family on any failure.
        """
        def _subsequent_list(l):
            # True when the (sorted) list has no gaps, e.g. [0, 1, 2].
            return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))

        # Make sure that the boot indexes make sense
        boot_indexes = sorted([bdm['boot_index']
                               for bdm in all_mappings
                               if bdm.get('boot_index') is not None
                               and bdm.get('boot_index') >= 0])

        if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
            raise exception.InvalidBDMBootSequence()

        for bdm in all_mappings:
            # NOTE(vish): For now, just make sure the volumes are accessible.
            # Additionally, check that the volume can be attached to this
            # instance.
            snapshot_id = bdm.get('snapshot_id')
            volume_id = bdm.get('volume_id')
            image_id = bdm.get('image_id')
            if (image_id is not None and
                    image_id != instance.get('image_ref')):
                try:
                    self._get_image(context, image_id)
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
            # NOTE(review): this 'if' starts a new chain; the volume and
            # snapshot branches below are elifs of it, so they are
            # skipped for image-sourced mappings.
            if (bdm['source_type'] == 'image' and
                    bdm['destination_type'] == 'volume' and
                    not bdm['volume_size']):
                raise exception.InvalidBDM(message=_("Images with "
                    "destination_type 'volume' need to have a non-zero "
                    "size specified"))
            elif volume_id is not None:
                try:
                    volume = self.volume_api.get(context, volume_id)
                    self.volume_api.check_attach(context,
                                                 volume,
                                                 instance=instance)
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
            elif snapshot_id is not None:
                try:
                    self.volume_api.get_snapshot(context, snapshot_id)
                except Exception:
                    raise exception.InvalidBDMSnapshot(id=snapshot_id)

        ephemeral_size = sum(bdm.get('volume_size') or 0
                for bdm in all_mappings
                if block_device.new_format_is_ephemeral(bdm))
        if ephemeral_size > instance_type['ephemeral_gb']:
            raise exception.InvalidBDMEphemeralSize()

        # There should be only one swap
        swap_list = [bdm for bdm in all_mappings
                if block_device.new_format_is_swap(bdm)]
        if len(swap_list) > 1:
            msg = _("More than one swap drive requested.")
            raise exception.InvalidBDMFormat(details=msg)

        if swap_list:
            swap_size = swap_list[0].get('volume_size') or 0
            if swap_size > instance_type['swap']:
                raise exception.InvalidBDMSwapSize()

        max_local = CONF.max_local_block_devices
        # A negative max_local disables the local-device limit entirely.
        if max_local >= 0:
            num_local = len([bdm for bdm in all_mappings
                if bdm.get('destination_type') == 'local'])
            if num_local > max_local:
                raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping)
for mapping in (image_mapping, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
    def _populate_instance_names(self, instance, num_instances):
        """Populate instance display_name and hostname.

        Fills in a default 'Server <uuid>' display_name when none was
        given; the hostname is only derived here for single-instance
        boots (multi-instance hostnames come from the template later).
        """
        display_name = instance.get('display_name')
        if instance.obj_attr_is_set('hostname'):
            hostname = instance.get('hostname')
        else:
            hostname = None

        if display_name is None:
            display_name = self._default_display_name(instance['uuid'])
            instance.display_name = display_name

        if hostname is None and num_instances == 1:
            # NOTE(russellb) In the multi-instance case, we're going to
            # overwrite the display_name using the
            # multi_instance_display_name_template. We need the default
            # display_name set so that it can be used in the template, though.
            # Only set the hostname here if we're only creating one instance.
            # Otherwise, it will be built after the template based
            # display_name.
            hostname = display_name
            instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
    def _populate_instance_for_create(self, instance, image,
                                      index, security_groups, instance_type):
        """Build the beginning of a new instance.

        Sets the uuid, scheduling states, an empty network info cache,
        and the image/flavor-derived system metadata; attaches the
        requested security groups.

        :returns: the populated (not yet persisted) instance.
        """
        if not instance.obj_attr_is_set('uuid'):
            # Generate the instance_uuid here so we can use it
            # for additional setup before creating the DB entry.
            instance['uuid'] = str(uuid.uuid4())

        instance.launch_index = index
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SCHEDULING
        info_cache = instance_info_cache.InstanceInfoCache()
        info_cache.instance_uuid = instance.uuid
        info_cache.network_info = network_model.NetworkInfo()
        instance.info_cache = info_cache

        # Store image properties so we can use them later
        # (for notifications, etc).  Only store what we can.
        if not instance.obj_attr_is_set('system_metadata'):
            instance.system_metadata = {}
        # Make sure we have the dict form that we need for instance_update.
        instance['system_metadata'] = utils.instance_sys_meta(instance)

        system_meta = utils.get_system_metadata_from_image(
            image, instance_type)

        # In case we couldn't find any suitable base_image
        system_meta.setdefault('image_base_image_ref', instance['image_ref'])

        instance['system_metadata'].update(system_meta)

        self.security_group_api.populate_security_groups(instance,
                                                         security_groups)
        return instance
    # NOTE(bcwaldon): No policy check since this is only used by scheduler and
    # the compute api. That should probably be cleaned up, though.
    def create_db_entry_for_new_instance(self, context, instance_type, image,
            instance, security_group, block_device_mapping, num_instances,
            index):
        """Create an entry in the DB for this new instance,
        including any related table updates (such as security group,
        etc).

        This is called by the scheduler after a location for the
        instance has been determined.

        :returns: the created instance (re-named for multi-instance
            boots); the instance is destroyed again if its block device
            mappings turn out to be invalid.
        """
        self._populate_instance_for_create(instance, image, index,
                                           security_group, instance_type)

        self._populate_instance_names(instance, num_instances)

        self._populate_instance_shutdown_terminate(instance, image,
                                                   block_device_mapping)

        self.security_group_api.ensure_default(context)
        instance.create(context)

        if num_instances > 1:
            # NOTE(russellb) We wait until this spot to handle
            # multi_instance_display_name_template, because we need
            # the UUID from the instance.
            instance = self._apply_instance_name_template(context, instance,
                                                          index)

        # NOTE (ndipanov): This can now raise exceptions but the instance
        #                  has been created, so delete it and re-raise so
        #                  that other cleanup can happen.
        try:
            self._populate_instance_for_bdm(context, instance,
                instance_type, image, block_device_mapping)
        except exception.InvalidBDM:
            with excutils.save_and_reraise_exception():
                instance.destroy(context)

        return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
    @hooks.add_hook("create_instance")
    def create(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name=None, display_description=None,
               key_name=None, key_data=None, security_group=None,
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None,
               block_device_mapping=None, access_ip_v4=None,
               access_ip_v6=None, requested_networks=None, config_drive=None,
               auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
        """Provision instances, sending instance information to the
        scheduler.  The scheduler will determine where the instance(s)
        go and will handle creating the DB entries.

        Checks the create (and attach) policies first; the remaining
        validation and scheduling is delegated to _create_instance.

        Returns a tuple of (instances, reservation_id)
        """
        self._check_create_policies(context, availability_zone,
                requested_networks, block_device_mapping)

        # Multi-create from a pinned neutron port is not possible.
        if requested_networks and max_count > 1 and utils.is_neutron():
            self._check_multiple_instances_neutron_ports(requested_networks)

        return self._create_instance(
                               context, instance_type,
                               image_href, kernel_id, ramdisk_id,
                               min_count, max_count,
                               display_name, display_description,
                               key_name, key_data, security_group,
                               availability_zone, user_data, metadata,
                               injected_files, admin_password,
                               access_ip_v4, access_ip_v6,
                               requested_networks, config_drive,
                               block_device_mapping, auto_disk_config,
                               scheduler_hints=scheduler_hints,
                               legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
    def _check_auto_disk_config(self, instance=None, image=None,
                                **extra_instance_updates):
        """Reject an auto_disk_config request that the image (or the
        instance's base image metadata) disallows.

        No-op when auto_disk_config was not requested or when neither an
        image nor an instance is available to check against.
        """
        auto_disk_config = extra_instance_updates.get("auto_disk_config")
        if auto_disk_config is None:
            return
        if not image and not instance:
            return

        if image:
            image_props = image.get("properties", {})
            auto_disk_config_img = \
                utils.get_auto_disk_config_from_image_props(image_props)
            image_ref = image.get("id")
        else:
            # Fall back to the image metadata snapshotted on the
            # instance at boot time.
            sys_meta = utils.instance_sys_meta(instance)
            image_ref = sys_meta.get('image_base_image_ref')
            auto_disk_config_img = \
                utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)

        self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
                                               auto_disk_config,
                                               image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
    """Common implementation behind delete()/soft_delete().

    :param delete_type: 'delete' or 'soft_delete'; used to build
        notification event names ("%s.start" / "%s.end").
    :param cb: callback that performs the actual deletion (cast to the
        compute host, or local DB-only delete).
    :param instance_attrs: fields (e.g. task_state, deleted_at) set on
        the instance before the delete proceeds.
    """
    if instance.disable_terminate:
        LOG.info(_('instance termination disabled'),
                 instance=instance)
        return

    host = instance['host']
    bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    reservations = None

    project_id, user_id = quotas_obj.ids_from_instance(context, instance)

    # At these states an instance has a snapshot associate.
    if instance['vm_state'] in (vm_states.SHELVED,
                                vm_states.SHELVED_OFFLOADED):
        # Best-effort cleanup of the shelved snapshot; failures are
        # logged but never block the delete.
        snapshot_id = instance.system_metadata.get('shelved_image_id')
        LOG.info(_("Working on deleting snapshot %s "
                   "from shelved instance..."),
                 snapshot_id, instance=instance)
        try:
            self.image_service.delete(context, snapshot_id)
        except (exception.ImageNotFound,
                exception.ImageNotAuthorized) as exc:
            LOG.warning(_("Failed to delete snapshot "
                          "from shelved instance (%s)."),
                        exc.format_message(), instance=instance)
        except Exception as exc:
            LOG.exception(_("Something wrong happened when trying to "
                            "delete snapshot from shelved instance."),
                          instance=instance)

    original_task_state = instance.task_state
    try:
        # NOTE(maoy): no expected_task_state needs to be set
        instance.update(instance_attrs)
        instance.progress = 0
        instance.save()
        new_type_id = instance.instance_type_id

        # NOTE(comstud): If we delete the instance locally, we'll
        # commit the reservations here.  Otherwise, the manager side
        # will commit or rollback the reservations based on success.
        reservations = self._create_reservations(context,
                                                 instance,
                                                 new_type_id,
                                                 project_id, user_id)

        if self.cell_type == 'api':
            # NOTE(comstud): If we're in the API cell, we need to
            # skip all remaining logic and just call the callback,
            # which will cause a cast to the child cell.  Also,
            # commit reservations here early until we have a better
            # way to deal with quotas with cells.
            cb(context, instance, bdms, reservations=None)
            if reservations:
                QUOTAS.commit(context,
                              reservations,
                              project_id=project_id,
                              user_id=user_id)
            return

        if not host:
            # Instance was never scheduled to a host: destroy directly
            # from the DB (with the usual notifications).
            try:
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.start" % delete_type)
                instance.destroy()
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.end" % delete_type,
                    system_metadata=instance.system_metadata)
                if reservations:
                    QUOTAS.commit(context,
                                  reservations,
                                  project_id=project_id,
                                  user_id=user_id)
                return
            except exception.ObjectActionError:
                # destroy() raced with something else; re-read the
                # instance and continue down the normal path.
                instance.refresh()

        if instance.vm_state == vm_states.RESIZED:
            self._confirm_resize_on_deleting(context, instance)

        is_up = False
        try:
            service = service_obj.Service.get_by_compute_host(
                context.elevated(), instance.host)
            if self.servicegroup_api.service_is_up(service):
                is_up = True

                if original_task_state in (task_states.DELETING,
                                           task_states.SOFT_DELETING):
                    LOG.info(_('Instance is already in deleting state, '
                               'ignoring this request'), instance=instance)
                    if reservations:
                        QUOTAS.rollback(context, reservations,
                                        project_id=project_id,
                                        user_id=user_id)
                    return

                self._record_action_start(context, instance,
                                          instance_actions.DELETE)

                cb(context, instance, bdms, reservations=reservations)
        except exception.ComputeHostNotFound:
            pass

        if not is_up:
            # If compute node isn't up, just delete from DB
            self._local_delete(context, instance, bdms, delete_type, cb)
            if reservations:
                QUOTAS.commit(context,
                              reservations,
                              project_id=project_id,
                              user_id=user_id)
                reservations = None
    except exception.InstanceNotFound:
        # NOTE(comstud): Race condition. Instance already gone.
        if reservations:
            QUOTAS.rollback(context,
                            reservations,
                            project_id=project_id,
                            user_id=user_id)
    except Exception:
        with excutils.save_and_reraise_exception():
            if reservations:
                QUOTAS.rollback(context,
                                reservations,
                                project_id=project_id,
                                user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
    # If in the middle of a resize, use confirm_resize to
    # ensure the original instance is cleaned up too
    mig_cls = migration_obj.Migration
    migration = None
    # Look for a migration record awaiting confirmation; either state
    # counts, since confirming may already be underway.
    for status in ('finished', 'confirming'):
        try:
            migration = mig_cls.get_by_instance_and_status(
                context.elevated(), instance.uuid, status)
            LOG.info(_('Found an unconfirmed migration during delete, '
                       'id: %(id)s, status: %(status)s') %
                     {'id': migration.id,
                      'status': migration.status},
                     context=context, instance=instance)
            break
        except exception.MigrationNotFoundByStatus:
            pass

    if not migration:
        LOG.info(_('Instance may have been confirmed during delete'),
                 context=context, instance=instance)
        return

    src_host = migration.source_compute
    # Call since this can race with the terminate_instance.
    # The resize is done but awaiting confirmation/reversion,
    # so there are two cases:
    # 1. up-resize: here -instance['vcpus'/'memory_mb'] match
    #    the quota usages accounted for this instance,
    #    so no further quota adjustment is needed
    # 2. down-resize: here -instance['vcpus'/'memory_mb'] are
    #    shy by delta(old, new) from the quota usages accounted
    #    for this instance, so we must adjust
    try:
        deltas = self._downsize_quota_delta(context, instance)
    except KeyError:
        LOG.info(_('Migration %s may have been confirmed during delete') %
                 migration.id, context=context, instance=instance)
        return
    quotas = self._reserve_quota_delta(context, deltas, instance)

    self._record_action_start(context, instance,
                              instance_actions.CONFIRM_RESIZE)

    # cast=False: wait for the confirm to complete before the delete
    # continues.
    self.compute_rpcapi.confirm_resize(context,
                                       instance, migration,
                                       src_host, quotas.reservations,
                                       cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
                         project_id, user_id):
    """Reserve the (negative) quota deltas for deleting an instance.

    :param old_instance: the instance being deleted
    :param new_instance_type_id: the instance's current flavor id, used
        to detect an in-flight resize
    :returns: the reservations list from QUOTAS.reserve()
    """
    instance_vcpus = old_instance['vcpus']
    instance_memory_mb = old_instance['memory_mb']

    # NOTE(wangpan): if the instance is resizing, and the resources
    #                are updated to new instance type, we should use
    #                the old instance type to create reservation.
    # see https://bugs.launchpad.net/nova/+bug/1099729 for more details
    if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
                                      task_states.RESIZE_FINISH):
        Migration = migration_obj.Migration
        try:
            migration = Migration.get_by_instance_and_status(
                context.elevated(), old_instance.uuid, 'post-migrating')
        except exception.MigrationNotFoundByStatus:
            migration = None
        if (migration and
                new_instance_type_id ==
                migration.new_instance_type_id):
            old_inst_type_id = migration.old_instance_type_id
            try:
                old_inst_type = flavors.get_flavor(old_inst_type_id)
            except exception.FlavorNotFound:
                # Old flavor is gone; fall through with the instance's
                # current resource counts.  (Removed a redundant 'pass'
                # that followed this log call.)
                LOG.warning(_("Flavor %d not found"), old_inst_type_id)
            else:
                instance_vcpus = old_inst_type['vcpus']
                # Video RAM counts against the ram quota as well.
                vram_mb = int(old_inst_type['extra_specs']
                              .get(VIDEO_RAM, 0))
                instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
                LOG.debug(_("going to delete a resizing instance"))

    reservations = QUOTAS.reserve(context,
                                  project_id=project_id,
                                  user_id=user_id,
                                  instances=-1,
                                  cores=-instance_vcpus,
                                  ram=-instance_memory_mb)
    return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
    """Delete an instance straight from the DB when its host is down.

    Performs best-effort network/volume cleanup, invokes ``cb`` with
    ``local=True``, then destroys the instance record.
    """
    LOG.warning(_("instance's host %s is down, deleting from "
                  "database") % instance['host'], instance=instance)
    instance.info_cache.delete()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.start" % delete_type)

    elevated = context.elevated()
    if self.cell_type != 'api':
        self.network_api.deallocate_for_instance(elevated,
                                                 instance)

    # cleanup volumes
    for bdm in bdms:
        if bdm.is_volume:
            # NOTE(vish): We don't have access to correct volume
            #             connector info, so just pass a fake
            #             connector. This can be improved when we
            #             expose get_volume_connector to rpc.
            connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
            try:
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(elevated, bdm.volume_id)
                if bdm.delete_on_termination:
                    self.volume_api.delete(context, bdm.volume_id)
            except Exception as exc:
                # Volume cleanup must not block the delete itself.
                err_str = _("Ignoring volume cleanup failure due to %s")
                LOG.warn(err_str % exc, instance=instance)
        bdm.destroy(context)
    cb(context, instance, bdms, local=True)
    # Capture system_metadata before destroy() for the end notification.
    sys_meta = instance.system_metadata
    instance.destroy()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.end" % delete_type,
        system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
               local=False):
    """Finish a hard delete: DB-only when local, else via compute RPC."""
    if not local:
        self.compute_rpcapi.terminate_instance(context, instance, bdms,
                                               reservations=reservations)
        return
    instance.vm_state = vm_states.DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
def _do_soft_delete(self, context, instance, bdms, reservations=None,
                    local=False):
    """Finish a soft delete: DB-only when local, else via compute RPC."""
    if not local:
        self.compute_rpcapi.soft_delete_instance(context, instance,
                                                 reservations=reservations)
        return
    instance.vm_state = vm_states.SOFT_DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=True)
def soft_delete(self, context, instance):
    """Terminate an instance."""
    LOG.debug(_('Going to try to soft delete instance'),
              instance=instance)

    # Marks the instance SOFT_DELETING; reclaim happens later.
    self._delete(context, instance, 'soft_delete', self._do_soft_delete,
                 task_state=task_states.SOFT_DELETING,
                 deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
    # Hard-delete path shared by delete() and force_delete().
    self._delete(context, instance, 'delete', self._do_delete,
                 task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=False)
def delete(self, context, instance):
    """Terminate an instance."""
    LOG.debug(_("Going to try to terminate instance"), instance=instance)
    self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
    """Restore a previously deleted (but not reclaimed) instance."""
    # Reserve quotas
    flavor = instance.get_flavor()
    num_instances, quota_reservations = self._check_num_instances_quota(
        context, flavor, 1, 1)

    self._record_action_start(context, instance, instance_actions.RESTORE)

    try:
        if instance.host:
            # Instance has a host: let the compute manager restore it.
            instance.task_state = task_states.RESTORING
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
            self.compute_rpcapi.restore_instance(context, instance)
        else:
            # No host: restore is a pure DB state change.
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.deleted_at = None
            instance.save(expected_task_state=[None])

        QUOTAS.commit(context, quota_reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
                      must_have_launched=False)
def force_delete(self, context, instance):
    """Force delete a previously deleted (but not reclaimed) instance."""
    self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
    """Stop the instance, bypassing the usual state decorators.

    :param do_cast: when True the RPC is an async cast; when False the
        call waits for the compute manager to finish.
    """
    LOG.debug(_("Going to try to stop instance"), instance=instance)

    instance.task_state = task_states.POWERING_OFF
    instance.progress = 0
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.STOP)

    self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
                                vm_states.ERROR],
                      task_state=[None])
def stop(self, context, instance, do_cast=True):
    """Stop an instance."""
    # State checks are done by the decorators; delegate to force_stop.
    self.force_stop(context, instance, do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
    """Start an instance."""
    LOG.debug(_("Going to try to start instance"), instance=instance)

    instance.task_state = task_states.POWERING_ON
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.START)
    # TODO(yamahata): injected_files isn't supported right now.
    #                 It is used only for osapi. not for ec2 api.
    #                 availability_zone isn't used by run_instance.
    self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
        expected_attrs=None):
    """Get a single instance with the given instance_id.

    :param instance_id: a UUID or (legacy ec2) integer id
    :param want_objects: return an Instance object instead of a
        primitive dict
    :param expected_attrs: extra attributes to join-load
    """
    if not expected_attrs:
        expected_attrs = []
    expected_attrs.extend(['metadata', 'system_metadata',
                           'security_groups', 'info_cache'])
    # NOTE(ameade): we still need to support integer ids for ec2
    try:
        if uuidutils.is_uuid_like(instance_id):
            instance = instance_obj.Instance.get_by_uuid(
                context, instance_id, expected_attrs=expected_attrs)
        elif utils.is_int_like(instance_id):
            instance = instance_obj.Instance.get_by_id(
                context, instance_id, expected_attrs=expected_attrs)
        else:
            raise exception.InstanceNotFound(instance_id=instance_id)
    except exception.InvalidID:
        raise exception.InstanceNotFound(instance_id=instance_id)

    check_policy(context, 'get', instance)

    # Pre-objects callers expect a plain dict.
    if not want_objects:
        instance = obj_base.obj_to_primitive(instance)
    return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
            sort_dir='desc', limit=None, marker=None, want_objects=False,
            expected_attrs=None):
    """Get all instances filtered by one of the given parameters.

    If there is no filter and the context is an admin, it will retrieve
    all instances in the system.

    Deleted instances will be returned by default, unless there is a
    search option that says otherwise.

    The results will be returned sorted in the order specified by the
    'sort_dir' parameter using the key specified in the 'sort_key'
    parameter.
    """

    #TODO(bcwaldon): determine the best argument for target here
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }

    check_policy(context, "get_all", target)

    if search_opts is None:
        search_opts = {}

    LOG.debug(_("Searching by: %s") % str(search_opts))

    # Fixups for the DB call
    filters = {}

    def _remap_flavor_filter(flavor_id):
        # Translate the public flavor_id into the internal DB id.
        flavor = flavor_obj.Flavor.get_by_flavor_id(context, flavor_id)
        filters['instance_type_id'] = flavor.id

    def _remap_fixed_ip_filter(fixed_ip):
        # Turn fixed_ip into a regexp match. Since '.' matches
        # any character, we need to use regexp escaping for it.
        filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

    # search_option to filter_name mapping.
    filter_mapping = {
        'image': 'image_ref',
        'name': 'display_name',
        'tenant_id': 'project_id',
        'flavor': _remap_flavor_filter,
        'fixed_ip': _remap_fixed_ip_filter}

    # copy from search_opts, doing various remappings as necessary
    for opt, value in search_opts.iteritems():
        # Do remappings.
        # Values not in the filter_mapping table are copied as-is.
        # If remapping is None, option is not copied
        # If the remapping is a string, it is the filter_name to use
        try:
            remap_object = filter_mapping[opt]
        except KeyError:
            filters[opt] = value
        else:
            # Remaps are strings to translate to, or functions to call
            # to do the translating as defined by the table above.
            if isinstance(remap_object, six.string_types):
                filters[remap_object] = value
            else:
                try:
                    remap_object(value)

                # We already know we can't match the filter, so
                # return an empty list
                except ValueError:
                    return []

    inst_models = self._get_instances_by_filters(context, filters,
                                                 sort_key, sort_dir,
                                                 limit=limit, marker=marker,
                                                 expected_attrs=expected_attrs)

    if want_objects:
        return inst_models

    # Convert the models to dictionaries
    instances = []
    for inst_model in inst_models:
        instances.append(obj_base.obj_to_primitive(inst_model))

    return instances
def _get_instances_by_filters(self, context, filters,
                              sort_key, sort_dir,
                              limit=None,
                              marker=None, expected_attrs=None):
    """Query InstanceList, resolving ip/ip6 filters through the
    network API into a uuid filter first.
    """
    if 'ip6' in filters or 'ip' in filters:
        matches = self.network_api.get_instance_uuids_by_ip_filter(
            context, filters)
        # NOTE(jkoelker) It is possible that we will get the same
        #                instance uuid twice (one for ipv4 and ipv6)
        filters['uuid'] = set(match['instance_uuid'] for match in matches)

    fields = ['metadata', 'system_metadata', 'info_cache',
              'security_groups'] + list(expected_attrs or [])
    return instance_obj.InstanceList.get_by_filters(
        context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
        limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
           extra_properties=None):
    """Backup the given instance

    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: name of the backup
    :param backup_type: 'daily' or 'weekly'
    :param rotation: int representing how many backups to keep around;
        None if rotation shouldn't be used (as in the case of snapshots)
    :param extra_properties: dict of extra image properties to include
                             when creating the image.
    :returns: A dict containing image metadata
    """
    # Bug fix: extra_properties defaults to None, and dict(None, ...)
    # raises TypeError; guard with an empty dict so the default works.
    props_copy = dict(extra_properties or {}, backup_type=backup_type)
    image_meta = self._create_image(context, instance, name,
                                    'backup', extra_properties=props_copy)

    # NOTE(comstud): Any changes to this method should also be made
    # to the backup_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_BACKUP
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.backup_instance(context, instance,
                                        image_meta['id'],
                                        backup_type,
                                        rotation)
    return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
    """Snapshot the given instance.

    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: name of the snapshot
    :param extra_properties: dict of extra image properties to include
                             when creating the image.
    :returns: A dict containing image metadata
    """
    # Reserve the image entry first; the compute manager uploads into it.
    image_meta = self._create_image(context, instance, name,
                                    'snapshot',
                                    extra_properties=extra_properties)

    # NOTE(comstud): Any changes to this method should also be made
    # to the snapshot_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.snapshot_instance(context, instance,
                                          image_meta['id'])
    return image_meta
def _create_image(self, context, instance, name, image_type,
                  extra_properties=None):
    """Create new image entry in the image service. This new image
    will be reserved for the compute manager to upload a snapshot
    or backup.

    :param context: security context
    :param instance: nova.db.sqlalchemy.models.Instance
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param extra_properties: dict of extra image properties to include

    :returns: the image metadata returned by the image service
    """
    if extra_properties is None:
        extra_properties = {}

    instance_uuid = instance['uuid']

    properties = {
        'instance_uuid': instance_uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }

    # Inherit metadata from the instance's base image.
    image_ref = instance.image_ref
    sent_meta = compute_utils.get_image_metadata(
        context, self.image_service, image_ref, instance)

    sent_meta['name'] = name
    sent_meta['is_public'] = False

    # The properties set up above and in extra_properties have precedence
    properties.update(extra_properties or {})
    sent_meta['properties'].update(properties)

    return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
                           extra_properties=None):
    """Snapshot the given volume-backed instance.

    :param instance: nova.db.sqlalchemy.models.Instance
    :param image_meta: metadata for the new image
    :param name: name of the backup or snapshot
    :param extra_properties: dict of extra image properties to include

    :returns: the new image metadata
    """
    image_meta['name'] = name
    image_meta['is_public'] = False
    properties = image_meta['properties']
    if instance['root_device_name']:
        properties['root_device_name'] = instance['root_device_name']
    properties.update(extra_properties or {})

    bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance['uuid'])

    # Build the new image's block device mapping: volume BDMs become
    # volume snapshots, others are copied through.
    mapping = []
    for bdm in bdms:
        if bdm.no_device:
            continue

        if bdm.is_volume:
            # create snapshot based on volume_id
            volume = self.volume_api.get(context, bdm.volume_id)
            # NOTE(yamahata): Should we wait for snapshot creation?
            #                 Linux LVM snapshot creation completes in
            #                 short time, it doesn't matter for now.
            name = _('snapshot for %s') % image_meta['name']
            snapshot = self.volume_api.create_snapshot_force(
                context, volume['id'], name, volume['display_description'])
            mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
                                                          bdm)
            mapping_dict = mapping_dict.get_image_mapping()
        else:
            mapping_dict = bdm.get_image_mapping()

        mapping.append(mapping_dict)

    # NOTE (ndipanov): Remove swap/ephemerals from mappings as they will be
    # in the block_device_mapping for the new image.
    image_mappings = properties.get('mappings')
    if image_mappings:
        properties['mappings'] = [m for m in image_mappings
                                  if not block_device.is_swap_or_ephemeral(
                                      m['virtual'])]

    if mapping:
        properties['block_device_mapping'] = mapping
        properties['bdm_v2'] = True

    # These are set by the image service on create; strip stale values.
    for attr in ('status', 'location', 'id'):
        image_meta.pop(attr, None)

    # the new image is simply a bucket of properties (particularly the
    # block device mapping, kernel and ramdisk IDs) with no image data,
    # hence the zero size
    image_meta['size'] = 0

    return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
    vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
                      task_state=[None, task_states.REBOOTING,
                                  task_states.REBOOTING_HARD,
                                  task_states.RESUMING,
                                  task_states.UNPAUSING,
                                  task_states.PAUSING,
                                  task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
    """Reboot the given instance.

    :param reboot_type: 'SOFT' or 'HARD'
    :raises: InstanceInvalidState when a SOFT reboot is not allowed in
        the instance's vm_state, or when a reboot of the requested type
        is already in progress.
    """
    # SOFT reboot needs a running guest; HARD does not.
    if (reboot_type == 'SOFT' and
            (instance['vm_state'] not in vm_states.ALLOW_SOFT_REBOOT)):
        raise exception.InstanceInvalidState(
            attr='vm_state',
            instance_uuid=instance['uuid'],
            state=instance['vm_state'],
            method='reboot')
    # Reject duplicate requests: a SOFT reboot during any reboot, or a
    # HARD reboot during a HARD reboot (HARD may escalate a SOFT one).
    if ((reboot_type == 'SOFT' and
            instance['task_state'] in
            (task_states.REBOOTING, task_states.REBOOTING_HARD)) or
        (reboot_type == 'HARD' and
            instance['task_state'] == task_states.REBOOTING_HARD)):
        raise exception.InstanceInvalidState(
            attr='task_state',
            instance_uuid=instance['uuid'],
            state=instance['task_state'],
            method='reboot')
    state = {'SOFT': task_states.REBOOTING,
             'HARD': task_states.REBOOTING_HARD}[reboot_type]
    instance.task_state = state
    instance.save(expected_task_state=[None, task_states.REBOOTING])

    self._record_action_start(context, instance, instance_actions.REBOOT)

    self.compute_rpcapi.reboot_instance(context, instance=instance,
                                        block_device_info=None,
                                        reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.ERROR],
                      task_state=[None])
def rebuild(self, context, instance, image_href, admin_password,
            files_to_inject=None, **kwargs):
    """Rebuild the given instance with the provided attributes.

    :param image_href: ref of the (possibly new) image to rebuild from
    :param admin_password: new admin password to set on the guest
    :param files_to_inject: optional list of files to inject
    :param kwargs: extra instance fields; also consumed here:
        metadata, preserve_ephemeral, auto_disk_config
    """
    orig_image_ref = instance.image_ref or ''
    files_to_inject = files_to_inject or []
    metadata = kwargs.get('metadata', {})
    preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
    auto_disk_config = kwargs.get('auto_disk_config')

    image_id, image = self._get_image(context, image_href)
    self._check_auto_disk_config(image=image, **kwargs)

    flavor = instance.get_flavor()
    self._checks_for_create_and_rebuild(context, image_id, image,
                                        flavor, metadata, files_to_inject)

    kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
        context, None, None, image)

    def _reset_image_metadata():
        """Remove old image properties that we're storing as instance
        system metadata.  These properties start with 'image_'.
        Then add the properties for the new image.
        """

        # FIXME(comstud): There's a race condition here in that if
        # the system_metadata for this instance is updated after
        # we do the previous save() and before we update.. those
        # other updates will be lost. Since this problem exists in
        # a lot of other places, I think it should be addressed in
        # a DB layer overhaul.

        orig_sys_metadata = dict(instance.system_metadata)
        # Remove the old keys
        for key in instance.system_metadata.keys():
            if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
                del instance.system_metadata[key]

        # Add the new ones
        new_sys_metadata = utils.get_system_metadata_from_image(
            image, flavor)

        instance.system_metadata.update(new_sys_metadata)
        instance.save()
        return orig_sys_metadata

    # Since image might have changed, we may have new values for
    # os_type, vm_mode, etc
    options_from_image = self._inherit_properties_from_image(
        image, auto_disk_config)
    instance.update(options_from_image)

    instance.task_state = task_states.REBUILDING
    instance.image_ref = image_href
    instance.kernel_id = kernel_id or ""
    instance.ramdisk_id = ramdisk_id or ""
    instance.progress = 0
    instance.update(kwargs)
    instance.save(expected_task_state=[None])

    # On a rebuild, since we're potentially changing images, we need to
    # wipe out the old image properties that we're storing as instance
    # system metadata... and copy in the properties for the new image.
    orig_sys_metadata = _reset_image_metadata()

    bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    self._record_action_start(context, instance, instance_actions.REBUILD)

    self.compute_rpcapi.rebuild_instance(context, instance=instance,
                                         new_pass=admin_password,
                                         injected_files=files_to_inject,
                                         image_ref=image_href,
                                         orig_image_ref=orig_image_ref,
                                         orig_sys_metadata=orig_sys_metadata,
                                         bdms=bdms,
                                         preserve_ephemeral=preserve_ephemeral,
                                         kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
    """Reverts a resize, deleting the 'new' instance in the process."""
    elevated = context.elevated()
    migration = migration_obj.Migration.get_by_instance_and_status(
        elevated, instance.uuid, 'finished')

    # reverse quota reservation for increased resource usage
    deltas = self._reverse_upsize_quota_delta(context, migration)
    quotas = self._reserve_quota_delta(context, deltas, instance)

    instance.task_state = task_states.RESIZE_REVERTING
    try:
        instance.save(expected_task_state=[None])
    except Exception:
        # Roll the quota reservation back before re-raising.
        with excutils.save_and_reraise_exception():
            quotas.rollback(context)

    migration.status = 'reverting'
    migration.save()
    # With cells, the best we can do right now is commit the reservations
    # immediately...
    if CONF.cells.enable:
        quotas.commit(context)

    self._record_action_start(context, instance,
                              instance_actions.REVERT_RESIZE)

    self.compute_rpcapi.revert_resize(context, instance,
                                      migration,
                                      migration.dest_compute,
                                      quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
    """Confirms a migration/resize and deletes the 'old' instance.

    :param migration: optional pre-fetched Migration; looked up by
        status 'finished' when not supplied.
    """
    elevated = context.elevated()
    if migration is None:
        migration = migration_obj.Migration.get_by_instance_and_status(
            elevated, instance.uuid, 'finished')

    # reserve quota only for any decrease in resource usage
    deltas = self._downsize_quota_delta(context, instance)
    quotas = self._reserve_quota_delta(context, deltas, instance)

    migration.status = 'confirming'
    migration.save()
    # With cells, the best we can do right now is commit the reservations
    # immediately...
    if CONF.cells.enable:
        quotas.commit(context)

    self._record_action_start(context, instance,
                              instance_actions.CONFIRM_RESIZE)

    self.compute_rpcapi.confirm_resize(context,
                                       instance,
                                       migration,
                                       migration.source_compute,
                                       quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
    """Calculate deltas required to adjust quota for an instance upsize.
    """
    # sense=1, compare=1: forward adjustment, positive (growth) deltas.
    return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
    """Calculate deltas required to reverse a prior upsizing
    quota adjustment.
    """
    old_flavor = flavor_obj.Flavor.get_by_id(
        context, migration_ref['old_instance_type_id'])
    new_flavor = flavor_obj.Flavor.get_by_id(
        context, migration_ref['new_instance_type_id'])
    # sense=-1, compare=-1: negate the prior positive adjustment.
    return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
    """Calculate deltas required to adjust quota for an instance downsize.
    """
    # Flavors are read from the instance's stashed old/new flavor info.
    old_flavor = instance.get_flavor('old')
    new_flavor = instance.get_flavor('new')
    # sense=1, compare=-1: forward adjustment, negative (shrink) deltas.
    return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, instance):
    """If there are deltas to reserve, construct a Quotas object and
    reserve the deltas for the given project.

    @param context: The nova request context.
    @param deltas: A dictionary of the proposed delta changes.
    @param instance: The instance we're operating on, so that
                     quotas can use the correct project_id/user_id.
    @return: nova.objects.quotas.Quotas
    """
    # An empty Quotas (no reservations) is returned when deltas is
    # empty; callers can still call commit/rollback safely.
    quotas = quotas_obj.Quotas()
    if deltas:
        project_id, user_id = quotas_obj.ids_from_instance(context,
                                                           instance)
        quotas.reserve(context, project_id=project_id, user_id=user_id,
                       **deltas)
    return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
                          current_instance_type, new_instance_type):
    """Special API cell logic for resize."""
    # With cells, the best we can do right now is commit the
    # reservations immediately...
    quotas.commit(context)
    # NOTE(johannes/comstud): The API cell needs a local migration
    # record for later resize_confirm and resize_reverts to deal
    # with quotas.  We don't need source and/or destination
    # information, just the old and new flavors. Status is set to
    # 'finished' since nothing else will update the status along
    # the way.
    mig = migration_obj.Migration()
    mig.instance_uuid = instance.uuid
    mig.old_instance_type_id = current_instance_type['id']
    mig.new_instance_type_id = new_instance_type['id']
    mig.status = 'finished'
    mig.create(context.elevated())
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
                          task_state=[None])
    def resize(self, context, instance, flavor_id=None,
               **extra_instance_updates):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.

        :raises: FlavorNotFound if the target flavor is unknown or disabled
        :raises: CannotResizeToSameFlavor if flavor_id matches the current one
        :raises: TooManyInstances if quota headroom is insufficient
        """
        self._check_auto_disk_config(instance, **extra_instance_updates)
        current_instance_type = flavors.extract_flavor(instance)
        # If flavor_id is not provided, only migrate the instance.
        if not flavor_id:
            LOG.debug(_("flavor_id is None. Assuming migration."),
                      instance=instance)
            new_instance_type = current_instance_type
        else:
            new_instance_type = flavors.get_flavor_by_flavor_id(
                flavor_id, read_deleted="no")
        current_instance_type_name = current_instance_type['name']
        new_instance_type_name = new_instance_type['name']
        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
                    " new instance type %(new_instance_type_name)s"),
                  {'current_instance_type_name': current_instance_type_name,
                   'new_instance_type_name': new_instance_type_name},
                  instance=instance)
        if not new_instance_type:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        same_instance_type = (current_instance_type['id'] ==
                              new_instance_type['id'])
        # NOTE(sirp): We don't want to force a customer to change their flavor
        # when Ops is migrating off of a failed host.
        if not same_instance_type and new_instance_type.get('disabled'):
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        # In the child compute cell a same-flavor "resize" is a plain
        # migration and is therefore allowed.
        if same_instance_type and flavor_id and self.cell_type != 'compute':
            raise exception.CannotResizeToSameFlavor()
        # ensure there is sufficient headroom for upsizes
        deltas = self._upsize_quota_delta(context, new_instance_type,
                                          current_instance_type)
        try:
            quotas = self._reserve_quota_delta(context, deltas, instance)
        except exception.OverQuota as exc:
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            headroom = exc.kwargs['headroom']
            # Report against the first exceeded resource only.
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)
            LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                       " tried to resize instance."),
                     {'overs': overs, 'pid': context.project_id})
            raise exception.TooManyInstances(overs=overs,
                                             req=deltas[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)
        instance.task_state = task_states.RESIZE_PREP
        instance.progress = 0
        instance.update(extra_instance_updates)
        instance.save(expected_task_state=[None])
        filter_properties = {'ignore_hosts': []}
        if not CONF.allow_resize_to_same_host:
            filter_properties['ignore_hosts'].append(instance['host'])
        # Here when flavor_id is None, the process is considered as migrate.
        if (not flavor_id and not CONF.allow_migrate_to_same_host):
            filter_properties['ignore_hosts'].append(instance['host'])
        if self.cell_type == 'api':
            # Commit reservations early and create migration record.
            self._resize_cells_support(context, quotas, instance,
                                       current_instance_type,
                                       new_instance_type)
        self._record_action_start(context, instance, instance_actions.RESIZE)
        scheduler_hint = {'filter_properties': filter_properties}
        self.compute_task_api.resize_instance(context, instance,
                extra_instance_updates, scheduler_hint=scheduler_hint,
                flavor=new_instance_type,
                reservations=quotas.reservations or [])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.PAUSED, vm_states.SUSPENDED],
                          task_state=[None])
    def shelve(self, context, instance):
        """Shelve an instance.

        Shuts down an instance and frees it up to be removed from the
        hypervisor.  Image-backed instances are snapshotted first so they
        can be restored later; volume-backed instances are offloaded
        directly since no snapshot image is needed.
        """
        instance.task_state = task_states.SHELVING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.SHELVE)
        image_id = None
        if not self.is_volume_backed_instance(context, instance):
            name = '%s-shelved' % instance['display_name']
            image_meta = self._create_image(context, instance, name,
                                            'snapshot')
            image_id = image_meta['id']
            self.compute_rpcapi.shelve_instance(context, instance=instance,
                                                image_id=image_id)
        else:
            self.compute_rpcapi.shelve_offload_instance(context,
                                                        instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
    def shelve_offload(self, context, instance):
        """Remove a shelved instance from the hypervisor.

        Only valid for instances already in the SHELVED state (enforced
        by the decorator above).
        """
        instance.task_state = task_states.SHELVING_OFFLOADING
        instance.save(expected_task_state=[None])
        self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SHELVED,
                                    vm_states.SHELVED_OFFLOADED], task_state=[None])
    def unshelve(self, context, instance):
        """Restore a shelved instance.

        Dispatched through the conductor (compute_task_api) so a new host
        can be scheduled when the instance was fully offloaded.
        """
        instance.task_state = task_states.UNSHELVING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNSHELVE)
        self.compute_task_api.unshelve_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    def add_fixed_ip(self, context, instance, network_id):
        """Add a fixed IP from the specified network to the given instance.

        Delegated over RPC to the compute host running the instance.
        """
        self.compute_rpcapi.add_fixed_ip_to_instance(context,
                instance=instance, network_id=network_id)
    @wrap_check_policy
    @check_instance_lock
    def remove_fixed_ip(self, context, instance, address):
        """Remove the given fixed IP address from the instance.

        Delegated over RPC to the compute host running the instance.
        """
        self.compute_rpcapi.remove_fixed_ip_from_instance(context,
                instance=instance, address=address)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
    def pause(self, context, instance):
        """Pause the given instance.

        Sets the PAUSING task state, records the action and asks the
        instance's compute host to pause it.
        """
        instance.task_state = task_states.PAUSING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.PAUSE)
        self.compute_rpcapi.pause_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.PAUSED])
    def unpause(self, context, instance):
        """Unpause the given instance.

        Mirror of pause(): only valid from the PAUSED vm_state.
        """
        instance.task_state = task_states.UNPAUSING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNPAUSE)
        self.compute_rpcapi.unpause_instance(context, instance)
    @wrap_check_policy
    def get_diagnostics(self, context, instance):
        """Retrieve diagnostics for the given instance.

        Synchronous RPC call to the instance's compute host; the return
        format is hypervisor-driver specific.
        """
        return self.compute_rpcapi.get_diagnostics(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
    def suspend(self, context, instance):
        """Suspend the given instance.

        Sets the SUSPENDING task state, records the action and asks the
        instance's compute host to suspend it.
        """
        instance.task_state = task_states.SUSPENDING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.SUSPEND)
        self.compute_rpcapi.suspend_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.SUSPENDED])
    def resume(self, context, instance):
        """Resume the given instance.

        Mirror of suspend(): only valid from the SUSPENDED vm_state.
        """
        instance.task_state = task_states.RESUMING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.RESUME)
        self.compute_rpcapi.resume_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def rescue(self, context, instance, rescue_password=None,
               rescue_image_ref=None):
        """Rescue the given instance.

        :param rescue_password: admin password to set in the rescue
            environment, or None to let the compute host generate one
        :param rescue_image_ref: alternate image to boot the rescue
            environment from, or None for the default
        :raises: InstanceNotRescuable if the instance boots from a volume
        """
        # Every attached volume must actually be in the attached state
        # before we hand the instance over to the rescue flow.
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance['uuid'])
        for bdm in bdms:
            if bdm.volume_id:
                vol = self.volume_api.get(context, bdm.volume_id)
                self.volume_api.check_attached(context, vol)
        if self.is_volume_backed_instance(context, instance, bdms):
            reason = _("Cannot rescue a volume-backed instance")
            raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
                                                 reason=reason)
        self.update(context,
                    instance,
                    task_state=task_states.RESCUING,
                    expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.RESCUE)
        self.compute_rpcapi.rescue_instance(context, instance=instance,
            rescue_password=rescue_password, rescue_image_ref=rescue_image_ref)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.RESCUED])
    def unrescue(self, context, instance):
        """Unrescue the given instance.

        Only valid from the RESCUED vm_state (enforced by the decorator).
        """
        self.update(context,
                    instance,
                    task_state=task_states.UNRESCUING,
                    expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNRESCUE)
        self.compute_rpcapi.unrescue_instance(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def set_admin_password(self, context, instance, password=None):
        """Set the root/admin password for the given instance.

        :param password: the new password; None lets the compute host
            generate one
        """
        self.update(context,
                    instance,
                    task_state=task_states.UPDATING_PASSWORD,
                    expected_task_state=[None])
        self._record_action_start(context, instance,
                                  instance_actions.CHANGE_PASSWORD)
        self.compute_rpcapi.set_admin_password(context,
                                               instance=instance,
                                               new_pass=password)
    @wrap_check_policy
    @check_instance_host
    def get_vnc_console(self, context, instance, console_type):
        """Get a url to an instance's VNC console.

        Fetches connection info from the instance's compute host, then
        registers the access token with the consoleauth service so the
        proxy can later validate the client connection.
        """
        connect_info = self.compute_rpcapi.get_vnc_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance['uuid'])
        return {'url': connect_info['access_url']}
    @check_instance_host
    def get_vnc_connect_info(self, context, instance, console_type):
        """Used in a child cell to get VNC console info.

        Unlike get_vnc_console(), no consoleauth authorization is done
        here; the raw connect info dict is returned for the API cell.
        """
        connect_info = self.compute_rpcapi.get_vnc_console(context,
                instance=instance, console_type=console_type)
        return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_spice_console(self, context, instance, console_type):
        """Get a url to an instance's SPICE console.

        Same flow as get_vnc_console(): fetch connect info from the
        compute host, authorize the token with consoleauth, return the
        access URL.
        """
        connect_info = self.compute_rpcapi.get_spice_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance['uuid'])
        return {'url': connect_info['access_url']}
    @check_instance_host
    def get_spice_connect_info(self, context, instance, console_type):
        """Used in a child cell to get SPICE console info.

        No consoleauth authorization is performed here.
        """
        connect_info = self.compute_rpcapi.get_spice_console(context,
                instance=instance, console_type=console_type)
        return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_rdp_console(self, context, instance, console_type):
        """Get a url to an instance's RDP console.

        Same flow as get_vnc_console(): fetch connect info from the
        compute host, authorize the token with consoleauth, return the
        access URL.
        """
        connect_info = self.compute_rpcapi.get_rdp_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance['uuid'])
        return {'url': connect_info['access_url']}
    @check_instance_host
    def get_rdp_connect_info(self, context, instance, console_type):
        """Used in a child cell to get RDP console info.

        No consoleauth authorization is performed here.
        """
        connect_info = self.compute_rpcapi.get_rdp_console(context,
                instance=instance, console_type=console_type)
        return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_console_output(self, context, instance, tail_length=None):
        """Get console output for an instance.

        :param tail_length: number of trailing lines to return, or None
            for the full log
        """
        return self.compute_rpcapi.get_console_output(context,
                instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
    @wrap_check_policy
    def unlock(self, context, instance):
        """Unlock the given instance.

        If the lock was placed by a different role ('owner' vs 'admin'),
        the caller must pass the 'unlock_override' policy check before
        the lock is cleared.
        """
        # If the instance was locked by someone else, check
        # that we're allowed to override the lock
        is_owner = instance.project_id == context.project_id
        expect_locked_by = 'owner' if is_owner else 'admin'
        locked_by = instance.locked_by
        if locked_by and locked_by != expect_locked_by:
            check_policy(context, 'unlock_override', instance)
        context = context.elevated()
        LOG.debug(_('Unlocking'), context=context, instance=instance)
        instance.locked = False
        instance.locked_by = None
        instance.save()
    @wrap_check_policy
    def get_lock(self, context, instance):
        """Return the boolean state of given instance's lock.

        Re-fetches the instance by uuid rather than trusting the passed
        object, so the value reflects the current DB state.
        """
        return self.get(context, instance['uuid'])['locked']
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def reset_network(self, context, instance):
        """Reset networking on the instance.

        Delegated over RPC to the instance's compute host.
        """
        self.compute_rpcapi.reset_network(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def inject_network_info(self, context, instance):
        """Inject network info for the instance.

        Delegated over RPC to the instance's compute host.
        """
        self.compute_rpcapi.inject_network_info(context, instance=instance)
    def _attach_volume(self, context, instance, volume_id, device,
                       disk_bus, device_type):
        """Attach an existing volume to an existing instance.

        This method is separated to make it possible for cells version
        to override it.

        :returns: the device name the volume was reserved under
        :raises: re-raises any failure from the volume checks or the
            attach RPC after destroying the reserved BDM
        """
        # NOTE(vish): This is done on the compute host because we want
        #             to avoid a race where two devices are requested at
        #             the same time. When db access is removed from
        #             compute, the bdm will be created here and we will
        #             have to make sure that they are assigned atomically.
        device = self.compute_rpcapi.reserve_block_device_name(
            context, device=device, instance=instance, volume_id=volume_id,
            disk_bus=disk_bus, device_type=device_type)
        volume_bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
            context, volume_id)
        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.check_attach(context, volume, instance=instance)
            self.volume_api.reserve_volume(context, volume_id)
            self.compute_rpcapi.attach_volume(context, instance=instance,
                    volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
        except Exception:
            # Roll back the BDM reserved above so the device name is freed.
            with excutils.save_and_reraise_exception():
                volume_bdm.destroy(context)
        return device
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED],
                          task_state=[None])
    def attach_volume(self, context, instance, volume_id, device=None,
                      disk_bus=None, device_type=None):
        """Attach an existing volume to an existing instance.

        :param device: requested device name (e.g. /dev/vdb), or None to
            let the compute host pick one
        :raises: InvalidDevicePath if the requested device name is malformed
        """
        # NOTE(vish): Fail fast if the device is not going to pass. This
        #             will need to be removed along with the test if we
        #             change the logic in the manager for what constitutes
        #             a valid device.
        if device and not block_device.match_device(device):
            raise exception.InvalidDevicePath(path=device)
        return self._attach_volume(context, instance, volume_id, device,
                                   disk_bus, device_type)
    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.

        This method is separated to make it easier for cells version
        to override.
        """
        # Validate and mark the volume as detaching before asking the
        # compute host to do the actual detach.
        self.volume_api.check_detach(context, volume)
        self.volume_api.begin_detaching(context, volume['id'])
        self.compute_rpcapi.detach_volume(context, instance=instance,
                volume_id=volume['id'])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED],
                          task_state=[None])
    def detach_volume(self, context, instance, volume):
        """Detach a volume from an instance.

        :raises: InvalidVolume if the volume is not currently attached
        :raises: VolumeUnattached if the volume is attached to a
            different instance
        """
        if volume['attach_status'] == 'detached':
            msg = _("Volume must be attached in order to detach.")
            raise exception.InvalidVolume(reason=msg)
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if volume['instance_uuid'] != instance['uuid']:
            raise exception.VolumeUnattached(volume_id=volume['id'])
        self._detach_volume(context, instance, volume)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED,
                                    vm_states.RESIZED, vm_states.SOFT_DELETED],
                          task_state=[None])
    def swap_volume(self, context, instance, old_volume, new_volume):
        """Swap volume attached to an instance.

        Validates that old_volume is attached to this instance and that
        new_volume is detached and at least as large, then marks the two
        volumes detaching/reserved and asks the compute host to perform
        the swap.  On RPC failure both volume state changes are rolled
        back.

        :raises: VolumeUnattached, InvalidVolume
        """
        if old_volume['attach_status'] == 'detached':
            raise exception.VolumeUnattached(volume_id=old_volume['id'])
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if old_volume['instance_uuid'] != instance['uuid']:
            msg = _("Old volume is attached to a different instance.")
            raise exception.InvalidVolume(reason=msg)
        if new_volume['attach_status'] == 'attached':
            msg = _("New volume must be detached in order to swap.")
            raise exception.InvalidVolume(reason=msg)
        if int(new_volume['size']) < int(old_volume['size']):
            msg = _("New volume must be the same size or larger.")
            raise exception.InvalidVolume(reason=msg)
        self.volume_api.check_detach(context, old_volume)
        self.volume_api.check_attach(context, new_volume, instance=instance)
        self.volume_api.begin_detaching(context, old_volume['id'])
        self.volume_api.reserve_volume(context, new_volume['id'])
        try:
            self.compute_rpcapi.swap_volume(
                    context, instance=instance,
                    old_volume_id=old_volume['id'],
                    new_volume_id=new_volume['id'])
        except Exception:  # pylint: disable=W0702
            # Undo the detaching/reserved markers set above.
            with excutils.save_and_reraise_exception():
                self.volume_api.roll_detaching(context, old_volume['id'])
                self.volume_api.unreserve_volume(context, new_volume['id'])
    @wrap_check_policy
    @check_instance_lock
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Use hotplug to add a network adapter to an instance.

        Synchronous RPC call; returns the compute host's result.
        """
        return self.compute_rpcapi.attach_interface(context,
            instance=instance, network_id=network_id, port_id=port_id,
            requested_ip=requested_ip)
    @wrap_check_policy
    @check_instance_lock
    def detach_interface(self, context, instance, port_id):
        """Detach a network adapter from an instance.

        Delegated over RPC to the instance's compute host.
        """
        self.compute_rpcapi.detach_interface(context, instance=instance,
            port_id=port_id)
    @wrap_check_policy
    def get_instance_metadata(self, context, instance):
        """Get all metadata associated with an instance.

        :returns: a plain dict copy of the instance's metadata rows
        """
        rv = self.db.instance_metadata_get(context, instance['uuid'])
        return dict(rv.iteritems())
    def get_all_instance_metadata(self, context, search_filts):
        """Get user metadata across all instances matching search_filts."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='metadata')
    def get_all_system_metadata(self, context, search_filts):
        """Get system metadata across all instances matching search_filts."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='system_metadata')
    def _get_all_instance_metadata(self, context, search_filts, metadata_type):
        """Get all metadata of the given type across all instances.

        :param search_filts: list of filter dicts; each may contain
            'resource_id' (instance uuids), 'key' and 'value' regex
            pattern lists.  Successive filters are ANDed together.
        :param metadata_type: 'metadata' or 'system_metadata'
        :returns: list of {'key', 'value', 'instance_id'} dicts
        """
        def _match_any(pattern_list, string):
            # True if the string matches at least one regex in the list.
            return any([re.match(pattern, string)
                        for pattern in pattern_list])

        def _filter_metadata(instance, search_filt, input_metadata):
            uuids = search_filt.get('resource_id', [])
            keys_filter = search_filt.get('key', [])
            values_filter = search_filt.get('value', [])
            output_metadata = {}

            if uuids and instance.get('uuid') not in uuids:
                return {}

            for (k, v) in input_metadata.iteritems():
                # Both keys and value defined -- AND
                if ((keys_filter and values_filter) and
                        not _match_any(keys_filter, k) and
                        not _match_any(values_filter, v)):
                    continue
                # Only keys or value is defined
                elif ((keys_filter and not _match_any(keys_filter, k)) or
                        (values_filter and not _match_any(values_filter, v))):
                    continue
                output_metadata[k] = v
            return output_metadata

        formatted_metadata_list = []
        instances = self._get_instances_by_filters(context, filters={},
                                                   sort_key='created_at',
                                                   sort_dir='desc')
        for instance in instances:
            try:
                check_policy(context, 'get_all_instance_%s' % metadata_type,
                             instance)
                metadata = instance.get(metadata_type, {})
                for filt in search_filts:
                    # By chaining the input to the output, the filters are
                    # ANDed together
                    metadata = _filter_metadata(instance, filt, metadata)
                for (k, v) in metadata.iteritems():
                    formatted_metadata_list.append({'key': k, 'value': v,
                                        'instance_id': instance.get('uuid')})
            except exception.PolicyNotAuthorized:
                # failed policy check - not allowed to
                # read this metadata
                continue
        return formatted_metadata_list
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def delete_instance_metadata(self, context, instance, key):
        """Delete the given metadata item from an instance.

        Removes the key from the DB, then notifies the compute host with
        a diff so the guest-visible metadata can be refreshed.
        """
        instance.delete_metadata_key(key)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff={key: ['-']})
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Updates or creates instance metadata.

        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the resulting metadata dict
        :raises: quota errors from _check_metadata_properties_quota
        """
        orig = dict(instance.metadata)
        if delete:
            _metadata = metadata
        else:
            _metadata = dict(instance.metadata)
            _metadata.update(metadata)
        self._check_metadata_properties_quota(context, _metadata)
        instance.metadata = _metadata
        instance.save()
        # Notify the compute host with only the changed keys.
        diff = _diff_dict(orig, instance.metadata)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff=diff)
        return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance['image_ref']:
return True
if bdms is None:
bdms = block_device_obj.BlockDeviceMappingList.\
get_by_instance_uuid(context, instance['uuid'])
root_bdm = bdms.root_bdm()
if not root_bdm:
return False
return root_bdm.is_volume
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def live_migrate(self, context, instance, block_migration,
                     disk_over_commit, host_name):
        """Migrate a server lively to a new host.

        :param block_migration: whether to do a block migration (copy
            disks) instead of relying on shared storage
        :param disk_over_commit: allow disk over-commit on the target
        :param host_name: target host, or None/falsy to let the
            scheduler choose one
        """
        LOG.debug(_("Going to try to live migrate instance to %s"),
                  host_name or "another host", instance=instance)
        instance.task_state = task_states.MIGRATING
        instance.save(expected_task_state=[None])
        self.compute_task_api.live_migrate_instance(context, instance,
                host_name, block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
                          task_state=[None])
    def evacuate(self, context, instance, host, on_shared_storage,
                 admin_password=None):
        """Running evacuate to target host.

        Checking vm compute host state, if the host not in expected_state,
        raising an exception.

        :param host: target host to rebuild the instance on
        :param on_shared_storage: whether the instance disks live on
            storage shared with the target host
        :param admin_password: new admin password, or None
        :raises: ComputeServiceInUse if the source compute service is
            still reported up
        """
        LOG.debug(_('vm evacuation scheduled'))
        inst_host = instance['host']
        service = service_obj.Service.get_by_compute_host(context, inst_host)
        # Evacuation only makes sense when the source host is down.
        if self.servicegroup_api.service_is_up(service):
            msg = (_('Instance compute service state on %s '
                     'expected to be down, but it was up.') % inst_host)
            LOG.error(msg)
            raise exception.ComputeServiceInUse(host=inst_host)
        instance = self.update(context, instance, expected_task_state=[None],
                               task_state=task_states.REBUILDING)
        self._record_action_start(context, instance, instance_actions.EVACUATE)
        # NOTE(danms): Transitional until evacuate supports objects
        inst_obj = instance_obj.Instance._from_db_object(
            context, instance_obj.Instance(), instance,
            expected_attrs=['metadata', 'system_metadata', 'info_cache'])
        # Evacuate is implemented as a rebuild with recreate=True on the
        # target host.
        return self.compute_rpcapi.rebuild_instance(context,
                        instance=inst_obj,
                        new_pass=admin_password,
                        injected_files=None,
                        image_ref=None,
                        orig_image_ref=None,
                        orig_sys_metadata=None,
                        bdms=None,
                        recreate=True,
                        on_shared_storage=on_shared_storage,
                        host=host)
    def get_migrations(self, context, filters):
        """Get all migrations for the given filters.

        :returns: a MigrationList of matching migration records
        """
        return migration_obj.MigrationList.get_by_filters(context, filters)
    @wrap_check_policy
    def volume_snapshot_create(self, context, volume_id, create_info):
        """Ask the compute host of the volume's instance to create an
        assisted snapshot of the given volume.

        :returns: dict shaped like a Cinder snapshot response, built from
            create_info (the snapshot is created asynchronously)
        """
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
                volume_id, create_info)
        snapshot = {
            'snapshot': {
                'id': create_info.get('id'),
                'volumeId': volume_id
            }
        }
        return snapshot
    @wrap_check_policy
    def volume_snapshot_delete(self, context, volume_id, snapshot_id,
                               delete_info):
        """Ask the compute host of the volume's instance to delete an
        assisted snapshot of the given volume.
        """
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
                volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host operations."""

    def __init__(self, rpcapi=None):
        # Allow an alternate rpcapi (e.g. a cells-aware one) to be injected.
        self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
        super(HostAPI, self).__init__()

    def _assert_host_exists(self, context, host_name, must_be_up=False):
        """Raise HostNotFound if compute host doesn't exist.

        :param must_be_up: also require the compute service to be alive
        :returns: the canonical host name from the service record
        """
        service = service_obj.Service.get_by_compute_host(context, host_name)
        if not service:
            raise exception.HostNotFound(host=host_name)
        if must_be_up and not self.servicegroup_api.service_is_up(service):
            raise exception.ComputeServiceUnavailable(host=host_name)
        return service['host']

    @wrap_exception()
    def set_host_enabled(self, context, host_name, enabled):
        """Sets the specified host's ability to accept new instances.

        Emits start/end host-update notifications around the RPC call.
        """
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'enabled': enabled}
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.start',
                                               payload)
        result = self.rpcapi.set_host_enabled(context, enabled=enabled,
                host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.end',
                                               payload)
        return result

    def get_host_uptime(self, context, host_name):
        """Returns the result of calling "uptime" on the target host."""
        host_name = self._assert_host_exists(context, host_name,
                         must_be_up=True)
        return self.rpcapi.get_host_uptime(context, host=host_name)

    @wrap_exception()
    def host_power_action(self, context, host_name, action):
        """Reboots, shuts down or powers up the host.

        Emits start/end host-update notifications around the RPC call.
        """
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'action': action}
        compute_utils.notify_about_host_update(context,
                                               'power_action.start',
                                               payload)
        result = self.rpcapi.host_power_action(context, action=action,
                host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'power_action.end',
                                               payload)
        return result

    @wrap_exception()
    def set_host_maintenance(self, context, host_name, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.

        Emits start/end host-update notifications around the RPC call.
        """
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'mode': mode}
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.start',
                                               payload)
        result = self.rpcapi.host_maintenance_mode(context,
                host_param=host_name, mode=mode, host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.end',
                                               payload)
        return result

    def service_get_all(self, context, filters=None, set_zones=False):
        """Returns a list of services, optionally filtering the results.

        If specified, 'filters' should be a dictionary containing services
        attributes and matching values.  Ie, to get a list of services for
        the 'compute' topic, use filters={'topic': 'compute'}.
        """
        if filters is None:
            filters = {}
        # 'disabled' is handled at the DB layer; the remaining filters
        # are applied in Python below.
        disabled = filters.pop('disabled', None)
        if 'availability_zone' in filters:
            set_zones = True
        services = service_obj.ServiceList.get_all(context, disabled,
                                                   set_zones=set_zones)
        ret_services = []
        for service in services:
            for key, val in filters.iteritems():
                if service[key] != val:
                    break
            else:
                # All filters matched.
                ret_services.append(service)
        return ret_services

    def service_get_by_compute_host(self, context, host_name):
        """Get service entry for the given compute hostname."""
        return service_obj.Service.get_by_compute_host(context, host_name)

    def service_update(self, context, host_name, binary, params_to_update):
        """Enable / Disable a service.

        For compute services, this stops new builds and migrations going to
        the host.

        :returns: the updated Service object
        """
        service = service_obj.Service.get_by_args(context, host_name,
                                                  binary)
        service.update(params_to_update)
        service.save()
        return service

    def service_delete(self, context, service_id):
        """Deletes the specified service."""
        service_obj.Service.get_by_id(context, service_id).destroy()

    def instance_get_all_by_host(self, context, host_name):
        """Return all instances on the given host."""
        return self.db.instance_get_all_by_host(context, host_name)

    def task_log_get_all(self, context, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Return the task logs within a given range, optionally
        filtering by host and/or state.
        """
        return self.db.task_log_get_all(context, task_name,
                                        period_beginning,
                                        period_ending,
                                        host=host,
                                        state=state)

    def compute_node_get(self, context, compute_id):
        """Return compute node entry for particular integer ID."""
        return self.db.compute_node_get(context, int(compute_id))

    def compute_node_get_all(self, context):
        """Return all compute node entries."""
        return self.db.compute_node_get_all(context)

    def compute_node_search_by_hypervisor(self, context, hypervisor_match):
        """Return compute nodes whose hypervisor hostname matches."""
        return self.db.compute_node_search_by_hypervisor(context,
                hypervisor_match)

    def compute_node_statistics(self, context):
        """Return aggregated statistics over all compute nodes."""
        return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
    """Sub-set of the Compute Manager API for managing instance actions."""

    def actions_get(self, context, instance):
        """Return all recorded actions for the given instance."""
        return instance_action.InstanceActionList.get_by_instance_uuid(
            context, instance['uuid'])

    def action_get_by_request_id(self, context, instance, request_id):
        """Return the instance action recorded for a specific request id."""
        return instance_action.InstanceAction.get_by_request_id(
            context, instance['uuid'], request_id)

    def action_events_get(self, context, instance, action_id):
        """Return the events recorded under the given action id."""
        return instance_action.InstanceActionEventList.get_by_action(
            context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
    def __init__(self, **kwargs):
        # RPC client used to notify compute hosts of aggregate changes.
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        super(AggregateAPI, self).__init__(**kwargs)
    @wrap_exception()
    def create_aggregate(self, context, aggregate_name, availability_zone):
        """Creates the model for the aggregate.

        :param availability_zone: if set, stored as the aggregate's
            'availability_zone' metadata entry
        :returns: a dict view of the new aggregate without the (empty)
            'hosts' and 'metadata' keys, matching the historical API shape
        """
        aggregate = aggregate_obj.Aggregate()
        aggregate.name = aggregate_name
        if availability_zone:
            aggregate.metadata = {'availability_zone': availability_zone}
        aggregate.create(context)
        aggregate = self._reformat_aggregate_info(aggregate)
        # To maintain the same API result as before.
        del aggregate['hosts']
        del aggregate['metadata']
        return aggregate
    def get_aggregate(self, context, aggregate_id):
        """Get an aggregate by id.

        :returns: a reformatted dict view of the aggregate
        """
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        return self._reformat_aggregate_info(aggregate)
    def get_aggregate_list(self, context):
        """Get all the aggregates.

        :returns: list of reformatted dict views, one per aggregate
        """
        aggregates = aggregate_obj.AggregateList.get_all(context)
        return [self._reformat_aggregate_info(agg) for agg in aggregates]
def is_safe_to_update_az(self, context, aggregate, metadata,
action_name):
"""Determine if updates alter an aggregate's availability zone."""
if 'availability_zone' in metadata:
aggregate_az = aggregate.metadata.get("availability_zone")
for host in aggregate.hosts:
host_az = availability_zones.get_host_availability_zone(
context, host)
if (host_az and host_az != metadata["availability_zone"]
and host_az != CONF.default_availability_zone and
host_az != aggregate_az):
msg = _("This aggregate contains hosts in"
" an existing availability zone")
raise exception.InvalidAggregateAction(
action=action_name,
aggregate_id=aggregate.id,
reason=msg)
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
self.is_safe_to_update_az(context, aggregate,
values, "update aggregate")
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, aggregate,
metadata, "update aggregate metadata")
aggregate.update_metadata(metadata)
# If updated metadata include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if metadata and metadata.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
msg = _("Host aggregate is not empty")
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason=msg)
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
# Update the availability_zone cache to avoid getting wrong
# availability_zone in cache retention time when add/remove
# host to/from aggregate.
if aggregate_meta and aggregate_meta.get('availability_zone'):
availability_zones.update_host_availability_zone_cache(context,
host_name)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
    """Subset of the Compute Manager API for managing key pairs."""

    get_notifier = functools.partial(rpc.get_notifier, service='api')
    wrap_exception = functools.partial(exception.wrap_exception,
                                       get_notifier=get_notifier)

    def _notify(self, context, event_suffix, keypair_name):
        """Emit a 'keypair.<event_suffix>' notification for *keypair_name*."""
        self.get_notifier().info(
            context,
            'keypair.%s' % event_suffix,
            {'tenant_id': context.project_id,
             'user_id': context.user_id,
             'key_name': keypair_name})

    def _validate_new_key_pair(self, context, user_id, key_name):
        """Reject unsafe or over-long key names and enforce keypair quota."""
        safe_chars = "_- " + string.digits + string.ascii_letters
        if any(ch not in safe_chars for ch in key_name):
            raise exception.InvalidKeypair(
                reason=_("Keypair name contains unsafe characters"))
        if not 0 < len(key_name) < 256:
            raise exception.InvalidKeypair(
                reason=_('Keypair name must be between '
                         '1 and 255 characters long'))
        # Count the user's existing keypairs and check the new total fits.
        count = QUOTAS.count(context, 'key_pairs', user_id)
        try:
            QUOTAS.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()

    @wrap_exception()
    def import_key_pair(self, context, user_id, key_name, public_key):
        """Import a key pair using an existing public key."""
        self._validate_new_key_pair(context, user_id, key_name)
        self._notify(context, 'import.start', key_name)
        keypair = keypair_obj.KeyPair()
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.fingerprint = crypto.generate_fingerprint(public_key)
        keypair.public_key = public_key
        keypair.create(context)
        self._notify(context, 'import.end', key_name)
        return keypair

    @wrap_exception()
    def create_key_pair(self, context, user_id, key_name):
        """Create a new key pair; returns (keypair, private_key)."""
        self._validate_new_key_pair(context, user_id, key_name)
        self._notify(context, 'create.start', key_name)
        private_key, public_key, fingerprint = crypto.generate_key_pair()
        keypair = keypair_obj.KeyPair()
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create(context)
        self._notify(context, 'create.end', key_name)
        return keypair, private_key

    @wrap_exception()
    def delete_key_pair(self, context, user_id, key_name):
        """Delete a keypair by name."""
        self._notify(context, 'delete.start', key_name)
        keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
        self._notify(context, 'delete.end', key_name)

    def get_key_pairs(self, context, user_id):
        """List key pairs."""
        return keypair_obj.KeyPairList.get_by_user(context, user_id)

    def get_key_pair(self, context, user_id, key_name):
        """Get a keypair by name."""
        return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
    """Sub-set of the Compute API related to managing security groups
    and security group rules
    """
    # The nova security group api does not use a uuid for the id.
    id_is_uuid = False
    def __init__(self, **kwargs):
        super(SecurityGroupAPI, self).__init__(**kwargs)
        # RPC handle used to tell compute hosts to refresh their rules.
        self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
    def validate_property(self, value, property, allowed):
        """Validate given security group property.
        :param value: the value to validate, as a string or unicode
        :param property: the property, either 'name' or 'description'
        :param allowed: the range of characters allowed
        """
        try:
            val = value.strip()
        except AttributeError:
            msg = _("Security group %s is not a string or unicode") % property
            self.raise_invalid_property(msg)
        if not val:
            msg = _("Security group %s cannot be empty.") % property
            self.raise_invalid_property(msg)
        if allowed and not re.match(allowed, val):
            # Some validation to ensure that values match API spec.
            # - Alphanumeric characters, spaces, dashes, and underscores.
            # TODO(Daviey): LP: #813685 extend beyond group_name checking, and
            # probably create a param validator that can be used elsewhere.
            msg = (_("Value (%(value)s) for parameter Group%(property)s is "
                     "invalid. Content limited to '%(allowed)s'.") %
                   {'value': value, 'allowed': allowed,
                    'property': property.capitalize()})
            self.raise_invalid_property(msg)
        if len(val) > 255:
            msg = _("Security group %s should not be greater "
                    "than 255 characters.") % property
            self.raise_invalid_property(msg)
    def ensure_default(self, context):
        """Ensure that a context has a security group.
        Creates a security group for the security context if it does not
        already exist.
        :param context: the security context
        """
        self.db.security_group_ensure_default(context)
    def create_security_group(self, context, name, description):
        """Create a security group, reserving and committing quota for it."""
        try:
            reservations = QUOTAS.reserve(context, security_groups=1)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security groups.")
            self.raise_over_quota(msg)
        LOG.audit(_("Create Security Group %s"), name, context=context)
        try:
            self.ensure_default(context)
            group = {'user_id': context.user_id,
                     'project_id': context.project_id,
                     'name': name,
                     'description': description}
            try:
                group_ref = self.db.security_group_create(context, group)
            except exception.SecurityGroupExists:
                msg = _('Security group %s already exists') % name
                self.raise_group_already_exists(msg)
            # Commit the reservation
            QUOTAS.commit(context, reservations)
        except Exception:
            # Roll the reservation back on any failure, then re-raise.
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)
        return group_ref
    def update_security_group(self, context, security_group,
                              name, description):
        """Rename/re-describe a group; system groups cannot be updated."""
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = (_("Unable to update system group '%s'") %
                   security_group['name'])
            self.raise_invalid_group(msg)
        group = {'name': name,
                 'description': description}
        columns_to_join = ['rules.grantee_group']
        group_ref = self.db.security_group_update(context,
                security_group['id'],
                group,
                columns_to_join=columns_to_join)
        return group_ref
    def get(self, context, name=None, id=None, map_exception=False):
        """Look up one security group by name (project-scoped) or by id."""
        self.ensure_default(context)
        try:
            if name:
                return self.db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name)
            elif id:
                return self.db.security_group_get(context, id)
        except exception.NotFound as exp:
            if map_exception:
                msg = exp.format_message()
                self.raise_not_found(msg)
            else:
                raise
    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        """List security groups by names/ids, or by project/admin scope."""
        self.ensure_default(context)
        groups = []
        if names or ids:
            if names:
                for name in names:
                    groups.append(self.db.security_group_get_by_name(context,
                                                                     project,
                                                                     name))
            if ids:
                for id in ids:
                    groups.append(self.db.security_group_get(context, id))
        elif context.is_admin:
            # TODO(eglynn): support a wider set of search options than just
            # all_tenants, at least include the standard filters defined for
            # the EC2 DescribeSecurityGroups API for the non-admin case also
            if (search_opts and 'all_tenants' in search_opts):
                groups = self.db.security_group_get_all(context)
            else:
                groups = self.db.security_group_get_by_project(context,
                                                               project)
        elif project:
            groups = self.db.security_group_get_by_project(context, project)
        return groups
    def destroy(self, context, security_group):
        """Delete a security group; refuses system groups and in-use groups."""
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = _("Unable to delete system group '%s'") % \
                  security_group['name']
            self.raise_invalid_group(msg)
        if self.db.security_group_in_use(context, security_group['id']):
            msg = _("Security group is still in use")
            self.raise_invalid_group(msg)
        quotas = quotas_obj.Quotas()
        quota_project, quota_user = quotas_obj.ids_from_security_group(
            context, security_group)
        try:
            quotas.reserve(context, project_id=quota_project,
                           user_id=quota_user, security_groups=-1)
        except Exception:
            # Best effort: a usage-accounting failure must not block delete.
            LOG.exception(_("Failed to update usages deallocating "
                            "security group"))
        LOG.audit(_("Delete security group %s"), security_group['name'],
                  context=context)
        self.db.security_group_destroy(context, security_group['id'])
        # Commit the reservations
        quotas.commit()
    def is_associated_with_server(self, security_group, instance_uuid):
        """Check if the security group is already associated
        with the instance. If Yes, return True.
        """
        if not security_group:
            return False
        instances = security_group.get('instances')
        if not instances:
            return False
        for inst in instances:
            if (instance_uuid == inst['uuid']):
                return True
        return False
    @wrap_check_security_groups_policy
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""
        security_group = self.db.security_group_get_by_name(context,
                context.project_id,
                security_group_name)
        instance_uuid = instance['uuid']
        #check if the security group is associated with the server
        if self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_add_security_group(context.elevated(),
                                            instance_uuid,
                                            security_group['id'])
        # NOTE(comstud): No instance_uuid argument to this compute manager
        # call
        self.security_group_rpcapi.refresh_security_group_rules(context,
                security_group['id'], host=instance['host'])
    @wrap_check_security_groups_policy
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        security_group = self.db.security_group_get_by_name(context,
                context.project_id,
                security_group_name)
        instance_uuid = instance['uuid']
        #check if the security group is associated with the server
        if not self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupNotExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_remove_security_group(context.elevated(),
                                               instance_uuid,
                                               security_group['id'])
        # NOTE(comstud): No instance_uuid argument to this compute manager
        # call
        self.security_group_rpcapi.refresh_security_group_rules(context,
                security_group['id'], host=instance['host'])
    def get_rule(self, context, id):
        """Fetch one security group rule by id, mapping NotFound."""
        self.ensure_default(context)
        try:
            return self.db.security_group_rule_get(context, id)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            self.raise_not_found(msg)
    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.
        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both.
        """
        count = QUOTAS.count(context, 'security_group_rules', id)
        try:
            projected = count + len(vals)
            QUOTAS.limit_check(context, security_group_rules=projected)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security group rules.")
            self.raise_over_quota(msg)
        msg = _("Authorize security group ingress %s")
        LOG.audit(msg, name, context=context)
        rules = [self.db.security_group_rule_create(context, v) for v in vals]
        self.trigger_rules_refresh(context, id=id)
        return rules
    def remove_rules(self, context, security_group, rule_ids):
        """Delete the given rule ids and refresh rules on affected hosts."""
        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)
        for rule_id in rule_ids:
            self.db.security_group_rule_destroy(context, rule_id)
        # NOTE(vish): we removed some rules, so refresh
        self.trigger_rules_refresh(context, id=security_group['id'])
    def remove_default_rules(self, context, rule_ids):
        """Delete rules from the default security group rule set."""
        for rule_id in rule_ids:
            self.db.security_group_default_rule_destroy(context, rule_id)
    def add_default_rules(self, context, vals):
        """Create default security group rules from the given value dicts."""
        rules = [self.db.security_group_default_rule_create(context, v)
                 for v in vals]
        return rules
    def default_rule_exists(self, context, values):
        """Indicates whether the specified rule values are already
        defined in the default security group rules.
        """
        for rule in self.db.security_group_default_rule_list(context):
            is_duplicate = True
            keys = ('cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                # Return the matching rule's id, or True when it has none.
                return rule.get('id') or True
        return False
    def get_all_default_rules(self, context):
        """Return the full list of default security group rules."""
        try:
            rules = self.db.security_group_default_rule_list(context)
        except Exception:
            msg = 'cannot get default security group rules'
            raise exception.SecurityGroupDefaultRuleNotFound(msg)
        return rules
    def get_default_rule(self, context, id):
        """Fetch one default security group rule by id."""
        try:
            return self.db.security_group_default_rule_get(context, id)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            self.raise_not_found(msg)
    def validate_id(self, id):
        """Coerce a security group id to int; reject non-integer ids."""
        try:
            return int(id)
        except ValueError:
            msg = _("Security group id should be integer")
            self.raise_invalid_property(msg)
    def trigger_rules_refresh(self, context, id):
        """Called when a rule is added to or removed from a security_group."""
        security_group = self.db.security_group_get(
            context, id, columns_to_join=['instances'])
        for instance in security_group['instances']:
            if instance['host'] is not None:
                self.security_group_rpcapi.refresh_instance_security_rules(
                    context, instance['host'], instance)
    def trigger_members_refresh(self, context, group_ids):
        """Called when a security group gains a new or loses a member.
        Sends an update request to each compute node for each instance for
        which this is relevant.
        """
        # First, we get the security group rules that reference these groups as
        # the grantee..
        security_group_rules = set()
        for group_id in group_ids:
            security_group_rules.update(
                self.db.security_group_rule_get_by_security_group_grantee(
                    context,
                    group_id))
        # ..then we distill the rules into the groups to which they belong..
        security_groups = set()
        for rule in security_group_rules:
            security_group = self.db.security_group_get(
                context, rule['parent_group_id'],
                columns_to_join=['instances'])
            security_groups.add(security_group)
        # ..then we find the instances that are members of these groups..
        instances = {}
        for security_group in security_groups:
            for instance in security_group['instances']:
                if instance['uuid'] not in instances:
                    instances[instance['uuid']] = instance
        # ..then we send a request to refresh the rules for each instance.
        for instance in instances.values():
            if instance['host']:
                self.security_group_rpcapi.refresh_instance_security_rules(
                    context, instance['host'], instance)
    def get_instance_security_groups(self, context, instance_uuid,
                                     detailed=False):
        """List an instance's groups: full rows if detailed, else just names.

        NOTE(review): returns None (not []) when the instance has no
        groups in the non-detailed path — callers appear to rely on
        truthiness; confirm before changing.
        """
        if detailed:
            return self.db.security_group_get_by_instance(context,
                                                          instance_uuid)
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
        groups = instance.get('security_groups')
        if groups:
            return [{'name': group['name']} for group in groups]
    def populate_security_groups(self, instance, security_groups):
        """Attach a SecurityGroupList built from the given names/values."""
        if not security_groups:
            # Make sure it's an empty list and not None
            security_groups = []
        instance.security_groups = security_group_obj.make_secgroup_list(
            security_groups)
| 43.890613 | 79 | 0.599754 |
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_group as instance_group_obj
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
# Notifier/exception-wrapping helpers shared by the API classes below.
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
                                   get_notifier=get_notifier)
# Configuration options registered by this module.
compute_opts = [
    cfg.BoolOpt('allow_resize_to_same_host',
                default=False,
                help='Allow destination machine to match source for resize. '
                     'Useful when testing in single-host environments.'),
    cfg.BoolOpt('allow_migrate_to_same_host',
                default=False,
                help='Allow migrate machine to the same host. '
                     'Useful when testing in single-host environments.'),
    cfg.StrOpt('default_schedule_zone',
               help='Availability zone to use when user doesn\'t specify one'),
    cfg.ListOpt('non_inheritable_image_properties',
                default=['cache_in_nova',
                         'bittorrent'],
                help='These are image properties which a snapshot should not'
                     ' inherit from an instance'),
    cfg.StrOpt('null_kernel',
               default='nokernel',
               help='Kernel image that indicates not to use a kernel, but to '
                    'use a raw disk image instead'),
    cfg.StrOpt('multi_instance_display_name_template',
               default='%(name)s-%(uuid)s',
               help='When creating multiple instances with a single request '
                    'using the os-multiple-create API extension, this '
                    'template will be used to build the display name for '
                    'each instance. The benefit is that the instances '
                    'end up with different hostnames. To restore legacy '
                    'behavior of every instance having the same name, set '
                    'this option to "%(name)s". Valid keys for the '
                    'template are: name, uuid, count.'),
    cfg.IntOpt('max_local_block_devices',
               default=3,
               help='Maximum number of devices that will result '
                    'in a local image being created on the hypervisor node. '
                    'Setting this to 0 means nova will allow only '
                    'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
# Options defined in other modules that this one reads.
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
# 65535 == 2**16 - 1; size cap for user_data payloads, in bytes.
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
# Security groups that may never be updated or deleted ("read-only").
RO_SECURITY_GROUPS = ['default']
# Flavor extra_specs key carrying the max video RAM (MB) for an instance.
VIDEO_RAM = 'hw_video:ram_max_mb'
def check_instance_state(vm_state=None, task_state=(None,),
                         must_have_launched=True):
    """Decorator factory that guards compute API methods by instance state.

    The decorated method must take (self, context, instance, ...).  The
    call is rejected with InstanceInvalidState unless the instance's
    vm_state / task_state fall inside the allowed sets (a None set means
    "don't check") and, when must_have_launched is set, the instance has
    a launched_at timestamp.
    """
    valid_vm = vm_state
    if valid_vm is not None and not isinstance(valid_vm, set):
        valid_vm = set(valid_vm)
    valid_task = task_state
    if valid_task is not None and not isinstance(valid_task, set):
        valid_task = set(valid_task)

    def outer(f):
        @functools.wraps(f)
        def inner(self, context, instance, *args, **kw):
            bad_vm = (valid_vm is not None and
                      instance['vm_state'] not in valid_vm)
            if bad_vm:
                raise exception.InstanceInvalidState(
                    attr='vm_state',
                    instance_uuid=instance['uuid'],
                    state=instance['vm_state'],
                    method=f.__name__)
            bad_task = (valid_task is not None and
                        instance['task_state'] not in valid_task)
            if bad_task:
                raise exception.InstanceInvalidState(
                    attr='task_state',
                    instance_uuid=instance['uuid'],
                    state=instance['task_state'],
                    method=f.__name__)
            if must_have_launched and not instance['launched_at']:
                raise exception.InstanceInvalidState(
                    attr=None,
                    not_launched=True,
                    instance_uuid=instance['uuid'],
                    state=instance['vm_state'],
                    method=f.__name__)
            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def check_instance_host(function):
    """Decorator: require instance['host'] to be set before calling through."""
    @functools.wraps(function)
    def wrapped(self, context, instance, *args, **kwargs):
        host = instance['host']
        if not host:
            raise exception.InstanceNotReady(instance_id=instance['uuid'])
        return function(self, context, instance, *args, **kwargs)
    return wrapped
def check_instance_lock(function):
    """Decorator: refuse to act on a locked instance for non-admin callers."""
    @functools.wraps(function)
    def inner(self, context, instance, *args, **kwargs):
        blocked = instance['locked'] and not context.is_admin
        if blocked:
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
        return function(self, context, instance, *args, **kwargs)
    return inner
def policy_decorator(scope):
    """Factory for decorators that enforce nova policy under *scope*."""
    def decorate(func):
        @functools.wraps(func)
        def guarded(self, context, target, *args, **kwargs):
            # Enforce '<scope>:<func name>' before dispatching to func.
            check_policy(context, func.__name__, target, scope)
            return func(self, context, target, *args, **kwargs)
        return guarded
    return decorate
# Ready-made policy decorators for the general compute API and for the
# security-group sub-API; both delegate to check_policy() below.
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
    scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
    """Enforce the nova policy rule '<scope>:<action>' against *target*."""
    rule = '%s:%s' % (scope, action)
    nova.policy.enforce(context, rule, target)
def check_instance_cell(fn):
    """Decorator: validate the instance's cell before running *fn*.

    Delegates to self._validate_cell, which is a no-op unless running as
    a cells 'api' cell, and otherwise raises for unknown/read-only cells.
    """
    # Use functools.wraps instead of hand-copying __name__ so that
    # __doc__, __module__ and __wrapped__ survive the decoration too.
    @functools.wraps(fn)
    def _wrapped(self, context, instance, *args, **kwargs):
        self._validate_cell(instance, fn.__name__)
        return fn(self, context, instance, *args, **kwargs)
    return _wrapped
def _diff_dict(orig, new):
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary.
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
if injected_files is None:
return
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
    def _check_num_instances_quota(self, context, instance_type, min_count,
                                   max_count):
        """Reserve quota for *max_count* instances of *instance_type*.

        Returns (max_count, reservations) on success.  On quota overage,
        retries once with a smaller count when one still satisfies
        min_count; otherwise raises TooManyInstances.
        """
        # Determine requested cores and ram
        req_cores = max_count * instance_type['vcpus']
        vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
        req_ram = max_count * (instance_type['memory_mb'] + vram_mb)
        # Check the quota
        try:
            reservations = QUOTAS.reserve(context, instances=max_count,
                                          cores=req_cores, ram=req_ram)
        except exception.OverQuota as exc:
            # OK, we exceeded quota; let's figure out why...
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            headroom = exc.kwargs['headroom']
            # How many instances does the remaining headroom in each
            # dimension (instances, cores, ram) still accommodate?
            allowed = headroom['instances']
            if instance_type['vcpus']:
                allowed = min(allowed,
                              headroom['cores'] // instance_type['vcpus'])
            if instance_type['memory_mb']:
                allowed = min(allowed,
                              headroom['ram'] // (instance_type['memory_mb'] +
                                                  vram_mb))
            if allowed <= 0:
                msg = _("Cannot run any more instances of this type.")
                allowed = 0
            elif min_count <= allowed <= max_count:
                # A smaller batch still fits inside the caller's requested
                # range, so retry the reservation for that reduced count.
                return self._check_num_instances_quota(context, instance_type,
                                                       min_count, allowed)
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       allowed)
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)
            params = {'overs': overs, 'pid': context.project_id,
                      'min_count': min_count, 'max_count': max_count,
                      'msg': msg}
            if min_count == max_count:
                LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run %(min_count)d instances. %(msg)s"),
                         params)
            else:
                LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run between %(min_count)d and"
                           " %(max_count)d instances. %(msg)s"),
                         params)
            num_instances = (str(min_count) if min_count == max_count else
                             "%s-%s" % (min_count, max_count))
            requested = dict(instances=num_instances, cores=req_cores,
                             ram=req_ram)
            raise exception.TooManyInstances(overs=overs,
                                             req=requested[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)
        return max_count, reservations
    def _check_metadata_properties_quota(self, context, metadata=None):
        """Enforce the metadata-items quota and per-item constraints.

        Raises InvalidMetadata for a non-dict, non-string keys/values or a
        blank key; MetadataLimitExceeded when over quota; and
        InvalidMetadataSize for keys/values over 255 characters.
        """
        if not metadata:
            metadata = {}
        if not isinstance(metadata, dict):
            msg = (_("Metadata type should be dict."))
            raise exception.InvalidMetadata(reason=msg)
        num_metadata = len(metadata)
        try:
            QUOTAS.limit_check(context, metadata_items=num_metadata)
        except exception.OverQuota as exc:
            LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
                       "%(num_metadata)s metadata properties"),
                     {'pid': context.project_id,
                      'num_metadata': num_metadata})
            quota_metadata = exc.kwargs['quotas']['metadata_items']
            raise exception.MetadataLimitExceeded(allowed=quota_metadata)
        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        # as if this is quota-controlled for forwards compatibility
        for k, v in metadata.iteritems():
            if not isinstance(k, six.string_types):
                msg = _("Metadata property key '%s' is not a string.") % k
                raise exception.InvalidMetadata(reason=msg)
            if not isinstance(v, six.string_types):
                msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
                         "not a string.") % {'v': v, 'k': k})
                raise exception.InvalidMetadata(reason=msg)
            if len(k) == 0:
                msg = _("Metadata property key blank")
                raise exception.InvalidMetadata(reason=msg)
            if len(k) > 255:
                msg = _("Metadata property key greater than 255 characters")
                raise exception.InvalidMetadataSize(reason=msg)
            if len(v) > 255:
                msg = _("Metadata property value greater than 255 characters")
                raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
return self.network_api.validate_networks(context, requested_networks,
max_count)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
    def _apply_instance_name_template(self, context, instance, index):
        """Re-render display_name for one instance of a multi-boot batch.

        Applies CONF.multi_instance_display_name_template with the
        instance's uuid, original name and 1-based count.  A malformed
        template falls back to the existing display_name.  Saves the
        instance and returns it.
        """
        params = {
            'uuid': instance['uuid'],
            'name': instance['display_name'],
            'count': index + 1,
        }
        try:
            new_name = (CONF.multi_instance_display_name_template %
                        params)
        except (KeyError, TypeError):
            LOG.exception(_('Failed to set instance name using '
                            'multi_instance_display_name_template.'))
            new_name = instance['display_name']
        instance.display_name = new_name
        # Only derive the hostname when one was not already chosen.
        if not instance.get('hostname', None):
            instance.hostname = utils.sanitize_hostname(new_name)
        instance.save()
        return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
    def _check_and_transform_bdm(self, base_options, image_meta, min_count,
                                 max_count, block_device_mapping, legacy_bdm):
        """Merge API-supplied and image-defined block device mappings.

        Converts whichever side is in the legacy format into v2 mappings,
        letting the API-supplied boot device win over the image-defined
        one.  Raises InvalidRequest when a multi-instance boot tries to
        attach volumes.  Returns the combined v2 bdm list.
        """
        # NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
        #                  It's needed for legacy conversion to work.
        root_device_name = (base_options.get('root_device_name') or 'vda')
        image_ref = base_options.get('image_ref', '')
        image_defined_bdms = \
            image_meta.get('properties', {}).get('block_device_mapping', [])
        legacy_image_defined = not image_meta.get(
            'properties', {}).get('bdm_v2', False)
        if not legacy_image_defined:
            image_defined_bdms = map(block_device.BlockDeviceDict,
                                     image_defined_bdms)
        if legacy_bdm:
            if legacy_image_defined:
                # Both sides legacy: concatenate, then convert together.
                block_device_mapping += image_defined_bdms
                block_device_mapping = block_device.from_legacy_mapping(
                    block_device_mapping, image_ref, root_device_name)
            else:
                # Only convert the API side; skip synthesizing a root
                # device if the image already defines one.
                root_in_image_bdms = block_device.get_root_bdm(
                    image_defined_bdms) is not None
                block_device_mapping = block_device.from_legacy_mapping(
                    block_device_mapping, image_ref, root_device_name,
                    no_root=root_in_image_bdms) + image_defined_bdms
        else:
            if legacy_image_defined:
                image_defined_bdms = block_device.from_legacy_mapping(
                    image_defined_bdms, None, root_device_name)
            root_in_image_bdms = block_device.get_root_bdm(
                image_defined_bdms) is not None
            if image_ref and root_in_image_bdms:
                # Drop the API-inserted image boot device in favor of the
                # image-defined root device.
                block_device_mapping = [bdm for bdm in block_device_mapping
                                        if not (
                                            bdm.get('source_type') == 'image'
                                            and bdm.get('boot_index') == 0)]
            block_device_mapping += image_defined_bdms
        if min_count > 1 or max_count > 1:
            if any(map(lambda bdm: bdm['source_type'] == 'volume',
                       block_device_mapping)):
                msg = _('Cannot attach one or more volumes to multiple'
                        ' instances')
                raise exception.InvalidRequest(msg)
        return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
    def _validate_and_build_base_options(self, context, instance_type,
                                         boot_meta, image_href, image_id,
                                         kernel_id, ramdisk_id, display_name,
                                         display_description, key_name,
                                         key_data, security_groups,
                                         availability_zone, forced_host,
                                         user_data, metadata, injected_files,
                                         access_ip_v4, access_ip_v6,
                                         requested_networks, config_drive,
                                         block_device_mapping,
                                         auto_disk_config, reservation_id,
                                         max_count):
        """Validate boot inputs and build the base instance options dict.

        Runs the availability-zone, flavor, user-data, quota, security
        group, network, kernel/ramdisk, config-drive and keypair checks,
        then assembles the dict of fields common to every instance of the
        request.  Returns (base_options, max_network_count).
        """
        if availability_zone:
            available_zones = availability_zones.\
                get_availability_zones(context.elevated(), True)
            # An unknown zone is acceptable only in the forced-host form.
            if forced_host is None and availability_zone not in \
                    available_zones:
                msg = _('The requested availability zone is not available')
                raise exception.InvalidRequest(msg)
        if instance_type['disabled']:
            raise exception.FlavorNotFound(flavor_id=instance_type['id'])
        if user_data:
            # NOTE(review): 'l' is an ambiguous name (PEP 8 E741); kept
            # unchanged here to preserve the code byte-for-byte.
            l = len(user_data)
            if l > MAX_USERDATA_SIZE:
                # NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if its too long.
                raise exception.InstanceUserDataTooLarge(
                    length=l, maxsize=MAX_USERDATA_SIZE)
            try:
                base64.decodestring(user_data)
            except base64.binascii.Error:
                raise exception.InstanceUserDataMalformed()
        self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                instance_type, metadata, injected_files)
        self._check_requested_secgroups(context, security_groups)
        max_network_count = self._check_requested_networks(context,
                requested_networks, max_count)
        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                context, kernel_id, ramdisk_id, boot_meta)
        config_drive = self._check_config_drive(config_drive)
        # Resolve the public key from the named keypair when only a key
        # name was supplied.
        if key_data is None and key_name:
            key_pair = keypair_obj.KeyPair.get_by_name(context,
                                                       context.user_id,
                                                       key_name)
            key_data = key_pair.public_key
        root_device_name = block_device.properties_root_device_name(
            boot_meta.get('properties', {}))
        system_metadata = flavors.save_flavor_info(
            dict(), instance_type)
        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive': config_drive,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description or '',
            'user_data': user_data,
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata or {},
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'root_device_name': root_device_name,
            'progress': 0,
            'system_metadata': system_metadata}
        options_from_image = self._inherit_properties_from_image(
                boot_meta, auto_disk_config)
        base_options.update(options_from_image)
        return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
    def _provision_instances(self, context, instance_type, min_count,
            max_count, base_options, boot_meta, security_groups,
            block_device_mapping):
        """Reserve quota and create the DB entries for new instances.

        On any failure every instance created so far is destroyed and the
        quota reservation rolled back; on success the reservation is
        committed.  Returns the list of created Instance objects.
        """
        num_instances, quota_reservations = self._check_num_instances_quota(
                context, instance_type, min_count, max_count)
        LOG.debug(_("Going to run %s instances...") % num_instances)
        instances = []
        try:
            for i in xrange(num_instances):
                instance = instance_obj.Instance()
                instance.update(base_options)
                instance = self.create_db_entry_for_new_instance(
                        context, instance_type, boot_meta, instance,
                        security_groups, block_device_mapping,
                        num_instances, i)
                instances.append(instance)
                # Announce each instance entering BUILDING state.
                notifications.send_update_with_states(context, instance, None,
                        vm_states.BUILDING, None, None, service="api")
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # Best-effort cleanup of whatever was created.
                    for instance in instances:
                        try:
                            instance.destroy()
                        except exception.ObjectActionError:
                            pass
                finally:
                    QUOTAS.rollback(context, quota_reservations)
        # All instances created: the reservation becomes real usage.
        QUOTAS.commit(context, quota_reservations)
        return instances
    def _get_bdm_image_metadata(self, context, block_device_mapping,
                                legacy_bdm=True):
        """Return image properties for the boot device of a bdm list.

        The boot device is 'vda' in legacy format, boot_index 0 otherwise.
        Metadata comes from the device's image, or from the backing
        volume's volume_image_metadata.  Returns {} when there is nothing
        to find.  Raises InvalidBDMImage / InvalidBDMVolume when the
        lookup fails.
        """
        if not block_device_mapping:
            return {}
        for bdm in block_device_mapping:
            if legacy_bdm and bdm.get('device_name') != 'vda':
                continue
            elif not legacy_bdm and bdm.get('boot_index') != 0:
                continue
            if bdm.get('image_id'):
                try:
                    image_id = bdm['image_id']
                    image_meta = self.image_service.show(context, image_id)
                    return image_meta.get('properties', {})
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
            elif bdm.get('volume_id'):
                try:
                    volume_id = bdm['volume_id']
                    volume = self.volume_api.get(context, volume_id)
                    return volume.get('volume_image_metadata', {})
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
        return {}
@staticmethod
def _update_instance_group_by_name(context, instance_uuids, group_name):
try:
ig = instance_group_obj.InstanceGroup.get_by_name(context,
group_name)
instance_group_obj.InstanceGroup.add_members(context, ig.uuid,
instance_uuids)
except exception.InstanceGroupNotFound:
ig = instance_group_obj.InstanceGroup()
ig.name = group_name
ig.project_id = context.project_id
ig.user_id = context.user_id
ig.policies = ['legacy']
ig.members = instance_uuids
ig.create(context)
@staticmethod
def _update_instance_group(context, instances, scheduler_hints):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
instance_uuids = [instance.uuid for instance in instances]
if uuidutils.is_uuid_like(group_hint):
instance_group_obj.InstanceGroup.add_members(context, group_hint,
instance_uuids)
else:
API._update_instance_group_by_name(context, instance_uuids,
group_hint)
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_groups,
               availability_zone, user_data, metadata,
               injected_files, admin_password,
               access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config,
               reservation_id=None, scheduler_hints=None,
               legacy_bdm=True):
        """Validate all inputs, create the DB records and cast the build.

        Orchestrates the whole boot request: defaults, image/bdm metadata,
        availability-zone parsing, base-option validation, network-quota
        capping, bdm transformation, instance provisioning and finally the
        build_instances cast to the conductor.  Returns
        (instances, reservation_id).
        """
        if reservation_id is None:
            reservation_id = utils.generate_uid('r')
        security_groups = security_groups or ['default']
        min_count = min_count or 1
        max_count = max_count or min_count
        block_device_mapping = block_device_mapping or []
        if not instance_type:
            instance_type = flavors.get_default_flavor()
        if image_href:
            image_id, boot_meta = self._get_image(context, image_href)
        else:
            # Boot-from-volume: fake image metadata out of the boot bdm.
            image_id = None
            boot_meta = {}
            boot_meta['properties'] = \
                self._get_bdm_image_metadata(context,
                    block_device_mapping, legacy_bdm)
        self._check_auto_disk_config(image=boot_meta,
                                     auto_disk_config=auto_disk_config)
        handle_az = self._handle_availability_zone
        availability_zone, forced_host, forced_node = handle_az(context,
                                                            availability_zone)
        base_options, max_net_count = self._validate_and_build_base_options(
                context,
                instance_type, boot_meta, image_href, image_id, kernel_id,
                ramdisk_id, display_name, display_description,
                key_name, key_data, security_groups, availability_zone,
                forced_host, user_data, metadata, injected_files, access_ip_v4,
                access_ip_v6, requested_networks, config_drive,
                block_device_mapping, auto_disk_config, reservation_id,
                max_count)
        # Cap the batch size by what the network port quota allows.
        if max_net_count == 0:
            raise exception.PortLimitExceeded()
        elif max_net_count < max_count:
            LOG.debug(_("max count reduced from %(max_count)d to "
                        "%(max_net_count)d due to network port quota"),
                      {'max_count': max_count,
                       'max_net_count': max_net_count})
            max_count = max_net_count
        block_device_mapping = self._check_and_transform_bdm(
            base_options, boot_meta, min_count, max_count,
            block_device_mapping, legacy_bdm)
        instances = self._provision_instances(context, instance_type,
                min_count, max_count, base_options, boot_meta, security_groups,
                block_device_mapping)
        filter_properties = self._build_filter_properties(context,
                scheduler_hints, forced_host, forced_node, instance_type)
        self._update_instance_group(context, instances, scheduler_hints)
        for instance in instances:
            self._record_action_start(context, instance,
                                      instance_actions.CREATE)
        self.compute_task_api.build_instances(context,
                instances=instances, image=boot_meta,
                filter_properties=filter_properties,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=False)
        return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
    def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
        """Convert image 'mappings' entries into v2 blank-device bdms.

        Only swap/ephemeral virtual devices are kept (ami/root and
        anything else are skipped); each becomes a local blank disk sized
        from the flavor.  Zero-size results are dropped.
        """
        prepared_mappings = []
        for bdm in block_device.mappings_prepend_dev(mappings):
            LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
            virtual_name = bdm['virtual']
            if virtual_name == 'ami' or virtual_name == 'root':
                continue
            if not block_device.is_swap_or_ephemeral(virtual_name):
                continue
            guest_format = bdm.get('guest_format')
            if virtual_name == 'swap':
                guest_format = 'swap'
            if not guest_format:
                guest_format = CONF.default_ephemeral_format
            values = block_device.BlockDeviceDict({
                'device_name': bdm['device'],
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': guest_format,
                'delete_on_termination': True,
                'boot_index': -1})
            # Size comes from the flavor (swap vs ephemeral).
            values['volume_size'] = self._volume_size(
                instance_type, values)
            if values['volume_size'] == 0:
                continue
            prepared_mappings.append(values)
        return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
    def _validate_bdm(self, context, instance, instance_type, all_mappings):
        """Validate a combined list of v2 block device mappings.

        Checks boot-index continuity (must include 0 with no gaps),
        existence/attachability of referenced images, volumes and
        snapshots, image-to-volume sizing, aggregate ephemeral and swap
        sizes against the flavor, and the local-device count limit.
        Raises the matching InvalidBDM* exception on any violation.
        """
        def _subsequent_list(l):
            # True when the sorted list has no gaps (0,1,2,...).
            return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
        # Make sure that the boot indexes make sense
        boot_indexes = sorted([bdm['boot_index']
                               for bdm in all_mappings
                               if bdm.get('boot_index') is not None
                               and bdm.get('boot_index') >= 0])
        if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
            raise exception.InvalidBDMBootSequence()
        for bdm in all_mappings:
            # Get the block device mappings
            snapshot_id = bdm.get('snapshot_id')
            volume_id = bdm.get('volume_id')
            image_id = bdm.get('image_id')
            if (image_id is not None and
                    image_id != instance.get('image_ref')):
                try:
                    self._get_image(context, image_id)
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
                if (bdm['source_type'] == 'image' and
                        bdm['destination_type'] == 'volume' and
                        not bdm['volume_size']):
                    raise exception.InvalidBDM(message=_("Images with "
                        "destination_type 'volume' need to have a non-zero "
                        "size specified"))
            elif volume_id is not None:
                try:
                    volume = self.volume_api.get(context, volume_id)
                    self.volume_api.check_attach(context,
                                                 volume,
                                                 instance=instance)
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
            elif snapshot_id is not None:
                try:
                    self.volume_api.get_snapshot(context, snapshot_id)
                except Exception:
                    raise exception.InvalidBDMSnapshot(id=snapshot_id)
        # Aggregate ephemeral size must fit the flavor.
        ephemeral_size = sum(bdm.get('volume_size') or 0
                for bdm in all_mappings
                if block_device.new_format_is_ephemeral(bdm))
        if ephemeral_size > instance_type['ephemeral_gb']:
            raise exception.InvalidBDMEphemeralSize()
        # There should be only one swap
        swap_list = [bdm for bdm in all_mappings
                if block_device.new_format_is_swap(bdm)]
        if len(swap_list) > 1:
            msg = _("More than one swap drive requested.")
            raise exception.InvalidBDMFormat(details=msg)
        if swap_list:
            swap_size = swap_list[0].get('volume_size') or 0
            if swap_size > instance_type['swap']:
                raise exception.InvalidBDMSwapSize()
        max_local = CONF.max_local_block_devices
        if max_local >= 0:
            num_local = len([bdm for bdm in all_mappings
                             if bdm.get('destination_type') == 'local'])
            if num_local > max_local:
                raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping)
for mapping in (image_mapping, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
    def _populate_instance_for_create(self, instance, image,
                                      index, security_groups, instance_type):
        """Fill in the scheduling-time fields of a new Instance object.

        Assigns a uuid if missing, sets BUILDING/SCHEDULING states, an
        empty network info cache, image-derived system metadata and the
        requested security groups.  Returns the mutated instance.
        """
        if not instance.obj_attr_is_set('uuid'):
            # Generate the instance_uuid here so we can use it
            # for additional setup before creating the DB entry.
            instance['uuid'] = str(uuid.uuid4())
        instance.launch_index = index
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SCHEDULING
        info_cache = instance_info_cache.InstanceInfoCache()
        info_cache.instance_uuid = instance.uuid
        info_cache.network_info = network_model.NetworkInfo()
        instance.info_cache = info_cache
        # Store image properties so we can use them later
        # (for notifications, etc).  Only store what we can.
        if not instance.obj_attr_is_set('system_metadata'):
            instance.system_metadata = {}
        # Make sure we have the dict form that we need for instance_update.
        instance['system_metadata'] = utils.instance_sys_meta(instance)
        system_meta = utils.get_system_metadata_from_image(
            image, instance_type)
        # In case we couldn't find any suitable base_image
        system_meta.setdefault('image_base_image_ref', instance['image_ref'])
        instance['system_metadata'].update(system_meta)
        self.security_group_api.populate_security_groups(instance,
                                                         security_groups)
        return instance
    #NOTE(bcwaldon): No policy check since this is only used by scheduler and
    # the compute api. That should probably be cleaned up, though.
    def create_db_entry_for_new_instance(self, context, instance_type, image,
            instance, security_group, block_device_mapping, num_instances,
            index):
        """Create the DB entry for a new instance and its block devices.

        Populates names/states, creates the record, applies the
        multi-instance name template (needs the uuid, so done after
        create), then validates and stores the bdms.  If bdm validation
        fails the freshly created instance is destroyed before re-raising.
        Returns the created instance.
        """
        self._populate_instance_for_create(instance, image, index,
                                           security_group, instance_type)
        self._populate_instance_names(instance, num_instances)
        self._populate_instance_shutdown_terminate(instance, image,
                                                   block_device_mapping)
        self.security_group_api.ensure_default(context)
        instance.create(context)
        if num_instances > 1:
            # NOTE(russellb) We wait until this spot to handle
            # multi_instance_display_name_template, because we need
            # the UUID from the instance.
            instance = self._apply_instance_name_template(context, instance,
                                                          index)
        # NOTE (ndipanov): This can now raise exceptions but the instance
        #                  has been created, so delete it and re-raise so
        #                  that other cleanup can happen.
        try:
            self._populate_instance_for_bdm(context, instance,
                instance_type, image, block_device_mapping)
        except exception.InvalidBDM:
            with excutils.save_and_reraise_exception():
                instance.destroy(context)
        return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
    @hooks.add_hook("create_instance")
    def create(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name=None, display_description=None,
               key_name=None, key_data=None, security_group=None,
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None,
               block_device_mapping=None, access_ip_v4=None,
               access_ip_v6=None, requested_networks=None, config_drive=None,
               auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
        """Public entry point: provision instance(s).

        Enforces the create (and optional attach) policies, rejects
        multi-instance boots pinned to a single neutron port, then
        delegates the real work to _create_instance.  Returns
        (instances, reservation_id).
        """
        self._check_create_policies(context, availability_zone,
                requested_networks, block_device_mapping)
        if requested_networks and max_count > 1 and utils.is_neutron():
            self._check_multiple_instances_neutron_ports(requested_networks)
        return self._create_instance(
                       context, instance_type,
                       image_href, kernel_id, ramdisk_id,
                       min_count, max_count,
                       display_name, display_description,
                       key_name, key_data, security_group,
                       availability_zone, user_data, metadata,
                       injected_files, admin_password,
                       access_ip_v4, access_ip_v6,
                       requested_networks, config_drive,
                       block_device_mapping, auto_disk_config,
                       scheduler_hints=scheduler_hints,
                       legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
    def _delete(self, context, instance, delete_type, cb, **instance_attrs):
        """Common delete/soft-delete path.

        Applies instance_attrs, reserves negative quota deltas, then
        either casts to the child cell (api cell), deletes locally when no
        host is assigned or the compute service is down, or calls cb() so
        the compute node performs the delete.  Quota reservations are
        committed or rolled back to match the outcome.
        """
        if instance.disable_terminate:
            LOG.info(_('instance termination disabled'),
                     instance=instance)
            return
        host = instance['host']
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        reservations = None
        project_id, user_id = quotas_obj.ids_from_instance(context, instance)
        # At these states an instance has a snapshot associate.
        if instance['vm_state'] in (vm_states.SHELVED,
                                    vm_states.SHELVED_OFFLOADED):
            snapshot_id = instance.system_metadata.get('shelved_image_id')
            LOG.info(_("Working on deleting snapshot %s "
                       "from shelved instance..."),
                     snapshot_id, instance=instance)
            try:
                self.image_service.delete(context, snapshot_id)
            except (exception.ImageNotFound,
                    exception.ImageNotAuthorized) as exc:
                LOG.warning(_("Failed to delete snapshot "
                              "from shelved instance (%s)."),
                            exc.format_message(), instance=instance)
            except Exception as exc:
                # Snapshot cleanup is best-effort; never block the delete.
                LOG.exception(_("Something wrong happened when trying to "
                                "delete snapshot from shelved instance."),
                              instance=instance)
        original_task_state = instance.task_state
        try:
            # NOTE(maoy): no expected_task_state needs to be set
            instance.update(instance_attrs)
            instance.progress = 0
            instance.save()
            new_type_id = instance.instance_type_id
            # NOTE(comstud): If we delete the instance locally, we'll
            reservations = self._create_reservations(context,
                                                     instance,
                                                     new_type_id,
                                                     project_id, user_id)
            if self.cell_type == 'api':
                # skip all remaining logic and just call the callback,
                # which will cause a cast to the child cell.  Also,
                # commit reservations here early until we have a better
                # way to deal with quotas with cells.
                cb(context, instance, bdms, reservations=None)
                if reservations:
                    QUOTAS.commit(context,
                                  reservations,
                                  project_id=project_id,
                                  user_id=user_id)
                return
            if not host:
                # Never scheduled to a compute node: delete from the DB only.
                try:
                    compute_utils.notify_about_instance_usage(
                            self.notifier, context, instance,
                            "%s.start" % delete_type)
                    instance.destroy()
                    compute_utils.notify_about_instance_usage(
                            self.notifier, context, instance,
                            "%s.end" % delete_type,
                            system_metadata=instance.system_metadata)
                    if reservations:
                        QUOTAS.commit(context,
                                      reservations,
                                      project_id=project_id,
                                      user_id=user_id)
                    return
                except exception.ObjectActionError:
                    # Raced with another update; reload and fall through.
                    instance.refresh()
            if instance.vm_state == vm_states.RESIZED:
                self._confirm_resize_on_deleting(context, instance)
            is_up = False
            try:
                service = service_obj.Service.get_by_compute_host(
                    context.elevated(), instance.host)
                if self.servicegroup_api.service_is_up(service):
                    is_up = True
                    if original_task_state in (task_states.DELETING,
                                               task_states.SOFT_DELETING):
                        # A delete is already in flight; don't double-charge
                        # the quota or start another one.
                        LOG.info(_('Instance is already in deleting state, '
                                   'ignoring this request'),
                                 instance=instance)
                        if reservations:
                            QUOTAS.rollback(context, reservations,
                                            project_id=project_id,
                                            user_id=user_id)
                        return
                    self._record_action_start(context, instance,
                                              instance_actions.DELETE)
                    cb(context, instance, bdms, reservations=reservations)
            except exception.ComputeHostNotFound:
                pass
            if not is_up:
                # If compute node isn't up, just delete from DB
                self._local_delete(context, instance, bdms, delete_type, cb)
                if reservations:
                    QUOTAS.commit(context,
                                  reservations,
                                  project_id=project_id,
                                  user_id=user_id)
                    reservations = None
        except exception.InstanceNotFound:
            # NOTE(comstud): Race condition. Instance already gone.
            if reservations:
                QUOTAS.rollback(context,
                                reservations,
                                project_id=project_id,
                                user_id=user_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                if reservations:
                    QUOTAS.rollback(context,
                                    reservations,
                                    project_id=project_id,
                                    user_id=user_id)
    def _confirm_resize_on_deleting(self, context, instance):
        """Confirm an outstanding resize before deleting the instance.

        If an unconfirmed ('finished' or 'confirming') migration exists,
        reserves the downsize quota delta and synchronously confirms the
        resize on the source host; otherwise just returns.
        """
        # If in the middle of a resize, use confirm_resize to
        # ensure the original instance is cleaned up too
        mig_cls = migration_obj.Migration
        migration = None
        for status in ('finished', 'confirming'):
            try:
                migration = mig_cls.get_by_instance_and_status(
                        context.elevated(), instance.uuid, status)
                LOG.info(_('Found an unconfirmed migration during delete, '
                           'id: %(id)s, status: %(status)s') %
                          {'id': migration.id,
                           'status': migration.status},
                          context=context, instance=instance)
                break
            except exception.MigrationNotFoundByStatus:
                pass
        if not migration:
            LOG.info(_('Instance may have been confirmed during delete'),
                    context=context, instance=instance)
            return
        src_host = migration.source_compute
        # Deleting a resized instance releases the old flavor's resources.
        try:
            deltas = self._downsize_quota_delta(context, instance)
        except KeyError:
            LOG.info(_('Migration %s may have been confirmed during delete') %
                    migration.id, context=context, instance=instance)
            return
        quotas = self._reserve_quota_delta(context, deltas, instance)
        self._record_action_start(context, instance,
                                  instance_actions.CONFIRM_RESIZE)
        # cast=False makes this a synchronous call so the delete that
        # follows sees a confirmed resize.
        self.compute_rpcapi.confirm_resize(context,
                instance, migration,
                src_host, quotas.reservations,
                cast=False)
    def _create_reservations(self, context, old_instance, new_instance_type_id,
                             project_id, user_id):
        """Reserve the quota decrement for deleting ``old_instance``.

        Reserves -1 instance and the negative of the instance's vcpus/ram.
        If the instance is mid-resize (RESIZE_MIGRATED / RESIZE_FINISH) and
        a 'post-migrating' migration matches the instance's current flavor,
        the *old* flavor's vcpus and ram (plus any VIDEO_RAM extra spec)
        are used instead, since that is what is still charged to quota.

        :returns: the reservations list from QUOTAS.reserve
        """
        instance_vcpus = old_instance['vcpus']
        instance_memory_mb = old_instance['memory_mb']
        if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
                                          task_states.RESIZE_FINISH):
            Migration = migration_obj.Migration
            try:
                migration = Migration.get_by_instance_and_status(
                    context.elevated(), old_instance.uuid, 'post-migrating')
            except exception.MigrationNotFoundByStatus:
                migration = None
            # Only trust the migration record if it targets the flavor the
            # instance currently has; otherwise it is stale.
            if (migration and
                    new_instance_type_id ==
                        migration.new_instance_type_id):
                old_inst_type_id = migration.old_instance_type_id
                try:
                    old_inst_type = flavors.get_flavor(old_inst_type_id)
                except exception.FlavorNotFound:
                    LOG.warning(_("Flavor %d not found"), old_inst_type_id)
                    pass
                else:
                    instance_vcpus = old_inst_type['vcpus']
                    # VIDEO_RAM extra spec counts against the ram quota too.
                    vram_mb = int(old_inst_type['extra_specs']
                                  .get(VIDEO_RAM, 0))
                    instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
                    LOG.debug(_("going to delete a resizing instance"))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      user_id=user_id,
                                      instances=-1,
                                      cores=-instance_vcpus,
                                      ram=-instance_memory_mb)
        return reservations
    def _local_delete(self, context, instance, bdms, delete_type, cb):
        """Delete an instance directly from the database when its compute
        host is down.

        Emits start/end usage notifications, deallocates networking (when
        not running in an API cell), best-effort detaches/deletes attached
        volumes, invokes ``cb`` with local=True to update instance state,
        and finally destroys the instance record.
        """
        LOG.warning(_("instance's host %s is down, deleting from "
                      "database") % instance['host'], instance=instance)
        instance.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, "%s.start" % delete_type)
        elevated = context.elevated()
        if self.cell_type != 'api':
            self.network_api.deallocate_for_instance(elevated,
                                                     instance)
        # cleanup volumes
        for bdm in bdms:
            if bdm.is_volume:
                # NOTE(vish): We don't have access to correct volume
                #             connector info here (the host is down), so a
                #             fake connector is used to terminate the
                #             connection on the volume side.
                connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
                try:
                    self.volume_api.terminate_connection(context,
                                                         bdm.volume_id,
                                                         connector)
                    self.volume_api.detach(elevated, bdm.volume_id)
                    if bdm.delete_on_termination:
                        self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    # Best effort: a failed volume cleanup must not block
                    # the instance delete.
                    err_str = _("Ignoring volume cleanup failure due to %s")
                    LOG.warn(err_str % exc, instance=instance)
                bdm.destroy(context)
        cb(context, instance, bdms, local=True)
        # Capture system_metadata before destroy() so the end notification
        # can still include it.
        sys_meta = instance.system_metadata
        instance.destroy()
        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, "%s.end" % delete_type,
            system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
    def restore(self, context, instance):
        """Restore a previously soft-deleted instance.

        Re-reserves the instance quota, then either casts to the compute
        host (when the instance still has one) or flips the instance back
        to ACTIVE directly.  The reservation is committed on success and
        rolled back on any failure.
        """
        # Reserve quotas before we start restoring.
        flavor = instance.get_flavor()
        num_instances, quota_reservations = self._check_num_instances_quota(
                context, flavor, 1, 1)
        self._record_action_start(context, instance, instance_actions.RESTORE)
        try:
            if instance.host:
                instance.task_state = task_states.RESTORING
                instance.deleted_at = None
                instance.save(expected_task_state=[None])
                self.compute_rpcapi.restore_instance(context, instance)
            else:
                # No host: nothing to power on; just reset the state.
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.deleted_at = None
                instance.save(expected_task_state=[None])
            QUOTAS.commit(context, quota_reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
self.force_stop(context, instance, do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
expected_attrs=None):
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False,
expected_attrs=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug(_("Searching by: %s") % str(search_opts))
filters = {}
def _remap_flavor_filter(flavor_id):
flavor = flavor_obj.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
for opt, value in search_opts.iteritems():
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir, limit=limit, marker=marker,
expected_attrs=expected_attrs)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None, expected_attrs=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
if expected_attrs:
fields.extend(expected_attrs)
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
props_copy = dict(extra_properties, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def snapshot_volume_backed(self, context, instance, image_meta, name,
                               extra_properties=None):
        """Snapshot a volume-backed instance.

        Creates volume snapshots for every volume BDM, translates the
        BDMs into image block-device-mapping properties (bdm_v2), strips
        swap/ephemeral entries from any inherited image mappings, and
        registers a zero-sized image that carries the mapping.

        :returns: the metadata of the created image
        """
        image_meta['name'] = name
        image_meta['is_public'] = False
        properties = image_meta['properties']
        if instance['root_device_name']:
            properties['root_device_name'] = instance['root_device_name']
        properties.update(extra_properties or {})
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance['uuid'])
        mapping = []
        for bdm in bdms:
            if bdm.no_device:
                continue
            if bdm.is_volume:
                # create snapshot based on volume_id
                volume = self.volume_api.get(context, bdm.volume_id)
                # NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in
                #                 short time, it doesn't matter for now.
                name = _('snapshot for %s') % image_meta['name']
                snapshot = self.volume_api.create_snapshot_force(
                    context, volume['id'], name, volume['display_description'])
                mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
                                                              bdm)
                mapping_dict = mapping_dict.get_image_mapping()
            else:
                mapping_dict = bdm.get_image_mapping()
            mapping.append(mapping_dict)
        # Drop swap/ephemeral entries from mappings inherited from the
        # base image; they are not part of the snapshot.
        image_mappings = properties.get('mappings')
        if image_mappings:
            properties['mappings'] = [m for m in image_mappings
                                      if not block_device.is_swap_or_ephemeral(
                                          m['virtual'])]
        if mapping:
            properties['block_device_mapping'] = mapping
            properties['bdm_v2'] = True
        # These attributes belong to the old image record, not the new one.
        for attr in ('status', 'location', 'id'):
            image_meta.pop(attr, None)
        # The image itself carries no data; everything lives in the volumes.
        image_meta['size'] = 0
        return self.image_service.create(context, image_meta, data='')
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=set(
                    vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
                          task_state=[None, task_states.REBOOTING,
                                      task_states.REBOOTING_HARD,
                                      task_states.RESUMING,
                                      task_states.UNPAUSING,
                                      task_states.PAUSING,
                                      task_states.SUSPENDING])
    def reboot(self, context, instance, reboot_type):
        """Reboot the given instance.

        :param reboot_type: 'SOFT' or 'HARD'
        :raises: InstanceInvalidState if a soft reboot is requested in a
                 vm_state that only allows hard reboot, or if a reboot of
                 the requested strength is already in progress
        """
        # SOFT reboot is only valid from the soft-rebootable vm states;
        # the decorator above admits the union of soft+hard states.
        if (reboot_type == 'SOFT' and
                (instance['vm_state'] not in vm_states.ALLOW_SOFT_REBOOT)):
            raise exception.InstanceInvalidState(
                attr='vm_state',
                instance_uuid=instance['uuid'],
                state=instance['vm_state'],
                method='reboot')
        # Reject a SOFT reboot while any reboot is pending, and a HARD
        # reboot while a HARD reboot is pending (HARD may escalate SOFT).
        if ((reboot_type == 'SOFT' and
                instance['task_state'] in
                (task_states.REBOOTING, task_states.REBOOTING_HARD)) or
                (reboot_type == 'HARD' and
                instance['task_state'] == task_states.REBOOTING_HARD)):
            raise exception.InstanceInvalidState(
                attr='task_state',
                instance_uuid=instance['uuid'],
                state=instance['task_state'],
                method='reboot')
        state = {'SOFT': task_states.REBOOTING,
                 'HARD': task_states.REBOOTING_HARD}[reboot_type]
        instance.task_state = state
        # A pending soft reboot may be upgraded to a hard one, hence
        # REBOOTING is an acceptable starting task state here.
        instance.save(expected_task_state=[None, task_states.REBOOTING])
        self._record_action_start(context, instance, instance_actions.REBOOT)
        self.compute_rpcapi.reboot_instance(context, instance=instance,
                                            block_device_info=None,
                                            reboot_type=reboot_type)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR],
                          task_state=[None])
    def rebuild(self, context, instance, image_href, admin_password,
                files_to_inject=None, **kwargs):
        """Rebuild the given instance with the provided attributes.

        Validates the new image and flavor combination, replaces the
        image-derived system metadata, updates the instance record, and
        casts the rebuild to the compute host.

        :param image_href: ref of the image to rebuild onto
        :param admin_password: new admin password for the rebuilt instance
        :param files_to_inject: optional list of files to inject
        :param kwargs: extra instance field updates (e.g. metadata,
                       preserve_ephemeral, auto_disk_config)
        """
        orig_image_ref = instance.image_ref or ''
        files_to_inject = files_to_inject or []
        metadata = kwargs.get('metadata', {})
        preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
        auto_disk_config = kwargs.get('auto_disk_config')
        image_id, image = self._get_image(context, image_href)
        self._check_auto_disk_config(image=image, **kwargs)
        flavor = instance.get_flavor()
        self._checks_for_create_and_rebuild(context, image_id, image,
                flavor, metadata, files_to_inject)
        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
            context, None, None, image)

        def _reset_image_metadata():
            """Swap image-derived system metadata for the new image's and
            return the original system metadata for the rebuild RPC.

            NOTE: there is a race window here -- if the instance's
            system_metadata is updated after the save() above and before
            this update, those other updates are lost.  The same problem
            exists in a lot of other places and should be addressed in a
            DB layer overhaul.
            """
            orig_sys_metadata = dict(instance.system_metadata)
            # Remove the old keys
            for key in instance.system_metadata.keys():
                if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
                    del instance.system_metadata[key]
            # Add the new ones
            new_sys_metadata = utils.get_system_metadata_from_image(
                image, flavor)
            instance.system_metadata.update(new_sys_metadata)
            instance.save()
            return orig_sys_metadata

        # Since image might have changed, we may have new values for
        # os_type, vm_mode, etc
        options_from_image = self._inherit_properties_from_image(
            image, auto_disk_config)
        instance.update(options_from_image)
        instance.task_state = task_states.REBUILDING
        instance.image_ref = image_href
        instance.kernel_id = kernel_id or ""
        instance.ramdisk_id = ramdisk_id or ""
        instance.progress = 0
        instance.update(kwargs)
        instance.save(expected_task_state=[None])
        # On a rebuild, since we're potentially changing images, we need to
        # wipe out the old image properties from system metadata...
        # and copy in the properties for the new image.
        orig_sys_metadata = _reset_image_metadata()
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        self._record_action_start(context, instance, instance_actions.REBUILD)
        self.compute_rpcapi.rebuild_instance(context, instance=instance,
                new_pass=admin_password, injected_files=files_to_inject,
                image_ref=image_href, orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata, bdms=bdms,
                preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.RESIZED])
    def revert_resize(self, context, instance):
        """Revert a resize, returning the instance to its old flavor.

        Reverses the upsize quota reservation made when the resize was
        requested, marks the finished migration 'reverting', and casts
        the revert to the destination compute host.
        """
        elevated = context.elevated()
        migration = migration_obj.Migration.get_by_instance_and_status(
            elevated, instance.uuid, 'finished')
        # reverse quota reservation for increased resource usage
        deltas = self._reverse_upsize_quota_delta(context, migration)
        quotas = self._reserve_quota_delta(context, deltas, instance)
        instance.task_state = task_states.RESIZE_REVERTING
        try:
            instance.save(expected_task_state=[None])
        except Exception:
            # Roll the reservation back if another task slipped in.
            with excutils.save_and_reraise_exception():
                quotas.rollback(context)
        migration.status = 'reverting'
        migration.save()
        # With cells, the best we can do right now is commit the reservations
        # immediately...
        if CONF.cells.enable:
            quotas.commit(context)
        self._record_action_start(context, instance,
                                  instance_actions.REVERT_RESIZE)
        self.compute_rpcapi.revert_resize(context, instance,
                                          migration,
                                          migration.dest_compute,
                                          quotas.reservations or [])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.RESIZED])
    def confirm_resize(self, context, instance, migration=None):
        """Confirm a resize, keeping the instance on its new flavor.

        Reserves any quota *decrease* (downsizes free resources), marks
        the migration 'confirming', and casts the confirmation to the
        source compute host so it can clean up the old instance.
        """
        elevated = context.elevated()
        if migration is None:
            migration = migration_obj.Migration.get_by_instance_and_status(
                elevated, instance.uuid, 'finished')
        # reserve quota only for any decrease in resource usage
        deltas = self._downsize_quota_delta(context, instance)
        quotas = self._reserve_quota_delta(context, deltas, instance)
        migration.status = 'confirming'
        migration.save()
        # With cells, the best we can do right now is commit the reservations
        # immediately...
        if CONF.cells.enable:
            quotas.commit(context)
        self._record_action_start(context, instance,
                                  instance_actions.CONFIRM_RESIZE)
        self.compute_rpcapi.confirm_resize(context,
                                           instance,
                                           migration,
                                           migration.source_compute,
                                           quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
old_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['old_instance_type_id'])
new_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
old_flavor = instance.get_flavor('old')
new_flavor = instance.get_flavor('new')
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, instance):
quotas = quotas_obj.Quotas()
if deltas:
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
**deltas)
return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
current_instance_type, new_instance_type):
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit(context)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
                          task_state=[None])
    def resize(self, context, instance, flavor_id=None,
               **extra_instance_updates):
        """Resize (or migrate) the instance.

        When ``flavor_id`` is None this is a plain migration keeping the
        current flavor.  Otherwise, validates the target flavor, reserves
        any quota increase, and hands the request to the scheduler via
        the compute task API.

        :raises: FlavorNotFound, CannotResizeToSameFlavor,
                 TooManyInstances (quota exceeded)
        """
        self._check_auto_disk_config(instance, **extra_instance_updates)
        current_instance_type = flavors.extract_flavor(instance)
        # If flavor_id is not provided, only migrate the instance.
        if not flavor_id:
            LOG.debug(_("flavor_id is None. Assuming migration."),
                      instance=instance)
            new_instance_type = current_instance_type
        else:
            new_instance_type = flavors.get_flavor_by_flavor_id(
                    flavor_id, read_deleted="no")
        current_instance_type_name = current_instance_type['name']
        new_instance_type_name = new_instance_type['name']
        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
                    " new instance type %(new_instance_type_name)s"),
                  {'current_instance_type_name': current_instance_type_name,
                   'new_instance_type_name': new_instance_type_name},
                  instance=instance)
        if not new_instance_type:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        same_instance_type = (current_instance_type['id'] ==
                              new_instance_type['id'])
        # NOTE: a disabled flavor is still allowed for a same-flavor
        # migration, e.g. when Ops is migrating off of a failed host.
        if not same_instance_type and new_instance_type.get('disabled'):
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        if same_instance_type and flavor_id and self.cell_type != 'compute':
            raise exception.CannotResizeToSameFlavor()
        # ensure there is sufficient headroom for upsizes
        deltas = self._upsize_quota_delta(context, new_instance_type,
                                          current_instance_type)
        try:
            quotas = self._reserve_quota_delta(context, deltas, instance)
        except exception.OverQuota as exc:
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            headroom = exc.kwargs['headroom']
            # Report on the first exceeded resource.
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)
            LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                       " tried to resize instance."),
                     {'overs': overs, 'pid': context.project_id})
            raise exception.TooManyInstances(overs=overs,
                                             req=deltas[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)
        instance.task_state = task_states.RESIZE_PREP
        instance.progress = 0
        instance.update(extra_instance_updates)
        instance.save(expected_task_state=[None])
        filter_properties = {'ignore_hosts': []}
        if not CONF.allow_resize_to_same_host:
            filter_properties['ignore_hosts'].append(instance['host'])
        # Here when flavor_id is None, the process is considered as migrate.
        if (not flavor_id and not CONF.allow_migrate_to_same_host):
            filter_properties['ignore_hosts'].append(instance['host'])
        if self.cell_type == 'api':
            # Commit reservations early and create migration record.
            self._resize_cells_support(context, quotas, instance,
                                       current_instance_type,
                                       new_instance_type)
        self._record_action_start(context, instance, instance_actions.RESIZE)
        scheduler_hint = {'filter_properties': filter_properties}
        self.compute_task_api.resize_instance(context, instance,
                extra_instance_updates, scheduler_hint=scheduler_hint,
                flavor=new_instance_type,
                reservations=quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_rdp_console(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
self.compute_rpcapi.inject_network_info(context, instance=instance)
    def _attach_volume(self, context, instance, volume_id, device,
                       disk_bus, device_type):
        """Attach an existing volume to an existing instance.

        Reserves a device name on the compute host first (which creates a
        block-device-mapping row); on any subsequent failure that row is
        destroyed so no stale mapping is left behind.

        :returns: the device name actually reserved on the host
        """
        device = self.compute_rpcapi.reserve_block_device_name(
            context, device=device, instance=instance, volume_id=volume_id,
            disk_bus=disk_bus, device_type=device_type)
        volume_bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                context, volume_id)
        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.check_attach(context, volume, instance=instance)
            self.volume_api.reserve_volume(context, volume_id)
            self.compute_rpcapi.attach_volume(context, instance=instance,
                    volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
        except Exception:
            # Roll back the BDM created by the reservation, then re-raise.
            with excutils.save_and_reraise_exception():
                volume_bdm.destroy(context)
        return device
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED],
                          task_state=[None])
    def attach_volume(self, context, instance, volume_id, device=None,
                      disk_bus=None, device_type=None):
        """Attach a volume, validating the requested device path first.

        :raises: InvalidDevicePath if a device was requested but does not
            match the platform's device naming convention
        """
        if device and not block_device.match_device(device):
            raise exception.InvalidDevicePath(path=device)
        return self._attach_volume(context, instance, volume_id, device,
                                   disk_bus, device_type)
    def _detach_volume(self, context, instance, volume):
        """Detach a volume: validate, mark it 'detaching', then cast to
        the compute host to perform the actual detach."""
        self.volume_api.check_detach(context, volume)
        self.volume_api.begin_detaching(context, volume['id'])
        self.compute_rpcapi.detach_volume(context, instance=instance,
                volume_id=volume['id'])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED],
                          task_state=[None])
    def detach_volume(self, context, instance, volume):
        """Detach a volume from an instance.

        :raises: InvalidVolume if the volume is not attached
        :raises: VolumeUnattached if it is attached to a different instance
        """
        if volume['attach_status'] == 'detached':
            msg = _("Volume must be attached in order to detach.")
            raise exception.InvalidVolume(reason=msg)
        if volume['instance_uuid'] != instance['uuid']:
            raise exception.VolumeUnattached(volume_id=volume['id'])
        self._detach_volume(context, instance, volume)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED,
                                    vm_states.RESIZED, vm_states.SOFT_DELETED],
                          task_state=[None])
    def swap_volume(self, context, instance, old_volume, new_volume):
        """Swap an attached volume for a detached one of at least equal size.

        Validates both volumes, reserves the new one and marks the old one
        'detaching' before casting to the compute host; both state changes
        are rolled back if the RPC fails.
        """
        if old_volume['attach_status'] == 'detached':
            raise exception.VolumeUnattached(volume_id=old_volume['id'])
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if old_volume['instance_uuid'] != instance['uuid']:
            msg = _("Old volume is attached to a different instance.")
            raise exception.InvalidVolume(reason=msg)
        if new_volume['attach_status'] == 'attached':
            msg = _("New volume must be detached in order to swap.")
            raise exception.InvalidVolume(reason=msg)
        if int(new_volume['size']) < int(old_volume['size']):
            msg = _("New volume must be the same size or larger.")
            raise exception.InvalidVolume(reason=msg)
        self.volume_api.check_detach(context, old_volume)
        self.volume_api.check_attach(context, new_volume, instance=instance)
        self.volume_api.begin_detaching(context, old_volume['id'])
        self.volume_api.reserve_volume(context, new_volume['id'])
        try:
            self.compute_rpcapi.swap_volume(
                    context, instance=instance,
                    old_volume_id=old_volume['id'],
                    new_volume_id=new_volume['id'])
        except Exception:
            # Undo both volume state transitions, then re-raise.
            with excutils.save_and_reraise_exception():
                self.volume_api.roll_detaching(context, old_volume['id'])
                self.volume_api.unreserve_volume(context, new_volume['id'])
    @wrap_check_policy
    @check_instance_lock
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Attach a network interface to the instance via the compute host."""
        return self.compute_rpcapi.attach_interface(context,
            instance=instance, network_id=network_id, port_id=port_id,
            requested_ip=requested_ip)
    @wrap_check_policy
    @check_instance_lock
    def detach_interface(self, context, instance, port_id):
        """Detach the network interface identified by port_id."""
        self.compute_rpcapi.detach_interface(context, instance=instance,
            port_id=port_id)
    @wrap_check_policy
    def get_instance_metadata(self, context, instance):
        """Return the instance's metadata as a plain dict."""
        rv = self.db.instance_metadata_get(context, instance['uuid'])
        return dict(rv.iteritems())
    def get_all_instance_metadata(self, context, search_filts):
        """Return filtered user metadata entries across all instances."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='metadata')
    def get_all_system_metadata(self, context, search_filts):
        """Return filtered system metadata entries across all instances."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
continue
return formatted_metadata_list
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def delete_instance_metadata(self, context, instance, key):
        """Delete one metadata key and tell the compute host about it."""
        instance.delete_metadata_key(key)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff={key: ['-']})
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Update (or, with delete=True, replace) the instance's metadata.

        Enforces the metadata quota, persists the change, then sends the
        computed diff to the compute host.

        :returns: the new metadata dict
        """
        orig = dict(instance.metadata)
        if delete:
            _metadata = metadata
        else:
            _metadata = dict(instance.metadata)
            _metadata.update(metadata)
        self._check_metadata_properties_quota(context, _metadata)
        instance.metadata = _metadata
        instance.save()
        diff = _diff_dict(orig, instance.metadata)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff=diff)
        return _metadata
    def get_instance_faults(self, context, instances):
        """Return recorded faults for a list of instances, keyed by uuid.

        Policy is checked per instance before any DB lookup.
        """
        if not instances:
            return {}
        for instance in instances:
            check_policy(context, 'get_instance_faults', instance)
        uuids = [instance['uuid'] for instance in instances]
        return self.db.instance_fault_get_by_instance_uuids(context, uuids)
    def is_volume_backed_instance(self, context, instance, bdms=None):
        """Return True when the instance boots from a volume.

        An empty image_ref means volume-backed; otherwise the root block
        device mapping (loaded from the DB unless *bdms* is supplied)
        decides.
        """
        if not instance['image_ref']:
            return True
        if bdms is None:
            bdms = block_device_obj.BlockDeviceMappingList.\
                get_by_instance_uuid(context, instance['uuid'])
        root_bdm = bdms.root_bdm()
        if not root_bdm:
            return False
        return root_bdm.is_volume
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def live_migrate(self, context, instance, block_migration,
                     disk_over_commit, host_name):
        """Migrate a running instance to another host without downtime.

        Sets task_state to MIGRATING (expecting no task in flight) and
        hands the operation to the conductor task API.
        """
        LOG.debug(_("Going to try to live migrate instance to %s"),
                  host_name or "another host", instance=instance)
        instance.task_state = task_states.MIGRATING
        instance.save(expected_task_state=[None])
        self.compute_task_api.live_migrate_instance(context, instance,
                host_name, block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
                          task_state=[None])
    def evacuate(self, context, instance, host, on_shared_storage,
                 admin_password=None):
        """Rebuild the instance on another host after its compute host died.

        Refuses to run while the source compute service is still reported
        up, since that would risk two hypervisors owning the same guest.
        """
        LOG.debug(_('vm evacuation scheduled'))
        inst_host = instance['host']
        service = service_obj.Service.get_by_compute_host(context, inst_host)
        if self.servicegroup_api.service_is_up(service):
            msg = (_('Instance compute service state on %s '
                     'expected to be down, but it was up.') % inst_host)
            LOG.error(msg)
            raise exception.ComputeServiceInUse(host=inst_host)
        instance = self.update(context, instance, expected_task_state=[None],
                               task_state=task_states.REBUILDING)
        self._record_action_start(context, instance, instance_actions.EVACUATE)
        inst_obj = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=['metadata', 'system_metadata', 'info_cache'])
        # recreate=True makes rebuild_instance place the guest on the new
        # host rather than rebuilding in place.
        return self.compute_rpcapi.rebuild_instance(context,
                                                    instance=inst_obj,
                                                    new_pass=admin_password,
                                                    injected_files=None,
                                                    image_ref=None,
                                                    orig_image_ref=None,
                                                    orig_sys_metadata=None,
                                                    bdms=None,
                                                    recreate=True,
                                                    on_shared_storage=on_shared_storage,
                                                    host=host)
    def get_migrations(self, context, filters):
        """Return migrations matching the given filters."""
        return migration_obj.MigrationList.get_by_filters(context, filters)
    @wrap_check_policy
    def volume_snapshot_create(self, context, volume_id, create_info):
        """Create an assisted (hypervisor-side) snapshot of a volume.

        :returns: dict in the {'snapshot': {'id', 'volumeId'}} shape
        """
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
                volume_id, create_info)
        snapshot = {
            'snapshot': {
                'id': create_info.get('id'),
                'volumeId': volume_id
            }
        }
        return snapshot
    @wrap_check_policy
    def volume_snapshot_delete(self, context, volume_id, snapshot_id,
                               delete_info):
        """Delete an assisted (hypervisor-side) volume snapshot."""
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
                volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
    """Sub-set of the Compute API for managing host operations:
    enable/disable, power actions, maintenance mode, and service /
    compute-node queries."""
    def __init__(self, rpcapi=None):
        self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
        super(HostAPI, self).__init__()
    def _assert_host_exists(self, context, host_name, must_be_up=False):
        """Raise HostNotFound if the compute host does not exist; with
        must_be_up, also raise ComputeServiceUnavailable when the service
        is down.  Returns the canonical host name."""
        service = service_obj.Service.get_by_compute_host(context, host_name)
        if not service:
            raise exception.HostNotFound(host=host_name)
        if must_be_up and not self.servicegroup_api.service_is_up(service):
            raise exception.ComputeServiceUnavailable(host=host_name)
        return service['host']
    @wrap_exception()
    def set_host_enabled(self, context, host_name, enabled):
        """Set the host's ability to accept new instances, emitting
        start/end notifications around the RPC call."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'enabled': enabled}
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.start',
                                               payload)
        result = self.rpcapi.set_host_enabled(context, enabled=enabled,
                host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.end',
                                               payload)
        return result
    def get_host_uptime(self, context, host_name):
        """Return the host's uptime; the host must exist and be up."""
        host_name = self._assert_host_exists(context, host_name,
                must_be_up=True)
        return self.rpcapi.get_host_uptime(context, host=host_name)
    @wrap_exception()
    def host_power_action(self, context, host_name, action):
        """Perform a power action (reboot/shutdown/startup) on the host,
        emitting start/end notifications around the RPC call."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'action': action}
        compute_utils.notify_about_host_update(context,
                                               'power_action.start',
                                               payload)
        result = self.rpcapi.host_power_action(context, action=action,
                host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'power_action.end',
                                               payload)
        return result
    @wrap_exception()
    def set_host_maintenance(self, context, host_name, mode):
        """Start or stop the host's maintenance window, emitting
        start/end notifications around the RPC call."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'mode': mode}
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.start',
                                               payload)
        result = self.rpcapi.host_maintenance_mode(context,
                host_param=host_name, mode=mode, host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.end',
                                               payload)
        return result
    def service_get_all(self, context, filters=None, set_zones=False):
        """Return services, filtered by exact-match attribute filters.

        The 'disabled' filter is applied at the DB layer; filtering on
        'availability_zone' forces zone information to be populated.
        """
        if filters is None:
            filters = {}
        disabled = filters.pop('disabled', None)
        if 'availability_zone' in filters:
            set_zones = True
        services = service_obj.ServiceList.get_all(context, disabled,
                                                   set_zones=set_zones)
        ret_services = []
        for service in services:
            for key, val in filters.iteritems():
                if service[key] != val:
                    break
            else:
                # All filters matched.
                ret_services.append(service)
        return ret_services
    def service_get_by_compute_host(self, context, host_name):
        """Return the compute service record for a given host name."""
        return service_obj.Service.get_by_compute_host(context, host_name)
    def service_update(self, context, host_name, binary, params_to_update):
        """Update and persist fields of a service identified by host/binary."""
        service = service_obj.Service.get_by_args(context, host_name,
                                                  binary)
        service.update(params_to_update)
        service.save()
        return service
    def service_delete(self, context, service_id):
        """Delete the given service record."""
        service_obj.Service.get_by_id(context, service_id).destroy()
    def instance_get_all_by_host(self, context, host_name):
        """Return all instances on a specific host."""
        return self.db.instance_get_all_by_host(context, host_name)
    def task_log_get_all(self, context, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Return task logs for a named task within a time period,
        optionally filtered by host and state."""
        return self.db.task_log_get_all(context, task_name,
                                        period_beginning,
                                        period_ending,
                                        host=host,
                                        state=state)
    def compute_node_get(self, context, compute_id):
        """Return a compute node by its (integer) id."""
        return self.db.compute_node_get(context, int(compute_id))
    def compute_node_get_all(self, context):
        """Return all compute node records."""
        return self.db.compute_node_get_all(context)
    def compute_node_search_by_hypervisor(self, context, hypervisor_match):
        """Return compute nodes whose hypervisor hostname matches."""
        return self.db.compute_node_search_by_hypervisor(context,
                hypervisor_match)
    def compute_node_statistics(self, context):
        """Return aggregate statistics over all compute nodes."""
        return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
    """Sub-set of the Compute API for querying instance actions and
    their events."""
    def actions_get(self, context, instance):
        """Return the list of actions recorded for an instance."""
        return instance_action.InstanceActionList.get_by_instance_uuid(
            context, instance['uuid'])
    def action_get_by_request_id(self, context, instance, request_id):
        """Return the instance action recorded under a request id."""
        return instance_action.InstanceAction.get_by_request_id(
            context, instance['uuid'], request_id)
    def action_events_get(self, context, instance, action_id):
        """Return the events recorded under a given action."""
        return instance_action.InstanceActionEventList.get_by_action(
            context, action_id)
class AggregateAPI(base.Base):
    """Sub-set of the Compute API for managing host aggregates and their
    availability-zone metadata."""
    def __init__(self, **kwargs):
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        super(AggregateAPI, self).__init__(**kwargs)
    @wrap_exception()
    def create_aggregate(self, context, aggregate_name, availability_zone):
        """Create a new aggregate, optionally bound to an availability zone.

        :returns: aggregate info dict without 'hosts'/'metadata' keys
        """
        aggregate = aggregate_obj.Aggregate()
        aggregate.name = aggregate_name
        if availability_zone:
            aggregate.metadata = {'availability_zone': availability_zone}
        aggregate.create(context)
        aggregate = self._reformat_aggregate_info(aggregate)
        # To maintain the same API result as before.
        del aggregate['hosts']
        del aggregate['metadata']
        return aggregate
    def get_aggregate(self, context, aggregate_id):
        """Return aggregate info by id."""
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        return self._reformat_aggregate_info(aggregate)
    def get_aggregate_list(self, context):
        """Return info dicts for all aggregates."""
        aggregates = aggregate_obj.AggregateList.get_all(context)
        return [self._reformat_aggregate_info(agg) for agg in aggregates]
    def is_safe_to_update_az(self, context, aggregate, metadata,
                             action_name):
        """Ensure changing the aggregate's availability_zone will not
        conflict with a zone already assigned to one of its hosts.

        :raises: InvalidAggregateAction on conflict
        """
        if 'availability_zone' in metadata:
            aggregate_az = aggregate.metadata.get("availability_zone")
            for host in aggregate.hosts:
                host_az = availability_zones.get_host_availability_zone(
                    context, host)
                if (host_az and host_az != metadata["availability_zone"]
                        and host_az != CONF.default_availability_zone and
                        host_az != aggregate_az):
                    msg = _("This aggregate contains hosts in"
                            " an existing availability zone")
                    raise exception.InvalidAggregateAction(
                        action=action_name,
                        aggregate_id=aggregate.id,
                        reason=msg)
    @wrap_exception()
    def update_aggregate(self, context, aggregate_id, values):
        """Update an aggregate's name and/or metadata values."""
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        if 'name' in values:
            aggregate.name = values.pop('name')
        self.is_safe_to_update_az(context, aggregate,
                                  values, "update aggregate")
        if values:
            aggregate.metadata = values
        aggregate.save()
        # If updated values include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if values.get('availability_zone'):
            availability_zones.reset_cache()
        return self._reformat_aggregate_info(aggregate)
    @wrap_exception()
    def update_aggregate_metadata(self, context, aggregate_id, metadata):
        """Update the metadata of an aggregate after checking the
        availability-zone change is safe."""
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        self.is_safe_to_update_az(context, aggregate,
                                  metadata, "update aggregate metadata")
        aggregate.update_metadata(metadata)
        # If updated metadata include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if metadata and metadata.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate
    @wrap_exception()
    def delete_aggregate(self, context, aggregate_id):
        """Delete an aggregate; refuses when hosts are still attached.

        :raises: InvalidAggregateAction if the aggregate is not empty
        """
        aggregate_payload = {'aggregate_id': aggregate_id}
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.start",
                                                    aggregate_payload)
        aggregate = aggregate_obj.Aggregate.get_by_id(context,
                                                      aggregate_id)
        if len(aggregate.hosts) > 0:
            msg = _("Host aggregate is not empty")
            raise exception.InvalidAggregateAction(action='delete',
                                                   aggregate_id=aggregate_id,
                                                   reason=msg)
        aggregate.destroy()
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.end",
                                                    aggregate_payload)
    def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
        """Reject adding a host whose zone differs from the aggregate's."""
        # NOTE(mtreinish) The availability_zone key returns a set of
        # zones so loop over each zone. However there should only
        # ever be one zone in the set because an aggregate can only
        # have a single availability zone set at one time.
        for aggregate_az in aggregate_meta["availability_zone"]:
            # NOTE(mtreinish) Ensure that the aggregate_az is not none
            # if it is none then that is just a regular aggregate and
            # it is valid to have a host in multiple aggregates.
            if aggregate_az and aggregate_az != host_az:
                msg = _("Host already in availability zone "
                        "%s") % host_az
                action_name = "add_host_to_aggregate"
                raise exception.InvalidAggregateAction(
                    action=action_name, aggregate_id=aggregate_id,
                    reason=msg)
    def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
        """Refresh the host's cached availability zone when the aggregate
        carries zone metadata."""
        # Update the availability_zone cache to avoid getting wrong
        # availability_zone in cache retention time when add/remove
        # host to/from aggregate.
        if aggregate_meta and aggregate_meta.get('availability_zone'):
            availability_zones.update_host_availability_zone_cache(context,
                                                                   host_name)
    @wrap_exception()
    def add_host_to_aggregate(self, context, aggregate_id, host_name):
        """Add a compute host to an aggregate, after validating the host
        exists and its availability zone is compatible."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        service_obj.Service.get_by_compute_host(context, host_name)
        host_az = availability_zones.get_host_availability_zone(context,
                                                                host_name)
        if host_az and host_az != CONF.default_availability_zone:
            aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
                context, aggregate_id, 'availability_zone')
            if aggregate_meta.get("availability_zone"):
                self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        aggregate.add_host(context, host_name)
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        #NOTE(jogo): Send message to host to support resource pools
        self.compute_rpcapi.add_aggregate_host(context,
                aggregate=aggregate, host_param=host_name, host=host_name)
        aggregate_payload.update({'name': aggregate['name']})
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.end",
                                                    aggregate_payload)
        return self._reformat_aggregate_info(aggregate)
    @wrap_exception()
    def remove_host_from_aggregate(self, context, aggregate_id, host_name):
        """Remove a compute host from an aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        service_obj.Service.get_by_compute_host(context, host_name)
        aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
        aggregate.delete_host(host_name)
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        self.compute_rpcapi.remove_aggregate_host(context,
                aggregate=aggregate, host_param=host_name, host=host_name)
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.end",
                                                    aggregate_payload)
        return self._reformat_aggregate_info(aggregate)
    def _reformat_aggregate_info(self, aggregate):
        """Return a plain dict view of an aggregate object."""
        return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
    """Sub-set of the Compute API for managing SSH keypairs."""
    get_notifier = functools.partial(rpc.get_notifier, service='api')
    wrap_exception = functools.partial(exception.wrap_exception,
                                       get_notifier=get_notifier)
    def _notify(self, context, event_suffix, keypair_name):
        """Emit a 'keypair.<event_suffix>' notification."""
        payload = {
            'tenant_id': context.project_id,
            'user_id': context.user_id,
            'key_name': keypair_name,
        }
        notify = self.get_notifier()
        notify.info(context, 'keypair.%s' % event_suffix, payload)
    def _validate_new_key_pair(self, context, user_id, key_name):
        """Validate keypair name (charset and length) and enforce the
        per-user keypair quota.

        :raises: InvalidKeypair, KeypairLimitExceeded
        """
        safe_chars = "_- " + string.digits + string.ascii_letters
        clean_value = "".join(x for x in key_name if x in safe_chars)
        if clean_value != key_name:
            raise exception.InvalidKeypair(
                reason=_("Keypair name contains unsafe characters"))
        if not 0 < len(key_name) < 256:
            raise exception.InvalidKeypair(
                reason=_('Keypair name must be between '
                         '1 and 255 characters long'))
        count = QUOTAS.count(context, 'key_pairs', user_id)
        try:
            QUOTAS.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()
    @wrap_exception()
    def import_key_pair(self, context, user_id, key_name, public_key):
        """Import a user-supplied public key as a new keypair."""
        self._validate_new_key_pair(context, user_id, key_name)
        self._notify(context, 'import.start', key_name)
        fingerprint = crypto.generate_fingerprint(public_key)
        keypair = keypair_obj.KeyPair()
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create(context)
        self._notify(context, 'import.end', key_name)
        return keypair
    @wrap_exception()
    def create_key_pair(self, context, user_id, key_name):
        """Generate a new keypair; returns (keypair, private_key).

        The private key is returned once and never stored.
        """
        self._validate_new_key_pair(context, user_id, key_name)
        self._notify(context, 'create.start', key_name)
        private_key, public_key, fingerprint = crypto.generate_key_pair()
        keypair = keypair_obj.KeyPair()
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create(context)
        self._notify(context, 'create.end', key_name)
        return keypair, private_key
    @wrap_exception()
    def delete_key_pair(self, context, user_id, key_name):
        """Delete a keypair by name."""
        self._notify(context, 'delete.start', key_name)
        keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
        self._notify(context, 'delete.end', key_name)
    def get_key_pairs(self, context, user_id):
        """List all keypairs for a user."""
        return keypair_obj.KeyPairList.get_by_user(context, user_id)
    def get_key_pair(self, context, user_id, key_name):
        """Get a user's keypair by name."""
        return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
quotas = quotas_obj.Quotas()
quota_project, quota_user = quotas_obj.ids_from_security_group(
context, security_group)
try:
quotas.reserve(context, project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
quotas.commit()
def is_associated_with_server(self, security_group, instance_uuid):
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
    """Push updated rules of security group *id* to every hosted member."""
    group = self.db.security_group_get(
        context, id, columns_to_join=['instances'])
    for instance in group['instances']:
        # Instances that have not been scheduled yet have no host to notify.
        if instance['host'] is not None:
            self.security_group_rpcapi.refresh_instance_security_rules(
                context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
    """Refresh rules on every instance affected by membership changes.

    Walks grantee rules -> their parent groups -> member instances,
    de-duplicating at every step, then notifies each hosted instance once.
    """
    # Rules that reference any of the changed groups as the grantee.
    grantee_rules = set()
    for group_id in group_ids:
        grantee_rules.update(
            self.db.security_group_rule_get_by_security_group_grantee(
                context,
                group_id))

    # The groups those rules belong to.
    parent_groups = set()
    for rule in grantee_rules:
        parent_groups.add(self.db.security_group_get(
            context, rule['parent_group_id'],
            columns_to_join=['instances']))

    # Unique member instances of those groups, keyed by uuid.
    members = {}
    for group in parent_groups:
        for instance in group['instances']:
            members.setdefault(instance['uuid'], instance)

    # Finally ask each hosted instance to refresh its rules.
    for instance in members.values():
        if instance['host']:
            self.security_group_rpcapi.refresh_instance_security_rules(
                context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
                                 detailed=False):
    """Return the security groups an instance belongs to.

    With detailed=True the full group records are returned; otherwise a
    list of {'name': ...} dicts.
    """
    if detailed:
        return self.db.security_group_get_by_instance(context,
                                                      instance_uuid)
    instance = self.db.instance_get_by_uuid(context, instance_uuid)
    names = [{'name': group['name']}
             for group in instance.get('security_groups') or ()]
    # NOTE(review): historical behaviour returns None (not []) when there
    # are no groups -- preserved in case callers test for None.
    return names or None
def populate_security_groups(self, instance, security_groups):
    """Attach a secgroup object list to *instance*.

    A falsy *security_groups* is normalized to an empty list so the
    instance always carries a real list object, never None.
    """
    instance.security_groups = security_group_obj.make_secgroup_list(
        security_groups or [])
| true | true |
f7335d5a4bcd975b1b405e0af50968afb9bf061a | 782 | py | Python | start_habittracker.py | nasch7kadse/habitTrackerIU | c2dcf1870641b857a130c67393a142140409ee91 | [
"MIT"
] | null | null | null | start_habittracker.py | nasch7kadse/habitTrackerIU | c2dcf1870641b857a130c67393a142140409ee91 | [
"MIT"
] | null | null | null | start_habittracker.py | nasch7kadse/habitTrackerIU | c2dcf1870641b857a130c67393a142140409ee91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from habittracker.utils import connect_to_database, init_sqlite_table, check_file_existing
from habittracker.commands import get_main_user_choice, display_title_bar, evaluate_main_user_choice
def main():
    """Entry point: ensure the habit DB exists, then run the menu loop."""
    database_name = "habits.db"

    # First run: create the schema before connecting.
    if not check_file_existing(database_name):
        init_sqlite_table(database_name)

    connection = connect_to_database(database_name)

    display_title_bar()
    while True:
        user_choice = get_main_user_choice()
        if user_choice == "exit":
            break
        evaluate_main_user_choice(user_choice, connection)


if __name__ == "__main__":
    main()
from habittracker.utils import connect_to_database, init_sqlite_table, check_file_existing
from habittracker.commands import get_main_user_choice, display_title_bar, evaluate_main_user_choice
def main():
database_name = "habits.db"
if not check_file_existing(database_name):
init_sqlite_table(database_name)
# Get DB connection
connection = connect_to_database(database_name)
# Start program
display_title_bar()
while True:
user_choice = get_main_user_choice()
if not user_choice == "exit":
evaluate_main_user_choice(user_choice, connection)
else:
break
if __name__ == "__main__":
main() | true | true |
f7335dca379a3a27904e683451f4ce5b30a570de | 16,159 | py | Python | raiden/transfer/mediated_transfer/state_change.py | gcarq/raiden | 82241c6da9188c4e029aef3bb42f0ab9f055c0e4 | [
"MIT"
] | null | null | null | raiden/transfer/mediated_transfer/state_change.py | gcarq/raiden | 82241c6da9188c4e029aef3bb42f0ab9f055c0e4 | [
"MIT"
] | null | null | null | raiden/transfer/mediated_transfer/state_change.py | gcarq/raiden | 82241c6da9188c4e029aef3bb42f0ab9f055c0e4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods,too-many-arguments,too-many-instance-attributes
from raiden.transfer.architecture import StateChange
from raiden.transfer.state import RouteState
from raiden.transfer.mediated_transfer.state import (
LockedTransferSignedState,
TransferDescriptionWithSecretState,
)
from raiden.utils import pex, sha3, typing
# Note: The init states must contain all the required data for trying doing
# useful work, ie. there must /not/ be an event for requesting new data.
class ActionInitInitiator(StateChange):
    """Initial state of a new mediated transfer (initiator side).

    Args:
        payment_network_identifier: Network the transfer belongs to.
        transfer_description: Transfer details, including the secret to use.
        routes: Candidate routes provided by a routing service.
    """

    def __init__(self, payment_network_identifier, transfer_description, routes):
        if not isinstance(transfer_description, TransferDescriptionWithSecretState):
            raise ValueError('transfer must be an TransferDescriptionWithSecretState instance.')

        self.payment_network_identifier = payment_network_identifier
        self.transfer = transfer_description
        self.routes = routes

    def __repr__(self):
        return '<ActionInitInitiator network:{} transfer:{}>'.format(
            self.payment_network_identifier,
            self.transfer,
        )

    def __eq__(self, other):
        if not isinstance(other, ActionInitInitiator):
            return False
        return (
            (self.payment_network_identifier, self.transfer, self.routes) ==
            (other.payment_network_identifier, other.transfer, other.routes)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ActionInitMediator(StateChange):
    """Initial state for a new mediator node.

    Args:
        payment_network_identifier: Network the transfer belongs to.
        routes: Candidate routes provided by a routing service.
        from_route: The payee route.
        from_transfer: The payee transfer.
    """

    def __init__(
            self,
            payment_network_identifier,
            routes: typing.List[RouteState],
            from_route: RouteState,
            from_transfer: LockedTransferSignedState):

        if not isinstance(from_route, RouteState):
            raise ValueError('from_route must be a RouteState instance')

        if not isinstance(from_transfer, LockedTransferSignedState):
            raise ValueError('from_transfer must be a LockedTransferSignedState instance')

        self.payment_network_identifier = payment_network_identifier
        self.routes = routes
        self.from_route = from_route
        self.from_transfer = from_transfer

    def __repr__(self):
        return '<ActionInitMediator network:{} from_route:{} from_transfer:{}>'.format(
            self.payment_network_identifier,
            self.from_route,
            self.from_transfer,
        )

    def __eq__(self, other):
        if not isinstance(other, ActionInitMediator):
            return False
        return (
            self.payment_network_identifier,
            self.routes,
            self.from_route,
            self.from_transfer,
        ) == (
            other.payment_network_identifier,
            other.routes,
            other.from_route,
            other.from_transfer,
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ActionInitTarget(StateChange):
    """Initial state for a new transfer target.

    Args:
        payment_network_identifier: Network the transfer belongs to.
        route: The payee route.
        transfer: The payee transfer.
    """

    def __init__(self, payment_network_identifier, route, transfer):
        if not isinstance(route, RouteState):
            raise ValueError('route must be a RouteState instance')

        if not isinstance(transfer, LockedTransferSignedState):
            raise ValueError('transfer must be a LockedTransferSignedState instance')

        self.payment_network_identifier = payment_network_identifier
        self.route = route
        self.transfer = transfer

    def __repr__(self):
        return '<ActionInitTarget network:{} route:{} transfer:{}>'.format(
            self.payment_network_identifier,
            self.route,
            self.transfer,
        )

    def __eq__(self, other):
        if not isinstance(other, ActionInitTarget):
            return False
        return (
            (self.payment_network_identifier, self.route, self.transfer) ==
            (other.payment_network_identifier, other.route, other.transfer)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ActionCancelRoute(StateChange):
    """Cancel the current route without cancelling the transfer.

    May be used e.g. when a specific route times out.
    """

    def __init__(self, identifier, routes):
        self.identifier = identifier
        self.routes = routes

    def __repr__(self):
        return '<ActionCancelRoute id:{}>'.format(self.identifier)

    def __eq__(self, other):
        if not isinstance(other, ActionCancelRoute):
            return False
        return (
            (self.identifier, self.routes) ==
            (other.identifier, other.routes)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ReceiveSecretRequest(StateChange):
    """A SecretRequest message was received."""

    def __init__(self, identifier, amount, hashlock, sender):
        self.identifier = identifier
        self.amount = amount
        self.hashlock = hashlock
        self.sender = sender
        # Filled in later by the state machine; starts unset.
        self.revealsecret = None

    def __repr__(self):
        return '<ReceiveSecretRequest id:{} amount:{} hashlock:{} sender:{}>'.format(
            self.identifier,
            self.amount,
            pex(self.hashlock),
            pex(self.sender),
        )

    def __eq__(self, other):
        if not isinstance(other, ReceiveSecretRequest):
            return False
        return (
            self.identifier,
            self.amount,
            self.hashlock,
            self.sender,
            self.revealsecret,
        ) == (
            other.identifier,
            other.amount,
            other.hashlock,
            other.sender,
            other.revealsecret,
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ReceiveSecretReveal(StateChange):
    """A SecretReveal message was received."""

    def __init__(self, secret, sender):
        self.secret = secret
        # The hashlock is derived from the secret and identifies the lock.
        self.hashlock = sha3(secret)
        self.sender = sender

    def __repr__(self):
        return '<ReceiveSecretReveal hashlock:{} sender:{}>'.format(
            pex(self.hashlock),
            pex(self.sender),
        )

    def __eq__(self, other):
        if not isinstance(other, ReceiveSecretReveal):
            return False
        return (
            (self.secret, self.hashlock, self.sender) ==
            (other.secret, other.hashlock, other.sender)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ReceiveTransferRefundCancelRoute(StateChange):
    """A RefundTransfer received by the initiator; cancels the current route."""

    def __init__(self, sender, routes, transfer, secret):
        if not isinstance(transfer, LockedTransferSignedState):
            raise ValueError('transfer must be an instance of LockedTransferSignedState')

        self.sender = sender
        self.transfer = transfer
        self.routes = routes
        self.secret = secret
        # Derived from the secret; identifies the lock being refunded.
        self.hashlock = sha3(secret)

    def __repr__(self):
        return '<ReceiveTransferRefundCancelRoute sender:{} transfer:{}>'.format(
            pex(self.sender),
            self.transfer
        )

    def __eq__(self, other):
        if not isinstance(other, ReceiveTransferRefundCancelRoute):
            return False
        return (
            self.sender,
            self.transfer,
            self.routes,
            self.secret,
            self.hashlock,
        ) == (
            other.sender,
            other.transfer,
            other.routes,
            other.secret,
            other.hashlock,
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ReceiveTransferRefund(StateChange):
    """A RefundTransfer message was received."""

    def __init__(self, sender, transfer: LockedTransferSignedState):
        if not isinstance(transfer, LockedTransferSignedState):
            raise ValueError('transfer must be an instance of LockedTransferSignedState')

        self.sender = sender
        self.transfer = transfer

    def __repr__(self):
        return '<ReceiveTransferRefund sender:{} transfer:{}>'.format(
            pex(self.sender),
            self.transfer,
        )

    def __eq__(self, other):
        if not isinstance(other, ReceiveTransferRefund):
            return False
        return (
            (self.sender, self.transfer) ==
            (other.sender, other.transfer)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ReceiveBalanceProof(StateChange):
    """A balance proof for transfer `identifier` was received."""

    def __init__(self, identifier, node_address, balance_proof):
        self.identifier = identifier
        self.node_address = node_address
        self.balance_proof = balance_proof

    def __repr__(self):
        return '<ReceiveBalanceProof id:{} node:{} balance_proof:{}>'.format(
            self.identifier,
            pex(self.node_address),
            self.balance_proof,
        )

    def __eq__(self, other):
        if not isinstance(other, ReceiveBalanceProof):
            return False
        return (
            (self.identifier, self.node_address, self.balance_proof) ==
            (other.identifier, other.node_address, other.balance_proof)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveWithdraw(StateChange):
    """A hash time lock was withdrawn on-chain.

    Emitted when the netting channel logs ChannelSecretRevealed.  Only the
    receiving address matters here: *receiver* is the address the lock's
    token was transferred to (either channel participant).  A refunded
    mediated transfer produces this state change once per receiver.
    """

    def __init__(self, channel_address, secret, receiver):
        self.channel_address = channel_address
        self.receiver = receiver
        self.secret = secret
        # Derived from the secret; identifies the withdrawn lock.
        self.hashlock = sha3(secret)

    def __repr__(self):
        return '<ContractReceiveWithdraw channel:{} hashlock:{} receiver:{}>'.format(
            pex(self.channel_address),
            pex(self.hashlock),
            pex(self.receiver),
        )

    def __eq__(self, other):
        if not isinstance(other, ContractReceiveWithdraw):
            return False
        return (
            self.channel_address,
            self.hashlock,
            self.receiver,
            self.secret,
        ) == (
            other.channel_address,
            other.hashlock,
            other.receiver,
            other.secret,
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveClosed(StateChange):
    """The channel was closed on-chain by *closing_address*."""

    def __init__(self, channel_address, closing_address, block_number):
        self.channel_address = channel_address
        self.closing_address = closing_address
        self.block_number = block_number  # TODO: rename to closed_block

    def __repr__(self):
        return '<ContractReceiveClosed channel:{} closing:{} block_number:{}>'.format(
            pex(self.channel_address),
            pex(self.closing_address),
            self.block_number,
        )

    def __eq__(self, other):
        if not isinstance(other, ContractReceiveClosed):
            return False
        return (
            (self.channel_address, self.closing_address, self.block_number) ==
            (other.channel_address, other.closing_address, other.block_number)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveSettled(StateChange):
    """The channel was settled on-chain."""

    def __init__(self, channel_address, block_number):
        self.channel_address = channel_address
        self.block_number = block_number  # TODO: rename to settle_block_number

    def __repr__(self):
        return '<ContractReceiveSettled channel:{} block_number:{}>'.format(
            pex(self.channel_address),
            self.block_number,
        )

    def __eq__(self, other):
        if not isinstance(other, ContractReceiveSettled):
            return False
        return (
            (self.channel_address, self.block_number) ==
            (other.channel_address, other.block_number)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveBalance(StateChange):
    """A participant's channel balance changed on-chain (e.g. a deposit).

    Attributes:
        channel_address: Address of the netting channel contract.
        token_address: Address of the channel's token.
        participant_address: Participant whose balance changed.
        balance: The participant's new balance.
        block_number: Block at which the event was mined.
    """

    def __init__(
            self,
            channel_address,
            token_address,
            participant_address,
            balance,
            block_number):

        self.channel_address = channel_address
        self.token_address = token_address
        self.participant_address = participant_address
        self.balance = balance
        self.block_number = block_number

    def __repr__(self):
        return (
            '<ContractReceiveBalance'
            ' channel:{} token:{} participant:{} balance:{} block_number:{}'
            '>'
        ).format(
            pex(self.channel_address),
            pex(self.token_address),
            pex(self.participant_address),
            self.balance,
            self.block_number,
        )

    def __eq__(self, other):
        return (
            isinstance(other, ContractReceiveBalance) and
            self.channel_address == other.channel_address and
            self.token_address == other.token_address and
            self.participant_address == other.participant_address and
            # Bug fix: `balance` was previously omitted from the comparison,
            # so two balance events differing only in amount compared equal.
            self.balance == other.balance and
            self.block_number == other.block_number
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveNewChannel(StateChange):
    """A new netting channel was created on-chain by *manager_address*."""

    def __init__(
            self,
            manager_address,
            channel_address,
            participant1,
            participant2,
            settle_timeout):

        self.manager_address = manager_address
        self.channel_address = channel_address
        self.participant1 = participant1
        self.participant2 = participant2
        self.settle_timeout = settle_timeout

    def __repr__(self):
        return (
            '<ContractReceiveNewChannel'
            ' manager:{} channel:{} participant1:{} participant2:{} settle_timeout:{}'
            '>'
        ).format(
            pex(self.manager_address),
            pex(self.channel_address),
            pex(self.participant1),
            pex(self.participant2),
            self.settle_timeout
        )

    def __eq__(self, other):
        if not isinstance(other, ContractReceiveNewChannel):
            return False
        return (
            self.manager_address,
            self.channel_address,
            self.participant1,
            self.participant2,
            self.settle_timeout,
        ) == (
            other.manager_address,
            other.channel_address,
            other.participant1,
            other.participant2,
            other.settle_timeout,
        )

    def __ne__(self, other):
        return not self.__eq__(other)
class ContractReceiveTokenAdded(StateChange):
    """A token was registered with the registry contract."""

    def __init__(self, registry_address, token_address, manager_address):
        self.registry_address = registry_address
        self.token_address = token_address
        self.manager_address = manager_address

    def __repr__(self):
        return '<ContractReceiveTokenAdded registry:{} token:{} manager:{}>'.format(
            pex(self.registry_address),
            pex(self.token_address),
            pex(self.manager_address),
        )

    def __eq__(self, other):
        if not isinstance(other, ContractReceiveTokenAdded):
            return False
        return (
            (self.registry_address, self.token_address, self.manager_address) ==
            (other.registry_address, other.token_address, other.manager_address)
        )

    def __ne__(self, other):
        return not self.__eq__(other)
| 31.871795 | 96 | 0.633641 |
from raiden.transfer.architecture import StateChange
from raiden.transfer.state import RouteState
from raiden.transfer.mediated_transfer.state import (
LockedTransferSignedState,
TransferDescriptionWithSecretState,
)
from raiden.utils import pex, sha3, typing
class ActionInitInitiator(StateChange):
def __init__(self, payment_network_identifier, transfer_description, routes):
if not isinstance(transfer_description, TransferDescriptionWithSecretState):
raise ValueError('transfer must be an TransferDescriptionWithSecretState instance.')
self.payment_network_identifier = payment_network_identifier
self.transfer = transfer_description
self.routes = routes
def __repr__(self):
return '<ActionInitInitiator network:{} transfer:{}>'.format(
self.payment_network_identifier,
self.transfer,
)
def __eq__(self, other):
return (
isinstance(other, ActionInitInitiator) and
self.payment_network_identifier == other.payment_network_identifier and
self.transfer == other.transfer and
self.routes == other.routes
)
def __ne__(self, other):
return not self.__eq__(other)
class ActionInitMediator(StateChange):
def __init__(
self,
payment_network_identifier,
routes: typing.List[RouteState],
from_route: RouteState,
from_transfer: LockedTransferSignedState):
if not isinstance(from_route, RouteState):
raise ValueError('from_route must be a RouteState instance')
if not isinstance(from_transfer, LockedTransferSignedState):
raise ValueError('from_transfer must be a LockedTransferSignedState instance')
self.payment_network_identifier = payment_network_identifier
self.routes = routes
self.from_route = from_route
self.from_transfer = from_transfer
def __repr__(self):
return '<ActionInitMediator network:{} from_route:{} from_transfer:{}>'.format(
self.payment_network_identifier,
self.from_route,
self.from_transfer,
)
def __eq__(self, other):
return (
isinstance(other, ActionInitMediator) and
self.payment_network_identifier == other.payment_network_identifier and
self.routes == other.routes and
self.from_route == other.from_route and
self.from_transfer == other.from_transfer
)
def __ne__(self, other):
return not self.__eq__(other)
class ActionInitTarget(StateChange):
def __init__(self, payment_network_identifier, route, transfer):
if not isinstance(route, RouteState):
raise ValueError('route must be a RouteState instance')
if not isinstance(transfer, LockedTransferSignedState):
raise ValueError('transfer must be a LockedTransferSignedState instance')
self.payment_network_identifier = payment_network_identifier
self.route = route
self.transfer = transfer
def __repr__(self):
return '<ActionInitTarget network:{} route:{} transfer:{}>'.format(
self.payment_network_identifier,
self.route,
self.transfer,
)
def __eq__(self, other):
return (
isinstance(other, ActionInitTarget) and
self.payment_network_identifier == other.payment_network_identifier and
self.route == other.route and
self.transfer == other.transfer
)
def __ne__(self, other):
return not self.__eq__(other)
class ActionCancelRoute(StateChange):
def __init__(self, identifier, routes):
self.identifier = identifier
self.routes = routes
def __repr__(self):
return '<ActionCancelRoute id:{}>'.format(
self.identifier,
)
def __eq__(self, other):
return (
isinstance(other, ActionCancelRoute) and
self.identifier == other.identifier and
self.routes == other.routes
)
def __ne__(self, other):
return not self.__eq__(other)
class ReceiveSecretRequest(StateChange):
def __init__(self, identifier, amount, hashlock, sender):
self.identifier = identifier
self.amount = amount
self.hashlock = hashlock
self.sender = sender
self.revealsecret = None
def __repr__(self):
return '<ReceiveSecretRequest id:{} amount:{} hashlock:{} sender:{}>'.format(
self.identifier,
self.amount,
pex(self.hashlock),
pex(self.sender),
)
def __eq__(self, other):
return (
isinstance(other, ReceiveSecretRequest) and
self.identifier == other.identifier and
self.amount == other.amount and
self.hashlock == other.hashlock and
self.sender == other.sender and
self.revealsecret == other.revealsecret
)
def __ne__(self, other):
return not self.__eq__(other)
class ReceiveSecretReveal(StateChange):
def __init__(self, secret, sender):
hashlock = sha3(secret)
self.secret = secret
self.hashlock = hashlock
self.sender = sender
def __repr__(self):
return '<ReceiveSecretReveal hashlock:{} sender:{}>'.format(
pex(self.hashlock),
pex(self.sender),
)
def __eq__(self, other):
return (
isinstance(other, ReceiveSecretReveal) and
self.secret == other.secret and
self.hashlock == other.hashlock and
self.sender == other.sender
)
def __ne__(self, other):
return not self.__eq__(other)
class ReceiveTransferRefundCancelRoute(StateChange):
def __init__(self, sender, routes, transfer, secret):
if not isinstance(transfer, LockedTransferSignedState):
raise ValueError('transfer must be an instance of LockedTransferSignedState')
hashlock = sha3(secret)
self.sender = sender
self.transfer = transfer
self.routes = routes
self.hashlock = hashlock
self.secret = secret
def __repr__(self):
return '<ReceiveTransferRefundCancelRoute sender:{} transfer:{}>'.format(
pex(self.sender),
self.transfer
)
def __eq__(self, other):
return (
isinstance(other, ReceiveTransferRefundCancelRoute) and
self.sender == other.sender and
self.transfer == other.transfer and
self.routes == other.routes and
self.secret == other.secret and
self.hashlock == other.hashlock
)
def __ne__(self, other):
return not self.__eq__(other)
class ReceiveTransferRefund(StateChange):
def __init__(self, sender, transfer: LockedTransferSignedState):
if not isinstance(transfer, LockedTransferSignedState):
raise ValueError('transfer must be an instance of LockedTransferSignedState')
self.sender = sender
self.transfer = transfer
def __repr__(self):
return '<ReceiveTransferRefund sender:{} transfer:{}>'.format(
pex(self.sender),
self.transfer,
)
def __eq__(self, other):
return (
isinstance(other, ReceiveTransferRefund) and
self.sender == other.sender and
self.transfer == other.transfer
)
def __ne__(self, other):
return not self.__eq__(other)
class ReceiveBalanceProof(StateChange):
def __init__(self, identifier, node_address, balance_proof):
self.identifier = identifier
self.node_address = node_address
self.balance_proof = balance_proof
def __repr__(self):
return '<ReceiveBalanceProof id:{} node:{} balance_proof:{}>'.format(
self.identifier,
pex(self.node_address),
self.balance_proof,
)
def __eq__(self, other):
return (
isinstance(other, ReceiveBalanceProof) and
self.identifier == other.identifier and
self.node_address == other.node_address and
self.balance_proof == other.balance_proof
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveWithdraw(StateChange):
def __init__(self, channel_address, secret, receiver):
hashlock = sha3(secret)
self.channel_address = channel_address
self.hashlock = hashlock
self.receiver = receiver
self.secret = secret
def __repr__(self):
return '<ContractReceiveWithdraw channel:{} hashlock:{} receiver:{}>'.format(
pex(self.channel_address),
pex(self.hashlock),
pex(self.receiver),
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveWithdraw) and
self.channel_address == other.channel_address and
self.hashlock == other.hashlock and
self.receiver == other.receiver and
self.secret == other.secret
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveClosed(StateChange):
def __init__(self, channel_address, closing_address, block_number):
self.channel_address = channel_address
self.closing_address = closing_address
self.block_number = block_number
def __repr__(self):
return '<ContractReceiveClosed channel:{} closing:{} block_number:{}>'.format(
pex(self.channel_address),
pex(self.closing_address),
self.block_number,
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveClosed) and
self.channel_address == other.channel_address and
self.closing_address == other.closing_address and
self.block_number == other.block_number
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveSettled(StateChange):
def __init__(self, channel_address, block_number):
self.channel_address = channel_address
self.block_number = block_number
def __repr__(self):
return '<ContractReceiveSettled channel:{} block_number:{}>'.format(
pex(self.channel_address),
self.block_number,
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveSettled) and
self.channel_address == other.channel_address and
self.block_number == other.block_number
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveBalance(StateChange):
def __init__(
self,
channel_address,
token_address,
participant_address,
balance,
block_number):
self.channel_address = channel_address
self.token_address = token_address
self.participant_address = participant_address
self.balance = balance
self.block_number = block_number
def __repr__(self):
return (
'<ContractReceiveBalance'
' channel:{} token:{} participant:{} balance:{} block_number:{}'
'>'
).format(
pex(self.channel_address),
pex(self.token_address),
pex(self.participant_address),
self.balance,
self.block_number,
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveBalance) and
self.channel_address == other.channel_address and
self.token_address == other.token_address and
self.participant_address == other.participant_address and
self.block_number == other.block_number
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveNewChannel(StateChange):
def __init__(
self,
manager_address,
channel_address,
participant1,
participant2,
settle_timeout):
self.manager_address = manager_address
self.channel_address = channel_address
self.participant1 = participant1
self.participant2 = participant2
self.settle_timeout = settle_timeout
def __repr__(self):
return (
'<ContractReceiveNewChannel'
' manager:{} channel:{} participant1:{} participant2:{} settle_timeout:{}'
'>'
).format(
pex(self.manager_address),
pex(self.channel_address),
pex(self.participant1),
pex(self.participant2),
self.settle_timeout
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveNewChannel) and
self.manager_address == other.manager_address and
self.channel_address == other.channel_address and
self.participant1 == other.participant1 and
self.participant2 == other.participant2 and
self.settle_timeout == other.settle_timeout
)
def __ne__(self, other):
return not self.__eq__(other)
class ContractReceiveTokenAdded(StateChange):
def __init__(self, registry_address, token_address, manager_address):
self.registry_address = registry_address
self.token_address = token_address
self.manager_address = manager_address
def __repr__(self):
return '<ContractReceiveTokenAdded registry:{} token:{} manager:{}>'.format(
pex(self.registry_address),
pex(self.token_address),
pex(self.manager_address),
)
def __eq__(self, other):
return (
isinstance(other, ContractReceiveTokenAdded) and
self.registry_address == other.registry_address and
self.token_address == other.token_address and
self.manager_address == other.manager_address
)
def __ne__(self, other):
return not self.__eq__(other)
| true | true |
f7335e2c76e6d0dddc224e3f3cac3a81fe26e270 | 4,465 | py | Python | profiles_api/views.py | nahmad33/profiles-rest-api | 1bc0123d4494c047cd7fc271349f330296ec7aa1 | [
"MIT"
] | null | null | null | profiles_api/views.py | nahmad33/profiles-rest-api | 1bc0123d4494c047cd7fc271349f330296ec7aa1 | [
"MIT"
] | 8 | 2019-12-04T23:23:13.000Z | 2021-06-10T18:15:48.000Z | profiles_api/views.py | nahmad33/profiles-rest-api | 1bc0123d4494c047cd7fc271349f330296ec7aa1 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from profiles_api import serializers
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
    """Demonstration APIView with one handler per HTTP verb."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features."""
        an_apiview = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': an_apiview})

    def post(self, request):
        """Create a hello message with our name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def put(self, request, pk=None):
        """Handle updating an object."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle partial update of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet with one handler per router action."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message."""
        a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLS using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello!', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a new hello message."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object."""
        return Response({'http_method': 'DELETE'})
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user profiles.

    Token-authenticated; a user may only modify their own profile.
    Supports ?search= filtering over name and email.
    """

    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginAPIView(ObtainAuthToken):
    """Handle creating user authentication tokens.

    Default renderer classes are set so the endpoint appears in the
    browsable API (ObtainAuthToken does not enable them itself).
    """

    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating reading and updating profile feed items"""
authentication_classes= (TokenAuthentication,)
serializer_class=serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus,IsAuthenticatedOrReadOnly)
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
| 36.900826 | 80 | 0.670101 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from profiles_api import serializers
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
an_apiview = [
'Uses HTTP methods as functions (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your logic',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
serializer_class = serializers.HelloSerializer
def list(self, request):
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLS using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
return Response({'http_method': 'DELETE'})
class ProfileViewSet(viewsets.ModelViewSet):
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginAPIView(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
authentication_classes= (TokenAuthentication,)
serializer_class=serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus,IsAuthenticatedOrReadOnly)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
| true | true |
f7335ebb44389b47d5b4cbe414212e31ed9b6d99 | 1,253 | py | Python | workflow_scripts/test_generated_backend.py | L-Net-1992/onnx | acc127219b45bc27b0180b1fdc08299eac81b167 | [
"Apache-2.0"
] | 1 | 2022-03-04T03:29:37.000Z | 2022-03-04T03:29:37.000Z | workflow_scripts/test_generated_backend.py | alsj213/onnx | 35092895d9bf3592e58f4710d098f8131afef259 | [
"Apache-2.0"
] | null | null | null | workflow_scripts/test_generated_backend.py | alsj213/onnx | 35092895d9bf3592e58f4710d098f8131afef259 | [
"Apache-2.0"
] | 1 | 2022-03-27T19:17:02.000Z | 2022-03-27T19:17:02.000Z | # SPDX-License-Identifier: Apache-2.0
import config
import onnx
import os
import sys
def main():
script_dir = os.path.dirname(os.path.abspath(__file__))
directory = os.path.join(script_dir, '../onnx/backend/test/data/node')
count = failed_count = 0
for root, _, files in os.walk(directory):
for file in files:
if file.endswith('.onnx'):
test_dir_name = os.path.basename(os.path.normpath(root))
onnx_model_path = os.path.join(root, file)
try:
model = onnx.load(onnx_model_path)
# check model by ONNX checker
inferred_model = onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
onnx.checker.check_model(inferred_model)
except Exception as e:
failed_count += 1
print("{} failed: {}".format(test_dir_name, e))
count += 1
print('-----------------------------')
if failed_count == 0:
print("{} backend models passed.".format(count))
else:
print("{} failed in {} backend models.".format(failed_count, count))
sys.exit(1)
if __name__ == '__main__':
main()
| 33.864865 | 112 | 0.561852 |
import config
import onnx
import os
import sys
def main():
script_dir = os.path.dirname(os.path.abspath(__file__))
directory = os.path.join(script_dir, '../onnx/backend/test/data/node')
count = failed_count = 0
for root, _, files in os.walk(directory):
for file in files:
if file.endswith('.onnx'):
test_dir_name = os.path.basename(os.path.normpath(root))
onnx_model_path = os.path.join(root, file)
try:
model = onnx.load(onnx_model_path)
inferred_model = onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
onnx.checker.check_model(inferred_model)
except Exception as e:
failed_count += 1
print("{} failed: {}".format(test_dir_name, e))
count += 1
print('-----------------------------')
if failed_count == 0:
print("{} backend models passed.".format(count))
else:
print("{} failed in {} backend models.".format(failed_count, count))
sys.exit(1)
if __name__ == '__main__':
main()
| true | true |
f7336134fc7d178414150829b94c82c917adf6a3 | 758 | py | Python | multitask/thread/05_mutexlock.py | Leo-super/python_base | 733c4177b8fccab88563d91f7f262773d612f7a2 | [
"MIT"
] | null | null | null | multitask/thread/05_mutexlock.py | Leo-super/python_base | 733c4177b8fccab88563d91f7f262773d612f7a2 | [
"MIT"
] | null | null | null | multitask/thread/05_mutexlock.py | Leo-super/python_base | 733c4177b8fccab88563d91f7f262773d612f7a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import time
g_num = 0
def work1(num):
global g_num
for i in range(num):
mutex.acquire() # 上锁
g_num += 1
mutex.release() # 解锁
# print("----in work1, g_num is %d---" % g_num)
def work2(num):
global g_num
for i in range(num):
mutex.acquire() # 上锁
g_num += 1
mutex.release() # 解锁
# print("----in work2, g_num is %d---" % g_num)
# 创建⼀个互斥锁
# 默认是未上锁的状态
mutex = threading.Lock()
t1 = threading.Thread(target=work1, args=(1000000,))
t2 = threading.Thread(target=work2, args=(1000000,))
t1.start()
t2.start()
# 如果t1、t2未执行完,则休眠1s
while len(threading.enumerate()) != 1:
time.sleep(1)
print("g_num最终值:%d" % g_num) | 18.487805 | 55 | 0.583113 |
import threading
import time
g_num = 0
def work1(num):
global g_num
for i in range(num):
mutex.acquire()
g_num += 1
mutex.release()
def work2(num):
global g_num
for i in range(num):
mutex.acquire()
g_num += 1
mutex.release()
mutex = threading.Lock()
t1 = threading.Thread(target=work1, args=(1000000,))
t2 = threading.Thread(target=work2, args=(1000000,))
t1.start()
t2.start()
while len(threading.enumerate()) != 1:
time.sleep(1)
print("g_num最终值:%d" % g_num) | true | true |
f7336155b5a987e374f7ee577fe28f3ad39fd62a | 3,303 | py | Python | esm/model.py | konstin/esm | a39894c079ce314e1c0aaa607e8ae498111910a0 | [
"MIT"
] | null | null | null | esm/model.py | konstin/esm | a39894c079ce314e1c0aaa607e8ae498111910a0 | [
"MIT"
] | null | null | null | esm/model.py | konstin/esm | a39894c079ce314e1c0aaa607e8ae498111910a0 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import TransformerLayer, PositionalEmbedding # noqa
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet_size, padding_idx):
super().__init__()
self.args = args
self.alphabet_size = alphabet_size
self.padding_idx = padding_idx
self.embed_scale = math.sqrt(self.args.embed_dim)
self._init_submodules()
def _init_submodules(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.embed_positions = PositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads
)
for _ in range(self.args.layers)
]
)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[]):
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx)
if not padding_mask.any():
padding_mask = None
x = self.embed_scale * self.embed_tokens(tokens)
x = x + self.embed_positions(tokens)
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
# (B, T, E) => (T, B, E)
x = x.transpose(0, 1)
for layer_idx, layer in enumerate(self.layers):
x, _ = layer(x, self_attn_padding_mask=padding_mask)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
# (T, B, E) => (B, T, E)
x = x.transpose(0, 1)
result = {"logits": x, "representations": hidden_representations}
return result
@property
def num_layers(self):
return self.args.layers
| 31.759615 | 91 | 0.594611 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import TransformerLayer, PositionalEmbedding
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet_size, padding_idx):
super().__init__()
self.args = args
self.alphabet_size = alphabet_size
self.padding_idx = padding_idx
self.embed_scale = math.sqrt(self.args.embed_dim)
self._init_submodules()
def _init_submodules(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.embed_positions = PositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads
)
for _ in range(self.args.layers)
]
)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[]):
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx)
if not padding_mask.any():
padding_mask = None
x = self.embed_scale * self.embed_tokens(tokens)
x = x + self.embed_positions(tokens)
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
x = x.transpose(0, 1)
for layer_idx, layer in enumerate(self.layers):
x, _ = layer(x, self_attn_padding_mask=padding_mask)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
x = x.transpose(0, 1)
result = {"logits": x, "representations": hidden_representations}
return result
@property
def num_layers(self):
return self.args.layers
| true | true |
f733615c76692988cddbcb5a9d2af2e0862a723d | 716 | py | Python | setup.py | ZeitOnline/bugsnag_exporter | 5fccc42f7ae16d25fac086009b8f29a7316e2127 | [
"BSD-3-Clause"
] | 3 | 2019-11-29T23:09:54.000Z | 2022-03-29T21:41:42.000Z | setup.py | ZeitOnline/bugsnag_exporter | 5fccc42f7ae16d25fac086009b8f29a7316e2127 | [
"BSD-3-Clause"
] | 2 | 2021-06-24T11:17:26.000Z | 2021-07-28T16:13:00.000Z | setup.py | ZeitOnline/bugsnag_exporter | 5fccc42f7ae16d25fac086009b8f29a7316e2127 | [
"BSD-3-Clause"
] | 2 | 2021-07-27T11:42:45.000Z | 2021-09-17T10:47:38.000Z | from setuptools import setup, find_packages
setup(
name='bugsnag_exporter',
version='1.3.0.dev0',
author='Wolfgang Schnerring',
author_email='wolfgang.schnerring@zeit.de',
url='https://github.com/ZeitOnline/bugsnag_exporter',
description='',
long_description=(
open('README.rst').read() +
'\n\n' +
open('CHANGES.txt').read()),
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
install_requires=[
'prometheus_client',
'requests',
'setuptools',
],
entry_points={'console_scripts': [
'bugsnag_exporter = bugsnag_exporter:main',
]}
)
| 24.689655 | 57 | 0.618715 | from setuptools import setup, find_packages
setup(
name='bugsnag_exporter',
version='1.3.0.dev0',
author='Wolfgang Schnerring',
author_email='wolfgang.schnerring@zeit.de',
url='https://github.com/ZeitOnline/bugsnag_exporter',
description='',
long_description=(
open('README.rst').read() +
'\n\n' +
open('CHANGES.txt').read()),
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
install_requires=[
'prometheus_client',
'requests',
'setuptools',
],
entry_points={'console_scripts': [
'bugsnag_exporter = bugsnag_exporter:main',
]}
)
| true | true |
f733624b52386015544bec768023286f7efa8a41 | 131 | py | Python | app/repositories/__init__.py | VadymHutei/ukubuka-back | acd56c545b50fb65ed764c19bdd03a42be969ce4 | [
"MIT"
] | null | null | null | app/repositories/__init__.py | VadymHutei/ukubuka-back | acd56c545b50fb65ed764c19bdd03a42be969ce4 | [
"MIT"
] | null | null | null | app/repositories/__init__.py | VadymHutei/ukubuka-back | acd56c545b50fb65ed764c19bdd03a42be969ce4 | [
"MIT"
] | null | null | null | from repositories.category import CategoryRepo
from repositories.product import ProductRepo
from repositories.menu import MenuRepo
| 32.75 | 46 | 0.885496 | from repositories.category import CategoryRepo
from repositories.product import ProductRepo
from repositories.menu import MenuRepo
| true | true |
f733625ac95acffdcaa0b4adec910eb366c92187 | 4,487 | py | Python | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/ModifyCasterLayoutRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/ModifyCasterLayoutRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-live/aliyunsdklive/request/v20161101/ModifyCasterLayoutRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class ModifyCasterLayoutRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'ModifyCasterLayout','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BlendLists(self):
return self.get_query_params().get('BlendLists')
def set_BlendLists(self, BlendLists):
for depth1 in range(len(BlendLists)):
if BlendLists[depth1] is not None:
self.add_query_param('BlendList.' + str(depth1 + 1) , BlendLists[depth1])
def get_LayoutId(self):
return self.get_query_params().get('LayoutId')
def set_LayoutId(self,LayoutId):
self.add_query_param('LayoutId',LayoutId)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AudioLayers(self):
return self.get_query_params().get('AudioLayers')
def set_AudioLayers(self, AudioLayers):
for depth1 in range(len(AudioLayers)):
if AudioLayers[depth1].get('VolumeRate') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.VolumeRate', AudioLayers[depth1].get('VolumeRate'))
if AudioLayers[depth1].get('ValidChannel') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.ValidChannel', AudioLayers[depth1].get('ValidChannel'))
if AudioLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.FixedDelayDuration', AudioLayers[depth1].get('FixedDelayDuration'))
def get_VideoLayers(self):
return self.get_query_params().get('VideoLayers')
def set_VideoLayers(self, VideoLayers):
for depth1 in range(len(VideoLayers)):
if VideoLayers[depth1].get('FillMode') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FillMode', VideoLayers[depth1].get('FillMode'))
if VideoLayers[depth1].get('HeightNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.HeightNormalized', VideoLayers[depth1].get('HeightNormalized'))
if VideoLayers[depth1].get('WidthNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.WidthNormalized', VideoLayers[depth1].get('WidthNormalized'))
if VideoLayers[depth1].get('PositionRefer') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionRefer', VideoLayers[depth1].get('PositionRefer'))
if VideoLayers[depth1].get('PositionNormalized') is not None:
for depth2 in range(len(VideoLayers[depth1].get('PositionNormalized'))):
if VideoLayers[depth1].get('PositionNormalized')[depth2] is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionNormalized.' + str(depth2 + 1) , VideoLayers[depth1].get('PositionNormalized')[depth2])
if VideoLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FixedDelayDuration', VideoLayers[depth1].get('FixedDelayDuration'))
def get_MixLists(self):
return self.get_query_params().get('MixLists')
def set_MixLists(self, MixLists):
for depth1 in range(len(MixLists)):
if MixLists[depth1] is not None:
self.add_query_param('MixList.' + str(depth1 + 1) , MixLists[depth1]) | 45.785714 | 159 | 0.733452 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class ModifyCasterLayoutRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'ModifyCasterLayout','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BlendLists(self):
return self.get_query_params().get('BlendLists')
def set_BlendLists(self, BlendLists):
for depth1 in range(len(BlendLists)):
if BlendLists[depth1] is not None:
self.add_query_param('BlendList.' + str(depth1 + 1) , BlendLists[depth1])
def get_LayoutId(self):
return self.get_query_params().get('LayoutId')
def set_LayoutId(self,LayoutId):
self.add_query_param('LayoutId',LayoutId)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AudioLayers(self):
return self.get_query_params().get('AudioLayers')
def set_AudioLayers(self, AudioLayers):
for depth1 in range(len(AudioLayers)):
if AudioLayers[depth1].get('VolumeRate') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.VolumeRate', AudioLayers[depth1].get('VolumeRate'))
if AudioLayers[depth1].get('ValidChannel') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.ValidChannel', AudioLayers[depth1].get('ValidChannel'))
if AudioLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.FixedDelayDuration', AudioLayers[depth1].get('FixedDelayDuration'))
def get_VideoLayers(self):
return self.get_query_params().get('VideoLayers')
def set_VideoLayers(self, VideoLayers):
for depth1 in range(len(VideoLayers)):
if VideoLayers[depth1].get('FillMode') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FillMode', VideoLayers[depth1].get('FillMode'))
if VideoLayers[depth1].get('HeightNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.HeightNormalized', VideoLayers[depth1].get('HeightNormalized'))
if VideoLayers[depth1].get('WidthNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.WidthNormalized', VideoLayers[depth1].get('WidthNormalized'))
if VideoLayers[depth1].get('PositionRefer') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionRefer', VideoLayers[depth1].get('PositionRefer'))
if VideoLayers[depth1].get('PositionNormalized') is not None:
for depth2 in range(len(VideoLayers[depth1].get('PositionNormalized'))):
if VideoLayers[depth1].get('PositionNormalized')[depth2] is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionNormalized.' + str(depth2 + 1) , VideoLayers[depth1].get('PositionNormalized')[depth2])
if VideoLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FixedDelayDuration', VideoLayers[depth1].get('FixedDelayDuration'))
def get_MixLists(self):
return self.get_query_params().get('MixLists')
def set_MixLists(self, MixLists):
for depth1 in range(len(MixLists)):
if MixLists[depth1] is not None:
self.add_query_param('MixList.' + str(depth1 + 1) , MixLists[depth1]) | true | true |
f73363a78664137e9e5f7d5ca28b784582396acd | 301 | py | Python | bcc/config/docs.py | yuvabedev/Bcc | c62b949d31f9571d99d91439c1ee5e0dc70e3763 | [
"MIT"
] | null | null | null | bcc/config/docs.py | yuvabedev/Bcc | c62b949d31f9571d99d91439c1ee5e0dc70e3763 | [
"MIT"
] | null | null | null | bcc/config/docs.py | yuvabedev/Bcc | c62b949d31f9571d99d91439c1ee5e0dc70e3763 | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/bcc"
# docs_base_url = "https://[org_name].github.io/bcc"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Bcc"
| 25.083333 | 68 | 0.710963 |
def get_context(context):
context.brand_html = "Bcc"
| true | true |
f733640869f502ecd9982d60021c2246a22b2fbd | 1,689 | py | Python | tests/test_workflow_get_version_single.py | Optimally-com/temporal-python-sdk | f636e5234c3b3b7d52cc891c9c40e3d257493806 | [
"MIT"
] | null | null | null | tests/test_workflow_get_version_single.py | Optimally-com/temporal-python-sdk | f636e5234c3b3b7d52cc891c9c40e3d257493806 | [
"MIT"
] | null | null | null | tests/test_workflow_get_version_single.py | Optimally-com/temporal-python-sdk | f636e5234c3b3b7d52cc891c9c40e3d257493806 | [
"MIT"
] | null | null | null | import pytest
from temporal import DEFAULT_VERSION
from temporal.workflow import workflow_method, WorkflowClient, Workflow
TASK_QUEUE = "test_workflow_get_version_single_tq"
NAMESPACE = "default"
version_found_in_step_1_0 = None
version_found_in_step_1_1 = None
version_found_in_step_2_0 = None
version_found_in_step_2_1 = None
class GreetingWorkflow:
@workflow_method(task_queue=TASK_QUEUE)
async def get_greeting(self) -> None:
raise NotImplementedError
class GreetingWorkflowImpl(GreetingWorkflow):
async def get_greeting(self):
global version_found_in_step_1_0, version_found_in_step_1_1
global version_found_in_step_2_0, version_found_in_step_2_1
version_found_in_step_1_0 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
version_found_in_step_1_1 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
await Workflow.sleep(60)
version_found_in_step_2_0 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
version_found_in_step_2_1 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
@pytest.mark.asyncio
@pytest.mark.worker_config(
NAMESPACE, TASK_QUEUE, activities=[], workflows=[GreetingWorkflowImpl]
)
async def test(worker):
client = WorkflowClient.new_client(namespace=NAMESPACE)
greeting_workflow: GreetingWorkflow = client.new_workflow_stub(GreetingWorkflow)
await greeting_workflow.get_greeting()
assert version_found_in_step_1_0 == 2
assert version_found_in_step_1_1 == 2
assert version_found_in_step_2_0 == 2
assert version_found_in_step_2_1 == 2
| 31.867925 | 84 | 0.748372 | import pytest
from temporal import DEFAULT_VERSION
from temporal.workflow import workflow_method, WorkflowClient, Workflow
TASK_QUEUE = "test_workflow_get_version_single_tq"
NAMESPACE = "default"
version_found_in_step_1_0 = None
version_found_in_step_1_1 = None
version_found_in_step_2_0 = None
version_found_in_step_2_1 = None
class GreetingWorkflow:
@workflow_method(task_queue=TASK_QUEUE)
async def get_greeting(self) -> None:
raise NotImplementedError
class GreetingWorkflowImpl(GreetingWorkflow):
async def get_greeting(self):
global version_found_in_step_1_0, version_found_in_step_1_1
global version_found_in_step_2_0, version_found_in_step_2_1
version_found_in_step_1_0 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
version_found_in_step_1_1 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
await Workflow.sleep(60)
version_found_in_step_2_0 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
version_found_in_step_2_1 = Workflow.get_version(
"first-item", DEFAULT_VERSION, 2
)
@pytest.mark.asyncio
@pytest.mark.worker_config(
NAMESPACE, TASK_QUEUE, activities=[], workflows=[GreetingWorkflowImpl]
)
async def test(worker):
client = WorkflowClient.new_client(namespace=NAMESPACE)
greeting_workflow: GreetingWorkflow = client.new_workflow_stub(GreetingWorkflow)
await greeting_workflow.get_greeting()
assert version_found_in_step_1_0 == 2
assert version_found_in_step_1_1 == 2
assert version_found_in_step_2_0 == 2
assert version_found_in_step_2_1 == 2
| true | true |
f7336498ebcfe3ed197095006aff2fe2f8850db4 | 7,743 | py | Python | cardinal_pythonlib/django/function_cache.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | cardinal_pythonlib/django/function_cache.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | cardinal_pythonlib/django/function_cache.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# cardinal_pythonlib/django/function_cache.py
"""
===============================================================================
Original code copyright (C) 2009-2020 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Cache the results of function calls using Django.**
Based on https://github.com/rchrd2/django-cache-decorator
but fixed for Python 3 / Django 1.10.
"""
import hashlib
from typing import Any, Callable, Dict, Tuple
# noinspection PyUnresolvedReferences
from django.core.cache import cache # default cache
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
from cardinal_pythonlib.json.serialize import json_encode
# Module-level logger (null handler by default; brace-style formatting).
log = get_brace_style_log_with_null_handler(__name__)
# Type aliases for readability when handling arbitrary decorated callables:
FunctionType = Callable[..., Any]  # any callable, any signature
ArgsType = Tuple[Any, ...]  # positional arguments, as captured by *args
KwargsType = Dict[str, Any]  # keyword arguments, as captured by **kwargs
def get_call_signature(fn: FunctionType,
                       args: ArgsType,
                       kwargs: KwargsType,
                       debug_cache: bool = False) -> str:
    """
    Build a JSON string describing a function call (its "call signature").

    Args:
        fn: the function being called
        args: positional arguments of the call
        kwargs: keyword arguments of the call
        debug_cache: log the resulting signature at DEBUG level?

    Returns:
        a JSON-encoded representation of
        ``(fn.__qualname__, args, kwargs)``, suitable for passing to
        ``make_cache_key`` to obtain an actual cache key

    Raises:
        TypeError: if the function's owning class or its arguments are not
            JSON-serializable
    """
    # At this point the function carries no __self__ attribute (as in
    # fn.__self__), even if it's a member function; __qualname__ is what
    # distinguishes methods of different classes.
    components = (fn.__qualname__, args, kwargs)
    try:
        signature = json_encode(components)
    except TypeError:
        log.critical(
            "\nTo decorate using @django_cache_function without specifying "
            "cache_key, the decorated function's owning class and its "
            "parameters must be JSON-serializable (see jsonfunc.py, "
            "django_cache_fn.py).\n")
        raise
    if debug_cache:
        log.debug("Making call signature {!r}", signature)
    return signature
def make_cache_key(call_signature: str,
                   debug_cache: bool = False) -> str:
    """
    Convert a call signature (see ``get_call_signature``) into a string
    suitable for use as a cache key.

    The key is the hex MD5 digest of the JSON-encoded call signature (MD5 is
    used only as a compact fingerprint here, not for security). A JSON
    representation is used rather than ``str()``/``repr()`` because JSON is
    reversible and stable across pickling/unpickling by the cache, whereas
    ``repr()`` may embed memory addresses that change between processes.
    Hash collisions are therefore possible but are detected by the caller
    (``django_cache_function``), which stores the call signature alongside
    the cached result.
    """
    digest = hashlib.md5(call_signature.encode("utf-8")).hexdigest()
    if debug_cache:
        log.debug("Making cache key {} from call_signature {!r}",
                  digest, call_signature)
    return digest
def django_cache_function(timeout: int = 5 * 60,
                          cache_key: str = '',
                          debug_cache: bool = False):
    """
    Decorator to add caching to a function in Django.
    Uses the Django default cache.

    Args:
        timeout: timeout in seconds; use None for "never expire", as 0 means
            "do not cache".
        cache_key: optional cache key to use (if falsy, we'll invent one)
        debug_cache: show hits/misses?
    """
    # Local import keeps the module-level dependencies unchanged.
    from functools import wraps
    cache_key = cache_key or None

    def decorator(fn):
        # functools.wraps preserves the decorated function's __name__,
        # __doc__, __module__ etc., so introspection and debugging still
        # see the original function rather than "wrapper".
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # - NOTE that Django returns None from cache.get() for "not in
            #   cache", so can't cache a None value;
            #   https://docs.djangoproject.com/en/1.10/topics/cache/#basic-usage  # noqa
            # - We need to store a bit more than just the function result
            #   anyway, to detect hash collisions when the user doesn't specify
            #   the cache_key, so we may as well use that format even if the
            #   user does specify the cache_key, and then we can store a None
            #   result properly as well.
            if cache_key:
                # User specified a cache key. This is easy.
                call_sig = ''
                _cache_key = cache_key
                check_stored_call_sig = False
            else:
                # User didn't specify a cache key, so we'll do one
                # automatically. Since we do this via a hash, there is a small
                # but non-zero chance of a hash collision.
                call_sig = get_call_signature(fn, args, kwargs)
                _cache_key = make_cache_key(call_sig)
                check_stored_call_sig = True
            if debug_cache:
                # NOTE(review): logged at CRITICAL although it is a
                # debug-only message; kept as-is to preserve behaviour.
                log.critical("Checking cache for key: " + _cache_key)
            cache_result_tuple = cache.get(_cache_key)  # TALKS TO CACHE HERE
            if cache_result_tuple is None:
                if debug_cache:
                    log.debug("Cache miss")
            else:
                if debug_cache:
                    log.debug("Cache hit")
                cached_call_sig, func_result = cache_result_tuple
                if (not check_stored_call_sig) or cached_call_sig == call_sig:
                    return func_result
                log.warning(
                    f"... Cache hit was due to hash collision; "
                    f"cached_call_sig {cached_call_sig!r} != "
                    f"call_sig {call_sig!r}")
            # If we get here, either it wasn't in the cache, or something
            # was in the cache that matched by cache_key but was actually a
            # hash collision. Either way, we must do the real work.
            func_result = fn(*args, **kwargs)
            cache_result_tuple = (call_sig, func_result)
            cache.set(key=_cache_key, value=cache_result_tuple,
                      timeout=timeout)  # TALKS TO CACHE HERE
            return func_result
        return wrapper
    return decorator
| 42.779006 | 88 | 0.62069 |
import hashlib
from typing import Any, Callable, Dict, Tuple
from django.core.cache import cache
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
from cardinal_pythonlib.json.serialize import json_encode
log = get_brace_style_log_with_null_handler(__name__)
FunctionType = Callable[..., Any]
ArgsType = Tuple[Any, ...]
KwargsType = Dict[str, Any]
def get_call_signature(fn: FunctionType,
                       args: ArgsType,
                       kwargs: KwargsType,
                       debug_cache: bool = False) -> str:
    """
    Take a function and its args/kwargs, and produce a string description of
    the function call (the call signature) suitable for indirect use as a
    cache key. The string is a JSON representation of
    ``(fn.__qualname__, args, kwargs)``; raises TypeError if the arguments
    are not JSON-serializable.
    """
    # Note that a bound method won't have its __self__ argument (as in
    # fn.__self__) at this point, so only the explicit args are encoded.
    try:
        call_sig = json_encode((fn.__qualname__, args, kwargs))
    except TypeError:
        log.critical(
            "\nTo decorate using @django_cache_function without specifying "
            "cache_key, the decorated function's owning class and its "
            "parameters must be JSON-serializable (see jsonfunc.py, "
            "django_cache_fn.py).\n")
        raise
    if debug_cache:
        log.debug("Making call signature {!r}", call_sig)
    return call_sig
def make_cache_key(call_signature: str,
                   debug_cache: bool = False) -> str:
    """
    Take a call signature (see ``get_call_signature``) and produce a string
    suitable for use as a cache key: the hex MD5 digest of the signature.
    MD5 serves as a compact fingerprint here, not a security measure; hash
    collisions are possible and are detected by the caller, which stores the
    call signature alongside the cached result.
    """
    key = hashlib.md5(call_signature.encode("utf-8")).hexdigest()
    if debug_cache:
        log.debug("Making cache key {} from call_signature {!r}",
                  key, call_signature)
    return key
def django_cache_function(timeout: int = 5 * 60,
                          cache_key: str = '',
                          debug_cache: bool = False):
    """
    Decorator to add caching to a function in Django, using the Django
    default cache.

    Args:
        timeout: timeout in seconds; use None for "never expire" (0 means
            "do not cache").
        cache_key: optional cache key to use (if falsy, one is derived from
            the call signature).
        debug_cache: log hits/misses?
    """
    cache_key = cache_key or None
    def decorator(fn):
        def wrapper(*args, **kwargs):
            # - NOTE that Django returns None from cache.get() for "not in
            #   cache", so can't cache a None value.
            # - We need to store a bit more than just the function result
            #   anyway, to detect hash collisions when the user doesn't specify
            # the cache_key, so we may as well use that format even if the
            # user does specify the cache_key, and then we can store a None
            # result properly as well.
            if cache_key:
                # User specified a cache key. This is easy.
                call_sig = ''
                _cache_key = cache_key
                check_stored_call_sig = False
            else:
                # User didn't specify a cache key, so we'll do one
                # automatically. Since we do this via a hash, there is a small
                # but non-zero chance of a hash collision.
                call_sig = get_call_signature(fn, args, kwargs)
                _cache_key = make_cache_key(call_sig)
                check_stored_call_sig = True
            if debug_cache:
                log.critical("Checking cache for key: " + _cache_key)
            cache_result_tuple = cache.get(_cache_key)  # TALKS TO CACHE HERE
            if cache_result_tuple is None:
                if debug_cache:
                    log.debug("Cache miss")
            else:
                if debug_cache:
                    log.debug("Cache hit")
                cached_call_sig, func_result = cache_result_tuple
                # Only trust the hit if the stored call signature matches
                # (guards against MD5 hash collisions on auto-made keys).
                if (not check_stored_call_sig) or cached_call_sig == call_sig:
                    return func_result
                log.warning(
                    f"... Cache hit was due to hash collision; "
                    f"cached_call_sig {cached_call_sig!r} != "
                    f"call_sig {call_sig!r}")
            # If we get here, either it wasn't in the cache, or something
            # in the cache matched by key but was a hash collision;
            # either way, do the real work and store the result.
            func_result = fn(*args, **kwargs)
            cache_result_tuple = (call_sig, func_result)
            cache.set(key=_cache_key, value=cache_result_tuple,
                      timeout=timeout)
            return func_result
        return wrapper
    return decorator
| true | true |
f733650de6530fb57a4ba3608b453116c353d1ef | 13,365 | py | Python | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T23:38:53.000Z | 2020-08-20T23:38:53.000Z | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 88 | 2015-09-03T11:50:41.000Z | 2021-02-18T19:13:04.000Z | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T23:38:05.000Z | 2020-08-20T23:38:05.000Z | # This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
import os
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from block_server import BlockServer
from BlockServer.core.active_config_holder import ActiveConfigHolder
from BlockServer.core.config_list_manager import InvalidDeleteException
from BlockServer.core.file_path_manager import FILEPATH_MANAGER
from BlockServer.core.on_the_fly_pv_interface import OnTheFlyPvInterface
from BlockServer.fileIO.schema_checker import ConfigurationSchemaChecker
from lxml import etree
from server_common.common_exceptions import MaxAttemptsExceededException
from server_common.utilities import print_and_log, compress_and_hex, create_pv_name, \
convert_to_json, convert_from_json
from BlockServer.synoptic.synoptic_file_io import SynopticFileIO
# Synoptics PVs are of the form IN:DEMO:SYNOPTICS:XXXXX (no BLOCKSERVER in the name)
# This is to allow longer synoptic names without exceeded the maximum allowed length for PVs
SYNOPTIC_PRE = "SYNOPTICS:"
SYNOPTIC_GET = ":GET"
SYNOPTIC_SET = ":SET"
SYNOPTIC_NAMES = "NAMES"
SYNOPTIC_GET_DEFAULT = "GET_DEFAULT"
SYNOPTIC_BLANK = "__BLANK__"
SYNOPTIC_SET_DETAILS = "SET_DETAILS"
SYNOPTIC_DELETE = "DELETE"
SYNOPTIC_SCHEMA = "SCHEMA"
SYNOPTIC_SCHEMA_FILE = "synoptic.xsd"
class SynopticManager(OnTheFlyPvInterface):
    """Class for managing the PVs associated with synoptics"""

    def __init__(self, block_server: 'BlockServer', schema_folder: str,
                 active_configholder: 'ActiveConfigHolder',
                 file_io: SynopticFileIO = None):
        """Constructor.

        Args:
            block_server: A reference to the BlockServer instance
            schema_folder: The filepath for the synoptic schema
            active_configholder: A reference to the active configuration
            file_io: Responsible for file IO (defaults to a fresh
                SynopticFileIO instance)
        """
        # NOTE: 'ActiveConfigHolder' is imported only under TYPE_CHECKING,
        # so its annotation must be a string to avoid NameError at import
        # time (this module has no `from __future__ import annotations`).
        super(SynopticManager, self).__init__()
        self.pvs_to_write.extend([SYNOPTIC_PRE + SYNOPTIC_DELETE, SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS])
        self._directory = FILEPATH_MANAGER.synoptic_dir
        self._schema_folder = schema_folder
        self._synoptic_pvs = dict()
        self._bs = block_server
        self._activech = active_configholder
        # Build the default per-instance rather than using a default argument
        # evaluated once at definition time (shared-instance pitfall).
        self._file_io = file_io if file_io is not None else SynopticFileIO()
        self._default_syn_xml = b""
        self._create_standard_pvs()
        self._load_initial()

    def handle_pv_write(self, pv: str, data: str):
        """Dispatch a write to one of the synoptic PVs (delete or save)."""
        try:
            if pv == SYNOPTIC_PRE + SYNOPTIC_DELETE:
                self.delete(convert_from_json(data))
                self.update_monitors()
            elif pv == SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS:
                self.save_synoptic_xml(bytes(data, encoding="utf-8"))
                self.update_monitors()
        except IOError as err:
            print_and_log(f"Error accessing synoptic file: {err}", "MAJOR")
        except Exception as err:
            print_and_log(f"Error writing to PV {pv}: {err}", "MAJOR")

    def handle_pv_read(self, pv):
        # Nothing to do as it is all handled by monitors
        pass

    def update_monitors(self):
        """Push the default synoptic and synoptic name list out to monitors."""
        with self._bs.monitor_lock:
            print_and_log("Updating synoptic monitors")
            self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, compress_and_hex(str(self.get_default_synoptic_xml(), encoding="utf-8")))
            names = convert_to_json(self.get_synoptic_list())
            self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_NAMES, compress_and_hex(names))
            self._bs.updatePVs()
            print_and_log("Finished updating synoptic monitors")

    def on_config_change(self, full_init=False):
        """React to a configuration change by reloading the default synoptic."""
        # If the config has a default synoptic then set the PV to that
        default = self._activech.get_config_meta().synoptic
        self.set_default_synoptic(default)
        self.update_monitors()

    def _create_standard_pvs(self):
        """Create the fixed set of synoptic PVs and seed the static ones."""
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_NAMES, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_DELETE, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, 16000)
        # Set values for PVs that don't change
        self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET,
                             compress_and_hex(self.get_blank_synoptic()))
        self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, compress_and_hex(self.get_synoptic_schema()))

    def _load_initial(self):
        """Create the PVs for all the synoptics found in the synoptics directory."""
        for f in self._file_io.get_list_synoptic_files(self._directory):
            # Load the data, checking the schema
            try:
                data = self._file_io.read_synoptic_file(self._directory, f)
                ConfigurationSchemaChecker.check_xml_matches_schema(
                    os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), data, "Synoptic")
                # Get the synoptic name
                self._create_pv(data)
            except MaxAttemptsExceededException:
                print_and_log(f"Could not open synoptic file {f}. Please check the file is "
                              f"not in use by another process.", "MAJOR")
            except Exception as err:
                print_and_log(f"Error creating synoptic PV: {err}", "MAJOR")

    def _create_pv(self, data: bytes):
        """Creates a single PV based on a name and data. Adds this PV to the dictionary returned on get_synoptic_list

        Args:
            data (bytes): Starting data for the pv, the pv name is derived from the name tag of this
        """
        name = self._get_synoptic_name_from_xml(data)
        if name not in self._synoptic_pvs:
            # Extra check, if a non-case sensitive match exist remove it
            for key in self._synoptic_pvs.keys():
                if name.lower() == key.lower():
                    self._synoptic_pvs.pop(key)
            pv = create_pv_name(name, list(self._synoptic_pvs.values()), "SYNOPTIC")
            self._synoptic_pvs[name] = pv
        # Create the PV
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, 16000)
        # Update the value
        self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(str(data, encoding="utf-8")))

    def update_pv_value(self, name, data):
        """ Updates value of a PV holding synoptic information with new data

        Args:
            name (string): The name of the edited synoptic
            data (bytes): The new synoptic data
        """
        self._bs.setParam(name, data)
        self._bs.updatePVs()

    def get_synoptic_list(self):
        """Gets the names and associated pvs of the synoptic files in the synoptics directory.

        Returns:
            list : Alphabetical list of synoptics files on the server, along with their associated pvs
        """
        syn_list = []
        default_is_none_synoptic = True
        for k, v in self._synoptic_pvs.items():
            is_default = False
            if bytes(f"<name>{k}</name>", encoding="utf-8") in self._default_syn_xml:
                default_is_none_synoptic = False
                is_default = True
            syn_list.append({"name": k, "pv": v, "is_default": is_default})
        ans = sorted(syn_list, key=lambda x: x['name'].lower())
        # Insert the "blank" synoptic
        ans.insert(0, {"pv": "__BLANK__", "name": "-- NONE --", "is_default": default_is_none_synoptic})
        return ans

    def set_default_synoptic(self, name):
        """Sets the default synoptic.

        Args:
            name (string): the name of the synoptic to load
        """
        fullname = name + ".xml"
        f = self._file_io.get_list_synoptic_files(self._directory)
        if fullname in f:
            # Load the data
            try:
                data = self._file_io.read_synoptic_file(self._directory, fullname)
                self._default_syn_xml = data
            except MaxAttemptsExceededException:
                print_and_log(f"Could not open synoptic file {fullname}. Please check the file is not "
                              f"in use by another process.", "MAJOR")
                self._default_syn_xml = b""
        else:
            # No synoptic
            self._default_syn_xml = b""

    def get_default_synoptic_xml(self) -> bytes:
        """Gets the XML for the default synoptic.

        Returns:
            bytes : The XML for the synoptic
        """
        return self._default_syn_xml

    def _get_synoptic_name_from_xml(self, xml_data: bytes):
        """Extract the contents of the <name> tag; raises if absent."""
        name = None
        root = etree.fromstring(xml_data)
        for child in root:
            if child.tag.split('}', 1)[1] == "name":
                name = child.text
        if name is None:
            raise Exception("Synoptic contains no name tag")
        return name

    def save_synoptic_xml(self, xml_data: bytes):
        """Saves the xml under the filename taken from the xml name tag.

        Args:
            xml_data (bytes): The XML to be saved
        """
        try:
            # Check against schema
            ConfigurationSchemaChecker.check_xml_matches_schema(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE),
                                                                xml_data, "Synoptic")
            # Update PVs
            self._create_pv(xml_data)
        except Exception as err:
            print_and_log(err)
            raise
        name = self._get_synoptic_name_from_xml(xml_data)
        save_path = FILEPATH_MANAGER.get_synoptic_path(name)
        try:
            self._file_io.write_synoptic_file(name, save_path, xml_data)
        except MaxAttemptsExceededException:
            raise IOError(f"Could not save to synoptic file at {save_path}. Please check the file is "
                          f"not in use by another process.")
        print_and_log("Synoptic saved: " + name)

    def delete(self, delete_list: List[str]):
        """Takes a list of synoptics and removes them from the file system and any relevant PVs.

        Args:
            delete_list (list): The synoptics to delete
        """
        print_and_log("Deleting: " + ', '.join(list(delete_list)), "INFO")
        delete_list = set(delete_list)
        if not delete_list.issubset(self._synoptic_pvs.keys()):
            raise InvalidDeleteException("Delete list contains unknown configurations")
        for synoptic in delete_list:
            self._delete_synoptic(synoptic)

    def _delete_synoptic(self, synoptic: str):
        """Delete a single synoptic file and its PV; best-effort on file lock."""
        fullname = synoptic + ".xml"
        try:
            self._file_io.delete_synoptic(self._directory, fullname)
        except MaxAttemptsExceededException:
            print_and_log(f"Could not delete synoptic file {fullname}. Please check the file is "
                          f"not in use by another process.", "MINOR")
            return
        self._bs.delete_pv_from_db(SYNOPTIC_PRE + self._synoptic_pvs[synoptic] + SYNOPTIC_GET)
        del self._synoptic_pvs[synoptic]

    def update(self, xml_data: str):
        """Updates the synoptic list when modifications are made via the filesystem.

        Args:
            xml_data (str): The xml data to update the PV with
        """
        # Convert to bytes
        bytes_xml_data = bytes(xml_data, encoding="utf-8")
        name = self._get_synoptic_name_from_xml(bytes_xml_data)
        names = self._synoptic_pvs.keys()
        if name in names:
            self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(xml_data))
        else:
            self._create_pv(bytes_xml_data)
        self.update_monitors()

    def get_synoptic_schema(self):
        """Gets the XSD data for the synoptic.

        Returns:
            string : The XML for the synoptic schema
        """
        with open(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), 'r') as schemafile:
            schema = schemafile.read()
        return schema

    def get_blank_synoptic(self):
        """Gets a blank synoptic.

        Returns:
            string : The XML for the blank synoptic
        """
        return """<?xml version="1.0" ?><instrument xmlns="http://www.isis.stfc.ac.uk//instrument">
               <name>-- NONE --</name><components/></instrument>"""
| 43.676471 | 157 | 0.655518 |
import os
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from block_server import BlockServer
from BlockServer.core.active_config_holder import ActiveConfigHolder
from BlockServer.core.config_list_manager import InvalidDeleteException
from BlockServer.core.file_path_manager import FILEPATH_MANAGER
from BlockServer.core.on_the_fly_pv_interface import OnTheFlyPvInterface
from BlockServer.fileIO.schema_checker import ConfigurationSchemaChecker
from lxml import etree
from server_common.common_exceptions import MaxAttemptsExceededException
from server_common.utilities import print_and_log, compress_and_hex, create_pv_name, \
convert_to_json, convert_from_json
from BlockServer.synoptic.synoptic_file_io import SynopticFileIO
SYNOPTIC_PRE = "SYNOPTICS:"
SYNOPTIC_GET = ":GET"
SYNOPTIC_SET = ":SET"
SYNOPTIC_NAMES = "NAMES"
SYNOPTIC_GET_DEFAULT = "GET_DEFAULT"
SYNOPTIC_BLANK = "__BLANK__"
SYNOPTIC_SET_DETAILS = "SET_DETAILS"
SYNOPTIC_DELETE = "DELETE"
SYNOPTIC_SCHEMA = "SCHEMA"
SYNOPTIC_SCHEMA_FILE = "synoptic.xsd"
class SynopticManager(OnTheFlyPvInterface):
    """Class for managing the PVs associated with synoptics."""
    # NOTE(review): ActiveConfigHolder below is imported only under
    # TYPE_CHECKING, so the unquoted annotation will raise NameError at
    # import time (no `from __future__ import annotations` here) — it should
    # be the string 'ActiveConfigHolder'. Also, the SynopticFileIO() default
    # is evaluated once at definition time and shared across instances.
    def __init__(self, block_server: 'BlockServer', schema_folder: str, active_configholder: ActiveConfigHolder, file_io: SynopticFileIO = SynopticFileIO()):
        """Store collaborators, create the standard PVs and load synoptics."""
        super(SynopticManager, self).__init__()
        self.pvs_to_write.extend([SYNOPTIC_PRE + SYNOPTIC_DELETE, SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS])
        self._directory = FILEPATH_MANAGER.synoptic_dir
        self._schema_folder = schema_folder
        self._synoptic_pvs = dict()
        self._bs = block_server
        self._activech = active_configholder
        self._file_io = file_io
        self._default_syn_xml = b""
        self._create_standard_pvs()
        self._load_initial()
    def handle_pv_write(self, pv: str, data: str):
        """Dispatch a write to the delete or save synoptic PVs."""
        try:
            if pv == SYNOPTIC_PRE + SYNOPTIC_DELETE:
                self.delete(convert_from_json(data))
                self.update_monitors()
            elif pv == SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS:
                self.save_synoptic_xml(bytes(data, encoding="utf-8"))
                self.update_monitors()
        except IOError as err:
            print_and_log(f"Error accessing synoptic file: {err}", "MAJOR")
        except Exception as err:
            print_and_log(f"Error writing to PV {pv}: {err}", "MAJOR")
    def handle_pv_read(self, pv):
        # Nothing to do: reads are handled via monitors.
        pass
    def update_monitors(self):
        """Push the default synoptic and name list out to PV monitors."""
        with self._bs.monitor_lock:
            print_and_log("Updating synoptic monitors")
            self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, compress_and_hex(str(self.get_default_synoptic_xml(), encoding="utf-8")))
            names = convert_to_json(self.get_synoptic_list())
            self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_NAMES, compress_and_hex(names))
            self._bs.updatePVs()
            print_and_log("Finished updating synoptic monitors")
    def on_config_change(self, full_init=False):
        """Reload the configuration's default synoptic after a config change."""
        default = self._activech.get_config_meta().synoptic
        self.set_default_synoptic(default)
        self.update_monitors()
    def _create_standard_pvs(self):
        """Create the fixed synoptic PVs and seed those that never change."""
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_NAMES, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_DELETE, 16000)
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, 16000)
        self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET,
                             compress_and_hex(self.get_blank_synoptic()))
        self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, compress_and_hex(self.get_synoptic_schema()))
    def _load_initial(self):
        """Create PVs for all synoptic files found in the synoptics directory."""
        for f in self._file_io.get_list_synoptic_files(self._directory):
            # Load the data, checking the schema
            try:
                data = self._file_io.read_synoptic_file(self._directory, f)
                ConfigurationSchemaChecker.check_xml_matches_schema(
                    os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), data, "Synoptic")
                # Get the synoptic name
                self._create_pv(data)
            except MaxAttemptsExceededException:
                print_and_log(f"Could not open synoptic file {f}. Please check the file is "
                              f"not in use by another process.", "MAJOR")
            except Exception as err:
                print_and_log(f"Error creating synoptic PV: {err}", "MAJOR")
    def _create_pv(self, data: bytes):
        """Create (or refresh) the :GET PV for the synoptic named in `data`."""
        name = self._get_synoptic_name_from_xml(data)
        if name not in self._synoptic_pvs:
            # Extra check, if a non-case sensitive match exist remove it
            for key in self._synoptic_pvs.keys():
                if name.lower() == key.lower():
                    self._synoptic_pvs.pop(key)
            pv = create_pv_name(name, list(self._synoptic_pvs.values()), "SYNOPTIC")
            self._synoptic_pvs[name] = pv
        # Create the PV
        self._bs.add_string_pv_to_db(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, 16000)
        # Update the value
        self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(str(data, encoding="utf-8")))
    def update_pv_value(self, name, data):
        """Write `data` to the PV called `name` and push monitor updates."""
        self._bs.setParam(name, data)
        self._bs.updatePVs()
    def get_synoptic_list(self):
        """Return an alphabetical list of {name, pv, is_default} dicts,
        with the "blank" synoptic inserted first."""
        syn_list = []
        default_is_none_synoptic = True
        for k, v in self._synoptic_pvs.items():
            is_default = False
            if bytes(f"<name>{k}</name>", encoding="utf-8") in self._default_syn_xml:
                default_is_none_synoptic = False
                is_default = True
            syn_list.append({"name": k, "pv": v, "is_default": is_default})
        ans = sorted(syn_list, key=lambda x: x['name'].lower())
        # Insert the "blank" synoptic
        ans.insert(0, {"pv": "__BLANK__", "name": "-- NONE --", "is_default": default_is_none_synoptic})
        return ans
    def set_default_synoptic(self, name):
        """Load `name`.xml as the default synoptic (empty bytes if missing)."""
        fullname = name + ".xml"
        f = self._file_io.get_list_synoptic_files(self._directory)
        if fullname in f:
            # Load the data
            try:
                data = self._file_io.read_synoptic_file(self._directory, fullname)
                self._default_syn_xml = data
            except MaxAttemptsExceededException:
                print_and_log(f"Could not open synoptic file {fullname}. Please check the file is not "
                              f"in use by another process.", "MAJOR")
                self._default_syn_xml = b""
        else:
            # No synoptic
            self._default_syn_xml = b""
    def get_default_synoptic_xml(self) -> bytes:
        """Return the XML of the default synoptic (b"" if none)."""
        return self._default_syn_xml
    def _get_synoptic_name_from_xml(self, xml_data: bytes):
        """Extract the contents of the <name> tag; raises if absent."""
        name = None
        root = etree.fromstring(xml_data)
        for child in root:
            if child.tag.split('}', 1)[1] == "name":
                name = child.text
        if name is None:
            raise Exception("Synoptic contains no name tag")
        return name
    def save_synoptic_xml(self, xml_data: bytes):
        """Validate, publish and persist a synoptic; filename from <name> tag."""
        try:
            # Check against schema
            ConfigurationSchemaChecker.check_xml_matches_schema(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE),
                                                                xml_data, "Synoptic")
            # Update PVs
            self._create_pv(xml_data)
        except Exception as err:
            print_and_log(err)
            raise
        name = self._get_synoptic_name_from_xml(xml_data)
        save_path = FILEPATH_MANAGER.get_synoptic_path(name)
        try:
            self._file_io.write_synoptic_file(name, save_path, xml_data)
        except MaxAttemptsExceededException:
            raise IOError(f"Could not save to synoptic file at {save_path}. Please check the file is "
                          f"not in use by another process.")
        print_and_log("Synoptic saved: " + name)
    def delete(self, delete_list: List[str]):
        """Remove the named synoptics from disk and delete their PVs."""
        print_and_log("Deleting: " + ', '.join(list(delete_list)), "INFO")
        delete_list = set(delete_list)
        if not delete_list.issubset(self._synoptic_pvs.keys()):
            raise InvalidDeleteException("Delete list contains unknown configurations")
        for synoptic in delete_list:
            self._delete_synoptic(synoptic)
    def _delete_synoptic(self, synoptic: str):
        """Delete one synoptic file and its PV; best-effort if file is locked."""
        fullname = synoptic + ".xml"
        try:
            self._file_io.delete_synoptic(self._directory, fullname)
        except MaxAttemptsExceededException:
            print_and_log(f"Could not delete synoptic file {fullname}. Please check the file is "
                          f"not in use by another process.", "MINOR")
            return
        self._bs.delete_pv_from_db(SYNOPTIC_PRE + self._synoptic_pvs[synoptic] + SYNOPTIC_GET)
        del self._synoptic_pvs[synoptic]
    def update(self, xml_data: str):
        """Refresh PVs after a synoptic was modified via the filesystem."""
        # Convert to bytes
        bytes_xml_data = bytes(xml_data, encoding="utf-8")
        name = self._get_synoptic_name_from_xml(bytes_xml_data)
        names = self._synoptic_pvs.keys()
        if name in names:
            self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(xml_data))
        else:
            self._create_pv(bytes_xml_data)
        self.update_monitors()
    def get_synoptic_schema(self):
        """Return the synoptic XSD schema file contents as a string."""
        schema = ""
        with open(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), 'r') as schemafile:
            schema = schemafile.read()
        return schema
    def get_blank_synoptic(self):
        """Return the XML for a blank ("-- NONE --") synoptic."""
        return """<?xml version="1.0" ?><instrument xmlns="http://www.isis.stfc.ac.uk//instrument">
               <name>-- NONE --</name><components/></instrument>"""
| true | true |
f73365e6e3d420feda037f285b058fa882baab4e | 1,158 | py | Python | jokenpo/jokenpo.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | jokenpo/jokenpo.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | jokenpo/jokenpo.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 12:21:28 2021
@author: user15
"""
from random import choice
counter = 0
def randomize():
    """Return the machine's randomly-chosen hand: 'gu', 'choki' or 'pa'."""
    return choice(['gu', 'choki', 'pa'])
# get input from user
def init_game(counter):
    """Prompt the player for a hand and play one round against the machine.

    Args:
        counter: 0 for the opening round ("Jokenpo!"), >= 1 for a tie-break
            round ("Aiko desho").

    Returns:
        The raw string the user typed.
    """
    # The two original branches differed only in the prompt; unified here.
    # Also removed a discarded extra randomize() call per round.
    if counter == 0:
        prompt = "Jokenpo! \n ==>"
    else:
        prompt = "Aiko desho \n ==>"
    user = input(prompt)
    check(user, randomize(), counter)
    return user
# check if user vs machine
def check(user_input, machine, counter):
    """Compare the two hands; on a tie, replay via init_game with an
    incremented counter; otherwise announce the winner. Unrecognised input
    is silently ignored (matches original behaviour)."""
    # Each pair (winner, loser) in rock-paper-scissors terms.
    beats = {('gu', 'choki'), ('choki', 'pa'), ('pa', 'gu')}
    if user_input == machine:
        check_counter = counter + 1
        init_game(check_counter)
    elif (user_input, machine) in beats:
        print("user win")
    elif (machine, user_input) in beats:
        print("machine win")
# Script entry point: start the first round.
init_game(counter)
| 26.930233 | 147 | 0.554404 |
from random import choice
counter = 0
def randomize():
    """Return the machine's randomly-chosen hand: 'gu', 'choki' or 'pa'."""
    jokenpo_value = ['gu', 'choki', 'pa']
    machine = choice(jokenpo_value)
    return machine
def init_game(counter):
    """Prompt the player for a hand and play one round against the machine.

    counter == 0 gives the opening prompt; counter >= 1 the tie-break prompt.
    Returns the raw string the user typed.
    """
    if counter == 0:
        user = input("Jokenpo! \n ==>")
        # NOTE(review): this bare randomize() call is discarded; the machine
        # hand actually used comes from the call inside check() below.
        randomize()
        check(user, randomize(), counter)
        return user
    elif counter >= 1:
        user = input("Aiko desho \n ==>")
        randomize()
        check(user, randomize(), counter)
        return user
def check(user_input, machine, counter):
    """Compare hands: on a tie, replay via init_game with counter + 1;
    otherwise print the winner. Unrecognised input is silently ignored."""
    if user_input == machine:
        counter += 1
        init_game(counter)
    elif ((user_input == 'gu' and machine == 'choki') or (user_input == 'choki' and machine == 'pa') or (user_input == 'pa' and machine == 'gu')):
        print("user win")
    elif ((user_input == 'gu' and machine == 'pa') or (user_input == 'pa' and machine == 'choki') or (user_input == 'choki' and machine == 'gu')):
        print("machine win")
# Script entry point: start the first round.
init_game(counter)
| true | true |
f73366187f0693efac62a3212f465c090331d647 | 3,102 | py | Python | pypureclient/pure1/Pure1_1_1/models/volume_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/pure1/Pure1_1_1/models/volume_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/pure1/Pure1_1_1/models/volume_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
Pure1 Public REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_1 import models
class VolumeResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Swagger-generated model: maps the REST response's `items` array of
    # Volume objects.
    swagger_types = {
        'items': 'list[Volume]'
    }
    attribute_map = {
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.Volume]
    ):
        """
        Keyword args:
            items (list[Volume])
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the swagger definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `VolumeResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Property instances are unresolved placeholder values; accessing
        # one behaves as if the attribute were unset.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # NOTE(review): dead branch from the codegen template —
        # VolumeResponse does not subclass dict, so this never runs.
        if issubclass(VolumeResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VolumeResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.696429 | 105 | 0.540297 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_1 import models
class VolumeResponse(object):
    """Swagger-generated response wrapper holding a list of Volume items.

    swagger_types maps attribute name -> swagger type declaration;
    attribute_map maps attribute name -> JSON key in the API definition.
    """
    swagger_types = {
        'items': 'list[Volume]'
    }
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        items=None,
    ):
        """Keyword args: items (list[Volume])."""
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Only attributes declared in the swagger model may be set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `VolumeResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unset lazy Property placeholders read as missing attributes.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Convert list elements that are themselves models.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Convert dict values that are themselves models.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated boilerplate: only runs if the model subclasses dict
        # (it does not here).
        if issubclass(VolumeResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True when both objects are equal."""
        if not isinstance(other, VolumeResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True when both objects are not equal."""
        return not self == other
| true | true |
f73367d3c6911639c46868b04d5d6592fa7e6d0e | 6,205 | py | Python | common/response.py | weicunheng/test-studio | 5bff2d94e6252c1689d6ae74529d0fd30fb20c0d | [
"Apache-2.0"
] | 1 | 2020-06-07T04:38:10.000Z | 2020-06-07T04:38:10.000Z | common/response.py | weicunheng/test-studio | 5bff2d94e6252c1689d6ae74529d0fd30fb20c0d | [
"Apache-2.0"
] | null | null | null | common/response.py | weicunheng/test-studio | 5bff2d94e6252c1689d6ae74529d0fd30fb20c0d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import logging
from collections import OrderedDict
from rest_framework import exceptions
from rest_framework.views import APIView, exception_handler
from common import error_code
from django.utils.translation import gettext as _
logger = logging.getLogger("api-request")
def custom_exception_handler(exc, context):
    """DRF exception handler wrapping errors in the project's envelope.

    Produces ``{return_code, return_msg[, data]}`` responses. Numeric codes
    are looked up on the ``error_code`` module and Chinese messages on
    ``error_code.ZhError``. Every handled error is logged together with the
    request route, request payload and the response body.
    """
    response = exception_handler(exc, context)  # DRF's default handler
    request = context['request']
    if response is not None:
        data = response.data
        response.data = {}
        if 'detail' in data and not isinstance(data['detail'], (list, dict)):
            if isinstance(data['detail'], str):
                # NOTE(review): self-assignment kept from the original (no effect).
                data['detail'] = data['detail']
                # NOTE(review): DRF's ErrorDetail subclasses str, so on
                # Python 3 this branch also catches ErrorDetail instances
                # and the elif below is effectively unreachable. When the
                # text is not an attribute of error_code, response.data is
                # left empty and nothing is logged -- confirm intent.
                if hasattr(error_code, data['detail']):
                    response.data['return_code'] = getattr(error_code, data['detail'])
                    response.data['return_msg'] = _(getattr(error_code.ZhError, data['detail']))
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
            elif isinstance(data['detail'], exceptions.ErrorDetail):
                # Map the symbolic DRF error code (e.g. NOT_FOUND) if known.
                code = str(data['detail'].code).upper()
                if hasattr(error_code, code):
                    response.data['return_code'] = getattr(error_code, code)
                    response.data['return_msg'] = _(getattr(error_code.ZhError, code))
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                else:
                    response.data['return_code'] = getattr(error_code, 'SYSTEM_ERROR')
                    response.data['return_msg'] = _(data['detail'])
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
            else:
                # Detail of an unexpected type: generic system error.
                response.data['return_code'] = getattr(error_code, 'SYSTEM_ERROR')
                response.data['return_msg'] = _(data['detail'])
                logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                    "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                    "return": json.dumps(response.data), "data": data})
        else:
            if isinstance(exc, exceptions.ValidationError):
                # Field validation errors: envelope plus per-field details.
                response.data['return_code'] = getattr(error_code, 'ERROR_CHECK_PARAM')
                # response.data['return_msgEn'] = getattr(error_code.EnglishError, 'ERROR_CHECK_PARAM')
                response.data['return_msg'] = _(getattr(error_code.ZhError, 'ERROR_CHECK_PARAM'))
                response.data['data'] = data
                try:
                    # Best-effort logging: request.data may not serialize.
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                    pass
                except Exception as e:
                    pass
            else:
                # Any other exception type with structured data.
                response.data['return_code'] = getattr(error_code, 'UNDIFINED_ERROR')
                response.data['return_msg'] = _(getattr(error_code.ZhError, 'UNDIFINED_ERROR'))
                response.data['data'] = data
                try:
                    # Best-effort logging: request.data may not serialize.
                    logger.critical("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                except Exception as e:
                    pass
    else:
        # DRF produced no response (non-API exception): log critically and
        # return None so Django's default 500 handling applies.
        logger.critical("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
            "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data), "return": "None",
            "data": repr(exc)})
    return response
class Version2APIView(APIView):
    """APIView that routes exceptions through custom_exception_handler."""
    def get_exception_handler(self):
        # Override DRF's default with the envelope-producing handler above.
        return custom_exception_handler
class BaseAPIView(Version2APIView):
    """API view that wraps every successful (2xx) response in the standard
    ``{return_code: 0, return_msg, data}`` envelope."""

    def finalize_response(self, request, response, *args, **kwargs):
        """Render the response and re-serialize 2xx bodies into the success
        envelope; the Content-Type is forced to application/json."""
        response = super(BaseAPIView, self).finalize_response(request, response, *args, **kwargs)
        if hasattr(response, 'render') and callable(response.render):
            response.render()
        if 200 <= response.status_code < 300:
            if response.get('Content-Type', "").lower() == 'application/json':
                # Re-wrap the already-JSON body, preserving key order.
                response.content = json.dumps({"return_code": 0, "return_msg": _('成功'),
                                               "data": json.loads(response.content, object_pairs_hook=OrderedDict)})
            else:
                # On Python 3 response.content is bytes; str(bytes) would
                # yield "b'...'" and never equal "success", and bytes are
                # not JSON-serializable -- decode before comparing/wrapping.
                content = response.content
                if isinstance(content, bytes):
                    content = content.decode('utf-8')
                if content.lower() != "success":
                    response.content = json.dumps(
                        {"return_code": 0, "return_msg": _('成功'), "data": _(content)})
                else:
                    response.content = json.dumps({"return_code": 0, "return_msg": _('成功')})
            response['Content-Type'] = 'application/json'
        return response

    def encode_error(self, error_no):
        """Build an error envelope for *error_no*.

        Known codes map to their numeric value and Chinese message from
        ``error_code.ZhError``; unknown strings fall back to SYSTEM_ERROR
        with the string itself as the message.
        """
        if hasattr(error_code, error_no):
            return {
                'return_code': getattr(error_code, error_no),
                'return_msg': _(getattr(error_code.ZhError, error_no))
            }
        else:
            return {
                'return_code': getattr(error_code, 'SYSTEM_ERROR'),
                'return_msg': _(error_no)
            }
| 51.708333 | 118 | 0.560193 |
import json
import logging
from collections import OrderedDict
from rest_framework import exceptions
from rest_framework.views import APIView, exception_handler
from common import error_code
from django.utils.translation import gettext as _
logger = logging.getLogger("api-request")
def custom_exception_handler(exc, context):
    """DRF exception handler wrapping errors into the
    ``{return_code, return_msg[, data]}`` envelope, with request logging."""
    response = exception_handler(exc, context)  # DRF's default handler
    request = context['request']
    if response is not None:
        data = response.data
        response.data = {}
        if 'detail' in data and not isinstance(data['detail'], (list, dict)):
            if isinstance(data['detail'], str):
                # NOTE(review): self-assignment has no effect; also, DRF's
                # ErrorDetail subclasses str, so this branch shadows the
                # elif below on Python 3, and a non-matching text leaves
                # response.data empty -- confirm intent.
                data['detail'] = data['detail']
                if hasattr(error_code, data['detail']):
                    response.data['return_code'] = getattr(error_code, data['detail'])
                    response.data['return_msg'] = _(getattr(error_code.ZhError, data['detail']))
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
            elif isinstance(data['detail'], exceptions.ErrorDetail):
                # Map the symbolic DRF error code (e.g. NOT_FOUND) if known.
                code = str(data['detail'].code).upper()
                if hasattr(error_code, code):
                    response.data['return_code'] = getattr(error_code, code)
                    response.data['return_msg'] = _(getattr(error_code.ZhError, code))
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                else:
                    response.data['return_code'] = getattr(error_code, 'SYSTEM_ERROR')
                    response.data['return_msg'] = _(data['detail'])
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
            else:
                # Detail of an unexpected type: generic system error.
                response.data['return_code'] = getattr(error_code, 'SYSTEM_ERROR')
                response.data['return_msg'] = _(data['detail'])
                logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                    "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                    "return": json.dumps(response.data), "data": data})
        else:
            if isinstance(exc, exceptions.ValidationError):
                # Field validation errors: envelope plus per-field details.
                response.data['return_code'] = getattr(error_code, 'ERROR_CHECK_PARAM')
                response.data['return_msg'] = _(getattr(error_code.ZhError, 'ERROR_CHECK_PARAM'))
                response.data['data'] = data
                try:
                    # Best-effort logging: request.data may not serialize.
                    logger.error("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                    pass
                except Exception as e:
                    pass
            else:
                # Any other exception type with structured data.
                response.data['return_code'] = getattr(error_code, 'UNDIFINED_ERROR')
                response.data['return_msg'] = _(getattr(error_code.ZhError, 'UNDIFINED_ERROR'))
                response.data['data'] = data
                try:
                    # Best-effort logging: request.data may not serialize.
                    logger.critical("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
                        "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data),
                        "return": json.dumps(response.data), "data": data})
                except Exception as e:
                    pass
    else:
        # DRF produced no response (non-API exception): log critically and
        # return None so Django's default 500 handling applies.
        logger.critical("route:%(route)s\trequest:%(request)s\treturn:%(return)s\tdata:%(data)s" % {
            "route": json.dumps(request.build_absolute_uri()), "request": json.dumps(request.data), "return": "None",
            "data": repr(exc)})
    return response
class Version2APIView(APIView):
    """APIView that routes exceptions through custom_exception_handler."""
    def get_exception_handler(self):
        # Override DRF's default with the envelope-producing handler above.
        return custom_exception_handler
class BaseAPIView(Version2APIView):
    """API view wrapping successful (2xx) responses into the
    ``{return_code: 0, return_msg, data}`` envelope."""
    def finalize_response(self, request, response, *args, **kwargs):
        """Render the response and re-serialize 2xx bodies into the success
        envelope; Content-Type is forced to application/json."""
        response = super(BaseAPIView, self).finalize_response(request, response, *args, **kwargs)
        if hasattr(response, 'render') and callable(response.render):
            response.render()
        if 200 <= response.status_code < 300:
            if response.get('Content-Type', "").lower() == 'application/json':
                # Re-wrap the already-JSON body, preserving key order.
                response.content = json.dumps({"return_code": 0, "return_msg": _('成功'),
                                               "data": json.loads(response.content, object_pairs_hook=OrderedDict)})
            else:
                # NOTE(review): on Python 3 response.content is bytes, so
                # str(...) yields "b'...'" and can never equal "success";
                # json.dumps of bytes would then raise -- verify this
                # branch on Python 3.
                if str(response.content).lower() != "success":
                    response.content = json.dumps(
                        {"return_code": 0, "return_msg": _('成功'), "data": _(response.content)})
                else:
                    response.content = json.dumps({"return_code": 0, "return_msg": _('成功')})
            response['Content-Type'] = 'application/json'
        return response
    def encode_error(self, error_no):
        """Build an error envelope; unknown codes fall back to SYSTEM_ERROR
        with the given string as the message."""
        if hasattr(error_code, error_no):
            return {
                'return_code': getattr(error_code, error_no),
                'return_msg': _(getattr(error_code.ZhError, error_no))
            }
        else:
            return {
                'return_code': getattr(error_code, 'SYSTEM_ERROR'),
                'return_msg': _(error_no)
            }
| true | true |
f7336916020b5d05bdc314a380e56dece473f4f4 | 42,589 | py | Python | src/m2ee.py | rus-kh/m2ee-tools | 70000796a53131bb1cd8d199f48cd5e7aab2c505 | [
"BSD-3-Clause"
] | 23 | 2015-11-26T12:05:45.000Z | 2022-03-17T10:24:22.000Z | src/m2ee.py | rus-kh/m2ee-tools | 70000796a53131bb1cd8d199f48cd5e7aab2c505 | [
"BSD-3-Clause"
] | 54 | 2016-07-26T12:44:07.000Z | 2022-02-17T10:08:01.000Z | src/m2ee.py | rus-kh/m2ee-tools | 70000796a53131bb1cd8d199f48cd5e7aab2c505 | [
"BSD-3-Clause"
] | 34 | 2015-01-04T07:05:48.000Z | 2022-02-15T10:23:52.000Z | #!/usr/bin/python
#
# Copyright (C) 2009 Mendix. All rights reserved.
#
from __future__ import print_function
import argparse
import atexit
import cmd
import datetime
import getpass
import logging
import os
import pwd
import random
import shlex
import signal
import string
import subprocess
import sys
import yaml
from m2ee import pgutil, M2EE, client_errno
import m2ee
logger = logging  # the stdlib logging module is used directly as "logger"
# When output is redirected (not a tty), avoid unicode output crashing on a
# default ascii codec. Wrapping sys.stdout in a codecs StreamWriter is only
# correct on Python 2: on Python 3 sys.stdout is already a text stream and
# the wrapper would try to write bytes into it, raising TypeError.
if sys.version_info[0] < 3 and not sys.stdout.isatty():
    import codecs
    import locale
    sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
# Python 3 compatibility: raw_input was renamed to input.
try:
    raw_input
except NameError:
    raw_input = input
class CLI(cmd.Cmd, object):
    def __init__(self, yaml_files=None, yolo_mode=False):
        """Interactive m2ee shell.

        yaml_files: configuration files handed to the M2EE backend.
        yolo_mode: when True, interactive confirmations are auto-answered
        with 'yes'.
        """
        logger.debug('Using m2ee-tools version %s' % m2ee.__version__)
        cmd.Cmd.__init__(self)
        self.m2ee = M2EE(yaml_files=yaml_files)
        self.yolo_mode = yolo_mode
        # The prompt shows the OS user m2ee runs as, e.g. "m2ee(user): ".
        self.prompt_username = pwd.getpwuid(os.getuid())[0]
        self._default_prompt = "m2ee(%s): " % self.prompt_username
        self.prompt = self._default_prompt
        self.nodetach = False
    def do_restart(self, args):
        # Restart = stop (with possible escalation) followed by start.
        if self._stop():
            self._start()
    def do_stop(self, args):
        # Stop the application (may escalate to terminate/kill).
        self._stop()
    def do_start(self, args):
        # Start the appcontainer and runtime.
        self._start()
def _stop(self):
logger.debug("Trying to stop the application.")
stopped = self.m2ee.stop()
if stopped:
return True
answer = None
while answer not in ('y', 'n'):
answer = ('y' if self.yolo_mode
else raw_input("Do you want to try to signal the JVM "
"process to stop immediately? (y)es, (n)o? "))
if answer == 'y':
stopped = self.m2ee.terminate()
if stopped:
return True
elif answer == 'n':
logger.info("Doing nothing, use stop again to check if the "
"process finally disappeared...")
return False
else:
print("Unknown option %s" % answer)
answer = None
while answer not in ('y', 'n'):
answer = ('y' if self.yolo_mode
else raw_input("Do you want to kill the JVM process? "
"(y)es, (n)o? "))
if answer == 'y':
stopped = self.m2ee.kill()
if stopped:
return True
elif answer == 'n':
logger.info("Doing nothing, use stop again to check if the "
"process finally disappeared...")
return False
else:
print("Unknown option %s" % answer)
return False
    def _start(self):
        """
        This function deals with the start-up sequence of the Mendix Runtime.
        Starting the Mendix Runtime can fail in both a temporary or permanent
        way. See the client_errno for possible error codes.
        """
        # Refuse to start without a model or with known configuration errors.
        if not self.m2ee.config.all_systems_are_go():
            raise m2ee.exceptions.M2EEException(
                "The application cannot be started because no application "
                "model is present, or because of other previous errors."
            )
        if not self.m2ee.config.get_runtime_path():
            raise m2ee.exceptions.M2EEException(
                "It appears that the Mendix Runtime version which has to be "
                "used for your application is not present yet. You can try "
                "downloading it using the download_runtime command."
            )
        self.m2ee.start_appcontainer(detach=not self.nodetach)
        try:
            self.m2ee.send_runtime_config()
        except m2ee.client.M2EEAdminException as e:
            logger.error("Sending configuration failed: %s" % e.cause)
            logger.error("You'll have to fix the configuration and run start again...")
            self._stop()
            return
        # Retry starting the runtime until it succeeds, or a permanent
        # error (or a user abort) ends the loop.
        abort = False
        fully_started = False
        params = {}
        while not (fully_started or abort):
            try:
                self.m2ee.start_runtime(params)
                fully_started = True
            except m2ee.client.M2EEAdminException as e:
                logger.error(e)
                if e.result == client_errno.start_NO_EXISTING_DB:
                    if self.yolo_mode:
                        # This call tries to create a database and immediately execute
                        # ddl commands.
                        self.m2ee.client.execute_ddl_commands()
                    else:
                        abort = True
                elif e.result == client_errno.start_INVALID_DB_STRUCTURE:
                    # DB schema does not match the model; offer DDL options.
                    answer = self._handle_ddl_commands()
                    if answer == 'a':
                        abort = True
                elif e.result == client_errno.start_MISSING_MF_CONSTANT:
                    logger.error("You'll have to add the constant definitions "
                                 "to the configuration in the "
                                 "MicroflowConstants section.")
                    abort = True
                elif e.result == client_errno.start_ADMIN_1:
                    # Admin users still have the default password '1'.
                    users = e.feedback['users']
                    if self.yolo_mode:
                        self._handle_admin_1_yolo(users)
                    else:
                        answer = self._handle_admin_1(users)
                        if answer == 'a':
                            abort = True
                else:
                    # Unknown/permanent error: give up.
                    abort = True
        if abort:
            self._stop()
    def _handle_ddl_commands(self):
        """Interactive handling of pending database DDL changes.

        Returns the chosen option character; 'a' means the caller should
        abort the start sequence.
        """
        feedback = self.m2ee.client.get_ddl_commands({"verbose": True})
        answer = None
        while answer not in ('v', 's', 'e', 'a'):
            # yolo mode always executes (and saves) the DDL commands.
            answer = ('e' if self.yolo_mode
                      else raw_input("Do you want to (v)iew queries, (s)ave them to "
                                     "a file, (e)xecute and save them, or (a)bort: "))
            if answer == 'a':
                pass
            elif answer == 'v':
                # Viewing does not end the prompt loop.
                print('\n'.join(feedback['ddl_commands']))
                answer = None
            elif answer in ('e', 's'):
                ddl_commands = feedback['ddl_commands']
                self.m2ee.save_ddl_commands(ddl_commands)
                if answer == 'e':
                    self.m2ee.client.execute_ddl_commands()
            else:
                print("Unknown option %s" % answer)
        return answer
    def _handle_admin_1(self, users):
        """Interactively reset the password for each user reported to still
        have the default password. Returns 'a' when the user aborts."""
        answer = None
        while answer not in ('c', 'a'):
            answer = raw_input("Do you want to (c)hange passwords or "
                               "(a)bort: ")
            if answer == 'a':
                pass
            elif answer == 'c':
                for username in users:
                    changed = False
                    while not changed:
                        newpw1 = getpass.getpass("Type new password for user "
                                                 "%s: " % username)
                        newpw2 = getpass.getpass("Type new password for user "
                                                 " %s again: " % username)
                        if newpw1 != newpw2:
                            print("The passwords are not equal!")
                        else:
                            try:
                                self.m2ee.client.update_admin_user(
                                    {"username": username, "password": newpw1})
                                changed = True
                            except m2ee.client.M2EEAdminException as e:
                                logger.error(e)
            else:
                print("Unknown option %s" % answer)
        return answer
    def _handle_admin_1_yolo(self, users):
        """Non-interactive variant: generate and log a random password for
        every user that still has the default password."""
        for username in users:
            newpasswd = self._generate_password()
            logger.info("Changing password for user %s to %s" %
                        (username, newpasswd))
            self.m2ee.client.update_admin_user({
                "username": username,
                "password": newpasswd,
            })
def _generate_password(self):
newpasswd_list = []
for choosefrom in [
string.ascii_lowercase,
string.ascii_uppercase,
string.digits,
string.punctuation,
]:
newpasswd_list.extend([random.choice(choosefrom)
for _ in range(random.randint(10, 20))])
random.shuffle(newpasswd_list)
return ''.join(newpasswd_list)
    def do_create_admin_user(self, args=None):
        """Create an administrative user account in the running app."""
        if not self.m2ee.client.ping():
            logger.warn("The application process needs to be running to "
                        "create a user object in the application.")
            return
        print("This option will create an administrative user account, using "
              "the preset username and user role settings.")
        newpw1 = getpass.getpass("Type new password for this user: ")
        newpw2 = getpass.getpass("Type new password for this user again: ")
        if newpw1 != newpw2:
            print("The passwords are not equal!")
        else:
            self.m2ee.client.create_admin_user({"password": newpw1})
    def do_update_admin_user(self, args=None):
        """Reset the password of an existing administrative user."""
        if not self.m2ee.client.ping():
            logger.warn("The application process needs to be running to "
                        "change user objects in the application.")
            return
        print("Using this function you can reset the password of an "
              "administrative user account.")
        username = raw_input("User name: ")
        newpw1 = getpass.getpass("Type new password for user %s: " % username)
        newpw2 = getpass.getpass("Type new password for user %s again: " %
                                 username)
        if newpw1 != newpw2:
            print("The passwords are not equal!")
        else:
            self.m2ee.client.update_admin_user({"username": username, "password": newpw1})
    def do_debug(self, args):
        """Drop into an interactive Python shell inside this process."""
        answer = raw_input("This command will throw you into a local python "
                           "debug session inside the M2EE object! Continue "
                           "(y/N)?")
        if answer == 'y':
            import code
            code.interact(local=locals())
    def do_status(self, args):
        """Show runtime status, critical log count and logged in users."""
        feedback = self.m2ee.client.runtime_status(timeout=3)
        status = feedback['status']
        logger.info("The application process is running, the MxRuntime has status: %s" % status)
        if status != 'running':
            return
        critlist = self.m2ee.client.get_critical_log_messages()
        if len(critlist) > 0:
            logger.error("%d critical error(s) were logged. Use show_critical"
                         "_log_messages to view them." % len(critlist))
        # Show at most 10 logged in users here; who shows the full list.
        max_show_users = 10
        total_users = self._who(max_show_users)
        if total_users > max_show_users:
            logger.info("Only showing %s logged in users. Use who to see a "
                        "complete list." % max_show_users)
    def do_show_critical_log_messages(self, args):
        """Print all messages logged at CRITICAL level since app start."""
        errors = self.m2ee.client.get_critical_log_messages()
        if len(errors) == 0:
            logger.info("No messages were logged to a critical loglevel since "
                        "starting the application.")
            return
        for error in errors:
            errorline = []
            if 'message' in error and error['message'] != '':
                errorline.append("- %s" % error['message'])
            if 'cause' in error and error['cause'] != '':
                errorline.append("- Caused by: %s" % error['cause'])
            if len(errorline) == 0:
                errorline.append("- [No message or cause was logged]")
            # Timestamps arrive in milliseconds since the epoch.
            errorline.insert(
                0,
                datetime.datetime.fromtimestamp(error['timestamp'] / 1000)
                .strftime("%Y-%m-%d %H:%M:%S")
            )
            print(' '.join(errorline))
    def do_check_health(self, args):
        """Run the application's health check microflow and report."""
        feedback = self.m2ee.client.check_health()
        if feedback['health'] == 'healthy':
            logger.info("Health check microflow says the application is healthy.")
        elif feedback['health'] == 'sick':
            logger.warning("Health check microflow says the application "
                           "is sick: %s" % feedback['diagnosis'])
        elif feedback['health'] == 'unknown':
            logger.info("Health check microflow is not configured, no "
                        "health information available.")
        else:
            logger.error("Unexpected health check status: %s" % feedback['health'])
    def do_statistics(self, args):
        """Dump combined runtime and server statistics as YAML."""
        stats = self.m2ee.client.runtime_statistics()
        stats.update(self.m2ee.client.server_statistics())
        print(yaml.safe_dump(stats, default_flow_style=False))
    def do_show_cache_statistics(self, args):
        """Dump object cache statistics as YAML."""
        stats = self.m2ee.client.cache_statistics()
        print(yaml.safe_dump(stats, default_flow_style=False))
    def do_munin_config(self, args):
        """Print the munin plugin configuration for this app."""
        m2ee.munin.print_config(
            self.m2ee,
            self.prompt_username,
        )
    def do_munin_values(self, args):
        """Print the current munin metric values for this app."""
        m2ee.munin.print_values(
            self.m2ee,
            self.prompt_username,
        )
    def do_nagios(self, args):
        """Run the nagios check and exit with its status code."""
        logger.info("The nagios plugin will exit m2ee after running, this is "
                    "by design, don't report it as bug.")
        # TODO: implement as separate program after libraryfying m2ee
        sys.exit(m2ee.nagios.check(self.m2ee.runner, self.m2ee.client))
    def do_about(self, args):
        """Print version information for m2ee-tools and the runtime."""
        print('Using m2ee-tools version %s' % m2ee.__version__)
        feedback = self.m2ee.client.about()
        print("Using %s version %s" % (feedback['name'], feedback['version']))
        print(feedback['copyright'])
        # model_version is only reported by runtimes >= 4.4.
        if self.m2ee.config.get_runtime_version() >= 4.4:
            if 'model_version' in feedback:
                print('Model version: %s' % feedback['model_version'])
    def do_show_license_information(self, args):
        """Show license details, or the server ID when unlicensed."""
        feedback = self.m2ee.client.get_license_information()
        if 'license' in feedback:
            logger.debug(yaml.safe_dump(feedback['license'],
                                        allow_unicode=True))
            # Work on a deep copy: _print_license pops keys while printing.
            import copy
            licensecopy = copy.deepcopy(feedback['license'])
            self._print_license(licensecopy)
        elif 'license_id' in feedback:
            print("Unlicensed environment.")
            print("Server ID: %s" % feedback['license_id'])
        else:
            print("Unlicensed environment.")
    def _print_license(self, licensecopy):
        """Pretty-print license fields, popping each one as it is shown;
        whatever remains afterwards is dumped as YAML."""
        print("Server ID: %s" % licensecopy.pop('LicenseID', 'Unknown'))
        print("License Type: %s" % licensecopy.pop('LicenseType', 'Unknown'))
        if 'ExpirationDate' in licensecopy:
            # ExpirationDate arrives in milliseconds since the epoch.
            print("Expiration Date: %s" %
                  datetime.datetime.fromtimestamp(
                      licensecopy.pop('ExpirationDate') / 1000
                  )
                  .strftime("%a, %d %b %Y %H:%M:%S %z")
                  .rstrip())
        print("Runtime Mode: %s" % licensecopy.pop('RuntimeMode', 'Unknown'))
        print("Company: %s" % licensecopy.pop('Company', 'Unknown'))
        limitations = licensecopy.pop('UserLimitations', None)
        separate_anonymous = licensecopy.pop('SeparateAnonymousUsers', True)
        if limitations is not None:
            print("License Limitations:")
            for limitation in limitations:
                self._print_license_limitation(limitation, separate_anonymous)
        if len(licensecopy) > 1:
            print(yaml.safe_dump(licensecopy, allow_unicode=True))
def _print_license_limitation(self, limitation, separate_anonymous):
if limitation['LimitationType'] == 'Named':
if limitation['AmountType'] == 'Unlimited':
print("- Unlimited named %suser accounts allowed." %
('' if separate_anonymous else "and anonymous "))
else:
print(" - %s named user account%s allowed" %
(limitation['NumberOfAllowedUsers'],
's' if limitation['NumberOfAllowedUsers'] != 1 else ''))
elif limitation['LimitationType'] == 'Concurrent':
if limitation['AmountType'] == 'Unlimited':
print("- Unlimited concurrent named %suser sessions allowed."
% ("" if separate_anonymous else "and anonymous "))
else:
print("- %s concurrent named %suser session%s allowed." %
(
limitation['NumberOfAllowedUsers'],
'' if separate_anonymous else "and anonymous ",
('s' if limitation['NumberOfAllowedUsers'] != 1
else '')))
elif (limitation['LimitationType'] == 'ConcurrentAnonymous' and
separate_anonymous):
if limitation['AmountType'] == 'Unlimited':
print("- Unlimited concurrent anonymous user sessions "
"allowed.")
else:
print("- %s concurrent anonymous session%s allowed." %
(
limitation['NumberOfAllowedUsers'],
('s' if limitation['NumberOfAllowedUsers'] != 1
else '')))
    def do_activate_license(self, args):
        """Set (activate) the license key; the key may be given as argument
        or pasted interactively."""
        self.m2ee.client.require_action("set_license")
        print("The command activate_license will set the license key used in "
              "this application.")
        runtime_version = m2ee.version.MXVersion(self.m2ee.client.about()['version'])
        if runtime_version < 4.1:
            # Older runtimes accept any string without validating it, so
            # warn loudly and require explicit confirmation.
            print("Mendix Runtime versions before 4.1 do not check the "
                  "submitted license key for validity, so incorrect input "
                  "will un-license your Mendix application without warning! "
                  "After setting the license, use show_license_information "
                  "to check the active license. Also... after setting the "
                  "license in versions before Mendix 4.1 you will need to "
                  "restart the application again to be sure it is fully "
                  "activated.")
            answer = raw_input("Do you want to continue anyway? (type YES if "
                               "you want to): ")
            if answer != 'YES':
                print("Aborting.")
                return
        if not args:
            license_key = raw_input("Paste your license key (a long text "
                                    "string without newlines) or empty input "
                                    "to abort: ")
        else:
            license_key = args
        if not license_key:
            print("Aborting.")
            return
        self.m2ee.client.set_license({'license_key': license_key})
def do_enable_debugger(self, args):
self.m2ee.client.require_action("enable_debugger")
if not args:
debugger_password = raw_input(
"Please enter the password to be used for remote debugger "
"access from the modeler, or leave blank to auto-generate "
"a password: ")
if not debugger_password:
debugger_password = ''.join(
random.choice(string.letters + string.digits)
for x in range(random.randint(20, 30)))
else:
debugger_password = args
self.m2ee.client.enable_debugger({'password': debugger_password})
logger.info("The remote debugger is now enabled, the password to "
"use is %s" % debugger_password)
logger.info("You can use the remote debugger option in the Mendix "
"Business Modeler to connect to the /debugger/ sub "
"url on your application (e.g. "
"https://app.example.com/debugger/). ")
    def do_disable_debugger(self, args):
        """Disable the remote debugger."""
        self.m2ee.client.disable_debugger()
        logger.info("The remote debugger is now disabled.")
    def do_show_debugger_status(self, args):
        """Report debugger enabled/connected state and paused microflows."""
        feedback = self.m2ee.client.get_debugger_status()
        enabled = feedback['enabled']
        connected = feedback['client_connected']
        paused = feedback['number_of_paused_microflows']
        logger.info("The remote debugger is currently %s." %
                    ("enabled" if enabled else "disabled"))
        if connected:
            logger.info("A debugger session is connected.")
        elif enabled:
            logger.info("There is no connected debugger session.")
        if enabled and paused == 0:
            logger.info("There are no paused microflows.")
        elif paused == 1:
            logger.info("There is 1 paused microflow.")
        elif paused > 1:
            logger.info("There are %s paused microflows." % paused)
    def do_who(self, args):
        """List logged in users; optional numeric arg limits the amount."""
        if args:
            try:
                limitint = int(args)
                self._who(limitint)
            except ValueError:
                logger.warn("Could not parse argument to an integer. Use a "
                            "number as argument to limit the amount of logged "
                            "in users shown.")
        else:
            self._who()
    def do_w(self, args):
        # Alias for who.
        self.do_who(args)
    def do_reload(self, args):
        """Re-read the yaml configuration files."""
        logger.debug("Reloading configuration...")
        self.m2ee.reload_config()
    def do_dump_config(self, args):
        """Print the effective configuration."""
        self.m2ee.config.dump()
    def do_set_database_password(self, args):
        """Interactively set the database password in the configuration."""
        password = getpass.getpass("Database password: ")
        self.m2ee.config.set_database_password(password)
    def do_psql(self, args):
        """Open an interactive psql shell for the application database."""
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        pgutil.psql(self.m2ee.config)
    def do_dumpdb(self, args):
        """Dump the PostgreSQL database; optional arg is the dump name."""
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        if len(args) > 0:
            pgutil.dumpdb(self.m2ee.config, args)
        else:
            pgutil.dumpdb(self.m2ee.config)
    def do_restoredb(self, args):
        """Restore a database dump (destructive).

        Requires allow_destroy_db in the configuration and a stopped
        application; asks for confirmation unless yolo mode is active.
        """
        if not self.m2ee.config.allow_destroy_db():
            logger.error("Refusing to do a destructive database operation "
                         "because the allow_destroy_db configuration option "
                         "is set to false.")
            return
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        if not args:
            logger.error("restoredb needs the name of a dump file in %s as arg"
                         "ument" % self.m2ee.config.get_database_dump_path())
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.warn("The application is still running, refusing to "
                        "restore the database right now.")
            return
        database_name = self.m2ee.config.get_pg_environment()['PGDATABASE']
        answer = ('y' if self.yolo_mode
                  else raw_input("This command will restore this dump into database "
                                 "%s. Continue? (y)es, (N)o? " % database_name))
        if answer != 'y':
            logger.info("Aborting!")
            return
        pgutil.restoredb(self.m2ee.config, args)
    def complete_restoredb(self, text, line, begidx, endidx):
        # Tab-completion for restoredb: *.backup files in the dump directory.
        if not self.m2ee.config.is_using_postgresql():
            return []
        database_dump_path = self.m2ee.config.get_database_dump_path()
        return [f for f in os.listdir(database_dump_path)
                if os.path.isfile(os.path.join(database_dump_path, f)) and
                f.startswith(text) and
                f.endswith(".backup")]
    def do_emptydb(self, args):
        """Drop all tables and sequences (destructive).

        Requires allow_destroy_db in the configuration and a stopped
        application; asks for confirmation unless yolo mode is active.
        """
        if not self.m2ee.config.allow_destroy_db():
            logger.error("Refusing to do a destructive database operation "
                         "because the allow_destroy_db configuration option "
                         "is set to false.")
            return
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.warn("The application process is still running, refusing "
                        "to empty the database right now.")
            return
        logger.info("This command will drop all tables and sequences in "
                    "database %s." %
                    self.m2ee.config.get_pg_environment()['PGDATABASE'])
        answer = ('y' if self.yolo_mode
                  else raw_input("Continue? (y)es, (N)o? "))
        if answer != 'y':
            print("Aborting!")
            return
        pgutil.emptydb(self.m2ee.config)
    def do_unpack(self, args):
        """Unpack a model upload archive into the model/ and web/ locations.

        Requires a stopped application; asks for confirmation unless yolo
        mode is active.
        """
        if not args:
            logger.error("unpack needs the name of a model upload zipfile in "
                         "%s as argument" %
                         self.m2ee.config.get_model_upload_path())
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.error("The application process is still running, refusing "
                         "to unpack a new application model right now.")
            return
        logger.info("This command will replace the contents of the model/ and "
                    "web/ locations, using the files extracted from the "
                    "archive")
        answer = ('y' if self.yolo_mode
                  else raw_input("Continue? (y)es, (N)o? "))
        if answer != 'y':
            logger.info("Aborting!")
            return
        self.m2ee.unpack(args)
def complete_unpack(self, text, line, begidx, endidx):
# these complete functions seem to eat exceptions, which is very bad
# behaviour if anything here throws an excaption, you just won't get
# completion, without knowing why
model_upload_path = self.m2ee.config.get_model_upload_path()
logger.trace("complete_unpack: Looking for %s in %s" %
(text, model_upload_path))
return [f for f in os.listdir(model_upload_path)
if os.path.isfile(os.path.join(model_upload_path, f))
and f.startswith(text)
and (f.endswith(".zip") or f.endswith(".mda"))]
def do_check_constants(self, args):
constants_to_use, default_constants, obsolete_constants = self.m2ee.config.get_constants()
if len(default_constants) > 0:
logger.info('Missing constant definitions (model defaults will be used):')
for name in sorted(default_constants.keys()):
logger.info('- %s' % name)
else:
logger.info('All required constant definitions have explicit definitions.')
if len(obsolete_constants) > 0:
logger.info('Constants defined but not needed by the application:')
for name in sorted(obsolete_constants.keys()):
logger.info('- %s' % name)
    def do_log(self, args):
        """Toggle following the application log file on the console.

        Spawns `tail -F` on the configured logfile; running the command
        again stops the tail process (via _cleanup_logging).
        """
        if self._cleanup_logging():
            return
        logfile = self.m2ee.config.get_logfile()
        if not logfile:
            logger.warn("logfile location is not specified")
            return
        print("This command will start printing log information from the "
              "application right in the middle of all of the other output on "
              "your screen. This can be confusing, especially when you're "
              "typing something and everything gets messed up by the logging. "
              "Issuing the log command again will turn off logging output.")
        answer = ('y' if self.yolo_mode
                  else raw_input("Do you want to start log output (y/N): "))
        if answer == 'y':
            cmd = ("tail", "-F", logfile)
            proc = subprocess.Popen(cmd)
            self.m2ee._logproc = proc
            # Prefix the prompt so it is obvious log output is interleaved.
            self.prompt = "LOG %s" % self._default_prompt
def do_loglevel(self, args):
try:
args = shlex.split(args)
except ValueError as ve:
logger.error("Input cannot be parsed: %s" % ve.message)
return
if len(args) == 3:
(subscriber, node, level) = args
self._set_log_level(subscriber, node, level)
else:
if len(args) == 0:
self._get_log_levels()
print("To adjust loglevels, use: loglevel <subscribername> "
"<lognodename> <level>")
print("Available levels: NONE, CRITICAL, ERROR, WARNING, INFO, "
"DEBUG, TRACE")
    def _get_log_levels(self):
        """Print the current loglevel of every subscriber/node pair, sorted."""
        log_levels = self.m2ee.get_log_levels()
        print("Current loglevels:")
        log_subscribers = []
        for (subscriber_name, node_names) in log_levels.items():
            for (node_name, subscriber_level) in node_names.items():
                log_subscribers.append("%s %s %s" %
                                       (subscriber_name,
                                        node_name,
                                        subscriber_level))
        log_subscribers.sort()
        print("\n".join(log_subscribers))
    def _set_log_level(self, subscriber, node, level):
        """Set the level of one log node (level is upper-cased first).

        Re-raises M2EEAdminException after printing a hint, since the admin
        API rejects unknown (e.g. wrongly-cased) subscriber/node names.
        """
        level = level.upper()
        try:
            self.m2ee.set_log_level(subscriber, node, level)
            logger.info("Loglevel for %s set to %s" % (node, level))
        except m2ee.client.M2EEAdminException as e:
            print("Remember, all parameters are case sensitive")
            raise e
    def do_show_current_runtime_requests(self, args):
        """Print the action stack of currently running runtime requests."""
        feedback = self.m2ee.client.get_current_runtime_requests()
        if len(feedback) == 0:
            logger.info("There are no currently running runtime requests.")
        else:
            print("Current running Runtime Requests:")
            print(yaml.safe_dump(feedback, default_flow_style=False))
    def do_show_all_thread_stack_traces(self, args):
        """Print the stack traces of all JVM threads as yaml."""
        feedback = self.m2ee.client.get_all_thread_stack_traces()
        print("Current JVM Thread Stacktraces:")
        print(yaml.safe_dump(feedback, default_flow_style=False))
    def do_interrupt_request(self, args):
        """Ask the runtime to cancel the request with the given request id."""
        if args == "":
            logger.error("This function needs a request id as parameter")
            logger.error("Use show_current_runtime_requests to view currently "
                         "running requests")
            return
        feedback = self.m2ee.client.interrupt_request({"request_id": args})
        if feedback["result"] is False:
            logger.error("A request with ID %s was not found" % args)
        else:
            logger.info("An attempt to cancel the running action was "
                        "made.")
    def do_nodetach(self, args):
        """Make subsequent starts keep the JVM process in the foreground."""
        self.nodetach = True
        logger.info("Setting nodetach, application process will not run in the background.")
    def do_exit(self, args):
        """Exit m2ee."""
        return self._exit()
    def do_quit(self, args):
        """Exit m2ee (alias of exit)."""
        return self._exit()
    def do_EOF(self, args):
        """Exit m2ee on Ctrl-D; echo 'exit' so the prompt line is closed."""
        print("exit")
        return self._exit()
    def _exit(self):
        """Return -1 to end the command loop, or None (keep running) while
        an attached, non-detached application process is still alive."""
        if self.m2ee.runner.check_attached_proc():
            logger.warning("There is still an attached application process running. "
                           "Stop it first.")
            return None
        return -1
    def do_download_runtime(self, args):
        """Download and unpack a Mendix Runtime distribution.

        Uses the version given as argument, or the version required by the
        currently unpacked application model when no argument is given.
        """
        if args:
            mxversion = m2ee.version.MXVersion(args)
        else:
            mxversion = self.m2ee.config.get_runtime_version()
        if mxversion is None:
            logger.info("You did not specify a Mendix Runtime version to "
                        "download, and no current unpacked application "
                        "model is available to determine the version from. "
                        "Specify a version number or use unpack first.")
            return
        if self.m2ee.config.lookup_in_mxjar_repo(str(mxversion)):
            logger.info("The Mendix Runtime for version %s is already "
                        "installed. If you want to download another Runtime "
                        "version, specify the version number as argument to "
                        "download_runtime." % mxversion)
            return
        self.m2ee.download_and_unpack_runtime(mxversion)
    def do_cleanup_runtimes(self, args):
        """Remove all downloaded runtimes except the one currently in use."""
        self.m2ee.cleanup_runtimes_except([])
    def do_cleanup_runtimes_except(self, args):
        """Like cleanup_runtimes, but also keep the versions listed as
        whitespace-separated arguments."""
        self.m2ee.cleanup_runtimes_except(args.split())
    def complete_cleanup_runtimes_except(self, text, line, begidx, endidx):
        """Tab completion: installed runtime versions not already typed."""
        words = line[:len(line)-len(text)].split()
        found_versions = self.m2ee.list_installed_runtimes()
        return ["%s " % version for version in found_versions
                if version.startswith(text)
                and version not in words[1:]]
    def _cleanup_logging(self):
        """Stop a running `tail` log follower, if any.

        Returns True when a tail process was active (do_log uses this to
        implement toggling), False otherwise.  Also registered as an
        atexit handler in main().
        """
        # atexit
        if self.m2ee._logproc:
            logger.debug("Stopping log output...")
            self.prompt = self._default_prompt
            # poll() returns None while the child is still running.
            # NOTE(review): a child that already exited with status 0 also
            # makes this condition true — confirm the extra kill is harmless.
            if not self.m2ee._logproc.poll():
                os.kill(self.m2ee._logproc.pid, signal.SIGTERM)
            self.m2ee._logproc = None
            return True
        return False
    def _who(self, limitint=None):
        """Log the names of logged-in users and return the total count.

        limitint, when given, caps how many names the runtime returns.
        """
        limit = {}
        if limitint is not None:
            limit = {"limit": limitint}
        feedback = self.m2ee.client.get_logged_in_user_names(limit)
        logger.info("Logged in users: (%s) %s" %
                    (feedback['count'], feedback['users']))
        return feedback['count']
    def precmd(self, line):
        """cmd.Cmd hook: reload changed yaml configuration before every
        command; configuration errors are logged, not raised."""
        try:
            self.m2ee.reload_config_if_changed()
        except m2ee.exceptions.M2EEException as e:
            logger.critical(e)
            return line
        if line:
            logger.trace("Executing command: %s" % line)
        return line
    def cmdloop_handle_ctrl_c(self):
        """Run the command loop, restarting it whenever Ctrl-C is pressed
        so an interrupt aborts the current input, not the whole shell."""
        quit = False
        while quit is not True:
            try:
                self.cmdloop()
                quit = True
            except KeyboardInterrupt:
                sys.stdout.write('\n')
    def onecmd(self, line):
        """cmd.Cmd hook: run one command, translating the known m2ee admin
        API exceptions into log messages instead of tracebacks."""
        try:
            return super(CLI, self).onecmd(line)
        except m2ee.client.M2EEAdminNotAvailable:
            (pid_alive, m2ee_alive) = self.m2ee.check_alive()
            if not pid_alive and not m2ee_alive:
                logger.info("The application process is not running.")
        except m2ee.client.M2EEAdminException as e:
            logger.error(e)
        except m2ee.client.M2EEAdminHTTPException as e:
            logger.error(e)
        except m2ee.client.M2EERuntimeNotFullyRunning as e:
            logger.error(e)
        except m2ee.client.M2EEAdminTimeout as e:
            logger.error(e)
        except m2ee.exceptions.M2EEException as e:
            logger.error(e)
    def unchecked_onecmd(self, line):
        """Run one command without the exception translation above; used
        for single-command (non-interactive) invocation from main()."""
        super(CLI, self).onecmd(line)
    # if the emptyline function is not defined, Cmd will automagically
    # repeat the previous command given, and that's not what we want
    def emptyline(self):
        """Do nothing on an empty input line."""
        pass
def completenames(self, text, *ignored):
do_text = "do_%s" % text
suggestions = [a[3:] for a in self.get_names() if a.startswith(do_text)]
if len(suggestions) == 1 \
and "complete_%s" % suggestions[0] in self.get_names():
suggestions[0] = "%s " % suggestions[0]
return suggestions
    def do_help(self, args):
        """Print the command overview; `help expert` additionally shows
        expert/debugging commands, and PostgreSQL commands are listed only
        when a PostgreSQL database is configured."""
        print("""Welcome to m2ee, the Mendix Runtime helper tools.
Available commands:
 unpack - unpack an uploaded Mendix Deployment Archive from data/model-upload
 download_runtime - download a missing Mendix Runtime distribution
 start - try starting the application using the unpacked deployment files
 stop - stop the application
 restart - restart the application
 status - display Mendix Runtime status (is the application running?
 create_admin_user - create first user when starting with an empty database
 update_admin_user - reset the password of an application user
 who, w - show currently logged in users
 log - follow live logging from the application
 loglevel - view and configure loglevels
 about - show Mendix Runtime version information
 check_constants - check for missing or unneeded constant definitions
 enable_debugger - enable remote debugger API
 disable_debugger - disable remote debugger API
 show_debugger_status - show whether debugger is enabled or not
 show_current_runtime_requests - show action stack of current running requests
 interrupt_request - cancel a running runtime request
 show_license_information - show details about current mendix license key
 show_cache_statistics - show details about the runtime object cache
 cleanup_runtimes - clean up downloaded Mendix Runtime versions, except the
     one currently in use
 cleanup_runtimes_except [<version> <version> ...] - clean up downloaded Mendix
     Runtime versions, except the one currently in use and other ones specified
 exit, quit, <ctrl>-d - exit m2ee
""")
        if self.m2ee.config.is_using_postgresql():
            print("""When using PostgreSQL, you can also use:
 psql - start the postgresql shell
 dumpdb - create a database dump into the data/database folder
 emptydb - drop all tables and sequences from the database
 restoredb - restore a database dump from the data/database folder
""")
        if args == 'expert':
            print("""Advanced commands:
 statistics - show all application statistics that can be used for monitoring
 show_all_thread_stack_traces - show all low-level JVM threads with stack trace
 check_health - manually execute health check
Extra commands you probably don't need:
 debug - dive into a local python debug session inside this program
 dump_config - dump the yaml configuration information
 nodetach - do not detach the application process after starting
 reload - reload configuration from yaml files (this is done automatically)
 munin_config - configure option for the built-in munin plugin
 munin_values - show monitoring output gathered by the built-in munin plugin
 nagios - execute the built-in nagios plugin (will exit m2ee)
 activate_license - DANGEROUS - replace/set license key
""")
        print("Hint: use tab autocompletion for commands!")
        if args != 'expert':
            print("Use help expert to show expert and debugging commands")
def start_console_logging(level):
    """Route log records below ERROR to stdout and ERROR+ to stderr.

    Sets the root logger threshold to *level* and installs two stream
    handlers with complementary level filters.
    """
    root = logging.getLogger()
    root.setLevel(level)
    formatter = logging.Formatter("%(levelname)s: %(message)s")

    class M2EELogFilter(logging.Filter):
        """Pass records at-or-above (ge=True) or strictly below (ge=False)
        a given level."""
        def __init__(self, level, ge):
            self.level = level
            self.ge = ge

        def filter(self, record):
            at_or_above = record.levelno >= self.level
            return at_or_above if self.ge else not at_or_above

    # stdout first, stderr second, matching the original handler order.
    for stream, wants_errors in ((sys.stdout, False), (sys.stderr, True)):
        handler = logging.StreamHandler(stream)
        handler.setFormatter(formatter)
        handler.addFilter(M2EELogFilter(logging.ERROR, wants_errors))
        root.addHandler(handler)
def main():
    """Parse command line options, set up console logging and run the CLI.

    With positional arguments a single command is executed
    non-interactively; otherwise an interactive command loop is started.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        action="append",
        dest="yaml_files"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        default=0,
        help="increase verbosity of output (-vv to be even more verbose)"
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="count",
        dest="quiet",
        default=0,
        help="decrease verbosity of output (-qq to be even more quiet)"
    )
    parser.add_argument(
        "-y",
        "--yolo",
        action="store_true",
        default=False,
        dest="yolo_mode",
        help="automatically answer all questions to run as non-interactively as possible"
    )
    parser.add_argument(
        "onecmd",
        nargs='*',
    )
    args = parser.parse_args()

    # how verbose should we be? see
    # http://docs.python.org/release/2.7/library/logging.html#logging-levels
    # BUG FIX: verbosity used to start at args.quiet - args.verbose and then
    # add/subtract the same counts again, making every -v/-q count double
    # (steps of 20 instead of one logging level of 10).
    verbosity = 0
    if args.quiet:
        verbosity = verbosity + args.quiet
    if args.verbose:
        verbosity = verbosity - args.verbose
    verbosity = verbosity * 10 + 20
    if verbosity > 50:
        verbosity = 100
    if verbosity < 5:
        verbosity = 5
    start_console_logging(verbosity)
    try:
        cli = CLI(
            yaml_files=args.yaml_files,
            yolo_mode=args.yolo_mode,
        )
    except m2ee.exceptions.M2EEException as e:
        logger.critical(e)
        sys.exit(1)
    # Make sure a running `tail` log follower is stopped on exit.
    atexit.register(cli._cleanup_logging)
    if args.onecmd:
        try:
            cli.unchecked_onecmd(' '.join(args.onecmd))
        except (m2ee.client.M2EEAdminException,
                m2ee.client.M2EEAdminHTTPException,
                m2ee.client.M2EERuntimeNotFullyRunning,
                m2ee.client.M2EEAdminTimeout,
                m2ee.exceptions.M2EEException) as e:
            logger.error(e)
            sys.exit(1)
        except m2ee.client.M2EEAdminNotAvailable:
            pid_alive, m2ee_alive = cli.m2ee.check_alive()
            if not pid_alive and not m2ee_alive:
                logger.info("The application process is not running.")
                sys.exit(0)
            sys.exit(1)
    else:
        logger.info("Application Name: %s" % cli.m2ee.config.get_app_name())
        cli.onecmd('status')
        cli.cmdloop_handle_ctrl_c()
# Script entry point: run the m2ee shell (or a single command).
if __name__ == '__main__':
    main()
| 40.52236 | 98 | 0.578248 |
from __future__ import print_function
import argparse
import atexit
import cmd
import datetime
import getpass
import logging
import os
import pwd
import random
import shlex
import signal
import string
import subprocess
import sys
import yaml
from m2ee import pgutil, M2EE, client_errno
import m2ee
# NOTE(review): the real m2ee package ships a logger with extra levels
# (logger.trace is called below); the plain logging module has no trace()
# — confirm this module-as-logger substitution is intended.
logger = logging
# When stdout is not a terminal (piped/redirected), wrap it so unicode is
# encoded with the locale's preferred encoding instead of raising
# UnicodeEncodeError.
if not sys.stdout.isatty():
    import codecs
    import locale
    sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
# Python 3 compatibility: raw_input was renamed to input.
try:
    raw_input
except NameError:
    raw_input = input
class CLI(cmd.Cmd, object):
    """Interactive m2ee shell.

    Each do_* method implements one command; cmd.Cmd provides the command
    loop, dispatch and tab-completion plumbing.
    """
    def __init__(self, yaml_files=None, yolo_mode=False):
        """Set up the M2EE backend and the interactive prompt.

        yolo_mode makes every interactive question answer itself with
        'yes' so commands can run non-interactively.
        """
        logger.debug('Using m2ee-tools version %s' % m2ee.__version__)
        cmd.Cmd.__init__(self)
        self.m2ee = M2EE(yaml_files=yaml_files)
        self.yolo_mode = yolo_mode
        self.prompt_username = pwd.getpwuid(os.getuid())[0]
        self._default_prompt = "m2ee(%s): " % self.prompt_username
        self.prompt = self._default_prompt
        self.nodetach = False
    def do_restart(self, args):
        """Stop the application and, if that succeeded, start it again."""
        if self._stop():
            self._start()
    def do_stop(self, args):
        """Stop the application."""
        self._stop()
    def do_start(self, args):
        """Start the application."""
        self._start()
def _stop(self):
logger.debug("Trying to stop the application.")
stopped = self.m2ee.stop()
if stopped:
return True
answer = None
while answer not in ('y', 'n'):
answer = ('y' if self.yolo_mode
else raw_input("Do you want to try to signal the JVM "
"process to stop immediately? (y)es, (n)o? "))
if answer == 'y':
stopped = self.m2ee.terminate()
if stopped:
return True
elif answer == 'n':
logger.info("Doing nothing, use stop again to check if the "
"process finally disappeared...")
return False
else:
print("Unknown option %s" % answer)
answer = None
while answer not in ('y', 'n'):
answer = ('y' if self.yolo_mode
else raw_input("Do you want to kill the JVM process? "
"(y)es, (n)o? "))
if answer == 'y':
stopped = self.m2ee.kill()
if stopped:
return True
elif answer == 'n':
logger.info("Doing nothing, use stop again to check if the "
"process finally disappeared...")
return False
else:
print("Unknown option %s" % answer)
return False
    def _start(self):
        """Start the appcontainer, push configuration, then start the runtime,
        interactively resolving reported startup problems.

        Handles: missing database, out-of-date database structure, missing
        microflow constants and admin password reset requests.  When a
        problem cannot be solved, the process is stopped again.

        Raises M2EEException when no application model or runtime
        distribution is available.
        """
        if not self.m2ee.config.all_systems_are_go():
            raise m2ee.exceptions.M2EEException(
                "The application cannot be started because no application "
                "model is present, or because of other previous errors."
            )
        if not self.m2ee.config.get_runtime_path():
            raise m2ee.exceptions.M2EEException(
                "It appears that the Mendix Runtime version which has to be "
                "used for your application is not present yet. You can try "
                "downloading it using the download_runtime command."
            )
        self.m2ee.start_appcontainer(detach=not self.nodetach)
        try:
            self.m2ee.send_runtime_config()
        except m2ee.client.M2EEAdminException as e:
            logger.error("Sending configuration failed: %s" % e.cause)
            logger.error("You'll have to fix the configuration and run start again...")
            self._stop()
            return
        abort = False
        fully_started = False
        params = {}
        # Retry start_runtime until it succeeds or the reported problem
        # cannot be solved interactively.
        while not (fully_started or abort):
            try:
                self.m2ee.start_runtime(params)
                fully_started = True
            except m2ee.client.M2EEAdminException as e:
                logger.error(e)
                if e.result == client_errno.start_NO_EXISTING_DB:
                    if self.yolo_mode:
                        # This call tries to create a database and immediately execute
                        # ddl commands.
                        self.m2ee.client.execute_ddl_commands()
                    else:
                        abort = True
                elif e.result == client_errno.start_INVALID_DB_STRUCTURE:
                    answer = self._handle_ddl_commands()
                    if answer == 'a':
                        abort = True
                elif e.result == client_errno.start_MISSING_MF_CONSTANT:
                    logger.error("You'll have to add the constant definitions "
                                 "to the configuration in the "
                                 "MicroflowConstants section.")
                    abort = True
                elif e.result == client_errno.start_ADMIN_1:
                    users = e.feedback['users']
                    if self.yolo_mode:
                        self._handle_admin_1_yolo(users)
                    else:
                        answer = self._handle_admin_1(users)
                        if answer == 'a':
                            abort = True
                else:
                    abort = True
        if abort:
            self._stop()
    def _handle_ddl_commands(self):
        """Ask what to do with pending database structure (DDL) changes.

        Returns the chosen option: 'v'iew (loops back), 's'ave, 'e'xecute
        (also saves) or 'a'bort.  Yolo mode auto-answers 'e'.
        """
        feedback = self.m2ee.client.get_ddl_commands({"verbose": True})
        answer = None
        while answer not in ('v', 's', 'e', 'a'):
            answer = ('e' if self.yolo_mode
                      else raw_input("Do you want to (v)iew queries, (s)ave them to "
                                     "a file, (e)xecute and save them, or (a)bort: "))
            if answer == 'a':
                pass
            elif answer == 'v':
                print('\n'.join(feedback['ddl_commands']))
                answer = None
            elif answer in ('e', 's'):
                ddl_commands = feedback['ddl_commands']
                self.m2ee.save_ddl_commands(ddl_commands)
                if answer == 'e':
                    self.m2ee.client.execute_ddl_commands()
            else:
                print("Unknown option %s" % answer)
        return answer
    def _handle_admin_1(self, users):
        """Interactively reset the password of each listed user whose
        password equals '1' (runtime start_ADMIN_1 condition).

        Returns the chosen option: 'c'hange or 'a'bort.
        """
        answer = None
        while answer not in ('c', 'a'):
            answer = raw_input("Do you want to (c)hange passwords or "
                               "(a)bort: ")
            if answer == 'a':
                pass
            elif answer == 'c':
                for username in users:
                    changed = False
                    while not changed:
                        newpw1 = getpass.getpass("Type new password for user "
                                                 "%s: " % username)
                        newpw2 = getpass.getpass("Type new password for user "
                                                 " %s again: " % username)
                        if newpw1 != newpw2:
                            print("The passwords are not equal!")
                        else:
                            try:
                                self.m2ee.client.update_admin_user(
                                    {"username": username, "password": newpw1})
                                changed = True
                            except m2ee.client.M2EEAdminException as e:
                                logger.error(e)
            else:
                print("Unknown option %s" % answer)
        return answer
    def _handle_admin_1_yolo(self, users):
        """Non-interactive variant of _handle_admin_1: reset each listed
        user's password to a freshly generated one and log it."""
        for username in users:
            newpasswd = self._generate_password()
            logger.info("Changing password for user %s to %s" %
                        (username, newpasswd))
            self.m2ee.client.update_admin_user({
                "username": username,
                "password": newpasswd,
            })
def _generate_password(self):
newpasswd_list = []
for choosefrom in [
string.ascii_lowercase,
string.ascii_uppercase,
string.digits,
string.punctuation,
]:
newpasswd_list.extend([random.choice(choosefrom)
for _ in range(random.randint(10, 20))])
random.shuffle(newpasswd_list)
return ''.join(newpasswd_list)
    def do_create_admin_user(self, args=None):
        """Create an administrative user account in a running application,
        prompting twice for the new password."""
        if not self.m2ee.client.ping():
            logger.warn("The application process needs to be running to "
                        "create a user object in the application.")
            return
        print("This option will create an administrative user account, using "
              "the preset username and user role settings.")
        newpw1 = getpass.getpass("Type new password for this user: ")
        newpw2 = getpass.getpass("Type new password for this user again: ")
        if newpw1 != newpw2:
            print("The passwords are not equal!")
        else:
            self.m2ee.client.create_admin_user({"password": newpw1})
    def do_update_admin_user(self, args=None):
        """Reset the password of an existing application user, prompting
        for the user name and twice for the new password."""
        if not self.m2ee.client.ping():
            logger.warn("The application process needs to be running to "
                        "change user objects in the application.")
            return
        print("Using this function you can reset the password of an "
              "administrative user account.")
        username = raw_input("User name: ")
        newpw1 = getpass.getpass("Type new password for user %s: " % username)
        newpw2 = getpass.getpass("Type new password for user %s again: " %
                                 username)
        if newpw1 != newpw2:
            print("The passwords are not equal!")
        else:
            self.m2ee.client.update_admin_user({"username": username, "password": newpw1})
    def do_debug(self, args):
        """Drop into an interactive Python session inside this process,
        with the local scope (including self) available."""
        answer = raw_input("This command will throw you into a local python "
                           "debug session inside the M2EE object! Continue "
                           "(y/N)?")
        if answer == 'y':
            import code
            code.interact(local=locals())
    def do_status(self, args):
        """Show runtime status; when running, also report critical log
        message count and up to 10 logged-in users."""
        feedback = self.m2ee.client.runtime_status(timeout=3)
        status = feedback['status']
        logger.info("The application process is running, the MxRuntime has status: %s" % status)
        if status != 'running':
            return
        critlist = self.m2ee.client.get_critical_log_messages()
        if len(critlist) > 0:
            logger.error("%d critical error(s) were logged. Use show_critical"
                         "_log_messages to view them." % len(critlist))
        max_show_users = 10
        total_users = self._who(max_show_users)
        if total_users > max_show_users:
            logger.info("Only showing %s logged in users. Use who to see a "
                        "complete list." % max_show_users)
def do_show_critical_log_messages(self, args):
errors = self.m2ee.client.get_critical_log_messages()
if len(errors) == 0:
logger.info("No messages were logged to a critical loglevel since "
"starting the application.")
return
for error in errors:
errorline = []
if 'message' in error and error['message'] != '':
errorline.append("- %s" % error['message'])
if 'cause' in error and error['cause'] != '':
errorline.append("- Caused by: %s" % error['cause'])
if len(errorline) == 0:
errorline.append("- [No message or cause was logged]")
errorline.insert(
0,
datetime.datetime.fromtimestamp(error['timestamp'] / 1000)
.strftime("%Y-%m-%d %H:%M:%S")
)
print(' '.join(errorline))
    def do_check_health(self, args):
        """Run the configured health check microflow and report the result
        (healthy, sick with diagnosis, or not configured)."""
        feedback = self.m2ee.client.check_health()
        if feedback['health'] == 'healthy':
            logger.info("Health check microflow says the application is healthy.")
        elif feedback['health'] == 'sick':
            logger.warning("Health check microflow says the application "
                           "is sick: %s" % feedback['diagnosis'])
        elif feedback['health'] == 'unknown':
            logger.info("Health check microflow is not configured, no "
                        "health information available.")
        else:
            logger.error("Unexpected health check status: %s" % feedback['health'])
    def do_statistics(self, args):
        """Dump combined runtime and server statistics as yaml."""
        stats = self.m2ee.client.runtime_statistics()
        stats.update(self.m2ee.client.server_statistics())
        print(yaml.safe_dump(stats, default_flow_style=False))
    def do_show_cache_statistics(self, args):
        """Dump runtime object cache statistics as yaml."""
        stats = self.m2ee.client.cache_statistics()
        print(yaml.safe_dump(stats, default_flow_style=False))
    def do_munin_config(self, args):
        """Print configuration output for the built-in munin plugin."""
        m2ee.munin.print_config(
            self.m2ee,
            self.prompt_username,
        )
    def do_munin_values(self, args):
        """Print monitoring values gathered by the built-in munin plugin."""
        m2ee.munin.print_values(
            self.m2ee,
            self.prompt_username,
        )
    def do_nagios(self, args):
        """Run the built-in nagios check; exits m2ee with its status code."""
        logger.info("The nagios plugin will exit m2ee after running, this is "
                    "by design, don't report it as bug.")
        # TODO: implement as separate program after libraryfying m2ee
        sys.exit(m2ee.nagios.check(self.m2ee.runner, self.m2ee.client))
    def do_about(self, args):
        """Print m2ee-tools and runtime version/copyright information."""
        print('Using m2ee-tools version %s' % m2ee.__version__)
        feedback = self.m2ee.client.about()
        print("Using %s version %s" % (feedback['name'], feedback['version']))
        print(feedback['copyright'])
        # model_version is only reported by runtime 4.4 and later.
        if self.m2ee.config.get_runtime_version() >= 4.4:
            if 'model_version' in feedback:
                print('Model version: %s' % feedback['model_version'])
    def do_show_license_information(self, args):
        """Show details of the currently active Mendix license, or the
        server ID when the environment is unlicensed."""
        feedback = self.m2ee.client.get_license_information()
        if 'license' in feedback:
            logger.debug(yaml.safe_dump(feedback['license'],
                                        allow_unicode=True))
            import copy
            # _print_license pops fields from the dict, so hand it a copy.
            licensecopy = copy.deepcopy(feedback['license'])
            self._print_license(licensecopy)
        elif 'license_id' in feedback:
            print("Unlicensed environment.")
            print("Server ID: %s" % feedback['license_id'])
        else:
            print("Unlicensed environment.")
    def _print_license(self, licensecopy):
        """Pretty-print the known license fields, then dump the leftovers.

        Mutates licensecopy (fields are pop()ed) — callers pass a copy.
        """
        print("Server ID: %s" % licensecopy.pop('LicenseID', 'Unknown'))
        print("License Type: %s" % licensecopy.pop('LicenseType', 'Unknown'))
        if 'ExpirationDate' in licensecopy:
            # ExpirationDate is in milliseconds since the epoch.
            print("Expiration Date: %s" %
                  datetime.datetime.fromtimestamp(
                      licensecopy.pop('ExpirationDate') / 1000
                  )
                  .strftime("%a, %d %b %Y %H:%M:%S %z")
                  .rstrip())
        print("Runtime Mode: %s" % licensecopy.pop('RuntimeMode', 'Unknown'))
        print("Company: %s" % licensecopy.pop('Company', 'Unknown'))
        limitations = licensecopy.pop('UserLimitations', None)
        separate_anonymous = licensecopy.pop('SeparateAnonymousUsers', True)
        if limitations is not None:
            print("License Limitations:")
            for limitation in limitations:
                self._print_license_limitation(limitation, separate_anonymous)
        # NOTE(review): a single leftover field is silently dropped by this
        # > 1 threshold — confirm whether > 0 was intended.
        if len(licensecopy) > 1:
            print(yaml.safe_dump(licensecopy, allow_unicode=True))
    def _print_license_limitation(self, limitation, separate_anonymous):
        """Print one UserLimitations entry in a human-readable form.

        separate_anonymous controls whether anonymous users are counted
        separately (ConcurrentAnonymous entries) or folded into the named
        user wording.
        """
        if limitation['LimitationType'] == 'Named':
            if limitation['AmountType'] == 'Unlimited':
                print("- Unlimited named %suser accounts allowed." %
                      ('' if separate_anonymous else "and anonymous "))
            else:
                print(" - %s named user account%s allowed" %
                      (limitation['NumberOfAllowedUsers'],
                       's' if limitation['NumberOfAllowedUsers'] != 1 else ''))
        elif limitation['LimitationType'] == 'Concurrent':
            if limitation['AmountType'] == 'Unlimited':
                print("- Unlimited concurrent named %suser sessions allowed."
                      % ("" if separate_anonymous else "and anonymous "))
            else:
                print("- %s concurrent named %suser session%s allowed." %
                      (
                          limitation['NumberOfAllowedUsers'],
                          '' if separate_anonymous else "and anonymous ",
                          ('s' if limitation['NumberOfAllowedUsers'] != 1
                           else '')))
        elif (limitation['LimitationType'] == 'ConcurrentAnonymous' and
                separate_anonymous):
            if limitation['AmountType'] == 'Unlimited':
                print("- Unlimited concurrent anonymous user sessions "
                      "allowed.")
            else:
                print("- %s concurrent anonymous session%s allowed." %
                      (
                          limitation['NumberOfAllowedUsers'],
                          ('s' if limitation['NumberOfAllowedUsers'] != 1
                           else '')))
    def do_activate_license(self, args):
        """Set or replace the license key of the running application.

        The key is taken from the argument or prompted for.  Warns loudly
        on runtimes before 4.1, which accept invalid keys without checking.
        """
        self.m2ee.client.require_action("set_license")
        print("The command activate_license will set the license key used in "
              "this application.")
        runtime_version = m2ee.version.MXVersion(self.m2ee.client.about()['version'])
        if runtime_version < 4.1:
            print("Mendix Runtime versions before 4.1 do not check the "
                  "submitted license key for validity, so incorrect input "
                  "will un-license your Mendix application without warning! "
                  "After setting the license, use show_license_information "
                  "to check the active license. Also... after setting the "
                  "license in versions before Mendix 4.1 you will need to "
                  "restart the application again to be sure it is fully "
                  "activated.")
            answer = raw_input("Do you want to continue anyway? (type YES if "
                               "you want to): ")
            if answer != 'YES':
                print("Aborting.")
                return
        if not args:
            license_key = raw_input("Paste your license key (a long text "
                                    "string without newlines) or empty input "
                                    "to abort: ")
        else:
            license_key = args
        if not license_key:
            print("Aborting.")
            return
        self.m2ee.client.set_license({'license_key': license_key})
def do_enable_debugger(self, args):
self.m2ee.client.require_action("enable_debugger")
if not args:
debugger_password = raw_input(
"Please enter the password to be used for remote debugger "
"access from the modeler, or leave blank to auto-generate "
"a password: ")
if not debugger_password:
debugger_password = ''.join(
random.choice(string.letters + string.digits)
for x in range(random.randint(20, 30)))
else:
debugger_password = args
self.m2ee.client.enable_debugger({'password': debugger_password})
logger.info("The remote debugger is now enabled, the password to "
"use is %s" % debugger_password)
logger.info("You can use the remote debugger option in the Mendix "
"Business Modeler to connect to the /debugger/ sub "
"url on your application (e.g. "
"https://app.example.com/debugger/). ")
    def do_disable_debugger(self, args):
        """Disable the remote debugger API."""
        self.m2ee.client.disable_debugger()
        logger.info("The remote debugger is now disabled.")
    def do_show_debugger_status(self, args):
        """Report whether the debugger is enabled, whether a session is
        connected, and how many microflows are paused."""
        feedback = self.m2ee.client.get_debugger_status()
        enabled = feedback['enabled']
        connected = feedback['client_connected']
        paused = feedback['number_of_paused_microflows']
        logger.info("The remote debugger is currently %s." %
                    ("enabled" if enabled else "disabled"))
        if connected:
            logger.info("A debugger session is connected.")
        elif enabled:
            logger.info("There is no connected debugger session.")
        if enabled and paused == 0:
            logger.info("There are no paused microflows.")
        elif paused == 1:
            logger.info("There is 1 paused microflow.")
        elif paused > 1:
            logger.info("There are %s paused microflows." % paused)
    def do_who(self, args):
        """Show currently logged-in users; an optional integer argument
        limits how many names are shown."""
        if args:
            try:
                limitint = int(args)
                self._who(limitint)
            except ValueError:
                logger.warn("Could not parse argument to an integer. Use a "
                            "number as argument to limit the amount of logged "
                            "in users shown.")
        else:
            self._who()
    def do_w(self, args):
        """Alias for who."""
        self.do_who(args)
    def do_reload(self, args):
        """Reload the yaml configuration files (normally done automatically
        before every command)."""
        logger.debug("Reloading configuration...")
        self.m2ee.reload_config()
    def do_dump_config(self, args):
        """Dump the merged yaml configuration."""
        self.m2ee.config.dump()
    def do_set_database_password(self, args):
        """Prompt for and store the database password in the configuration."""
        password = getpass.getpass("Database password: ")
        self.m2ee.config.set_database_password(password)
    def do_psql(self, args):
        """Start an interactive psql shell on the application database."""
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        pgutil.psql(self.m2ee.config)
def do_dumpdb(self, args):
if not self.m2ee.config.is_using_postgresql():
logger.error("Only PostgreSQL databases are supported right now.")
return
if len(args) > 0:
pgutil.dumpdb(self.m2ee.config, args)
else:
pgutil.dumpdb(self.m2ee.config)
    def do_restoredb(self, args):
        """Restore a database dump from the data/database folder.

        Refuses to run when allow_destroy_db is false, when the database is
        not PostgreSQL, or while the application is running.  Asks for
        confirmation unless yolo mode is active.
        """
        if not self.m2ee.config.allow_destroy_db():
            logger.error("Refusing to do a destructive database operation "
                         "because the allow_destroy_db configuration option "
                         "is set to false.")
            return
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        if not args:
            logger.error("restoredb needs the name of a dump file in %s as arg"
                         "ument" % self.m2ee.config.get_database_dump_path())
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.warn("The application is still running, refusing to "
                        "restore the database right now.")
            return
        database_name = self.m2ee.config.get_pg_environment()['PGDATABASE']
        answer = ('y' if self.yolo_mode
                  else raw_input("This command will restore this dump into database "
                                 "%s. Continue? (y)es, (N)o? " % database_name))
        if answer != 'y':
            logger.info("Aborting!")
            return
        pgutil.restoredb(self.m2ee.config, args)
    def complete_restoredb(self, text, line, begidx, endidx):
        """Tab completion: .backup files in the database dump directory."""
        if not self.m2ee.config.is_using_postgresql():
            return []
        database_dump_path = self.m2ee.config.get_database_dump_path()
        return [f for f in os.listdir(database_dump_path)
                if os.path.isfile(os.path.join(database_dump_path, f)) and
                f.startswith(text) and
                f.endswith(".backup")]
    def do_emptydb(self, args):
        """Drop all tables and sequences from the application database.

        Refuses to run when allow_destroy_db is false, when the database is
        not PostgreSQL, or while the application process is still alive.
        Asks for confirmation unless yolo mode is active.
        """
        if not self.m2ee.config.allow_destroy_db():
            logger.error("Refusing to do a destructive database operation "
                         "because the allow_destroy_db configuration option "
                         "is set to false.")
            return
        if not self.m2ee.config.is_using_postgresql():
            logger.error("Only PostgreSQL databases are supported right now.")
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.warn("The application process is still running, refusing "
                        "to empty the database right now.")
            return
        logger.info("This command will drop all tables and sequences in "
                    "database %s." %
                    self.m2ee.config.get_pg_environment()['PGDATABASE'])
        answer = ('y' if self.yolo_mode
                  else raw_input("Continue? (y)es, (N)o? "))
        if answer != 'y':
            print("Aborting!")
            return
        pgutil.emptydb(self.m2ee.config)
    def do_unpack(self, args):
        """Unpack a deployment archive from the model upload directory,
        replacing the current model/ and web/ contents.

        Refuses to run while the application process is alive; asks for
        confirmation unless yolo mode is active.
        """
        if not args:
            logger.error("unpack needs the name of a model upload zipfile in "
                         "%s as argument" %
                         self.m2ee.config.get_model_upload_path())
            return
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if pid_alive or m2ee_alive:
            logger.error("The application process is still running, refusing "
                         "to unpack a new application model right now.")
            return
        logger.info("This command will replace the contents of the model/ and "
                    "web/ locations, using the files extracted from the "
                    "archive")
        answer = ('y' if self.yolo_mode
                  else raw_input("Continue? (y)es, (N)o? "))
        if answer != 'y':
            logger.info("Aborting!")
            return
        self.m2ee.unpack(args)
    def complete_unpack(self, text, line, begidx, endidx):
        """Tab completion: .zip/.mda files in the model upload directory."""
        # these complete functions seem to eat exceptions, which is very bad
        # behaviour if anything here throws an excaption, you just won't get
        model_upload_path = self.m2ee.config.get_model_upload_path()
        logger.trace("complete_unpack: Looking for %s in %s" %
                     (text, model_upload_path))
        return [f for f in os.listdir(model_upload_path)
                if os.path.isfile(os.path.join(model_upload_path, f))
                and f.startswith(text)
                and (f.endswith(".zip") or f.endswith(".mda"))]
def do_check_constants(self, args):
    """Report constants that are missing from, or obsolete in, the config."""
    constants_to_use, default_constants, obsolete_constants = \
        self.m2ee.config.get_constants()
    if default_constants:
        logger.info('Missing constant definitions (model defaults will be used):')
        for constant_name in sorted(default_constants):
            logger.info('- %s' % constant_name)
    else:
        logger.info('All required constant definitions have explicit definitions.')
    if obsolete_constants:
        logger.info('Constants defined but not needed by the application:')
        for constant_name in sorted(obsolete_constants):
            logger.info('- %s' % constant_name)
def do_log(self, args):
    """Toggle live tailing of the application log file.

    If log output is already active, _cleanup_logging() stops it and we
    return; otherwise a `tail -F` subprocess is started and the prompt
    gets a LOG prefix so the state is visible.
    """
    if self._cleanup_logging():
        return
    logfile = self.m2ee.config.get_logfile()
    if not logfile:
        logger.warn("logfile location is not specified")
        return
    print("This command will start printing log information from the "
          "application right in the middle of all of the other output on "
          "your screen. This can be confusing, especially when you're "
          "typing something and everything gets messed up by the logging. "
          "Issuing the log command again will turn off logging output.")
    answer = ('y' if self.yolo_mode
              else raw_input("Do you want to start log output (y/N): "))
    if answer == 'y':
        cmd = ("tail", "-F", logfile)
        proc = subprocess.Popen(cmd)
        # remembered so _cleanup_logging() can SIGTERM it later
        self.m2ee._logproc = proc
        self.prompt = "LOG %s" % self._default_prompt
def do_loglevel(self, args):
    """Show or set runtime log levels.

    With exactly three arguments (subscriber, node, level) the level is
    changed; otherwise the current levels and usage help are printed.
    """
    try:
        args = shlex.split(args)
    except ValueError as ve:
        # ve.message: this codebase targets Python 2 (cf. raw_input usage)
        logger.error("Input cannot be parsed: %s" % ve.message)
        return
    if len(args) == 3:
        (subscriber, node, level) = args
        self._set_log_level(subscriber, node, level)
    else:
        if len(args) == 0:
            self._get_log_levels()
        print("To adjust loglevels, use: loglevel <subscribername> "
              "<lognodename> <level>")
        print("Available levels: NONE, CRITICAL, ERROR, WARNING, INFO, "
              "DEBUG, TRACE")
def _get_log_levels(self):
    """Print the current log level for every subscriber/node pair, sorted."""
    levels = self.m2ee.get_log_levels()
    print("Current loglevels:")
    lines = ["%s %s %s" % (subscriber_name, node_name, subscriber_level)
             for subscriber_name, node_names in levels.items()
             for node_name, subscriber_level in node_names.items()]
    lines.sort()
    print("\n".join(lines))
def _set_log_level(self, subscriber, node, level):
    """Set the log level (upper-cased) for one subscriber/node pair,
    re-raising admin API failures after a hint about case sensitivity."""
    normalized = level.upper()
    try:
        self.m2ee.set_log_level(subscriber, node, normalized)
    except m2ee.client.M2EEAdminException as e:
        print("Remember, all parameters are case sensitive")
        raise e
    logger.info("Loglevel for %s set to %s" % (node, normalized))
def do_show_current_runtime_requests(self, args):
    """Show the action stack of requests the runtime is handling right now."""
    requests = self.m2ee.client.get_current_runtime_requests()
    if not requests:
        logger.info("There are no currently running runtime requests.")
        return
    print("Current running Runtime Requests:")
    print(yaml.safe_dump(requests, default_flow_style=False))
def do_show_all_thread_stack_traces(self, args):
    """Dump the stack trace of every JVM thread as YAML."""
    traces = self.m2ee.client.get_all_thread_stack_traces()
    print("Current JVM Thread Stacktraces:")
    print(yaml.safe_dump(traces, default_flow_style=False))
def do_interrupt_request(self, args):
    """Attempt to cancel a running runtime request, identified by its id."""
    if args == "":
        logger.error("This function needs a request id as parameter")
        logger.error("Use show_current_runtime_requests to view currently "
                     "running requests")
        return
    feedback = self.m2ee.client.interrupt_request({"request_id": args})
    # result is False when no request with that id exists
    if feedback["result"] is False:
        logger.error("A request with ID %s was not found" % args)
    else:
        logger.info("An attempt to cancel the running action was "
                    "made.")
def do_nodetach(self, args):
    """Make subsequent starts keep the application process in the foreground."""
    self.nodetach = True
    logger.info("Setting nodetach, application process will not run in the background.")
def do_exit(self, args):
    """Exit the shell (refused while a process is attached, see _exit)."""
    return self._exit()
def do_quit(self, args):
    """Alias for exit."""
    return self._exit()
def do_EOF(self, args):
    """Handle Ctrl-D: echo "exit" for readability, then leave the shell."""
    print("exit")
    return self._exit()
def _exit(self):
    """Return -1 to stop the cmd loop, or None (keep looping) while an
    attached application process is still running."""
    if not self.m2ee.runner.check_attached_proc():
        return -1
    logger.warning("There is still an attached application process running. "
                   "Stop it first.")
    return None
def do_download_runtime(self, args):
    """Download and unpack a Mendix Runtime distribution.

    Uses the version given as argument, or falls back to the version the
    currently unpacked application model requires.  Does nothing when
    that version is already present in the mxjar repo.
    """
    if args:
        mxversion = m2ee.version.MXVersion(args)
    else:
        mxversion = self.m2ee.config.get_runtime_version()
    if mxversion is None:
        logger.info("You did not specify a Mendix Runtime version to "
                    "download, and no current unpacked application "
                    "model is available to determine the version from. "
                    "Specify a version number or use unpack first.")
        return
    if self.m2ee.config.lookup_in_mxjar_repo(str(mxversion)):
        logger.info("The Mendix Runtime for version %s is already "
                    "installed. If you want to download another Runtime "
                    "version, specify the version number as argument to "
                    "download_runtime." % mxversion)
        return
    self.m2ee.download_and_unpack_runtime(mxversion)
def do_cleanup_runtimes(self, args):
    """Remove all downloaded runtime versions except the one in use."""
    self.m2ee.cleanup_runtimes_except([])
def do_cleanup_runtimes_except(self, args):
    """Like cleanup_runtimes, but also keep the versions listed in args."""
    self.m2ee.cleanup_runtimes_except(args.split())
def complete_cleanup_runtimes_except(self, text, line, begidx, endidx):
    """Tab-complete installed runtime versions not already on the line."""
    already_given = line[:len(line) - len(text)].split()[1:]
    suggestions = []
    for version in self.m2ee.list_installed_runtimes():
        if version.startswith(text) and version not in already_given:
            suggestions.append("%s " % version)
    return suggestions
def _cleanup_logging(self):
    """Stop a running `tail -F` log subprocess, if any.

    Returns True when a log process was active and has been stopped,
    False otherwise.  Also registered as an atexit handler in main().
    """
    # atexit
    if self.m2ee._logproc:
        logger.debug("Stopping log output...")
        self.prompt = self._default_prompt
        # poll() returns None while the child is still running
        if not self.m2ee._logproc.poll():
            os.kill(self.m2ee._logproc.pid, signal.SIGTERM)
        self.m2ee._logproc = None
        return True
    return False
def _who(self, limitint=None):
    """Log the names of currently logged-in users and return their count.

    An optional integer limit caps how many names the admin API returns.
    """
    params = {"limit": limitint} if limitint is not None else {}
    feedback = self.m2ee.client.get_logged_in_user_names(params)
    logger.info("Logged in users: (%s) %s" %
                (feedback['count'], feedback['users']))
    return feedback['count']
def precmd(self, line):
    """cmd.Cmd hook: reload yaml configuration before every command."""
    try:
        self.m2ee.reload_config_if_changed()
    except m2ee.exceptions.M2EEException as e:
        logger.critical(e)
        return line
    if line:
        logger.trace("Executing command: %s" % line)
    return line
def cmdloop_handle_ctrl_c(self):
    """Run cmdloop(), restarting it after Ctrl-C instead of exiting."""
    while True:
        try:
            self.cmdloop()
        except KeyboardInterrupt:
            sys.stdout.write('\n')
        else:
            break
def onecmd(self, line):
    """Execute one command, translating m2ee API errors into log output.

    M2EEAdminNotAvailable is special-cased: when neither pid nor admin
    API responds, we report the process as not running.
    """
    try:
        return super(CLI, self).onecmd(line)
    except m2ee.client.M2EEAdminNotAvailable:
        (pid_alive, m2ee_alive) = self.m2ee.check_alive()
        if not pid_alive and not m2ee_alive:
            logger.info("The application process is not running.")
    except m2ee.client.M2EEAdminException as e:
        logger.error(e)
    except m2ee.client.M2EEAdminHTTPException as e:
        logger.error(e)
    except m2ee.client.M2EERuntimeNotFullyRunning as e:
        logger.error(e)
    except m2ee.client.M2EEAdminTimeout as e:
        logger.error(e)
    except m2ee.exceptions.M2EEException as e:
        logger.error(e)
def unchecked_onecmd(self, line):
    """Execute one command without onecmd()'s error handling, so
    exceptions propagate to the caller (used by main() for one-shot runs)."""
    super(CLI, self).onecmd(line)
# If emptyline() is not overridden, cmd.Cmd automagically repeats the
# previously entered command on an empty input line — not what we want.
def emptyline(self):
    """Do nothing on an empty input line."""
    pass
def completenames(self, text, *ignored):
    """Complete command names; when exactly one match also has argument
    completion (a complete_* sibling), append a space so completion
    continues into its arguments."""
    prefix = "do_%s" % text
    matches = [name[3:] for name in self.get_names() if name.startswith(prefix)]
    if len(matches) == 1 and "complete_%s" % matches[0] in self.get_names():
        matches[0] = "%s " % matches[0]
    return matches
def do_help(self, args):
    """Print the command overview; `help expert` adds advanced commands,
    and PostgreSQL-specific commands are listed only when applicable."""
    print("""Welcome to m2ee, the Mendix Runtime helper tools.
Available commands:
unpack - unpack an uploaded Mendix Deployment Archive from data/model-upload
download_runtime - download a missing Mendix Runtime distribution
start - try starting the application using the unpacked deployment files
stop - stop the application
restart - restart the application
status - display Mendix Runtime status (is the application running?
create_admin_user - create first user when starting with an empty database
update_admin_user - reset the password of an application user
who, w - show currently logged in users
log - follow live logging from the application
loglevel - view and configure loglevels
about - show Mendix Runtime version information
check_constants - check for missing or unneeded constant definitions
enable_debugger - enable remote debugger API
disable_debugger - disable remote debugger API
show_debugger_status - show whether debugger is enabled or not
show_current_runtime_requests - show action stack of current running requests
interrupt_request - cancel a running runtime request
show_license_information - show details about current mendix license key
show_cache_statistics - show details about the runtime object cache
cleanup_runtimes - clean up downloaded Mendix Runtime versions, except the
one currently in use
cleanup_runtimes_except [<version> <version> ...] - clean up downloaded Mendix
Runtime versions, except the one currently in use and other ones specified
exit, quit, <ctrl>-d - exit m2ee
""")
    if self.m2ee.config.is_using_postgresql():
        print("""When using PostgreSQL, you can also use:
psql - start the postgresql shell
dumpdb - create a database dump into the data/database folder
emptydb - drop all tables and sequences from the database
restoredb - restore a database dump from the data/database folder
""")
    if args == 'expert':
        print("""Advanced commands:
statistics - show all application statistics that can be used for monitoring
show_all_thread_stack_traces - show all low-level JVM threads with stack trace
check_health - manually execute health check
Extra commands you probably don't need:
debug - dive into a local python debug session inside this program
dump_config - dump the yaml configuration information
nodetach - do not detach the application process after starting
reload - reload configuration from yaml files (this is done automatically)
munin_config - configure option for the built-in munin plugin
munin_values - show monitoring output gathered by the built-in munin plugin
nagios - execute the built-in nagios plugin (will exit m2ee)
activate_license - DANGEROUS - replace/set license key
""")
    print("Hint: use tab autocompletion for commands!")
    if args != 'expert':
        print("Use help expert to show expert and debugging commands")
def start_console_logging(level):
    """Configure the root logger to send records below ERROR to stdout
    and records at ERROR or above to stderr, both with a simple
    "LEVEL: message" format."""

    class M2EELogFilter(logging.Filter):
        def __init__(self, level, ge):
            self.level = level
            # ge=True passes records at/above level; ge=False those below it
            self.ge = ge

        def filter(self, record):
            if self.ge:
                return record.levelno >= self.level
            return record.levelno < self.level

    root = logging.getLogger()
    root.setLevel(level)
    formatter = logging.Formatter("%(levelname)s: %(message)s")

    # stdout handler first (below ERROR), then stderr (ERROR and up)
    for stream, at_or_above in ((sys.stdout, False), (sys.stderr, True)):
        handler = logging.StreamHandler(stream)
        handler.setFormatter(formatter)
        handler.addFilter(M2EELogFilter(logging.ERROR, at_or_above))
        root.addHandler(handler)
def main():
    """Parse command-line options, configure logging and run the m2ee CLI.

    When positional arguments are given, that single command is executed
    non-interactively and the process exits with an appropriate status;
    otherwise an interactive shell is started.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        action="append",
        dest="yaml_files"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        default=0,
        help="increase verbosity of output (-vv to be even more verbose)"
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="count",
        dest="quiet",
        default=0,
        help="decrease verbosity of output (-qq to be even more quiet)"
    )
    parser.add_argument(
        "-y",
        "--yolo",
        action="store_true",
        default=False,
        dest="yolo_mode",
        help="automatically answer all questions to run as non-interactively as possible"
    )
    parser.add_argument(
        "onecmd",
        nargs='*',
    )
    args = parser.parse_args()

    # Translate the -q/-v counts into a python logging level: each -q makes
    # output quieter, each -v noisier.  The accumulator must start at zero
    # (the original assignment here had lost its left-hand side).
    verbosity = 0
    if args.quiet:
        verbosity = verbosity + args.quiet
    if args.verbose:
        verbosity = verbosity - args.verbose
    verbosity = verbosity * 10 + 20
    if verbosity > 50:
        verbosity = 100
    if verbosity < 5:
        verbosity = 5
    start_console_logging(verbosity)

    try:
        cli = CLI(
            yaml_files=args.yaml_files,
            yolo_mode=args.yolo_mode,
        )
    except m2ee.exceptions.M2EEException as e:
        logger.critical(e)
        sys.exit(1)
    # make sure a running `tail -F` log child is stopped on interpreter exit
    atexit.register(cli._cleanup_logging)
    if args.onecmd:
        try:
            cli.unchecked_onecmd(' '.join(args.onecmd))
        except (m2ee.client.M2EEAdminException,
                m2ee.client.M2EEAdminHTTPException,
                m2ee.client.M2EERuntimeNotFullyRunning,
                m2ee.client.M2EEAdminTimeout,
                m2ee.exceptions.M2EEException) as e:
            logger.error(e)
            sys.exit(1)
        except m2ee.client.M2EEAdminNotAvailable:
            pid_alive, m2ee_alive = cli.m2ee.check_alive()
            if not pid_alive and not m2ee_alive:
                logger.info("The application process is not running.")
                sys.exit(0)
            sys.exit(1)
    else:
        logger.info("Application Name: %s" % cli.m2ee.config.get_app_name())
        cli.onecmd('status')
        cli.cmdloop_handle_ctrl_c()
# Script entry point when run directly.
if __name__ == '__main__':
    main()
| true | true |
f73369f05498ba862606e49029c1076513b94743 | 48,777 | py | Python | Bot/src/funhouse/image.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 181 | 2021-05-26T17:37:40.000Z | 2022-02-26T08:36:07.000Z | Bot/src/funhouse/image.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 24 | 2021-05-14T19:47:34.000Z | 2021-09-06T17:16:17.000Z | Bot/src/funhouse/image.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 16 | 2021-07-02T09:40:56.000Z | 2022-01-21T10:07:08.000Z | # !/usr/bin/python
"""
Copyright ©️: 2020 Seniatical / _-*™#7519
License: Apache 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
FULL LICENSE CAN BE FOUND AT:
https://www.apache.org/licenses/LICENSE-2.0.html
Any violation to the license, will result in moderate action
You are legally required to mention (original author, license, source and any changes made)
"""
import typing
import discord
from discord.ext import commands
from discord.ext.commands import BucketType
from PIL import Image, ImageDraw
from io import BytesIO
import aiohttp
import MK
import numpy as np
import random
import cv2
from core._ import extract_
from core._.image.effects import *
from core._.image._ import sort_size, save_image
from core._.image.cloud import APISESSION
class _Image(commands.Cog):
def __init__(self, bot):
    """Set up API clients, converters and shared image templates for the cog."""
    self.bot = bot
    self.converter = commands.MemberConverter()
    self.vac_api = APISESSION.Client()
    self.client = MK.Async.Client(bot.env('API_TOKEN'))
    self.ses = aiohttp.ClientSession()
    self.cache = bot.cache
    self.loop = bot.loop
    # templates loaded once at startup and reused by the image commands
    self.beard_image = Image.open('./storage/images/beard.png')
    self.wasted_template = Image.open('./storage/images/wasted.png').resize((900, 900))
    self.emoji_c = commands.PartialEmojiConverter()
    # expose the API client on the bot object for use elsewhere
    bot.api_c = self.client
@staticmethod
def pixelate(image_to_pixelate: Image) -> Image:
    """Return a pixelated copy: shrink to 32x32, then blow back up to
    1024x1024, both with nearest-neighbour resampling."""
    small = image_to_pixelate.resize((32, 32), resample=Image.NEAREST)
    return small.resize((1024, 1024), resample=Image.NEAREST)
@staticmethod
def quantize(image_to_quantize: Image) -> Image:
    """Return a palette-quantized copy of the image (PIL Image.quantize)."""
    return image_to_quantize.quantize()
@commands.command(name='Trash')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def trash(self, ctx, *, argument: str = None):
    """Paste the invoker's and the target's images onto the trash meme template."""
    def execute(_author, _user):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/trash.jpg')
        author = Image.open(_author).convert('RGBA').resize((130, 134))
        member = Image.open(_user).convert('RGBA').resize((105, 109))
        im.paste(author, (260, 120))
        im.paste(member, (105, 7))
        with BytesIO() as b:
            im.save(b, 'PNG')
            b.seek(0)
            file = discord.File(fp=b, filename='trash.png')
            return file

    author_av = BytesIO(await ctx.author.avatar.read())
    user_av = await extract_.get_stream(ctx, query=argument)
    if not user_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av, user_av)
    await future
    await ctx.send(
        embed=discord.Embed(title='Hes getting recycled', colour=random.randint(0x000000, 0xFFFFFF)).set_image(
            url='attachment://trash.png'),
        file=future.result())
@commands.command(name='Slap')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def slap(self, ctx, *, argument: str = None):
    """Paste the invoker's and the target's images onto the slap meme template."""
    def execute(_author, _user):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/slap.jpg')
        author = Image.open(_author).convert('RGBA').resize((310, 310))
        member = Image.open(_user).convert('RGBA').resize((320, 320))
        im = im.copy()
        im.paste(author, (465, 70))
        im.paste(member, (810, 350))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            return discord.File(buffer, filename='slapped.png')

    author_av = BytesIO(await ctx.author.avatar.read())
    user_av = await extract_.get_stream(ctx, query=argument)
    if not user_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av, user_av)
    await future
    embed = discord.Embed(title='He just got SLAPPED!',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://slapped.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Spank')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def spank(self, ctx, *, argument: str = None):
    """Paste the target's and the invoker's images onto the spank meme template."""
    def execute(_author, _user):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/spank.jpg').convert('RGBA')
        author = Image.open(_author).convert('RGBA').resize((230, 230))
        member = Image.open(_user).convert('RGBA').resize((320, 320))
        im = im.copy()
        im.paste(member, (750, 25))
        im.paste(author, (1200, 455))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            file = discord.File(buffer, filename='spanked.png')
            return file

    # NOTE(review): unlike trash/slap, author_av here holds the *target's*
    # image and user_av the invoker's avatar — confirm the swap is intended.
    author_av = await extract_.get_stream(ctx, query=argument)
    user_av = BytesIO(await ctx.author.avatar.read())
    if not author_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av, user_av)
    await future
    embed = discord.Embed(title='Who\'s being a naughty boy',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://spanked.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Boot')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def boot(self, ctx, *, argument: str = None):
    """Paste the target's and the invoker's images onto the boot meme template."""
    def execute(_author, _user):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/boot.jpg')
        _author = Image.open(_author).convert('RGBA').resize((50, 54))
        _user = Image.open(_user).convert('RGBA').resize((50, 54))
        im = im.copy()
        im.paste(_author, (183, 13))
        im.paste(_user, (33, 12))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            file = discord.File(buffer, filename='booted.png')
            return file

    # NOTE(review): author_av holds the *target's* image here and user_av
    # the invoker's avatar (same swap as spank) — confirm intended.
    author_av = await extract_.get_stream(ctx, query=argument)
    user_av = BytesIO(await ctx.author.avatar.read())
    if not author_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av, user_av)
    await future
    embed = discord.Embed(title='Right in the sacks',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://booted.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Obese')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def obese(self, ctx, *, argument: str = None):
    """Paste the given image onto the obese meme template."""
    def execute(_author):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/obese.jpg').convert('RGBA').resize((900, 900))
        _author = Image.open(_author).convert('RGBA').resize((220, 220))
        im.paste(_author, (457, 135))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            file = discord.File(buffer, filename='obese.png')
            return file

    author_av = await extract_.get_stream(ctx, query=argument)
    if not author_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av)
    await future
    embed = discord.Embed(title='He\'s not that fat *yet*.',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://obese.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Bird')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def bird(self, ctx, *, argument: str = None):
    """Paste the given image onto the bird meme template."""
    def execute(_author):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/bird.jpg').convert('RGBA').resize((900, 900))
        _author = Image.open(_author).convert('RGBA').resize((220, 220))
        im.paste(_author, (555, 60))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            file = discord.File(buffer, filename='bird.png')
            return file

    author_av = await extract_.get_stream(ctx, query=argument)
    if not author_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av)
    await future
    embed = discord.Embed(title='Somebody is preparing to migrate',
                          colour=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://bird.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Delete')
@commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
@commands.cooldown(1, 10, BucketType.user)
async def delete(self, ctx, *, argument: str = None):
    """Paste the given image onto the "delete file" meme template."""
    def execute(_author):
        # blocking PIL work, run in a thread-pool executor below
        im = Image.open('./storage/images/delete.jpg').convert('RGB')
        _author = Image.open(_author).convert('RGBA').resize((196, 196))
        im.paste(_author, (121, 137))
        with BytesIO() as buffer:
            im.save(buffer, format='PNG')
            buffer.seek(0)
            file = discord.File(buffer, filename='delete.png')
            return file

    author_av = await extract_.get_stream(ctx, query=argument)
    if not author_av:
        return await ctx.send('Invalid image provided')
    future = self.loop.run_in_executor(None, execute, author_av)
    await future
    embed = discord.Embed(title='Moving file to the recycle bin',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://delete.png')
    await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Invert')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def invert(self, ctx, argument: str = None, animate: str = '--true', *size) -> typing.Union[discord.MessageReference, discord.Embed]:
    """Invert the colours of the given image/GIF and reply with the result."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # run the blocking PIL operation off the event loop
    rendered = await self.loop.run_in_executor(
        None, IMAGEOPS, ImageOps.invert, stream, animate, *size)
    embed = discord.Embed(title='Inverted!', color=random.randint(0x000000, 0xFFFFFF))
    embed.set_image(url='attachment://{}'.format(rendered.filename))
    try:
        await ctx.message.reply(file=rendered, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Equalize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def equalize(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Apply PIL's equalize operation to the given image/GIF and reply."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # IMAGEOPS runs the blocking PIL operation off the event loop
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.equalize, stream, animate, *size)
    embed = discord.Embed(title='Equalized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Grayscale')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def grayscale(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Convert the given image/GIF to grayscale and reply with the result."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # IMAGEOPS runs the blocking PIL operation off the event loop
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.grayscale, stream, animate, *size)
    embed = discord.Embed(title='Grayscaled!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Mirror')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def mirror(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Mirror the given image/GIF horizontally and reply with the result."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # IMAGEOPS runs the blocking PIL operation off the event loop
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.mirror, stream, animate, *size)
    embed = discord.Embed(title='Mirrored!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Posterize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def posterize(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Posterize the given image/GIF (1 bit per channel) and reply."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # extra kwargs dict is forwarded to ImageOps.posterize by IMAGEOPS
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.posterize, stream, animate, *size, {'bits': 1})
    embed = discord.Embed(title='Posterized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Solarize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def solarize(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Solarize the given image/GIF (threshold 255) and reply."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # extra kwargs dict is forwarded to ImageOps.solarize by IMAGEOPS
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.solarize, stream, animate, *size, {'threshold': 255})
    embed = discord.Embed(title='Solarized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Transpose')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def transpose(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Apply PIL's EXIF-aware transpose to the given image/GIF and reply."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # IMAGEOPS runs the blocking PIL operation off the event loop
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.exif_transpose, stream, animate, *size)
    embed = discord.Embed(title='Transposed!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Flip')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def flip(self, ctx, argument: str = None, animate: str = '--true', *size):
    """Flip the given image/GIF vertically and reply with the result."""
    stream = await extract_.get_stream(ctx, query=argument)
    if not stream:
        return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
    # IMAGEOPS runs the blocking PIL operation off the event loop
    file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.flip, stream, animate, *size)
    embed = discord.Embed(title='Flipped!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://{}'.format(file.filename))
    try:
        await ctx.message.reply(file=file, embed=embed)
    except Exception:
        # sending fails when the rendered file exceeds Discord's upload limit
        return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Gamma')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def gamma(self, ctx, *, argument: str = None):
    """Apply the API's gamma filter to the given image and send the result."""
    img = await extract_.get_url(ctx, query=argument)
    try:
        img = await self.client.image('gamma', str(img))
    except Exception:
        # leftover debug print(e) removed; fail the same way every other
        # filter command in this cog does
        return await ctx.send('Invalid image URL passed.')
    file = discord.File(fp=img, filename='gamma.png')
    embed = discord.Embed(title='Gammafied!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://gamma.png')
    await ctx.send(file=file, embed=embed)
@commands.command(name='Rainbow')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def rainbow(self, ctx, *, argument: str = None):
    """Apply the API's rainbow filter to the given image and send the result."""
    img = await extract_.get_url(ctx, query=argument)
    try:
        img = await self.client.image('rainbow', str(img))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    # the filename/title previously said "autumn" — a copy-paste slip from
    # the autumn command; this is the rainbow endpoint
    file = discord.File(fp=img, filename='rainbow.png')
    embed = discord.Embed(title='Rainbow Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://rainbow.png')
    await ctx.send(file=file, embed=embed)
@commands.command(name='Autumn')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def autumn(self, ctx, *, argument: str = None):
    """Apply the API's autumn colour filter to the given image."""
    source_url = await extract_.get_url(ctx, query=argument)
    try:
        filtered = await self.client.image('autumn', str(source_url))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    attachment = discord.File(fp=filtered, filename='autumn.png')
    embed = discord.Embed(
        title='Autumn Filter',
        color=random.randint(0x000000, 0xFFFFFF),
    )
    embed.set_image(url='attachment://autumn.png')
    await ctx.send(file=attachment, embed=embed)
@commands.command(name='Inferno')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def inferno(self, ctx, *, argument: str = None):
    """Apply the "inferno" look to the given image (served by the API's
    'hsv' endpoint)."""
    source_url = await extract_.get_url(ctx, query=argument)
    try:
        filtered = await self.client.image('hsv', str(source_url))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    attachment = discord.File(fp=filtered, filename='inferno.png')
    embed = discord.Embed(
        title='Inferno Filter',
        color=random.randint(0x000000, 0xFFFFFF),
    )
    embed.set_image(url='attachment://inferno.png')
    await ctx.send(file=attachment, embed=embed)
@commands.command(name='Twilight')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def twilight(self, ctx, *, argument: str = None):
    """Apply the API's twilight colour filter to the given image."""
    source_url = await extract_.get_url(ctx, query=argument)
    try:
        filtered = await self.client.image('twilight', str(source_url))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    attachment = discord.File(fp=filtered, filename='twilight.png')
    embed = discord.Embed(
        title='Twilight Filter',
        color=random.randint(0x000000, 0xFFFFFF),
    )
    embed.set_image(url='attachment://twilight.png')
    await ctx.send(file=attachment, embed=embed)
@commands.command(name='Warp')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def warp(self, ctx, *, argument: str = None):
    """Apply the API's warp filter to the given image and send the result."""
    img = await extract_.get_url(ctx, query=argument)
    try:
        img = await self.client.image('warp', str(img))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    file = discord.File(fp=img, filename='warp.png')
    embed = discord.Embed(title='Warped Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
        url='attachment://warp.png')
    await ctx.send(file=file, embed=embed)
@commands.command(name='Blur')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def blur(self, ctx, *, argument: str = None):
    """Apply the API's blur filter to the given image and send the result."""
    img = await extract_.get_url(ctx, query=argument)
    try:
        img = await self.client.image('blur', str(img))
    except Exception:
        return await ctx.send('Invalid image URL passed.')
    file = discord.File(fp=img, filename='blur.png')
    embed = discord.Embed(title='You now look like a foggy mirror!',
                          color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://blur.png')
    await ctx.send(file=file, embed=embed)
@commands.command(name='Swirl')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def swirl(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('swirl', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='swirl.png')
embed = discord.Embed(title='Round and a round', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://swirl.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Achievement')
@commands.cooldown(1, 10, BucketType.user)
async def achievement(self, ctx, *, message: str = None):
message = 'Nothing.' if not message else message
message = message.replace(' ', '%20')
url = 'https://minecraftskinstealer.com/achievement/{}/Achievement%20Earned!/{}'.format(random.randrange(40),
message)
embed = discord.Embed(colour=discord.Colour.red()).set_image(url=url)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cartoon(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('cartoon', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='cartoon.png')
embed = discord.Embed(title='Cartoon Filter', color=ctx.author.color).set_image(url='attachment://cartoon.png')
await ctx.send(file=file, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def beard(self, ctx, *args):
if not args:
user = ctx.author
pos_x: str = '290'
pos_y: str = '250'
beard_x: str = '300'
beard_y = '300'
else:
try:
user = await self.converter.convert(ctx, args[0])
except commands.errors.MemberNotFound:
user = ctx.author
if len(args) > 1:
pos_x = args[1]
else:
pos_x = '290'
if len(args) > 2:
pos_y = args[2]
else:
pos_y = '250'
if len(args) > 3:
beard_x = args[3]
else:
beard_x = '300'
if len(args) > 4:
beard_y = args[4]
else:
beard_y = '300'
try:
positions = [pos_x, pos_y, beard_x, beard_y]
new_pos = list(map(int, positions))
if any([i for i in new_pos if i > 900 or i < 1]):
return await ctx.send('Markers cannot be larger than 900 or less than 1')
except ValueError:
return await ctx.send('Markers to place or resize the beard must be numbers!')
user = user or ctx.author
raw_beard = self.beard_image
beard = raw_beard.resize((new_pos[2], new_pos[3]))
avatar = Image.open(BytesIO(await user.avatar.with_format(format='png').read())).convert(
'RGBA').resize((900, 900))
avatar.paste(beard, (new_pos[0], new_pos[1]), beard)
with BytesIO() as buffer:
avatar.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='bearded.jpg')
embed = discord.Embed(title=f'Given {user.display_name} a nice beard', color=user.color).set_image(
url='attachment://bearded.jpg')
await ctx.send(file=file, embed=embed)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def wasted(self, ctx, user: discord.Member = None):
        """Overlay the GTA 'Wasted' template on a member's darkened avatar."""
        user = user or ctx.author
        def execute(image):
            # Runs in a worker thread: PIL work is CPU-bound and would
            # otherwise block the event loop.
            img = Image.open(image).convert('RGB').resize((900, 900))
            img = img.point(lambda p: p * 0.5)  # darken every channel by 50%
            img.paste(self.wasted_template, (0, 0), self.wasted_template)
            with BytesIO() as buffer:
                img.save(buffer, 'PNG')
                buffer.seek(0)
                file = discord.File(fp=buffer, filename='wasted.jpg')
                return file
        image = await self.loop.run_in_executor(None,
                                                execute,
                                                BytesIO(await user.avatar.with_format(format='png').read())
                                                )
        await ctx.send(embed=discord.Embed(title='Wasted', colour=user.colour).set_image(url='attachment://wasted.jpg'),
                       file=image)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def gayify(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.send('Invalid image provided')
file = await self.loop.run_in_executor(None, gayify_, stream, animate, *size)
embed = discord.Embed(title=f'Gay Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
await ctx.send(file=file, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def distracted(self, ctx, user1: discord.Member = None, user2: discord.Member = None,
user3: discord.Member = None):
m1 = user1 or ctx.author
m2 = user2 or ctx.author
m3 = user3 or ctx.author
user = await self.vac_api.distracted_bf(m1.avatar.with_format(format='png'),
m2.avatar.with_format(format='png'),
m3.avatar.with_format(format='png'))
image_out = discord.File(fp=await user.read(), filename="distracted.png")
embed = discord.Embed(title=f'Oh no.', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://distracted.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def dos(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.dock_of_shame(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="dockofshame.png")
embed = discord.Embed(title=f'SHAME THEM!', color=user.colour).set_image(url='attachment://dockofshame.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def drip(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.drip(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="drip.png")
embed = discord.Embed(title=f'Speechless', color=user.colour).set_image(url='attachment://drip.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cr(self, ctx, *, text: str):
user = await self.vac_api.car_reverse(text)
image_out = discord.File(fp=await user.read(), filename="carreverse.png")
embed = discord.Embed(title=f'Car Reverse Meme', color=ctx.author.colour).set_image(
url='attachment://carreverse.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cmm(self, ctx, *, text: str):
user = await self.vac_api.change_my_mind(text)
image_out = discord.File(fp=await user.read(), filename="changemymind.png")
embed = discord.Embed(title=f'Change My Mind.', color=ctx.author.colour).set_image(
url='attachment://changemymind.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def heaven(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.heaven(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="heaven.png")
embed = discord.Embed(title=f'They have ascended.', color=user.colour).set_image(url='attachment://heaven.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def table_flip(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.table_flip(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="tableflip.png")
embed = discord.Embed(title=f'{user.display_name} looks fiesty.', color=user.colour).set_image(
url='attachment://tableflip.png')
await ctx.send(file=image_out, embed=embed)
    @commands.command(aliases=['color'], name='Colour')
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def get_colour(self, ctx, colour):
        # Render a 900x900 solid-colour swatch. `colour` may be a guild member
        # (their role colour is used) or a literal '#RRGGBB' hex code.
        try:
            # First interpretation: a member reference -> use their colour.
            colour = int((str((await self.converter.convert(ctx, colour)).colour)).replace('#', '0x'), 16)
        except Exception:
            try:
                # Fallback: parse the argument itself as a hex colour code.
                colour = int(colour.replace('#', '0x'), 16)
            except Exception:
                return await ctx.send('Invalid hex code provided.')
        with BytesIO() as b:
            new = Image.new(mode='RGB', size=(900, 900), color=colour)
            new.save(b, 'PNG')
            b.seek(0)
            await ctx.send(file=discord.File(fp=b, filename='{}.png'.format(colour)),
                           embed=discord.Embed(title='Created new colour:', colour=colour).set_image(
                               url='attachment://{}.png'.format(colour)))
@commands.command(name='8bit')
@commands.cooldown(1, 10, commands.BucketType.user)
async def bittify(self, ctx, argument: str = None, animate: str = '--true', *size) -> discord.Embed:
_io = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(_io, animate, size):
avatar = Image.open(_io)
duration = avatar.info.get('duration')
loops = avatar.info.get('loop')
if not size and not getattr(_io, 'discord', False):
size = avatar.size
else:
size = sort_size(*size)
if getattr(avatar, 'is_animated', False) and animate.lower() == '--true':
frames = []
for _ in range(avatar.n_frames):
avatar.seek(_)
frames.append(self.quantize(self.pixelate(avatar)).resize(size))
return save_image(frames, filename='8bit.gif', duration=duration, loop=loops)
eightbit = self.pixelate(avatar)
eightbit = self.quantize(eightbit).resize(size)
with BytesIO() as buffer:
eightbit.save(buffer, format="PNG")
buffer.seek(0)
file = discord.File(buffer, filename="8bit.png")
return file
if not _io:
return await ctx.send('Invalid image provided')
future = self.loop.run_in_executor(None, execute, _io, animate, size)
await future
embed = discord.Embed(
title="8-Bit filter",
colour=ctx.author.colour
)
embed.set_image(url="attachment://{}".format(future.result().filename))
await ctx.send(file=future.result(), embed=embed)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def oil(self, ctx, *, argument: str = None):
        """Re-render the resolved image as an oil painting via OpenCV."""
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.send('Invalid image provided')
        def execute(image):
            # Worker thread: decode raw bytes into an OpenCV matrix.
            image.seek(0)
            file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            cv2.waitKey(1)
            try:
                # cv2.xphoto is an opencv-contrib module; args are size=7,
                # dynRatio=1. Fails (returns False) for unsupported inputs.
                # NOTE(review): imdecode yields BGR but Image.fromarray
                # assumes RGB, so red/blue may be swapped — confirm intent.
                oil = cv2.xphoto.oilPainting(image, 7, 1)
            except Exception:
                return False
            with BytesIO() as buffer:
                image = Image.fromarray(oil)
                image.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='oilpainting.png')
                return file
        future = self.loop.run_in_executor(None, execute, image)
        await future
        if not future.result():
            return await ctx.send('Oh No! Looks like your image cannot be drawn.')
        embed = discord.Embed(
            title="Oil Painting",
            colour=ctx.author.colour
        )
        embed.set_image(url="attachment://oilpainting.png")
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(aliases=['watercolor'])
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def watercolour(self, ctx, *, argument: str = None):
        """Re-render the resolved image as a watercolour painting via OpenCV."""
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.send('Invalid image provided')
        def execute(image):
            # Worker thread: decode raw bytes into an OpenCV matrix.
            image.seek(0)
            file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            cv2.waitKey(1)
            try:
                # Edge-preserving stylization; returns False on failure.
                water_colour = cv2.stylization(image, sigma_s=60, sigma_r=0.6)
            except Exception:
                return False
            with BytesIO() as buffer:
                image = Image.fromarray(water_colour)
                image.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='watercolour.png')
                return file
        future = self.loop.run_in_executor(None, execute, image)
        await future
        if not future.result():
            return await ctx.send('Oh No! Looks like your image cannot be drawn.')
        embed = discord.Embed(
            title="Watercolour Painting",
            colour=ctx.author.colour
        )
        embed.set_image(url="attachment://watercolour.png")
        return await ctx.send(file=future.result(), embed=embed)
    @commands.group(invoke_without_command=True)
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def sketch(self, ctx, *, argument: str = None):
        """Re-render the resolved image as a grayscale pencil sketch.

        Group command: the 'colour' subcommand produces the colour variant.
        """
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.send('Invalid image provided')
        def execute(image):
            # Worker thread: decode raw bytes into an OpenCV matrix.
            image.seek(0)
            file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            cv2.waitKey(1)
            try:
                # pencilSketch returns (grayscale, colour); only the
                # grayscale variant is used here.
                dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
            except Exception:
                return False
            with BytesIO() as buffer:
                image = Image.fromarray(dst_gray)
                image.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='sketchnocolour.png')
                return file
        future = self.loop.run_in_executor(None, execute, image)
        await future
        if not future.result():
            return await ctx.send('Oh No! Looks like your image cannot be drawn.')
        embed = discord.Embed(
            title="Sketched your image",
            colour=ctx.author.colour
        )
        embed.set_image(url="attachment://sketchnocolour.png")
        return await ctx.send(file=future.result(), embed=embed)
    @sketch.command(aliases=['color'], name='colour')
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def sketch_colour(self, ctx, *, argument: str = None):
        """Re-render the resolved image as a colour pencil sketch."""
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.send('Invalid image provided')
        def execute(image):
            # Worker thread: decode raw bytes into an OpenCV matrix.
            image.seek(0)
            file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            cv2.waitKey(1)
            try:
                # pencilSketch returns (grayscale, colour); only the colour
                # variant is used here.
                dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
            except Exception:
                return False
            with BytesIO() as buffer:
                image = Image.fromarray(dst_color)
                image.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='sketchcolour.png')
                return file
        future = self.loop.run_in_executor(None, execute, image)
        await future
        if not future.result():
            return await ctx.send('Oh No! Looks like your image cannot be drawn.')
        embed = discord.Embed(
            title="Sketched your image",
            colour=ctx.author.colour
        )
        embed.set_image(url="attachment://sketchcolour.png")
        return await ctx.send(file=future.result(), embed=embed)
@commands.command()
async def expand(self, ctx, user: discord.Member = None):
user = user or ctx.author
message = await ctx.send(embed=discord.Embed(description='<a:online:834143953221582927> | Building GIF',
colour=discord.Colour.green()))
def execute(image):
images = []
width = 900
center = width // 2
color_1 = (0, 255, 0)
background_colour = (255, 255, 255)
max_radius = int(center * 1.5)
step = 55
avatar = Image.open(image).convert('RGB')
for i in range(1, max_radius, step):
im = Image.new('RGB', (width, width), background_colour)
image = avatar.resize((width, width))
npImage = np.array(image)
h, w = im.size
alpha = Image.new('L', image.size, 0)
draw = ImageDraw.Draw(alpha)
draw.pieslice((center - i, center - i, center + i, center + i), 0, 360, fill=255)
npAlpha = np.array(alpha)
npImage = np.dstack((npImage, npAlpha))
image = Image.fromarray(npImage).convert('RGBA')
im.paste(image, (0, 0), image)
images.append(im)
with BytesIO() as buffer:
images[0].save(buffer, format='GIF', optimize=False, duration=150, append_images=images[1:],
save_all=True, quality=1, loop=0)
buffer.seek(0)
return discord.File(buffer, filename='expand.gif')
image = BytesIO(await user.avatar.with_format(format='jpg').read())
future = self.loop.run_in_executor(None, execute, image)
await future
gif_message = await ctx.send(file=future.result())
return await message.edit(embed=discord.Embed(
description='<:Done:835812226345598986> | [Message Link]({}) | [Image Link]({})'.format(
gif_message.jump_url, gif_message.attachments[0].url),
colour=discord.Colour.green()))
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def glitch(self, ctx, argument: str = None, level: str = 'low', animated: str = '--true',
                     *size) -> typing.Union[typing.Optional[discord.Embed], discord.MessageReference]:
        """Apply a glitch effect; `level` is low/medium/high or a number 0-10."""
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
        # Named presets mapped onto numeric glitch intensities.
        levels = {
            'low': 2,
            'medium': 5,
            'high': 10
        }
        try:
            level = levels.get(level.lower()) if level.lower() in levels else float(level)
        except Exception:
            level = 2  # unparseable input falls back to the 'low' preset
        if level < 0 or level > 10:
            return await ctx.send('Max level for glitching images starts at 0 and is capped at 10!')
        future = self.loop.run_in_executor(None, glitch_, image, level, animated, size)
        await future
        try:
            return await ctx.send(embed=discord.Embed(
                title='Glitch Effect',
                colour=random.randint(0x000000, 0xFFFFFF)
            ).set_image(url='attachment://glitched.gif'), file=future.result())
        except Exception:
            # The glitcher can fail/oversize on high levels for some inputs.
            return await ctx.send('Oops, this level was abit too high for your image - please retry with a lower level')
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.user)
async def image(self, ctx, *, query: str = None):
if not query:
return await ctx.send('Need to give an image to search for!')
url = 'https://api.pexels.com/v1/search?query={}&per_page={}'.format(query, random.randint(1, 100))
auth = self.bot.env('PEXEL_API_TOKEN')
r = requests.get(url, headers={'Authorization': auth}).json()
try:
await ctx.send(
embed=discord.Embed(
title='Search results for {}'.format(
query.title()
),
colour=discord.Color.red(),
).set_image(url=random.choice(r['photos'])['src']['large2x'])
)
except IndexError:
return await ctx.send('No Image was Found Under the Context **{}**'.format(query.title()))
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def spin(self, ctx, argument: str = None, animate: str = '--true') -> discord.Message:
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
future = await self.loop.run_in_executor(None, spin_, image, animate)
return await ctx.send(embed=discord.Embed(
title='Spun around and around',
colour=random.randint(0x000000, 0xFFFFFF)
).set_image(url='attachment://spin.gif'), file=future)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def evilpatrick(self, ctx, argument: str = None) -> discord.MessageReference:
        """Paste the resolved image into every frame of the evil-Patrick GIF."""
        stream = await extract_.get_stream(ctx, query=argument)
        if not stream:
            return await ctx.message.reply(content='Invalid image provided')
        def execute(stream):
            # Worker thread: the pasted image sits at (205, 20) on each frame.
            image = Image.open(stream).resize((150, 150)).convert('RGB')
            frames = []
            with BytesIO() as buffer:
                with Image.open('./storage/images/evil.gif') as _base:
                    for _ in range(_base.n_frames):
                        _base.seek(_)
                        temp = _base.copy().convert('RGBA')
                        temp.paste(image, (205, 20))
                        frames.append(temp)
                    frames[0].save(
                        buffer, 'GIF',
                        append_images=frames[1:],
                        loop=0, duration=(_base.info.get('duration') or 0),
                        save_all=True
                    )
                buffer.seek(0)
                return discord.File(fp=buffer, filename='evil.gif')
        image = await self.loop.run_in_executor(None, execute, stream)
        return await ctx.message.reply(
            embed=discord.Embed(
                title='Evil!',
                colour=discord.Colour.red()
            ).set_image(url='attachment://evil.gif'), file=image)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def salt(self, ctx, argument: str = None) -> discord.MessageReference:
        """Overlay the animated salt-shaker GIF on top of the resolved image."""
        stream = await extract_.get_stream(ctx, query=argument)
        if not stream:
            return await ctx.message.reply(content='Invalid image provided')
        def execute(stream):
            # Worker thread: one output frame per frame of the salt template,
            # pasted at (120, 10) over a fresh copy of the input each time.
            image = Image.open(stream).resize((300, 300)).convert('RGB')
            frames = []
            with BytesIO() as buffer:
                with Image.open('./storage/images/salty.gif') as _base:
                    for _ in range(_base.n_frames):
                        _base.seek(_)
                        temp = _base.copy().resize((200, 200)).convert('RGBA')
                        image_ = image.copy()
                        image_.paste(temp, (120, 10), temp)
                        frames.append(image_)
                    frames[0].save(
                        buffer, 'GIF',
                        append_images=frames[1:],
                        loop=0, duration=(_base.info.get('duration') or 0),
                        save_all=True
                    )
                buffer.seek(0)
                return discord.File(fp=buffer, filename='salty.gif')
        image = await self.loop.run_in_executor(None, execute, stream)
        return await ctx.message.reply(
            embed=discord.Embed(
                title='Salty!',
                colour=discord.Colour.red()
            ).set_image(url='attachment://salty.gif'), file=image)
def setup(bot):
    # discord.py extension entry point: registers the cog when loaded.
    bot.add_cog(_Image(bot))
| 42.194637 | 143 | 0.605101 |
import typing
import discord
from discord.ext import commands
from discord.ext.commands import BucketType
from PIL import Image, ImageDraw
from io import BytesIO
import aiohttp
import MK
import numpy as np
import random
import cv2
from core._ import extract_
from core._.image.effects import *
from core._.image._ import sort_size, save_image
from core._.image.cloud import APISESSION
class _Image(commands.Cog):
    def __init__(self, bot):
        """Set up API clients, shared bot handles and cached image templates."""
        self.bot = bot
        self.converter = commands.MemberConverter()
        self.vac_api = APISESSION.Client()
        self.client = MK.Async.Client(bot.env('API_TOKEN'))
        self.ses = aiohttp.ClientSession()
        self.cache = bot.cache
        self.loop = bot.loop
        # Templates loaded once and reused by the beard/wasted commands.
        self.beard_image = Image.open('./storage/images/beard.png')
        self.wasted_template = Image.open('./storage/images/wasted.png').resize((900, 900))
        self.emoji_c = commands.PartialEmojiConverter()
        # Expose the image API client on the bot for other cogs.
        bot.api_c = self.client
@staticmethod
def pixelate(image_to_pixelate: Image) -> Image:
return image_to_pixelate.resize((32, 32), resample=Image.NEAREST).resize((1024, 1024), resample=Image.NEAREST)
    @staticmethod
    def quantize(image_to_quantize: Image) -> Image:
        # Reduce the image to a palette (default 256 colours) for the 8-bit look.
        return image_to_quantize.quantize()
    @commands.command(name='Trash')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def trash(self, ctx, *, argument: str = None):
        """Paste the author and the target image into the trash-bin meme."""
        def execute(_author, _user):
            # Worker thread; the offsets position the faces on the template.
            im = Image.open('./storage/images/trash.jpg')
            author = Image.open(_author).convert('RGBA').resize((130, 134))
            member = Image.open(_user).convert('RGBA').resize((105, 109))
            im.paste(author, (260, 120))
            im.paste(member, (105, 7))
            with BytesIO() as b:
                im.save(b, 'PNG')
                b.seek(0)
                file = discord.File(fp=b, filename='trash.png')
                return file
        author_av = BytesIO(await ctx.author.avatar.read())
        user_av = await extract_.get_stream(ctx, query=argument)
        if not user_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av, user_av)
        await future
        await ctx.send(
            embed=discord.Embed(title='Hes getting recycled', colour=random.randint(0x000000, 0xFFFFFF)).set_image(
                url='attachment://trash.png'),
            file=future.result())
    @commands.command(name='Slap')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def slap(self, ctx, *, argument: str = None):
        """Paste the author (slapper) and the target image into the slap meme."""
        def execute(_author, _user):
            # Worker thread; the offsets position the faces on the template.
            im = Image.open('./storage/images/slap.jpg')
            author = Image.open(_author).convert('RGBA').resize((310, 310))
            member = Image.open(_user).convert('RGBA').resize((320, 320))
            im = im.copy()
            im.paste(author, (465, 70))
            im.paste(member, (810, 350))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                return discord.File(buffer, filename='slapped.png')
        author_av = BytesIO(await ctx.author.avatar.read())
        user_av = await extract_.get_stream(ctx, query=argument)
        if not user_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av, user_av)
        await future
        embed = discord.Embed(title='He just got SLAPPED!',
                              color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://slapped.png')
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(name='Spank')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def spank(self, ctx, *, argument: str = None):
        """Paste the target image and the invoker's avatar into the spank meme."""
        def execute(_author, _user):
            # Worker thread; `_author` is the resolved target image and
            # `_user` is the invoker's avatar (see the calls below).
            im = Image.open('./storage/images/spank.jpg').convert('RGBA')
            author = Image.open(_author).convert('RGBA').resize((230, 230))
            member = Image.open(_user).convert('RGBA').resize((320, 320))
            im = im.copy()
            im.paste(member, (750, 25))
            im.paste(author, (1200, 455))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='spanked.png')
                return file
        author_av = await extract_.get_stream(ctx, query=argument)
        user_av = BytesIO(await ctx.author.avatar.read())
        if not author_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av, user_av)
        await future
        embed = discord.Embed(title='Who\'s being a naughty boy',
                              color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://spanked.png')
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(name='Boot')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def boot(self, ctx, *, argument: str = None):
        """Paste the target image and the invoker's avatar into the boot meme."""
        def execute(_author, _user):
            # Worker thread; the offsets position the faces on the template.
            im = Image.open('./storage/images/boot.jpg')
            _author = Image.open(_author).convert('RGBA').resize((50, 54))
            _user = Image.open(_user).convert('RGBA').resize((50, 54))
            im = im.copy()
            im.paste(_author, (183, 13))
            im.paste(_user, (33, 12))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='booted.png')
                return file
        author_av = await extract_.get_stream(ctx, query=argument)
        user_av = BytesIO(await ctx.author.avatar.read())
        if not author_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av, user_av)
        await future
        embed = discord.Embed(title='Right in the sacks',
                              color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://booted.png')
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(name='Obese')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def obese(self, ctx, *, argument: str = None):
        """Paste the target image onto the 'obese' meme template."""
        def execute(_author):
            # Worker thread; the offset positions the face on the template.
            im = Image.open('./storage/images/obese.jpg').convert('RGBA').resize((900, 900))
            _author = Image.open(_author).convert('RGBA').resize((220, 220))
            im.paste(_author, (457, 135))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='obese.png')
                return file
        author_av = await extract_.get_stream(ctx, query=argument)
        if not author_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av)
        await future
        embed = discord.Embed(title='He\'s not that fat *yet*.',
                              color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://obese.png')
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(name='Bird')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def bird(self, ctx, *, argument: str = None):
        """Paste the target image onto the bird meme template."""
        def execute(_author):
            # Worker thread; the offset positions the face on the template.
            im = Image.open('./storage/images/bird.jpg').convert('RGBA').resize((900, 900))
            _author = Image.open(_author).convert('RGBA').resize((220, 220))
            im.paste(_author, (555, 60))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='bird.png')
                return file
        author_av = await extract_.get_stream(ctx, query=argument)
        if not author_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av)
        await future
        embed = discord.Embed(title='Somebody is preparing to migrate',
                              colour=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://bird.png')
        await ctx.send(file=future.result(), embed=embed)
    @commands.command(name='Delete')
    @commands.bot_has_guild_permissions(send_messages=True, attach_files=True)
    @commands.cooldown(1, 10, BucketType.user)
    async def delete(self, ctx, *, argument: str = None):
        """Paste the target image into the Windows 'delete file' dialog meme."""
        def execute(_author):
            # Worker thread; the offset positions the face on the template.
            im = Image.open('./storage/images/delete.jpg').convert('RGB')
            _author = Image.open(_author).convert('RGBA').resize((196, 196))
            im.paste(_author, (121, 137))
            with BytesIO() as buffer:
                im.save(buffer, format='PNG')
                buffer.seek(0)
                file = discord.File(buffer, filename='delete.png')
                return file
        author_av = await extract_.get_stream(ctx, query=argument)
        if not author_av:
            return await ctx.send('Invalid image provided')
        future = self.loop.run_in_executor(None, execute, author_av)
        await future
        embed = discord.Embed(title='Moving file to the recycle bin',
                              color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://delete.png')
        await ctx.send(file=future.result(), embed=embed)
@commands.command(name='Invert')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def invert(self, ctx, argument: str = None, animate: str = '--true', *size) -> typing.Union[discord.MessageReference, discord.Embed]:
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.invert, stream, animate, *size)
embed = discord.Embed(title='Inverted!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Equalize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def equalize(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.equalize, stream, animate, *size)
embed = discord.Embed(title='Equalized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Grayscale')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def grayscale(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.grayscale, stream, animate, *size)
embed = discord.Embed(title='Grayscaled!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Mirror')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def mirror(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.mirror, stream, animate, *size)
embed = discord.Embed(title='Mirrored!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Posterize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def posterize(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.posterize, stream, animate, *size, {'bits': 1})
embed = discord.Embed(title='Posterized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Solarize')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def solarize(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.solarize, stream, animate, *size, {'threshold': 255})
embed = discord.Embed(title='Solarized!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
return await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Transpose')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def transpose(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.exif_transpose, stream, animate, *size)
embed = discord.Embed(title='Transposed!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Flip')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def flip(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
file = await self.loop.run_in_executor(None, IMAGEOPS, ImageOps.flip, stream, animate, *size)
embed = discord.Embed(title='Flipped!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
try:
await ctx.message.reply(file=file, embed=embed)
except Exception:
urn await ctx.message.reply(content='Oh No, This file was too large!')
@commands.command(name='Gamma')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def gamma(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('gamma', str(img))
except Exception as e:
print(e)
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='gamma.png')
embed = discord.Embed(title='Gammafied!', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://gamma.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Rainbow')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def rainbow(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('rainbow', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='autumn.png')
embed = discord.Embed(title='Autumn Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://autumn.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Autumn')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def autumn(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('autumn', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='autumn.png')
embed = discord.Embed(title='Autumn Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://autumn.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Inferno')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def inferno(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('hsv', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='inferno.png')
embed = discord.Embed(title='Inferno Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://inferno.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Twilight')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def twilight(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('twilight', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='twilight.png')
embed = discord.Embed(title='Twilight Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://twilight.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Warp')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def warp(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('warp', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='warp.png')
embed = discord.Embed(title='Warped Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://warp.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Blur')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, BucketType.user)
async def blur(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('blur', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='blur.png')
embed = discord.Embed(title='You now look like a foggy mirror!',
color=random.randint(0x000000, 0xFFFFFF)).set_image(url='attachment://blur.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Swirl')
@commands.bot_has_guild_permissions(send_messages=True, embed_links=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def swirl(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('swirl', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='swirl.png')
embed = discord.Embed(title='Round and a round', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://swirl.png')
await ctx.send(file=file, embed=embed)
@commands.command(name='Achievement')
@commands.cooldown(1, 10, BucketType.user)
async def achievement(self, ctx, *, message: str = None):
message = 'Nothing.' if not message else message
message = message.replace(' ', '%20')
url = 'https://minecraftskinstealer.com/achievement/{}/Achievement%20Earned!/{}'.format(random.randrange(40),
message)
embed = discord.Embed(colour=discord.Colour.red()).set_image(url=url)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cartoon(self, ctx, *, argument: str = None):
img = await extract_.get_url(ctx, query=argument)
try:
img = await self.client.image('cartoon', str(img))
except Exception:
return await ctx.send('Invalid image URL passed.')
file = discord.File(fp=img, filename='cartoon.png')
embed = discord.Embed(title='Cartoon Filter', color=ctx.author.color).set_image(url='attachment://cartoon.png')
await ctx.send(file=file, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def beard(self, ctx, *args):
if not args:
user = ctx.author
pos_x: str = '290'
pos_y: str = '250'
beard_x: str = '300'
beard_y = '300'
else:
try:
user = await self.converter.convert(ctx, args[0])
except commands.errors.MemberNotFound:
user = ctx.author
if len(args) > 1:
pos_x = args[1]
else:
pos_x = '290'
if len(args) > 2:
pos_y = args[2]
else:
pos_y = '250'
if len(args) > 3:
beard_x = args[3]
else:
beard_x = '300'
if len(args) > 4:
beard_y = args[4]
else:
beard_y = '300'
try:
positions = [pos_x, pos_y, beard_x, beard_y]
new_pos = list(map(int, positions))
if any([i for i in new_pos if i > 900 or i < 1]):
return await ctx.send('Markers cannot be larger than 900 or less than 1')
except ValueError:
return await ctx.send('Markers to place or resize the beard must be numbers!')
user = user or ctx.author
raw_beard = self.beard_image
beard = raw_beard.resize((new_pos[2], new_pos[3]))
avatar = Image.open(BytesIO(await user.avatar.with_format(format='png').read())).convert(
'RGBA').resize((900, 900))
avatar.paste(beard, (new_pos[0], new_pos[1]), beard)
with BytesIO() as buffer:
avatar.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='bearded.jpg')
embed = discord.Embed(title=f'Given {user.display_name} a nice beard', color=user.color).set_image(
url='attachment://bearded.jpg')
await ctx.send(file=file, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def wasted(self, ctx, user: discord.Member = None):
user = user or ctx.author
def execute(image):
img = Image.open(image).convert('RGB').resize((900, 900))
img = img.point(lambda p: p * 0.5)
img.paste(self.wasted_template, (0, 0), self.wasted_template)
with BytesIO() as buffer:
img.save(buffer, 'PNG')
buffer.seek(0)
file = discord.File(fp=buffer, filename='wasted.jpg')
return file
image = await self.loop.run_in_executor(None,
execute,
BytesIO(await user.avatar.with_format(format='png').read())
)
await ctx.send(embed=discord.Embed(title='Wasted', colour=user.colour).set_image(url='attachment://wasted.jpg'),
file=image)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def gayify(self, ctx, argument: str = None, animate: str = '--true', *size):
stream = await extract_.get_stream(ctx, query=argument)
if not stream:
return await ctx.send('Invalid image provided')
file = await self.loop.run_in_executor(None, gayify_, stream, animate, *size)
embed = discord.Embed(title=f'Gay Filter', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://{}'.format(file.filename))
await ctx.send(file=file, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def distracted(self, ctx, user1: discord.Member = None, user2: discord.Member = None,
user3: discord.Member = None):
m1 = user1 or ctx.author
m2 = user2 or ctx.author
m3 = user3 or ctx.author
user = await self.vac_api.distracted_bf(m1.avatar.with_format(format='png'),
m2.avatar.with_format(format='png'),
m3.avatar.with_format(format='png'))
image_out = discord.File(fp=await user.read(), filename="distracted.png")
embed = discord.Embed(title=f'Oh no.', color=random.randint(0x000000, 0xFFFFFF)).set_image(
url='attachment://distracted.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def dos(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.dock_of_shame(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="dockofshame.png")
embed = discord.Embed(title=f'SHAME THEM!', color=user.colour).set_image(url='attachment://dockofshame.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def drip(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.drip(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="drip.png")
embed = discord.Embed(title=f'Speechless', color=user.colour).set_image(url='attachment://drip.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cr(self, ctx, *, text: str):
user = await self.vac_api.car_reverse(text)
image_out = discord.File(fp=await user.read(), filename="carreverse.png")
embed = discord.Embed(title=f'Car Reverse Meme', color=ctx.author.colour).set_image(
url='attachment://carreverse.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cmm(self, ctx, *, text: str):
user = await self.vac_api.change_my_mind(text)
image_out = discord.File(fp=await user.read(), filename="changemymind.png")
embed = discord.Embed(title=f'Change My Mind.', color=ctx.author.colour).set_image(
url='attachment://changemymind.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def heaven(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.heaven(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="heaven.png")
embed = discord.Embed(title=f'They have ascended.', color=user.colour).set_image(url='attachment://heaven.png')
await ctx.send(file=image_out, embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def table_flip(self, ctx, user: discord.Member = None):
user = user or ctx.author
data = await self.vac_api.table_flip(user.avatar.with_format(format='png'))
image_out = discord.File(fp=await data.read(), filename="tableflip.png")
embed = discord.Embed(title=f'{user.display_name} looks fiesty.', color=user.colour).set_image(
url='attachment://tableflip.png')
await ctx.send(file=image_out, embed=embed)
@commands.command(aliases=['color'], name='Colour')
@commands.cooldown(1, 10, commands.BucketType.user)
async def get_colour(self, ctx, colour):
try:
colour = int((str((await self.converter.convert(ctx, colour)).colour)).replace('#', '0x'), 16)
except Exception:
try:
colour = int(colour.replace('#', '0x'), 16)
except Exception:
return await ctx.send('Invalid hex code provided.')
with BytesIO() as b:
new = Image.new(mode='RGB', size=(900, 900), color=colour)
new.save(b, 'PNG')
b.seek(0)
await ctx.send(file=discord.File(fp=b, filename='{}.png'.format(colour)),
embed=discord.Embed(title='Created new colour:', colour=colour).set_image(
url='attachment://{}.png'.format(colour)))
@commands.command(name='8bit')
@commands.cooldown(1, 10, commands.BucketType.user)
async def bittify(self, ctx, argument: str = None, animate: str = '--true', *size) -> discord.Embed:
_io = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(_io, animate, size):
avatar = Image.open(_io)
duration = avatar.info.get('duration')
loops = avatar.info.get('loop')
if not size and not getattr(_io, 'discord', False):
size = avatar.size
else:
size = sort_size(*size)
if getattr(avatar, 'is_animated', False) and animate.lower() == '--true':
frames = []
for _ in range(avatar.n_frames):
avatar.seek(_)
frames.append(self.quantize(self.pixelate(avatar)).resize(size))
return save_image(frames, filename='8bit.gif', duration=duration, loop=loops)
eightbit = self.pixelate(avatar)
eightbit = self.quantize(eightbit).resize(size)
with BytesIO() as buffer:
eightbit.save(buffer, format="PNG")
buffer.seek(0)
file = discord.File(buffer, filename="8bit.png")
return file
if not _io:
return await ctx.send('Invalid image provided')
future = self.loop.run_in_executor(None, execute, _io, animate, size)
await future
embed = discord.Embed(
title="8-Bit filter",
colour=ctx.author.colour
)
embed.set_image(url="attachment://{}".format(future.result().filename))
await ctx.send(file=future.result(), embed=embed)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def oil(self, ctx, *, argument: str = None):
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(image):
image.seek(0)
file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
cv2.waitKey(1)
try:
oil = cv2.xphoto.oilPainting(image, 7, 1)
except Exception:
return False
with BytesIO() as buffer:
image = Image.fromarray(oil)
image.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='oilpainting.png')
return file
future = self.loop.run_in_executor(None, execute, image)
await future
if not future.result():
return await ctx.send('Oh No! Looks like your image cannot be drawn.')
embed = discord.Embed(
title="Oil Painting",
colour=ctx.author.colour
)
embed.set_image(url="attachment://oilpainting.png")
await ctx.send(file=future.result(), embed=embed)
@commands.command(aliases=['watercolor'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def watercolour(self, ctx, *, argument: str = None):
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(image):
image.seek(0)
file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
cv2.waitKey(1)
try:
water_colour = cv2.stylization(image, sigma_s=60, sigma_r=0.6)
except Exception:
return False
with BytesIO() as buffer:
image = Image.fromarray(water_colour)
image.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='watercolour.png')
return file
future = self.loop.run_in_executor(None, execute, image)
await future
if not future.result():
return await ctx.send('Oh No! Looks like your image cannot be drawn.')
embed = discord.Embed(
title="Watercolour Painting",
colour=ctx.author.colour
)
embed.set_image(url="attachment://watercolour.png")
return await ctx.send(file=future.result(), embed=embed)
@commands.group(invoke_without_command=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def sketch(self, ctx, *, argument: str = None):
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(image):
image.seek(0)
file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
cv2.waitKey(1)
try:
dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
except Exception:
return False
with BytesIO() as buffer:
image = Image.fromarray(dst_gray)
image.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='sketchnocolour.png')
return file
future = self.loop.run_in_executor(None, execute, image)
await future
if not future.result():
return await ctx.send('Oh No! Looks like your image cannot be drawn.')
embed = discord.Embed(
title="Sketched your image",
colour=ctx.author.colour
)
embed.set_image(url="attachment://sketchnocolour.png")
return await ctx.send(file=future.result(), embed=embed)
@sketch.command(aliases=['color'], name='colour')
@commands.cooldown(1, 10, commands.BucketType.user)
async def sketch_colour(self, ctx, *, argument: str = None):
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
def execute(image):
image.seek(0)
file_bytes = np.asarray(bytearray(image.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
cv2.waitKey(1)
try:
dst_gray, dst_color = cv2.pencilSketch(image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
except Exception:
return False
with BytesIO() as buffer:
image = Image.fromarray(dst_color)
image.save(buffer, format='PNG')
buffer.seek(0)
file = discord.File(buffer, filename='sketchcolour.png')
return file
future = self.loop.run_in_executor(None, execute, image)
await future
if not future.result():
return await ctx.send('Oh No! Looks like your image cannot be drawn.')
embed = discord.Embed(
title="Sketched your image",
colour=ctx.author.colour
)
embed.set_image(url="attachment://sketchcolour.png")
return await ctx.send(file=future.result(), embed=embed)
@commands.command()
async def expand(self, ctx, user: discord.Member = None):
user = user or ctx.author
message = await ctx.send(embed=discord.Embed(description='<a:online:834143953221582927> | Building GIF',
colour=discord.Colour.green()))
def execute(image):
images = []
width = 900
center = width // 2
color_1 = (0, 255, 0)
background_colour = (255, 255, 255)
max_radius = int(center * 1.5)
step = 55
avatar = Image.open(image).convert('RGB')
for i in range(1, max_radius, step):
im = Image.new('RGB', (width, width), background_colour)
image = avatar.resize((width, width))
npImage = np.array(image)
h, w = im.size
alpha = Image.new('L', image.size, 0)
draw = ImageDraw.Draw(alpha)
draw.pieslice((center - i, center - i, center + i, center + i), 0, 360, fill=255)
npAlpha = np.array(alpha)
npImage = np.dstack((npImage, npAlpha))
image = Image.fromarray(npImage).convert('RGBA')
im.paste(image, (0, 0), image)
images.append(im)
with BytesIO() as buffer:
images[0].save(buffer, format='GIF', optimize=False, duration=150, append_images=images[1:],
save_all=True, quality=1, loop=0)
buffer.seek(0)
return discord.File(buffer, filename='expand.gif')
image = BytesIO(await user.avatar.with_format(format='jpg').read())
future = self.loop.run_in_executor(None, execute, image)
await future
gif_message = await ctx.send(file=future.result())
return await message.edit(embed=discord.Embed(
description='<:Done:835812226345598986> | [Message Link]({}) | [Image Link]({})'.format(
gif_message.jump_url, gif_message.attachments[0].url),
colour=discord.Colour.green()))
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def glitch(self, ctx, argument: str = None, level: str = 'low', animated: str = '--true',
                     *size) -> typing.Union[typing.Optional[discord.Embed], discord.MessageReference]:
        """Apply a glitch effect to the resolved image at a named or numeric level."""
        image = await extract_.get_stream(ctx, query=argument)
        if not image:
            return await ctx.message.reply(content='Oh No, Looks like you passed an invalid image URL!')
        # Named intensity presets; anything else is parsed as a numeric level.
        levels = {
            'low': 2,
            'medium': 5,
            'high': 10
        }
        try:
            level = levels.get(level.lower()) if level.lower() in levels else float(level)
        except Exception:
            # Unparseable level: fall back to the 'low' preset.
            level = 2
        if level < 0 or level > 10:
            return await ctx.send('Max level for glitching images starts at 0 and is capped at 10!')
        # glitch_ runs in the executor; it produces the 'glitched.gif' attachment.
        future = self.loop.run_in_executor(None, glitch_, image, level, animated, size)
        await future
        try:
            return await ctx.send(embed=discord.Embed(
                title='Glitch Effect',
                colour=random.randint(0x000000, 0xFFFFFF)
            ).set_image(url='attachment://glitched.gif'), file=future.result())
        except Exception:
            return await ctx.send('Oops, this level was abit too high for your image - please retry with a lower level')
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.user)
async def image(self, ctx, *, query: str = None):
if not query:
return await ctx.send('Need to give an image to search for!')
url = 'https://api.pexels.com/v1/search?query={}&per_page={}'.format(query, random.randint(1, 100))
auth = self.bot.env('PEXEL_API_TOKEN')
r = requests.get(url, headers={'Authorization': auth}).json()
try:
await ctx.send(
embed=discord.Embed(
title='Search results for {}'.format(
query.title()
),
colour=discord.Color.red(),
).set_image(url=random.choice(r['photos'])['src']['large2x'])
)
except IndexError:
return await ctx.send('No Image was Found Under the Context **{}**'.format(query.title()))
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def spin(self, ctx, argument: str = None, animate: str = '--true') -> discord.Message:
image = await extract_.get_stream(ctx, query=argument)
if not image:
return await ctx.send('Invalid image provided')
future = await self.loop.run_in_executor(None, spin_, image, animate)
return await ctx.send(embed=discord.Embed(
title='Spun around and around',
colour=random.randint(0x000000, 0xFFFFFF)
).set_image(url='attachment://spin.gif'), file=future)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def evilpatrick(self, ctx, argument: str = None) -> discord.MessageReference:
        """Paste the resolved image onto every frame of the 'evil Patrick' GIF."""
        stream = await extract_.get_stream(ctx, query=argument)
        if not stream:
            return await ctx.message.reply(content='Invalid image provided')

        def execute(stream):
            # Pure PIL work, run off the event loop.
            image = Image.open(stream).resize((150, 150)).convert('RGB')
            frames = []
            with BytesIO() as buffer:
                with Image.open('./storage/images/evil.gif') as _base:
                    for _ in range(_base.n_frames):
                        _base.seek(_)
                        temp = _base.copy().convert('RGBA')
                        # Place the user's image into the meme's photo slot.
                        temp.paste(image, (205, 20))
                        frames.append(temp)
                    frames[0].save(
                        buffer, 'GIF',
                        append_images=frames[1:],
                        loop=0, duration=(_base.info.get('duration') or 0),
                        save_all=True
                    )
                buffer.seek(0)
                return discord.File(fp=buffer, filename='evil.gif')

        image = await self.loop.run_in_executor(None, execute, stream)
        return await ctx.message.reply(
            embed=discord.Embed(
                title='Evil!',
                colour=discord.Colour.red()
            ).set_image(url='attachment://evil.gif'), file=image)
    @commands.command()
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def salt(self, ctx, argument: str = None) -> discord.MessageReference:
        """Overlay the animated salt-shaker GIF on top of the resolved image."""
        stream = await extract_.get_stream(ctx, query=argument)
        if not stream:
            return await ctx.message.reply(content='Invalid image provided')

        def execute(stream):
            # Pure PIL work, run off the event loop.  Unlike evilpatrick, the
            # overlay frames are pasted onto copies of the user's image.
            image = Image.open(stream).resize((300, 300)).convert('RGB')
            frames = []
            with BytesIO() as buffer:
                with Image.open('./storage/images/salty.gif') as _base:
                    for _ in range(_base.n_frames):
                        _base.seek(_)
                        temp = _base.copy().resize((200, 200)).convert('RGBA')
                        image_ = image.copy()
                        # Third argument uses the overlay's alpha as the mask.
                        image_.paste(temp, (120, 10), temp)
                        frames.append(image_)
                    frames[0].save(
                        buffer, 'GIF',
                        append_images=frames[1:],
                        loop=0, duration=(_base.info.get('duration') or 0),
                        save_all=True
                    )
                buffer.seek(0)
                return discord.File(fp=buffer, filename='salty.gif')

        image = await self.loop.run_in_executor(None, execute, stream)
        return await ctx.message.reply(
            embed=discord.Embed(
                title='Salty!',
                colour=discord.Colour.red()
            ).set_image(url='attachment://salty.gif'), file=image)
def setup(bot):
    # discord.py extension entry point: register the image cog on the bot.
    bot.add_cog(_Image(bot))
| true | true |
f7336aaaab6d57e94457316d900de647ea9d62a5 | 2,672 | py | Python | amt/load/load.py | bellockk/amtool | 3650697120ac384ac3a0020c84b790ca734df218 | [
"MIT"
] | null | null | null | amt/load/load.py | bellockk/amtool | 3650697120ac384ac3a0020c84b790ca734df218 | [
"MIT"
] | 2 | 2019-05-30T11:17:51.000Z | 2021-11-15T17:48:34.000Z | amt/load/load.py | bellockk/amtool | 3650697120ac384ac3a0020c84b790ca734df218 | [
"MIT"
] | null | null | null | """
amt-load -- Artifact Management Tool Reader
amt is a Tool for managing software artifacts
It defines classes_and_methods and a command line interface
@author: Kenneth E. Bellock
@copyright:
@contact: ken@bellock.net
"""
import os
import sys
import yaml
import logging
__all__ = ['load']
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(SCRIPT_PATH), 'meta'))
from meta import MetaDict
from meta import MetaList
def _load_file(filename):
    """
    Load a single artifact file.

    Parses *filename* as YAML and wraps mapping/sequence documents in the
    metadata-aware ``MetaDict``/``MetaList`` containers, recording the source
    file on the ``_file`` attribute.

    Args:
        filename (str): File to load artifacts from.

    Returns:
        MetaDict or MetaList or object: The parsed file content.  Scalar
        documents (strings, numbers, ``None``) are returned unwrapped.

    Fixes: the docstring documented a nonexistent ``result`` parameter, and a
    scalar YAML document previously fell off the end of the function and was
    silently dropped (returned ``None``), contradicting ``load``'s documented
    "dict or list or string" contract.
    """
    logging.debug('Loading File: %s', filename)
    with open(filename, 'r') as f_obj:
        loaded_file_content = yaml.full_load(f_obj)
    logging.debug('Loaded File Content: %s', loaded_file_content)
    if isinstance(loaded_file_content, dict):
        metadict = MetaDict()
        metadict.update(loaded_file_content)
        metadict._file = filename
        return metadict
    if isinstance(loaded_file_content, (list, set, tuple)):
        metalist = MetaList()
        metalist.extend(loaded_file_content)
        metalist._file = filename
        return metalist
    # Scalar YAML document: return it as-is.
    return loaded_file_content
def load(target, toplevel=True):
    """
    Load a directory or file containing artifacts.

    ``target`` may be a single YAML file or a directory tree; directories are
    walked recursively and merged into nested dictionaries keyed by file and
    directory basenames.

    Args:
        target (str): The directory or file to be loaded.
        toplevel (bool, optional): Internal recursion flag: the top-level call
            returns the bare content, recursive calls wrap it under the
            entry's basename.

    Returns:
        dict or list or string: The fully read data structure containing all
        artifacts from the loaded target.
    """
    logging.debug('Loading Target: %s', target)
    basename = os.path.basename(target)
    if os.path.isfile(target):
        content = _load_file(target)
        if toplevel:
            return content
        # Key by the filename without its extension.
        return {os.path.splitext(basename)[0]: content}
    elif os.path.isdir(target):
        merged = {}
        for entry in os.listdir(target):
            merged.update(load(os.path.join(target, entry), toplevel=False))
        if toplevel:
            return merged
        return {basename: merged}
| 29.362637 | 77 | 0.66729 | import os
import sys
import yaml
import logging
__all__ = ['load']
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(SCRIPT_PATH), 'meta'))
from meta import MetaDict
from meta import MetaList
def _load_file(filename):
logging.debug('Loading File: %s', filename)
with open(filename, 'r') as f_obj:
loaded_file_content = yaml.full_load(f_obj)
logging.debug('Loaded File Content: %s', loaded_file_content)
if isinstance(loaded_file_content, dict):
metadict = MetaDict()
metadict.update(loaded_file_content)
metadict._file = filename
return metadict
if isinstance(loaded_file_content, (list, set, tuple)):
metadict = MetaList()
metadict.extend(loaded_file_content)
metadict._file = filename
return metadict
def load(target, toplevel=True):
logging.debug('Loading Target: %s', target)
basename = os.path.basename(target)
if os.path.isfile(target):
if toplevel:
return _load_file(target)
else:
return {os.path.splitext(basename)[0]: _load_file(target)}
elif os.path.isdir(target):
result = {}
for path in os.listdir(target):
result.update(load(os.path.join(target, path), toplevel=False))
if toplevel:
return result
else:
return {basename: result}
| true | true |
f7336b17920237a1d0b1bc08dd860d5794e243b0 | 928 | py | Python | string_processor.py | Christakou/hand-odds | eb4dd29013af01ec15d806e5c23699174f4f9731 | [
"MIT"
] | null | null | null | string_processor.py | Christakou/hand-odds | eb4dd29013af01ec15d806e5c23699174f4f9731 | [
"MIT"
] | null | null | null | string_processor.py | Christakou/hand-odds | eb4dd29013af01ec15d806e5c23699174f4f9731 | [
"MIT"
] | null | null | null | import re
from parallel import hand_odds
if __name__ == '__main__':
    # Interactive loop: paste a comma-separated HTML class extract describing
    # the dealt cards, with the player count as the final field.
    while True:
        class_string = input('Paste html extract:')
        split = class_string.split(',')
        number_of_players = int(split[-1])
        split = split[:-1]
        print(split)
        match_list = []
        for item in split:
            # Capture the suit name plus a 1-2 character rank.  Fix: the old
            # pattern captured a single character after the suit, truncating
            # the "10" rank to "1".  (Unused `suits`/`ranks` lists removed.)
            card = re.search(r'(diamonds|clubs|hearts|spades)-(\w{1,2})', item)
            suit = card.group(1)[0].upper()  # 'clubs' -> 'C', 'hearts' -> 'H', ...
            rank = {'J': '11', 'Q': '12', 'K': '13', 'A': '14'}.get(card.group(2), card.group(2))
            match_list.append(rank + suit)
        # Last two entries are the hole cards, the rest is the board.
        hand_odds(match_list[-2:], match_list[:-2], number_of_players - 1, 1000)
from parallel import hand_odds
if __name__ == '__main__':
while True:
class_string = input('Paste html extract:')
split = (class_string.split(','))
number_of_players = int(split[-1])
split = split[:-1]
print(split)
match_list = []
suits = []
ranks = []
for item in split:
extraction = re.search(r'(diamonds-(\w|\d))|(clubs-(\w|\d))|(hearts-(\w|\d))|(spades-(\w|\d))',item)[0]
extraction = extraction.replace('clubs-','C').replace('spades-','S').replace('diamonds-','D').replace('hearts-','H')
extraction = extraction.replace('Q','12').replace('K','13').replace('A','14').replace('J','11')
formatted = re.search(r'\d{1,2}',extraction)[0]+re.search(r'\w',extraction)[0]
match_list.append(formatted)
hand_odds(match_list[-2:],match_list[:-2],number_of_players-1,1000) | true | true |
f7336b996d7cef44e383a2beac1901fa21fa9302 | 34,065 | py | Python | pyfolio/tears.py | NunoEdgarGFlowHub/pyfolio | 68efdcc2e2d0f140ddbc408a260c6318ac8b06d3 | [
"Apache-2.0"
] | null | null | null | pyfolio/tears.py | NunoEdgarGFlowHub/pyfolio | 68efdcc2e2d0f140ddbc408a260c6318ac8b06d3 | [
"Apache-2.0"
] | null | null | null | pyfolio/tears.py | NunoEdgarGFlowHub/pyfolio | 68efdcc2e2d0f140ddbc408a260c6318ac8b06d3 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from time import time
import warnings
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.stats
import pandas as pd
from . import timeseries
from . import utils
from . import pos
from . import txn
from . import round_trips
from . import plotting
from . import _seaborn as sns
from .plotting import plotting_context
try:
from . import bayesian
except ImportError:
warnings.warn(
"Could not import bayesian submodule due to missing pymc3 dependency.",
ImportWarning)
def timer(msg_body, previous_time):
    """Print how long a step took and return a fresh timestamp.

    Parameters
    ----------
    msg_body : str
        Description of the step that just finished.
    previous_time : float
        Timestamp (seconds since epoch) taken before the step started.

    Returns
    -------
    float
        The current timestamp, suitable for timing the next step.
    """
    now = time()
    elapsed = now - previous_time
    print(("\nFinished " + msg_body + " (required {:.2f} seconds).").format(
        elapsed))
    return now
def create_full_tear_sheet(returns,
                           positions=None,
                           transactions=None,
                           benchmark_rets=None,
                           gross_lev=None,
                           slippage=None,
                           live_start_date=None,
                           sector_mappings=None,
                           bayesian=False,
                           round_trips=False,
                           hide_positions=False,
                           cone_std=(1.0, 1.5, 2.0),
                           bootstrap=False,
                           set_context=True):
    """
    Generate a number of tear sheets that are useful
    for analyzing a strategy's performance.
    - Fetches benchmarks if needed.
    - Creates tear sheets for returns, and significant events.
    If possible, also creates tear sheets for position analysis,
    transaction analysis, and Bayesian analysis.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - Time series with decimal returns.
         - Example:
            2015-07-16    -0.012143
            2015-07-17    0.045350
            2015-07-20    0.030957
            2015-07-21    0.004902
    positions : pd.DataFrame, optional
        Daily net position values.
         - Time series of dollar amount invested in each position and cash.
         - Days where stocks are not held can be represented by 0 or NaN.
         - Non-working capital is labelled 'cash'
         - Example:
            index         'AAPL'         'MSFT'          cash
            2004-01-09    13939.3800     -14012.9930     711.5585
            2004-01-12    14492.6300     -14624.8700     27.1821
            2004-01-13    -13853.2800    13653.6400      -43.6375
    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices.
        - One row per trade.
        - Trades on different names that occur at the
          same time will have identical indicies.
        - Example:
            index                  amount   price    symbol
            2004-01-09 12:18:01    483      324.12   'AAPL'
            2004-01-09 12:18:01    122      83.10    'MSFT'
            2004-01-13 14:12:23    -75      340.43   'AAPL'
    benchmark_rets : pd.Series, optional
        Daily noncumulative returns of the benchmark.
        - This is in the same style as returns.
        - Defaults to SPY returns fetched via utils.get_symbol_rets.
    gross_lev : pd.Series, optional
        The leverage of a strategy.
         - Time series of the sum of long and short exposure per share
            divided by net asset value.
         - Example:
            2009-12-04    0.999932
            2009-12-07    0.999783
            2009-12-08    0.999880
            2009-12-09    1.000283
    slippage : int/float, optional
        Basis points of slippage to apply to returns before generating
        tearsheet stats and plots.
        If a value is provided, slippage parameter sweep
        plots will be generated from the unadjusted returns.
        Transactions and positions must also be passed.
        - See txn.adjust_returns_for_slippage for more details.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period. This datetime should be normalized.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    hide_positions : bool, optional
        If True, will not output any symbol names.
    bayesian: boolean, optional
        If True, causes the generation of a Bayesian tear sheet.
    round_trips: boolean, optional
        If True, causes the generation of a round trip tear sheet.
    cone_std : float, or tuple, optional
        If float, The standard deviation to use for the cone plots.
        If tuple, Tuple of standard deviation values to use for the cone plots
         - The cone is a normal distribution with this standard deviation
             centered around a linear regression.
    bootstrap : boolean (optional)
        Whether to perform bootstrap analysis for the performance
        metrics. Takes a few minutes longer.
    set_context : boolean, optional
        If True, set default plotting style context.
         - See plotting.context().
    """
    # NOTE(review): the `bayesian` and `round_trips` parameters shadow the
    # modules of the same name imported at the top of this file; within this
    # function only the boolean flags are visible.  The sub-tear-sheet
    # functions called below use the modules at module scope, so behavior is
    # unaffected — but renaming either parameter would break callers.
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
    # If the strategy's history is longer than the benchmark's, limit strategy
    if returns.index[0] < benchmark_rets.index[0]:
        returns = returns[returns.index > benchmark_rets.index[0]]
    if slippage is not None and transactions is not None:
        # Keep the raw (pre-slippage) series so the transaction tear sheet
        # can draw its slippage sweep / sensitivity plots from it.
        turnover = txn.get_turnover(positions, transactions,
                                    period=None, average=False)
        unadjusted_returns = returns.copy()
        returns = txn.adjust_returns_for_slippage(returns, turnover, slippage)
    else:
        unadjusted_returns = None
    create_returns_tear_sheet(
        returns,
        live_start_date=live_start_date,
        cone_std=cone_std,
        benchmark_rets=benchmark_rets,
        bootstrap=bootstrap,
        set_context=set_context)
    create_interesting_times_tear_sheet(returns,
                                        benchmark_rets=benchmark_rets,
                                        set_context=set_context)
    # Position/transaction/round-trip tear sheets are nested: each needs the
    # data required by the one above it.
    if positions is not None:
        create_position_tear_sheet(returns, positions,
                                   gross_lev=gross_lev,
                                   hide_positions=hide_positions,
                                   set_context=set_context,
                                   sector_mappings=sector_mappings)
        if transactions is not None:
            create_txn_tear_sheet(returns, positions, transactions,
                                  unadjusted_returns=unadjusted_returns,
                                  set_context=set_context)
            if round_trips:
                create_round_trip_tear_sheet(
                    positions=positions,
                    transactions=transactions,
                    sector_mappings=sector_mappings)
    if bayesian:
        create_bayesian_tear_sheet(returns,
                                   live_start_date=live_start_date,
                                   benchmark_rets=benchmark_rets,
                                   set_context=set_context)
@plotting_context
def create_returns_tear_sheet(returns, live_start_date=None,
                              cone_std=(1.0, 1.5, 2.0),
                              benchmark_rets=None,
                              bootstrap=False,
                              return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's returns.
    - Fetches benchmarks, then creates the plots on a single figure.
    - Plots: rolling returns (with cone), rolling beta, rolling sharpe,
        rolling Fama-French risk factors, drawdowns, underwater plot, monthly
        and annual return plots, daily similarity plots,
        and return quantile box plot.
    - Will also print the start and end dates of the strategy,
        performance statistics, drawdown periods, and the return range.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period.
    cone_std : float, or tuple, optional
        If float, The standard deviation to use for the cone plots.
        If tuple, Tuple of standard deviation values to use for the cone plots
         - The cone is a normal distribution with this standard deviation
             centered around a linear regression.
    benchmark_rets : pd.Series, optional
        Daily noncumulative returns of the benchmark.
         - This is in the same style as returns.
    bootstrap : boolean (optional)
        Whether to perform bootstrap analysis for the performance
        metrics. Takes a few minutes longer.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    set_context : boolean, optional
        If True, set default plotting style context.
    """
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
        # If the strategy's history is longer than the benchmark's, limit
        # strategy
        if returns.index[0] < benchmark_rets.index[0]:
            returns = returns[returns.index > benchmark_rets.index[0]]
    df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
    print("Entire data start date: " + str(df_cum_rets
                                           .index[0].strftime('%Y-%m-%d')))
    print("Entire data end date: " + str(df_cum_rets
                                         .index[-1].strftime('%Y-%m-%d')))
    print('\n')
    plotting.show_perf_stats(returns, benchmark_rets,
                             bootstrap=bootstrap,
                             live_start_date=live_start_date)
    # Figure layout: one grid row per rolling plot, a 3-panel summary row,
    # and the return-quantile row; extra rows are reserved when the
    # out-of-sample period or bootstrap plots are requested.
    if live_start_date is not None:
        vertical_sections = 11
        # Normalize via the utils helper so comparisons against the
        # (tz-aware) returns index work.
        live_start_date = utils.get_utc_timestamp(live_start_date)
    else:
        vertical_sections = 10
    if bootstrap:
        vertical_sections += 1
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    # All time-series axes share the x-axis of the top rolling-returns plot.
    ax_rolling_returns = plt.subplot(gs[:2, :])
    ax_rolling_returns_vol_match = plt.subplot(gs[2, :],
                                               sharex=ax_rolling_returns)
    ax_rolling_beta = plt.subplot(gs[3, :], sharex=ax_rolling_returns)
    ax_rolling_sharpe = plt.subplot(gs[4, :], sharex=ax_rolling_returns)
    ax_rolling_risk = plt.subplot(gs[5, :], sharex=ax_rolling_returns)
    ax_drawdown = plt.subplot(gs[6, :], sharex=ax_rolling_returns)
    ax_underwater = plt.subplot(gs[7, :], sharex=ax_rolling_returns)
    ax_monthly_heatmap = plt.subplot(gs[8, 0])
    ax_annual_returns = plt.subplot(gs[8, 1])
    ax_monthly_dist = plt.subplot(gs[8, 2])
    ax_return_quantiles = plt.subplot(gs[9, :])
    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=cone_std,
        ax=ax_rolling_returns)
    ax_rolling_returns.set_title(
        'Cumulative Returns')
    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=None,
        volatility_match=True,
        legend_loc=None,
        ax=ax_rolling_returns_vol_match)
    ax_rolling_returns_vol_match.set_title(
        'Cumulative returns volatility matched to benchmark.')
    plotting.plot_rolling_beta(
        returns, benchmark_rets, ax=ax_rolling_beta)
    plotting.plot_rolling_sharpe(
        returns, ax=ax_rolling_sharpe)
    plotting.plot_rolling_fama_french(
        returns, ax=ax_rolling_risk)
    # Drawdowns
    plotting.plot_drawdown_periods(
        returns, top=5, ax=ax_drawdown)
    plotting.plot_drawdown_underwater(
        returns=returns, ax=ax_underwater)
    plotting.show_worst_drawdown_periods(returns)
    df_weekly = timeseries.aggregate_returns(returns, 'weekly')
    df_monthly = timeseries.aggregate_returns(returns, 'monthly')
    print('\n')
    plotting.show_return_range(returns, df_weekly)
    plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
    plotting.plot_annual_returns(returns, ax=ax_annual_returns)
    plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)
    plotting.plot_return_quantiles(
        returns,
        df_weekly,
        df_monthly,
        ax=ax_return_quantiles)
    if bootstrap:
        # Extra row reserved above when bootstrap=True.
        ax_bootstrap = plt.subplot(gs[10, :])
        plotting.plot_perf_stats(returns, benchmark_rets,
                                 ax=ax_bootstrap)
    # sharex hides tick labels on all but the bottom axis; re-enable them.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_position_tear_sheet(returns, positions, gross_lev=None,
                               show_and_plot_top_pos=2, hide_positions=False,
                               return_fig=False, sector_mappings=None):
    """
    Generate a number of plots for analyzing a
    strategy's positions and holdings.
    - Plots: gross leverage, exposures, top positions, and holdings.
    - Will also print the top positions held.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    gross_lev : pd.Series, optional
        The leverage of a strategy.
         - See full explanation in create_full_tear_sheet.
        If None, the gross-leverage axis is left empty.
    show_and_plot_top_pos : int, optional
        By default, this is 2, and both prints and plots the
        top 10 positions.
        If this is 0, it will only plot; if 1, it will only print.
    hide_positions : bool, optional
        If True, will not output any symbol names.
        Overrides show_and_plot_top_pos to 0 to suppress text output.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    set_context : boolean, optional
        If True, set default plotting style context.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    """
    if hide_positions:
        show_and_plot_top_pos = 0
    # One extra grid row is reserved for the sector-allocation plot.
    vertical_sections = 6 if sector_mappings is not None else 5
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_gross_leverage = plt.subplot(gs[0, :])
    ax_exposures = plt.subplot(gs[1, :], sharex=ax_gross_leverage)
    ax_top_positions = plt.subplot(gs[2, :], sharex=ax_gross_leverage)
    ax_max_median_pos = plt.subplot(gs[3, :], sharex=ax_gross_leverage)
    ax_holdings = plt.subplot(gs[4, :], sharex=ax_gross_leverage)
    positions_alloc = pos.get_percent_alloc(positions)
    if gross_lev is not None:
        plotting.plot_gross_leverage(returns, gross_lev, ax=ax_gross_leverage)
    plotting.plot_exposures(returns, positions_alloc, ax=ax_exposures)
    plotting.show_and_plot_top_positions(
        returns,
        positions_alloc,
        show_and_plot=show_and_plot_top_pos,
        hide_positions=hide_positions,
        ax=ax_top_positions)
    plotting.plot_max_median_position_concentration(positions,
                                                    ax=ax_max_median_pos)
    plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)
    if sector_mappings is not None:
        sector_exposures = pos.get_sector_exposures(positions, sector_mappings)
        # Skip the plot when everything mapped to a single column
        # (i.e. only cash or one sector).
        if len(sector_exposures.columns) > 1:
            sector_alloc = pos.get_percent_alloc(sector_exposures)
            sector_alloc = sector_alloc.drop('cash', axis='columns')
            ax_sector_alloc = plt.subplot(gs[5, :], sharex=ax_gross_leverage)
            plotting.plot_sector_allocations(returns, sector_alloc,
                                             ax=ax_sector_alloc)
    # sharex hides tick labels on all but the bottom axis; re-enable them.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_txn_tear_sheet(returns, positions, transactions,
                          unadjusted_returns=None, return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's transactions.
    Plots: turnover, daily volume, and a histogram of daily volume.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.
    unadjusted_returns : pd.Series, optional
        Daily unadjusted returns of the strategy, noncumulative.
        Will plot additional slippage sweep analysis.
         - See pyfolio.plotting.plot_slippage_sweep and
            pyfolio.plotting.plot_slippage_sensitivity
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    # Two extra grid rows are reserved for the slippage sweep/sensitivity
    # plots when pre-slippage returns are supplied.
    vertical_sections = 5 if unadjusted_returns is not None else 3
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_turnover = plt.subplot(gs[0, :])
    ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)
    ax_turnover_hist = plt.subplot(gs[2, :])
    plotting.plot_turnover(
        returns,
        transactions,
        positions,
        ax=ax_turnover)
    plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)
    try:
        plotting.plot_daily_turnover_hist(transactions, positions,
                                          ax=ax_turnover_hist)
    except ValueError:
        # Best-effort: the histogram can fail on degenerate data; warn and
        # continue with the remaining plots.
        warnings.warn('Unable to generate turnover plot.', UserWarning)
    if unadjusted_returns is not None:
        ax_slippage_sweep = plt.subplot(gs[3, :])
        plotting.plot_slippage_sweep(unadjusted_returns,
                                     transactions,
                                     positions,
                                     ax=ax_slippage_sweep
                                     )
        ax_slippage_sensitivity = plt.subplot(gs[4, :])
        plotting.plot_slippage_sensitivity(unadjusted_returns,
                                           transactions,
                                           positions,
                                           ax=ax_slippage_sensitivity
                                           )
    # sharex hides tick labels on all but the bottom axis; re-enable them.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_round_trip_tear_sheet(positions, transactions,
                                 sector_mappings=None,
                                 return_fig=False):
    """
    Generate a number of figures and plots describing the duration,
    frequency, and profitability of trade "round trips."
    A round trip is started when a new long or short position is
    opened and is only completed when the number of shares in that
    position returns to or crosses zero.
    Parameters
    ----------
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    # Synthesize closing trades for still-open positions so every round
    # trip has a well-defined end before extraction.
    transactions_closed = round_trips.add_closing_transactions(positions,
                                                               transactions)
    trades = round_trips.extract_round_trips(transactions_closed)
    if len(trades) < 5:
        warnings.warn(
            """Fewer than 5 round-trip trades made.
               Skipping round trip tearsheet.""", UserWarning)
        return
    ndays = len(positions)
    print(trades.drop(['open_dt', 'close_dt', 'symbol'],
                      axis='columns').describe())
    print('Percent of round trips profitable = {:.4}%'.format(
        (trades.pnl > 0).mean() * 100))
    winning_round_trips = trades[trades.pnl > 0]
    losing_round_trips = trades[trades.pnl < 0]
    print('Mean return per winning round trip = {:.4}'.format(
        winning_round_trips.returns.mean()))
    print('Mean return per losing round trip = {:.4}'.format(
        losing_round_trips.returns.mean()))
    print('A decision is made every {:.4} days.'.format(ndays / len(trades)))
    print('{:.4} trading decisions per day.'.format(len(trades) * 1. / ndays))
    print('{:.4} trading decisions per month.'.format(
        len(trades) * 1. / (ndays / 21)))
    plotting.show_profit_attribution(trades)
    if sector_mappings is not None:
        sector_trades = round_trips.apply_sector_mappings_to_round_trips(
            trades, sector_mappings)
        plotting.show_profit_attribution(sector_trades)
    # Bug fix: this figure was previously created twice in a row, leaking an
    # empty matplotlib figure that was never drawn on or closed.
    fig = plt.figure(figsize=(14, 3 * 6))
    gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)
    ax_trade_lifetimes = plt.subplot(gs[0, :])
    ax_prob_profit_trade = plt.subplot(gs[1, 0])
    ax_holding_time = plt.subplot(gs[1, 1])
    ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
    ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])
    plotting.plot_round_trip_life_times(trades, ax=ax_trade_lifetimes)
    plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)
    trade_holding_times = [x.days for x in trades['duration']]
    sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
    ax_holding_time.set(xlabel='holding time in days')
    sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
    ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')
    sns.distplot(trades.returns * 100, kde=False,
                 ax=ax_pnl_per_round_trip_pct)
    ax_pnl_per_round_trip_pct.set(
        xlabel='Round-trip returns in %')
    gs.tight_layout(fig)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_interesting_times_tear_sheet(
        returns, benchmark_rets=None, legend_loc='best', return_fig=False):
    """
    Generate a number of returns plots around interesting points in time,
    like the flash crash and 9/11.
    Plots: returns around the dotcom bubble burst, Lehmann Brothers' failure,
    9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing
    bubble burst, EZB IR, Great Recession (August 2007, March and September
    of 2008, Q1 & Q2 2009), flash crash, April and October 2014.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    benchmark_rets : pd.Series, optional
        Daily noncumulative returns of the benchmark.
         - This is in the same style as returns.
    legend_loc : plt.legend_loc, optional
        The legend's location.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    set_context : boolean, optional
        If True, set default plotting style context.
    """
    rets_interesting = timeseries.extract_interesting_date_ranges(returns)
    if len(rets_interesting) == 0:
        # Bug fix: the implicit string concatenation was missing a space and
        # produced "anyinteresting times."
        warnings.warn('Passed returns do not overlap with any '
                      'interesting times.', UserWarning)
        return
    print('\nStress Events')
    print(np.round(pd.DataFrame(rets_interesting).describe().transpose().loc[
        :, ['mean', 'min', 'max']], 3))
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
        # If the strategy's history is longer than the benchmark's, limit
        # strategy
        if returns.index[0] < benchmark_rets.index[0]:
            returns = returns[returns.index > benchmark_rets.index[0]]
    bmark_interesting = timeseries.extract_interesting_date_ranges(
        benchmark_rets)
    num_plots = len(rets_interesting)
    # 2 plots, 1 row; 3 plots, 2 rows; 4 plots, 2 rows; etc.
    num_rows = int((num_plots + 1) / 2.0)
    fig = plt.figure(figsize=(14, num_rows * 6.0))
    gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)
    for i, (name, rets_period) in enumerate(rets_interesting.items()):
        # i=0 -> 0, i=1 -> 0, i=2 -> 1 ;; i=0 -> 0, i=1 -> 1, i=2 -> 0
        ax = plt.subplot(gs[int(i / 2.0), i % 2])
        timeseries.cum_returns(rets_period).plot(
            ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)
        timeseries.cum_returns(bmark_interesting[name]).plot(
            ax=ax, color='gray', label='SPY', alpha=0.6)
        ax.legend(['algo',
                   'SPY'],
                  loc=legend_loc)
        ax.set_title(name, size=14)
        ax.set_ylabel('Returns')
        ax.set_xlabel('')
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_bayesian_tear_sheet(returns, benchmark_rets=None,
                               live_start_date=None, samples=2000,
                               return_fig=False, stoch_vol=False):
    """
    Generate a number of Bayesian distributions and a Bayesian
    cone plot of returns.
    Plots: Sharpe distribution, annual volatility distribution,
    annual alpha distribution, beta distribution, predicted 1 and 5
    day returns distributions, and a cumulative returns cone plot.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    benchmark_rets : pd.Series or pd.DataFrame, optional
        Daily noncumulative returns of the benchmark.
         - This is in the same style as returns.
        Passing the string 'Fama-French' selects rolling Fama-French
        factors as the benchmark instead.
    live_start_date : datetime, optional
        The point in time when the strategy began live
        trading, after its backtest period. Required.
    samples : int, optional
        Number of posterior samples to draw.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    set_context : boolean, optional
        If True, set default plotting style context.
    stoch_vol : boolean, optional
        If True, run and plot the stochastic volatility model

    Raises
    ------
    NotImplementedError
        If live_start_date is not provided.
    """
    if live_start_date is None:
        raise NotImplementedError(
            'Bayesian tear sheet requires setting of live_start_date'
        )
    # Default benchmark is the S&P 500 (SPY), aligned to the returns' range...
    fama_french = False
    if benchmark_rets is None:
        benchmark_rets = pd.DataFrame(
            utils.get_symbol_rets('SPY',
                                  start=returns.index[0],
                                  end=returns.index[-1]))
    # ...unless the user asks for rolling Fama-French factors instead.
    elif isinstance(benchmark_rets, str) and (benchmark_rets ==
                                              'Fama-French'):
        fama_french = True
        rolling_window = utils.APPROX_BDAYS_PER_MONTH * 6
        benchmark_rets = timeseries.rolling_fama_french(
            returns, rolling_window=rolling_window)
    # Split in-sample (train) vs. live (test) at the live start date.
    live_start_date = utils.get_utc_timestamp(live_start_date)
    df_train = returns.loc[returns.index < live_start_date]
    df_test = returns.loc[returns.index >= live_start_date]
    # Run T model with missing data
    print("Running T model")
    previous_time = time()
    # track the total run time of the Bayesian tear sheet
    start_time = previous_time
    trace_t, ppc_t = bayesian.run_model('t', df_train,
                                        returns_test=df_test,
                                        samples=samples, ppc=True)
    previous_time = timer("T model", previous_time)
    # Compute BEST model
    print("\nRunning BEST model")
    trace_best = bayesian.run_model('best', df_train,
                                    returns_test=df_test,
                                    samples=samples)
    previous_time = timer("BEST model", previous_time)
    # Plot results
    fig = plt.figure(figsize=(14, 10 * 2))
    gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)
    axs = []
    row = 0
    # Plot Bayesian cone
    ax_cone = plt.subplot(gs[row, :])
    bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)
    previous_time = timer("plotting Bayesian cone", previous_time)
    # Plot BEST results: three rows of paired panels plus one full-width
    # effect-size panel, filled in by bayesian.plot_best.
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    # Effect size across two
    axs.append(plt.subplot(gs[row, :]))
    bayesian.plot_best(trace=trace_best, axs=axs)
    previous_time = timer("plotting BEST results", previous_time)
    # Compute Bayesian predictions
    row += 1
    ax_ret_pred_day = plt.subplot(gs[row, 0])
    ax_ret_pred_week = plt.subplot(gs[row, 1])
    day_pred = ppc_t[:, 0]
    p5 = scipy.stats.scoreatpercentile(day_pred, 5)
    sns.distplot(day_pred,
                 ax=ax_ret_pred_day
                 )
    ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_day.set_xlabel('Predicted returns 1 day')
    ax_ret_pred_day.set_ylabel('Frequency')
    ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
                         verticalalignment='bottom',
                         horizontalalignment='right',
                         transform=ax_ret_pred_day.transAxes)
    previous_time = timer("computing Bayesian predictions", previous_time)
    # Plot Bayesian VaRs
    week_pred = (
        np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]
    p5 = scipy.stats.scoreatpercentile(week_pred, 5)
    sns.distplot(week_pred,
                 ax=ax_ret_pred_week
                 )
    ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')
    ax_ret_pred_week.set_ylabel('Frequency')
    ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
                          verticalalignment='bottom',
                          horizontalalignment='right',
                          transform=ax_ret_pred_week.transAxes)
    previous_time = timer("plotting Bayesian VaRs estimate", previous_time)
    # Run alpha beta model
    print("\nRunning alpha beta model")
    benchmark_rets = benchmark_rets.loc[df_train.index]
    trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,
                                          bmark=benchmark_rets,
                                          samples=samples)
    previous_time = timer("running alpha beta model", previous_time)
    # Plot alpha and beta
    row += 1
    ax_alpha = plt.subplot(gs[row, 0])
    ax_beta = plt.subplot(gs[row, 1])
    if fama_french:
        sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
                     ax=ax_alpha)
        betas = ['SMB', 'HML', 'UMD']
        nbeta = trace_alpha_beta['beta'].shape[1]
        for i in range(nbeta):
            sns.distplot(trace_alpha_beta['beta'][100:, i], ax=ax_beta,
                         label=betas[i])
        plt.legend()
    else:
        sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
                     ax=ax_alpha)
        sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)
    ax_alpha.set_xlabel('Annual Alpha')
    ax_alpha.set_ylabel('Belief')
    ax_beta.set_xlabel('Beta')
    ax_beta.set_ylabel('Belief')
    previous_time = timer("plotting alpha beta model", previous_time)
    if stoch_vol:
        # run stochastic volatility model
        returns_cutoff = 400
        print(
            "\nRunning stochastic volatility model on "
            "most recent {} days of returns.".format(returns_cutoff)
        )
        # Bug fix: the slice was previously only taken when
        # df_train.size > returns_cutoff, leaving df_train_truncated
        # undefined (NameError) for shorter training series.  A negative
        # slice is already a no-op on shorter series, so take it always.
        df_train_truncated = df_train[-returns_cutoff:]
        _, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)
        previous_time = timer(
            "running stochastic volatility model", previous_time)
        # plot log(sigma) and log(nu)
        print("\nPlotting stochastic volatility model")
        row += 1
        ax_sigma_log = plt.subplot(gs[row, 0])
        ax_nu_log = plt.subplot(gs[row, 1])
        sigma_log = trace_stoch_vol['sigma_log']
        sns.distplot(sigma_log, ax=ax_sigma_log)
        ax_sigma_log.set_xlabel('log(Sigma)')
        ax_sigma_log.set_ylabel('Belief')
        nu_log = trace_stoch_vol['nu_log']
        sns.distplot(nu_log, ax=ax_nu_log)
        ax_nu_log.set_xlabel('log(nu)')
        ax_nu_log.set_ylabel('Belief')
        # plot latent volatility
        row += 1
        ax_volatility = plt.subplot(gs[row, :])
        bayesian.plot_stoch_vol(
            df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)
        previous_time = timer(
            "plotting stochastic volatility model", previous_time)
    total_time = time() - start_time
    print("\nTotal runtime was {:.2f} seconds.".format(total_time))
    gs.tight_layout(fig)
    plt.show()
    if return_fig:
        return fig
| 38.535068 | 79 | 0.632144 |
from __future__ import division
from time import time
import warnings
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.stats
import pandas as pd
from . import timeseries
from . import utils
from . import pos
from . import txn
from . import round_trips
from . import plotting
from . import _seaborn as sns
from .plotting import plotting_context
try:
from . import bayesian
except ImportError:
warnings.warn(
"Could not import bayesian submodule due to missing pymc3 dependency.",
ImportWarning)
def timer(msg_body, previous_time):
    """Report how long a step took and hand back a fresh timestamp."""
    finish = time()
    msg = "\nFinished " + msg_body + " (required {:.2f} seconds)."
    # Elapsed seconds since the caller's previous checkpoint.
    print(msg.format(finish - previous_time))
    return finish
def create_full_tear_sheet(returns,
                           positions=None,
                           transactions=None,
                           benchmark_rets=None,
                           gross_lev=None,
                           slippage=None,
                           live_start_date=None,
                           sector_mappings=None,
                           bayesian=False,
                           round_trips=False,
                           hide_positions=False,
                           cone_std=(1.0, 1.5, 2.0),
                           bootstrap=False,
                           set_context=True):
    """
    Generate the full set of tear sheets for analyzing a strategy.

    Always produces the returns and interesting-times tear sheets; when
    positions (and transactions) are supplied, also produces the position,
    transaction, and optionally round-trip tear sheets; when `bayesian` is
    True, produces the Bayesian tear sheet.  Returns are optionally
    adjusted for `slippage` (basis points) before analysis, and
    `benchmark_rets` defaults to SPY.
    """
    # NOTE(review): the `bayesian` and `round_trips` parameters shadow the
    # modules of the same name imported at the top of this file; only the
    # boolean flags are visible inside this function.
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
    # If the strategy's history is longer than the benchmark's, limit the
    # strategy's returns to the overlapping range.
    if returns.index[0] < benchmark_rets.index[0]:
        returns = returns[returns.index > benchmark_rets.index[0]]
    if slippage is not None and transactions is not None:
        # Keep the raw (pre-slippage) series so the transaction tear sheet
        # can draw its slippage sweep / sensitivity plots from it.
        turnover = txn.get_turnover(positions, transactions,
                                    period=None, average=False)
        unadjusted_returns = returns.copy()
        returns = txn.adjust_returns_for_slippage(returns, turnover, slippage)
    else:
        unadjusted_returns = None
    create_returns_tear_sheet(
        returns,
        live_start_date=live_start_date,
        cone_std=cone_std,
        benchmark_rets=benchmark_rets,
        bootstrap=bootstrap,
        set_context=set_context)
    create_interesting_times_tear_sheet(returns,
                                        benchmark_rets=benchmark_rets,
                                        set_context=set_context)
    # Position/transaction/round-trip tear sheets are nested: each needs the
    # data required by the one above it.
    if positions is not None:
        create_position_tear_sheet(returns, positions,
                                   gross_lev=gross_lev,
                                   hide_positions=hide_positions,
                                   set_context=set_context,
                                   sector_mappings=sector_mappings)
        if transactions is not None:
            create_txn_tear_sheet(returns, positions, transactions,
                                  unadjusted_returns=unadjusted_returns,
                                  set_context=set_context)
            if round_trips:
                create_round_trip_tear_sheet(
                    positions=positions,
                    transactions=transactions,
                    sector_mappings=sector_mappings)
    if bayesian:
        create_bayesian_tear_sheet(returns,
                                   live_start_date=live_start_date,
                                   benchmark_rets=benchmark_rets,
                                   set_context=set_context)
@plotting_context
def create_returns_tear_sheet(returns, live_start_date=None,
                              cone_std=(1.0, 1.5, 2.0),
                              benchmark_rets=None,
                              bootstrap=False,
                              return_fig=False):
    """Plot the returns tear sheet: performance stats plus a stacked figure of
    rolling-return, beta, Sharpe, risk, drawdown and distribution plots.

    Parameters
    ----------
    returns : pd.Series
        Strategy returns with a datetime index.
        NOTE(review): assumed from the indexing below — confirm with callers.
    live_start_date : datetime-like, optional
        Start of live/out-of-sample data; adds a vertical section and is
        normalized to a UTC timestamp before plotting.
    cone_std : tuple of float
        Standard deviations for the forecast cone on the rolling-returns plot.
    benchmark_rets : pd.Series, optional
        Benchmark returns; defaults to SPY via ``utils.get_symbol_rets``.
    bootstrap : bool
        If True, add a bootstrapped performance-stats plot as an extra row.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    """
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
    # Trim the strategy history so it never starts before the benchmark.
    if returns.index[0] < benchmark_rets.index[0]:
        returns = returns[returns.index > benchmark_rets.index[0]]
    df_cum_rets = timeseries.cum_returns(returns, starting_value=1)
    print("Entire data start date: " + str(df_cum_rets
                                           .index[0].strftime('%Y-%m-%d')))
    print("Entire data end date: " + str(df_cum_rets
                                         .index[-1].strftime('%Y-%m-%d')))
    print('\n')
    plotting.show_perf_stats(returns, benchmark_rets,
                             bootstrap=bootstrap,
                             live_start_date=live_start_date)
    # One extra grid row is needed when a live period is marked, and another
    # when the bootstrap plot is requested.
    if live_start_date is not None:
        vertical_sections = 11
        live_start_date = utils.get_utc_timestamp(live_start_date)
    else:
        vertical_sections = 10
    if bootstrap:
        vertical_sections += 1
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    # All time-series axes share the x-axis of the top rolling-returns plot.
    ax_rolling_returns = plt.subplot(gs[:2, :])
    ax_rolling_returns_vol_match = plt.subplot(gs[2, :],
                                               sharex=ax_rolling_returns)
    ax_rolling_beta = plt.subplot(gs[3, :], sharex=ax_rolling_returns)
    ax_rolling_sharpe = plt.subplot(gs[4, :], sharex=ax_rolling_returns)
    ax_rolling_risk = plt.subplot(gs[5, :], sharex=ax_rolling_returns)
    ax_drawdown = plt.subplot(gs[6, :], sharex=ax_rolling_returns)
    ax_underwater = plt.subplot(gs[7, :], sharex=ax_rolling_returns)
    ax_monthly_heatmap = plt.subplot(gs[8, 0])
    ax_annual_returns = plt.subplot(gs[8, 1])
    ax_monthly_dist = plt.subplot(gs[8, 2])
    ax_return_quantiles = plt.subplot(gs[9, :])
    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=cone_std,
        ax=ax_rolling_returns)
    ax_rolling_returns.set_title(
        'Cumulative Returns')
    # Second rolling-returns plot, rescaled to the benchmark's volatility.
    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=None,
        volatility_match=True,
        legend_loc=None,
        ax=ax_rolling_returns_vol_match)
    ax_rolling_returns_vol_match.set_title(
        'Cumulative returns volatility matched to benchmark.')
    plotting.plot_rolling_beta(
        returns, benchmark_rets, ax=ax_rolling_beta)
    plotting.plot_rolling_sharpe(
        returns, ax=ax_rolling_sharpe)
    plotting.plot_rolling_fama_french(
        returns, ax=ax_rolling_risk)
    plotting.plot_drawdown_periods(
        returns, top=5, ax=ax_drawdown)
    plotting.plot_drawdown_underwater(
        returns=returns, ax=ax_underwater)
    plotting.show_worst_drawdown_periods(returns)
    df_weekly = timeseries.aggregate_returns(returns, 'weekly')
    df_monthly = timeseries.aggregate_returns(returns, 'monthly')
    print('\n')
    plotting.show_return_range(returns, df_weekly)
    plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
    plotting.plot_annual_returns(returns, ax=ax_annual_returns)
    plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)
    plotting.plot_return_quantiles(
        returns,
        df_weekly,
        df_monthly,
        ax=ax_return_quantiles)
    if bootstrap:
        ax_bootstrap = plt.subplot(gs[10, :])
        plotting.plot_perf_stats(returns, benchmark_rets,
                                 ax=ax_bootstrap)
    # sharex hides tick labels on all but the bottom axis; re-enable them so
    # every panel is individually readable.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_position_tear_sheet(returns, positions, gross_lev=None,
                               show_and_plot_top_pos=2, hide_positions=False,
                               return_fig=False, sector_mappings=None):
    """Plot the positions tear sheet: leverage, exposures, top positions,
    concentration, holdings and (optionally) sector allocations.

    Parameters
    ----------
    returns : pd.Series
        Strategy returns, forwarded to the individual plotting helpers.
    positions : pd.DataFrame
        Per-day holdings; percent allocations are derived from it.
    gross_lev : pd.Series, optional
        Gross leverage; the leverage panel is only drawn when provided.
    show_and_plot_top_pos : int
        Mode flag forwarded to ``plotting.show_and_plot_top_positions``;
        forced to 0 when ``hide_positions`` is set.
    hide_positions : bool
        If True, suppress display of individual position names.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    sector_mappings : dict, optional
        Symbol-to-sector mapping; adds a sector-allocation panel.
    """
    if hide_positions:
        show_and_plot_top_pos = 0
    # Reserve an extra grid row for the sector-allocation panel if requested.
    vertical_sections = 6 if sector_mappings is not None else 5
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_gross_leverage = plt.subplot(gs[0, :])
    ax_exposures = plt.subplot(gs[1, :], sharex=ax_gross_leverage)
    ax_top_positions = plt.subplot(gs[2, :], sharex=ax_gross_leverage)
    ax_max_median_pos = plt.subplot(gs[3, :], sharex=ax_gross_leverage)
    ax_holdings = plt.subplot(gs[4, :], sharex=ax_gross_leverage)
    positions_alloc = pos.get_percent_alloc(positions)
    if gross_lev is not None:
        plotting.plot_gross_leverage(returns, gross_lev, ax=ax_gross_leverage)
    plotting.plot_exposures(returns, positions_alloc, ax=ax_exposures)
    plotting.show_and_plot_top_positions(
        returns,
        positions_alloc,
        show_and_plot=show_and_plot_top_pos,
        hide_positions=hide_positions,
        ax=ax_top_positions)
    plotting.plot_max_median_position_concentration(positions,
                                                    ax=ax_max_median_pos)
    plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)
    if sector_mappings is not None:
        sector_exposures = pos.get_sector_exposures(positions, sector_mappings)
        # With a single column there is nothing beyond cash to allocate, so
        # the sector panel is skipped.
        if len(sector_exposures.columns) > 1:
            sector_alloc = pos.get_percent_alloc(sector_exposures)
            sector_alloc = sector_alloc.drop('cash', axis='columns')
            ax_sector_alloc = plt.subplot(gs[5, :], sharex=ax_gross_leverage)
            plotting.plot_sector_allocations(returns, sector_alloc,
                                             ax=ax_sector_alloc)
    # Re-enable x tick labels hidden by the sharex coupling above.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_txn_tear_sheet(returns, positions, transactions,
                          unadjusted_returns=None, return_fig=False):
    """Plot the transactions tear sheet: turnover, daily volume, turnover
    distribution and, when pre-slippage returns are given, slippage sweeps.

    Parameters
    ----------
    returns : pd.Series
        Strategy returns (post-slippage, if slippage was applied upstream).
    positions : pd.DataFrame
        Per-day holdings, required by the turnover plots.
    transactions : pd.DataFrame
        Executed trades.
    unadjusted_returns : pd.Series, optional
        Returns before slippage adjustment; enables the slippage
        sweep/sensitivity panels.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    """
    # Two extra grid rows are needed for the slippage panels.
    vertical_sections = 5 if unadjusted_returns is not None else 3
    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_turnover = plt.subplot(gs[0, :])
    ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)
    ax_turnover_hist = plt.subplot(gs[2, :])
    plotting.plot_turnover(
        returns,
        transactions,
        positions,
        ax=ax_turnover)
    plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)
    # The histogram can legitimately fail (e.g. degenerate data); degrade to
    # a warning rather than aborting the whole tear sheet.
    try:
        plotting.plot_daily_turnover_hist(transactions, positions,
                                          ax=ax_turnover_hist)
    except ValueError:
        warnings.warn('Unable to generate turnover plot.', UserWarning)
    if unadjusted_returns is not None:
        ax_slippage_sweep = plt.subplot(gs[3, :])
        plotting.plot_slippage_sweep(unadjusted_returns,
                                     transactions,
                                     positions,
                                     ax=ax_slippage_sweep
                                     )
        ax_slippage_sensitivity = plt.subplot(gs[4, :])
        plotting.plot_slippage_sensitivity(unadjusted_returns,
                                           transactions,
                                           positions,
                                           ax=ax_slippage_sensitivity
                                           )
    # Re-enable x tick labels hidden by the sharex coupling above.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_round_trip_tear_sheet(positions, transactions,
                                 sector_mappings=None,
                                 return_fig=False):
    """Print round-trip trade statistics and plot their distributions.

    A "round trip" is an opened-and-closed trade extracted from the
    transaction log (open positions are force-closed first so every trade
    has a PnL). At least 5 round trips are required; otherwise a warning is
    issued and nothing is plotted.

    Parameters
    ----------
    positions : pd.DataFrame
        Per-day holdings; its length is used as the number of trading days.
    transactions : pd.DataFrame
        Executed trades.
    sector_mappings : dict, optional
        Symbol-to-sector mapping; adds a per-sector profit attribution table.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    """
    transactions_closed = round_trips.add_closing_transactions(positions,
                                                               transactions)
    trades = round_trips.extract_round_trips(transactions_closed)
    if len(trades) < 5:
        warnings.warn(
            """Fewer than 5 round-trip trades made.
               Skipping round trip tearsheet.""", UserWarning)
        return
    ndays = len(positions)
    print(trades.drop(['open_dt', 'close_dt', 'symbol'],
                      axis='columns').describe())
    print('Percent of round trips profitable = {:.4}%'.format(
        (trades.pnl > 0).mean() * 100))
    winning_round_trips = trades[trades.pnl > 0]
    losing_round_trips = trades[trades.pnl < 0]
    print('Mean return per winning round trip = {:.4}'.format(
        winning_round_trips.returns.mean()))
    print('Mean return per losing round trip = {:.4}'.format(
        losing_round_trips.returns.mean()))
    print('A decision is made every {:.4} days.'.format(ndays / len(trades)))
    print('{:.4} trading decisions per day.'.format(len(trades) * 1. / ndays))
    print('{:.4} trading decisions per month.'.format(
        len(trades) * 1. / (ndays / 21)))
    plotting.show_profit_attribution(trades)
    if sector_mappings is not None:
        sector_trades = round_trips.apply_sector_mappings_to_round_trips(
            trades, sector_mappings)
        plotting.show_profit_attribution(sector_trades)
    # BUG FIX: plt.figure() was previously called twice back-to-back, leaving
    # an orphaned empty figure open; create the figure exactly once.
    fig = plt.figure(figsize=(14, 3 * 6))
    gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)
    ax_trade_lifetimes = plt.subplot(gs[0, :])
    ax_prob_profit_trade = plt.subplot(gs[1, 0])
    ax_holding_time = plt.subplot(gs[1, 1])
    ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
    ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])
    plotting.plot_round_trip_life_times(trades, ax=ax_trade_lifetimes)
    plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)
    trade_holding_times = [x.days for x in trades['duration']]
    sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
    ax_holding_time.set(xlabel='holding time in days')
    sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
    ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')
    sns.distplot(trades.returns * 100, kde=False,
                 ax=ax_pnl_per_round_trip_pct)
    ax_pnl_per_round_trip_pct.set(
        xlabel='Round-trip returns in %')
    gs.tight_layout(fig)
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_interesting_times_tear_sheet(
        returns, benchmark_rets=None, legend_loc='best', return_fig=False):
    """Plot strategy vs. benchmark cumulative returns over historically
    interesting (stress) periods, plus a summary table of those periods.

    Parameters
    ----------
    returns : pd.Series
        Strategy returns with a datetime index.
    benchmark_rets : pd.Series, optional
        Benchmark returns; defaults to SPY via ``utils.get_symbol_rets``.
    legend_loc : str
        Matplotlib legend location for each subplot.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    """
    rets_interesting = timeseries.extract_interesting_date_ranges(returns)
    if len(rets_interesting) == 0:
        # BUG FIX: the two adjacent literals used to concatenate without a
        # space, producing "...overlap with anyinteresting times.".
        warnings.warn('Passed returns do not overlap with any '
                      'interesting times.', UserWarning)
        return
    print('\nStress Events')
    print(np.round(pd.DataFrame(rets_interesting).describe().transpose().loc[
        :, ['mean', 'min', 'max']], 3))
    if benchmark_rets is None:
        benchmark_rets = utils.get_symbol_rets('SPY')
    # Trim the strategy history so it never starts before the benchmark.
    if returns.index[0] < benchmark_rets.index[0]:
        returns = returns[returns.index > benchmark_rets.index[0]]
    bmark_interesting = timeseries.extract_interesting_date_ranges(
        benchmark_rets)
    # Two subplots per row; round up so an odd count still fits.
    num_plots = len(rets_interesting)
    num_rows = int((num_plots + 1) / 2.0)
    fig = plt.figure(figsize=(14, num_rows * 6.0))
    gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)
    for i, (name, rets_period) in enumerate(rets_interesting.items()):
        ax = plt.subplot(gs[int(i / 2.0), i % 2])
        timeseries.cum_returns(rets_period).plot(
            ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)
        timeseries.cum_returns(bmark_interesting[name]).plot(
            ax=ax, color='gray', label='SPY', alpha=0.6)
        ax.legend(['algo',
                   'SPY'],
                  loc=legend_loc)
        ax.set_title(name, size=14)
        ax.set_ylabel('Returns')
        ax.set_xlabel('')
    plt.show()
    if return_fig:
        return fig
@plotting_context
def create_bayesian_tear_sheet(returns, benchmark_rets=None,
                               live_start_date=None, samples=2000,
                               return_fig=False, stoch_vol=False):
    """Fit and plot the Bayesian models (T, BEST, alpha/beta and optionally
    stochastic volatility), splitting *returns* at ``live_start_date`` into
    an in-sample training set and an out-of-sample test set.

    Parameters
    ----------
    returns : pd.Series
        Daily strategy returns with a datetime index.
    benchmark_rets : pd.DataFrame or str, optional
        Benchmark returns for the alpha/beta model; defaults to SPY.
        The literal string 'Fama-French' switches to rolling Fama-French
        factor returns instead.
    live_start_date : datetime-like
        Required; start of the out-of-sample period.
    samples : int
        Number of posterior samples drawn for each model.
    return_fig : bool
        If True, return the matplotlib figure instead of only showing it.
    stoch_vol : bool
        If True, additionally fit/plot a stochastic volatility model on the
        most recent (up to 400) training-set returns.

    Raises
    ------
    NotImplementedError
        If ``live_start_date`` is not supplied.
    """
    if live_start_date is None:
        raise NotImplementedError(
            'Bayesian tear sheet requires setting of live_start_date'
        )
    fama_french = False
    if benchmark_rets is None:
        benchmark_rets = pd.DataFrame(
            utils.get_symbol_rets('SPY',
                                  start=returns.index[0],
                                  end=returns.index[-1]))
    elif isinstance(benchmark_rets, str) and (benchmark_rets ==
                                              'Fama-French'):
        fama_french = True
        rolling_window = utils.APPROX_BDAYS_PER_MONTH * 6
        benchmark_rets = timeseries.rolling_fama_french(
            returns, rolling_window=rolling_window)
    # Split returns into in-sample (train) and out-of-sample (test) sets.
    live_start_date = utils.get_utc_timestamp(live_start_date)
    df_train = returns.loc[returns.index < live_start_date]
    df_test = returns.loc[returns.index >= live_start_date]
    print("Running T model")
    previous_time = time()
    start_time = previous_time
    trace_t, ppc_t = bayesian.run_model('t', df_train,
                                        returns_test=df_test,
                                        samples=samples, ppc=True)
    previous_time = timer("T model", previous_time)
    print("\nRunning BEST model")
    trace_best = bayesian.run_model('best', df_train,
                                    returns_test=df_test,
                                    samples=samples)
    previous_time = timer("BEST model", previous_time)
    fig = plt.figure(figsize=(14, 10 * 2))
    gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)
    axs = []
    row = 0
    # Row 0: Bayesian cone comparing test-set returns to the posterior
    # predictive distribution of the T model.
    ax_cone = plt.subplot(gs[row, :])
    bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)
    previous_time = timer("plotting Bayesian cone", previous_time)
    # Rows 1-4: seven axes consumed by the BEST-model plots.
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, 0]))
    axs.append(plt.subplot(gs[row, 1]))
    row += 1
    axs.append(plt.subplot(gs[row, :]))
    bayesian.plot_best(trace=trace_best, axs=axs)
    previous_time = timer("plotting BEST results", previous_time)
    # Row 5: predicted 1-day and 5-day return distributions with a 5th
    # percentile marker (Bayesian VaR).
    row += 1
    ax_ret_pred_day = plt.subplot(gs[row, 0])
    ax_ret_pred_week = plt.subplot(gs[row, 1])
    day_pred = ppc_t[:, 0]
    p5 = scipy.stats.scoreatpercentile(day_pred, 5)
    sns.distplot(day_pred,
                 ax=ax_ret_pred_day
                 )
    ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_day.set_xlabel('Predicted returns 1 day')
    ax_ret_pred_day.set_ylabel('Frequency')
    ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
                         verticalalignment='bottom',
                         horizontalalignment='right',
                         transform=ax_ret_pred_day.transAxes)
    previous_time = timer("computing Bayesian predictions", previous_time)
    # Compound the first five predicted daily returns into a 5-day return.
    week_pred = (
        np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]
    p5 = scipy.stats.scoreatpercentile(week_pred, 5)
    sns.distplot(week_pred,
                 ax=ax_ret_pred_week
                 )
    ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)
    ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')
    ax_ret_pred_week.set_ylabel('Frequency')
    ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
                          verticalalignment='bottom',
                          horizontalalignment='right',
                          transform=ax_ret_pred_week.transAxes)
    previous_time = timer("plotting Bayesian VaRs estimate", previous_time)
    print("\nRunning alpha beta model")
    benchmark_rets = benchmark_rets.loc[df_train.index]
    trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,
                                          bmark=benchmark_rets,
                                          samples=samples)
    previous_time = timer("running alpha beta model", previous_time)
    # Row 6: posterior distributions of annualized alpha and beta(s).
    row += 1
    ax_alpha = plt.subplot(gs[row, 0])
    ax_beta = plt.subplot(gs[row, 1])
    if fama_french:
        sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
                     ax=ax_alpha)
        betas = ['SMB', 'HML', 'UMD']
        nbeta = trace_alpha_beta['beta'].shape[1]
        for i in range(nbeta):
            sns.distplot(trace_alpha_beta['beta'][100:, i], ax=ax_beta,
                         label=betas[i])
        plt.legend()
    else:
        sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
                     ax=ax_alpha)
        sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)
    ax_alpha.set_xlabel('Annual Alpha')
    ax_alpha.set_ylabel('Belief')
    ax_beta.set_xlabel('Beta')
    ax_beta.set_ylabel('Belief')
    previous_time = timer("plotting alpha beta model", previous_time)
    if stoch_vol:
        # Limit the stochastic volatility fit to the most recent returns to
        # keep the model tractable.
        returns_cutoff = 400
        print(
            "\nRunning stochastic volatility model on "
            "most recent {} days of returns.".format(returns_cutoff)
        )
        if df_train.size > returns_cutoff:
            df_train_truncated = df_train[-returns_cutoff:]
        else:
            # BUG FIX: df_train_truncated was previously only bound inside
            # the branch above, raising NameError whenever the training set
            # had <= returns_cutoff observations. Use the full training set.
            df_train_truncated = df_train
        _, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)
        previous_time = timer(
            "running stochastic volatility model", previous_time)
        print("\nPlotting stochastic volatility model")
        row += 1
        ax_sigma_log = plt.subplot(gs[row, 0])
        ax_nu_log = plt.subplot(gs[row, 1])
        sigma_log = trace_stoch_vol['sigma_log']
        sns.distplot(sigma_log, ax=ax_sigma_log)
        ax_sigma_log.set_xlabel('log(Sigma)')
        ax_sigma_log.set_ylabel('Belief')
        nu_log = trace_stoch_vol['nu_log']
        sns.distplot(nu_log, ax=ax_nu_log)
        ax_nu_log.set_xlabel('log(nu)')
        ax_nu_log.set_ylabel('Belief')
        row += 1
        ax_volatility = plt.subplot(gs[row, :])
        bayesian.plot_stoch_vol(
            df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)
        previous_time = timer(
            "plotting stochastic volatility model", previous_time)
    total_time = time() - start_time
    print("\nTotal runtime was {:.2f} seconds.".format(total_time))
    gs.tight_layout(fig)
    plt.show()
    if return_fig:
        return fig
| true | true |
f7336c722d6dcb3980e9f16f6deb293bb4996a15 | 5,006 | py | Python | sdks/python/pb_client/v1/code_ref_pb2.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | sdks/python/pb_client/v1/code_ref_pb2.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | sdks/python/pb_client/v1/code_ref_pb2.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v1/code_ref.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from v1 import base_pb2 as v1_dot_base__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v1/code_ref.proto',
package='v1',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11v1/code_ref.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\rv1/base.proto\"&\n\x07\x43odeRef\x12\x0e\n\x06\x63ommit\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\"d\n\x12\x43odeRefBodyRequest\x12\x30\n\x06\x65ntity\x18\x01 \x01(\x0b\x32 .v1.ProjectEntityResourceRequest\x12\x1c\n\x07\x63oderef\x18\x02 \x01(\x0b\x32\x0b.v1.CodeRefb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,v1_dot_base__pb2.DESCRIPTOR,])
_CODEREF = _descriptor.Descriptor(
name='CodeRef',
full_name='v1.CodeRef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='commit', full_name='v1.CodeRef.commit', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='v1.CodeRef.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=111,
)
_CODEREFBODYREQUEST = _descriptor.Descriptor(
name='CodeRefBodyRequest',
full_name='v1.CodeRefBodyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='entity', full_name='v1.CodeRefBodyRequest.entity', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coderef', full_name='v1.CodeRefBodyRequest.coderef', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=213,
)
_CODEREFBODYREQUEST.fields_by_name['entity'].message_type = v1_dot_base__pb2._PROJECTENTITYRESOURCEREQUEST
_CODEREFBODYREQUEST.fields_by_name['coderef'].message_type = _CODEREF
DESCRIPTOR.message_types_by_name['CodeRef'] = _CODEREF
DESCRIPTOR.message_types_by_name['CodeRefBodyRequest'] = _CODEREFBODYREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CodeRef = _reflection.GeneratedProtocolMessageType('CodeRef', (_message.Message,), {
'DESCRIPTOR' : _CODEREF,
'__module__' : 'v1.code_ref_pb2'
# @@protoc_insertion_point(class_scope:v1.CodeRef)
})
_sym_db.RegisterMessage(CodeRef)
CodeRefBodyRequest = _reflection.GeneratedProtocolMessageType('CodeRefBodyRequest', (_message.Message,), {
'DESCRIPTOR' : _CODEREFBODYREQUEST,
'__module__' : 'v1.code_ref_pb2'
# @@protoc_insertion_point(class_scope:v1.CodeRefBodyRequest)
})
_sym_db.RegisterMessage(CodeRefBodyRequest)
# @@protoc_insertion_point(module_scope)
| 34.524138 | 384 | 0.754495 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from v1 import base_pb2 as v1_dot_base__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v1/code_ref.proto',
package='v1',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11v1/code_ref.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\rv1/base.proto\"&\n\x07\x43odeRef\x12\x0e\n\x06\x63ommit\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\"d\n\x12\x43odeRefBodyRequest\x12\x30\n\x06\x65ntity\x18\x01 \x01(\x0b\x32 .v1.ProjectEntityResourceRequest\x12\x1c\n\x07\x63oderef\x18\x02 \x01(\x0b\x32\x0b.v1.CodeRefb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,v1_dot_base__pb2.DESCRIPTOR,])
_CODEREF = _descriptor.Descriptor(
name='CodeRef',
full_name='v1.CodeRef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='commit', full_name='v1.CodeRef.commit', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='v1.CodeRef.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=111,
)
_CODEREFBODYREQUEST = _descriptor.Descriptor(
name='CodeRefBodyRequest',
full_name='v1.CodeRefBodyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='entity', full_name='v1.CodeRefBodyRequest.entity', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coderef', full_name='v1.CodeRefBodyRequest.coderef', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=213,
)
_CODEREFBODYREQUEST.fields_by_name['entity'].message_type = v1_dot_base__pb2._PROJECTENTITYRESOURCEREQUEST
_CODEREFBODYREQUEST.fields_by_name['coderef'].message_type = _CODEREF
DESCRIPTOR.message_types_by_name['CodeRef'] = _CODEREF
DESCRIPTOR.message_types_by_name['CodeRefBodyRequest'] = _CODEREFBODYREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CodeRef = _reflection.GeneratedProtocolMessageType('CodeRef', (_message.Message,), {
'DESCRIPTOR' : _CODEREF,
'__module__' : 'v1.code_ref_pb2'
})
_sym_db.RegisterMessage(CodeRef)
CodeRefBodyRequest = _reflection.GeneratedProtocolMessageType('CodeRefBodyRequest', (_message.Message,), {
'DESCRIPTOR' : _CODEREFBODYREQUEST,
'__module__' : 'v1.code_ref_pb2'
})
_sym_db.RegisterMessage(CodeRefBodyRequest)
| true | true |
f7336d63b11b71ef3f7925c35364b9ed2482e280 | 1,499 | py | Python | webexteamssdk/config.py | fracaen/webexteamssdk | 673312779b8e05cf0535bea8b96599015cccbff1 | [
"MIT"
] | 118 | 2018-09-13T18:06:34.000Z | 2022-03-30T05:16:36.000Z | webexteamssdk/config.py | fracaen/webexteamssdk | 673312779b8e05cf0535bea8b96599015cccbff1 | [
"MIT"
] | 88 | 2018-10-08T21:57:02.000Z | 2022-02-26T06:33:33.000Z | webexteamssdk/config.py | fracaen/webexteamssdk | 673312779b8e05cf0535bea8b96599015cccbff1 | [
"MIT"
] | 86 | 2018-09-10T09:42:01.000Z | 2022-03-30T05:38:14.000Z | # -*- coding: utf-8 -*-
"""Package configuration.
Copyright (c) 2016-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Package Constants
# Root of the Webex REST API; endpoint paths are resolved against this URL.
DEFAULT_BASE_URL = "https://webexapis.com/v1/"
# Default timeout for a single HTTP request, in seconds.
DEFAULT_SINGLE_REQUEST_TIMEOUT = 60
# Default for whether to wait (instead of raising) when rate-limited.
DEFAULT_WAIT_ON_RATE_LIMIT = True
# Environment variable consulted for the API access token.
ACCESS_TOKEN_ENVIRONMENT_VARIABLE = "WEBEX_TEAMS_ACCESS_TOKEN"
# Older token variable names kept for backward compatibility.
LEGACY_ACCESS_TOKEN_ENVIRONMENT_VARIABLES = [
    "SPARK_ACCESS_TOKEN",
    "CISCO_SPARK_ACCESS_TOKEN",
]
# strftime/strptime format for Webex timestamps (ISO-8601 style, 'Z' suffix).
WEBEX_TEAMS_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
| 36.560976 | 78 | 0.791194 |
DEFAULT_BASE_URL = "https://webexapis.com/v1/"
DEFAULT_SINGLE_REQUEST_TIMEOUT = 60
DEFAULT_WAIT_ON_RATE_LIMIT = True
ACCESS_TOKEN_ENVIRONMENT_VARIABLE = "WEBEX_TEAMS_ACCESS_TOKEN"
LEGACY_ACCESS_TOKEN_ENVIRONMENT_VARIABLES = [
"SPARK_ACCESS_TOKEN",
"CISCO_SPARK_ACCESS_TOKEN",
]
WEBEX_TEAMS_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
| true | true |
f7336e1195419937326e1ec785bc38d6a76f9995 | 2,418 | py | Python | attune/_discrete_tune.py | wright-group/WrightTune | e9a4937b6fda24dfd10baf2fc674c15542861aba | [
"MIT"
] | 2 | 2020-08-20T14:26:19.000Z | 2021-07-19T22:51:19.000Z | attune/_discrete_tune.py | wright-group/attune | 67b7e5590a11e9b09056e7d334fd7d7286a24c5f | [
"MIT"
] | 96 | 2017-10-12T01:16:01.000Z | 2022-02-23T19:43:57.000Z | attune/_discrete_tune.py | wright-group/WrightTune | e9a4937b6fda24dfd10baf2fc674c15542861aba | [
"MIT"
] | 2 | 2021-08-17T22:22:06.000Z | 2021-09-01T02:30:06.000Z | __all__ = ["DiscreteTune"]
from typing import Dict, Tuple, Optional
import WrightTools as wt
class DiscreteTune:
    """A Tune which maps continuous independent values onto discrete outputs.

    Currently all tunes are assumed to have "nm" as their independent units.
    """

    def __init__(
        self, ranges: Dict[str, Tuple[float, float]], default: Optional[str] = None, **kwargs
    ):
        """A Tune which maps one set of inputs to associated output points.

        Currently all tunes are assumed to have "nm" as their independent units.

        Parameters
        ----------
        ranges: dict[str, tuple[float, float]]
            dictionary mapping the key (string identifier of a discrete position)
            to a 2-tuple of (min, max) for the range for which that identifier should be used.
            This dict is ordered, the first result with a matching range (inclusive of boundaries)
            will be the one returned when called.
        default: Optional[str]
            The result to return if no matching range is represented.
            Default is None

        Note: kwargs are provided to make the serialized dictionary with ind_units
        easy to initialize into a DiscreteTune object, but are currently ignored.
        """
        self._ind_units = "nm"
        # Normalize values to tuples so equality and serialization behave the
        # same whether lists or tuples were supplied.
        self._ranges = {k: tuple(v) for k, v in ranges.items()}
        self._default = default

    def __repr__(self):
        return f"DiscreteTune({repr(self.ranges)}, {repr(self.default)})"

    def __call__(self, ind_value, *, ind_units=None, dep_units=None):
        """Return the key of the first range containing *ind_value*.

        ``ind_units`` (if given) is converted to this tune's units first;
        ``dep_units`` is accepted for interface symmetry but ignored.
        """
        if ind_units is not None and self._ind_units is not None:
            ind_value = wt.units.convert(ind_value, ind_units, self._ind_units)
        # Ordered scan: the first range whose (inclusive) bounds contain the
        # value wins, matching the contract documented in __init__.
        for key, (lower, upper) in self.ranges.items():
            if lower <= ind_value <= upper:
                return key
        return self.default

    def __eq__(self, other):
        # BUG FIX: previously this raised AttributeError when compared against
        # an object lacking ``ranges``/``default``. Returning NotImplemented
        # lets Python fall back to the reflected comparison (and False).
        if not isinstance(other, DiscreteTune):
            return NotImplemented
        return self.ranges == other.ranges and self.default == other.default

    def as_dict(self):
        """Serialize this Tune as a python dictionary."""
        out = {}
        out["ranges"] = self.ranges
        out["ind_units"] = self.ind_units
        out["default"] = self.default
        return out

    @property
    def ranges(self):
        """The ranges for discrete setpoints."""
        return self._ranges

    @property
    def ind_units(self):
        """The units of the independent (input) values."""
        return self._ind_units

    @property
    def default(self):
        """The value returned if no supplied range applies."""
        return self._default
| 34.542857 | 98 | 0.62531 | __all__ = ["DiscreteTune"]
from typing import Dict, Tuple, Optional
import WrightTools as wt
class DiscreteTune:
def __init__(
self, ranges: Dict[str, Tuple[float, float]], default: Optional[str] = None, **kwargs
):
self._ind_units = "nm"
self._ranges = {k: tuple(v) for k, v in ranges.items()}
self._default = default
def __repr__(self):
return f"DiscreteTune({repr(self.ranges)}, {repr(self.default)})"
def __call__(self, ind_value, *, ind_units=None, dep_units=None):
if ind_units is not None and self._ind_units is not None:
ind_value = wt.units.convert(ind_value, ind_units, self._ind_units)
for key, (min, max) in self.ranges.items():
if min <= ind_value <= max:
return key
return self.default
def __eq__(self, other):
return self.ranges == other.ranges and self.default == other.default
def as_dict(self):
out = {}
out["ranges"] = self.ranges
out["ind_units"] = self.ind_units
out["default"] = self.default
return out
@property
def ranges(self):
return self._ranges
@property
def ind_units(self):
return self._ind_units
@property
def default(self):
return self._default
| true | true |
f7336e26b0d826f1fdcbd47cad6a86e63f03e812 | 51,236 | py | Python | btcgreen/timelord/timelord.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 19 | 2021-08-09T21:21:09.000Z | 2022-03-18T02:27:13.000Z | btcgreen/timelord/timelord.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 29 | 2021-08-13T12:05:09.000Z | 2022-03-20T19:30:36.000Z | btcgreen/timelord/timelord.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 4 | 2021-08-18T16:42:30.000Z | 2022-03-15T08:24:58.000Z | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from btcgreen.consensus.constants import ConsensusConstants
from btcgreen.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from btcgreen.protocols import timelord_protocol
from btcgreen.protocols.protocol_message_types import ProtocolMessageTypes
from btcgreen.server.outbound_message import NodeType, make_msg
from btcgreen.server.server import BTCgreenServer
from btcgreen.timelord.iters_from_block import iters_from_block
from btcgreen.timelord.timelord_state import LastState
from btcgreen.timelord.types import Chain, IterationType, StateType
from btcgreen.types.blockchain_format.classgroup import ClassgroupElement
from btcgreen.types.blockchain_format.reward_chain_block import RewardChainBlock
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from btcgreen.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from btcgreen.types.blockchain_format.vdf import VDFInfo, VDFProof
from btcgreen.types.end_of_slot_bundle import EndOfSubSlotBundle
from btcgreen.util.ints import uint8, uint32, uint64, uint128
# Module-level logger for the timelord service.
log = logging.getLogger(__name__)
class Timelord:
def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
self.config = config
self.root_path = root_path
self.constants = constants
self._shut_down = False
self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
self.potential_free_clients: List = []
self.ip_whitelist = self.config["vdf_clients"]["ip"]
self.server: Optional[BTCgreenServer] = None
self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
self.chain_start_time: Dict = {}
# Chains that currently don't have a vdf_client.
self.unspawned_chains: List[Chain] = [
Chain.CHALLENGE_CHAIN,
Chain.REWARD_CHAIN,
Chain.INFUSED_CHALLENGE_CHAIN,
]
# Chains that currently accept iterations.
self.allows_iters: List[Chain] = []
# Last peak received, None if it's already processed.
self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
# Last end of subslot bundle, None if we built a peak on top of it.
self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
# Last state received. Can either be a new peak or a new EndOfSubslotBundle.
# Unfinished block info, iters adjusted to the last peak.
self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Signage points iters, adjusted to the last peak.
self.signage_point_iters: List[Tuple[uint64, uint8]] = []
# For each chain, send those info when the process spawns.
self.iters_to_submit: Dict[Chain, List[uint64]] = {}
self.iters_submitted: Dict[Chain, List[uint64]] = {}
self.iters_finished: Set = set()
# For each iteration submitted, know if it's a signage point, an infusion point or an end of slot.
self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
# List of proofs finished.
self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
# Data to send at vdf_client initialization.
self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Incremented each time `reset_chains` has been called.
# Used to label proofs in `finished_proofs` and to only filter proofs corresponding to the most recent state.
self.num_resets: int = 0
self.process_communication_tasks: List[asyncio.Task] = []
self.main_loop = None
self.vdf_server = None
self._shut_down = False
self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
self.vdf_failures_count: int = 0
self.vdf_failure_time: float = 0
self.total_unfinished: int = 0
self.total_infused: int = 0
self.state_changed_callback: Optional[Callable] = None
self.sanitizer_mode = self.config["sanitizer_mode"]
self.pending_bluebox_info: List[Tuple[float, timelord_protocol.RequestCompactProofOfTime]] = []
self.last_active_time = time.time()
async def _start(self):
self.lock: asyncio.Lock = asyncio.Lock()
self.vdf_server = await asyncio.start_server(
self._handle_client,
self.config["vdf_server"]["host"],
self.config["vdf_server"]["port"],
)
self.last_state: LastState = LastState(self.constants)
if not self.sanitizer_mode:
self.main_loop = asyncio.create_task(self._manage_chains())
else:
self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
log.info("Started timelord.")
def _close(self):
self._shut_down = True
for task in self.process_communication_tasks:
task.cancel()
if self.main_loop is not None:
self.main_loop.cancel()
async def _await_closed(self):
pass
    def set_server(self, server: BTCgreenServer):
        """Remember the server used to broadcast VDF messages to full nodes."""
        self.server = server
async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
async with self.lock:
client_ip = writer.get_extra_info("peername")[0]
log.debug(f"New timelord connection from client: {client_ip}.")
if client_ip in self.ip_whitelist:
self.free_clients.append((client_ip, reader, writer))
log.debug(f"Added new VDF client {client_ip}.")
for ip, end_time in list(self.potential_free_clients):
if ip == client_ip:
self.potential_free_clients.remove((ip, end_time))
break
    async def _stop_chain(self, chain: Chain):
        """Tell the vdf_client running `chain` to stop and recycle its connection.

        Caller must hold self.lock. The lock is temporarily released while the
        chain's vdf_client finishes its handshake (reaches allows_iters).
        """
        try:
            # Cannot send a stop before the client's handshake completed; spin,
            # releasing the lock so the handshake coroutine can make progress.
            while chain not in self.allows_iters:
                self.lock.release()
                await asyncio.sleep(0.05)
                log.error(f"Trying to stop {chain} before its initialization.")
                await self.lock.acquire()
            if chain not in self.chain_type_to_stream:
                log.warning(f"Trying to stop a crashed chain: {chain}.")
                return None
            stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
            # Remember this client's IP so _handle_client can clear it on reconnect.
            self.potential_free_clients.append((stop_ip, time.time()))
            # "010" is the stop command of the vdf_client wire protocol.
            stop_writer.write(b"010")
            await stop_writer.drain()
            if chain in self.allows_iters:
                self.allows_iters.remove(chain)
            if chain not in self.unspawned_chains:
                self.unspawned_chains.append(chain)
            if chain in self.chain_type_to_stream:
                del self.chain_type_to_stream[chain]
        except ConnectionResetError as e:
            log.error(f"{e}")
    def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
        """Return the iteration count (relative to the current peak) at which
        `block` can be infused, or None if it cannot be infused on this state.
        """
        assert self.last_state is not None
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        difficulty = self.last_state.get_difficulty()
        ip_iters = self.last_state.get_last_ip()
        rc_block = block.reward_chain_block
        try:
            block_sp_iters, block_ip_iters = iters_from_block(
                self.constants,
                rc_block,
                sub_slot_iters,
                difficulty,
            )
        except Exception as e:
            log.warning(f"Received invalid unfinished block: {e}.")
            return None
        # Total iterations (since genesis) of the block's signage point.
        block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
        if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
            # Overflow blocks have their SP in the previous sub-slot.
            block_sp_total_iters -= self.last_state.get_sub_slot_iters()
        # The block's previous reward-chain challenge must be in our recent cache.
        found_index = -1
        for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
            if rc == block.rc_prev:
                found_index = index
                break
        if found_index == -1:
            log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
            return None
        if ip_iters > block_ip_iters:
            log.warning("Too late to infuse block")
            return None
        new_block_iters = uint64(block_ip_iters - ip_iters)
        if len(self.last_state.reward_challenge_cache) > found_index + 1:
            if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
                log.warning(
                    f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
                    f"because there is another infusion before its SP"
                )
                return None
            if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
                if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
                    log.error(
                        f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
                        f"because its iters are too low"
                    )
                return None
        if new_block_iters > 0:
            return new_block_iters
        return None
    async def _reset_chains(self, first_run=False, only_eos=False):
        """Stop all chains and rebuild the iteration schedule from last_state.

        Args:
            first_run: skip stopping chains (none are running yet).
            only_eos: schedule only the end-of-sub-slot iteration, skipping
                signage points and infusion points (used after VDF failures).
        """
        # First, stop all chains.
        self.last_active_time = time.time()
        log.debug("Resetting chains")
        ip_iters = self.last_state.get_last_ip()
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        if not first_run:
            for chain in list(self.chain_type_to_stream.keys()):
                await self._stop_chain(chain)
        # Adjust all signage points iterations to the peak.
        iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
        self.signage_point_iters = [
            (k * iters_per_signage - ip_iters, k)
            for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
            if k * iters_per_signage - ip_iters > 0
        ]
        for sp, k in self.signage_point_iters:
            assert k * iters_per_signage > 0
            assert k * iters_per_signage < sub_slot_iters
        # Adjust all unfinished blocks iterations to the peak.
        new_unfinished_blocks = []
        self.iters_finished = set()
        self.proofs_finished = []
        # Bump the label so stale proofs from before this reset are ignored.
        self.num_resets += 1
        for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
            self.iters_to_submit[chain] = []
            self.iters_submitted[chain] = []
        self.iteration_to_proof_type = {}
        if not only_eos:
            for block in self.unfinished_blocks + self.overflow_blocks:
                new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
                # Does not add duplicates, or blocks that we cannot infuse
                if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
                    if block not in self.unfinished_blocks:
                        self.total_unfinished += 1
                    new_unfinished_blocks.append(block)
                    for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
                        self.iters_to_submit[chain].append(new_block_iters)
                    if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
                    self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
        # Remove all unfinished blocks that have already passed.
        self.unfinished_blocks = new_unfinished_blocks
        # Signage points.
        if not only_eos and len(self.signage_point_iters) > 0:
            count_signage = 0
            # Only the next 3 signage points are scheduled up front; later ones
            # are queued as earlier ones complete (see _check_for_new_sp).
            for signage, k in self.signage_point_iters:
                for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                    self.iters_to_submit[chain].append(signage)
                self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
                count_signage += 1
                if count_signage == 3:
                    break
        left_subslot_iters = sub_slot_iters - ip_iters
        assert left_subslot_iters > 0
        if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
            self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
        self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
        for chain, iters in self.iters_to_submit.items():
            for iteration in iters:
                assert iteration > 0
    async def _handle_new_peak(self):
        """Apply the pending new peak to last_state, update the infusion-rate
        statistics, and reset all chains on top of the new peak.
        """
        assert self.new_peak is not None
        self.last_state.set_state(self.new_peak)
        if self.total_unfinished > 0:
            remove_unfinished = []
            # Any unfinished block matching the new peak has now been infused.
            for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
                if (
                    unf_block_timelord.reward_chain_block.get_hash()
                    == self.new_peak.reward_chain_block.get_unfinished().get_hash()
                ):
                    if unf_block_timelord not in self.unfinished_blocks:
                        # We never got the EOS for this, but we have the block in overflow list
                        self.total_unfinished += 1
                    remove_unfinished.append(unf_block_timelord)
            if len(remove_unfinished) > 0:
                self.total_infused += 1
            for block in remove_unfinished:
                if block in self.unfinished_blocks:
                    self.unfinished_blocks.remove(block)
                if block in self.overflow_blocks:
                    self.overflow_blocks.remove(block)
            infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
            log.info(
                f"Total unfinished blocks: {self.total_unfinished}. "
                f"Total infused blocks: {self.total_infused}. "
                f"Infusion rate: {infusion_rate}%."
            )
        self.new_peak = None
        await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
    async def _map_chains_with_vdf_clients(self):
        """Assign free vdf_client connections to chains that need one, spawning
        one communication task per assignment. Stops when either list is empty.
        """
        while not self._shut_down:
            picked_chain = None
            async with self.lock:
                if len(self.free_clients) == 0:
                    break
                ip, reader, writer = self.free_clients[0]
                # Find a chain whose challenge and initial form are available.
                for chain_type in self.unspawned_chains:
                    challenge = self.last_state.get_challenge(chain_type)
                    initial_form = self.last_state.get_initial_form(chain_type)
                    if challenge is not None and initial_form is not None:
                        picked_chain = chain_type
                        break
                if picked_chain is None:
                    break
                # NOTE(review): this overwrites the chain found above with the
                # head of the list; if they differ, the captured challenge /
                # initial_form belong to a different chain — verify intent.
                picked_chain = self.unspawned_chains[0]
                self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
                self.free_clients = self.free_clients[1:]
                self.unspawned_chains = self.unspawned_chains[1:]
                self.chain_start_time[picked_chain] = time.time()
            log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
            self.process_communication_tasks.append(
                asyncio.create_task(
                    self._do_process_communication(
                        picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
                    )
                )
            )
async def _submit_iterations(self):
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
if chain in self.allows_iters:
_, _, writer = self.chain_type_to_stream[chain]
for iteration in self.iters_to_submit[chain]:
if iteration in self.iters_submitted[chain]:
continue
log.debug(f"Submitting iterations to {chain}: {iteration}")
assert iteration > 0
prefix = str(len(str(iteration)))
if len(str(iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(iteration)
writer.write(iter_str.encode())
await writer.drain()
self.iters_submitted[chain].append(iteration)
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
    async def _check_for_new_sp(self, iter_to_look_for: uint64):
        """If both CC and RC proofs for the signage point at `iter_to_look_for`
        are ready, broadcast a NewSignagePointVDF and queue the next 3 SPs.
        """
        signage_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
        ]
        if len(signage_iters) == 0:
            return None
        to_remove = []
        for potential_sp_iters, signage_point_index in self.signage_point_iters:
            if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
                continue
            signage_iter = potential_sp_iters
            # Only proofs labeled with the current reset generation count.
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == signage_iter and label == self.num_resets
            ]
            # Wait for both cc and rc to have the signage point.
            if len(proofs_with_iter) == 2:
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient signage point data {signage_iter}")
                    continue
                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
                    # This proof is on an outdated challenge, so don't use it
                    continue
                # The protocol message carries iterations relative to sub-slot start.
                iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
                response = timelord_protocol.NewSignagePointVDF(
                    signage_point_index,
                    dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                    cc_proof,
                    rc_info,
                    rc_proof,
                )
                if self.server is not None:
                    msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                # Cleanup the signage point from memory.
                to_remove.append((signage_iter, signage_point_index))
                self.proofs_finished = self._clear_proof_list(signage_iter)
                # Send the next 3 signage point to the chains.
                next_iters_count = 0
                for next_sp, k in self.signage_point_iters:
                    for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                        if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                            self.iters_to_submit[chain].append(next_sp)
                    self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                    next_iters_count += 1
                    if next_iters_count == 3:
                        break
                # Break so we alternate between checking SP and IP
                break
        for r in to_remove:
            self.signage_point_iters.remove(r)
    async def _check_for_new_ip(self, iter_to_look_for: uint64):
        """If all required chain proofs for the infusion point at
        `iter_to_look_for` are ready, broadcast a NewInfusionPointVDF, build the
        resulting peak locally, and reset chains on top of it.
        """
        if len(self.unfinished_blocks) == 0:
            return None
        infusion_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
        ]
        for iteration in infusion_iters:
            if iteration != iter_to_look_for:
                continue
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == iteration and label == self.num_resets
            ]
            # ICC proofs are only produced while an ICC challenge exists.
            if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
                chain_count = 3
            else:
                chain_count = 2
            if len(proofs_with_iter) == chain_count:
                block = None
                ip_iters = None
                # Find the unfinished block whose infusion point is this iteration.
                for unfinished_block in self.unfinished_blocks:
                    try:
                        _, ip_iters = iters_from_block(
                            self.constants,
                            unfinished_block.reward_chain_block,
                            self.last_state.get_sub_slot_iters(),
                            self.last_state.get_difficulty(),
                        )
                    except Exception as e:
                        log.error(f"Error {e}")
                        continue
                    if ip_iters - self.last_state.get_last_ip() == iteration:
                        block = unfinished_block
                        break
                assert ip_iters is not None
                if block is not None:
                    ip_total_iters = self.last_state.get_total_iters() + iteration
                    challenge = block.reward_chain_block.get_hash()
                    icc_info: Optional[VDFInfo] = None
                    icc_proof: Optional[VDFProof] = None
                    cc_info: Optional[VDFInfo] = None
                    cc_proof: Optional[VDFProof] = None
                    rc_info: Optional[VDFInfo] = None
                    rc_proof: Optional[VDFProof] = None
                    for chain, info, proof in proofs_with_iter:
                        if chain == Chain.CHALLENGE_CHAIN:
                            cc_info = info
                            cc_proof = proof
                        if chain == Chain.REWARD_CHAIN:
                            rc_info = info
                            rc_proof = proof
                        if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                            icc_info = info
                            icc_proof = proof
                    if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                        log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                        return None
                    rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                    if rc_info.challenge != rc_challenge:
                        assert rc_challenge is not None
                        log.warning(
                            f"Do not have correct challenge {rc_challenge.hex()} "
                            f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                        )
                        # This proof is on an outdated challenge, so don't use it
                        continue
                    self.iters_finished.add(iter_to_look_for)
                    self.last_active_time = time.time()
                    log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                    overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                    if not self.last_state.can_infuse_block(overflow):
                        log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                        return None
                    # Convert CC iterations to be relative to the sub-slot start.
                    cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                    response = timelord_protocol.NewInfusionPointVDF(
                        challenge,
                        cc_info,
                        cc_proof,
                        rc_info,
                        rc_proof,
                        icc_info,
                        icc_proof,
                    )
                    msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                    if self.server is not None:
                        await self.server.send_to_all([msg], NodeType.FULL_NODE)
                    self.proofs_finished = self._clear_proof_list(iteration)
                    if (
                        self.last_state.get_last_block_total_iters() is None
                        and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                    ):
                        # We don't know when the last block was, so we can't make peaks
                        return None
                    sp_total_iters = (
                        ip_total_iters
                        - ip_iters
                        + calculate_sp_iters(
                            self.constants,
                            block.sub_slot_iters,
                            block.reward_chain_block.signage_point_index,
                        )
                        - (block.sub_slot_iters if overflow else 0)
                    )
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        is_transaction_block = True
                        height: uint32 = uint32(0)
                    else:
                        last_block_ti = self.last_state.get_last_block_total_iters()
                        assert last_block_ti is not None
                        is_transaction_block = last_block_ti < sp_total_iters
                        height = uint32(self.last_state.get_height() + 1)
                    if height < 5:
                        # Don't directly update our state for the first few blocks, because we cannot validate
                        # whether the pre-farm is correct
                        return None
                    new_reward_chain_block = RewardChainBlock(
                        uint128(self.last_state.get_weight() + block.difficulty),
                        height,
                        uint128(ip_total_iters),
                        block.reward_chain_block.signage_point_index,
                        block.reward_chain_block.pos_ss_cc_challenge_hash,
                        block.reward_chain_block.proof_of_space,
                        block.reward_chain_block.challenge_chain_sp_vdf,
                        block.reward_chain_block.challenge_chain_sp_signature,
                        cc_info,
                        block.reward_chain_block.reward_chain_sp_vdf,
                        block.reward_chain_block.reward_chain_sp_signature,
                        rc_info,
                        icc_info,
                        is_transaction_block,
                    )
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        # Genesis
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                    elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        if self.last_state.peak is not None:
                            assert self.last_state.subslot_end is None
                            # This means the previous block is also an overflow block, and did not manage
                            # to lower the deficit, therefore we cannot lower it either. (new slot)
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                        else:
                            # This means we are the first infusion in this sub-slot. This may be a new slot or not.
                            assert self.last_state.subslot_end is not None
                            if self.last_state.subslot_end.infused_challenge_chain is None:
                                # There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                            else:
                                # There is an ICC, which means we are finishing a slot. Different slot, so can't change
                                # the deficit
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        new_deficit = max(self.last_state.deficit - 1, 0)
                    if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                        last_csb_or_eos = ip_total_iters
                    else:
                        last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
                    if self.last_state.just_infused_sub_epoch_summary():
                        new_sub_epoch_summary = None
                        passed_ses_height_but_not_yet_included = False
                    else:
                        new_sub_epoch_summary = block.sub_epoch_summary
                        if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                            passed_ses_height_but_not_yet_included = True
                        else:
                            passed_ses_height_but_not_yet_included = (
                                self.last_state.get_passed_ses_height_but_not_yet_included()
                            )
                    self.new_peak = timelord_protocol.NewPeakTimelord(
                        new_reward_chain_block,
                        block.difficulty,
                        uint8(new_deficit),
                        block.sub_slot_iters,
                        new_sub_epoch_summary,
                        self.last_state.reward_challenge_cache,
                        uint128(last_csb_or_eos),
                        passed_ses_height_but_not_yet_included,
                    )
                    await self._handle_new_peak()
                    # Break so we alternate between checking SP and IP
                    break
    async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
        """If all chain proofs for the end of the current sub-slot are ready,
        build and broadcast an EndOfSubSlotBundle and roll state into the next
        sub-slot.
        """
        left_subslot_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
        ]
        if len(left_subslot_iters) == 0:
            return None
        if left_subslot_iters[0] != iter_to_look_for:
            return None
        chains_finished = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
        ]
        # ICC proof is only expected while an ICC challenge exists.
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(chains_finished) == chain_count:
            icc_ip_vdf: Optional[VDFInfo] = None
            icc_ip_proof: Optional[VDFProof] = None
            cc_vdf: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_vdf: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in chains_finished:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_vdf = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_vdf = info
                    rc_proof = proof
                if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                    icc_ip_vdf = info
                    icc_ip_proof = proof
            assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
            rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
            if rc_vdf.challenge != rc_challenge:
                assert rc_challenge is not None
                log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
                # This proof is on an outdated challenge, so don't use it
                return None
            log.debug("Collected end of subslot vdfs.")
            self.iters_finished.add(iter_to_look_for)
            self.last_active_time = time.time()
            # CC VDF is published relative to the sub-slot start.
            iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
            cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
            if icc_ip_vdf is not None:
                if self.last_state.peak is not None:
                    total_iters = (
                        self.last_state.get_total_iters()
                        - self.last_state.get_last_ip()
                        + self.last_state.get_sub_slot_iters()
                    )
                else:
                    total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
                # ICC VDF is relative to the last challenge block / end of slot.
                iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
                if iters_from_cb > self.last_state.sub_slot_iters:
                    log.error(f"{self.last_state.peak}")
                    log.error(f"{self.last_state.subslot_end}")
                    assert False
                assert iters_from_cb <= self.last_state.sub_slot_iters
                icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
            icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
                None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
            )
            if self.last_state.get_deficit() == 0:
                assert icc_sub_slot is not None
                icc_sub_slot_hash = icc_sub_slot.get_hash()
            else:
                icc_sub_slot_hash = None
            next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
            if next_ses is not None:
                log.info(f"Including sub epoch summary{next_ses}")
                ses_hash = next_ses.get_hash()
                new_sub_slot_iters = next_ses.new_sub_slot_iters
                new_difficulty = next_ses.new_difficulty
            else:
                ses_hash = None
                new_sub_slot_iters = None
                new_difficulty = None
            cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
            eos_deficit: uint8 = (
                self.last_state.get_deficit()
                if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
                else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
            )
            rc_sub_slot = RewardChainSubSlot(
                rc_vdf,
                cc_sub_slot.get_hash(),
                icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
                eos_deficit,
            )
            eos_bundle = EndOfSubSlotBundle(
                cc_sub_slot,
                icc_sub_slot,
                rc_sub_slot,
                SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
            )
            if self.server is not None:
                msg = make_msg(
                    ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                    timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            log.info(
                f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
                f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
            )
            if next_ses is None or next_ses.new_difficulty is None:
                self.unfinished_blocks = self.overflow_blocks.copy()
            else:
                # No overflow blocks in a new epoch
                self.unfinished_blocks = []
            self.overflow_blocks = []
            self.new_subslot_end = eos_bundle
            await self._handle_subslot_end()
    async def _handle_failures(self):
        """React to vdf_client failures and watchdog-reset idle chains."""
        if len(self.vdf_failures) > 0:
            # This can happen if one of the VDF processes has an issue. In this case, we abort all other
            # infusion points and signage points, and go straight to the end of slot, so we avoid potential
            # issues with the number of iterations that failed.
            failed_chain, proof_label = self.vdf_failures[0]
            log.error(
                f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
                f"label {proof_label}, current: {self.num_resets}"
            )
            # Only reset for failures belonging to the current reset generation.
            if proof_label == self.num_resets:
                await self._reset_chains(only_eos=True)
            self.vdf_failure_time = time.time()
            self.vdf_failures = []
        # If something goes wrong in the VDF client due to a failed thread, we might get stuck in a situation where we
        # are waiting for that client to finish. Usually other peers will finish the VDFs and reset us. In the case that
        # there are no other timelords, this reset should bring the timelord back to a running state.
        if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
            # If we have recently had a failure, allow some more time to finish the slot (we can be up to 3x slower)
            active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
        else:
            # If there were no failures recently trigger a reset after 60 seconds of no activity.
            # Signage points should be every 9 seconds
            active_time_threshold = 60
        if time.time() - self.last_active_time > active_time_threshold:
            log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
            await self._reset_chains()
    async def _manage_chains(self):
        """Main loop of a (non-bluebox) timelord: handle failures and new peaks,
        map vdf_clients to chains, submit iterations, and alternate checking for
        infusion points, signage points, and end of sub-slot.
        """
        async with self.lock:
            # Give the vdf_clients a moment to connect before the first reset.
            await asyncio.sleep(5)
            await self._reset_chains(True)
        while not self._shut_down:
            try:
                await asyncio.sleep(0.1)
                async with self.lock:
                    await self._handle_failures()
                    # We've got a new peak, process it.
                    if self.new_peak is not None:
                        await self._handle_new_peak()
                # Map free vdf_clients to unspawned chains.
                await self._map_chains_with_vdf_clients()
                async with self.lock:
                    # Submit pending iterations.
                    await self._submit_iterations()
                    not_finished_iters = [
                        it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
                    ]
                    if len(not_finished_iters) == 0:
                        await asyncio.sleep(0.1)
                        continue
                    selected_iter = min(not_finished_iters)
                    # Check for new infusion point and broadcast it if present.
                    await self._check_for_new_ip(selected_iter)
                    # Check for new signage point and broadcast it if present.
                    await self._check_for_new_sp(selected_iter)
                    # Check for end of subslot, respawn chains and build EndOfSubslotBundle.
                    await self._check_for_end_of_subslot(selected_iter)
            except Exception:
                tb = traceback.format_exc()
                log.error(f"Error while handling message: {tb}")
    async def _do_process_communication(
        self,
        chain: Chain,
        challenge: bytes32,
        initial_form: ClassgroupElement,
        ip: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        # Data specific only when running in bluebox mode.
        bluebox_iteration: Optional[uint64] = None,
        header_hash: Optional[bytes32] = None,
        height: Optional[uint32] = None,
        field_vdf: Optional[uint8] = None,
        # Labels a proof to the current state only
        proof_label: Optional[int] = None,
    ):
        """Drive one vdf_client over its TCP connection for the given chain.

        Handshake: send mode byte, length-prefixed discriminant, then the
        length-prefixed initial form, and wait for "OK". Afterwards read
        length-prefixed proofs until the client sends "STOP". Failures are
        recorded in self.vdf_failures for _handle_failures to act on.
        """
        disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
        try:
            # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
            # the timelord tells the vdf_client what to execute.
            async with self.lock:
                if self.sanitizer_mode:
                    writer.write(b"S")
                else:
                    if self.config["fast_algorithm"]:
                        # Run n-wesolowski (fast) algorithm.
                        writer.write(b"N")
                    else:
                        # Run two-wesolowski (slow) algorithm.
                        writer.write(b"T")
                await writer.drain()
            # Three-character, zero-padded length header for the discriminant.
            prefix = str(len(str(disc)))
            if len(prefix) == 1:
                prefix = "00" + prefix
            if len(prefix) == 2:
                prefix = "0" + prefix
            async with self.lock:
                writer.write((prefix + str(disc)).encode())
                await writer.drain()
            # Send initial_form prefixed with its length.
            async with self.lock:
                writer.write(bytes([len(initial_form.data)]) + initial_form.data)
                await writer.drain()
            try:
                ok = await reader.readexactly(2)
            except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append((chain, proof_label))
                    self.vdf_failures_count += 1
                return None
            if ok.decode() != "OK":
                return None
            log.debug("Got handshake with VDF client.")
            if not self.sanitizer_mode:
                async with self.lock:
                    self.allows_iters.append(chain)
            else:
                async with self.lock:
                    assert chain is Chain.BLUEBOX
                    assert bluebox_iteration is not None
                    # Two-character, zero-padded length header for the iteration.
                    prefix = str(len(str(bluebox_iteration)))
                    if len(str(bluebox_iteration)) < 10:
                        prefix = "0" + prefix
                    iter_str = prefix + str(bluebox_iteration)
                    writer.write(iter_str.encode())
                    await writer.drain()
            # Listen to the client until "STOP" is received.
            while True:
                try:
                    data = await reader.readexactly(4)
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append((chain, proof_label))
                        self.vdf_failures_count += 1
                    break
                msg = ""
                try:
                    msg = data.decode()
                except Exception:
                    pass
                if msg == "STOP":
                    log.debug(f"Stopped client running on ip {ip}.")
                    async with self.lock:
                        writer.write(b"ACK")
                        await writer.drain()
                    break
                else:
                    try:
                        # This must be a proof, 4 bytes is length prefix
                        length = int.from_bytes(data, "big")
                        proof = await reader.readexactly(length)
                        stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                    except (
                        asyncio.IncompleteReadError,
                        ConnectionResetError,
                        Exception,
                    ) as e:
                        log.warning(f"{type(e)} {e}")
                        async with self.lock:
                            self.vdf_failures.append((chain, proof_label))
                            self.vdf_failures_count += 1
                        break
                    # Parse the proof payload: iterations, output form, witness.
                    iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
                    y_size_bytes = stdout_bytes_io.read(8)
                    y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
                    y_bytes = stdout_bytes_io.read(y_size)
                    witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                    proof_bytes: bytes = stdout_bytes_io.read()
                    # Verifies our own proof just in case
                    form_size = ClassgroupElement.get_size(self.constants)
                    output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                    if not self.sanitizer_mode:
                        time_taken = time.time() - self.chain_start_time[chain]
                        ips = int(iterations_needed / time_taken * 10) / 10
                        log.info(
                            f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                            f" iters, "
                            f"Estimated IPS: {ips}, Chain: {chain}"
                        )
                    vdf_info: VDFInfo = VDFInfo(
                        challenge,
                        iterations_needed,
                        output,
                    )
                    vdf_proof: VDFProof = VDFProof(
                        witness_type,
                        proof_bytes,
                        self.sanitizer_mode,
                    )
                    # NOTE(review): an invalid proof is only logged, then still
                    # appended below — presumably intentional; verify upstream.
                    if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                        log.error("Invalid proof of time!")
                    if not self.sanitizer_mode:
                        async with self.lock:
                            assert proof_label is not None
                            self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
                    else:
                        async with self.lock:
                            writer.write(b"010")
                            await writer.drain()
                        assert header_hash is not None
                        assert field_vdf is not None
                        assert height is not None
                        response = timelord_protocol.RespondCompactProofOfTime(
                            vdf_info, vdf_proof, header_hash, height, field_vdf
                        )
                        if self.server is not None:
                            message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
                            await self.server.send_to_all([message], NodeType.FULL_NODE)
        except ConnectionResetError as e:
            log.debug(f"Connection reset with VDF client {e}")
async def _manage_discriminant_queue_sanitizer(self):
while not self._shut_down:
async with self.lock:
try:
while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
# Select randomly the field_vdf we're creating a compact vdf for.
# This is done because CC_SP and CC_IP are more frequent than
# CC_EOS and ICC_EOS. This guarantees everything is picked uniformly.
target_field_vdf = random.randint(1, 4)
info = next(
(info for info in self.pending_bluebox_info if info[1].field_vdf == target_field_vdf),
None,
)
if info is None:
# Nothing found with target_field_vdf, just pick the first VDFInfo.
info = self.pending_bluebox_info[0]
ip, reader, writer = self.free_clients[0]
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
Chain.BLUEBOX,
info[1].new_proof_of_time.challenge,
ClassgroupElement.get_default_element(),
ip,
reader,
writer,
info[1].new_proof_of_time.number_of_iterations,
info[1].header_hash,
info[1].height,
info[1].field_vdf,
)
)
)
self.pending_bluebox_info.remove(info)
self.free_clients = self.free_clients[1:]
except Exception as e:
log.error(f"Exception manage discriminant queue: {e}")
await asyncio.sleep(0.1)
| 49.455598 | 120 | 0.562905 | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from btcgreen.consensus.constants import ConsensusConstants
from btcgreen.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from btcgreen.protocols import timelord_protocol
from btcgreen.protocols.protocol_message_types import ProtocolMessageTypes
from btcgreen.server.outbound_message import NodeType, make_msg
from btcgreen.server.server import BTCgreenServer
from btcgreen.timelord.iters_from_block import iters_from_block
from btcgreen.timelord.timelord_state import LastState
from btcgreen.timelord.types import Chain, IterationType, StateType
from btcgreen.types.blockchain_format.classgroup import ClassgroupElement
from btcgreen.types.blockchain_format.reward_chain_block import RewardChainBlock
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from btcgreen.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from btcgreen.types.blockchain_format.vdf import VDFInfo, VDFProof
from btcgreen.types.end_of_slot_bundle import EndOfSubSlotBundle
from btcgreen.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class Timelord:
def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
self.config = config
self.root_path = root_path
self.constants = constants
self._shut_down = False
self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
self.potential_free_clients: List = []
self.ip_whitelist = self.config["vdf_clients"]["ip"]
self.server: Optional[BTCgreenServer] = None
self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
self.chain_start_time: Dict = {}
self.unspawned_chains: List[Chain] = [
Chain.CHALLENGE_CHAIN,
Chain.REWARD_CHAIN,
Chain.INFUSED_CHALLENGE_CHAIN,
]
# Chains that currently accept iterations.
self.allows_iters: List[Chain] = []
# Last peak received, None if it's already processed.
self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
self.signage_point_iters: List[Tuple[uint64, uint8]] = []
self.iters_to_submit: Dict[Chain, List[uint64]] = {}
self.iters_submitted: Dict[Chain, List[uint64]] = {}
self.iters_finished: Set = set()
self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
# List of proofs finished.
self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
# Data to send at vdf_client initialization.
self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Incremented each time `reset_chains` has been called.
# Used to label proofs in `finished_proofs` and to only filter proofs corresponding to the most recent state.
self.num_resets: int = 0
self.process_communication_tasks: List[asyncio.Task] = []
self.main_loop = None
self.vdf_server = None
self._shut_down = False
self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
self.vdf_failures_count: int = 0
self.vdf_failure_time: float = 0
self.total_unfinished: int = 0
self.total_infused: int = 0
self.state_changed_callback: Optional[Callable] = None
self.sanitizer_mode = self.config["sanitizer_mode"]
self.pending_bluebox_info: List[Tuple[float, timelord_protocol.RequestCompactProofOfTime]] = []
self.last_active_time = time.time()
async def _start(self):
self.lock: asyncio.Lock = asyncio.Lock()
self.vdf_server = await asyncio.start_server(
self._handle_client,
self.config["vdf_server"]["host"],
self.config["vdf_server"]["port"],
)
self.last_state: LastState = LastState(self.constants)
if not self.sanitizer_mode:
self.main_loop = asyncio.create_task(self._manage_chains())
else:
self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
log.info("Started timelord.")
def _close(self):
self._shut_down = True
for task in self.process_communication_tasks:
task.cancel()
if self.main_loop is not None:
self.main_loop.cancel()
async def _await_closed(self):
pass
def set_server(self, server: BTCgreenServer):
self.server = server
async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
async with self.lock:
client_ip = writer.get_extra_info("peername")[0]
log.debug(f"New timelord connection from client: {client_ip}.")
if client_ip in self.ip_whitelist:
self.free_clients.append((client_ip, reader, writer))
log.debug(f"Added new VDF client {client_ip}.")
for ip, end_time in list(self.potential_free_clients):
if ip == client_ip:
self.potential_free_clients.remove((ip, end_time))
break
async def _stop_chain(self, chain: Chain):
try:
while chain not in self.allows_iters:
self.lock.release()
await asyncio.sleep(0.05)
log.error(f"Trying to stop {chain} before its initialization.")
await self.lock.acquire()
if chain not in self.chain_type_to_stream:
log.warning(f"Trying to stop a crashed chain: {chain}.")
return None
stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
self.potential_free_clients.append((stop_ip, time.time()))
stop_writer.write(b"010")
await stop_writer.drain()
if chain in self.allows_iters:
self.allows_iters.remove(chain)
if chain not in self.unspawned_chains:
self.unspawned_chains.append(chain)
if chain in self.chain_type_to_stream:
del self.chain_type_to_stream[chain]
except ConnectionResetError as e:
log.error(f"{e}")
def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
assert self.last_state is not None
sub_slot_iters = self.last_state.get_sub_slot_iters()
difficulty = self.last_state.get_difficulty()
ip_iters = self.last_state.get_last_ip()
rc_block = block.reward_chain_block
try:
block_sp_iters, block_ip_iters = iters_from_block(
self.constants,
rc_block,
sub_slot_iters,
difficulty,
)
except Exception as e:
log.warning(f"Received invalid unfinished block: {e}.")
return None
block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
block_sp_total_iters -= self.last_state.get_sub_slot_iters()
found_index = -1
for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
if rc == block.rc_prev:
found_index = index
break
if found_index == -1:
log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
return None
if ip_iters > block_ip_iters:
log.warning("Too late to infuse block")
return None
new_block_iters = uint64(block_ip_iters - ip_iters)
if len(self.last_state.reward_challenge_cache) > found_index + 1:
if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
log.warning(
f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
f"because there is another infusion before its SP"
)
return None
if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
log.error(
f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
f"because its iters are too low"
)
return None
if new_block_iters > 0:
return new_block_iters
return None
async def _reset_chains(self, first_run=False, only_eos=False):
# First, stop all chains.
self.last_active_time = time.time()
log.debug("Resetting chains")
ip_iters = self.last_state.get_last_ip()
sub_slot_iters = self.last_state.get_sub_slot_iters()
if not first_run:
for chain in list(self.chain_type_to_stream.keys()):
await self._stop_chain(chain)
# Adjust all signage points iterations to the peak.
iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
self.signage_point_iters = [
(k * iters_per_signage - ip_iters, k)
for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
if k * iters_per_signage - ip_iters > 0
]
for sp, k in self.signage_point_iters:
assert k * iters_per_signage > 0
assert k * iters_per_signage < sub_slot_iters
# Adjust all unfinished blocks iterations to the peak.
new_unfinished_blocks = []
self.iters_finished = set()
self.proofs_finished = []
self.num_resets += 1
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
self.iters_to_submit[chain] = []
self.iters_submitted[chain] = []
self.iteration_to_proof_type = {}
if not only_eos:
for block in self.unfinished_blocks + self.overflow_blocks:
new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
# Does not add duplicates, or blocks that we cannot infuse
if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
if block not in self.unfinished_blocks:
self.total_unfinished += 1
new_unfinished_blocks.append(block)
for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
self.iters_to_submit[chain].append(new_block_iters)
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
# Remove all unfinished blocks that have already passed.
self.unfinished_blocks = new_unfinished_blocks
# Signage points.
if not only_eos and len(self.signage_point_iters) > 0:
count_signage = 0
for signage, k in self.signage_point_iters:
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
self.iters_to_submit[chain].append(signage)
self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
count_signage += 1
if count_signage == 3:
break
left_subslot_iters = sub_slot_iters - ip_iters
assert left_subslot_iters > 0
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
for chain, iters in self.iters_to_submit.items():
for iteration in iters:
assert iteration > 0
async def _handle_new_peak(self):
assert self.new_peak is not None
self.last_state.set_state(self.new_peak)
if self.total_unfinished > 0:
remove_unfinished = []
for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
if (
unf_block_timelord.reward_chain_block.get_hash()
== self.new_peak.reward_chain_block.get_unfinished().get_hash()
):
if unf_block_timelord not in self.unfinished_blocks:
# We never got the EOS for this, but we have the block in overflow list
self.total_unfinished += 1
remove_unfinished.append(unf_block_timelord)
if len(remove_unfinished) > 0:
self.total_infused += 1
for block in remove_unfinished:
if block in self.unfinished_blocks:
self.unfinished_blocks.remove(block)
if block in self.overflow_blocks:
self.overflow_blocks.remove(block)
infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
log.info(
f"Total unfinished blocks: {self.total_unfinished}. "
f"Total infused blocks: {self.total_infused}. "
f"Infusion rate: {infusion_rate}%."
)
self.new_peak = None
await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
async def _map_chains_with_vdf_clients(self):
while not self._shut_down:
picked_chain = None
async with self.lock:
if len(self.free_clients) == 0:
break
ip, reader, writer = self.free_clients[0]
for chain_type in self.unspawned_chains:
challenge = self.last_state.get_challenge(chain_type)
initial_form = self.last_state.get_initial_form(chain_type)
if challenge is not None and initial_form is not None:
picked_chain = chain_type
break
if picked_chain is None:
break
picked_chain = self.unspawned_chains[0]
self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
self.free_clients = self.free_clients[1:]
self.unspawned_chains = self.unspawned_chains[1:]
self.chain_start_time[picked_chain] = time.time()
log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
)
)
)
async def _submit_iterations(self):
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
if chain in self.allows_iters:
_, _, writer = self.chain_type_to_stream[chain]
for iteration in self.iters_to_submit[chain]:
if iteration in self.iters_submitted[chain]:
continue
log.debug(f"Submitting iterations to {chain}: {iteration}")
assert iteration > 0
prefix = str(len(str(iteration)))
if len(str(iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(iteration)
writer.write(iter_str.encode())
await writer.drain()
self.iters_submitted[chain].append(iteration)
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
async def _check_for_new_sp(self, iter_to_look_for: uint64):
signage_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
]
if len(signage_iters) == 0:
return None
to_remove = []
for potential_sp_iters, signage_point_index in self.signage_point_iters:
if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
continue
signage_iter = potential_sp_iters
proofs_with_iter = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == signage_iter and label == self.num_resets
]
# Wait for both cc and rc to have the signage point.
if len(proofs_with_iter) == 2:
cc_info: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_info: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in proofs_with_iter:
if chain == Chain.CHALLENGE_CHAIN:
cc_info = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_info = info
rc_proof = proof
if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
log.error(f"Insufficient signage point data {signage_iter}")
continue
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_info.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
# This proof is on an outdated challenge, so don't use it
continue
iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
response = timelord_protocol.NewSignagePointVDF(
signage_point_index,
dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
cc_proof,
rc_info,
rc_proof,
)
if self.server is not None:
msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
to_remove.append((signage_iter, signage_point_index))
self.proofs_finished = self._clear_proof_list(signage_iter)
next_iters_count = 0
for next_sp, k in self.signage_point_iters:
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
self.iters_to_submit[chain].append(next_sp)
self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
next_iters_count += 1
if next_iters_count == 3:
break
break
for r in to_remove:
self.signage_point_iters.remove(r)
async def _check_for_new_ip(self, iter_to_look_for: uint64):
if len(self.unfinished_blocks) == 0:
return None
infusion_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
]
for iteration in infusion_iters:
if iteration != iter_to_look_for:
continue
proofs_with_iter = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == iteration and label == self.num_resets
]
if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
chain_count = 3
else:
chain_count = 2
if len(proofs_with_iter) == chain_count:
block = None
ip_iters = None
for unfinished_block in self.unfinished_blocks:
try:
_, ip_iters = iters_from_block(
self.constants,
unfinished_block.reward_chain_block,
self.last_state.get_sub_slot_iters(),
self.last_state.get_difficulty(),
)
except Exception as e:
log.error(f"Error {e}")
continue
if ip_iters - self.last_state.get_last_ip() == iteration:
block = unfinished_block
break
assert ip_iters is not None
if block is not None:
ip_total_iters = self.last_state.get_total_iters() + iteration
challenge = block.reward_chain_block.get_hash()
icc_info: Optional[VDFInfo] = None
icc_proof: Optional[VDFProof] = None
cc_info: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_info: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in proofs_with_iter:
if chain == Chain.CHALLENGE_CHAIN:
cc_info = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_info = info
rc_proof = proof
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
icc_info = info
icc_proof = proof
if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
return None
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_info.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(
f"Do not have correct challenge {rc_challenge.hex()} "
f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
)
continue
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
if not self.last_state.can_infuse_block(overflow):
log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
return None
cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
response = timelord_protocol.NewInfusionPointVDF(
challenge,
cc_info,
cc_proof,
rc_info,
rc_proof,
icc_info,
icc_proof,
)
msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
if self.server is not None:
await self.server.send_to_all([msg], NodeType.FULL_NODE)
self.proofs_finished = self._clear_proof_list(iteration)
if (
self.last_state.get_last_block_total_iters() is None
and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
):
# We don't know when the last block was, so we can't make peaks
return None
sp_total_iters = (
ip_total_iters
- ip_iters
+ calculate_sp_iters(
self.constants,
block.sub_slot_iters,
block.reward_chain_block.signage_point_index,
)
- (block.sub_slot_iters if overflow else 0)
)
if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
is_transaction_block = True
height: uint32 = uint32(0)
else:
last_block_ti = self.last_state.get_last_block_total_iters()
assert last_block_ti is not None
is_transaction_block = last_block_ti < sp_total_iters
height = uint32(self.last_state.get_height() + 1)
if height < 5:
# Don't directly update our state for the first few blocks, because we cannot validate
return None
new_reward_chain_block = RewardChainBlock(
uint128(self.last_state.get_weight() + block.difficulty),
height,
uint128(ip_total_iters),
block.reward_chain_block.signage_point_index,
block.reward_chain_block.pos_ss_cc_challenge_hash,
block.reward_chain_block.proof_of_space,
block.reward_chain_block.challenge_chain_sp_vdf,
block.reward_chain_block.challenge_chain_sp_signature,
cc_info,
block.reward_chain_block.reward_chain_sp_vdf,
block.reward_chain_block.reward_chain_sp_signature,
rc_info,
icc_info,
is_transaction_block,
)
if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
if self.last_state.peak is not None:
assert self.last_state.subslot_end is None
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else:
assert self.last_state.subslot_end is not None
if self.last_state.subslot_end.infused_challenge_chain is None:
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
else:
# the deficit
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else:
new_deficit = max(self.last_state.deficit - 1, 0)
if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
last_csb_or_eos = ip_total_iters
else:
last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
if self.last_state.just_infused_sub_epoch_summary():
new_sub_epoch_summary = None
passed_ses_height_but_not_yet_included = False
else:
new_sub_epoch_summary = block.sub_epoch_summary
if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
passed_ses_height_but_not_yet_included = True
else:
passed_ses_height_but_not_yet_included = (
self.last_state.get_passed_ses_height_but_not_yet_included()
)
self.new_peak = timelord_protocol.NewPeakTimelord(
new_reward_chain_block,
block.difficulty,
uint8(new_deficit),
block.sub_slot_iters,
new_sub_epoch_summary,
self.last_state.reward_challenge_cache,
uint128(last_csb_or_eos),
passed_ses_height_but_not_yet_included,
)
await self._handle_new_peak()
# Break so we alternate between checking SP and IP
break
async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
left_subslot_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
]
if len(left_subslot_iters) == 0:
return None
if left_subslot_iters[0] != iter_to_look_for:
return None
chains_finished = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
]
if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
chain_count = 3
else:
chain_count = 2
if len(chains_finished) == chain_count:
icc_ip_vdf: Optional[VDFInfo] = None
icc_ip_proof: Optional[VDFProof] = None
cc_vdf: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_vdf: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in chains_finished:
if chain == Chain.CHALLENGE_CHAIN:
cc_vdf = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_vdf = info
rc_proof = proof
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
icc_ip_vdf = info
icc_ip_proof = proof
assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_vdf.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
# This proof is on an outdated challenge, so don't use it
return None
log.debug("Collected end of subslot vdfs.")
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
if icc_ip_vdf is not None:
if self.last_state.peak is not None:
total_iters = (
self.last_state.get_total_iters()
- self.last_state.get_last_ip()
+ self.last_state.get_sub_slot_iters()
)
else:
total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
if iters_from_cb > self.last_state.sub_slot_iters:
log.error(f"{self.last_state.peak}")
log.error(f"{self.last_state.subslot_end}")
assert False
assert iters_from_cb <= self.last_state.sub_slot_iters
icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
)
if self.last_state.get_deficit() == 0:
assert icc_sub_slot is not None
icc_sub_slot_hash = icc_sub_slot.get_hash()
else:
icc_sub_slot_hash = None
next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
if next_ses is not None:
log.info(f"Including sub epoch summary{next_ses}")
ses_hash = next_ses.get_hash()
new_sub_slot_iters = next_ses.new_sub_slot_iters
new_difficulty = next_ses.new_difficulty
else:
ses_hash = None
new_sub_slot_iters = None
new_difficulty = None
cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
eos_deficit: uint8 = (
self.last_state.get_deficit()
if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
rc_sub_slot = RewardChainSubSlot(
rc_vdf,
cc_sub_slot.get_hash(),
icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
eos_deficit,
)
eos_bundle = EndOfSubSlotBundle(
cc_sub_slot,
icc_sub_slot,
rc_sub_slot,
SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
)
if self.server is not None:
msg = make_msg(
ProtocolMessageTypes.new_end_of_sub_slot_vdf,
timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
log.info(
f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
)
if next_ses is None or next_ses.new_difficulty is None:
self.unfinished_blocks = self.overflow_blocks.copy()
else:
self.unfinished_blocks = []
self.overflow_blocks = []
self.new_subslot_end = eos_bundle
await self._handle_subslot_end()
async def _handle_failures(self):
if len(self.vdf_failures) > 0:
failed_chain, proof_label = self.vdf_failures[0]
log.error(
f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
f"label {proof_label}, current: {self.num_resets}"
)
if proof_label == self.num_resets:
await self._reset_chains(only_eos=True)
self.vdf_failure_time = time.time()
self.vdf_failures = []
if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
else:
active_time_threshold = 60
if time.time() - self.last_active_time > active_time_threshold:
log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
await self._reset_chains()
async def _manage_chains(self):
async with self.lock:
await asyncio.sleep(5)
await self._reset_chains(True)
while not self._shut_down:
try:
await asyncio.sleep(0.1)
async with self.lock:
await self._handle_failures()
if self.new_peak is not None:
await self._handle_new_peak()
# Map free vdf_clients to unspawned chains.
await self._map_chains_with_vdf_clients()
async with self.lock:
# Submit pending iterations.
await self._submit_iterations()
not_finished_iters = [
it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
]
if len(not_finished_iters) == 0:
await asyncio.sleep(0.1)
continue
selected_iter = min(not_finished_iters)
# Check for new infusion point and broadcast it if present.
await self._check_for_new_ip(selected_iter)
# Check for new signage point and broadcast it if present.
await self._check_for_new_sp(selected_iter)
# Check for end of subslot, respawn chains and build EndOfSubslotBundle.
await self._check_for_end_of_subslot(selected_iter)
except Exception:
tb = traceback.format_exc()
log.error(f"Error while handling message: {tb}")
async def _do_process_communication(
    self,
    chain: Chain,
    challenge: bytes32,
    initial_form: ClassgroupElement,
    ip: str,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    # Data specific only when running in bluebox mode.
    bluebox_iteration: Optional[uint64] = None,
    header_hash: Optional[bytes32] = None,
    height: Optional[uint32] = None,
    field_vdf: Optional[uint8] = None,
    # Labels a proof to the current state only
    proof_label: Optional[int] = None,
):
    """Drive the wire protocol with one external vdf_client process.

    Handshake: send a one-byte mode flag, then the discriminant and the
    initial classgroup form (each length-prefixed), then wait for "OK".
    Afterwards, read length-prefixed proofs from the client until the
    literal "STOP" message arrives. Completed proofs are appended to
    self.proofs_finished (normal mode) or broadcast as compact proofs
    (sanitizer/bluebox mode). Any read failure records a vdf failure for
    this (chain, proof_label) and ends the conversation.
    """
    disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
    try:
        # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
        # the timelord tells the vdf_client what to execute.
        async with self.lock:
            if self.sanitizer_mode:
                writer.write(b"S")
            else:
                if self.config["fast_algorithm"]:
                    # Run n-wesolowski (fast) algorithm.
                    writer.write(b"N")
                else:
                    # Run two-wesolowski (slow) algorithm.
                    writer.write(b"T")
            await writer.drain()
        # Discriminant is sent as a 3-digit zero-padded decimal length
        # followed by the decimal digits of the discriminant itself.
        prefix = str(len(str(disc)))
        if len(prefix) == 1:
            prefix = "00" + prefix
        if len(prefix) == 2:
            prefix = "0" + prefix
        async with self.lock:
            writer.write((prefix + str(disc)).encode())
            await writer.drain()
        # Send initial_form prefixed with its length.
        async with self.lock:
            writer.write(bytes([len(initial_form.data)]) + initial_form.data)
            await writer.drain()
        try:
            ok = await reader.readexactly(2)
        # NOTE(review): listing Exception alongside the narrower types makes
        # this an effectively catch-all handler; kept as written.
        except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                self.vdf_failures.append((chain, proof_label))
                self.vdf_failures_count += 1
            return None
        if ok.decode() != "OK":
            return None
        log.debug("Got handshake with VDF client.")
        if not self.sanitizer_mode:
            async with self.lock:
                self.allows_iters.append(chain)
        else:
            async with self.lock:
                assert chain is Chain.BLUEBOX
                assert bluebox_iteration is not None
                # Iteration count is sent as a 2-digit decimal length
                # prefix followed by the decimal digits.
                prefix = str(len(str(bluebox_iteration)))
                if len(str(bluebox_iteration)) < 10:
                    prefix = "0" + prefix
                iter_str = prefix + str(bluebox_iteration)
                writer.write(iter_str.encode())
                await writer.drain()
        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except (
                asyncio.IncompleteReadError,
                ConnectionResetError,
                Exception,
            ) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append((chain, proof_label))
                    self.vdf_failures_count += 1
                break
            msg = ""
            try:
                msg = data.decode()
            except Exception:
                # Non-UTF8 header: treat as a length prefix below.
                pass
            if msg == "STOP":
                log.debug(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof, 4 bytes is length prefix
                    length = int.from_bytes(data, "big")
                    proof = await reader.readexactly(length)
                    # Payload is hex-encoded text; decode to raw bytes.
                    stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append((chain, proof_label))
                        self.vdf_failures_count += 1
                    break
                # Proof layout: 8-byte iteration count, 8-byte y length,
                # y bytes, 1-byte witness type, remaining bytes = witness.
                iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
                y_size_bytes = stdout_bytes_io.read(8)
                y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
                y_bytes = stdout_bytes_io.read(y_size)
                witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                proof_bytes: bytes = stdout_bytes_io.read()
                # Verifies our own proof just in case
                form_size = ClassgroupElement.get_size(self.constants)
                output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                if not self.sanitizer_mode:
                    time_taken = time.time() - self.chain_start_time[chain]
                    # Rounded to one decimal for the log line.
                    ips = int(iterations_needed / time_taken * 10) / 10
                    log.info(
                        f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                        f" iters, "
                        f"Estimated IPS: {ips}, Chain: {chain}"
                    )
                vdf_info: VDFInfo = VDFInfo(
                    challenge,
                    iterations_needed,
                    output,
                )
                vdf_proof: VDFProof = VDFProof(
                    witness_type,
                    proof_bytes,
                    self.sanitizer_mode,
                )
                if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                    # Logged but not fatal: the loop keeps consuming proofs.
                    log.error("Invalid proof of time!")
                if not self.sanitizer_mode:
                    async with self.lock:
                        assert proof_label is not None
                        self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
                else:
                    # Bluebox mode: acknowledge, then broadcast the compact
                    # proof to all connected full nodes.
                    async with self.lock:
                        writer.write(b"010")
                        await writer.drain()
                    assert header_hash is not None
                    assert field_vdf is not None
                    assert height is not None
                    response = timelord_protocol.RespondCompactProofOfTime(
                        vdf_info, vdf_proof, header_hash, height, field_vdf
                    )
                    if self.server is not None:
                        message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
                        await self.server.send_to_all([message], NodeType.FULL_NODE)
    except ConnectionResetError as e:
        log.debug(f"Connection reset with VDF client {e}")
async def _manage_discriminant_queue_sanitizer(self):
    """Bluebox dispatcher: pair pending compact-proof requests with free vdf_clients.

    Runs until shutdown. Each pass (under self.lock) pops one pending
    request and one free client at a time and spawns a
    _do_process_communication task on the BLUEBOX chain for them.
    """
    while not self._shut_down:
        async with self.lock:
            try:
                while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
                    # Select randomly the field_vdf we're creating a compact vdf for.
                    target_field_vdf = random.randint(1, 4)
                    info = next(
                        (info for info in self.pending_bluebox_info if info[1].field_vdf == target_field_vdf),
                        None,
                    )
                    if info is None:
                        # Nothing pending for the chosen field: fall back to FIFO.
                        info = self.pending_bluebox_info[0]
                    ip, reader, writer = self.free_clients[0]
                    self.process_communication_tasks.append(
                        asyncio.create_task(
                            self._do_process_communication(
                                Chain.BLUEBOX,
                                info[1].new_proof_of_time.challenge,
                                ClassgroupElement.get_default_element(),
                                ip,
                                reader,
                                writer,
                                info[1].new_proof_of_time.number_of_iterations,
                                info[1].header_hash,
                                info[1].height,
                                info[1].field_vdf,
                            )
                        )
                    )
                    # Consume the dispatched request and client.
                    self.pending_bluebox_info.remove(info)
                    self.free_clients = self.free_clients[1:]
            except Exception as e:
                # Best-effort loop: log and retry on the next pass.
                log.error(f"Exception manage discriminant queue: {e}")
        await asyncio.sleep(0.1)
| true | true |
f7336e48fc87caab09cea47cc49e91a87202fb5f | 1,502 | py | Python | saarland/saarlouis.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | 12 | 2022-02-23T11:06:06.000Z | 2022-03-04T17:21:44.000Z | saarland/saarlouis.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | saarland/saarlouis.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
from botbase import *
# Regexes for the three counters in the German ticker text:
# total confirmed ("insgesamt"), deaths ("Verstorben"), recovered ("Genesen").
_saarlouis_c = re.compile(r"insgesamt:\s*([0-9.]+)")
_saarlouis_d = re.compile(r"Verstorben:\s*([0-9.]+)")
_saarlouis_g = re.compile(r"Genesen:\s*([0-9.]+)")

def saarlouis(sheets):
    """Scrape the Kreis Saarlouis corona ticker and push today's numbers.

    Raises NotYetAvailableException when the newest ticker entry does not
    carry today's date; asserts that all three counters were found.
    """
    import bs4
    soup = get_soup("https://www.kreis-saarlouis.de/Corona-Virus/Corona-Ticker.htm?")
    content = soup.find(id="content_frame")
    ps = []
    # Walk sibling nodes starting at the first <hr>; entries on this page
    # are separated by <hr> tags, so each <hr> marks a candidate stop point.
    cur = next(content.find("hr").parent.children)
    stop = cur.findNext("hr")
    while cur is not None:
        # Collect all non-empty text fragments from element nodes.
        if isinstance(cur, bs4.Tag): ps.extend([p for p in cur.find_all(text=True) if not p.strip() == ""])
        cur = cur.nextSibling
        if cur == stop:
            # Keep reading past the separator until at least 4 fragments
            # were collected (date line + the three counters).
            if len(ps) >= 4: break
            stop = cur.findNext("hr")
    #for p in ps: print("A",p)
    #date = check_date(p[0], "Merzig-Wadern")
    # First fragment must contain today's date (either 4- or 2-digit year).
    if not (today().strftime("%d.%m.%Y") in ps[0] or today().strftime("%d.%m.%y") in ps[0]): raise NotYetAvailableException("Saarlouis noch alt: "+ps[0])
    args={}
    for p in ps:
        m = _saarlouis_c.search(p)
        if m: args["c"] = force_int(m.group(1))
        m = _saarlouis_d.search(p)
        if m: args["d"] = force_int(m.group(1))
        m = _saarlouis_g.search(p)
        if m: args["g"] = force_int(m.group(1))
    assert "c" in args and "d" in args and "g" in args, "No data - yet?"
    # 10044 is the sheet row id for Kreis Saarlouis (see Task below).
    update(sheets, 10044, **args, sig="Bot", ignore_delta=False)
    return True

# Poll between 15:30 and 20:35, every 600 seconds.
schedule.append(Task(15, 30, 20, 35, 600, saarlouis, 10044))

if __name__ == '__main__': saarlouis(googlesheets())
| 39.526316 | 153 | 0.601864 |
from botbase import *
_saarlouis_c = re.compile(r"insgesamt:\s*([0-9.]+)")
_saarlouis_d = re.compile(r"Verstorben:\s*([0-9.]+)")
_saarlouis_g = re.compile(r"Genesen:\s*([0-9.]+)")
def saarlouis(sheets):
import bs4
soup = get_soup("https://www.kreis-saarlouis.de/Corona-Virus/Corona-Ticker.htm?")
content = soup.find(id="content_frame")
ps = []
cur = next(content.find("hr").parent.children)
stop = cur.findNext("hr")
while cur is not None:
if isinstance(cur, bs4.Tag): ps.extend([p for p in cur.find_all(text=True) if not p.strip() == ""])
cur = cur.nextSibling
if cur == stop:
if len(ps) >= 4: break
stop = cur.findNext("hr")
if not (today().strftime("%d.%m.%Y") in ps[0] or today().strftime("%d.%m.%y") in ps[0]): raise NotYetAvailableException("Saarlouis noch alt: "+ps[0])
args={}
for p in ps:
m = _saarlouis_c.search(p)
if m: args["c"] = force_int(m.group(1))
m = _saarlouis_d.search(p)
if m: args["d"] = force_int(m.group(1))
m = _saarlouis_g.search(p)
if m: args["g"] = force_int(m.group(1))
assert "c" in args and "d" in args and "g" in args, "No data - yet?"
update(sheets, 10044, **args, sig="Bot", ignore_delta=False)
return True
schedule.append(Task(15, 30, 20, 35, 600, saarlouis, 10044))
if __name__ == '__main__': saarlouis(googlesheets())
| true | true |
f7336ebeb8aababd8d8856c28bbf476489d49a70 | 6,193 | py | Python | bin/circlepack.py | brsr/mapproj | 1ec1694149a69da6393ecb94650f7164e3cfd2e1 | [
"MIT"
] | null | null | null | bin/circlepack.py | brsr/mapproj | 1ec1694149a69da6393ecb94650f7164e3cfd2e1 | [
"MIT"
] | null | null | null | bin/circlepack.py | brsr/mapproj | 1ec1694149a69da6393ecb94650f7164e3cfd2e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 14:15:06 2021
@author: brsr
"""
import numpy as np
import matplotlib.pyplot as plt
import mapproj
import fiona
from shapely.geometry import Point, LineString, MultiPolygon, Polygon
import geopandas
import pyproj
# Unit sphere geodesic calculator (radius 1, no flattening).
geod = pyproj.Geod(a=1, f=0)
# Grid subdivision count per triangle edge.
n = 9
# Latitude of an icosahedron face vertex ring, in degrees.
a = np.arctan(1/2)/np.pi*180
# Control triangle (lon, lat) — one icosahedral face centered near lon 15.
actrlpts3 = np.array([[15+0, 15+36, 15-36],
                      [-a, a, a]])
#actrlpts3 = np.array([[ 0, 0, 90],
#                      [90, 0, 0]])
ctrlpoly3 = mapproj.geodesics(actrlpts3[0], actrlpts3[1], geod, includepts=True)
# Target triangle in the plane: cube roots of unity scaled/rotated by 1j.
tgtpts3 = mapproj.complex_to_float2d(1j*np.exp(2j/3*np.arange(3)*np.pi)).T
bp = mapproj.Barycentric(tgtpts3)
grid3 = mapproj.Barycentric.grid(1/8)
gridp3 = mapproj.Barycentric.gridpolys(n=9)
#%%
# Map the barycentric grid into the plane, then pull it back onto the
# sphere via the conformal triangle projection (inverse transform).
gridbary = mapproj.transeach(bp.transform, gridp3)
conformal = mapproj.ConformalTri3(actrlpts3, tgtpts3)
invframe = mapproj.transeach(conformal.invtransform, gridbary)#slooooow
invframev = mapproj.transeach(mapproj.UnitVector.transform, invframe)
invframe.plot()
#%%
res = geod.inv(actrlpts3[0], actrlpts3[1],
np.roll(actrlpts3[0], -1), np.roll(actrlpts3[1], -1))
cornerangle = np.pi/180*(res[0] - np.roll(res[1], 1)).mean() #np.pi*2/5 #
edgelength = res[2].mean()
initial = conformal.ctrlpts_v
anglesumtarget = np.ones(shape=(n+1,n+1))
anglesumtarget = np.tril(anglesumtarget, -1)[::-1]
#anglesumtarget[..., 0] = 0
#anglesumtarget[-1] = 0
anglesumtarget[anglesumtarget == 0] = np.nan
ind = np.arange(0,n)
edgeweight = np.ones(n)*2
edgeweight[[0, -1]] = 1
edge1 = (ind, 0)
edge2 = (0, ind)
edge3 = (ind,ind[::-1])
anglesumtarget[edge1] = 1/2
anglesumtarget[edge2] = 1/2
anglesumtarget[edge3] = 1/2
anglesumtarget *= 2*np.pi
anglesumtarget[0, 0] = cornerangle
anglesumtarget[-2, 0] = cornerangle
anglesumtarget[0, -2] = cornerangle
msplitframe = np.array([[0, 1, 2],
[2, 0, 1]])
msplit1 = np.tile(msplitframe, (3, n, n))[..., :n,:n]
msplit = (msplit1 + np.arange(3)[:, np.newaxis, np.newaxis]) % 3
msplit = msplit == 0
msplit[:, ~np.isfinite(anglesumtarget[:-1,:-1])] = False
#neighbors like this
# n n
# n x n
# n n
neighbors = np.array([[ 1, 1, 0, -1, -1, 0],
[ 0, -1, -1, 0, 1, 1]])
grindex = np.array(np.meshgrid(ind, ind))
neighborhood = neighbors[..., np.newaxis, np.newaxis] + grindex[:,np.newaxis]
findex = np.array(np.where(np.isfinite(anglesumtarget))).T
r = np.ones(shape=anglesumtarget.shape, dtype=float)*cornerangle/(2*n-2)
r[~np.isfinite(anglesumtarget)] = np.nan
r[[0, -2, 0], [0, 0, -2]] /= 3
#%%
for i in range(128):
x = r[:-1, :-1]
y = r[neighborhood[0], neighborhood[1]]
z = np.roll(y, 1, axis=0)
if np.any(x+y+z > np.pi):
break
locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/
(np.sin(x+y)*np.sin(x+z)))
#locos_x_yz = np.arccos(((x+y)**2 + (x+z)**2 - (y+z)**2)/
# (2*(x+y)*(x+z)))
anglesum = np.nansum(locos_x_yz, axis=0)
pctdiff = (anglesum/anglesumtarget[:-1,:-1])
pctdiff /= np.nanmean(pctdiff)
#pctdiff -= np.clip(pctdiff, 0.9, 1.1)
#pctdiff /= np.nanmean(pctdiff)
#ind = np.unravel_index(np.nanargmax(abs(pctdiff)), pctdiff.shape)
r[:-1, :-1] *= pctdiff
r *= edgelength/(r[edge1]@edgeweight)
print(i, np.nanmax(abs(pctdiff-1)))
if np.nanmax(abs(pctdiff-1)) < 1E-7:
break
#print(ind, r[ind], pctdiff[ind])
#print(r[edge1]@edgeweight, edgelength)
print(np.round(r[:-1,:-1], 3))
#%%0.9999999999999746 1.0000000000000149
#%%
for i in range(36*256):
ind = findex[i % findex.shape[0]]
x = r[ind[0], ind[1]]
y = r[neighbors[0] + ind[0], neighbors[1] + ind[1]]
z = np.roll(y, 1, axis=0)
locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/
(np.sin(x+y)*np.sin(x+z)))
anglesum = np.nansum(locos_x_yz, axis=0)
pctdiff = anglesum/anglesumtarget[ind[0],ind[1]]#np.clip(, 0.8, 1.2)
r[ind[0], ind[1]] *= pctdiff
r *= edgelength/(r[edge1]@edgeweight)
#print(ind, r[ind[0], ind[1]], pctdiff)
print(r[edge1]@edgeweight, np.pi/2)
print(np.round(r[:-1,:-1], 3))
#%%
vertices = np.ones((3,n+1,n+1))*np.nan
vertices[:,0,0] = initial[:,0]
vertices[:,-2,0] = initial[:,1]
vertices[:,0,-2] = initial[:,2]
r1 = r[edge1]
t = (r1[:-1] + r1[1:]).cumsum()/edgelength
t = np.concatenate([[0,], t])
e1 = mapproj.slerp(initial[:,0], initial[:,1], t[:, np.newaxis]).T
e2 = mapproj.slerp(initial[:,0], initial[:,2], t[:, np.newaxis]).T
e3 = mapproj.slerp(initial[:,2], initial[:,1], t[:, np.newaxis]).T
vertices[:,edge1[0], edge1[1]] = e1
vertices[:,edge2[0], edge2[1]] = e2
vertices[:,edge3[0], edge3[1]] = e3
#%%
for i in range(1, n-1):
for j in range(1, n-i-1):
index = np.array([i, j])
indexnb = index[:,np.newaxis] + neighbors
vertexnb = vertices[:, indexnb[0], indexnb[1]]
rnb = r[indexnb[0], indexnb[1]]
ri = r[i, j]
filled = np.all(np.isfinite(vertexnb), axis=0)
vertexnb = vertexnb[:, filled]
rnb = rnb[filled]
cl = np.cos(rnb+ri)
lq = np.linalg.lstsq(vertexnb.T, cl)
v = lq[0]
norm = np.linalg.norm(v)
v /= norm
vertices[:, i, j] = v
print(i, j, filled.sum(), lq, norm)
vindex = np.all(np.isfinite(vertices), axis=0)
result = mapproj.UnitVector.invtransform_v(vertices)
#%%
fig, axes = plt.subplots(ncols = 3, figsize=(10, 8), sharex=True, sharey=True)
axes[0].plot(vertices[0], vertices[1])
axes[1].plot(vertices[0], vertices[2])
axes[2].plot(vertices[1], vertices[2])
for ax in axes:
ax.set_aspect('equal')
#%%
fig, ax = plt.subplots(figsize=(10, 8))
invframe.plot(ax=ax)
ax.scatter(*result, color='k')
ax.scatter(*actrlpts3, color='y')
#%%
triframe = np.array([[[0,0,1],
[0,1,0]],
[[1,0,1],
[1,1,0]]])
tris = []
for i in range(n-1):
for j in range(n-i-1):
for tf in triframe:
xy = result[:,i+tf[0], j+tf[1]]
if np.all(np.isfinite(xy)):
tris.append(Polygon(xy.T))
gptris = geopandas.GeoSeries(tris)
#use geopandas.intersect to determine which grid cell a point lands in | 32.941489 | 80 | 0.596964 |
import numpy as np
import matplotlib.pyplot as plt
import mapproj
import fiona
from shapely.geometry import Point, LineString, MultiPolygon, Polygon
import geopandas
import pyproj
geod = pyproj.Geod(a=1, f=0)
n = 9
a = np.arctan(1/2)/np.pi*180
actrlpts3 = np.array([[15+0, 15+36, 15-36],
[-a, a, a]])
ctrlpoly3 = mapproj.geodesics(actrlpts3[0], actrlpts3[1], geod, includepts=True)
tgtpts3 = mapproj.complex_to_float2d(1j*np.exp(2j/3*np.arange(3)*np.pi)).T
bp = mapproj.Barycentric(tgtpts3)
grid3 = mapproj.Barycentric.grid(1/8)
gridp3 = mapproj.Barycentric.gridpolys(n=9)
gridbary = mapproj.transeach(bp.transform, gridp3)
conformal = mapproj.ConformalTri3(actrlpts3, tgtpts3)
invframe = mapproj.transeach(conformal.invtransform, gridbary)
invframev = mapproj.transeach(mapproj.UnitVector.transform, invframe)
invframe.plot()
res = geod.inv(actrlpts3[0], actrlpts3[1],
np.roll(actrlpts3[0], -1), np.roll(actrlpts3[1], -1))
cornerangle = np.pi/180*(res[0] - np.roll(res[1], 1)).mean() edgelength = res[2].mean()
initial = conformal.ctrlpts_v
anglesumtarget = np.ones(shape=(n+1,n+1))
anglesumtarget = np.tril(anglesumtarget, -1)[::-1]
anglesumtarget[anglesumtarget == 0] = np.nan
ind = np.arange(0,n)
edgeweight = np.ones(n)*2
edgeweight[[0, -1]] = 1
edge1 = (ind, 0)
edge2 = (0, ind)
edge3 = (ind,ind[::-1])
anglesumtarget[edge1] = 1/2
anglesumtarget[edge2] = 1/2
anglesumtarget[edge3] = 1/2
anglesumtarget *= 2*np.pi
anglesumtarget[0, 0] = cornerangle
anglesumtarget[-2, 0] = cornerangle
anglesumtarget[0, -2] = cornerangle
msplitframe = np.array([[0, 1, 2],
[2, 0, 1]])
msplit1 = np.tile(msplitframe, (3, n, n))[..., :n,:n]
msplit = (msplit1 + np.arange(3)[:, np.newaxis, np.newaxis]) % 3
msplit = msplit == 0
msplit[:, ~np.isfinite(anglesumtarget[:-1,:-1])] = False
neighbors = np.array([[ 1, 1, 0, -1, -1, 0],
[ 0, -1, -1, 0, 1, 1]])
grindex = np.array(np.meshgrid(ind, ind))
neighborhood = neighbors[..., np.newaxis, np.newaxis] + grindex[:,np.newaxis]
findex = np.array(np.where(np.isfinite(anglesumtarget))).T
r = np.ones(shape=anglesumtarget.shape, dtype=float)*cornerangle/(2*n-2)
r[~np.isfinite(anglesumtarget)] = np.nan
r[[0, -2, 0], [0, 0, -2]] /= 3
for i in range(128):
x = r[:-1, :-1]
y = r[neighborhood[0], neighborhood[1]]
z = np.roll(y, 1, axis=0)
if np.any(x+y+z > np.pi):
break
locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/
(np.sin(x+y)*np.sin(x+z)))
anglesum = np.nansum(locos_x_yz, axis=0)
pctdiff = (anglesum/anglesumtarget[:-1,:-1])
pctdiff /= np.nanmean(pctdiff)
r[:-1, :-1] *= pctdiff
r *= edgelength/(r[edge1]@edgeweight)
print(i, np.nanmax(abs(pctdiff-1)))
if np.nanmax(abs(pctdiff-1)) < 1E-7:
break
print(np.round(r[:-1,:-1], 3))
for i in range(36*256):
ind = findex[i % findex.shape[0]]
x = r[ind[0], ind[1]]
y = r[neighbors[0] + ind[0], neighbors[1] + ind[1]]
z = np.roll(y, 1, axis=0)
locos_x_yz = np.arccos((np.cos(y+z) - np.cos(x+y)*np.cos(x+z))/
(np.sin(x+y)*np.sin(x+z)))
anglesum = np.nansum(locos_x_yz, axis=0)
pctdiff = anglesum/anglesumtarget[ind[0],ind[1]]
r[ind[0], ind[1]] *= pctdiff
r *= edgelength/(r[edge1]@edgeweight)
print(r[edge1]@edgeweight, np.pi/2)
print(np.round(r[:-1,:-1], 3))
vertices = np.ones((3,n+1,n+1))*np.nan
vertices[:,0,0] = initial[:,0]
vertices[:,-2,0] = initial[:,1]
vertices[:,0,-2] = initial[:,2]
r1 = r[edge1]
t = (r1[:-1] + r1[1:]).cumsum()/edgelength
t = np.concatenate([[0,], t])
e1 = mapproj.slerp(initial[:,0], initial[:,1], t[:, np.newaxis]).T
e2 = mapproj.slerp(initial[:,0], initial[:,2], t[:, np.newaxis]).T
e3 = mapproj.slerp(initial[:,2], initial[:,1], t[:, np.newaxis]).T
vertices[:,edge1[0], edge1[1]] = e1
vertices[:,edge2[0], edge2[1]] = e2
vertices[:,edge3[0], edge3[1]] = e3
for i in range(1, n-1):
for j in range(1, n-i-1):
index = np.array([i, j])
indexnb = index[:,np.newaxis] + neighbors
vertexnb = vertices[:, indexnb[0], indexnb[1]]
rnb = r[indexnb[0], indexnb[1]]
ri = r[i, j]
filled = np.all(np.isfinite(vertexnb), axis=0)
vertexnb = vertexnb[:, filled]
rnb = rnb[filled]
cl = np.cos(rnb+ri)
lq = np.linalg.lstsq(vertexnb.T, cl)
v = lq[0]
norm = np.linalg.norm(v)
v /= norm
vertices[:, i, j] = v
print(i, j, filled.sum(), lq, norm)
vindex = np.all(np.isfinite(vertices), axis=0)
result = mapproj.UnitVector.invtransform_v(vertices)
fig, axes = plt.subplots(ncols = 3, figsize=(10, 8), sharex=True, sharey=True)
axes[0].plot(vertices[0], vertices[1])
axes[1].plot(vertices[0], vertices[2])
axes[2].plot(vertices[1], vertices[2])
for ax in axes:
ax.set_aspect('equal')
fig, ax = plt.subplots(figsize=(10, 8))
invframe.plot(ax=ax)
ax.scatter(*result, color='k')
ax.scatter(*actrlpts3, color='y')
triframe = np.array([[[0,0,1],
[0,1,0]],
[[1,0,1],
[1,1,0]]])
tris = []
for i in range(n-1):
for j in range(n-i-1):
for tf in triframe:
xy = result[:,i+tf[0], j+tf[1]]
if np.all(np.isfinite(xy)):
tris.append(Polygon(xy.T))
gptris = geopandas.GeoSeries(tris)
| true | true |
f7336f7066b821195e768f37cb19a037ee3fe872 | 568 | py | Python | src/yaplox/yaplox_callable.py | RoelAdriaans/yaplox | 1cfce99cf44318a50838503872b61b2740b010d6 | [
"Unlicense"
] | 6 | 2020-11-22T09:42:04.000Z | 2021-12-13T02:12:48.000Z | src/yaplox/yaplox_callable.py | cfbolz/yaplox | 1efcdfad44890567a4a6c09325e5f1a377431bcc | [
"Unlicense"
] | 103 | 2020-08-13T10:06:41.000Z | 2021-06-23T07:14:31.000Z | src/yaplox/yaplox_callable.py | cfbolz/yaplox | 1efcdfad44890567a4a6c09325e5f1a377431bcc | [
"Unlicense"
] | 1 | 2021-01-15T10:09:17.000Z | 2021-01-15T10:09:17.000Z | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, List
# This is a hack to prevent circular imports; since Interpreter imports from this file,
# we will only import it during the type_checking run from mypy
if TYPE_CHECKING:
from yaplox.interpreter import Interpreter
class YaploxCallable(ABC):
    """Abstract interface for every value that Yaplox code can invoke.

    Concrete callables (functions, classes, native builtins) implement
    ``call`` to perform the invocation and ``arity`` to report how many
    arguments they accept.
    """

    @abstractmethod
    def arity(self) -> int:
        """Return the number of arguments this callable expects."""
        raise NotImplementedError

    @abstractmethod
    def call(self, interpreter: Interpreter, arguments: List[Any]):
        """Execute the callable with the given interpreter and argument list."""
        raise NotImplementedError
| 28.4 | 87 | 0.762324 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, List
if TYPE_CHECKING:
from yaplox.interpreter import Interpreter
class YaploxCallable(ABC):
@abstractmethod
def call(self, interpreter: Interpreter, arguments: List[Any]):
raise NotImplementedError
@abstractmethod
def arity(self) -> int:
raise NotImplementedError
| true | true |
f7336f81bb070dbec84f33919939245747a64b7a | 12,269 | py | Python | minitests/opentitan/src.vivado/runme.py | common-config-bot/prjuray | c550b03a26b4c4a9c4453353bd642a21f710b3ec | [
"Apache-2.0"
] | 2 | 2022-03-18T00:17:38.000Z | 2022-03-28T22:57:58.000Z | minitests/opentitan/src.vivado/runme.py | common-config-bot/prjuray | c550b03a26b4c4a9c4453353bd642a21f710b3ec | [
"Apache-2.0"
] | null | null | null | minitests/opentitan/src.vivado/runme.py | common-config-bot/prjuray | c550b03a26b4c4a9c4453353bd642a21f710b3ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import edalize
import os
work_root = 'build'
post_imp_file = os.path.realpath(os.path.join(work_root, 'post.tcl'))
os.makedirs(work_root, exist_ok=True)
synth_tool = 'vivado'
srcs = [
'lowrisc_constants_top_pkg_0/rtl/top_pkg.sv',
'lowrisc_dv_pins_if_0/pins_if.sv',
'lowrisc_prim_generic_clock_gating_0/rtl/prim_generic_clock_gating.sv',
'lowrisc_prim_generic_clock_mux2_0/rtl/prim_generic_clock_mux2.sv',
'lowrisc_prim_generic_flash_0/rtl/prim_generic_flash.sv',
'lowrisc_prim_generic_pad_wrapper_0/rtl/prim_generic_pad_wrapper.sv',
'lowrisc_prim_generic_ram_1p_0/rtl/prim_generic_ram_1p.sv',
'lowrisc_prim_generic_ram_2p_0/rtl/prim_generic_ram_2p.sv',
'lowrisc_prim_prim_pkg_0.1/rtl/prim_pkg.sv',
'lowrisc_prim_xilinx_clock_gating_0/rtl/prim_xilinx_clock_gating.sv',
'lowrisc_prim_xilinx_clock_mux2_0/rtl/prim_xilinx_clock_mux2.sv',
'lowrisc_prim_xilinx_pad_wrapper_0/rtl/prim_xilinx_pad_wrapper.sv',
'lowrisc_prim_xilinx_ram_2p_0/rtl/prim_xilinx_ram_2p.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pkg.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_alu.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_compressed_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_controller.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_cs_registers.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_ex_block.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_fetch_fifo.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_id_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_if_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_load_store_unit.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_fast.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_slow.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_prefetch_buffer.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pmp.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_register_file_ff.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_core.sv',
'lowrisc_ip_flash_ctrl_pkg_0.1/rtl/flash_ctrl_pkg.sv',
'lowrisc_prim_clock_gating_0/abstract/prim_clock_gating.sv',
'lowrisc_prim_clock_mux2_0/abstract/prim_clock_mux2.sv',
'lowrisc_prim_diff_decode_0/rtl/prim_diff_decode.sv',
'lowrisc_prim_pad_wrapper_0/abstract/prim_pad_wrapper.sv',
'lowrisc_prim_ram_1p_0/abstract/prim_ram_1p.sv',
'lowrisc_prim_ram_2p_0/abstract/prim_ram_2p.sv',
'lowrisc_tlul_headers_0.1/rtl/tlul_pkg.sv',
'lowrisc_prim_all_0.1/rtl/prim_clock_inverter.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_ppc.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_tree.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_sram_arbiter.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_async.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_flop_2sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_lfsr.sv',
'lowrisc_prim_all_0.1/rtl/prim_packer.sv',
'lowrisc_prim_all_0.1/rtl/prim_pulse_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter_ctr.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg_ext.sv',
'lowrisc_prim_all_0.1/rtl/prim_intr_hw.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_enc.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_dec.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_adv.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_async_adv.sv',
'lowrisc_prim_flash_0/abstract/prim_flash.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_pkg.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_top.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_pkg.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_top.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_consts_pkg.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_in_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_out_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_rx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx_mux.sv',
'lowrisc_prim_generic_rom_0/rtl/prim_generic_rom.sv',
'lowrisc_prim_xilinx_rom_0/rtl/prim_xilinx_rom.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_sync.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_async.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_err.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert_multiple.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/debug_rom/debug_rom.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_pkg.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_sba.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_csrs.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_mem.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_cdc.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag_tap.sv',
'lowrisc_prim_rom_0/abstract/prim_rom.sv',
'lowrisc_tlul_adapter_reg_0.1/rtl/tlul_adapter_reg.sv',
'lowrisc_tlul_adapter_sram_0.1/rtl/tlul_adapter_sram.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_err_resp.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_socket_1n.sv',
'lowrisc_tlul_socket_m1_0.1/rtl/tlul_socket_m1.sv',
'lowrisc_tlul_sram2tlul_0.1/rtl/sram2tlul.sv',
'lowrisc_ip_aes_0.5/rtl/aes_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_top.sv',
'lowrisc_ip_aes_0.5/rtl/aes_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sub_bytes.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_lut.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_canright.sv',
'lowrisc_ip_aes_0.5/rtl/aes_shift_rows.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_columns.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_single_column.sv',
'lowrisc_ip_aes_0.5/rtl/aes_key_expand.sv',
'lowrisc_ip_aes_0.5/rtl/aes.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_pkg.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_reg_wrap.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_class.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_ping_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_esc_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_accu.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_pkg.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_top.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_erase_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_prog_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_rd_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_mp.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_phy.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_pkg.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2_pad.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_core.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_pkg.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_top.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen.sv',
'lowrisc_ip_pinmux_component_0.1/rtl/pinmux.sv',
'lowrisc_ip_rv_core_ibex_0.1/rtl/rv_core_ibex.sv',
'lowrisc_ip_rv_dm_0.1/rtl/rv_dm.sv',
'lowrisc_ip_rv_dm_0.1/rtl/tlul_adapter_host.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_gateway.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_target.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_pkg.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_top.sv',
'lowrisc_ip_rv_timer_0.1/rtl/timer_core.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_top.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_rxf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_txf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwmode.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_pkg.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_top.sv',
'lowrisc_ip_uart_0.1/rtl/uart_rx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_tx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_core.sv',
'lowrisc_ip_uart_0.1/rtl/uart.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_pkg.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_top.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_usbif.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_flop_2syncpulse.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_linkstate.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_iomux.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev.sv',
'lowrisc_ip_xbar_main_0.1/tl_main_pkg.sv',
'lowrisc_ip_xbar_main_0.1/xbar_main.sv',
'lowrisc_ip_xbar_peri_0.1/tl_peri_pkg.sv',
'lowrisc_ip_xbar_peri_0.1/xbar_peri.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_pkg.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_top.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/padctl.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/autogen/top_earlgrey.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/clkgen_xilusp.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/top_earlgrey_zcu104.sv',
]
# Post-implementation Tcl hook: make Vivado dump a design checkpoint.
with open(post_imp_file, 'w') as f:
    f.write('write_checkpoint -force design.dcp')

# Non-RTL inputs: board pin constraints plus the assertion macro file,
# which is marked as an include file rather than compiled standalone.
files = [{
    'name':
    os.path.realpath(
        'lowrisc_systems_top_earlgrey_zcu104_0.1/data/pins_zcu104.xdc'),
    'file_type':
    'xdc'
},
    {
        'name':
        os.path.realpath('lowrisc_prim_assert_0.1/rtl/prim_assert.sv'),
        'file_type':
        'systemVerilogSource',
        'is_include_file':
        'true'
    }]

# Verilog `define parameters; their values come from the --... args below.
parameters = {
    'ROM_INIT_FILE': {
        'datatype': 'str',
        'paramtype': 'vlogdefine'
    },
    'PRIM_DEFAULT_IMPL': {
        'datatype': 'str',
        'paramtype': 'vlogdefine'
    },
}

# All RTL sources are plain SystemVerilog inputs.
for src in srcs:
    files.append({
        'name': os.path.realpath(src),
        'file_type': 'systemVerilogSource'
    })

tool = 'vivado'
incdirs = [os.path.realpath('lowrisc_prim_assert_0.1/rtl')]

# EDAM (EDA metadata) structure consumed by the edalize backend.
# URAY_PART selects the target FPGA part (set by the project environment).
edam = {
    'files': files,
    'name': 'design',
    'toplevel': 'top_earlgrey_zcu104',
    'parameters': parameters,
    'tool_options': {
        'vivado': {
            'part': os.environ['URAY_PART'],
            'post_imp': post_imp_file,
            'synth': synth_tool
        }
    }
}

# Configure generates the project/scripts; build runs synthesis + implementation.
backend = edalize.get_edatool(tool)(edam=edam, work_root=work_root)
args = [
    '--ROM_INIT_FILE={}'.format(
        os.path.realpath('boot_rom_fpga_nexysvideo.vmem')),
    '--PRIM_DEFAULT_IMPL=prim_pkg::ImplXilinx'
]
backend.configure(args)
backend.build()
| 44.133094 | 86 | 0.754666 |
import edalize
import os
work_root = 'build'
post_imp_file = os.path.realpath(os.path.join(work_root, 'post.tcl'))
os.makedirs(work_root, exist_ok=True)
synth_tool = 'vivado'
srcs = [
'lowrisc_constants_top_pkg_0/rtl/top_pkg.sv',
'lowrisc_dv_pins_if_0/pins_if.sv',
'lowrisc_prim_generic_clock_gating_0/rtl/prim_generic_clock_gating.sv',
'lowrisc_prim_generic_clock_mux2_0/rtl/prim_generic_clock_mux2.sv',
'lowrisc_prim_generic_flash_0/rtl/prim_generic_flash.sv',
'lowrisc_prim_generic_pad_wrapper_0/rtl/prim_generic_pad_wrapper.sv',
'lowrisc_prim_generic_ram_1p_0/rtl/prim_generic_ram_1p.sv',
'lowrisc_prim_generic_ram_2p_0/rtl/prim_generic_ram_2p.sv',
'lowrisc_prim_prim_pkg_0.1/rtl/prim_pkg.sv',
'lowrisc_prim_xilinx_clock_gating_0/rtl/prim_xilinx_clock_gating.sv',
'lowrisc_prim_xilinx_clock_mux2_0/rtl/prim_xilinx_clock_mux2.sv',
'lowrisc_prim_xilinx_pad_wrapper_0/rtl/prim_xilinx_pad_wrapper.sv',
'lowrisc_prim_xilinx_ram_2p_0/rtl/prim_xilinx_ram_2p.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pkg.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_alu.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_compressed_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_controller.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_cs_registers.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_ex_block.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_fetch_fifo.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_id_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_if_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_load_store_unit.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_fast.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_slow.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_prefetch_buffer.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pmp.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_register_file_ff.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_core.sv',
'lowrisc_ip_flash_ctrl_pkg_0.1/rtl/flash_ctrl_pkg.sv',
'lowrisc_prim_clock_gating_0/abstract/prim_clock_gating.sv',
'lowrisc_prim_clock_mux2_0/abstract/prim_clock_mux2.sv',
'lowrisc_prim_diff_decode_0/rtl/prim_diff_decode.sv',
'lowrisc_prim_pad_wrapper_0/abstract/prim_pad_wrapper.sv',
'lowrisc_prim_ram_1p_0/abstract/prim_ram_1p.sv',
'lowrisc_prim_ram_2p_0/abstract/prim_ram_2p.sv',
'lowrisc_tlul_headers_0.1/rtl/tlul_pkg.sv',
'lowrisc_prim_all_0.1/rtl/prim_clock_inverter.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_ppc.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_tree.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_sram_arbiter.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_async.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_flop_2sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_lfsr.sv',
'lowrisc_prim_all_0.1/rtl/prim_packer.sv',
'lowrisc_prim_all_0.1/rtl/prim_pulse_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter_ctr.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg_ext.sv',
'lowrisc_prim_all_0.1/rtl/prim_intr_hw.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_enc.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_dec.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_adv.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_async_adv.sv',
'lowrisc_prim_flash_0/abstract/prim_flash.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_pkg.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_top.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_pkg.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_top.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_consts_pkg.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_in_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_out_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_rx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx_mux.sv',
'lowrisc_prim_generic_rom_0/rtl/prim_generic_rom.sv',
'lowrisc_prim_xilinx_rom_0/rtl/prim_xilinx_rom.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_sync.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_async.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_err.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert_multiple.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/debug_rom/debug_rom.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_pkg.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_sba.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_csrs.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_mem.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_cdc.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag_tap.sv',
'lowrisc_prim_rom_0/abstract/prim_rom.sv',
'lowrisc_tlul_adapter_reg_0.1/rtl/tlul_adapter_reg.sv',
'lowrisc_tlul_adapter_sram_0.1/rtl/tlul_adapter_sram.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_err_resp.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_socket_1n.sv',
'lowrisc_tlul_socket_m1_0.1/rtl/tlul_socket_m1.sv',
'lowrisc_tlul_sram2tlul_0.1/rtl/sram2tlul.sv',
'lowrisc_ip_aes_0.5/rtl/aes_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_top.sv',
'lowrisc_ip_aes_0.5/rtl/aes_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sub_bytes.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_lut.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_canright.sv',
'lowrisc_ip_aes_0.5/rtl/aes_shift_rows.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_columns.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_single_column.sv',
'lowrisc_ip_aes_0.5/rtl/aes_key_expand.sv',
'lowrisc_ip_aes_0.5/rtl/aes.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_pkg.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_reg_wrap.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_class.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_ping_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_esc_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_accu.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_pkg.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_top.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_erase_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_prog_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_rd_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_mp.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_phy.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_pkg.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2_pad.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_core.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_pkg.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_top.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen.sv',
'lowrisc_ip_pinmux_component_0.1/rtl/pinmux.sv',
'lowrisc_ip_rv_core_ibex_0.1/rtl/rv_core_ibex.sv',
'lowrisc_ip_rv_dm_0.1/rtl/rv_dm.sv',
'lowrisc_ip_rv_dm_0.1/rtl/tlul_adapter_host.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_gateway.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_target.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_pkg.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_top.sv',
'lowrisc_ip_rv_timer_0.1/rtl/timer_core.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_top.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_rxf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_txf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwmode.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_pkg.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_top.sv',
'lowrisc_ip_uart_0.1/rtl/uart_rx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_tx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_core.sv',
'lowrisc_ip_uart_0.1/rtl/uart.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_pkg.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_top.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_usbif.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_flop_2syncpulse.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_linkstate.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_iomux.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev.sv',
'lowrisc_ip_xbar_main_0.1/tl_main_pkg.sv',
'lowrisc_ip_xbar_main_0.1/xbar_main.sv',
'lowrisc_ip_xbar_peri_0.1/tl_peri_pkg.sv',
'lowrisc_ip_xbar_peri_0.1/xbar_peri.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_pkg.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_top.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/padctl.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/autogen/top_earlgrey.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/clkgen_xilusp.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/top_earlgrey_zcu104.sv',
]
with open(post_imp_file, 'w') as f:
f.write('write_checkpoint -force design.dcp')
files = [{
'name':
os.path.realpath(
'lowrisc_systems_top_earlgrey_zcu104_0.1/data/pins_zcu104.xdc'),
'file_type':
'xdc'
},
{
'name':
os.path.realpath('lowrisc_prim_assert_0.1/rtl/prim_assert.sv'),
'file_type':
'systemVerilogSource',
'is_include_file':
'true'
}]
parameters = {
'ROM_INIT_FILE': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
'PRIM_DEFAULT_IMPL': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
}
for src in srcs:
files.append({
'name': os.path.realpath(src),
'file_type': 'systemVerilogSource'
})
tool = 'vivado'
incdirs = [os.path.realpath('lowrisc_prim_assert_0.1/rtl')]
edam = {
'files': files,
'name': 'design',
'toplevel': 'top_earlgrey_zcu104',
'parameters': parameters,
'tool_options': {
'vivado': {
'part': os.environ['URAY_PART'],
'post_imp': post_imp_file,
'synth': synth_tool
}
}
}
backend = edalize.get_edatool(tool)(edam=edam, work_root=work_root)
args = [
'--ROM_INIT_FILE={}'.format(
os.path.realpath('boot_rom_fpga_nexysvideo.vmem')),
'--PRIM_DEFAULT_IMPL=prim_pkg::ImplXilinx'
]
backend.configure(args)
backend.build()
| true | true |
f7336fab8f9af014b50119e9dc329873ac910b54 | 3,205 | py | Python | main.py | Marco-Cen/cuHack | 8e84fc2f49e7ba93d424ae5297980592b2b7f68c | [
"MIT"
] | null | null | null | main.py | Marco-Cen/cuHack | 8e84fc2f49e7ba93d424ae5297980592b2b7f68c | [
"MIT"
] | null | null | null | main.py | Marco-Cen/cuHack | 8e84fc2f49e7ba93d424ae5297980592b2b7f68c | [
"MIT"
] | null | null | null | ########################################################################
# Impoorts Required for Flask Server Functionality
########################################################################
from flask import Flask, render_template, url_for, abort, redirect, request
########################################################################
# Primative and non-Primitve user-data imports for text/database
########################################################################
from data import GenericData, OrganizationEmails
from database import database
from mailing import send_message
########################################################################
# Server Settup / Socket Address Initilization and Referects
########################################################################
app = Flask(__name__)
database = database()
emails = OrganizationEmails()
# Root URL: serves the landing page, and accepts the report-submission POST
# issued by the page's form.
@app.route('/', methods=['post', 'get'])
def index():
    """Render the landing page; on POST, record and forward a new report."""
    if request.method == 'POST':
        # Collect the report fields submitted through the landing-page form.
        form = request.form
        org_name = form.get('Organization')
        street = form.get('Street Address')
        postal = form.get('PostalCode')
        nation = form.get('country')
        details = form.get('Description')
        # The database keeps the whole address in one "location" column, so
        # fold the three address parts into a single string.
        location = f"{street} {nation} {postal}"
        # Persist the report so the map view can display it later.
        database.connection_data_push(org_name, details, location, "none", "reported")
        # Email the selected organization about the anonymous report.
        send_message(emails['Demo Organization'], org_name, location, details)
    return render_template('cuhacks.html', title='GeoMap | Report an Issue')
# /report is an alias of the root URL: it serves the same landing page and
# accepts the same report-submission POST.
@app.route('/report', methods=['post', 'get'])
def home():
    """Serve the landing page under the /report alias."""
    # Delegate to the shared handler so POSTed reports are processed identically.
    return index()
# Secondary page: a live view of all ongoing reports stored on the server.
@app.route('/display')
def about():
    """Render the live-reports page."""
    page_title = 'GeoMap | Current Reports Around You'
    return render_template('pagetwo.html', title=page_title)
# When the user requests a sub-address that does not exist, show the landing
# page but keep the 404 status so clients and crawlers know the page is missing.
@app.errorhandler(404)
def handle(error):
    """Fallback for unknown URLs: landing page body with a 404 status.

    Flask passes the original ``NotFound`` exception as ``error``.
    """
    # Returning a (body, status) tuple preserves the 404; a bare
    # ``return index()`` would respond with an implicit 200.
    return index(), 404
#We only want to run this server from this file. Debug mode should be enabled
#only during the build phase, since it automatically reloads the page on commit.
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0') | 47.835821 | 91 | 0.663027 | true | true | |
f73371878d00808fee668072135c8280384ffd51 | 1,268 | py | Python | Python3/21_Merge_Two_Sorted_List.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | 1 | 2018-04-28T09:07:11.000Z | 2018-04-28T09:07:11.000Z | Python3/21_Merge_Two_Sorted_List.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | 1 | 2018-02-24T16:26:30.000Z | 2018-02-24T16:26:44.000Z | Python3/21_Merge_Two_Sorted_List.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | null | null | null | #! python3
# __author__ = "YangJiaHao"
# date: 2018/2/1
# Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, x):
        # Store the payload; a freshly built node is detached from any list.
        self.val, self.next = x, None
class Solution:
    def mergeTwoLists(self, l1, l2):
        """Iteratively merge two ascending linked lists into one.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Guard clauses (PEP 8: compare to None with ``is``): an empty list
        # means the other list already is the answer.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        # Choose the head directly (l2 wins ties, matching the loop below),
        # which avoids allocating the throwaway dummy node the old code used.
        if l2.val <= l1.val:
            head = tail = l2
            l2 = l2.next
        else:
            head = tail = l1
            l1 = l1.next
        # Repeatedly splice the smaller front node onto the growing tail.
        while l1 and l2:
            if l2.val <= l1.val:
                tail.next = l2
                l2 = l2.next
            else:
                tail.next = l1
                l1 = l1.next
            tail = tail.next
        # Attach whichever list still has nodes remaining.
        tail.next = l1 if l1 else l2
        return head
class Solution2:
    def mergeTwoLists(self, l1, l2):
        """Recursively merge two ascending linked lists into one.

        Note: recursion depth grows with the combined list length, so very
        long inputs can hit Python's recursion limit.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Base cases (PEP 8: compare to None with ``is``): when one list is
        # exhausted, the other is the merged tail.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        # Keep the smaller head (l1 wins ties) and recurse on the remainder.
        if l2.val >= l1.val:
            head = l1
            head.next = self.mergeTwoLists(l1.next, l2)
        else:
            head = l2
            head.next = self.mergeTwoLists(l1, l2.next)
        return head
if __name__ == '__main__':
pass | 22.642857 | 55 | 0.467666 |
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1, l2):
if l1 == None:
return l2
if l2 == None:
return l1
head = ListNode(0)
node = head
while l1 and l2:
if l2.val <= l1.val:
node.next = l2
l2 = l2.next
else:
node.next = l1
l1 = l1.next
node = node.next
node.next = l1 if l1 else l2
return head.next
class Solution2:
def mergeTwoLists(self, l1, l2):
if l1 == None:
return l2
if l2 == None:
return l1
if l2.val >= l1.val:
head = l1
head.next = self.mergeTwoLists(l1.next, l2)
else:
head = l2
head.next = self.mergeTwoLists(l1, l2.next)
return head
if __name__ == '__main__':
pass | true | true |
f7337218de8aef79c314ae5060aa193c2e74b80b | 16,098 | py | Python | coremltools/converters/mil/mil/ops/defs/scatter_gather.py | odedzewi/coremltools | 055d4bf9c00dee8a38258128d6599609df9ae32c | [
"BSD-3-Clause"
] | 1 | 2022-02-10T10:54:28.000Z | 2022-02-10T10:54:28.000Z | coremltools/converters/mil/mil/ops/defs/scatter_gather.py | 0xgpapad/coremltools | fdd5630c423c0fc4f1a04c3f5a3c17b808a15505 | [
"BSD-3-Clause"
] | null | null | null | coremltools/converters/mil/mil/ops/defs/scatter_gather.py | 0xgpapad/coremltools | fdd5630c423c0fc4f1a04c3f5a3c17b808a15505 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import numbers
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (
DefaultInputs,
InputSpec,
IntInputType,
IntTensorInputType,
TensorInputType,
StringInputType,
)
from coremltools.converters.mil.mil.operation import precondition
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.types.symbolic import is_compatible_symbolic_vector, is_symbolic
from coremltools.converters.mil.mil.operation import (
SYMBOL,
VALUE
)
@register_op(doc_str="")
class gather(Operation):
"""
Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,
similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
* If ``indices`` is scalar (0-D):
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.
* If ``indices`` is 1-D tensor:
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]
The output has rank ``rank(x)``.
* In general:
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
Where ``M = rank(x)``.
Parameters
----------
x: tensor<\*D,T> (Required)
indices: tensor<\*N,i32> (Required)
* Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``.
axis: const i32 (Optional. Default=``0``)
* Negative axis is supported.
Returns
-------
tensor<\*K,T>
* Where ``K = D[:axis] + N + D[axis+1:]``.
Attributes
----------
T: fp32
References
----------
See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntInputType(),
axis=IntInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
)
def __init__(self, **kwargs):
super(gather, self).__init__(**kwargs)
@precondition(allow=VALUE | SYMBOL)
def value_inference(self):
x = self.x.sym_val
indices = self.indices.val
if indices is None:
# only allow x to be symbolic. indices cannot.
return None
scalar_indices = isinstance(indices, numbers.Integral)
axis = self.axis.val
if scalar_indices:
res = np.take(x, [indices], axis)
res2 = np.squeeze(res, axis=axis)
if isinstance(res2, np.ndarray) and len(res2.shape) == 0:
# res2 is a scalar, but represented as np.array(symbol,
# dtype=np.object) which np.squeeze can't remove.
return res2.item()
return res2
return np.take(x, indices, axis)
def type_inference(self):
out_type = self.x.dtype
if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
output_rank = self.x.rank - 1 + self.indices.rank
if output_rank == 0:
# output scalar
return out_type
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.x.rank
out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
return types.tensor(out_type, out_shape)
@register_op(doc_str="")
class scatter(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
by operation ``mode``.
Example: ``mode == update``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Example: ``mode == add``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
.. math::
x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Parameters
----------
data: tensor<\*D, T> (Required)
indices: tensor<[C],T> (Required)
* 1-D tensor.
updates: tensor<\*K, T> (Required)
* ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
axis: const i32 (Optional)
* Default to ``0``.
mode: const string (Optional)
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
* Default value is ``update``.
Returns
-------
tensor<\*D, T>
* With the same type and shape as input ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
axis=IntInputType(const=True, optional=True),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
mode="add",
)
def __init__(self, **kwargs):
super(scatter, self).__init__(**kwargs)
def type_inference(self):
if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.data.rank
expected_updates_shape = (
self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
)
err = "Updates shape {} is incorrect. It should be {}.".format(self.updates.shape, expected_updates_shape)
assert is_compatible_symbolic_vector(
self.updates.shape, tuple(expected_updates_shape)
), err
return self.data.sym_type
@register_op(doc_str="")
class gather_along_axis(Operation):
"""
Take the values along ``axis`` at locations ``indices``.
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]
Parameters
----------
x: tensor<\*D, T> (Required)
indices: tensor<\*K, T> (Required)
* ``rank(indices) == rank(x)``.
axis: const i32 (Optional):
* Default to ``0``.
Returns
-------
tensor<\*D, T>:
* Output tensor has the same shape as ``indices``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntTensorInputType(),
axis=IntInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
)
def __init__(self, **kwargs):
super(gather_along_axis, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
x = self.x.val
indices = self.indices.val
axis = self.axis.val
return np.take_along_axis(x, indices, axis)
def type_inference(self):
if self.x.rank != self.indices.rank:
raise ValueError(
"Rank mismatch between input and indices. \
Input rank: {}, indices rank: {}".format(
self.x.rank, self.indices.rank
)
)
if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.x.rank
for i in range(self.x.rank):
if i != axis:
assert self.x.shape[i] == self.indices.shape[i]
return types.tensor(self.x.dtype, self.indices.shape)
@register_op(doc_str="")
class scatter_along_axis(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
by operation ``mode``.
Example: ``mode == update``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Example: ``mode == add``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
.. math::
x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Parameters
----------
data: tensor<\*D, T> (Required)
indices: tensor<\*K,T> (Required)
* ``rank(indices) == rank(data)``.
updates: tensor<\*K, T> (Required)
* Must be the same shape as ``indices``.
axis: const i32 (Optional)
* Default to ``0``.
mode: const string (Optional)
* Default to ``add``.
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
Returns
-------
tensor<\*D, T>
* With the same type and shape as input ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
axis=IntInputType(const=True, optional=True),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
mode="add",
)
def __init__(self, **kwargs):
super(scatter_along_axis, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
data = np.copy(self.data.val)
indices = self.indices.val
updates = self.updates.val
axis = self.axis.val
np_output = data
np.put_along_axis(np_output, indices, updates, axis=axis)
return np_output
def type_inference(self):
if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.data.rank
assert is_compatible_symbolic_vector(
self.indices.shape, self.updates.shape
)
assert self.data.rank == self.indices.rank
for i in range(self.data.rank):
if i != axis:
assert self.data.shape[i] == self.indices.shape[i]
return self.data.sym_type
@register_op(doc_str="")
class gather_nd(Operation):
"""
Gather slices from ``x`` according to ``indices``, similar to `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice
of ``x``:
.. math::
output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]
Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank
``rank(x) - indices.shape[-1]``.
Parameters
----------
x: tensor<\*D,T> (Required)
indices: tensor<\*K,i32> (Required)
Returns
-------
tensor<\*V,T>
* ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
Attributes
----------
T: fp32
References
----------
See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntTensorInputType(),
)
def __init__(self, **kwargs):
super(gather_nd, self).__init__(**kwargs)
def type_inference(self):
assert self.indices.shape[-1] <= self.x.rank
out_type = self.x.dtype
out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]
return types.tensor(out_type, out_shape)
@register_op(doc_str="")
class scatter_nd(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices``.
The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a
slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``
has rank ``rank(data) - indices.shape[-1]``.
* Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
the op updates ``output`` as follows:
.. math::
output[indices[i_0, ..., i_{K-2}]]= updates[indices[i_0, ..., i_{K-2}]]
* Example: ``mode == add``. The update rule is:
.. math::
output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]
Parameters
----------
data: tensor<\*D,T> (Required)
indices: tensor<\*K,i32> (Required)
updates: tensor<\*K, T> (Required)
* Must be the shape as ``K[:-1]+data.shape[K[-1]:]``.
mode: const string (Optional)
* Default to ``add``.
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
Returns
-------
tensor<\*D,T>
* A tensor with the same shape and type as ``data``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
mode="add",
)
def __init__(self, **kwargs):
super(scatter_nd, self).__init__(**kwargs)
def type_inference(self):
assert self.indices.shape[-1] <= self.data.rank
expected_updates_shape = (
self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]
)
assert is_compatible_symbolic_vector(
self.updates.shape, tuple(expected_updates_shape)
)
return self.data.sym_type
| 29.483516 | 140 | 0.531867 |
import numpy as np
import numbers
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (
DefaultInputs,
InputSpec,
IntInputType,
IntTensorInputType,
TensorInputType,
StringInputType,
)
from coremltools.converters.mil.mil.operation import precondition
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.types.symbolic import is_compatible_symbolic_vector, is_symbolic
from coremltools.converters.mil.mil.operation import (
SYMBOL,
VALUE
)
@register_op(doc_str="")
class gather(Operation):
input_spec = InputSpec(
x=TensorInputType(),
indices=IntInputType(),
axis=IntInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
)
def __init__(self, **kwargs):
super(gather, self).__init__(**kwargs)
@precondition(allow=VALUE | SYMBOL)
def value_inference(self):
x = self.x.sym_val
indices = self.indices.val
if indices is None:
return None
scalar_indices = isinstance(indices, numbers.Integral)
axis = self.axis.val
if scalar_indices:
res = np.take(x, [indices], axis)
res2 = np.squeeze(res, axis=axis)
if isinstance(res2, np.ndarray) and len(res2.shape) == 0:
return res2.item()
return res2
return np.take(x, indices, axis)
def type_inference(self):
out_type = self.x.dtype
if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
output_rank = self.x.rank - 1 + self.indices.rank
if output_rank == 0:
# output scalar
return out_type
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.x.rank
out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
return types.tensor(out_type, out_shape)
@register_op(doc_str="")
class scatter(Operation):
    """Scatter ``updates`` into a copy of ``data`` along ``axis`` at positions
    ``indices``.

    ``mode`` is a string selecting how updates combine with existing values
    (default "add"); the combination itself is applied by the backend, not here.
    The output keeps the type and shape of ``data``; ``updates`` must have shape
    data.shape[:axis] + indices.shape + data.shape[axis+1:].
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        axis=IntInputType(const=True, optional=True),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        # Default: accumulate ("add") along the outermost axis.
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def __init__(self, **kwargs):
        super(scatter, self).__init__(**kwargs)

    def type_inference(self):
        # Validate axis against [-rank, rank).
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        # Normalize a negative axis before slicing shapes.
        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank
        expected_updates_shape = (
            self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
        )
        err = "Updates shape {} is incorrect. It should be {}.".format(self.updates.shape, expected_updates_shape)
        # Symbolic-aware shape comparison: symbols are treated as compatible.
        assert is_compatible_symbolic_vector(
            self.updates.shape, tuple(expected_updates_shape)
        ), err
        # Scatter never changes the type/shape of the destination tensor.
        return self.data.sym_type
@register_op(doc_str="")
class gather_along_axis(Operation):
    """Pick one element per position from ``x`` along ``axis``, as selected by
    ``indices`` (same rank as ``x``, matching on every other dimension)."""

    input_spec = InputSpec(
        x=TensorInputType(),
        indices=IntTensorInputType(),
        axis=IntInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
        )

    def __init__(self, **kwargs):
        super(gather_along_axis, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        # NumPy implements exactly this operation.
        return np.take_along_axis(self.x.val, self.indices.val, self.axis.val)

    def type_inference(self):
        # x and indices must have identical rank.
        if self.x.rank != self.indices.rank:
            raise ValueError(
                "Rank mismatch between input and indices. \
                Input rank: {}, indices rank: {}".format(
                    self.x.rank, self.indices.rank
                )
            )

        # Validate axis against [-rank, rank).
        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        axis = self.axis.val if self.axis.val >= 0 else self.axis.val + self.x.rank
        # Every dimension except the gathered one must agree between x and indices.
        for dim, (x_size, idx_size) in enumerate(zip(self.x.shape, self.indices.shape)):
            if dim != axis:
                assert x_size == idx_size
        return types.tensor(self.x.dtype, self.indices.shape)
@register_op(doc_str="")
class scatter_along_axis(Operation):
    """Write ``updates`` into a copy of ``data`` along ``axis`` at the positions
    picked by ``indices`` — the element-wise analogue of gather_along_axis.
    ``mode`` (default "add") names the backend combination rule."""

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        axis=IntInputType(const=True, optional=True),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def __init__(self, **kwargs):
        super(scatter_along_axis, self).__init__(**kwargs)

    @precondition(allow=VALUE)
    def value_inference(self):
        # Operate on a copy so the source tensor value is never mutated.
        result = np.copy(self.data.val)
        np.put_along_axis(result, self.indices.val, self.updates.val,
                          axis=self.axis.val)
        return result

    def type_inference(self):
        # Validate axis against [-rank, rank).
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        axis = self.axis.val if self.axis.val >= 0 else self.axis.val + self.data.rank
        # indices and updates must be congruent, and both must match data on
        # every dimension other than the scatter axis.
        assert is_compatible_symbolic_vector(
            self.indices.shape, self.updates.shape
        )
        assert self.data.rank == self.indices.rank
        for dim in range(self.data.rank):
            if dim != axis:
                assert self.data.shape[dim] == self.indices.shape[dim]
        return self.data.sym_type
@register_op(doc_str="")
class gather_nd(Operation):
    """Gather slices of ``x`` addressed by the index tuples stored in the last
    dimension of ``indices``."""

    input_spec = InputSpec(
        x=TensorInputType(),
        indices=IntTensorInputType(),
    )

    def __init__(self, **kwargs):
        super(gather_nd, self).__init__(**kwargs)

    def type_inference(self):
        # Each index tuple may address at most x.rank leading dimensions.
        index_depth = self.indices.shape[-1]
        assert index_depth <= self.x.rank
        # Leading dims of indices, then the un-addressed trailing dims of x.
        out_shape = self.indices.shape[:-1] + self.x.shape[index_depth:]
        return types.tensor(self.x.dtype, out_shape)
@register_op(doc_str="")
class scatter_nd(Operation):
    """Scatter ``updates`` into a copy of ``data`` at the index tuples given by
    the last dimension of ``indices``.

    ``mode`` (default "add") names the backend combination rule. ``updates``
    must have shape indices.shape[:-1] + data.shape[indices.shape[-1]:].
    """

    input_spec = InputSpec(
        data=TensorInputType(),
        indices=IntTensorInputType(),
        updates=TensorInputType(),
        mode=StringInputType(const=True, optional=True),
    )

    def default_inputs(self):
        return DefaultInputs(
            mode="add",
        )

    def __init__(self, **kwargs):
        super(scatter_nd, self).__init__(**kwargs)

    def type_inference(self):
        # Each index tuple may address at most data.rank leading dimensions.
        assert self.indices.shape[-1] <= self.data.rank
        expected_updates_shape = (
            self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]
        )
        # Consistency fix: report the mismatch the same way `scatter` does,
        # instead of failing with a bare, message-less assert.
        err = "Updates shape {} is incorrect. It should be {}.".format(
            self.updates.shape, expected_updates_shape
        )
        assert is_compatible_symbolic_vector(
            self.updates.shape, tuple(expected_updates_shape)
        ), err
        # Scatter never changes the type/shape of the destination tensor.
        return self.data.sym_type
| true | true |
f733728277c58a28a7d19dce8537b1e9b63ec0df | 2,051 | py | Python | Golden-Miner/Python/functions.py | Henvy-Mango/Golden-Miner | 4e6ec94bafd491e3e867548736711d8ff4c0d9bf | [
"MIT"
] | 30 | 2018-04-09T03:06:26.000Z | 2022-02-24T08:33:11.000Z | Golden-Miner/Python/functions.py | Henvy-Mango/Golden-Miner | 4e6ec94bafd491e3e867548736711d8ff4c0d9bf | [
"MIT"
] | 4 | 2018-07-10T08:31:21.000Z | 2022-01-07T07:21:07.000Z | Golden-Miner/Python/functions.py | Henvy-Mango/Golden-Miner | 4e6ec94bafd491e3e867548736711d8ff4c0d9bf | [
"MIT"
] | 8 | 2018-07-25T08:38:40.000Z | 2020-01-06T06:09:34.000Z | import logging
import os
import time
def tap_screen(x, y, device_x, device_y):
    # Simulate a tap: scale coordinates from the 1920x1080 design resolution
    # to the attached device's actual screen, then tap there through adb.
    base_x, base_y = 1920, 1080
    device_pos = (int(x / base_x * device_x), int(y / base_y * device_y))
    os.system('adb shell input tap {} {}'.format(device_pos[0], device_pos[1]))
def VT_init():
    # Initialize adb for the emulator: attach to its default debug port.
    command = 'adb connect 127.0.0.1:7555'
    os.system(command)
def first_working(wait, device_x, device_y):
    # One-off setup pass: start the run, enable auto mode, then hit "repeat".
    # wait[0]/wait[1]/wait[2] are the delays (seconds) for each phase.
    logging.debug('#1 First time...')
    tap_screen(1450, 910, device_x, device_y)
    time.sleep(wait[0])
    logging.debug('#2 Auto mode active...')
    tap_screen(1780, 40, device_x, device_y)
    # Tap once per second while the round plays out.
    for _ in range(wait[1]):
        tap_screen(1720, 80, device_x, device_y)
        time.sleep(1)
    logging.debug('#3 Repeating...\n')
    tap_screen(1600, 980, device_x, device_y)
    time.sleep(wait[2])
def working(wait, device_x, device_y):
    # One farming cycle: start the run, spam-tap while waiting, then repeat.
    logging.debug('#1 Starting...')
    tap_screen(1450, 910, device_x, device_y)
    time.sleep(wait[0])
    logging.debug('#2 Waiting...')
    # Tap four times per second for wait[1] seconds.
    for _ in range(4 * wait[1]):
        tap_screen(1720, 80, device_x, device_y)
        time.sleep(0.25)
    logging.debug('#3 Repeating...\n')
    tap_screen(1600, 980, device_x, device_y)
    time.sleep(wait[2])
def main(wait, device_x, device_y):
    """Entry point: ask how many coins to farm and run the tap loop.

    wait: sequence of three phase delays (seconds) forwarded to working();
    device_x/device_y: device screen resolution used to scale taps.
    """
    # Log setup.
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%Y/%m/%d %I:%M:%S %p', level=logging.DEBUG)
    # Ask how many coins are needed (Enter defaults to the 4200 cap).
    answer = input('需要获得的金币数(回车默认刷满):')
    if answer == '':
        gain_money = 4200
    else:
        try:
            gain_money = int(answer)
        except ValueError:
            # Fix: a non-numeric answer used to crash with an uncaught
            # ValueError; treat it like any other invalid request.
            print('Error!')
            return
    # Validate the request.
    if 0 <= gain_money <= 4200:
        # Each cycle earns roughly 19 coins.
        repeat = 1 + gain_money // 19
        print('还有' + str(repeat) + '次完成\n')
        print('三秒后开始')
        print('3')
        time.sleep(1)
        print('2')
        time.sleep(1)
        print('1\n')
        time.sleep(1)
        # Initialization pass (currently disabled):
        # first_working(wait, device_x, device_y)
        # Farming loop.
        for i in range(repeat):
            logging.info('Time #{}'.format(i + 1))
            working(wait, device_x, device_y)
    else:
        print('Error!')
import os
import time
def tap_screen(x, y, device_x, device_y):
    # Map reference-resolution (1920x1080) coordinates onto the real screen
    # and issue the tap through adb.
    ref_w, ref_h = 1920, 1080
    tx = int(x / ref_w * device_x)
    ty = int(y / ref_h * device_y)
    os.system('adb shell input tap {} {}'.format(tx, ty))
def VT_init():
    # Attach adb to the emulator's default debug port.
    cmd = 'adb connect 127.0.0.1:7555'
    os.system(cmd)
def first_working(wait, device_x, device_y):
    # Initial cycle: kick off the run, switch on auto mode, then tap "repeat".
    logging.debug('#1 First time...')
    tap_screen(1450, 910, device_x, device_y)
    time.sleep(wait[0])
    logging.debug('#2 Auto mode active...')
    tap_screen(1780, 40, device_x, device_y)
    ticks = wait[1]
    # One tap per second while the round runs.
    for _ in range(ticks):
        tap_screen(1720, 80, device_x, device_y)
        time.sleep(1)
    logging.debug('#3 Repeating...\n')
    tap_screen(1600, 980, device_x, device_y)
    time.sleep(wait[2])
def working(wait, device_x, device_y):
    # One farming cycle, tapping four times per second during the wait phase.
    logging.debug('#1 Starting...')
    tap_screen(1450, 910, device_x, device_y)
    time.sleep(wait[0])
    logging.debug('#2 Waiting...')
    taps = 4 * wait[1]
    for _ in range(taps):
        tap_screen(1720, 80, device_x, device_y)
        time.sleep(0.25)
    logging.debug('#3 Repeating...\n')
    tap_screen(1600, 980, device_x, device_y)
    time.sleep(wait[2])
def main(wait, device_x, device_y):
    # Configure logging, ask for the coin target, count down, and farm.
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%Y/%m/%d %I:%M:%S %p', level=logging.DEBUG)
    answer = input('需要获得的金币数(回车默认刷满):')
    gain_money = 4200 if answer == '' else int(answer)
    if 0 <= gain_money <= 4200:
        # Roughly 19 coins per cycle.
        repeat = 1 + int(gain_money / 19)
        print('还有' + str(repeat) + '次完成\n')
        print('三秒后开始')
        # Three-second countdown before the taps begin.
        for tick in ('3', '2', '1\n'):
            print(tick)
            time.sleep(1)
        for i in range(repeat):
            logging.info('Time #{}'.format(i + 1))
            working(wait, device_x, device_y)
    else:
        print('Error!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.