hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
518ab9154fba09ef3df412ee902f6d9802099c49 | 675 | py | Python | bumps/dream/__init__.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 44 | 2015-03-28T06:48:43.000Z | 2022-01-09T11:29:00.000Z | bumps/dream/__init__.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 68 | 2015-08-21T11:28:54.000Z | 2022-03-30T22:14:13.000Z | bumps/dream/__init__.py | cyankaet/bumps | 427d077fd95f2d9a09eeb8677d045547061cff42 | [
"MIT"
] | 27 | 2015-06-22T19:25:27.000Z | 2021-06-15T18:20:06.000Z | """
Notes on random numbers
=======================
Uses dream.util.rng as the random number generator.
You can set the seed using::
dream.util.rng = numpy.random.RandomState(seed)
This interface doesn't feel right, since one instance of DREAM may
influence another if they are running within one another. Putting
the rng on the dream class may be a better option.
"""
from .model import MCMCModel
from .core import Dream
#from .core import dream
#from .initpop import * # cov_init, lhs_init
#from .model import * #
#from .state import * # load_state, save_state
#from .views import * # plotting routines
#from .util import console
#from .stats import *
| 25.961538 | 66 | 0.712593 |
48ffc30deab76747754a516f9dcf31d07e5f540f | 6,952 | py | Python | generated-libraries/python/netapp/options/__init__.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/options/__init__.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/options/__init__.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.connection import NaConnection
from options_modify_iter_info import OptionsModifyIterInfo # 3 properties
from option_info import OptionInfo # 8 properties
from options_modify_iter_key_td import OptionsModifyIterKeyTd # 2 properties
from options_get_iter_key_td import OptionsGetIterKeyTd # 2 properties
class OptionsConnection(NaConnection):
    """Connection wrapper for the ONTAP ``options`` API family.

    Every method builds a request dict and hands it to ``self.request``
    together with a response-template dict mapping each expected reply
    element to ``[decoder_type, is_list]``.  The exact API and element
    name strings are part of the wire protocol — do not edit them.

    NOTE(review): uses ``basestring``, so this module is Python 2 only.
    """
    def options_list_info(self):
        """
        Get a list of all options.
        """
        # No request parameters; the reply is a list of OptionInfo records.
        return self.request( "options-list-info", {
        }, {
            'options': [ OptionInfo, True ],
        } )
    def options_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Iterate over a list of options objects.

        :param max_records: The maximum number of records to return in this
            call.  Default: 20
        :param query: A query that specifies which objects to return.  A query
            could be specified on any number of attributes in the options
            object.  All options objects matching this query up to
            'max-records' will be returned.
        :param tag: Specify the tag from the last call.  It is usually not
            specified for the first call.  For subsequent calls, copy values
            from the 'next-tag' obtained from the previous call.
        :param desired_attributes: Specify the attributes that should be
            returned.  If not present, all attributes for which information
            is available will be returned.  If present, only the desired
            attributes for which information is available will be returned.
        """
        return self.request( "options-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ OptionInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ OptionInfo, 'None' ], False ],
        }, {
            'attributes-list': [ OptionInfo, True ],
        } )
    def options_modify_iter(self, query, attributes, max_failure_count=None, max_records=None, return_success_list=None, tag=None, continue_on_failure=None, return_failure_list=None):
        """
        Modify the attributes of options or a group of options objects.

        :param query: If modifying a specific options, this input element must
            specify all keys.  If modifying options objects based on query,
            this input element must specify a query.
        :param attributes: Specify at least one modifiable element.  Do not
            specify any other element.
        :param max_failure_count: When allowing failures
            ('continue-on-failure' is set to true), then this input element
            may be provided to limit the number of failed modify operations
            before the server gives up and returns.  If set, the API will
            continue modifying the next matching options even when the
            modification of a previous matching options fails, and do so
            until the total number of objects failed to be modified reaches
            the maximum specified.  If set to the maximum or not provided,
            then there will be no limit on the number of failed modify
            operations.  Only applicable if 'continue-on-failure' is set to
            true.  Default: 2^32-1
        :param max_records: The maximum number of objects to be modified in
            this call.  Default: 20
        :param return_success_list: If set to true, the API will return the
            list of options objects (just keys) that were successfully
            updated.  If set to false, the list of options objects modified
            will not be returned.  Default: true
        :param tag: Specify the tag from the last call.  It is usually not
            specified for the first call.  For subsequent calls, copy values
            from the 'next-tag' obtained from the previous call.
        :param continue_on_failure: This input element is useful when multiple
            options objects match a given query.  If set to true, the API
            will continue modifying the next matching options even when
            modification of a previous options fails.  If set to false, the
            API will return on the first failure.  Default: false
        :param return_failure_list: If set to true, the API will return the
            list of options objects (just keys) that were not modified due
            to some error.  If set to false, the list of options objects not
            modified will not be returned.  Default: true
        """
        return self.request( "options-modify-iter", {
            'max_failure_count': [ max_failure_count, 'max-failure-count', [ int, 'None' ], False ],
            'max_records': max_records,
            'return_success_list': [ return_success_list, 'return-success-list', [ bool, 'None' ], False ],
            'tag': tag,
            'continue_on_failure': [ continue_on_failure, 'continue-on-failure', [ bool, 'None' ], False ],
            'return_failure_list': [ return_failure_list, 'return-failure-list', [ bool, 'None' ], False ],
            'query': [ query, 'query', [ OptionInfo, 'None' ], False ],
            'attributes': [ attributes, 'attributes', [ OptionInfo, 'None' ], False ],
        }, {
            'num-succeeded': [ int, False ],
            'num-failed': [ int, False ],
            'success-list': [ OptionsModifyIterInfo, True ],
            'failure-list': [ OptionsModifyIterInfo, True ],
        } )
    def options_set(self, name, value):
        """
        Set the value of a single option.  For complete list of names and
        values of options, please refer to options man pages.

        :param name: Name of the option.
        :param value: Value of the option.
        """
        return self.request( "options-set", {
            'name': [ name, 'name', [ basestring, 'None' ], False ],
            'value': [ value, 'value', [ basestring, 'None' ], False ],
        }, {
            'message': [ basestring, False ],
            'cluster-constraint': [ basestring, False ],
        } )
    def options_get(self, name):
        """
        Get the value of a single option.

        :param name: Name of the option.
        """
        return self.request( "options-get", {
            'name': [ name, 'name', [ basestring, 'None' ], False ],
        }, {
            'cluster-constraint': [ basestring, False ],
            'value': [ basestring, False ],
        } )
| 46.657718 | 183 | 0.587169 |
4115bd180d7eaaec50fd43055816726e06576ee6 | 8,790 | py | Python | mitmproxy/tools/console/options.py | ravitejavalluri/mproxy | ccd8e1e617cdbf3b34ec01ac130093396197101f | [
"MIT"
] | null | null | null | mitmproxy/tools/console/options.py | ravitejavalluri/mproxy | ccd8e1e617cdbf3b34ec01ac130093396197101f | [
"MIT"
] | 1 | 2021-05-09T11:18:14.000Z | 2021-05-09T11:18:14.000Z | mitmproxy/tools/console/options.py | ravitejavalluri/mproxy | ccd8e1e617cdbf3b34ec01ac130093396197101f | [
"MIT"
] | 1 | 2020-11-07T08:54:29.000Z | 2020-11-07T08:54:29.000Z | import urwid
from mitmproxy import contentviews
from mitmproxy.tools.console import common
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import select
from mitmproxy.tools.console import signals
from mitmproxy.addons import replace
from mitmproxy.addons import setheaders
footer = [
('heading_key', "enter/space"), ":toggle ",
('heading_key', "C"), ":clear all ",
('heading_key', "W"), ":save ",
]
def _mkhelp():
    """Build the formatted help entries for the options window."""
    key_bindings = [
        ("enter/space", "activate option"),
        ("C", "clear all options"),
        ("w", "save options"),
    ]
    return list(
        common.format_keyvals(key_bindings, key="key", val="text", indent=4)
    )
help_context = _mkhelp()
def checker(opt, options):
    """Return a nullary callable reporting whether *opt* has changed.

    The returned closure defers to ``options.has_changed(opt)`` each time
    it is invoked, so it always reflects the current option state.
    """
    def _has_changed():
        return options.has_changed(opt)
    return _has_changed
class Options(urwid.WidgetWrap):
    """Urwid widget presenting mitmproxy's toggleable options.

    The widget is a ``select.Select`` list of headed sections.  Each
    ``select.Option`` entry pairs a display name and hotkey with a
    ``checker(...)`` callable (is the option changed from its default?)
    and an activation callback — either a toggler for booleans or one of
    the editor-launching methods below for structured options.
    """
    def __init__(self, master):
        self.master = master
        self.lb = select.Select(
            [
                select.Heading("Traffic Manipulation"),
                select.Option(
                    "Header Set Patterns",
                    "H",
                    checker("setheaders", master.options),
                    self.setheaders
                ),
                select.Option(
                    "Ignore Patterns",
                    "I",
                    checker("ignore_hosts", master.options),
                    self.ignore_hosts
                ),
                select.Option(
                    "Replacement Patterns",
                    "R",
                    checker("replacements", master.options),
                    self.replacepatterns
                ),
                select.Option(
                    "Scripts",
                    "S",
                    checker("scripts", master.options),
                    self.scripts
                ),
                select.Heading("Interface"),
                select.Option(
                    "Default Display Mode",
                    "M",
                    checker("default_contentview", master.options),
                    self.default_displaymode
                ),
                select.Option(
                    "Palette",
                    "P",
                    checker("console_palette", master.options),
                    self.palette
                ),
                select.Option(
                    "Show Host",
                    "w",
                    checker("showhost", master.options),
                    master.options.toggler("showhost")
                ),
                select.Heading("Network"),
                select.Option(
                    "No Upstream Certs",
                    "U",
                    checker("no_upstream_cert", master.options),
                    master.options.toggler("no_upstream_cert")
                ),
                select.Option(
                    "TCP Proxying",
                    "T",
                    checker("tcp_hosts", master.options),
                    self.tcp_hosts
                ),
                select.Option(
                    "Don't Verify SSL/TLS Certificates",
                    "V",
                    checker("ssl_insecure", master.options),
                    master.options.toggler("ssl_insecure")
                ),
                select.Heading("Utility"),
                select.Option(
                    "Anti-Cache",
                    "a",
                    checker("anticache", master.options),
                    master.options.toggler("anticache")
                ),
                select.Option(
                    "Anti-Compression",
                    "o",
                    checker("anticomp", master.options),
                    master.options.toggler("anticomp")
                ),
                select.Option(
                    "Kill Extra",
                    "x",
                    checker("replay_kill_extra", master.options),
                    master.options.toggler("replay_kill_extra")
                ),
                select.Option(
                    "No Refresh",
                    "f",
                    checker("refresh_server_playback", master.options),
                    master.options.toggler("refresh_server_playback")
                ),
                select.Option(
                    "Sticky Auth",
                    "A",
                    checker("stickyauth", master.options),
                    self.sticky_auth
                ),
                select.Option(
                    "Sticky Cookies",
                    "t",
                    checker("stickycookie", master.options),
                    self.sticky_cookie
                ),
            ]
        )
        title = urwid.Text("Options")
        title = urwid.Padding(title, align="left", width=("relative", 100))
        title = urwid.AttrWrap(title, "heading")
        w = urwid.Frame(
            self.lb,
            header = title
        )
        super().__init__(w)
        self.master.loop.widget.footer.update("")
        # Redraw whenever settings change, from either signal source.
        signals.update_settings.connect(self.sig_update_settings)
        master.options.changed.connect(self.sig_update_settings)
    def sig_update_settings(self, sender, updated=None):
        # Mark the list walker dirty so urwid repaints changed checkmarks.
        self.lb.walker._modified()
    def keypress(self, size, key):
        """Handle the screen-level shortcuts; delegate everything else."""
        if key == "C":
            self.clearall()
            return None
        if key == "W":
            self.save()
            return None
        return super().keypress(size, key)
    def do_save(self, path):
        # Callback for the save prompt; returns the status-bar message.
        self.master.options.save(path)
        return "Saved"
    def save(self):
        signals.status_prompt_path.send(
            prompt = "Save options to file",
            callback = self.do_save
        )
    def clearall(self):
        """Reset every option to its default and notify the UI."""
        self.master.options.reset()
        signals.update_settings.send(self)
        signals.status_message.send(
            message = "Options cleared",
            expire = 1
        )
    def setheaders(self):
        # Options may hold raw strings or already-parsed tuples; normalize
        # to parsed form before handing them to the grid editor.
        data = []
        for d in self.master.options.setheaders:
            if isinstance(d, str):
                data.append(setheaders.parse_setheader(d))
            else:
                data.append(d)
        self.master.view_grideditor(
            grideditor.SetHeadersEditor(
                self.master,
                data,
                self.master.options.setter("setheaders")
            )
        )
    def tcp_hosts(self):
        self.master.view_grideditor(
            grideditor.HostPatternEditor(
                self.master,
                self.master.options.tcp_hosts,
                self.master.options.setter("tcp_hosts")
            )
        )
    def ignore_hosts(self):
        self.master.view_grideditor(
            grideditor.HostPatternEditor(
                self.master,
                self.master.options.ignore_hosts,
                self.master.options.setter("ignore_hosts")
            )
        )
    def replacepatterns(self):
        # Same string-vs-parsed normalization as setheaders above.
        data = []
        for d in self.master.options.replacements:
            if isinstance(d, str):
                data.append(replace.parse_hook(d))
            else:
                data.append(d)
        self.master.view_grideditor(
            grideditor.ReplaceEditor(
                self.master,
                data,
                self.master.options.setter("replacements")
            )
        )
    def scripts(self):
        def edit_scripts(scripts):
            # Grid editor rows are single-element lists; unwrap them.
            self.master.options.scripts = [x[0] for x in scripts]
        self.master.view_grideditor(
            grideditor.ScriptEditor(
                self.master,
                [[i] for i in self.master.options.scripts],
                edit_scripts
            )
        )
    def default_displaymode(self):
        signals.status_prompt_onekey.send(
            prompt = "Global default display mode",
            keys = contentviews.view_prompts,
            callback = self.change_default_display_mode
        )
    def change_default_display_mode(self, t):
        # t is the one-key shortcut chosen in the prompt.
        v = contentviews.get_by_shortcut(t)
        self.master.options.default_contentview = v.name
        if self.master.view.focus.flow:
            signals.flow_change.send(self, flow = self.master.view.focus.flow)
    def sticky_auth(self):
        signals.status_prompt.send(
            prompt = "Sticky auth filter",
            text = self.master.options.stickyauth,
            callback = self.master.options.setter("stickyauth")
        )
    def sticky_cookie(self):
        signals.status_prompt.send(
            prompt = "Sticky cookie filter",
            text = self.master.options.stickycookie,
            callback = self.master.options.setter("stickycookie")
        )
    def palette(self):
        self.master.view_palette_picker()
| 31.281139 | 78 | 0.487144 |
959ea78f8a73d45b9b610b743b68bb90db61ed65 | 6,697 | py | Python | tests/test_examples.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | 3 | 2018-05-03T05:08:56.000Z | 2021-09-29T12:54:07.000Z | tests/test_examples.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | null | null | null | tests/test_examples.py | SimeonSimjanovski/RP2018-19 | 10d548a28ab5883666b9fdf9f838665c384d86a4 | [
"MIT"
] | null | null | null | '''
Unit test for all of the example scripts provided in the examples folder.
'''
from __future__ import absolute_import, division, print_function
import os
import unittest
import mloop.testing as mlt
import mloop.launchers as mll
import mloop.utilities as mlu
import logging
import numpy as np
import multiprocessing as mp
class TestExamples(unittest.TestCase):
    """Run every example configuration file through the M-LOOP launcher.

    Each test launches a controller from one of the files in
    ``examples/`` against the built-in 'test' interface (a fake
    experiment), then checks that the optimizer drove the cost below its
    target.  ``override_dict`` quiets logging and disables plots for all
    tests.
    """
    @classmethod
    def setUpClass(cls):
        # Relative paths inside the config files assume the tests directory.
        os.chdir(mlu.mloop_path + '/../tests')
        cls.override_dict = {'file_log_level':logging.WARNING,'console_log_level':logging.DEBUG,'visualizations':False}
    @classmethod
    def tearDownClass(cls):
        pass
    def test_controller_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/controller_config.txt',
                                          interface_type = 'test',
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_extras_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/extras_config.txt',
                                          num_params=1,
                                          min_boundary = [-1.0],
                                          max_boundary = [1.0],
                                          target_cost = 0.1,
                                          interface_type = 'test',
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_logging_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/logging_config.txt',
                                          num_params=1,
                                          min_boundary = [-1.0],
                                          max_boundary = [1.0],
                                          target_cost = 0.1,
                                          interface_type = 'test',
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_random_simple_config(self):
        # Random search has no convergence guarantee, so no cost assertion.
        _ = mll.launch_from_file(mlu.mloop_path+'/../examples/random_simple_config.txt',
                                 interface_type = 'test',
                                 **self.override_dict)
    def test_random_complete_config(self):
        _ = mll.launch_from_file(mlu.mloop_path+'/../examples/random_complete_config.txt',
                                 interface_type = 'test',
                                 **self.override_dict)
    def test_nelder_mead_simple_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_simple_config.txt',
                                          interface_type = 'test',
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_nelder_mead_complete_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_complete_config.txt',
                                          interface_type = 'test',
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_differential_evolution_simple_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/differential_evolution_simple_config.txt',
                                          interface_type = 'test',
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_differential_evolution_complete_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/differential_evolution_complete_config.txt',
                                          interface_type = 'test',
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_gaussian_process_simple_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_simple_config.txt',
                                          interface_type = 'test',
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_gaussian_process_complete_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_complete_config.txt',
                                          interface_type = 'test',
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def test_tutorial_config(self):
        # The tutorial config talks to a separate experiment process, so a
        # FakeExperiment thread is spun up for it to interact with.
        fake_experiment = mlt.FakeExperiment()
        fake_experiment.start()
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/tutorial_config.txt',
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
        fake_experiment.end_event.set()
        fake_experiment.join()
    def test_file_interface_config(self):
        fake_experiment = mlt.FakeExperiment()
        fake_experiment.start()
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/file_interface_config.txt',
                                          num_params=1,
                                          target_cost = 0.1,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
        fake_experiment.end_event.set()
        fake_experiment.join()
    def test_shell_interface_config(self):
        controller = mll.launch_from_file(mlu.mloop_path+'/../examples/shell_interface_config.txt',
                                          num_params=1,
                                          target_cost = 0.1,
                                          no_delay = False,
                                          **self.override_dict)
        self.asserts_for_cost_and_params(controller)
    def asserts_for_cost_and_params(self,controller):
        # The fake experiment's cost is the squared norm of the parameters,
        # so both the reported cost and the recomputed cost must be under
        # the target for a successful run.
        self.assertTrue(controller.best_cost<=controller.target_cost)
        self.assertTrue(np.sum(np.square(controller.best_params))<=controller.target_cost)
if __name__ == "__main__":
    # freeze_support() is required on Windows when the tests spawn
    # multiprocessing children (the fake experiment processes).
    mp.freeze_support()
    unittest.main()
71b9ca6d5a1f706e6ed3ff7bf70684f9f67d137e | 289 | py | Python | razorpay_integration/razorpay_integration/doctype/razorpay_log/test_razorpay_log.py | frappe/rajorpay_integration | d6a2efb84f7b5f3914816e8b66bc24f92dafbc97 | [
"MIT"
] | 10 | 2016-05-03T13:22:40.000Z | 2022-03-11T17:01:52.000Z | razorpay_integration/razorpay_integration/doctype/razorpay_log/test_razorpay_log.py | frappe/rajorpay_integration | d6a2efb84f7b5f3914816e8b66bc24f92dafbc97 | [
"MIT"
] | 2 | 2020-12-28T07:01:37.000Z | 2021-06-30T06:54:24.000Z | razorpay_integration/razorpay_integration/doctype/razorpay_log/test_razorpay_log.py | frappe/razorpay_integration | d6a2efb84f7b5f3914816e8b66bc24f92dafbc97 | [
"MIT"
] | 17 | 2016-04-20T06:23:16.000Z | 2022-03-15T07:23:18.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Razorpay Log')
class TestRazorpayLog(unittest.TestCase):
    # Placeholder test case for the Razorpay Log doctype; no tests yet.
    pass
| 22.230769 | 68 | 0.771626 |
f69909d05bf8a375abb7622bbc544ce532c0168c | 7,665 | py | Python | dataset/container.py | rubenve95/Deep-Cuboid-Detection | c4984ccf1edf5c01bd3538ca49ba863cbfa9b7d0 | [
"MIT"
] | 5 | 2020-12-16T03:00:16.000Z | 2022-01-14T07:41:58.000Z | dataset/container.py | rubenve95/Deep-Cuboid-Detection | c4984ccf1edf5c01bd3538ca49ba863cbfa9b7d0 | [
"MIT"
] | 1 | 2021-08-12T09:18:30.000Z | 2021-08-12T09:18:30.000Z | dataset/container.py | rubenve95/Deep-Cuboid-Detection | c4984ccf1edf5c01bd3538ca49ba863cbfa9b7d0 | [
"MIT"
] | 1 | 2021-09-28T02:51:39.000Z | 2021-09-28T02:51:39.000Z | import json
import os
import pickle
import random
from typing import List, Tuple, Dict
import torch
import torch.utils.data.dataset
from PIL import Image, ImageOps
from torch import Tensor
from torchvision.datasets import CocoDetection
from tqdm import tqdm
from torch.nn import functional as F
import numpy as np
from voc_eval import voc_ap
from bbox import BBox
from dataset.base import Base
from io import StringIO
import sys
class Container(Base):
    """Dataset of container images annotated with cuboid bounding boxes
    and 8 projected cuboid vertices per object.

    Images live under ``<data_dir>/container/images/<video_folder>/`` and
    per-folder annotations under ``<data_dir>/container/vertices/<video_folder>.json``.
    """
    class Annotation(object):
        # Lightweight record types for an annotated image; not used by the
        # loading path below, kept for API parity with other datasets.
        class Object(object):
            def __init__(self, bbox: BBox, label: int):
                super().__init__()
                self.bbox = bbox
                self.label = label
            def __repr__(self) -> str:
                return 'Object[label={:d}, bbox={!s}]'.format(
                    self.label, self.bbox)
        def __init__(self, filename: str, objects: List[Object]):
            super().__init__()
            self.filename = filename
            self.objects = objects
    CATEGORY_TO_LABEL_DICT = {'Non-Cuboid': 0, 'Cuboid': 1}
    LABEL_TO_CATEGORY_DICT = {v: k for k, v in CATEGORY_TO_LABEL_DICT.items()}
    def __init__(self, path_to_data_dir: str, mode: Base.Mode, image_min_side: float, image_max_side: float, val_folder='MVI_3015.MP4'):
        """Index all image paths and load per-folder JSON annotations.

        TRAIN mode uses every folder except *val_folder*; EVAL mode uses
        only *val_folder*.  Aspect ratios are precomputed for the sampler.
        """
        super().__init__(path_to_data_dir, mode, image_min_side, image_max_side)
        base_path = os.path.join(self._path_to_data_dir, 'container')
        self.images_base = os.path.join(base_path, 'images')
        self.annotations_base = os.path.join(base_path, 'vertices')
        if self._mode == Container.Mode.TRAIN:
            train_folders = os.listdir(self.images_base)
            train_folders.remove(val_folder)
            self.image_folders = train_folders
        elif self._mode == Container.Mode.EVAL:
            self.image_folders = [val_folder]
        else:
            raise ValueError('invalid mode')
        print(self.image_folders)
        self.annotations = {}
        self.img_paths = []
        for img_folder in self.image_folders:
            new_imgs = os.listdir(os.path.join(self.images_base, img_folder))
            self.img_paths.extend([os.path.join(img_folder, x) for x in new_imgs])
            with open(os.path.join(self.annotations_base, img_folder + '.json')) as f:
                self.annotations[img_folder] = json.load(f)
        # NOTE(review): this opens every image once up front just to read its
        # size — slow for large datasets.
        self._image_ratios = []
        for img_path in self.img_paths:
            image = Image.open(os.path.join(self.images_base, img_path)).convert('RGB')  # for some grayscale images
            ratio = float(image.width / image.height)
            self._image_ratios.append(ratio)
    def __len__(self) -> int:
        return len(self.img_paths)
    def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
        """Return (path, image, scale, bboxes, labels, vertices) for one image.

        Images with no annotated objects get a single all-zero box/vertex
        placeholder with label 0.  In TRAIN mode the image is mirrored with
        probability 0.5 and the box/vertex coordinates are flipped with it.
        """
        image_path = self.img_paths[index]
        image_name = image_path.split('/')[-1]
        image_folder = image_path.split('/')[0]
        annotation = self.annotations[image_folder][image_name]
        bboxes = [obj['bbox'] for obj in annotation]
        vertices = [obj['vertices'] for obj in annotation]
        # Label 1 ("Cuboid") for every object; a lone label 0 when empty.
        labels = [0] if len(bboxes) == 0 else [1]*len(bboxes)
        if labels[-1] == 0:
            bboxes = torch.zeros((1,4), dtype=torch.float)
            vertices = torch.zeros((1,2,8), dtype=torch.float)
        else:
            bboxes = torch.tensor(bboxes, dtype=torch.float)
            vertices = torch.tensor(vertices, dtype=torch.float)
        labels = torch.tensor(labels, dtype=torch.long)
        image = Image.open(os.path.join(self.images_base, image_path)).convert('RGB')  # for some grayscale images
        # random flip on only training mode
        if self._mode == Container.Mode.TRAIN and random.random() > 0.5:
            image = ImageOps.mirror(image)
            if labels[-1] == 1:
                bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively
                vertices[:,0,:] = image.width - vertices[:,0,:]  # Might need to check the correctness here
                # Re-order the 8 vertices after mirroring so the vertex
                # numbering convention is preserved (swap in pairs based on
                # the x-coordinates of vertices 2 and 6).
                # NOTE(review): the exact convention is not documented here;
                # presumably fixed vertex ordering around the cuboid — verify
                # against the annotation tool.
                for i,vert in enumerate(vertices):
                    if vert[0,2] < vert[0,6]:
                        vertex_copy = vert.clone()
                        vertices[i,:,0] = vertex_copy[:,2]
                        vertices[i,:,1] = vertex_copy[:,3]
                        vertices[i,:,2] = vertex_copy[:,0]
                        vertices[i,:,3] = vertex_copy[:,1]
                        vertices[i,:,4] = vertex_copy[:,6]
                        vertices[i,:,5] = vertex_copy[:,7]
                        vertices[i,:,6] = vertex_copy[:,4]
                        vertices[i,:,7] = vertex_copy[:,5]
                    else:
                        vertex_copy = vert.clone()
                        vertices[i,:,0] = vertex_copy[:,6]
                        vertices[i,:,1] = vertex_copy[:,7]
                        vertices[i,:,6] = vertex_copy[:,0]
                        vertices[i,:,7] = vertex_copy[:,1]
        image, scale = Container.preprocess(image, self._image_min_side, self._image_max_side)
        scale = torch.tensor(scale, dtype=torch.float)
        if bboxes.nelement() > 0:
            bboxes *= scale
        vertices *= scale
        return image_path, image, scale, bboxes, labels, vertices
    def evaluate(self, all_image_ids, all_detection_bboxes, all_detection_vertices, metric='apk'):
        """Compute average precision over detections at IoU > 0.5.

        NOTE(review): only ``metric == 'ap'`` is implemented; any other
        value (including the default 'apk') returns None.  Detections are
        assumed already sorted by confidence — ``y_scores`` is computed but
        never used.
        """
        if metric == 'ap':
            # Total number of ground-truth objects across the dataset.
            npos = 0
            for img_folder in self.annotations:
                for img_name in self.annotations[img_folder]:
                    npos += len(self.annotations[img_folder][img_name])
            tp = np.zeros(len(all_image_ids))
            fp = np.zeros(len(all_image_ids))
            y_scores = torch.ones(len(all_image_ids))
            for i,(img_id, bbox) in enumerate(zip(all_image_ids, all_detection_bboxes)):
                image_name = img_id.split('/')[-1]
                image_folder = img_id.split('/')[0]
                gt_bboxes = [obj['bbox'] for obj in self.annotations[image_folder][image_name]]
                if len(gt_bboxes) == 0:
                    fp[i] = 1
                    continue
                gt_bboxes = torch.Tensor(gt_bboxes)
                bbox = torch.Tensor(bbox).unsqueeze(0)
                ious = BBox.iou(bbox, gt_bboxes)
                max_ious, _ = ious.max(dim=2)
                # NOTE(review): a ground-truth box can be matched by multiple
                # detections; duplicates are not counted as false positives.
                if max_ious.item() > 0.5:
                    tp[i] = 1
                else:
                    fp[i] = 1
            fp = np.cumsum(fp)
            tp = np.cumsum(tp)
            rec = tp / float(npos)
            # avoid divide by zero in case the first detection matches a difficult
            # ground truth
            prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
            score = voc_ap(rec, prec)
            return score, metric
    def pck(self, all_detection_vertices, gt_vertices, gt_bbox):
        """Percentage of Correct Keypoints: a vertex counts as correct when
        its L2 distance to ground truth is under 10% of the larger box side.

        NOTE(review): raises ZeroDivisionError when the input lists are
        empty — callers must guard against that.
        """
        pck = []
        for vert,gt_vert,bbox in zip(all_detection_vertices, gt_vertices, gt_bbox):
            width = bbox[2] - bbox[0]
            height = bbox[3] - bbox[1]
            max_dim = max(width,height)
            vert_tensor = torch.Tensor(vert)
            gt_vert_tensor = torch.Tensor(gt_vert)
            t_dist = [torch.norm(vert_tensor[:,j] - gt_vert_tensor[:,j], 2) for j in range(8)]
            nb_correct = sum([d.item() < 0.1*max_dim for d in t_dist])
            pck.append(float(nb_correct/8))
        pck = float(sum(pck)/len(pck))
        return pck, 'pck'
    @property
    def image_ratios(self) -> List[float]:
        # Width/height ratio per indexed image, in img_paths order.
        return self._image_ratios
    @staticmethod
    def num_classes() -> int:
        # Background ("Non-Cuboid") plus "Cuboid".
        return 2
c116db8a51b7b84a83c19d3c965b3dd9040062fa | 1,920 | py | Python | src/archive/LSTM/LSTMAE30width.py | RiceD2KLab/TCH_CardiacSignals_F20 | ea6e84703086ddb7bfc5ba164aa67acdc9e78b7d | [
"BSD-2-Clause"
] | 1 | 2022-01-27T07:03:20.000Z | 2022-01-27T07:03:20.000Z | src/archive/LSTM/LSTMAE30width.py | RiceD2KLab/TCH_CardiacSignals_F20 | ea6e84703086ddb7bfc5ba164aa67acdc9e78b7d | [
"BSD-2-Clause"
] | null | null | null | src/archive/LSTM/LSTMAE30width.py | RiceD2KLab/TCH_CardiacSignals_F20 | ea6e84703086ddb7bfc5ba164aa67acdc9e78b7d | [
"BSD-2-Clause"
] | null | null | null | import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
def create_model(X):
    """Train an LSTM autoencoder on X and save it to Working_Data/lstm_model.

    :param X: training sequences of shape (n_samples, timesteps, features);
        the model reconstructs its own input (X is both input and target).

    Side effects: saves the trained model, writes the loss curve to
    Working_Data/lstm_loss.png, shows the plot, and prints the loss history.
    """
    # Encoder: compress each sequence into a single 30-d state vector.
    model = Sequential()
    model.add(LSTM(30, input_shape=(X.shape[1], X.shape[2])))
    model.add(Dropout(rate=0.2))
    # Decoder: repeat the code across time and reconstruct the features.
    model.add(RepeatVector(X.shape[1]))
    model.add(LSTM(30, return_sequences=True))
    model.add(Dropout(rate=0.2))
    model.add(TimeDistributed(Dense(X.shape[2])))
    model.compile(optimizer='adam', loss='mse')
    model.summary()
    # Early stopping on training loss; up to 100 epochs, batch size 1.
    history = model.fit(X, X, epochs=100, batch_size=1, validation_split=0.1,
                        callbacks=[keras.callbacks.EarlyStopping(monitor='loss', patience=3, mode='min')],
                        shuffle=False)
    model.save('Working_Data/lstm_model')
    # model.predict(X[0:10, :])
    # plot the loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig("Working_Data/lstm_loss.png")
    plt.show()
    print("loss of the model is: ")
    print(history.history['loss'])
def create_sequences(data, time_steps=5):
    """Turn an array of fixed-size heartbeats into overlapping LSTM sequences.

    Generalized from the original, which hard-coded the heartbeat shape
    (100, 4); the dimensions are now derived from ``data`` itself, and the
    window length is a parameter (default 5 preserves the old behavior).

    :param data: array of shape (n_beats, beat_len, n_features)
    :param time_steps: number of consecutive beats per input sequence
    :returns: tuple (X, y) where X has shape
        (n_beats - time_steps, beat_len * time_steps, n_features) — each
        window of beats flattened along time — and y has shape
        (n_beats - time_steps, beat_len, n_features), the beat following
        each window.  Both are empty when n_beats <= time_steps.
    """
    n_beats, beat_len, n_features = data.shape
    Xs, ys = [], []
    for i in range(n_beats - time_steps):
        Xs.append(data[i:(i + time_steps)].reshape(beat_len * time_steps, n_features))
        # data[i + time_steps] already has shape (beat_len, n_features);
        # the original's reshape was a no-op and is dropped.
        ys.append(data[i + time_steps])
    return np.array(Xs), np.array(ys)
# Script entry: load the first patient's normalized heartbeats, build
# training sequences, and run a saved model over a few of them.
data = np.load(os.path.join("Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(1) + ".npy"))
# Limit to the first 1000 heartbeats to keep the run small.
data = data[0:1000, :, :]
# print(data[0:10].reshape(10000,4).shape)
X, y = create_sequences(data)
print(X.shape, y.shape)
# create_model(X)
model = keras.models.load_model('Working_Data/lstm_model')
# NOTE(review): X is already sequenced; re-applying create_sequences to a
# slice of it looks unintended — confirm whether `data` was meant here.
model.predict(create_sequences(X[0:5, :, :])[0])
| 29.538462 | 106 | 0.666146 |
73168c4e742c1777947956a2023dd14a1ff4d5f3 | 57,104 | py | Python | nova/virt/vmwareapi/vm_util.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | nova/virt/vmwareapi/vm_util.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | nova/virt/vmwareapi/vm_util.py | nkrinner/nova | 1372397d4f5f8c155af6f1f4ab5dc68be00c9c01 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
import functools
from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet']
DSRecord = collections.namedtuple(
'DSRecord', ['datastore', 'name', 'capacity', 'freespace'])
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
    """Store (or overwrite) the VM reference for *id* in the cache."""
    _VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
    """Return the cached VM reference for *id*, or None on a cache miss."""
    return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
    """Return the VM ref cached under *id*, calling func(session, data) on a
    miss and caching the freshly computed reference before returning it.
    """
    cached = vm_ref_cache_get(id)
    if cached:
        return cached
    cached = func(session, data)
    vm_ref_cache_update(id, cached)
    return cached
def vm_ref_cache_from_instance(func):
    """Decorator that caches VM reference lookups keyed by instance['uuid']."""
    @functools.wraps(func)
    def wrapper(session, instance):
        return _vm_ref_cache(instance['uuid'], func, session, instance)
    return wrapper
def vm_ref_cache_from_name(func):
    """Decorator that caches VM reference lookups keyed by the VM name."""
    @functools.wraps(func)
    def wrapper(session, name):
        return _vm_ref_cache(name, func, session, name)
    return wrapper
# The extraConfig property path under which a VM's VNC port is stored; used
# both when scanning for allocated ports and when configuring VNC on a VM.
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def get_vm_create_spec(client_factory, instance, name, data_store_name,
                       vif_infos, os_type="otherGuest"):
    """Build and return the VirtualMachineConfigSpec used to create a VM.

    :param client_factory: factory used to instantiate SOAP objects
    :param instance: dict providing 'vcpus', 'memory_mb' and 'uuid'
    :param name: unique VM name; the instance UUID, or UUID-rescue for a
                 rescue VM
    :param data_store_name: datastore that will hold the VM's files
    :param vif_infos: list of vif-info dicts, one per network adapter
    :param os_type: guest OS identifier understood by vSphere
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.name = name
    config_spec.guestId = os_type
    # The VM name doubles as the instance UUID so the VM can later be found
    # through the UUID search index.
    config_spec.instanceUuid = name

    # Allow nested ESX instances to host 64 bit VMs.
    if os_type == "vmkernel5Guest":
        config_spec.nestedHVEnabled = "True"

    vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    vm_file_info.vmPathName = "[" + data_store_name + "]"
    config_spec.files = vm_file_info

    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    tools_info.afterPowerOn = True
    tools_info.afterResume = True
    tools_info.beforeGuestStandby = True
    tools_info.beforeGuestShutdown = True
    tools_info.beforeGuestReboot = True
    config_spec.tools = tools_info

    config_spec.numCPUs = int(instance['vcpus'])
    config_spec.memoryMB = int(instance['memory_mb'])

    config_spec.deviceChange = [create_network_spec(client_factory, vif_info)
                                for vif_info in vif_infos]

    # Neutron needs the instance UUID plus one nvp.iface-id.N entry per VIF
    # that carries a port id; the index only advances for such VIFs.
    extra_config = []
    opt = client_factory.create('ns0:OptionValue')
    opt.key = "nvp.vm-uuid"
    opt.value = instance['uuid']
    extra_config.append(opt)

    port_index = 0
    for vif_info in vif_infos:
        if vif_info['iface_id']:
            opt = client_factory.create('ns0:OptionValue')
            opt.key = "nvp.iface-id.%d" % port_index
            opt.value = vif_info['iface_id']
            extra_config.append(opt)
            port_index += 1

    config_spec.extraConfig = extra_config
    return config_spec
def get_vm_resize_spec(client_factory, instance):
    """Build a config spec carrying the resized vCPU and memory values."""
    resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    resize_spec.numCPUs = int(instance['vcpus'])
    resize_spec.memoryMB = int(instance['memory_mb'])
    return resize_spec
def create_controller_spec(client_factory, key, adapter_type="lsiLogic"):
    """Build a VirtualDeviceConfigSpec that adds a disk controller.

    The controller acts as the bus for virtual hard disks subsequently
    attached to the VM.  adapter_type selects the flavour; anything other
    than 'busLogic' or 'lsiLogicsas' falls back to plain LSI Logic.
    """
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"

    controller_classes = {
        "busLogic": 'ns0:VirtualBusLogicController',
        "lsiLogicsas": 'ns0:VirtualLsiLogicSASController',
    }
    virtual_controller = client_factory.create(
        controller_classes.get(adapter_type, 'ns0:VirtualLsiLogicController'))
    virtual_controller.key = key
    virtual_controller.busNumber = 0
    virtual_controller.sharedBus = "noSharing"

    virtual_device_config.device = virtual_controller
    return virtual_device_config
def _convert_vif_model(name):
    """Map a standard VIF_MODEL name to the VMware device class name.

    :raises: exception.Invalid when the resulting device is unsupported
    """
    mapped = {network_model.VIF_MODEL_E1000: 'VirtualE1000',
              network_model.VIF_MODEL_E1000E: 'VirtualE1000e'}.get(name, name)
    if mapped not in ALL_SUPPORTED_NETWORK_DEVICES:
        raise exception.Invalid(_('%s is not supported.') % mapped)
    return mapped
def create_network_spec(client_factory, vif_info):
    """Build a VirtualDeviceConfigSpec adding one network adapter to a VM.

    The backing is chosen from vif_info['network_ref']:
      * 'OpaqueNetwork' -> opaque-network backing
      * 'DistributedVirtualPortgroup' -> DVS port backing
      * anything else (or no ref) -> standard backing by network name

    :param client_factory: factory used to instantiate SOAP objects
    :param vif_info: dict with 'vif_model', 'network_ref', 'network_name',
                     and 'mac_address' entries
    """
    network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    network_spec.operation = "add"

    # Keep compatible with other Hyper vif model parameter.
    # Fix: convert into a local instead of writing the converted value back
    # into vif_info, so the caller's dict is no longer mutated as a side
    # effect of building the spec.
    vif_model = _convert_vif_model(vif_info['vif_model'])
    net_device = client_factory.create('ns0:' + vif_model)

    # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
    # ephemeral. Invalid configuration if set to static and the NIC does
    # not come up on boot if set to dynamic.
    network_ref = vif_info['network_ref']
    network_name = vif_info['network_name']
    mac_address = vif_info['mac_address']
    backing = None
    if network_ref and network_ref['type'] == 'OpaqueNetwork':
        backing_name = ''.join(['ns0:VirtualEthernetCard',
                                'OpaqueNetworkBackingInfo'])
        backing = client_factory.create(backing_name)
        backing.opaqueNetworkId = network_ref['network-id']
        backing.opaqueNetworkType = network_ref['network-type']
    elif (network_ref and
            network_ref['type'] == "DistributedVirtualPortgroup"):
        backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
                                'VirtualPortBackingInfo'])
        backing = client_factory.create(backing_name)
        portgroup = client_factory.create(
            'ns0:DistributedVirtualSwitchPortConnection')
        portgroup.switchUuid = network_ref['dvsw']
        portgroup.portgroupKey = network_ref['dvpg']
        backing.port = portgroup
    else:
        backing = client_factory.create(
            'ns0:VirtualEthernetCardNetworkBackingInfo')
        backing.deviceName = network_name

    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = True
    connectable_spec.connected = True

    net_device.connectable = connectable_spec
    net_device.backing = backing

    # The Server assigns a Key to the device. Here we pass a -ve temporary key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    net_device.key = -47
    net_device.addressType = "manual"
    net_device.macAddress = mac_address
    net_device.wakeOnLanEnabled = True

    network_spec.device = net_device
    return network_spec
def get_vmdk_attach_config_spec(client_factory,
                                disk_type="preallocated",
                                file_path=None,
                                disk_size=None,
                                linked_clone=False,
                                controller_key=None,
                                unit_number=None,
                                device_name=None):
    """Build the config spec that creates/attaches a VMDK virtual disk."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    disk_spec = create_virtual_disk_spec(client_factory,
                                         controller_key, disk_type, file_path,
                                         disk_size, linked_clone,
                                         unit_number, device_name)
    config_spec.deviceChange = [disk_spec]
    return config_spec
def get_cdrom_attach_config_spec(client_factory,
                                 datastore,
                                 file_path,
                                 controller_key,
                                 cdrom_unit_number):
    """Build the config spec that attaches an ISO-backed CDROM drive."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    cdrom_spec = create_virtual_cdrom_spec(client_factory,
                                           datastore,
                                           controller_key,
                                           file_path,
                                           cdrom_unit_number)
    config_spec.deviceChange = [cdrom_spec]
    return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
                                destroy_disk=False):
    """Build the config spec that detaches (and optionally deletes) a VMDK."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    detach_spec = detach_virtual_disk_spec(client_factory,
                                           device,
                                           destroy_disk)
    config_spec.deviceChange = [detach_spec]
    return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
    """Build a config spec whose extraConfig holds the given key/value pairs."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')

    def _option(key, value):
        # Wrap one key/value pair in an OptionValue SOAP object.
        opt = client_factory.create('ns0:OptionValue')
        opt.key = key
        opt.value = value
        return opt

    config_spec.extraConfig = [_option(key, value)
                               for key, value in extra_opts.iteritems()]
    return config_spec
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
    """Return (vmdk_file_path, adapter_type, disk_type) for a device list.

    Scans the VM's hardware devices for a flat-backed VirtualDisk (when
    *uuid* is given, only a disk whose file name contains it contributes the
    path) and maps the disk's controller to its adapter type string.
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    controller_types = {
        "VirtualLsiLogicController": "lsiLogic",
        "VirtualBusLogicController": "busLogic",
        "VirtualIDEController": "ide",
        "VirtualLsiLogicSASController": "lsiLogicsas",
    }

    vmdk_file_path = None
    vmdk_controller_key = None
    disk_type = None
    adapter_type_dict = {}
    for device in hardware_devices:
        type_name = device.__class__.__name__
        if type_name == "VirtualDisk":
            backing = device.backing
            if backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo":
                if not uuid or uuid in backing.fileName:
                    vmdk_file_path = backing.fileName
                vmdk_controller_key = device.controllerKey
                if getattr(backing, 'thinProvisioned', False):
                    disk_type = "thin"
                elif getattr(backing, 'eagerlyScrub', False):
                    disk_type = "eagerZeroedThick"
                else:
                    disk_type = "preallocated"
        elif type_name in controller_types:
            adapter_type_dict[device.key] = controller_types[type_name]

    adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
    return (vmdk_file_path, adapter_type, disk_type)
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if not unit_number in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController']
def _find_allocated_slots(devices):
    """Map controller_key -> list of unit numbers already allocated on it.

    A SCSI controller also occupies one unit on its own bus (its
    scsiCtlrUnitNumber), recorded under the controller's own key.
    """
    taken = {}
    for device in devices:
        if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
            taken.setdefault(device.controllerKey, []).append(
                device.unitNumber)
        if _is_scsi_controller(device):
            # the SCSI controller sits on its own bus
            taken.setdefault(device.key, []).append(
                device.scsiCtlrUnitNumber)
    return taken
def allocate_controller_key_and_unit_number(client_factory, devices,
                                            adapter_type):
    """Pick (controller_key, unit_number, controller_spec) for a new disk.

    Existing controllers of the requested adapter_type are searched for a
    free unit first; when none is available the returned tuple carries a
    temporary negative key and the spec for a brand new controller.
    """
    if devices.__class__.__name__ == "ArrayOfVirtualDevice":
        devices = devices.VirtualDevice

    taken = _find_allocated_slots(devices)

    slot = None
    if adapter_type == 'ide':
        # An IDE bus carries at most two devices.
        ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
        slot = _find_controller_slot(ide_keys, taken, 2)
    elif adapter_type in ['lsiLogic', 'lsiLogicsas', 'busLogic']:
        scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
        slot = _find_controller_slot(scsi_keys, taken, 16)
    if slot:
        return slot[0], slot[1], None

    # create new controller with the specified type and return its spec
    controller_key = -101
    controller_spec = create_controller_spec(client_factory, controller_key,
                                             adapter_type)
    return controller_key, 0, controller_spec
def get_rdm_disk(hardware_devices, uuid):
    """Return the RDM-backed VirtualDisk whose LUN uuid matches, else None."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ ==
                "VirtualDiskRawDiskMappingVer1BackingInfo" and
                backing.lunUuid == uuid):
            return device
def get_copy_virtual_disk_spec(client_factory, adapter_type="lsiLogic",
                               disk_type="preallocated"):
    """Build the VirtualDiskSpec used as the destination of a disk copy."""
    dest_spec = client_factory.create('ns0:VirtualDiskSpec')
    dest_spec.diskType = disk_type
    dest_spec.adapterType = get_vmdk_adapter_type(adapter_type)
    return dest_spec
def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic",
                         disk_type="preallocated"):
    """Build the FileBackedVirtualDiskSpec for creating a new VMDK."""
    create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
    create_vmdk_spec.diskType = disk_type
    create_vmdk_spec.capacityKb = size_in_kb
    create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
    return create_vmdk_spec
def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic",
                        disk_type="rdmp"):
    """Build the DeviceBackedVirtualDiskSpec for creating an RDM disk."""
    create_vmdk_spec = client_factory.create(
        'ns0:DeviceBackedVirtualDiskSpec')
    create_vmdk_spec.diskType = disk_type
    create_vmdk_spec.device = device
    create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
    return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
                              datastore,
                              controller_key,
                              file_path,
                              cdrom_unit_number):
    """Build the device config spec adding an ISO-backed virtual CDROM."""
    config_spec = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    config_spec.operation = "add"

    cdrom = client_factory.create('ns0:VirtualCdrom')

    backing = client_factory.create('ns0:VirtualCdromIsoBackingInfo')
    backing.datastore = datastore
    backing.fileName = file_path
    cdrom.backing = backing

    cdrom.controllerKey = controller_key
    cdrom.unitNumber = cdrom_unit_number
    # Temporary negative key; the server assigns the real (positive) one.
    cdrom.key = -1

    connectable = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable.startConnected = True
    connectable.allowGuestControl = False
    connectable.connected = True
    cdrom.connectable = connectable

    config_spec.device = cdrom
    return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
                             disk_type="preallocated",
                             file_path=None,
                             disk_size=None,
                             linked_clone=False,
                             unit_number=None,
                             device_name=None):
    """Build the spec that creates a new, or attaches an existing, disk.

    A missing file_path (or a linked clone) requests a "create" file
    operation.  'rdm'/'rdmp' disk types get a raw-device-mapping backing;
    every other type gets a flat VMDK backing whose provisioning mode
    follows disk_type ('thin', 'eagerZeroedThick' or preallocated).
    """
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"
    if (file_path is None) or linked_clone:
        virtual_device_config.fileOperation = "create"

    virtual_disk = client_factory.create('ns0:VirtualDisk')

    if disk_type in ("rdm", "rdmp"):
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
        disk_file_backing.compatibilityMode = ("virtualMode"
                                               if disk_type == "rdm"
                                               else "physicalMode")
        disk_file_backing.diskMode = "independent_persistent"
        disk_file_backing.deviceName = device_name or ""
    else:
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskFlatVer2BackingInfo')
        disk_file_backing.diskMode = "persistent"
        if disk_type == "thin":
            disk_file_backing.thinProvisioned = True
        elif disk_type == "eagerZeroedThick":
            disk_file_backing.eagerlyScrub = True
    disk_file_backing.fileName = file_path or ""

    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = False
    connectable_spec.connected = True

    if linked_clone:
        # A linked clone starts with an empty fileName of its own and points
        # at the original backing as its parent.
        virtual_disk.backing = copy.copy(disk_file_backing)
        virtual_disk.backing.fileName = ""
        virtual_disk.backing.parent = disk_file_backing
    else:
        virtual_disk.backing = disk_file_backing
    virtual_disk.connectable = connectable_spec

    # The Server assigns a Key to the device. Here we pass a -ve random key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    virtual_disk.key = -100
    virtual_disk.controllerKey = controller_key
    virtual_disk.unitNumber = unit_number or 0
    virtual_disk.capacityInKB = disk_size or 0

    virtual_device_config.device = virtual_disk
    return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
    """Build the spec that detaches *device*, optionally destroying its file."""
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "remove"
    virtual_device_config.device = device
    if destroy_disk:
        virtual_device_config.fileOperation = "destroy"
    return virtual_device_config
def clone_vm_spec(client_factory, location,
                  power_on=False, snapshot=None, template=False, config=None):
    """Build the VirtualMachineCloneSpec for a clone operation."""
    clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
    clone_spec.location = location
    clone_spec.powerOn = power_on
    clone_spec.template = template
    if snapshot:
        clone_spec.snapshot = snapshot
    if config is not None:
        clone_spec.config = config
    return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
                     disk_move_type="moveAllDiskBackingsAndAllowSharing"):
    """Build the VirtualMachineRelocateSpec for a relocation."""
    rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
    rel_spec.diskMoveType = disk_move_type
    rel_spec.datastore = datastore
    if host:
        rel_spec.host = host
    return rel_spec
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
    """Build the config spec for a minimal placeholder VM.

    The dummy VM gets 1 vCPU, 4MB of RAM and a single 1MB disk behind a
    newly created controller on the given datastore.
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')

    config_spec.name = name
    config_spec.guestId = "otherGuest"

    vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    vm_file_info.vmPathName = "[" + data_store_name + "]"
    config_spec.files = vm_file_info

    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    tools_info.afterPowerOn = True
    tools_info.afterResume = True
    tools_info.beforeGuestStandby = True
    tools_info.beforeGuestShutdown = True
    tools_info.beforeGuestReboot = True

    config_spec.tools = tools_info
    config_spec.numCPUs = 1
    config_spec.memoryMB = 4

    controller_key = -101
    controller_spec = create_controller_spec(client_factory, controller_key)
    # Fix: the previous call passed (client_factory, 1024, controller_key),
    # which bound 1024 to controller_key and the controller key (-101) to
    # disk_type (see create_virtual_disk_spec's signature). Use a keyword so
    # the 1MB size and the controller key land on the right parameters.
    disk_spec = create_virtual_disk_spec(client_factory, controller_key,
                                         disk_size=1024)

    device_config_spec = [controller_spec, disk_spec]

    config_spec.deviceChange = device_config_spec
    return config_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
    """Build a config spec storing *machine_id_str* under 'machine.id'."""
    virtual_machine_config_spec = client_factory.create(
        'ns0:VirtualMachineConfigSpec')

    opt = client_factory.create('ns0:OptionValue')
    opt.key = "machine.id"
    opt.value = machine_id_str

    virtual_machine_config_spec.extraConfig = [opt]
    return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
                                    port_group_name, vlan_id):
    """Build the HostPortGroupSpec adding a port group to a vSwitch."""
    vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
    vswitch_port_group_spec.name = port_group_name
    vswitch_port_group_spec.vswitchName = vswitch_name

    # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
    vswitch_port_group_spec.vlanId = int(vlan_id)

    policy = client_factory.create('ns0:HostNetworkPolicy')
    nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
    nicteaming.notifySwitches = True
    policy.nicTeaming = nicteaming

    vswitch_port_group_spec.policy = policy
    return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
    """Build the config spec enabling VNC access on the given port."""
    virtual_machine_config_spec = client_factory.create(
        'ns0:VirtualMachineConfigSpec')

    opt_enabled = client_factory.create('ns0:OptionValue')
    opt_enabled.key = "RemoteDisplay.vnc.enabled"
    opt_enabled.value = "true"

    opt_port = client_factory.create('ns0:OptionValue')
    opt_port.key = "RemoteDisplay.vnc.port"
    opt_port.value = port

    virtual_machine_config_spec.extraConfig = [opt_enabled, opt_port]
    return virtual_machine_config_spec
@utils.synchronized('vmware.get_vnc_port')
def get_vnc_port(session):
    """Return a free VNC port, or raise when the configured range is full.

    The range [vnc_port, vnc_port + vnc_port_total) is scanned against the
    ports already assigned to existing VMs; the synchronization decorator
    keeps two concurrent spawns from picking the same port.

    :raises: exception.ConsolePortRangeExhausted
    """
    min_port = CONF.vmware.vnc_port
    max_port = min_port + CONF.vmware.vnc_port_total
    allocated_ports = _get_allocated_vnc_ports(session)
    for candidate in range(min_port, max_port):
        if candidate not in allocated_ports:
            return candidate
    raise exception.ConsolePortRangeExhausted(min_port=min_port,
                                              max_port=max_port)
def _get_allocated_vnc_ports(session):
    """Return an integer set of all allocated VNC ports."""
    # TODO(rgerganov): bug #1256944
    # The VNC port should be unique per host, not per vCenter
    vnc_ports = set()
    # Query the VNC-port extraConfig option for every VM, then follow the
    # pagination token until the property collector is exhausted.
    result = session._call_method(vim_util, "get_objects",
                                  "VirtualMachine", [VNC_CONFIG_KEY])
    while result:
        for obj in result.objects:
            # propSet "need not be set" on every returned object; VMs with
            # no VNC option configured are simply skipped.
            if not hasattr(obj, 'propSet'):
                continue
            dynamic_prop = obj.propSet[0]
            option_value = dynamic_prop.val
            vnc_port = option_value.value
            vnc_ports.add(int(vnc_port))
        token = _get_token(result)
        if token:
            result = session._call_method(vim_util,
                                          "continue_to_get_objects",
                                          token)
        else:
            break
    return vnc_ports
def search_datastore_spec(client_factory, file_name):
    """Build a datastore-browser search spec that matches *file_name*."""
    search_spec = client_factory.create(
        'ns0:HostDatastoreBrowserSearchSpec')
    search_spec.matchPattern = [file_name]
    return search_spec
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
    """Page through *results*, returning the first match found by *func*.

    When a match is found before the last page, the outstanding retrieval
    is cancelled server-side so the property collector is not left open.
    Returns None once every page has been searched without a match.
    """
    while results:
        token = _get_token(results)
        match = func(results, value)
        if match:
            if token:
                session._call_method(vim_util,
                                     "cancel_retrieve",
                                     token)
            return match
        if not token:
            return None
        results = session._call_method(vim_util,
                                       "continue_to_get_objects",
                                       token)
def _cancel_retrieve_if_necessary(session, results):
    """Cancel an in-progress property retrieval when a token is present."""
    token = _get_token(results)
    if token:
        session._call_method(vim_util,
                             "cancel_retrieve",
                             token)
def _get_vm_ref_from_name(session, vm_name):
    """Find a VM reference by exact name via a full VM listing."""
    results = session._call_method(vim_util, "get_objects",
                                   "VirtualMachine", ["name"])
    return _get_object_from_results(session, results, vm_name,
                                    _get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
    """Return the VM ref for *vm_name*, trying the UUID search index first."""
    vm_ref = _get_vm_ref_from_vm_uuid(session, vm_name)
    if not vm_ref:
        vm_ref = _get_vm_ref_from_name(session, vm_name)
    return vm_ref
def _get_vm_ref_from_uuid(session, instance_uuid):
    """Find a VM whose *name* equals *instance_uuid*.

    This lists every VM name on the backend and filters locally, so it is
    far less efficient than _get_vm_ref_from_vm_uuid and is only used as a
    last-resort lookup.
    """
    results = session._call_method(vim_util, "get_objects",
                                   "VirtualMachine", ["name"])
    return _get_object_from_results(session, results, instance_uuid,
                                    _get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
    """Find a VM via the vCenter SearchIndex by its instanceUuid.

    FindAllByUuid returns every VM whose config instanceUuid matches; the
    first hit (if any) is returned, otherwise None.
    """
    search_index = session._get_vim().get_service_content().searchIndex
    vm_refs = session._call_method(
        session._get_vim(),
        "FindAllByUuid",
        search_index,
        uuid=instance_uuid,
        vmSearch=True,
        instanceUuid=True)
    if vm_refs:
        return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
    """Find a VM by the nvp.vm-uuid value stored in its extraConfig."""
    results = session._call_method(
        vim_util, "get_objects", "VirtualMachine",
        ['config.extraConfig["nvp.vm-uuid"]'])
    return _get_object_from_results(session, results, instance_uuid,
                                    _get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
    """Return the VM reference for *instance*, by uuid then by name.

    :raises: exception.InstanceNotFound when no lookup strategy succeeds
    """
    uuid = instance['uuid']
    vm_ref = search_vm_ref_by_identifier(session, uuid)
    if not vm_ref:
        vm_ref = _get_vm_ref_from_name(session, instance['name'])
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=uuid)
    return vm_ref
def search_vm_ref_by_identifier(session, identifier):
    """Search a VM reference by *identifier*, cheapest strategy first.

    This separates out part of the vm_ref search logic so it can be used
    directly in the special case of migrating an instance.  For querying
    the VM linked to an instance always use get_vm_ref instead.
    """
    finders = (_get_vm_ref_from_vm_uuid,
               _get_vm_ref_from_extraconfig,
               _get_vm_ref_from_uuid)
    for finder in finders:
        vm_ref = finder(session, identifier)
        if vm_ref:
            return vm_ref
    return None
def get_host_ref_from_id(session, host_id, property_list=None):
    """Return the HostSystem ObjectContent whose moref value is *host_id*.

    :param property_list: host properties to fetch (defaults to ['name'])
    """
    if property_list is None:
        property_list = ['name']
    results = session._call_method(
        vim_util, "get_objects",
        "HostSystem", property_list)
    return _get_object_from_results(session, results, host_id,
                                    _get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
    """Return the managed object id of the host currently running a VM.

    Since vMotion can move a VM between hosts at any time, treat the result
    as a snapshot rather than a value safe to cache for long.

    :param session: a vSphere API connection
    :param vm_ref: a reference object to the running VM
    :return: the host_id running the virtual machine
    """
    # to prevent typographical errors below
    property_name = 'runtime.host'

    # A property collector in the VMware vSphere Management API is a set of
    # local representations of remote values; property_set holds the
    # properties we asked the collector for.
    property_set = session._call_method(
        vim_util, "get_object_properties",
        None, vm_ref, vm_ref._type, [property_name])

    prop = property_from_property_set(
        property_name, property_set)

    if prop is None:
        # reaching here represents an impossible state
        raise RuntimeError(
            "Virtual Machine %s exists without a runtime.host!"
            % (vm_ref))
    return prop.val.value
def property_from_property_set(property_name, property_set):
    """Pick a single named property out of a property-collector result.

    Because network traffic is expensive, multiple VMwareAPI calls will
    sometimes pile up properties to be collected, so results may contain
    many values gathered for different purposes.  This scans every returned
    object's propSet and yields the first property whose name matches; the
    value may be a ManagedObjectReference id or a complex value.

    :param property_name: name of property you want
    :param property_set: all results from query
    :return: the matching property object, or None
    """
    for obj in property_set.objects:
        match = _property_from_propSet(obj.propSet, property_name)
        if match is not None:
            return match
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
def get_host_ref_for_vm(session, instance, props):
    """Return the host ObjectContent for the host running *instance*."""
    vm_ref = get_vm_ref(session, instance)
    host_id = get_host_id_from_vm_ref(session, vm_ref)
    return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
    """Return the name of the ESXi host running *instance*."""
    return get_host_name_from_host_ref(
        get_host_ref_for_vm(session, instance, ['name']))
def get_host_name_from_host_ref(host_ref):
    """Extract the 'name' property value from a host ObjectContent."""
    prop = _property_from_propSet(host_ref.propSet)
    if prop is not None:
        return prop.val
def get_vm_state_from_name(session, vm_name):
    """Return the runtime power state of the VM named *vm_name*."""
    vm_ref = get_vm_ref_from_name(session, vm_name)
    return session._call_method(vim_util, "get_dynamic_property",
                                vm_ref, "VirtualMachine",
                                "runtime.powerState")
def get_stats_from_cluster(session, cluster):
    """Get the aggregate resource stats of a cluster.

    Returns {'cpu': {...}, 'mem': {...}} where CPU counts are summed over
    connected hosts only and memory figures come from the cluster's root
    resource pool (values converted to MB).
    """
    cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
    mem_info = {'total': 0, 'free': 0}
    # Get the Host and Resource Pool Managed Object Refs
    prop_dict = session._call_method(vim_util, "get_dynamic_properties",
                                     cluster, "ClusterComputeResource",
                                     ["host", "resourcePool"])
    if prop_dict:
        host_ret = prop_dict.get('host')
        if host_ret:
            host_mors = host_ret.ManagedObjectReference
            result = session._call_method(vim_util,
                         "get_properties_for_a_collection_of_objects",
                         "HostSystem", host_mors,
                         ["summary.hardware", "summary.runtime"])
            for obj in result.objects:
                # NOTE(review): assumes propSet[0]/propSet[1] come back in
                # the requested order (hardware, runtime) -- confirm against
                # vim_util's property collection behavior.
                hardware_summary = obj.propSet[0].val
                runtime_summary = obj.propSet[1].val
                # Disconnected/maintenance hosts contribute no capacity.
                if runtime_summary.connectionState == "connected":
                    # Total vcpus is the sum of all pCPUs of individual hosts
                    # The overcommitment ratio is factored in by the scheduler
                    cpu_info['vcpus'] += hardware_summary.numCpuThreads
                    cpu_info['cores'] += hardware_summary.numCpuCores
                    cpu_info['vendor'].append(hardware_summary.vendor)
                    cpu_info['model'].append(hardware_summary.cpuModel)

        res_mor = prop_dict.get('resourcePool')
        if res_mor:
            res_usage = session._call_method(vim_util, "get_dynamic_property",
                            res_mor, "ResourcePool", "summary.runtime.memory")
            if res_usage:
                # maxUsage is the memory limit of the cluster available to VM's
                mem_info['total'] = int(res_usage.maxUsage / units.Mi)
                # overallUsage is the hypervisor's view of memory usage by VM's
                consumed = int(res_usage.overallUsage / units.Mi)
                mem_info['free'] = mem_info['total'] - consumed
    stats = {'cpu': cpu_info, 'mem': mem_info}
    return stats
def get_cluster_ref_from_name(session, cluster_name):
    """Return the reference of the cluster named *cluster_name*, or None."""
    results = session._call_method(vim_util, "get_objects",
                                   "ClusterComputeResource", ["name"])
    return _get_object_from_results(session, results, cluster_name,
                                    _get_object_for_value)
def get_host_ref(session, cluster=None):
    """Return a host moref: any host, or the first one in *cluster*.

    :raises: exception.NoValidHost when the cluster exposes no hosts
    """
    if cluster is None:
        results = session._call_method(vim_util, "get_objects",
                                       "HostSystem")
        # Drop the pagination token; only the first host is needed.
        _cancel_retrieve_if_necessary(session, results)
        return results.objects[0].obj

    host_ret = session._call_method(vim_util, "get_dynamic_property",
                                    cluster, "ClusterComputeResource",
                                    "host")
    if not host_ret or not host_ret.ManagedObjectReference:
        raise exception.NoValidHost(
            reason=_('No host available on cluster'))
    return host_ret.ManagedObjectReference[0]
def propset_dict(propset):
    """Turn a propset list into a dictionary

    PropSet is an optional attribute on ObjectContent objects
    that are returned by the VMware API.

    You can read more about these at:
    http://pubs.vmware.com/vsphere-51/index.jsp
        #com.vmware.wssdk.apiref.doc/
            vmodl.query.PropertyCollector.ObjectContent.html

    :param propset: a property "set" from ObjectContent
    :return: dictionary representing property set
    """
    if propset is None:
        return {}

    # dict() over a generator keeps Python 2.6 compatibility (no dict
    # comprehensions there).
    return dict((prop.name, prop.val) for prop in propset)
def _select_datastore(data_stores, best_match, datastore_regex=None):
    """Find the most preferable datastore in a given RetrieveResult object.

    :param data_stores: a RetrieveResult object from vSphere API call
    :param best_match: the current best match for datastore
    :param datastore_regex: an optional regular expression to match names
    :return: the best DSRecord seen so far (largest free space wins)
    """
    for obj_content in data_stores.objects:
        # the propset attribute "need not be set" by returning API
        if not hasattr(obj_content, 'propSet'):
            continue

        propdict = propset_dict(obj_content.propSet)
        ds_type = propdict['summary.type']
        ds_name = propdict['summary.name']
        # vSphere doesn't support CIFS or vfat for datastores, so only
        # accessible VMFS and NFS stores are eligible.
        if ds_type not in ('VMFS', 'NFS'):
            continue
        if not propdict.get('summary.accessible'):
            continue
        if (datastore_regex is not None
                and not datastore_regex.match(ds_name)):
            continue

        candidate = DSRecord(datastore=obj_content.obj,
                             name=ds_name,
                             capacity=propdict['summary.capacity'],
                             freespace=propdict['summary.freeSpace'])
        # favor datastores with more free space
        if candidate.freespace > best_match.freespace:
            best_match = candidate
    return best_match
def get_datastore_ref_and_name(session, cluster=None, host=None,
                               datastore_regex=None):
    """Get the datastore list and choose the most preferable one.

    With neither cluster nor host given, every Datastore visible to the
    session is queried; otherwise only the datastores of the given cluster
    (preferred) or host are considered.  Results are paged: each page is
    scanned with _select_datastore and the retrieval token fetches the next.

    :param session: vSphere session object used for the API calls
    :param cluster: optional cluster managed-object reference
    :param host: optional host managed-object reference
    :param datastore_regex: optional regex the datastore name must match
    :return: DSRecord of the accessible VMFS/NFS datastore with the most
        free space
    :raises exception.DatastoreNotFound: when no datastore matches
    """
    if cluster is None and host is None:
        # No scoping object: query all datastores the session can see.
        data_stores = session._call_method(vim_util, "get_objects",
                    "Datastore", ["summary.type", "summary.name",
                    "summary.capacity", "summary.freeSpace",
                    "summary.accessible"])
    else:
        if cluster is not None:
            datastore_ret = session._call_method(
                                        vim_util,
                                        "get_dynamic_property", cluster,
                                        "ClusterComputeResource", "datastore")
        else:
            datastore_ret = session._call_method(
                                        vim_util,
                                        "get_dynamic_property", host,
                                        "HostSystem", "datastore")
        if not datastore_ret:
            raise exception.DatastoreNotFound()
        data_store_mors = datastore_ret.ManagedObjectReference
        data_stores = session._call_method(vim_util,
                            "get_properties_for_a_collection_of_objects",
                            "Datastore", data_store_mors,
                            ["summary.type", "summary.name",
                            "summary.capacity", "summary.freeSpace",
                            "summary.accessible"])
    # Seed with freespace=0 so any real accessible datastore wins.
    best_match = DSRecord(datastore=None, name=None,
                          capacity=None, freespace=0)
    while data_stores:
        best_match = _select_datastore(data_stores, best_match,
                                       datastore_regex)
        # A token means the server has more pages to return.
        token = _get_token(data_stores)
        if not token:
            break
        data_stores = session._call_method(vim_util,
                                           "continue_to_get_objects",
                                           token)
    if best_match.datastore:
        return best_match
    if datastore_regex:
        raise exception.DatastoreNotFound(
            _("Datastore regex %s did not match any datastores")
            % datastore_regex.pattern)
    else:
        raise exception.DatastoreNotFound()
def _get_allowed_datastores(data_stores, datastore_regex, allowed_types):
    """Collect accessible datastores of the allowed types from one page.

    :param data_stores: a RetrieveResult page from a vSphere API call
    :param datastore_regex: optional regex the datastore name must match
    :param allowed_types: container of acceptable datastore type strings
    :return: list of {'ref': ..., 'name': ...} dicts
    """
    allowed = []
    for obj_content in data_stores.objects:
        # The propSet attribute "need not be set" by the returning API.
        if not hasattr(obj_content, 'propSet'):
            continue
        propdict = propset_dict(obj_content.propSet)
        # vSphere doesn't support CIFS or vfat for datastores, therefore
        # only the caller-supplied types are kept.
        ds_type = propdict['summary.type']
        ds_name = propdict['summary.name']
        if not (propdict['summary.accessible'] and ds_type in allowed_types):
            continue
        if datastore_regex is None or datastore_regex.match(ds_name):
            allowed.append({'ref': obj_content.obj, 'name': ds_name})
    return allowed
def get_available_datastores(session, cluster=None, datastore_regex=None):
    """Get the list of accessible VMFS/NFS datastores.

    :param session: vSphere session object used for the API calls
    :param cluster: optional cluster managed-object reference; when absent
        the session's host reference is used instead
    :param datastore_regex: optional regex the datastore name must match
    :return: list of {'ref': ..., 'name': ...} dicts (possibly empty)
    """
    if cluster:
        mobj = cluster
        # Renamed from `type`: never shadow the builtin.
        mobj_type = "ClusterComputeResource"
    else:
        mobj = get_host_ref(session)
        mobj_type = "HostSystem"
    ds = session._call_method(vim_util, "get_dynamic_property", mobj,
                              mobj_type, "datastore")
    if not ds:
        return []
    data_store_mors = ds.ManagedObjectReference
    # NOTE(garyk): use utility method to retrieve remote objects
    data_stores = session._call_method(vim_util,
            "get_properties_for_a_collection_of_objects",
            "Datastore", data_store_mors,
            ["summary.type", "summary.name", "summary.accessible"])

    allowed = []
    # Results are paged; keep fetching while a continuation token exists.
    while data_stores:
        allowed.extend(_get_allowed_datastores(data_stores, datastore_regex,
                                               ['VMFS', 'NFS']))
        token = _get_token(data_stores)
        if not token:
            break

        data_stores = session._call_method(vim_util,
                                           "continue_to_get_objects",
                                           token)
    return allowed
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
    """Return the backing uuid of the flat-VMDK disk whose file name
    contains volume_uuid, or None when no such disk exists."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
                and volume_uuid in backing.fileName):
            return backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
    """Return the flat-VMDK VirtualDisk device whose backing uuid matches
    uuid, or None when no such device exists."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
                and backing.uuid == uuid):
            return device
def get_vmdk_volume_disk(hardware_devices, path=None):
    """Return the first VirtualDisk device, or — when a (non-empty) path is
    given — the VirtualDisk whose backing file name equals that path."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        # A falsy path (None or "") means "any disk" — keep the original
        # truthiness semantics.
        if not path:
            return device
        if path == device.backing.fileName:
            return device
def get_res_pool_ref(session, cluster, node_mo_id):
    """Get the resource pool.

    With no cluster named, the root resource pool is returned (the 0th
    ResourcePool object is always the root on both ESX and vCenter).  With
    a cluster, its root resource pool is returned only when the cluster is
    the requested node.

    :param session: vSphere session object used for the API calls
    :param cluster: cluster managed-object reference, or None
    :param node_mo_id: managed-object id of the requested compute node
    :return: resource pool reference, or None when a cluster is given but
        its moid does not match node_mo_id
    """
    # Initialize to None: the original raised UnboundLocalError when the
    # cluster's moid did not match node_mo_id.
    res_pool_ref = None
    if cluster is None:
        # With no cluster named, use the root resource pool.
        results = session._call_method(vim_util, "get_objects",
                                       "ResourcePool")
        _cancel_retrieve_if_necessary(session, results)
        # The 0th resource pool is always the root resource pool on both
        # ESX and vCenter.
        res_pool_ref = results.objects[0].obj
    elif cluster.value == node_mo_id:
        # Get the root resource pool of the cluster
        res_pool_ref = session._call_method(vim_util,
                                            "get_dynamic_property",
                                            cluster,
                                            "ClusterComputeResource",
                                            "resourcePool")
    return res_pool_ref
def get_all_cluster_mors(session):
    """Get all the clusters in the vCenter.

    :param session: vSphere session object used for the API calls
    :return: list of ClusterComputeResource objects (with their "name"
        property), or None when the lookup fails — the error is logged,
        not raised (best-effort lookup).
    """
    try:
        results = session._call_method(vim_util, "get_objects",
                                       "ClusterComputeResource", ["name"])
        _cancel_retrieve_if_necessary(session, results)
        return results.objects
    except Exception as excep:
        # LOG.warn is a deprecated alias; warning is the supported name.
        LOG.warning(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
    """Get all the resource pools in the vCenter.

    :param session: vSphere session object used for the API calls
    :return: list of ResourcePool objects, or None when the lookup fails —
        the error is logged, not raised (best-effort lookup).
    """
    try:
        results = session._call_method(vim_util, "get_objects",
                                       "ResourcePool")
        _cancel_retrieve_if_necessary(session, results)
        return results.objects
    except Exception as excep:
        # LOG.warn is a deprecated alias; warning is the supported name.
        LOG.warning(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
    """Get the value of an attribute for a given managed object.

    :param session: vSphere session object used for the API call
    :param mor_ref: managed object reference (its _type drives the query)
    :param attribute: name of the dynamic property to read
    """
    return session._call_method(
        vim_util, "get_dynamic_property", mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
    """Returns managed object refs for the given cluster/resource pool name.

    :param entity_list: objects that may carry a propSet attribute
    :param entity_name: display name to match against propSet[0].val
    :return: list of matching managed object refs (possibly empty)
    """
    matches = []
    for mor in entity_list:
        if hasattr(mor, 'propSet') and mor.propSet[0].val == entity_name:
            matches.append(mor)
    return matches
def get_all_cluster_refs_by_name(session, path_list):
    """Get reference to the Cluster, ResourcePool with the path specified.

    The path is the display name. This can be the full path as well.
    The input will have the list of clusters and resource pool names.
    Returns None when either cluster or resource pool lookup fails.
    """
    cluster_mors = get_all_cluster_mors(session)
    if not cluster_mors:
        return
    res_pool_mors = get_all_res_pool_mors(session)
    if not res_pool_mors:
        return

    selected = []
    for entity_path in (path.strip() for path in path_list):
        # entity_path could be a unique cluster and/or resource-pool name;
        # cluster matches are listed before resource-pool matches.
        matched = find_entity_mor(cluster_mors, entity_path)
        matched.extend(find_entity_mor(res_pool_mors, entity_path))
        for mor in matched:
            selected.append((mor.obj, mor.propSet[0].val))
    return get_dict_mor(session, selected)
def get_dict_mor(session, list_obj):
    """Build a per-moid dict of cluster/resource-pool references.

    The input is a list of objects in the form (manage_object, display_name)
    where the managed object looks like::

        { value = "domain-1002", _type = "ClusterComputeResource" }

    Output data format::

        dict_mors = {
            'respool-1001': { 'cluster_mor': clusterMor,
                              'res_pool_mor': resourcePoolMor,
                              'name': display_name },
            'domain-1002': { 'cluster_mor': clusterMor,
                             'res_pool_mor': resourcePoolMor,
                             'name': display_name },
        }
    """
    dict_mors = {}
    for obj_ref, path in list_obj:
        if obj_ref._type == "ResourcePool":
            # A resource pool was named: look up its owning cluster.
            cluster_mor = get_dynamic_property_mor(session, obj_ref, "owner")
            res_pool_mor = obj_ref
        else:
            # A cluster was named: look up its default resource pool.
            cluster_mor = obj_ref
            res_pool_mor = get_dynamic_property_mor(session, obj_ref,
                                                    "resourcePool")
        dict_mors[obj_ref.value] = {'cluster_mor': cluster_mor,
                                    'res_pool_mor': res_pool_mor,
                                    'name': path,
                                    }
    return dict_mors
def get_mo_id_from_instance(instance):
    """Return the managed object ID from the instance.

    instance['node'] carries the hypervisor_hostname of the compute node on
    which the instance exists or will be provisioned, in the form
    'respool-1001(MyResPoolName)' or 'domain-1001(MyClusterName)'; the part
    before the '(' is the managed object id.
    """
    node_name = instance['node']
    mo_id, _sep, _display = node_name.partition('(')
    return mo_id
def get_vmdk_adapter_type(adapter_type):
    """Return the adapter type to be used in vmdk descriptor.

    The descriptor uses the same adapter type for LSI-SAS and LSILogic
    because the Virtual Disk Manager API does not recognize the newer
    controller types; every other value passes through unchanged.
    """
    return "lsiLogic" if adapter_type == "lsiLogicsas" else adapter_type
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
                             vmfolder_ref):
    """Clone VM and link the cloned VM to the instance.

    Clones the passed vm_ref into a new VM and links the cloned vm to
    the passed instance: the clone is named after the instance uuid and its
    nvp.vm-uuid extra-config / instanceUuid are set to the instance uuid.

    :param session: vSphere session object used for the API calls
    :param instance: nova instance dict; only 'uuid' is read here
    :param vm_ref: reference of the VM to clone (must not be None)
    :param host_ref: target host for the relocate spec
    :param ds_ref: target datastore for the relocate spec
    :param vmfolder_ref: folder in which the clone is created
    :raises error_util.MissingParameter: when vm_ref is None
    """
    if vm_ref is None:
        LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
                   "with vm_ref=None"))
        raise error_util.MissingParameter(param="vm_ref")
    # Get the clone vm spec
    client_factory = session._get_vim().client.factory
    rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref)
    extra_opts = {'nvp.vm-uuid': instance['uuid']}
    config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
    config_spec.instanceUuid = instance['uuid']
    clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)

    # Clone VM on ESX host
    LOG.debug(_("Cloning VM for instance %s"), instance['uuid'],
              instance=instance)
    vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
                                         vm_ref, folder=vmfolder_ref,
                                         name=instance['uuid'],
                                         spec=clone_spec)
    # Block until vCenter reports the clone task finished.
    session._wait_for_task(vm_clone_task)
    LOG.debug(_("Cloned VM for instance %s"), instance['uuid'],
              instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
                                     suffix='-orig'):
    """Disassociates the VM linked to the instance.

    Disassociates the VM linked to the instance by performing the following
    1. Update the extraConfig property for nvp.vm-uuid to be replaced with
    instance[uuid]+suffix
    2. Rename the VM to be instance[uuid]+suffix instead
    3. Reset the instanceUUID of the VM to a new generated value

    :param session: vSphere session object used for the API calls
    :param instance: nova instance dict; only 'uuid' is read here
    :param vm_ref: VM reference; looked up from the instance when None
    :param suffix: string appended to the uuid for the detached VM's name
    """
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix}
    client_factory = session._get_vim().client.factory
    reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
    reconfig_spec.name = instance['uuid'] + suffix
    # NOTE(review): the code sets instanceUuid to ''; per the docstring this
    # presumably causes vCenter to generate a fresh uuid — confirm.
    reconfig_spec.instanceUuid = ''
    LOG.debug(_("Disassociating VM from instance %s"), instance['uuid'],
              instance=instance)
    reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
                                         vm_ref, spec=reconfig_spec)
    # Block until vCenter reports the reconfigure task finished.
    session._wait_for_task(reconfig_task)
    LOG.debug(_("Disassociated VM from instance %s"), instance['uuid'],
              instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
                                 suffix='-orig'):
    """Associates the VM to the instance.

    Associates the VM to the instance by performing the following
    1. Update the extraConfig property for nvp.vm-uuid to be replaced with
    instance[uuid]
    2. Rename the VM to be instance[uuid]
    3. Reset the instanceUUID of the VM to be instance[uuid]

    :param session: vSphere session object used for the API calls
    :param instance: nova instance dict; only 'uuid' is read here
    :param vm_ref: VM reference; searched by uuid+suffix when None
    :param suffix: suffix previously used by disassociate_vmref_from_instance
    :raises exception.InstanceNotFound: when no VM named uuid+suffix exists
    """
    if vm_ref is None:
        # Find the VM that disassociate_vmref_from_instance renamed.
        vm_ref = search_vm_ref_by_identifier(session,
                                             instance['uuid'] + suffix)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance['uuid']
                                             + suffix)
    extra_opts = {'nvp.vm-uuid': instance['uuid']}
    client_factory = session._get_vim().client.factory
    reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
    reconfig_spec.name = instance['uuid']
    reconfig_spec.instanceUuid = instance['uuid']
    LOG.debug(_("Associating VM to instance %s"), instance['uuid'],
              instance=instance)
    reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
                                         vm_ref, spec=reconfig_spec)
    # Block until vCenter reports the reconfigure task finished.
    session._wait_for_task(reconfig_task)
    LOG.debug(_("Associated VM to instance %s"), instance['uuid'],
              instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(instance['uuid'])
| 38.767142 | 79 | 0.645699 |
319033164b8adf69fbe1ba15193979ad9b3813e4 | 324 | py | Python | borough_map/__main__.py | timhunderwood/house-price-london | 6345e374fd363e27e5c0708557f680c1305905ac | [
"MIT"
] | null | null | null | borough_map/__main__.py | timhunderwood/house-price-london | 6345e374fd363e27e5c0708557f680c1305905ac | [
"MIT"
] | null | null | null | borough_map/__main__.py | timhunderwood/house-price-london | 6345e374fd363e27e5c0708557f680c1305905ac | [
"MIT"
] | null | null | null | import borough_map.controller
if __name__ == "__main__":
    # Build the controller from the price-paid CSV and the London borough
    # boundary shapefile, then render the animation for 1995-01..2019-11.
    app = borough_map.controller.Controller(
        raw_price_paid_file_name="pp-complete.csv.gz",
        shp_file_name="London_Borough_Excluding_MHW.shp",
        start_year=1995,
        end_year=2019,
        end_month=11,
    )
    app.animate()
| 27 | 57 | 0.688272 |
37b962b173d0cd4f1a2ad02f122176fff4860577 | 33,973 | py | Python | utils/util.py | PointCloudYC/se-pseudogrid | 65005d82fda1a31b8c945e02e378df102ba0fee0 | [
"MIT"
] | 2 | 2021-11-30T06:38:23.000Z | 2021-12-17T01:38:32.000Z | utils/util.py | PointCloudYC/se-pseudogrid | 65005d82fda1a31b8c945e02e378df102ba0fee0 | [
"MIT"
] | null | null | null | utils/util.py | PointCloudYC/se-pseudogrid | 65005d82fda1a31b8c945e02e378df102ba0fee0 | [
"MIT"
] | null | null | null | import sys
import os
import itertools
from string import ascii_uppercase
import numpy as np
import torch
import torch.distributed as dist
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
# from sklearn.manifold import TSNE
# from sklearn.decomposition import PCA
import pandas as pd
import matplotlib as mpl
# mpl.rc('axes', labelsize=14)
# mpl.rc('xtick', labelsize=12)
# mpl.rc('ytick', labelsize=12)
import seaborn as sns
def str2bool(item):
    """Coerce a bool or a yes/no style string into a bool.

    borrow from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(item, bool):
        return item
    lowered = item.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ValueError('the input is invalid, use yes, y, 1 or t for yes.')
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        # reset() establishes every statistic; the original duplicated
        # these assignments before calling reset().
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean.

        :param val: latest value (treated as a per-sample average)
        :param n: number of samples that produced ``val``
        """
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def classification_metrics(preds, targets, num_classes):
    """Compute overall and mean per-class accuracy.

    :param preds: (N, num_classes) array of per-class scores (argmax over
        the last axis gives the predicted label)
    :param targets: (N,) array of integer ground-truth labels
    :param num_classes: number of classes
    :return: (overall_accuracy, average_per_class_accuracy)
    """
    labels = np.argmax(preds, -1)
    total_seen = labels.shape[0]
    overall_acc = 1.0 * np.sum(labels == targets) / total_seen

    seen_per_class = np.zeros(num_classes)
    correct_per_class = np.zeros(num_classes)
    for cls in range(num_classes):
        seen_per_class[cls] = np.sum(targets == cls)
        correct_per_class[cls] = np.sum((labels == cls) & (targets == cls))
    # NOTE: a class absent from targets yields a 0/0 division, as in the
    # original implementation.
    avg_class_acc = np.mean(correct_per_class / seen_per_class)
    return overall_acc, avg_class_acc
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    :param output: (B, C) tensor of per-class scores
    :param target: (B,) tensor of ground-truth class indices
    :param topk: iterable of k values to evaluate
    :return: list of 1-element tensors, one accuracy in [0, 1] per k
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)  # (B, maxk)
        pred = pred.t()  # (maxk, B)
        correct = pred.eq(target.view(1, -1).expand_as(pred))  # (maxk, B)

        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): `correct` inherits the
            # transposed (non-contiguous) layout and view() raises on
            # non-contiguous tensors in PyTorch >= 1.7.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(1.0 / batch_size))
        return res
def reduce_tensor(tensor):
    """All-reduce a tensor across the distributed world and return the mean.

    Requires torch.distributed to be initialized; the input is not modified.
    """
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    reduced /= dist.get_world_size()
    return reduced
def partnet_metrics(num_classes, num_parts, objects, preds, targets):
    """Compute PartNet part-segmentation IoU metrics.

    Args:
        num_classes: number of object categories
        num_parts: per-category number of parts (part 0 is "unlabelled")
        objects: [int] category id of each shape
        preds: [(num_parts,num_points)] per-shape part logits
        targets: [(num_points)] per-shape ground-truth part labels

    Returns:
        msIoU: per-category mean shape IoU
        mpIoU: per-category mean part IoU (parts 1..N-1)
        mmsIoU: mean of msIoU over categories
        mmpIoU: mean of mpIoU over categories
    """
    shape_iou_tot = [0.0] * num_classes
    shape_iou_cnt = [0] * num_classes
    part_intersect = [np.zeros((num_parts[o_l]), dtype=np.float32) for o_l in range(num_classes)]
    # Unions start at 1e-6 to avoid division by zero for unseen parts.
    part_union = [np.zeros((num_parts[o_l]), dtype=np.float32) + 1e-6 for o_l in range(num_classes)]

    for obj, cur_pred, cur_gt in zip(objects, preds, targets):
        cur_num_parts = num_parts[obj]
        # Predict among parts 1..N-1 only, then force points whose ground
        # truth is part 0 (unlabelled) to prediction 0.
        cur_pred = np.argmax(cur_pred[1:, :], axis=0) + 1
        cur_pred[cur_gt == 0] = 0
        cur_shape_iou_tot = 0.0
        cur_shape_iou_cnt = 0
        for j in range(1, cur_num_parts):
            cur_gt_mask = (cur_gt == j)
            cur_pred_mask = (cur_pred == j)

            has_gt = (np.sum(cur_gt_mask) > 0)
            has_pred = (np.sum(cur_pred_mask) > 0)

            # A part contributes only when it appears in gt or prediction.
            if has_gt or has_pred:
                intersect = np.sum(cur_gt_mask & cur_pred_mask)
                union = np.sum(cur_gt_mask | cur_pred_mask)
                iou = intersect / union

                cur_shape_iou_tot += iou
                cur_shape_iou_cnt += 1

                part_intersect[obj][j] += intersect
                part_union[obj][j] += union
        if cur_shape_iou_cnt > 0:
            cur_shape_miou = cur_shape_iou_tot / cur_shape_iou_cnt
            shape_iou_tot[obj] += cur_shape_miou
            shape_iou_cnt[obj] += 1

    msIoU = [shape_iou_tot[o_l] / shape_iou_cnt[o_l] for o_l in range(num_classes)]
    # Part IoUs skip part 0 (unlabelled).
    part_iou = [np.divide(part_intersect[o_l][1:], part_union[o_l][1:]) for o_l in range(num_classes)]
    mpIoU = [np.mean(part_iou[o_l]) for o_l in range(num_classes)]

    # Print instance mean
    mmsIoU = np.mean(np.array(msIoU))
    mmpIoU = np.mean(mpIoU)

    return msIoU, mpIoU, mmsIoU, mmpIoU
def IoU_from_confusions(confusions):
    """
    Computes IoU from confusion matrices.
    :param confusions: ([..., n_c, n_c] np.int32). Can be any dimension, the
        confusion matrices should be described by the last two axes.
        n_c = number of classes
    :return: ([..., n_c] np.float32) IoU score
    """
    # TP lies on the diagonal; row sums (second-to-last axis = truths) give
    # TP+FN, column sums (last axis = predictions) give TP+FP.
    true_pos = np.diagonal(confusions, axis1=-2, axis2=-1)
    gt_totals = np.sum(confusions, axis=-1)
    pred_totals = np.sum(confusions, axis=-2)

    # IoU = TP / (TP + FP + FN); epsilon guards empty denominators.
    IoU = true_pos / (pred_totals + gt_totals - true_pos + 1e-6)

    # Mean IoU over the classes that actually appear in the ground truth.
    absent = gt_totals < 1e-3
    present_counts = np.sum(1 - absent, axis=-1, keepdims=True)
    mIoU = np.sum(IoU, axis=-1, keepdims=True) / (present_counts + 1e-6)

    # Absent classes get the mIoU so later averaging is unaffected.
    IoU += absent * mIoU
    return IoU
def s3dis_metrics(num_classes, vote_logits, validation_proj, validation_labels):
    """Compute per-class IoUs and mIoU on the full (reprojected) clouds.

    :param num_classes: number of semantic classes
    :param vote_logits: per-cloud (num_classes, num_sub_points) logits
    :param validation_proj: per-cloud indices mapping each original point to
        its nearest sub-cloud point
    :param validation_labels: per-cloud ground-truth labels (original points)
    :return: (IoUs, mIoU)
    """
    Confs = []
    for logits, proj, targets in zip(vote_logits, validation_proj, validation_labels):
        # Project sub-cloud predictions back onto the original points.
        preds = np.argmax(logits[:, proj], axis=0).astype(np.int32)
        # `labels` must be passed by keyword: it is keyword-only in recent
        # scikit-learn releases (positional use raises TypeError).
        Confs += [confusion_matrix(targets, preds, labels=np.arange(num_classes))]
    # Regroup confusions
    C = np.sum(np.stack(Confs), axis=0)
    IoUs = IoU_from_confusions(C)
    mIoU = np.mean(IoUs)
    return IoUs, mIoU
def s3dis_metrics_vis_CM(num_classes, vote_logits,
                         validation_proj, validation_labels,
                         more_metrics=False, image_path=None,
                         label_to_names=None, visualize_CM=True):
    """compute metrics (miou, OA, precision, recall), visualize confusion matrix

    Args:
        num_classes ([type]): the number of class
        vote_logits ([type]): vote_logits for all sub points
        validation_proj ([type]): validation projection, i.e. the nearest nb sub-pt indice for each original pt
        validation_labels ([type]): validation labels for each original pt
        more_metrics (bool, optional): compute other metrics (precision, recall, OA,etc). Defaults to False.
        image_path ([type], optional): place to save CM picture. Defaults to None.
        label_to_names ([type], optional): label and name dictionary for the dataset. Defaults to None.
        visualize_CM (bool, optional): show the CM. Defaults to True.
    Returns:
        (IoUs, mIoU, overall_acc) — overall_acc is None unless more_metrics
    """
    # compute mIoU
    Confs = []
    targets_list = []
    preds_list = []
    for logits, proj, targets in zip(vote_logits, validation_proj, validation_labels):
        # Project sub-cloud predictions back onto the original points.
        preds = np.argmax(logits[:, proj], axis=0).astype(np.int32)
        # `labels` is keyword-only in recent scikit-learn releases.
        Confs += [confusion_matrix(targets, preds, labels=np.arange(num_classes))]
        targets_list.append(targets)
        preds_list.append(preds)
    # Regroup confusions
    C = np.sum(np.stack(Confs), axis=0)
    IoUs = IoU_from_confusions(C)
    mIoU = np.mean(IoUs)

    # Flattened labels/predictions are needed by both the report and the CM
    # plot.  np.concatenate (unlike the original vstack) also works when the
    # clouds have different point counts, and fixes the NameError the
    # original raised when visualize_CM=True with more_metrics=False.
    if more_metrics or visualize_CM:
        y_true = np.concatenate([np.ravel(t) for t in targets_list])
        y_pred = np.concatenate([np.ravel(p) for p in preds_list])

    # compute other metrics
    overall_acc = None
    if more_metrics:
        target_names = list(label_to_names.values())
        cls_report = classification_report(
            y_true,
            y_pred,
            target_names=target_names,
            digits=4)
        print(cls_report)
        cls_report_dict = classification_report(
            y_true,
            y_pred,
            target_names=target_names,
            digits=4, output_dict=True)
        overall_acc = cls_report_dict['accuracy']
    else:
        print('Attention, overall accuracy is not computed.')

    # plot CM
    if visualize_CM:
        print(f"save confusion matrix at root{image_path}")
        plot_CM_wrapper(C, y_true, y_pred, label_to_names, image_path, filename='CM_seaborn')

    return IoUs, mIoU, overall_acc
def plot_CM_wrapper(cm, y_true, y_pred, label_to_names, image_path,
                    filename='CM_seaborn', figsize=(10, 10), fmt='0.2f'):
    """Plot the confusion matrix in three styles (scikit-learn style,
    seaborn style, and a normalized seaborn style).

    :param cm: confusion matrix (num_classes x num_classes)
    :param y_true: flat array of ground-truth labels
    :param y_pred: flat array of predicted labels
    :param label_to_names: {label_id: class_name} mapping for the dataset
    :param image_path: directory where the figures are saved
    :param filename: base file name for the seaborn-style figure
    :param figsize: figure size for the seaborn-style plots
    :param fmt: cell number format for the normalized plot
    """
    assert label_to_names is not None
    label_values = np.sort([k for k, v in label_to_names.items()])
    target_names = list(label_to_names.values())

    # plot w. scikit-learn style
    plot_confusion_matrix2(
        cm,
        image_path,
        filename="CM_default",
        normalize=False,
        target_names=target_names,
        title="Confusion Matrix")

    # plot w. seaborn style
    plot_confusion_matrix_seaborn(
        y_true,
        y_pred,
        filename,
        label_values,
        label_to_names,
        image_path,
        figsize=figsize)

    # plot normalized fig. w. seaborn style.  The original carried the
    # dataset-join artifact f"(unknown)_percent" here; derive the name from
    # `filename` as everywhere else.
    plot_confusion_matrix_seaborn(
        y_true,
        y_pred,
        f"{filename}_percent",
        label_values,
        label_to_names,
        image_path,
        figsize=(figsize[0] + 4, figsize[0] + 4),
        normalized=True,
        fmt=fmt)
def header_properties(field_list, field_names):
    """Build the PLY header lines describing the vertex element.

    :param field_list: list of 2D numpy arrays; each column is one field
    :param field_names: one name per column, in field order
    :return: list of header lines (element count + one property per field)
    """
    # First line describing element vertex
    lines = ['element vertex %d' % field_list[0].shape[0]]

    # One "property <dtype> <name>" line per column, across all arrays.
    name_idx = 0
    for fields in field_list:
        for column in fields.T:
            lines.append('property %s %s' % (column.dtype.name,
                                             field_names[name_idx]))
            name_idx += 1
    return lines
def write_ply(filename, field_list, field_names, triangular_faces=None):
    """
    Write ".ply" files

    Parameters
    ----------
    filename : string
        the name of the file to which the data is saved. A '.ply' extension will be appended to the
        file name if it does no already have one.

    field_list : list, tuple, numpy array
        the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
        tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
        as one field.

    field_names : list
        the name of each fields as a list of strings. Has to be the same length as the number of
        fields.

    triangular_faces : numpy array, optional
        (n_faces, 3) vertex indices; written as a binary face element.

    Returns
    -------
    bool
        True on success, False when the fields are malformed.

    Examples
    --------
    >>> points = np.random.rand(10, 3)
    >>> write_ply('example1.ply', points, ['x', 'y', 'z'])

    >>> values = np.random.randint(2, size=10)
    >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])

    >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
    >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
    >>> write_ply('example3.ply', [points, colors, values], field_names)
    """
    # Format list input to the right form
    field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
    for i, field in enumerate(field_list):
        if field.ndim < 2:
            field_list[i] = field.reshape(-1, 1)
        if field.ndim > 2:
            print('fields have more than 2 dimensions')
            return False

    # check all fields have the same number of data
    n_points = [field.shape[0] for field in field_list]
    if not np.all(np.equal(n_points, n_points[0])):
        print('wrong field dimensions')
        return False

    # Check if field_names and field_list have same nb of column
    n_fields = np.sum([field.shape[1] for field in field_list])
    if (n_fields != len(field_names)):
        print('wrong number of field names')
        return False

    # Add extension if not there
    if not filename.endswith('.ply'):
        filename += '.ply'

    # open in text mode to write the header
    with open(filename, 'w') as plyfile:

        # First magical word
        header = ['ply']

        # Encoding format (binary, native byte order)
        header.append('format binary_' + sys.byteorder + '_endian 1.0')

        # Points properties description
        header.extend(header_properties(field_list, field_names))

        # Add faces if needded
        if triangular_faces is not None:
            header.append('element face {:d}'.format(triangular_faces.shape[0]))
            header.append('property list uchar int vertex_indices')

        # End of header
        header.append('end_header')

        # Write all lines
        for line in header:
            plyfile.write("%s\n" % line)

    # open in binary/append to use tofile
    with open(filename, 'ab') as plyfile:

        # Create a structured array so all fields interleave per vertex.
        i = 0
        type_list = []
        for fields in field_list:
            for field in fields.T:
                type_list += [(field_names[i], field.dtype.str)]
                i += 1
        data = np.empty(field_list[0].shape[0], dtype=type_list)
        i = 0
        for fields in field_list:
            for field in fields.T:
                data[field_names[i]] = field
                i += 1
        data.tofile(plyfile)

        if triangular_faces is not None:
            triangular_faces = triangular_faces.astype(np.int32)
            # Each face record is: uchar count (always 3) + three int32 ids.
            type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
            data = np.empty(triangular_faces.shape[0], dtype=type_list)
            data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
            data['0'] = triangular_faces[:, 0]
            data['1'] = triangular_faces[:, 1]
            data['2'] = triangular_faces[:, 2]
            data.tofile(plyfile)

    return True
def save_predicted_results_PLY(vote_logits, validation_proj,
                               validation_points, validation_colors,
                               validation_labels, test_path=None,
                               cloud_names=None, open3d_visualize=False):
    """Save per-cloud predictions and ground truth as PLY files.

    For every cloud, predictions on the original points are recovered by
    indexing the sub-cloud logits with the projection indices, then two
    files (<name>_pred.ply and <name>_gt.ply) are written under
    <test_path>/results.  Optionally visualize with the Open3D helpers.
    """
    os.makedirs(os.path.join(test_path, 'results'), exist_ok=True)

    clouds = zip(vote_logits, validation_proj, validation_points,
                 validation_colors, validation_labels, cloud_names)
    for logits, proj, points, colors, targets, cloud_name in clouds:
        # Project sub-cloud predictions back onto the original points.
        preds = np.argmax(logits[:, proj], axis=0).astype(np.int32)

        pred_name = os.path.join(test_path, 'results', f'{cloud_name}_pred.ply')
        gt_name = os.path.join(test_path, 'results', f'{cloud_name}_gt.ply')
        write_ply(pred_name,
                  [points, preds],
                  ['x', 'y', 'z', 'preds'])
        print(f"{cloud_name}_pred.ply saved successfully")
        write_ply(gt_name,
                  [points, targets],
                  ['x', 'y', 'z', 'gt'])
        print(f"{cloud_name}_gt.ply saved successfully")

        if open3d_visualize:
            from .visualize import Plot
            xyzrgb = np.concatenate([points, colors], axis=-1)
            Plot.draw_pc(xyzrgb)  # visualize raw point clouds
            Plot.draw_pc_sem_ins(points, targets)  # visualize ground-truth
            Plot.draw_pc_sem_ins(points, preds)  # visualize prediction
def sub_s3dis_metrics(num_classes, validation_logits, validation_labels, val_proportions):
    """Compute IoUs/mIoU on sub-sampled clouds, rescaled to the true
    per-class proportions of the full clouds.

    :param num_classes: number of semantic classes
    :param validation_logits: per-cloud (num_classes, num_points) logits
    :param validation_labels: per-cloud ground-truth labels
    :param val_proportions: per-class point counts in the full clouds
    :return: (IoUs, mIoU)
    """
    Confs = []
    for logits, targets in zip(validation_logits, validation_labels):
        preds = np.argmax(logits, axis=0).astype(np.int32)
        # `labels` must be passed by keyword: it is keyword-only in recent
        # scikit-learn releases (positional use raises TypeError).
        Confs += [confusion_matrix(targets, preds, labels=np.arange(num_classes))]
    # Regroup confusions
    C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
    # Rescale with the right number of point per class
    C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
    IoUs = IoU_from_confusions(C)
    mIoU = np.mean(IoUs)
    return IoUs, mIoU
def s3dis_part_metrics(num_classes, predictions, targets, val_proportions):
    """Compute IoUs/mIoU over subparts of the validation set, rescaled to
    the true per-class proportions.

    :param num_classes: number of semantic classes
    :param predictions: per-subpart (num_classes, num_points) probabilities
    :param targets: per-subpart ground-truth labels
    :param val_proportions: per-class point counts in the full clouds
    :return: (IoUs, mIoU)
    """
    # Confusions for subparts of validation set
    Confs = np.zeros((len(predictions), num_classes, num_classes), dtype=np.int32)
    for i, (probs, truth) in enumerate(zip(predictions, targets)):
        # Predicted labels
        preds = np.argmax(probs, axis=0)
        # `labels` must be passed by keyword: it is keyword-only in recent
        # scikit-learn releases (positional use raises TypeError).
        Confs[i, :, :] = confusion_matrix(truth, preds, labels=np.arange(num_classes))
    # Sum all confusions
    C = np.sum(Confs, axis=0).astype(np.float32)
    # Balance with real validation proportions
    C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
    # Objects IoU
    IoUs = IoU_from_confusions(C)
    # Print instance mean
    mIoU = np.mean(IoUs)
    return IoUs, mIoU
def shapenetpart_metrics(num_classes, num_parts, objects, preds, targets, masks):
    """Compute ShapeNet-Part segmentation metrics.

    Args:
        num_classes: number of object categories
        num_parts: per-category number of parts
        objects: [int] category id of each shape
        preds: [(num_parts,num_points)] per-shape part logits
        targets: [(num_points)] per-shape ground-truth part labels
        masks: [(num_points)] boolean masks of valid points

    Returns:
        acc: overall point accuracy over valid points
        objs_average: per-category mean IoU
        class_average: mean of objs_average over categories
        instance_average: mean IoU over all shapes
    """
    total_correct = 0.0
    total_seen = 0.0
    Confs = []
    for obj, cur_pred, cur_gt, cur_mask in zip(objects, preds, targets, masks):
        obj = int(obj)
        cur_num_parts = num_parts[obj]
        cur_pred = np.argmax(cur_pred, axis=0)
        # Only masked (valid) points contribute to accuracy and confusions.
        cur_pred = cur_pred[cur_mask]
        cur_gt = cur_gt[cur_mask]
        correct = np.sum(cur_pred == cur_gt)
        total_correct += correct
        total_seen += cur_pred.shape[0]
        parts = [j for j in range(cur_num_parts)]
        Confs += [confusion_matrix(cur_gt, cur_pred, labels=parts)]

    Confs = np.array(Confs)
    obj_mIoUs = []
    objects = np.asarray(objects)
    # Group per-shape confusions by object category and compute IoUs.
    for l in range(num_classes):
        obj_inds = np.where(objects == l)[0]
        obj_confs = np.stack(Confs[obj_inds])
        obj_IoUs = IoU_from_confusions(obj_confs)
        obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]

    objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]
    instance_average = np.mean(np.hstack(obj_mIoUs))
    class_average = np.mean(objs_average)
    acc = total_correct / total_seen

    print('Objs | Inst | Air  Bag  Cap  Car  Cha  Ear  Gui  Kni  Lam  Lap  Mot  Mug  Pis  Roc  Ska  Tab')
    print('-----|------|--------------------------------------------------------------------------------')

    s = '{:4.1f} | {:4.1f} | '.format(100 * class_average, 100 * instance_average)
    for AmIoU in objs_average:
        s += '{:4.1f} '.format(100 * AmIoU)
    print(s + '\n')
    return acc, objs_average, class_average, instance_average
def save_fig(image_path, fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as image_path/fig_id.<extension>."""
    target = os.path.join(image_path, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
def fashion_scatter_old(x, colors):
    """Scatter-plot 2D embeddings colored per class (seaborn-styled).

    :param x: (N, 2) array of 2D coordinates (e.g. a t-SNE embedding —
        the fixed [-25, 25] limits assume a t-SNE-like scale)
    :param colors: (N,) array of integer class labels in 0..K-1
    :return: (figure, axes, scatter, label_texts)
    """
    # set sns style
    sns.set_style('darkgrid')
    sns.set_palette('muted')
    sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})

    # choose a color palette with seaborn.
    num_classes = len(np.unique(colors))
    palette = np.array(sns.color_palette("hls", num_classes))

    # create a scatter plot.
    f = plt.figure(figsize=(12, 12))
    ax = plt.subplot(aspect='equal')
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    sc = ax.scatter(x[:, 0], x[:, 1], lw=0, s=40, c=palette[colors.astype(int)])
    ax.legend(fontsize='large', markerscale=2)
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    ax.axis('off')
    ax.axis('tight')

    # add the labels for each digit corresponding to the label
    txts = []
    # plot label on the median locations of each cluster
    for i in range(num_classes):
        # Position of each label at median of data points.
        xtext, ytext = np.median(x[colors == i, :], axis=0)
        txt = ax.text(xtext, ytext, str(i), fontsize=16)
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
        txts.append(txt)

    return f, ax, sc, txts
def fashion_scatter(x, colors, need_mapping=False):
    """Scatter-plot 2D embeddings with per-class colors and a legend.

    :param x: (N, 2) array of 2D coordinates (fixed [-25, 25] limits assume
        a t-SNE-like scale)
    :param colors: (N,) array of class labels; values need not be 0..K-1
    :param need_mapping: remap arbitrary label values to palette indices
        0..K-1 before coloring
    :return: (figure, axes, scatter, label_texts)
    """
    # choose a color palette with seaborn.
    num_classes = len(np.unique(colors))
    palette = np.array(sns.color_palette("hls", num_classes))
    unique_items = list(np.unique(colors))

    colors_mapped = np.zeros(colors.shape[0])
    if need_mapping:
        for i, item in enumerate(colors):
            colors_mapped[i] = unique_items.index(item)

    # create a scatter plot.
    f = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    if need_mapping:
        scatter = ax.scatter(x[:, 0], x[:, 1], lw=0, s=40,
                             c=palette[colors_mapped.astype(int)])
    else:
        scatter = ax.scatter(x[:, 0], x[:, 1], lw=0, s=40,
                             c=palette[colors.astype(int)])

    # produce a legend with the unique colors from the scatter
    legend1 = ax.legend(*scatter.legend_elements(),
                        loc="upper right", title="Classes")
    ax.add_artist(legend1)
    # ax.legend()
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    ax.axis('off')
    ax.axis('tight')

    # add the labels for each digit corresponding to the label
    txts = []
    # plot label on the median locations of each cluster
    for i in unique_items:
        # Position of each label at median of data points.
        xtext, ytext = np.median(x[colors == i, :], axis=0)
        txt = ax.text(xtext, ytext, str(i), fontsize=24)
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
        txts.append(txt)

    return f, ax, scatter, txts
# a btter CM plot function (July 26, 2020)
# ref: https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
def plot_confusion_matrix2(cm,image_path,filename='CM_default',
                           target_names=None,
                           title='Confusion matrix',
                           cmap=None,
                           normalize=True):
    """
    given a sklearn confusion matrix (cm), make a nice plot
    Arguments
    ---------
    cm:           confusion matrix from sklearn.metrics.confusion_matrix
    image_path:   directory the rendered figure is saved into (via save_fig)
    filename:     file name (without extension) used when saving the figure
    target_names: given classification classes such as [0, 1, 2]
                  the class names, for example: ['high', 'medium', 'low']
    title:        the text to display at the top of the matrix
                  (currently unused -- the plt.title call is commented out)
    cmap:         the gradient of the values displayed from matplotlib.pyplot.cm
                  see http://matplotlib.org/examples/color/colormaps_reference.html
                  plt.get_cmap('jet') or plt.cm.Blues
    normalize:    If False, plot the raw numbers
                  If True, plot the proportions (each row normalised to sum 1)
    Usage
    -----
    plot_confusion_matrix(cm           = cm,  # confusion matrix created by
                                              # sklearn.metrics.confusion_matrix
                          normalize    = True,            # show proportions
                          target_names = y_labels_vals,   # list of names of the classes
                          title        = best_estimator_name) # title of graph
    Citiation
    ---------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    import matplotlib.pyplot as plt
    import itertools
    # Overall accuracy is trace/total of the *raw* matrix; computed before
    # any normalisation below.
    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    # plt.figure(figsize=(8, 6))
    plt.figure(figsize=(16+2, 12+2))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.title(title)
    plt.colorbar()
    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)
    # Row-normalise so each true class sums to 1 (proportions).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Text-colour threshold: cells darker than this get white text.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('true labels')
    plt.xlabel('predicted labels\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    save_fig(image_path,filename, tight_layout=False)
    plt.show()
# TODO: need to optimize the speed, can refer to the cm_analysis2 function
def plot_confusion_matrix_seaborn(y_true, y_pred, filename, labels,label_to_names,image_path, ymap=None, figsize=(10,10), normalized=False, fmt='0.2f'):
    """
    Generate matrix plot of confusion matrix with pretty annotations.
    The plot image is saved to disk.
    args:
       y_true:    true label of the data, with shape (nsamples,)
       y_pred:    prediction of the data, with shape (nsamples,)
       filename:  filename of figure file to save
       labels:    string array, name the order of class labels in the confusion matrix.
                  use `clf.classes_` if using scikit-learn models.
                  with shape (nclass,).
       label_to_names: mapping whose values become the heatmap's axis labels
       image_path: directory the figure is saved into (via save_fig)
       ymap:      dict: any -> string, length == nclass.
                  if not None, map the labels & ys to more understandable strings.
                  Caution: original y_true, y_pred and labels must align.
       figsize:   the size of the figure plotted.
       normalized: if True, plot row-normalised percentages instead of raw counts
       fmt:       seaborn annotation format string used in the normalised case
    """
    if ymap is not None:
        # Remap raw labels to human-readable strings before computing the matrix.
        y_pred = [ymap[yi] for yi in y_pred]
        y_true = [ymap[yi] for yi in y_true]
        labels = [ymap[yi] for yi in labels]
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    # Row sums and per-cell row percentages, used for the annotation strings.
    cm_sum = np.sum(cm, axis=1, keepdims=True)
    cm_perc = cm / cm_sum.astype(float) * 100
    annot = np.empty_like(cm).astype(str)
    nrows, ncols = cm.shape
    for i in range(nrows):
        for j in range(ncols):
            c = cm[i, j]
            p = cm_perc[i, j]
            if i == j:
                # Diagonal cell: percentage plus "correct/row-total".
                s = cm_sum[i]
                annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
            elif c == 0:
                annot[i, j] = ''
            else:
                annot[i, j] = '%.1f%%\n%d' % (p, c)
    # NOTE(review): the `annot` array built above is never passed to
    # sns.heatmap below (both calls use annot=True), so the pretty strings
    # are discarded -- presumably annot=annot with fmt='' was intended;
    # confirm against the docstring before changing behavior.
    if normalized:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100 # 100% form
    cm = pd.DataFrame(cm, index=label_to_names.values(), columns=label_to_names.values())
    cm.index.name = 'True labels'
    cm.columns.name = 'Predicted labels'
    fig, ax = plt.subplots(figsize=figsize)
    # fmt =',d' to show numeric number only, e.g., 1,200,000
    # https://stackoverflow.com/questions/59990375/plot-confusioin-matrix-plot-is-not-showing-integer-value-instead-it-is-showing-s
    if normalized:
        sns.heatmap(cm, cmap='Oranges', annot=True,ax=ax, fmt=fmt)
    else:
        sns.heatmap(cm, cmap='Oranges', annot=True,ax=ax, fmt=',d')
    save_fig(image_path,filename, tight_layout=False)
def cm_analysis2(y_true, y_pred, filename, labels,label_to_names,image_path, ymap=None, figsize=(10,10)):
    """Render a confusion matrix as a seaborn heatmap with generic class names.

    Classes are labelled 'class A', 'class B', ... (one letter per unique
    value in ``y_true``).  The figure is shown and then saved via save_fig.
    """
    n_classes = len(np.unique(y_true))
    class_names = ['class %s' % letter for letter in list(ascii_uppercase)[0:n_classes]]
    matrix = confusion_matrix(y_true, y_pred)
    frame = pd.DataFrame(matrix, index=class_names, columns=class_names)
    ax = sns.heatmap(frame, cmap='Oranges', annot=True)
    plt.show()
    save_fig(image_path,filename, tight_layout=False)
def plot_wrongly_predicted_point_clouds(y_true, y_pred, test_loader,image_path,label_to_names,filename='wrongly_predicted_point_clouds',sampling_ratio=0.1):
    """Plot every wrongly-predicted point cloud in a 4-column grid of 3D scatters.

    Parameters
    ----------
    y_true, y_pred : ndarray of shape (n_samples,)
        Ground-truth and predicted labels.
    test_loader : torch DataLoader whose dataset[i][0] is a point-cloud tensor.
    image_path : directory the figure is saved into (via save_fig).
    label_to_names : mapping from label index to class name (for titles).
    filename : saved figure name.
    sampling_ratio : fraction of each cloud's points to draw (for speed).

    Returns
    -------
    ndarray of the dataset indices that were wrongly predicted.
    """
    # BUGFIX: np.squeeze(np.argwhere(...)) collapsed the single-mistake case
    # to a 0-d array, breaking the indexing below; reshape(-1) always yields 1-D.
    inds_wrong = np.argwhere(y_pred != y_true).reshape(-1)
    y_pred_wrong = y_pred[inds_wrong]
    y_true_wrong = y_true[inds_wrong]
    num_point_clouds_wrong = y_pred_wrong.shape[0]
    # BUGFIX: ceil division for the number of rows of 4 subplots.  The old
    # expression produced 0 rows for fewer than 4 mistakes (add_subplot
    # would fail) and an extra empty row when the count was divisible by 4.
    height_subplot = max(1, (num_point_clouds_wrong + 3) // 4)
    fig = plt.figure(figsize=(15, 5*height_subplot))
    for i in range(num_point_clouds_wrong):
        points = test_loader.dataset[inds_wrong[i]][0].numpy()
        ax = fig.add_subplot(height_subplot, 4, i + 1, projection="3d")
        # Plot a random sub-sample of the cloud to keep rendering fast.
        inds = np.random.choice(points.shape[0], int(points.shape[0]*sampling_ratio), replace=False)
        points_sample = points[inds,:]
        ax.scatter(points_sample[:,0], points_sample[:,1], points_sample[:,2], )
        ax.set_title(f'pred:{label_to_names[y_pred_wrong[i]]}, label: {label_to_names[y_true_wrong[i]]}')
        ax.set_axis_off()
    plt.show()
    save_fig(image_path, filename, tight_layout=False)
    return inds_wrong
# Define PLY types: mapping from PLY type names (bytes, as read from the
# header) to numpy dtype codes.
ply_dtypes = {
    b'int8': 'i1', b'char': 'i1',
    b'uint8': 'u1', b'uchar': 'u1',
    b'int16': 'i2', b'short': 'i2',
    b'uint16': 'u2', b'ushort': 'u2',
    b'int32': 'i4', b'int': 'i4',
    b'uint32': 'u4', b'uint': 'u4',
    b'float32': 'f4', b'float': 'f4',
    b'float64': 'f8', b'double': 'f8',
}

# Numpy byte-order prefix for each PLY storage format.
valid_formats = {'ascii': '', 'binary_big_endian': '>',
                 'binary_little_endian': '<'}


def parse_header(plyfile, ext):
    """Parse a point-cloud PLY header.

    ``plyfile`` is a binary file object positioned after the format line;
    ``ext`` is the numpy byte-order prefix ('', '<' or '>').  Returns
    ``(num_points, properties)`` where ``properties`` is a list of
    (name, dtype) tuples suitable for ``np.fromfile``.
    """
    num_points = None
    properties = []
    line = []
    while b'end_header' not in line and line != b'':
        line = plyfile.readline()
        if b'element' in line:
            fields = line.split()
            num_points = int(fields[2])
        elif b'property' in line:
            fields = line.split()
            properties.append((fields[2].decode(), ext + ply_dtypes[fields[1]]))
    return num_points, properties
def parse_mesh_header(plyfile, ext):
    """Parse a triangular-mesh PLY header.

    Parameters
    ----------
    plyfile : binary file object positioned right after the format line.
    ext : str
        Numpy byte-order prefix ('', '<' or '>').

    Returns
    -------
    (num_points, num_faces, vertex_properties)

    Raises
    ------
    ValueError
        If a face property other than ``property list uchar int ...`` is
        found (the only face layout read_ply supports).
    """
    # Variables
    line = []
    vertex_properties = []
    num_points = None
    num_faces = None
    current_element = None
    while b'end_header' not in line and line != b'':
        line = plyfile.readline()
        # Find point element
        if b'element vertex' in line:
            current_element = 'vertex'
            line = line.split()
            num_points = int(line[2])
        elif b'element face' in line:
            current_element = 'face'
            line = line.split()
            num_faces = int(line[2])
        elif b'property' in line:
            if current_element == 'vertex':
                line = line.split()
                vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
            elif current_element == 'face':
                # BUGFIX: this branch previously re-tested 'vertex' and was
                # unreachable; it also compared the bytes line against a str
                # prefix and concatenated bytes into the error message.
                if not line.startswith(b'property list uchar int'):
                    raise ValueError('Unsupported faces property : ' + line.decode())
    return num_points, num_faces, vertex_properties
def read_ply(filename, triangular_mesh=False):
    """
    Read ".ply" files
    Parameters
    ----------
    filename : string
        the name of the file to read.
    triangular_mesh : bool
        if True, also read an 'element face' section and return the faces
        as a second element.
    Returns
    -------
    result : array
        data stored in the file (for meshes: [vertex_data, faces] where
        faces is an (n_faces, 3) array of vertex indices)
    Examples
    --------
    Store data in file
    >>> points = np.random.rand(5, 3)
    >>> values = np.random.randint(2, size=5)
    >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])
    Read the file
    >>> data = read_ply('example.ply')
    >>> values = data['values']
    array([0, 0, 1, 1, 0])
    >>> points = np.vstack((data['x'], data['y'], data['z'])).T
    array([[ 0.466  0.595  0.324]
           [ 0.538  0.407  0.654]
           [ 0.850  0.018  0.988]
           [ 0.395  0.394  0.363]
           [ 0.873  0.996  0.092]])
    """
    with open(filename, 'rb') as plyfile:
        # Check if the file start with ply
        if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start whith the word ply')
        # get binary_little/big or ascii
        fmt = plyfile.readline().split()[1].decode()
        if fmt == "ascii":
            raise ValueError('The file is not binary')
        # get extension for building the numpy dtypes
        ext = valid_formats[fmt]
        # PointCloud reader vs mesh reader
        if triangular_mesh:
            # Parse header
            num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
            # Get point data
            vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
            # Get face data: each face record is a count byte ('k', always 3
            # for triangles) followed by three int32 vertex indices.
            face_properties = [('k', ext + 'u1'),
                               ('v1', ext + 'i4'),
                               ('v2', ext + 'i4'),
                               ('v3', ext + 'i4')]
            faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
            # Return vertex data and concatenated faces
            faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
            data = [vertex_data, faces]
        else:
            # Parse header
            num_points, properties = parse_header(plyfile, ext)
            # Get data
            data = np.fromfile(plyfile, dtype=properties, count=num_points)
    return data
| 35.388542 | 176 | 0.606629 |
45d2581e2070bc898ddbbdf51ee7d0ff3c14922d | 7,617 | py | Python | occo/plugins/infraprocessor/node_resolution/basic.py | zfarkas/infra-processor | 86b0d80925bf5cf97148a8d0f468cfa22b079957 | [
"Apache-2.0"
] | null | null | null | occo/plugins/infraprocessor/node_resolution/basic.py | zfarkas/infra-processor | 86b0d80925bf5cf97148a8d0f468cfa22b079957 | [
"Apache-2.0"
] | null | null | null | occo/plugins/infraprocessor/node_resolution/basic.py | zfarkas/infra-processor | 86b0d80925bf5cf97148a8d0f468cfa22b079957 | [
"Apache-2.0"
] | null | null | null | ### Copyright 2014, MTA SZTAKI, www.sztaki.hu
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
""" Basic resolver for node definitions.
.. moduleauthor:: Jozsef Kovacs <jozsef.kovacs@sztaki.mta.hu>
"""
from __future__ import absolute_import
__all__ = ['BasicResolver']
import logging
import occo.util as util
import occo.exceptions as exceptions
import occo.util.factory as factory
import sys
from ruamel import yaml
import jinja2
from occo.infraprocessor.node_resolution import Resolver, ContextSchemaChecker
from occo.exceptions import SchemaError
PROTOCOL_ID = 'basic'
log = logging.getLogger('occo.infraprocessor.node_resolution.basic')
@factory.register(Resolver, PROTOCOL_ID)
class BasicResolver(Resolver):
    """
    Implementation of :class:`Resolver` for performing basic resolution.

    Resolution merges the node description with its definition, renders
    Jinja2 templates found in attribute values, and derives the
    'connections' attribute used to wire nodes together.

    NOTE: this module targets Python 2 (``iteritems``, ``xrange``,
    ``basestring``).
    """
    def attr_template_resolve(self, attrs, template_data):
        """
        Recursively render attributes.

        Dicts and lists are walked (and updated) in place; string leaves
        are treated as Jinja2 templates rendered with *template_data*;
        any other leaf is returned unchanged.
        """
        if isinstance(attrs, dict):
            for k, v in attrs.iteritems():
                attrs[k] = self.attr_template_resolve(v, template_data)
            return attrs
        elif isinstance(attrs, list):
            for i in xrange(len(attrs)):
                attrs[i] = self.attr_template_resolve(attrs[i], template_data)
            return attrs
        elif isinstance(attrs, basestring):
            template = jinja2.Template(attrs)
            return template.render(**template_data)
        else:
            return attrs
    def attr_connect_resolve(self, node, attrs, attr_mapping):
        """
        Transform connection specifications into an attribute that the cookbook
        `connect`_ can understand.

        One entry is produced per inbound mapping; source roles are
        namespaced with the infrastructure id.  The result is stored in
        ``attrs['connections']``.
        """
        connections = [
            dict(source_role="{0}_{1}".format(node['infra_id'], role),
                 source_attribute=mapping['attributes'][0],
                 destination_attribute=mapping['attributes'][1])
            for role, mappings in attr_mapping.iteritems()
            for mapping in mappings
        ]
        attrs['connections'] = connections
    def resolve_attributes(self, node_desc, node_definition, template_data):
        """
        Resolve the attributes of a node:
            - Merge attributes of the node desc. and node def. (node desc overrides)
            - Resolve string attributes as Jinja templates
            - Construct an attribute to connect nodes
        """
        attrs = node_definition.get('contextualisation',dict()).get('attributes', dict())
        attrs.update(node_desc.get('attributes', dict()))
        attr_mapping = node_desc.get('mappings', dict()).get('inbound', dict())
        self.attr_template_resolve(attrs, template_data)
        self.attr_connect_resolve(node_desc, attrs, attr_mapping)
        return attrs
    def extract_synch_attrs(self, node_desc):
        """
        Fill synch_attrs.

        Returns the source attribute of every outbound mapping flagged
        with ``synch``.
        .. todo:: Maybe this should be moved to the Compiler. The IP
            depends on it, not the Chef config-manager.
        .. todo:: Furthermore, synch_attrs will be obsoleted, and moved to
            basic health_check as parameters.
        """
        outedges = node_desc.get('mappings', dict()).get('outbound', dict())
        return [mapping['attributes'][0]
                for mappings in outedges.itervalues() for mapping in mappings
                if mapping['synch']]
    def assemble_template_data(self, node_desc, node_definition):
        """
        Create the data structure that can be used in the Jinja templates.

        Exposes the node description/definition keys plus the helpers
        ``ibget`` (info broker query), ``find_node_id`` and ``getip``.
        .. todo:: Document the possibilities.
        """
        from occo.infobroker import main_info_broker
        def find_node_id(node_name):
            """
            Convenience function to be used in templates, to acquire a node id
            based on node name.
            """
            nodes = main_info_broker.get(
                'node.find', infra_id=node_desc['infra_id'], name=node_name)
            if not nodes:
                raise KeyError(
                    'No node exists with the given name', node_name)
            elif len(nodes) > 1:
                # Ambiguous name: warn, but proceed with the first match.
                log.warning(
                    'There are multiple nodes with the same node name (%s). ' +
                    'Multiple nodes are ' +
                    ', '.join(item['node_id'] for item in nodes) +
                    '. Choosing the first one as default (%s).',
                    node_name, nodes[0]['node_id'])
            return nodes[0]
        def getip(node_name):
            # Resolve a node name to its resource address via the info broker.
            return main_info_broker.get('node.resource.address',
                                        find_node_id(node_name))
        # As long as source_data is read-only, the following code is fine.
        # As it is used only for rendering a template, it is yet read-only.
        # If, for any reason, something starts modifying it, dict.update()-s
        # will have to be changed to deep copy to avoid side effects. (Just
        # like in Compiler, when node variables are assembled.)
        source_data = dict(node_id=self.node_id)
        source_data.update(node_desc)
        source_data.update(node_definition)
        source_data['ibget'] = main_info_broker.get
        source_data['find_node_id'] = find_node_id
        source_data['getip'] = getip
        #state = main_info_broker.get('node.find', infra_id=node_desc['infra_id'])
        #for node in state:
        #    source_data[node['node_description']['name']] = \
        #        dict(ip=main_info_broker.get('node.resource.address', node))
        return source_data
    def _resolve_node(self, node_definition):
        """
        Implementation of :meth:`Resolver.resolve_node`.

        Mutates *node_definition* in place, adding the resolved id, name,
        attributes and synchronisation attributes.
        """
        # Shorthands
        node_desc = self.node_description
        ib = self.info_broker
        node_id = self.node_id
        template_data = self.assemble_template_data(node_desc, node_definition)
        # Amend resolved node with new information
        data = {
            'node_id'     : node_id,
            'name'        : node_desc['name'],
            'infra_id'    : node_desc['infra_id'],
            'attributes'  : self.resolve_attributes(node_desc,
                                                    node_definition,
                                                    template_data),
            'synch_attrs' : self.extract_synch_attrs(node_desc),
        }
        node_definition.update(data)
@factory.register(ContextSchemaChecker, PROTOCOL_ID)
class BasicContextSchemaChecker(ContextSchemaChecker):
    """Schema checker for 'basic' contextualisation sections."""
    def __init__(self):
        # 'type' and 'attributes' are mandatory; no optional keys exist.
        self.req_keys = ["type", "attributes"]
        self.opt_keys = []
    def perform_check(self, data):
        """Validate *data*; raise :class:`SchemaError` on missing or
        unknown keys, return True otherwise."""
        missing_keys = ContextSchemaChecker.get_missing_keys(self, data, self.req_keys)
        if missing_keys:
            raise SchemaError(
                "Missing key(s): " + ', '.join(str(key) for key in missing_keys))
        invalid_keys = ContextSchemaChecker.get_invalid_keys(
            self, data, self.req_keys + self.opt_keys)
        if invalid_keys:
            raise SchemaError(
                "Unknown key(s): " + ', '.join(str(key) for key in invalid_keys))
        return True
| 38.276382 | 89 | 0.623211 |
15517541f35ed25032a1fdf6a893a3a2b212a4e5 | 1,215 | py | Python | src/networks.py | NaHenn/gclc | 5159c28688d179fa4031ef101d9a31556b71acc0 | [
"MIT"
] | null | null | null | src/networks.py | NaHenn/gclc | 5159c28688d179fa4031ef101d9a31556b71acc0 | [
"MIT"
] | null | null | null | src/networks.py | NaHenn/gclc | 5159c28688d179fa4031ef101d9a31556b71acc0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
networks for experiments
"""
import torch
import torch.nn as nn
class LayerNet(nn.Module):
    """Fully-connected network with ``d`` equally sized hidden layers.

    Parameters
    ----------
    nx : int
        number of input features (inputs are flattened from dim 1)
    nh : int
        hidden width per layer (ignored when ``d == 0``)
    ny : int
        number of output nodes
    act : callable
        activation applied after every hidden layer
    d : int
        number of hidden layers; ``d == 0`` gives a single linear map
    """
    def __init__(self, nx, nh, ny, act = torch.sigmoid, d = 2):
        super(LayerNet, self).__init__()
        self.act = act
        if d > 0:
            # First layer maps nx -> nh, the remaining d-1 layers nh -> nh.
            hidden = [nn.Linear(nx, nh)]
            hidden.extend(nn.Linear(nh, nh) for _ in range(d - 1))
            self.linears = nn.ModuleList(hidden)
            self.fc2 = nn.Linear(nh, ny)
        else:
            self.linears = []
            self.fc2 = nn.Linear(nx, ny)

    def forward(self, x):
        """Flatten the input and run it through the hidden stack.

        Side effects (kept from the original implementation): the first
        hidden activation is stored in ``self.z`` and the output in
        ``self.y``.
        """
        x = x.flatten(1)
        for idx, layer in enumerate(self.linears):
            x = self.act(layer(x))
            if idx == 0:
                self.z = x
        self.y = self.fc2(x)
        return self.y
| 26.413043 | 102 | 0.497119 |
34f5effd4f0944b833d1ec4e0405ad920ec0a3b7 | 655 | py | Python | examples/python/jrecdbc.py | pewo/jrecdb | 1e49efcff78036ffe2f2236003749f221d92ef71 | [
"MIT"
] | null | null | null | examples/python/jrecdbc.py | pewo/jrecdb | 1e49efcff78036ffe2f2236003749f221d92ef71 | [
"MIT"
] | null | null | null | examples/python/jrecdbc.py | pewo/jrecdb | 1e49efcff78036ffe2f2236003749f221d92ef71 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
try:
r = requests.get('https://127.0.0.1:4443/dbread?jobtype=autopostinstall&remove=0', verify=False, timeout=3)
r.raise_for_status()
except requests.exceptions.RequestException as err:
raise SystemExit(err)
content = r.text
#print("content: ",content)
json_data = {}
for line in content.splitlines():
#print("line: ", line)
try:
json_data = json.loads(line)
break
except:
pass
for k in json_data:
print(k, '->', json_data[k])
| 19.848485 | 109 | 0.728244 |
caefa7b7a77d6b73ada1f5689624eccb597e0196 | 553 | py | Python | cohesity_management_sdk/models/object_class_search_principals_enum.py | anoopbhat/management-sdk-python | 423d9079fd4b3a773f43882d1272181f5ec84f96 | [
"Apache-2.0"
] | 1 | 2021-01-07T20:36:22.000Z | 2021-01-07T20:36:22.000Z | cohesity_management_sdk/models/object_class_search_principals_enum.py | vloiseau/management-sdk-python | 56e80399691245e351692abc4c366fac84a82f96 | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/object_class_search_principals_enum.py | vloiseau/management-sdk-python | 56e80399691245e351692abc4c366fac84a82f96 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class ObjectClassSearchPrincipalsEnum(object):

    """Implementation of the 'objectClass_SearchPrincipals' enum.

    String constants naming the object classes accepted by the
    SearchPrincipals API.

    Attributes:
        KUSER: user principal object class.
        KGROUP: group principal object class.
        KCOMPUTER: computer principal object class.
        KWELLKNOWNPRINCIPAL: well-known principal object class.
    """

    KUSER = 'kUser'
    KGROUP = 'kGroup'
    KCOMPUTER = 'kComputer'
    KWELLKNOWNPRINCIPAL = 'kWellKnownPrincipal'
| 21.269231 | 65 | 0.674503 |
4267fd7c545bb4b865df98ac1a687e763623ba4b | 818 | py | Python | interface/migrations/0002_result.py | matteocao/django-project | 55ec084a622a696f3ec95e49e8f59357964c7d80 | [
"MIT"
] | 2 | 2021-01-25T10:48:28.000Z | 2021-01-25T12:05:21.000Z | interface/migrations/0002_result.py | lgaborini/django-project | 927395d41f48a50704fbaf36ba9729a8cfbccfd4 | [
"MIT"
] | null | null | null | interface/migrations/0002_result.py | lgaborini/django-project | 927395d41f48a50704fbaf36ba9729a8cfbccfd4 | [
"MIT"
] | 1 | 2021-01-25T10:48:20.000Z | 2021-01-25T10:48:20.000Z | # Generated by Django 3.1.5 on 2021-01-20 09:37
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.5 (see header).  Adds the Result model:
    # an image stored on a dedicated '/results' filesystem store, a JSON
    # diagram, and a FK to the parameters row that produced it.  Generated
    # migrations should not be edited by hand.
    dependencies = [
        ('interface', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Rendered image written to a FileSystemStorage rooted at /results.
                ('result', models.ImageField(storage=django.core.files.storage.FileSystemStorage(location='/results'), upload_to='')),
                ('diagram', models.JSONField()),
                # Deleting a parameters row cascades to its results.
                ('parameters', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='interface.parameters')),
            ],
        ),
    ]
| 32.72 | 134 | 0.625917 |
45855aacf8c05cb01d1bef1659c2fd836b33e0dd | 3,643 | py | Python | eridanusstd/weather.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | null | null | null | eridanusstd/weather.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | 5 | 2021-03-18T20:19:01.000Z | 2022-03-11T23:14:44.000Z | eridanusstd/weather.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | 1 | 2018-01-10T15:15:15.000Z | 2018-01-10T15:15:15.000Z | import dateutil.parser
from lxml import etree
from epsilon.structlike import record
from nevow.url import URL
from eridanus import util
class WundergroundConditions(
        record('displayLocation observationLocation observationTime condition '
               'temperature humidity pressure windSpeed windDirection windChill '
               'dewPoint heatIndex')):
    """Current weather conditions parsed from a Wunderground XML response.

    NOTE: Python 2 code (``unicode`` builtin in :attr:`display`).
    """
    @classmethod
    def fromElement(cls, node):
        """Build an instance from the <current_observation> element *node*.

        Missing/'NA' heat-index and wind-chill values become None.
        NOTE(review): integer fields use int() on the element text, which
        would raise for fractional values like '23.5' -- presumably the
        feed only emits whole numbers; confirm.
        """
        displayLocation = node.findtext('display_location/full')
        observationLocation = node.findtext('observation_location/full')
        try:
            observationTime = dateutil.parser.parse(
                node.findtext('observation_time_rfc822'))
        except ValueError:
            observationTime = None
        condition = node.findtext('weather')
        temp = int(node.findtext('temp_c'))
        humidity = node.findtext('relative_humidity')
        pressure = node.findtext('pressure_string')
        # Feed reports mph; convert to km/h.
        windSpeed = int(node.findtext('wind_mph')) * 1.609344
        windDirection = node.findtext('wind_dir')
        dewPoint = int(node.findtext('dewpoint_c'))
        heatIndex = node.findtext('heat_index_c')
        if heatIndex is None or heatIndex == 'NA':
            heatIndex = None
        else:
            heatIndex = int(heatIndex)
        windChill = node.findtext('windchill_c')
        if windChill is None or windChill == 'NA':
            windChill = None
        else:
            windChill = int(windChill)
        return cls(displayLocation=displayLocation,
                   observationLocation=observationLocation,
                   observationTime=observationTime,
                   condition=condition,
                   temperature=temp,
                   humidity=humidity,
                   pressure=pressure,
                   windSpeed=windSpeed,
                   windDirection=windDirection,
                   windChill=windChill,
                   dewPoint=dewPoint,
                   heatIndex=heatIndex)
    @property
    def display(self):
        """One-line human-readable summary; only set fields are included.

        Attribute names are wrapped in \\x02 (IRC bold) markers.
        """
        def temp(v):
            # Format a Celsius value with the degree sign.
            return u'%d\N{DEGREE SIGN}C' % (v,)
        def attrs():
            # Yield (label, value) pairs for every populated field.
            if self.temperature is not None:
                yield u'Temperature', temp(self.temperature)
            if self.condition:
                yield u'Conditions', self.condition
            if self.humidity:
                yield u'Humidity', self.humidity
            if self.dewPoint:
                yield u'Dew point', temp(self.dewPoint)
            if self.pressure:
                yield u'Pressure', self.pressure
            if self.windSpeed and self.windDirection:
                yield u'Wind', u'%s at %0.2fkm/h' % (
                    self.windDirection, self.windSpeed)
            if self.windChill:
                yield u'Wind chill', temp(self.windChill)
        timestring = u'<unknown>'
        if self.observationTime is not None:
            timestring = unicode(
                self.observationTime.strftime('%H:%M %Z on %d %B %Y'))
        params = u'; '.join(
            u'\002%s\002: %s' % (key, value) for key, value in attrs())
        return u'In %s (from %s) at %s: %s' % (
            self.displayLocation, self.observationLocation, timestring, params)
class Wunderground(object):
    """Minimal client for the (legacy) Wunderground auto/wui geo API."""
    API_ROOT = URL.fromString('http://api.wunderground.com/auto/wui/geo')
    @classmethod
    def current(cls, query):
        """Fetch current conditions for *query*.

        Returns a Deferred firing a :class:`WundergroundConditions`.
        NOTE: Python 2 only -- the first callback uses tuple-parameter
        unpacking in its lambda.
        """
        url = cls.API_ROOT.child('WXCurrentObXML').child('index.xml').add('query', query)
        return util.PerseverantDownloader(url).go(
            ).addCallback(lambda (data, headers): etree.fromstring(data)
            ).addCallback(WundergroundConditions.fromElement)
| 35.715686 | 89 | 0.59182 |
effc39b979004798b737ea70bdada7b1f04a98ac | 14,225 | py | Python | ee/tasks/test/test_calculate_cohort.py | ld-rale/posthog | 0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810 | [
"MIT"
] | null | null | null | ee/tasks/test/test_calculate_cohort.py | ld-rale/posthog | 0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810 | [
"MIT"
] | null | null | null | ee/tasks/test/test_calculate_cohort.py | ld-rale/posthog | 0fa5b18b2e940cf5cdbe8afc733eb7e3cd4ae810 | [
"MIT"
] | null | null | null | import json
import urllib.parse
from unittest.mock import patch
from uuid import uuid4
from freezegun import freeze_time
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.event import create_event
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.models.cohort import Cohort
from posthog.models.person import Person
from posthog.tasks.calculate_cohort import insert_cohort_from_insight_filter
from posthog.tasks.test.test_calculate_cohort import calculate_cohort_test_factory
def _create_event(**kwargs):
    """Forward to ``create_event`` with a freshly generated ``event_uuid``."""
    kwargs["event_uuid"] = uuid4()
    create_event(**kwargs)
def _create_person(**kwargs):
    """Create a Person row and return a detached Person keyed by its uuid."""
    created = Person.objects.create(**kwargs)
    return Person(id=created.uuid)
class TestClickhouseCalculateCohort(ClickhouseTestMixin, calculate_cohort_test_factory(_create_event, _create_person)): # type: ignore
@patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
def test_create_stickiness_cohort(self, _insert_cohort_from_insight_filter):
_create_person(team_id=self.team.pk, distinct_ids=["blabla"])
_create_event(
team=self.team,
event="$pageview",
distinct_id="blabla",
properties={"$math_prop": 1},
timestamp="2021-01-01T12:00:00Z",
)
response = self.client.post(
f"/api/projects/{self.team.id}/cohorts/?insight=STICKINESS&properties=%5B%5D&interval=day&display=ActionsLineGraph&events=%5B%7B%22id%22%3A%22%24pageview%22%2C%22name%22%3A%22%24pageview%22%2C%22type%22%3A%22events%22%2C%22order%22%3A0%7D%5D&shown_as=Stickiness&date_from=2021-01-01&entity_id=%24pageview&entity_type=events&stickiness_days=1&label=%24pageview",
{"name": "test", "is_static": True},
).json()
cohort_id = response["id"]
_insert_cohort_from_insight_filter.assert_called_once_with(
cohort_id,
{
"insight": "STICKINESS",
"properties": "[]",
"interval": "day",
"display": "ActionsLineGraph",
"events": '[{"id":"$pageview","name":"$pageview","type":"events","order":0}]',
"shown_as": "Stickiness",
"date_from": "2021-01-01",
"entity_id": "$pageview",
"entity_type": "events",
"stickiness_days": "1",
"label": "$pageview",
},
)
insert_cohort_from_insight_filter(
cohort_id,
{
"date_from": "2021-01-01",
"events": [
{
"id": "$pageview",
"type": "events",
"order": 0,
"name": "$pageview",
"custom_name": None,
"math": None,
"math_property": None,
"math_group_type_index": None,
"properties": [],
}
],
"insight": "STICKINESS",
"interval": "day",
"selected_interval": 1,
"shown_as": "Stickiness",
"entity_id": "$pageview",
"entity_type": "events",
"entity_math": None,
},
)
cohort = Cohort.objects.get(pk=cohort_id)
people = Person.objects.filter(cohort__id=cohort.pk)
self.assertEqual(people.count(), 1)
@patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
def test_create_trends_cohort(self, _insert_cohort_from_insight_filter):
_create_person(team_id=self.team.pk, distinct_ids=["blabla"])
with freeze_time("2021-01-01 00:06:34"):
_create_event(
team=self.team,
event="$pageview",
distinct_id="blabla",
properties={"$math_prop": 1},
timestamp="2021-01-01T12:00:00Z",
)
with freeze_time("2021-01-02 00:06:34"):
_create_event(
team=self.team,
event="$pageview",
distinct_id="blabla",
properties={"$math_prop": 4},
timestamp="2021-01-01T12:00:00Z",
)
response = self.client.post(
f"/api/projects/{self.team.id}/cohorts/?interval=day&display=ActionsLineGraph&events=%5B%7B%22id%22%3A%22%24pageview%22%2C%22name%22%3A%22%24pageview%22%2C%22type%22%3A%22events%22%2C%22order%22%3A0%7D%5D&properties=%5B%5D&entity_id=%24pageview&entity_type=events&date_from=2021-01-01&date_to=2021-01-01&label=%24pageview",
{"name": "test", "is_static": True},
).json()
cohort_id = response["id"]
_insert_cohort_from_insight_filter.assert_called_once_with(
cohort_id,
{
"interval": "day",
"display": "ActionsLineGraph",
"events": '[{"id":"$pageview","name":"$pageview","type":"events","order":0}]',
"properties": "[]",
"entity_id": "$pageview",
"entity_type": "events",
"date_from": "2021-01-01",
"date_to": "2021-01-01",
"label": "$pageview",
},
)
insert_cohort_from_insight_filter(
cohort_id,
{
"date_from": "2021-01-01",
"date_to": "2021-01-01",
"display": "ActionsLineGraph",
"events": [
{
"id": "$pageview",
"type": "events",
"order": 0,
"name": "$pageview",
"math": None,
"math_property": None,
"math_group_type_index": None,
"properties": [],
}
],
"entity_id": "$pageview",
"entity_type": "events",
"insight": "TRENDS",
"interval": "day",
},
)
cohort = Cohort.objects.get(pk=cohort_id)
people = Person.objects.filter(cohort__id=cohort.pk)
self.assertEqual(cohort.errors_calculating, 0)
self.assertEqual(
people.count(),
1,
{
"a": sync_execute(
"select person_id from person_static_cohort where team_id = {} and cohort_id = {} ".format(
self.team.id, cohort.pk
)
),
"b": sync_execute(
"select person_id from person_static_cohort FINAL where team_id = {} and cohort_id = {} ".format(
self.team.id, cohort.pk
)
),
},
)
@patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
def test_create_trends_cohort_arg_test(self, _insert_cohort_from_insight_filter):
# prior to 8124, subtitute parameters was called on insight cohorting which caused '%' in LIKE arguments to be interepreted as a missing parameter
_create_person(team_id=self.team.pk, distinct_ids=["blabla"])
with freeze_time("2021-01-01 00:06:34"):
_create_event(
team=self.team,
event="$pageview",
distinct_id="blabla",
properties={"$domain": "https://app.posthog.com/123"},
timestamp="2021-01-01T12:00:00Z",
)
with freeze_time("2021-01-02 00:06:34"):
_create_event(
team=self.team,
event="$pageview",
distinct_id="blabla",
properties={"$domain": "https://app.posthog.com/123"},
timestamp="2021-01-01T12:00:00Z",
)
params = {
"date_from": "2021-01-01",
"date_to": "2021-01-01",
"display": "ActionsLineGraph",
"events": json.dumps([{"id": "$pageview", "name": "$pageview", "type": "events", "order": 0}]),
"entity_id": "$pageview",
"entity_type": "events",
"insight": "TRENDS",
"interval": "day",
"properties": json.dumps(
[{"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}]
),
}
response = self.client.post(
f"/api/projects/{self.team.id}/cohorts/?{urllib.parse.urlencode(params)}",
{"name": "test", "is_static": True},
).json()
cohort_id = response["id"]
_insert_cohort_from_insight_filter.assert_called_once_with(
cohort_id,
{
"date_from": "2021-01-01",
"date_to": "2021-01-01",
"display": "ActionsLineGraph",
"events": '[{"id": "$pageview", "name": "$pageview", "type": "events", "order": 0}]',
"entity_id": "$pageview",
"entity_type": "events",
"insight": "TRENDS",
"interval": "day",
"properties": '[{"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}]',
},
)
insert_cohort_from_insight_filter(
cohort_id,
{
"date_from": "2021-01-01",
"date_to": "2021-01-01",
"display": "ActionsLineGraph",
"events": [
{
"id": "$pageview",
"type": "events",
"order": 0,
"name": "$pageview",
"math": None,
"math_property": None,
"math_group_type_index": None,
"properties": [],
}
],
"properties": [
{"key": "$domain", "value": "app.posthog.com", "operator": "icontains", "type": "event"}
],
"entity_id": "$pageview",
"entity_type": "events",
"insight": "TRENDS",
"interval": "day",
},
)
cohort = Cohort.objects.get(pk=cohort_id)
people = Person.objects.filter(cohort__id=cohort.pk)
self.assertEqual(cohort.errors_calculating, 0)
self.assertEqual(
people.count(),
1,
{
"a": sync_execute(
"select person_id from person_static_cohort where team_id = {} and cohort_id = {} ".format(
self.team.id, cohort.pk
)
),
"b": sync_execute(
"select person_id from person_static_cohort FINAL where team_id = {} and cohort_id = {} ".format(
self.team.id, cohort.pk
)
),
},
)
    @patch("posthog.tasks.calculate_cohort.insert_cohort_from_insight_filter.delay")
    def test_create_funnels_cohort(self, _insert_cohort_from_insight_filter):
        """Create a static cohort from a FUNNELS insight.

        Verifies that POSTing a cohort with funnel query params enqueues the
        celery task with the stringified query-string params, and that running
        the task synchronously materialises the person who completed the
        selected funnel step into the cohort.
        """
        _create_person(team_id=self.team.pk, distinct_ids=["blabla"])
        # One event per funnel step, on consecutive days.
        with freeze_time("2021-01-01 00:06:34"):
            _create_event(
                team=self.team,
                event="$pageview",
                distinct_id="blabla",
                properties={"$math_prop": 1},
                timestamp="2021-01-01T12:00:00Z",
            )
        with freeze_time("2021-01-02 00:06:34"):
            _create_event(
                team=self.team,
                event="$another_view",
                distinct_id="blabla",
                properties={"$math_prop": 4},
                timestamp="2021-01-02T12:00:00Z",
            )
        # Two-step funnel ($pageview -> $another_view); cohort membership is
        # taken from funnel_step=1.
        params = {
            "insight": "FUNNELS",
            "events": json.dumps(
                [
                    {
                        "id": "$pageview",
                        "math": None,
                        "name": "$pageview",
                        "type": "events",
                        "order": 0,
                        "properties": [],
                        "math_property": None,
                    },
                    {
                        "id": "$another_view",
                        "math": None,
                        "name": "$another_view",
                        "type": "events",
                        "order": 1,
                        "properties": [],
                        "math_property": None,
                    },
                ]
            ),
            "display": "FunnelViz",
            "interval": "day",
            "layout": "horizontal",
            "date_from": "2021-01-01",
            "date_to": "2021-01-07",
            "funnel_step": 1,
        }
        response = self.client.post(
            f"/api/projects/{self.team.id}/cohorts/?{urllib.parse.urlencode(params)}",
            {"name": "test", "is_static": True},
        ).json()
        cohort_id = response["id"]
        # The task receives query-string values, so funnel_step arrives as the
        # string "1" and events as a JSON-encoded string.
        _insert_cohort_from_insight_filter.assert_called_once_with(
            cohort_id,
            {
                "insight": "FUNNELS",
                "events": '[{"id": "$pageview", "math": null, "name": "$pageview", "type": "events", "order": 0, "properties": [], "math_property": null}, {"id": "$another_view", "math": null, "name": "$another_view", "type": "events", "order": 1, "properties": [], "math_property": null}]',
                "display": "FunnelViz",
                "interval": "day",
                "layout": "horizontal",
                "date_from": "2021-01-01",
                "date_to": "2021-01-07",
                "funnel_step": "1",
            },
        )
        # Run the (mocked-out) task synchronously to materialise the members.
        insert_cohort_from_insight_filter(
            cohort_id, params,
        )
        cohort = Cohort.objects.get(pk=cohort_id)
        people = Person.objects.filter(cohort__id=cohort.pk)
        self.assertEqual(cohort.errors_calculating, 0)
        self.assertEqual(people.count(), 1)
| 39.29558 | 373 | 0.486819 |
f8102ae8c7b534b234d0665fa86a743e2aeb4975 | 214,262 | py | Python | src/config/api-server/vnc_cfg_api_server/vnc_cfg_api_server.py | pawelzny/contrail-controller | 4950d3144cb8c422264ddb2a926cf4fe9e40b14d | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/vnc_cfg_api_server.py | pawelzny/contrail-controller | 4950d3144cb8c422264ddb2a926cf4fe9e40b14d | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/vnc_cfg_api_server.py | pawelzny/contrail-controller | 4950d3144cb8c422264ddb2a926cf4fe9e40b14d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
This is the main module in vnc_cfg_api_server package. It manages interaction
between http/rest, address management, authentication and database interfaces.
"""
from gevent import monkey
monkey.patch_all()
from gevent import hub
# from neutron plugin to api server, the request URL could be large.
# fix the const
import gevent.pywsgi
gevent.pywsgi.MAX_REQUEST_LINE = 65535
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import ConfigParser
import functools
import hashlib
import itertools
import signal
import netaddr
import os
import re
import random
import socket
import ast
from cfgm_common import jsonutils as json
from provision_defaults import *
import uuid
import copy
from pprint import pformat
from cStringIO import StringIO
from vnc_api.utils import AAA_MODE_VALID_VALUES
# import GreenletProfiler
from cfgm_common import vnc_cgitb
from kazoo.exceptions import LockTimeout
from attrdict import AttrDict
from distutils.util import strtobool
from cfgm_common import has_role
from cfgm_common import _obj_serializer_all
from cfgm_common.utils import _DEFAULT_ZK_COUNTER_PATH_PREFIX
from cfgm_common.utils import _DEFAULT_ZK_LOCK_PATH_PREFIX
from cfgm_common import is_uuid_like
from cfgm_common import SG_NO_RULE_FQ_NAME
from cfgm_common.uve.vnc_api.ttypes import VncApiLatencyStats, VncApiLatencyStatsLog
import time
import requests
import xml.etree.ElementTree as etree
from functools import partial
"""
Following is needed to silence warnings on every request when keystone
auth_token middleware + Sandesh is used. Keystone or Sandesh alone
do not produce these warnings.
Exception AttributeError: AttributeError(
"'_DummyThread' object has no attribute '_Thread__block'",)
in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored
See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
for more information.
"""
import threading
threading._DummyThread._Thread__stop = lambda x: 42
CONFIG_VERSION = '1.0'
import bottle
import utils
import context
from context import get_request, get_context, set_context, use_context
from context import ApiContext
from context import is_internal_request
from resources import initialize_all_server_resource_classes
from vnc_db import VncDbClient
import cfgm_common
from cfgm_common import ignore_exceptions
from cfgm_common.uve.vnc_api.ttypes import VncApiCommon, VncApiConfigLog,\
VncApiDebug, VncApiInfo, VncApiNotice, VncApiError
from cfgm_common.uve.vnc_api.ttypes import FabricJobExecution, FabricJobUve, \
PhysicalRouterJobExecution, PhysicalRouterJobUve
from cfgm_common import illegal_xml_chars_RE
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType,\
NodeTypeNames, INSTANCE_ID_DEFAULT, TagTypeNameToId,\
TAG_TYPE_NOT_UNIQUE_PER_OBJECT, TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP,\
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT, SECURITY_OBJECT_TYPES
from provision_defaults import Provision
from vnc_quota import *
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
from vnc_api.gen.vnc_api_client_gen import all_resource_type_tuples
import cfgm_common
from cfgm_common.utils import cgitb_hook
from cfgm_common.rest import LinkObject, hdr_server_tenant
from cfgm_common.exceptions import *
from cfgm_common.vnc_extensions import ExtensionManager
import vnc_addr_mgmt
import vnc_auth
import vnc_auth_keystone
import vnc_perms
import vnc_rbac
from cfgm_common.uve.cfgm_cpuinfo.ttypes import ModuleCpuState, ModuleCpuStateTrace
from cfgm_common.buildinfo import build_info
from cfgm_common.vnc_api_stats import log_api_stats
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
# from gen_py.vnc_api.ttypes import *
import netifaces
from pysandesh.connection_info import ConnectionState
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from sandesh.traces.ttypes import RestApiTrace
from vnc_bottle import get_bottle_server
from cfgm_common.vnc_greenlets import VncGreenlet
from cfgm_common.kombu_amqp import KombuAmqpClient
# Non-CRUD action endpoints exposed by the API server. Each entry maps a URI
# to the VncApiServer method implementing it; the routes are registered in
# VncApiServer.__new__ (HTTP method defaults to POST there when omitted).
_ACTION_RESOURCES = [
    {'uri': '/prop-collection-get', 'link_name': 'prop-collection-get',
     'method': 'GET', 'method_name': 'prop_collection_http_get'},
    {'uri': '/prop-collection-update', 'link_name': 'prop-collection-update',
     'method': 'POST', 'method_name': 'prop_collection_http_post'},
    {'uri': '/ref-update', 'link_name': 'ref-update',
     'method': 'POST', 'method_name': 'ref_update_http_post'},
    {'uri': '/ref-relax-for-delete', 'link_name': 'ref-relax-for-delete',
     'method': 'POST', 'method_name': 'ref_relax_for_delete_http_post'},
    {'uri': '/fqname-to-id', 'link_name': 'name-to-id',
     'method': 'POST', 'method_name': 'fq_name_to_id_http_post'},
    {'uri': '/id-to-fqname', 'link_name': 'id-to-name',
     'method': 'POST', 'method_name': 'id_to_fq_name_http_post'},
    {'uri': '/useragent-kv', 'link_name': 'useragent-keyvalue',
     'method': 'POST', 'method_name': 'useragent_kv_http_post'},
    {'uri': '/db-check', 'link_name': 'database-check',
     'method': 'POST', 'method_name': 'db_check'},
    {'uri': '/fetch-records', 'link_name': 'fetch-records',
     'method': 'POST', 'method_name': 'fetch_records'},
    {'uri': '/start-profile', 'link_name': 'start-profile',
     'method': 'POST', 'method_name': 'start_profile'},
    {'uri': '/stop-profile', 'link_name': 'stop-profile',
     'method': 'POST', 'method_name': 'stop_profile'},
    {'uri': '/list-bulk-collection', 'link_name': 'list-bulk-collection',
     'method': 'POST', 'method_name': 'list_bulk_collection_http_post'},
    {'uri': '/obj-perms', 'link_name': 'obj-perms',
     'method': 'GET', 'method_name': 'obj_perms_http_get'},
    {'uri': '/chown', 'link_name': 'chown',
     'method': 'POST', 'method_name': 'obj_chown_http_post'},
    {'uri': '/chmod', 'link_name': 'chmod',
     'method': 'POST', 'method_name': 'obj_chmod_http_post'},
    {'uri': '/aaa-mode', 'link_name': 'aaa-mode',
     'method': 'PUT', 'method_name': 'aaa_mode_http_put'},
    # obj-cache is reachable with both GET and POST.
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'GET', 'method_name': 'dump_cache'},
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'POST', 'method_name': 'dump_cache'},
    {'uri': '/execute-job', 'link_name': 'execute-job',
     'method': 'POST', 'method_name': 'execute_job_http_post'},
    {'uri': '/amqp-publish', 'link_name': 'amqp-publish',
     'method': 'POST', 'method_name': 'amqp_publish_http_post'},
    {'uri': '/amqp-request', 'link_name': 'amqp-request',
     'method': 'POST', 'method_name': 'amqp_request_http_post'},
]
# Required properties that are actually enforced on CREATE; see
# VncApiServer._check_mandatory_props_list / _validate_props_in_request
# (other schema-'required' properties are tolerated when absent).
_MANDATORY_PROPS = [
    'loadbalancer_healthmonitor_properties',
]
# Bottle error handlers: return err.body unchanged so API clients receive the
# payload attached to the raised HttpError rather than bottle's default HTML
# error page. All but 412 are registered through the error_handler dict in
# VncApiServer.__new__; 412 is bound with the @bottle.error decorator below.
def error_400(err):
    return err.body
# end error_400
def error_403(err):
    return err.body
# end error_403
def error_404(err):
    return err.body
# end error_404
def error_405(err):
    return err.body
# end error_405
def error_409(err):
    return err.body
# end error_409
@bottle.error(412)
def error_412(err):
    return err.body
# end error_412
def error_500(err):
    return err.body
# end error_500
def error_503(err):
    return err.body
# end error_503
class VncApiServer(object):
"""
This is the manager class co-ordinating all classes present in the package
"""
_INVALID_NAME_CHARS = set(':')
_GENERATE_DEFAULT_INSTANCE = [
'namespace',
'project',
'virtual_network', 'virtual-network',
'network_ipam', 'network-ipam',
]
JOB_REQUEST_EXCHANGE = "job_request_exchange"
JOB_REQUEST_ROUTING_KEY = "job.request"
    def __new__(cls, *args, **kwargs):
        """Construct the server and wire up routes/error handlers.

        Done in __new__ (before __init__) so the bottle application is fully
        routed as soon as the instance exists.
        """
        obj = super(VncApiServer, cls).__new__(cls, *args, **kwargs)
        obj.api_bottle = bottle.Bottle()
        obj.route('/', 'GET', obj.homepage_http_get)
        # Return the raw error body for these statuses instead of bottle's
        # HTML page; 412 is registered separately via @bottle.error.
        obj.api_bottle.error_handler = {
            400: error_400,
            403: error_403,
            404: error_404,
            405: error_405,
            409: error_409,
            500: error_500,
            503: error_503,
        }
        # Generate the per-resource-type CRUD handlers and their routes.
        cls._generate_resource_crud_methods(obj)
        cls._generate_resource_crud_uri(obj)
        # Register the non-CRUD action endpoints (HTTP method defaults POST).
        for act_res in _ACTION_RESOURCES:
            http_method = act_res.get('method', 'POST')
            method_name = getattr(obj, act_res['method_name'])
            obj.route(act_res['uri'], http_method, method_name)
        return obj
    # end __new__
    @classmethod
    def _validate_complex_type(cls, dict_cls, dict_body):
        """Validate (and normalize in place) a complex-typed dict payload.

        Recursively checks dict_body against the generated schema metadata of
        dict_cls: unknown fields, list-ness for array fields, nested complex
        types, and simple-type restrictions. Simple values may be coerced
        (e.g. str -> int) and written back. Raises ValueError on the first
        violation; a None body is accepted as-is.
        """
        if dict_body is None:
            return
        for key, value in dict_body.items():
            if key not in dict_cls.attr_fields:
                raise ValueError('class %s does not have field %s' % (
                    str(dict_cls), key))
            attr_type_vals = dict_cls.attr_field_type_vals[key]
            attr_type = attr_type_vals['attr_type']
            restrictions = attr_type_vals['restrictions']
            is_array = attr_type_vals.get('is_array', False)
            # Empty / falsy values need no further validation.
            if not value:
                continue
            # Normalize to a list so scalar and array fields share one path.
            if is_array:
                if not isinstance(value, list):
                    raise ValueError('Field %s must be a list. Received value: %s'
                                     % (key, str(value)))
                values = value
            else:
                values = [value]
            if attr_type_vals['is_complex']:
                attr_cls = cfgm_common.utils.str_to_class(attr_type, __name__)
                for item in values:
                    # Extra semantic checks for specific complex types.
                    if attr_type == 'AllowedAddressPair':
                        cls._validate_allowed_address_pair_prefix_len(item)
                    if attr_type == 'SubnetType':
                        cls._validate_subnet_type(item)
                    cls._validate_complex_type(attr_cls, item)
            else:
                simple_type = attr_type_vals['simple_type']
                for idx, item in enumerate(values):
                    # _validate_simple_type may coerce the value; store back.
                    values[idx] = cls._validate_simple_type(key, attr_type,
                                                            simple_type, item,
                                                            restrictions)
                if not is_array:
                    dict_body[key] = values[0]
    # end _validate_complex_type
@staticmethod
def _validate_subnet_type(subnet):
try:
cidr_str = '%s/%s' % (subnet['ip_prefix'], subnet['ip_prefix_len'])
except TypeError:
raise ValueError("Subnet type is invalid")
try:
cidr = netaddr.IPNetwork(cidr_str)
except netaddr.core.AddrFormatError:
raise ValueError("Subnet type '%s' is invalid" % cidr_str)
subnet['ip_prefix'] = str(cidr.network)
subnet['ip_prefix_len'] = cidr.prefixlen
@classmethod
def _validate_allowed_address_pair_prefix_len(cls, value):
'''Do not allow configuration of AAP with
IPv4 prefix length less than 24 and 120 for IPv6.
LP #1720118
'''
if value['address_mode'] == 'active-standby':
ip_net_family = netaddr.IPNetwork(value['ip']['ip_prefix']).version
if ip_net_family == 6 and value['ip']['ip_prefix_len'] < 120:
raise ValueError('IPv6 Prefix length lesser than 120 is'
' is not acceptable')
if ip_net_family == 4 and value['ip']['ip_prefix_len'] < 24:
raise ValueError('IPv4 Prefix length lesser than 24'
' is not acceptable')
# end _validate_allowed_address_pair_prefix_len
@classmethod
def _validate_communityattribute_type(cls, value):
poss_values = ["no-export",
"accept-own",
"no-advertise",
"no-export-subconfed",
"no-reoriginate"]
if value in poss_values:
return
res = re.match('[0-9]+:[0-9]+', value)
if res is None:
raise ValueError('Invalid community format %s. '
'Change to \'number:number\''
% value)
asn = value.split(':')
if int(asn[0]) > 65535:
raise ValueError('Out of range ASN value %s. '
'ASN values cannot exceed 65535.'
% value)
@classmethod
def _validate_serviceinterface_type(cls, value):
poss_values = ["management",
"left",
"right"]
if value in poss_values:
return
res = re.match('other[0-9]*', value)
if res is None:
raise ValueError('Invalid service interface type %s. '
'Valid values are: management|left|right|other[0-9]*'
% value)
    def validate_execute_job_input_params(self, request_params):
        """Sanity-check the payload of an execute-job request.

        Ensures a job template is identified (by uuid or fq_name; the
        missing one is resolved and filled into request_params) and that an
        optional params.device_list is a list of valid uuid strings.

        :raises HttpError: 400 on malformed input, 404 on unknown template
        """
        device_list = None
        job_template_id = request_params.get('job_template_id')
        job_template_fq_name = request_params.get('job_template_fq_name')
        if not (job_template_id or job_template_fq_name):
            err_msg = "Either job_template_id or job_template_fq_name" \
                      " required in request"
            raise cfgm_common.exceptions.HttpError(400, err_msg)
        # check if the job template id is a valid uuid
        if job_template_id:
            if self.invalid_uuid(job_template_id):
                msg = 'Invalid job-template uuid type %s. uuid type required' \
                      % job_template_id
                raise cfgm_common.exceptions.HttpError(400, msg)
            try:
                # Resolve and record the fq_name for the supplied uuid.
                job_template_fqname = self._db_conn.uuid_to_fq_name(
                    job_template_id)
                request_params['job_template_fq_name'] = job_template_fqname
            except NoIdError as no_id_exec:
                raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
            except Exception as e:
                msg = "Error while reading job_template_id: " + str(e)
                raise cfgm_common.exceptions.HttpError(400, msg)
        else:
            # check if the job template fqname is a valid fq_name
            try:
                # Resolve and record the uuid for the supplied fq_name.
                job_template_id = self._db_conn.fq_name_to_uuid(
                    "job_template", job_template_fq_name)
                request_params['job_template_id'] = job_template_id
            except NoIdError as no_id_exec:
                raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
            except Exception as e:
                msg = "Error while reading job_template_fqname: " + str(e)
                raise cfgm_common.exceptions.HttpError(400, msg)
        extra_params = request_params.get('params')
        if extra_params is not None:
            device_list = extra_params.get('device_list')
            if device_list:
                if not isinstance(device_list, list):
                    err_msg = "malformed request param: device_list, " \
                              "expects list"
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
                for device_id in device_list:
                    if not isinstance(device_id, basestring):
                        err_msg = "malformed request param: device_list, " \
                                  "expects list of string device_uuids," \
                                  " found device_uuid %s" % device_id
                        raise cfgm_common.exceptions.HttpError(400, err_msg)
                    # check if the device id passed is a valid uuid
                    if self.invalid_uuid(device_id):
                        msg = 'Invalid device uuid type %s.' \
                              ' uuid type required' % device_id
                        raise cfgm_common.exceptions.HttpError(400, msg)
    def execute_job_http_post(self):
        ''' Handle POST /execute-job: hand a job request to the job manager.

        Payload of execute_job:
            job_template_id (Mandatory if no job_template_fq_name): <uuid> of
                the created job_template
            job_template_fq_name (Mandatory if no job_template_id): fqname in
                the format: ["<global-system-config-name>",
                             "<name of the job-template>"]
            input (Type json): Input Schema of the playbook under the
                job_template_id
            params (Type json): Extra_params for the job_manager
                (Eg. device_list)
        E.g. Payload:
            {
                "job_template_id": "<uuid>",
                "params": {
                    "device_list": ["<device_uuid1>", "<device_uuid2>", ....
                                    "<device_uuidn>"]
                }
            }

        Returns {'job_execution_id': <id>} once the request is published to
        RabbitMQ; execution itself is asynchronous.
        '''
        try:
            self.config_log("Entered execute-job",
                            level=SandeshLevel.SYS_INFO)
            # check if the job manager functionality is enabled
            if not self._args.enable_fabric_ansible:
                err_msg = "Fabric ansible job manager is disabled. " \
                          "Please enable it by setting the " \
                          "'enable_fabric_ansible' to True in the conf file"
                raise cfgm_common.exceptions.HttpError(405, err_msg)
            request_params = get_request().json
            msg = "Job Input %s " % json.dumps(request_params)
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            # some basic validation like checking if UUIDs in input are
            # syntactically valid
            self.validate_execute_job_input_params(request_params)
            # get the auth token
            auth_token = get_request().get_header('X-Auth-Token')
            request_params['auth_token'] = auth_token
            # get cluster id
            contrail_cluster_id = get_request().get_header('X-Cluster-ID')
            request_params['contrail_cluster_id'] = contrail_cluster_id
            # get the API config node ip list (cached across requests on
            # self._config_node_list; fetched from the db on first use)
            if not self._config_node_list:
                (ok, cfg_node_list, _) = self._db_conn.dbe_list(
                    'config_node', field_names=['config_node_ip_address'])
                if not ok:
                    raise cfgm_common.exceptions.HttpError(
                        500, 'Error in dbe_list while getting the '
                        'config_node_ip_address'
                        ' %s' % cfg_node_list)
                if not cfg_node_list:
                    err_msg = "Config-Node list empty"
                    raise cfgm_common.exceptions.HttpError(404, err_msg)
                for node in cfg_node_list:
                    self._config_node_list.append(node.get(
                        'config_node_ip_address'))
            request_params['api_server_host'] = self._config_node_list
            # generate the job execution id: <epoch millis>_<uuid4>
            execution_id = str(int(round(time.time() * 1000))) + '_' + \
                str(uuid.uuid4())
            request_params['job_execution_id'] = execution_id
            # publish message to RabbitMQ
            self.publish_job_request(request_params, execution_id)
            self.config_log("Published job message to RabbitMQ."
                            " Execution id: %s" % execution_id,
                            level=SandeshLevel.SYS_INFO)
            return {'job_execution_id': str(execution_id)}
        except cfgm_common.exceptions.HttpError as e:
            # NOTE(review): re-raise only - makes explicit that HttpError
            # propagates to the client unchanged.
            raise
def publish_job_request(self, request_params, job_execution_id):
try:
self._amqp_client.publish(
request_params, self.JOB_REQUEST_EXCHANGE,
routing_key=self.JOB_REQUEST_ROUTING_KEY,
serializer='json', retry=True,
retry_policy={'max_retries': 12,
'interval_start': 2,
'interval_step': 5,
'interval_max': 15})
except Exception as e:
msg = "Failed to send job request via RabbitMQ" \
" %s %s" % (job_execution_id, repr(e))
raise cfgm_common.exceptions.HttpError(500, msg)
    def amqp_publish_http_post(self):
        ''' Handle POST /amqp-publish: fire-and-forget AMQP publish.

        Payload of amqp-publish:
            exchange (Type string) (mandatory): name of the exchange
            exchange_type (Type string) (mandatory): type of the exchange
            routing_key (Type string): routing key for the message
            headers (Type dict): headers for the message
            payload (Type object): the message
        '''
        self.config_log("Entered amqp-publish",
                        level=SandeshLevel.SYS_INFO)
        body = get_request().json
        msg = "Amqp publish %s " % json.dumps(body)
        self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        # Declare the exchange on first use.
        if self._amqp_client.get_exchange(body.get('exchange')) is None:
            self._amqp_client.add_exchange(body.get('exchange'),
                                           type=body.get('exchange_type'))
        self._amqp_client.publish(body.get('payload'),
                                  body.get('exchange'),
                                  routing_key=body.get('routing_key'),
                                  headers=body.get('headers'))
        # 202 Accepted: message published, delivery is asynchronous.
        bottle.response.status = 202
        self.config_log("Exiting amqp-publish", level=SandeshLevel.SYS_DEBUG)
    # end amqp_publish_http_post
    def amqp_request_http_post(self):
        ''' Handle POST /amqp-request: AMQP request/response round trip.

        Payload of amqp-request:
            exchange (Type string) (mandatory): name of the exchange
            exchange_type (Type string) (mandatory): type of the exchange
            routing_key (Type string): routing key for the message
            response_key (Type string): routing key for the response message
            headers (Type dict): headers for the message
            payload (Type object): the message

        Publishes the message, then blocks (up to the configured
        amqp_timeout) for a response on response_key; 200 with the response
        body on success, 500 with an empty body on timeout.
        '''
        self.config_log("Entered amqp-request",
                        level=SandeshLevel.SYS_INFO)
        body = get_request().json
        msg = "Amqp request %s " % json.dumps(body)
        self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        # Declare the exchange on first use.
        if self._amqp_client.get_exchange(body.get('exchange')) is None:
            self._amqp_client.add_exchange(body.get('exchange'),
                                           type=body.get('exchange_type'))
        # Unique, per-request consumer bound to the response routing key.
        consumer = 'amqp_request.%s.%s' % (socket.getfqdn(), str(uuid.uuid4()))
        amqp_worker = VncApiServer.AmqpWorker()
        self._amqp_client.add_consumer(consumer, body.get('exchange'),
                                       routing_key=body.get('response_key'),
                                       callback=amqp_worker.handle_message)
        self._amqp_client.publish(body.get('payload'),
                                  body.get('exchange'),
                                  routing_key=body.get('routing_key'),
                                  headers=body.get('headers'))
        try:
            # Wait for the worker to signal arrival of the response.
            amqp_worker.queue.get(block=True, timeout=self._args.amqp_timeout)
            bottle.response.status = 200
        except gevent.queue.Empty:
            bottle.response.status = 500
        finally:
            # Always tear down the temporary consumer.
            self._amqp_client.remove_consumer(consumer)
        msg = "Amqp response, status %s, body %s " % (bottle.response.status,
                                                      json.dumps(amqp_worker.body))
        self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        return amqp_worker.body
    # end amqp_request_http_post
    class AmqpWorker(object):
        """One-shot AMQP consumer helper for amqp_request_http_post.

        Stores the first message body received and signals the waiting
        greenlet through a single-slot queue.
        """
        def __init__(self):
            # maxsize=1: only the first response is consumed.
            self.queue = gevent.queue.Queue(maxsize=1)
            self.body = None
        # end __init__
        def handle_message(self, body, message):
            # Ack before handing the body to the waiting request greenlet.
            message.ack()
            self.body = body
            self.queue.put_nowait(True)
        # end handle_message
    # end AmqpWorker
    @classmethod
    def _validate_simple_type(cls, type_name, xsd_type, simple_type, value, restrictions=None):
        """Validate (and possibly coerce) a simple-typed value.

        Returns the validated value - integers may be coerced from strings -
        or None when value is None. Raises ValueError when the value does
        not match the xsd type or violates restrictions (an inclusive
        [min, max] pair for integers; an allowed-value list for strings).
        """
        if value is None:
            return
        elif xsd_type in ('unsignedLong', 'integer'):
            if not isinstance(value, (int, long)):
                # If value is not an integer, then try to convert it to integer
                try:
                    value = int(value)
                except (TypeError, ValueError):
                    raise ValueError('%s: integer value expected instead of %s' %(
                        type_name, value))
            if restrictions:
                if not (int(restrictions[0]) <= value <= int(restrictions[1])):
                    raise ValueError('%s: value must be between %s and %s' %(
                        type_name, restrictions[0], restrictions[1]))
        elif xsd_type == 'boolean':
            if not isinstance(value, bool):
                raise ValueError('%s: true/false expected instead of %s' %(
                    type_name, value))
        elif xsd_type == 'string' and simple_type == 'CommunityAttribute':
            cls._validate_communityattribute_type(value)
        elif xsd_type == 'string' and simple_type == 'ServiceInterfaceType':
            cls._validate_serviceinterface_type(value)
        else:
            # Any other xsd type is treated as a plain string.
            if not isinstance(value, basestring):
                raise ValueError('%s: string value expected instead of %s' %(
                    type_name, value))
            if restrictions and value not in restrictions:
                raise ValueError('%s: value must be one of %s' % (
                    type_name, str(restrictions)))
        return value
    # end _validate_simple_type
    def _check_mandatory_props_list(self, prop_name):
        # True when the property must be present on CREATE; see the
        # module-level _MANDATORY_PROPS list.
        return prop_name in _MANDATORY_PROPS
    # end _check_mandatory_props_list
    def _validate_props_in_request(self, resource_class, obj_dict, operation):
        """Validate every schema-known property present in a request body.

        Simple-typed properties may be coerced in place (e.g. str -> int);
        complex-typed properties are validated recursively. Returns
        (True, '') on success or (False, error_msg) on the first failure.

        :param operation: 'CREATE' or 'UPDATE'; on CREATE a required
            property is only enforced if it is also in _MANDATORY_PROPS
        """
        for prop_name in resource_class.prop_fields:
            prop_field_types = resource_class.prop_field_types[prop_name]
            is_simple = not prop_field_types['is_complex']
            prop_type = prop_field_types['xsd_type']
            restrictions = prop_field_types['restrictions']
            simple_type = prop_field_types['simple_type']
            # NOTE(review): is_list_prop/is_map_prop are computed but not
            # used in this method - confirm whether list/map-wrapped complex
            # values were meant to be handled below.
            is_list_prop = prop_name in resource_class.prop_list_fields
            is_map_prop = prop_name in resource_class.prop_map_fields
            prop_value = obj_dict.get(prop_name)
            if not prop_value:
                if operation == 'CREATE' and (
                    prop_field_types['required'] == 'required'):
                    if self._check_mandatory_props_list(prop_name):
                        err_msg = '%s property is missing' %prop_name
                        return False, err_msg
                continue
            if is_simple:
                try:
                    # May coerce the value, so the result is stored back.
                    obj_dict[prop_name] = self._validate_simple_type(prop_name,
                                              prop_type, simple_type,
                                              prop_value, restrictions)
                except Exception as e:
                    err_msg = 'Error validating property ' + str(e)
                    return False, err_msg
                else:
                    continue
            prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
            if isinstance(prop_value, dict):
                try:
                    self._validate_complex_type(prop_cls, prop_value)
                except Exception as e:
                    err_msg = 'Error validating property %s value %s ' %(
                        prop_name, prop_value)
                    err_msg += str(e)
                    return False, err_msg
            else: # complex-type + value isn't dict or wrapped in list or map
                err_msg = 'Error in property %s type %s value of %s ' %(
                    prop_name, prop_cls, prop_value)
                return False, err_msg
        # end for all properties
        return True, ''
    # end _validate_props_in_request
def _validate_refs_in_request(self, resource_class, obj_dict):
for ref_name in resource_class.ref_fields:
ref_fld_types_list = list(resource_class.ref_field_types[ref_name])
ref_link_type = ref_fld_types_list[1]
if ref_link_type == 'None':
continue
attr_cls = cfgm_common.utils.str_to_class(ref_link_type, __name__)
for ref_dict in obj_dict.get(ref_name) or []:
try:
self._validate_complex_type(attr_cls, ref_dict['attr'])
except Exception as e:
err_msg = 'Error validating reference %s value %s ' \
%(ref_name, ref_dict)
err_msg += str(e)
return False, err_msg
return True, ''
# end _validate_refs_in_request
    def _validate_perms_in_request(self, resource_class, obj_type, obj_dict):
        """Check link permission on every reference in a request body.

        Resolves each referenced object's uuid (from its 'uuid' key, or by
        fq_name lookup) and raises HttpError when the caller lacks link
        permission on it. May also raise NoIdError from the fq_name lookup.
        """
        for ref_name in resource_class.ref_fields:
            for ref in obj_dict.get(ref_name) or []:
                try:
                    ref_uuid = ref['uuid']
                except KeyError:
                    # ref_name is '<type>_refs'; strip the 5-char suffix to
                    # get the object type for the fq_name lookup.
                    ref_uuid = self._db_conn.fq_name_to_uuid(ref_name[:-5],
                                                             ref['to'])
                (ok, status) = self._permissions.check_perms_link(
                    get_request(), ref_uuid)
                if not ok:
                    (code, err_msg) = status
                    raise cfgm_common.exceptions.HttpError(code, err_msg)
    # end _validate_perms_in_request
def _validate_resource_type(self, type):
try:
r_class = self.get_resource_class(type)
return r_class.resource_type, r_class
except TypeError:
raise cfgm_common.exceptions.HttpError(
404, "Resource type '%s' not found" % type)
# end _validate_resource_type
    def _ensure_services_conn(
            self, api_name, obj_type, obj_uuid=None, obj_fq_name=None):
        """Refuse state-changing operations when backend services lag.

        :raises HttpError: 503 when the zookeeper session is down; 500 when
            pending rabbitmq updates exceed rabbit_max_pending_updates
        """
        # If not connected to zookeeper do not allow operations that
        # causes the state change
        if not self._db_conn._zk_db.is_connected():
            errmsg = 'No connection to zookeeper.'
            fq_name_str = ':'.join(obj_fq_name or [])
            self.config_object_error(
                obj_uuid, fq_name_str, obj_type, api_name, errmsg)
            raise cfgm_common.exceptions.HttpError(503, errmsg)
        # If there are too many pending updates to rabbit, do not allow
        # operations that cause state change
        npending = self._db_conn.dbe_oper_publish_pending()
        if (npending >= int(self._args.rabbit_max_pending_updates)):
            err_str = str(MaxRabbitPendingError(npending))
            raise cfgm_common.exceptions.HttpError(500, err_str)
    # end _ensure_services_conn
    def undo(self, result, obj_type, id=None, fq_name=None, counter=None, value=0):
        """Roll back a partially-completed stateful operation.

        Invokes the undo callbacks pushed onto the request context and logs
        a config error against the stage that failed.

        :param result: (http_code, error_msg) tuple from the failed step
        :param counter: quota counter to restore, if quota was charged
        :param value: amount to add back to the counter
        """
        (code, msg) = result
        if counter:
            # Presumably a zookeeper-backed quota counter whose __add__
            # persists the increment (see self.quota_counter usage) - the
            # local rebinding alone would otherwise have no effect; confirm.
            counter = counter + value
        get_context().invoke_undo(code, msg, self.config_log)
        failed_stage = get_context().get_state()
        self.config_object_error(
            id, fq_name, obj_type, failed_stage, msg)
    # end undo
# http_resource_<oper> - handlers invoked from
# a. bottle route (on-the-wire) OR
# b. internal requests
# using normalized get_request() from ApiContext
@log_api_stats
def http_resource_create(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
obj_dict = get_request().json[resource_type]
# check visibility
user_visible = (obj_dict.get('id_perms') or {}).get('user_visible', True)
if not user_visible and not self.is_admin_request():
result = 'This object is not visible by users'
self.config_object_error(None, None, obj_type, 'http_post', result)
raise cfgm_common.exceptions.HttpError(400, result)
self._post_validate(obj_type, obj_dict=obj_dict)
fq_name = obj_dict['fq_name']
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# properties validator
ok, result = self._validate_props_in_request(r_class,
obj_dict, operation='CREATE')
if not ok:
result = 'Bad property in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
ok, result = self._validate_refs_in_request(r_class, obj_dict)
if not ok:
result = 'Bad reference in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
get_context().set_state('PRE_DBE_ALLOC')
# type-specific hook
ok, result = r_class.pre_dbe_alloc(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
name = obj_dict['fq_name'][-1]
fq_name = obj_dict['fq_name']
# common handling for all resource create
(ok, result) = self._post_common(obj_type, obj_dict)
if not ok:
(code, msg) = result
fq_name_str = ':'.join(obj_dict.get('fq_name', []))
self.config_object_error(None, fq_name_str, obj_type, 'http_post',
msg)
raise cfgm_common.exceptions.HttpError(code, msg)
uuid_in_req = result
# no ref to a pending deleted resource
ok, result = r_class.no_pending_deleted_resource_in_refs(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Can abort resource creation and retrun 202 status code
get_context().set_state('PENDING_DBE_CREATE')
ok, result = r_class.pending_dbe_create(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
if ok and isinstance(result, tuple) and result[0] == 202:
# Creation accepted but not applied, pending delete return 202 HTTP
# OK code to aware clients
pending_obj_dict = result[1]
bottle.response.status = 202
rsp_body = {}
rsp_body['fq_name'] = pending_obj_dict['fq_name']
rsp_body['uuid'] = pending_obj_dict['uuid']
rsp_body['name'] = pending_obj_dict['fq_name'][-1]
rsp_body['href'] = self.generate_url(resource_type,
pending_obj_dict['uuid'])
rsp_body['parent_type'] = pending_obj_dict['parent_type']
rsp_body['parent_uuid'] = pending_obj_dict['parent_uuid']
rsp_body['parent_href'] = self.generate_url(
pending_obj_dict['parent_type'],pending_obj_dict['parent_uuid'])
return {resource_type: rsp_body}
db_conn = self._db_conn
# if client gave parent_type of config-root, ignore and remove
if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
del obj_dict['parent_type']
parent_class = None
if 'parent_type' in obj_dict:
# non config-root child, verify parent exists
parent_res_type, parent_class = self._validate_resource_type(
obj_dict['parent_type'])
parent_obj_type = parent_class.object_type
parent_res_type = parent_class.resource_type
parent_fq_name = obj_dict['fq_name'][:-1]
try:
parent_uuid = self._db_conn.fq_name_to_uuid(parent_obj_type,
parent_fq_name)
(ok, status) = self._permissions.check_perms_write(
get_request(), parent_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
self._permissions.set_user_role(get_request(), obj_dict)
obj_dict['parent_uuid'] = parent_uuid
except NoIdError:
err_msg = 'Parent %s type %s does not exist' % (
pformat(parent_fq_name), parent_res_type)
fq_name_str = ':'.join(parent_fq_name)
self.config_object_error(None, fq_name_str, obj_type, 'http_post', err_msg)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Validate perms on references
try:
self._validate_perms_in_request(r_class, obj_type, obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
400, 'Unknown reference in resource create %s.' %(obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
quota_counter = []
def stateful_create():
get_context().set_state('DBE_ALLOC')
# Alloc and Store id-mappings before creating entry on pubsub store.
# Else a subscriber can ask for an id mapping before we have stored it
(ok, result) = db_conn.dbe_alloc(obj_type, obj_dict, uuid_in_req)
if not ok:
return (ok, result)
get_context().push_undo(db_conn.dbe_release, obj_type, fq_name)
obj_id = result
env = get_request().headers.environ
tenant_name = env.get(hdr_server_tenant()) or 'default-project'
get_context().set_state('PRE_DBE_CREATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_create(
tenant_name, obj_dict, db_conn)
if not ok:
return (ok, result)
callable = getattr(r_class, 'http_post_collection_fail', None)
if callable:
cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
ok, quota_limit, proj_uuid = r_class.get_quota_for_resource(obj_type,
obj_dict, db_conn)
if not ok:
return ok, quota_limit
get_context().set_state('DBE_CREATE')
if quota_limit >= 0:
path = self._path_prefix + proj_uuid + "/" + obj_type
if not self.quota_counter.get(path):
# Init quota counter
path_prefix = self._path_prefix + proj_uuid
try:
QuotaHelper._zk_quota_counter_init(
path_prefix, {obj_type: quota_limit}, proj_uuid,
self._db_conn, self.quota_counter)
except NoIdError:
msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
return (False, (404, msg))
(ok, result) = QuotaHelper.verify_quota_and_create_resource(
db_conn, obj_dict, obj_type, obj_id,
quota_limit, self.quota_counter[path])
if not ok:
return (ok, result)
else:
# To be used for reverting back count when undo() is called
quota_counter.append(self.quota_counter[path])
else:
#normal execution
(ok, result) = db_conn.dbe_create(obj_type, obj_id, obj_dict)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_CREATE')
# type-specific hook
try:
ok, result = r_class.post_dbe_create(tenant_name, obj_dict, db_conn)
except Exception as e:
ok = False
msg = ("%s:%s post_dbe_create had an exception: %s\n%s" %
(obj_type, obj_id, str(e),
cfgm_common.utils.detailed_traceback()))
result = (None, msg)
if not ok:
# Create is done, log to system, no point in informing user
self.config_log(result[1], level=SandeshLevel.SYS_ERR)
return True, obj_id
# end stateful_create
try:
ok, result = stateful_create()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
fq_name_str = ':'.join(fq_name)
self.undo(result, obj_type, fq_name=fq_name_str,
counter=quota_counter, value=-1)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Initialize quota counter if resource is project
if resource_type == 'project' and 'quota' in obj_dict:
proj_id = obj_dict['uuid']
quota_dict = obj_dict.get('quota')
path_prefix = self._path_prefix + proj_id
if quota_dict:
try:
QuotaHelper._zk_quota_counter_init(path_prefix, quota_dict,
proj_id, db_conn, self.quota_counter)
except NoIdError:
err_msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
rsp_body = {}
rsp_body['name'] = name
rsp_body['fq_name'] = fq_name
rsp_body['uuid'] = result
rsp_body['href'] = self.generate_url(resource_type, result)
if parent_class:
# non config-root child, send back parent uuid/href
rsp_body['parent_type'] = obj_dict['parent_type']
rsp_body['parent_uuid'] = parent_uuid
rsp_body['parent_href'] = self.generate_url(parent_res_type,
parent_uuid)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
return {resource_type: rsp_body}
# end http_resource_create
@log_api_stats
def http_resource_read(self, obj_type, id):
    """Handle a REST GET of a single resource.

    Validates that *id* really is an *obj_type* object, honours the
    If-None-Match ETag for cache revalidation (304), computes the field
    list to fetch from the DB, applies pre/post read hooks and RBAC
    visibility filtering, and returns {resource_type: <object dict>}.

    Raises cfgm_common.exceptions.HttpError on any failure (404 for
    unknown/invisible ids, 500 for DB read errors).
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    # Best-effort pre-read extension hook; extension failures are
    # deliberately swallowed so they cannot break the read path.
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_read' %(obj_type), id)
    except Exception as e:
        pass

    etag = get_request().headers.get('If-None-Match')
    db_conn = self._db_conn
    try:
        # Reject reads where the uuid exists but belongs to another type.
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        fq_name = db_conn.uuid_to_fq_name(id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))

    # common handling for all resource get
    (ok, result) = self._get_common(get_request(), id)
    if not ok:
        (code, msg) = result
        self.config_object_error(
            id, None, obj_type, 'http_get', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)

    db_conn = self._db_conn
    if etag:
        # Client supplied an ETag: answer 304 Not-Modified when the stored
        # object has not changed, skipping the full DB read below.
        (ok, result) = db_conn.dbe_is_latest(id, etag.strip('"'))
        if not ok:
            # Not present in DB
            self.config_object_error(
                id, None, obj_type, 'http_get', result)
            raise cfgm_common.exceptions.HttpError(404, result)

        is_latest = result
        if is_latest:
            # send Not-Modified, caches use this for read optimization
            bottle.response.status = 304
            return
    # end if etag

    # Generate field list for db layer
    obj_fields = r_class.prop_fields | r_class.ref_fields
    if 'fields' in get_request().query:
        # Restrict to the requested fields, intersected with everything the
        # type can expose.  NOTE: '&' binds tighter than '|', so this is
        # (requested & known-fields) | {id_perms, perms2}; id_perms/perms2
        # are always fetched because the visibility check below needs them.
        obj_fields = set(get_request().query.fields.split(',')) & (
            obj_fields |
            r_class.backref_fields |
            r_class.children_fields
            ) | set(['id_perms', 'perms2'])
    else: # default props + children + refs + backrefs
        if 'exclude_back_refs' not in get_request().query:
            obj_fields |= r_class.backref_fields
        if 'exclude_children' not in get_request().query:
            obj_fields |= r_class.children_fields

    # Type-specific pre-read hook may veto the read.
    (ok, result) = r_class.pre_dbe_read(id, fq_name, db_conn)
    if not ok:
        (code, msg) = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    try:
        (ok, result) = db_conn.dbe_read(obj_type, id,
            list(obj_fields), ret_readonly=True)
        if not ok:
            self.config_object_error(id, None, obj_type, 'http_get', result)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        raise cfgm_common.exceptions.HttpError(500, result)

    # check visibility: non-admin users get 404 for hidden objects rather
    # than a permission error, so hidden ids are not disclosed.
    if (not result['id_perms'].get('user_visible', True) and
        not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_get', result)
        raise cfgm_common.exceptions.HttpError(404, result)

    if not self.is_admin_request():
        # Strip reference links the requesting user may not read.
        self.obj_view(resource_type, result)

    (ok, err_msg) = r_class.post_dbe_read(result, db_conn)
    if not ok:
        (code, msg) = err_msg
        raise cfgm_common.exceptions.HttpError(code, msg)

    rsp_body = {}
    rsp_body['uuid'] = id
    rsp_body['name'] = result['fq_name'][-1]
    if 'exclude_hrefs' not in get_request().query:
        self.generate_hrefs(resource_type, result)
    rsp_body.update(result)
    id_perms = result['id_perms']
    # ETag for subsequent If-None-Match revalidation by clients/caches.
    bottle.response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
    # Best-effort post-read extension hook; errors ignored.
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_read' %(obj_type), id, rsp_body)
    except Exception as e:
        pass

    return {resource_type: rsp_body}
# end http_resource_read
# filter object references based on permissions
def obj_view(self, resource_type, obj_dict, ref_perms=None):
    """Prune reference links in *obj_dict* the caller may not read.

    For every link field of *resource_type* present in *obj_dict*, keep
    only the references whose target the requesting user has read
    permission on.  Mutates *obj_dict* in place.

    :param resource_type: REST resource type of *obj_dict*.
    :param obj_dict: object dict being filtered (modified in place).
    :param ref_perms: optional {uuid: perms-dict} map for the referenced
        objects; when absent it is bulk-read from the DB (perms2 if RBAC
        is enabled, else id_perms).
    """
    r_class = self.get_resource_class(resource_type)
    obj_links = r_class.obj_links & set(obj_dict.keys())

    if not ref_perms:
        if self.is_rbac_enabled():
            fields = ['perms2']
        else:
            fields = ['id_perms']
        ref_uuids = {ref['uuid'] for link in obj_links
                     for ref in obj_dict[link]}
        # Use a distinct loop variable: the original comprehension shadowed
        # the obj_dict parameter being filtered.
        ref_perms = {ref_dict['uuid']: ref_dict for ref_dict in
                     self._db_conn._object_db.object_raw_read(
                         resource_type, ref_uuids, fields)}

    for link_field in obj_links:
        # Build a fresh filtered list instead of calling list.remove()
        # while iterating the same list: removal during iteration skips
        # the element following each removed one, so some unauthorized
        # references survived the filter.
        obj_dict[link_field] = [
            link for link in obj_dict[link_field]
            if (link['uuid'] in ref_perms and
                self._permissions.check_perms_read(
                    get_request(),
                    link['uuid'],
                    ref_perms[link['uuid']])[0])]
# end obj_view
@log_api_stats
def http_resource_update(self, obj_type, id):
    """Handle a REST PUT of a single resource.

    Reads the current object from the DB (or its pending draft version
    when the resource class supports drafts), funnels the update through
    the shared _put_common() pipeline, and returns the uuid/href of the
    updated resource.  A missing or empty request body is a no-op.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    # Nothing to do when the request carries no body or an empty body.
    body = get_request().json
    if not body or not body[resource_type]:
        return

    obj_dict = get_request().json[resource_type]

    # When perms2 is being replaced, the owner field is mandatory.
    if 'perms2' in obj_dict:
        if 'owner' not in obj_dict['perms2']:
            raise cfgm_common.exceptions.HttpError(400,
                'owner in perms2 must be present')

    fields = r_class.prop_fields | r_class.ref_fields
    try:
        ok, result = self._db_conn.dbe_read(obj_type, id, fields)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        self.config_object_error(id, None, obj_type,
                                 'http_resource_update', result[1])
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    db_obj_dict = result

    # Look if the resource have a pending version, if yes use it as resource
    # to update
    if hasattr(r_class, 'get_pending_resource'):
        ok, result = r_class.get_pending_resource(db_obj_dict, fields)
        if ok and isinstance(result, dict):
            # Retarget the update at the draft copy.
            db_obj_dict = result
            id = obj_dict['uuid'] = db_obj_dict['uuid']
        if not ok and result[0] != 404:
            self.config_object_error(
                id, None, obj_type, 'http_resource_update', result[1])
            raise cfgm_common.exceptions.HttpError(result[0], result[1])

    # Remember the project's previous quota so quota changes can be applied.
    old_quota_dict = None
    if resource_type == 'project' and 'quota' in db_obj_dict:
        old_quota_dict = db_obj_dict['quota']

    self._put_common(
        'http_put', obj_type, id, db_obj_dict, req_obj_dict=obj_dict,
        quota_dict=old_quota_dict)

    return {resource_type: {
        'uuid': id,
        'href': self.generate_url(resource_type, id),
    }}
# end http_resource_update
@log_api_stats
def http_resource_delete(self, obj_type, id):
    """Handle a REST DELETE of a single resource.

    Verifies existence and visibility, refuses the delete while
    non-default children or non-relaxed back-references still exist
    (409), supports pending-delete drafts (202), then performs the
    stateful delete with undo/quota-counter bookkeeping and fires the
    pre/post delete extension hooks.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    db_conn = self._db_conn

    # if obj doesn't exist return early
    try:
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        _ = db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'ID %s does not exist' %(id))

    # Pre-delete extension hook; a missing extension raises RuntimeError
    # and is ignored, any other extension error is only logged.
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_delete' %(obj_type), id)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)

    # read in obj from db (accepting error) to get details of it
    try:
        (read_ok, read_result) = db_conn.dbe_read(obj_type, id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not read_ok:
        self.config_object_error(
            id, None, obj_type, 'http_delete', read_result)
        # proceed down to delete the resource

    # check visibility: hidden objects 404 for non-admins.
    if (not read_result['id_perms'].get('user_visible', True) and
        not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_delete', result)
        raise cfgm_common.exceptions.HttpError(404, result)

    # common handling for all resource delete
    parent_uuid = read_result.get('parent_uuid')
    (ok, del_result) = self._delete_common(
        get_request(), obj_type, id, parent_uuid)
    if not ok:
        (code, msg) = del_result
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)

    # Permit abort resource deletion and return 202 status code
    get_context().set_state('PENDING_DBE_DELETE')
    ok, result = r_class.pending_dbe_delete(read_result)
    if (not ok and isinstance(result, tuple) and result[0] == 409 and
            isinstance(result[1], set)):
        # Found back reference to existing enforced or draft resource
        exist_hrefs = [self.generate_url(type, uuid)
                       for type, uuid in result[1]]
        msg = "Delete when resource still referred: %s" % exist_hrefs
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(409, msg)
    elif ok and isinstance(result, tuple) and result[0] == 202:
        # Deletion accepted but not applied, pending delete
        # return 202 HTTP OK code to aware clients
        bottle.response.status = 202
        return
    elif not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    # fail if non-default children or non-derived backrefs exist
    for child_field in r_class.children_fields:
        child_type, is_derived = r_class.children_field_types[child_field]
        if is_derived:
            continue
        child_cls = self.get_resource_class(child_type)
        default_child_name = 'default-%s' %(
            child_cls(parent_type=obj_type).get_type())
        exist_hrefs = []
        for child in read_result.get(child_field, []):
            # Default children and security-draft placeholders do not
            # block the delete; they are removed implicitly.
            if child['to'][-1] in [default_child_name,
                                   POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                continue
            exist_hrefs.append(
                self.generate_url(child_type, child['uuid']))
        if exist_hrefs:
            err_msg = 'Delete when children still present: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)

    # Back-references the caller explicitly relaxed do not block delete.
    relaxed_refs = set(db_conn.dbe_get_relaxed_refs(id))
    for backref_field in r_class.backref_fields:
        backref_type, _, is_derived = \
            r_class.backref_field_types[backref_field]
        if is_derived:
            continue
        exist_hrefs = [self.generate_url(backref_type, backref['uuid'])
                       for backref in read_result.get(backref_field, [])
                       if backref['uuid'] not in relaxed_refs]
        if exist_hrefs:
            err_msg = 'Delete when resource still referred: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)

    # State modification starts from here. Ensure that cleanup is done for all state changes
    cleanup_on_failure = []
    quota_counter = []
    def stateful_delete():
        # Runs the actual delete; any failure return triggers self.undo()
        # in the caller, which replays cleanup and restores quota counters.
        get_context().set_state('PRE_DBE_DELETE')

        proj_id = r_class.get_project_id_for_resource(read_result, obj_type,
                                                      db_conn)
        # type-specific hook
        (ok, del_result, zk_del_kwargs) = r_class.pre_dbe_delete(id, read_result, db_conn)
        if not ok:
            if del_result:
                return (ok, del_result)
            else:
                return (ok, zk_del_kwargs)
        zk_del_kwargs = zk_del_kwargs or {}

        # Delete default children first
        for child_field in r_class.children_fields:
            child_type, is_derived = r_class.children_field_types[child_field]
            if is_derived:
                continue
            if child_field in self._GENERATE_DEFAULT_INSTANCE:
                self.delete_default_children(child_type, read_result)

        callable = getattr(r_class, 'http_delete_fail', None)
        if callable:
            cleanup_on_failure.append((callable, [id, read_result, db_conn]))

        get_context().set_state('DBE_DELETE')
        (ok, del_result) = db_conn.dbe_delete(obj_type, id, read_result)
        if not ok:
            return (ok, del_result)

        if proj_id:
            # Maintain the per-project ZK quota counter for this type.
            (ok, proj_dict) = QuotaHelper.get_project_dict_for_quota(
                proj_id, db_conn)
            if not ok:
                return ok, proj_dict
            quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
            path = self._path_prefix + proj_id + "/" + obj_type
            if quota_limit > 0:
                if self.quota_counter.get(path):
                    self.quota_counter[path] -= 1
                else:
                    # quota counter obj not initialized
                    # in this api-server, Init counter
                    path_prefix = self._path_prefix + proj_id
                    QuotaHelper._zk_quota_counter_init(
                        path_prefix, {obj_type : quota_limit},
                        proj_id, db_conn, self.quota_counter)
                    if db_conn._zk_db.quota_counter_exists(path):
                        self.quota_counter[path] -= 1

                # To be used for reverting back count when undo() is called.
                quota_counter.append(self.quota_counter.get(path))
            elif self.quota_counter.get(path):
                # quota limit is modified to unlimited
                # delete counter object
                del self.quota_counter[path]

        # type-specific hook
        get_context().set_state('POST_DBE_DELETE')
        try:
            ok, result = r_class.post_dbe_delete(
                id, read_result, db_conn, **zk_del_kwargs)
        except Exception as e:
            ok = False
            msg = ("%s:%s post_dbe_delete had an exception: %s\n%s" %
                   (obj_type, id, str(e),
                    cfgm_common.utils.detailed_traceback()))
            result = (None, msg)

        if not ok:
            # Delete is done, log to system, no point in informing user
            self.config_log(result[1], level=SandeshLevel.SYS_ERR)

        return (True, '')
    # end stateful_delete

    try:
        ok, result = stateful_delete()
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(
            404, 'No %s object found for id %s' %(resource_type, id))
    except Exception as e:
        ok = False
        err_msg = cfgm_common.utils.detailed_traceback()
        result = (500, err_msg)
    if not ok:
        # Roll back partial state changes and quota counter decrements.
        self.undo(result, obj_type, id=id, counter=quota_counter, value=1)
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    # Post-delete extension hook; best-effort, errors only logged.
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_delete' %(obj_type), id, read_result)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end http_resource_delete
@log_api_stats
def http_resource_list(self, obj_type):
    """Handle a REST GET on a resource collection.

    Parses the supported query parameters (parent_fq_name_str/parent_id,
    back_ref_id, obj_uuids, fq_names, page_marker/page_limit, count,
    detail, fields, shared, filters, exclude_hrefs), resolves names to
    uuids, drops parents the caller may not read, and delegates to
    _list_collection().
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    db_conn = self._db_conn
    env = get_request().headers.environ
    parent_uuids = None
    back_ref_uuids = None
    obj_uuids = None
    pagination = {}
    if 'parent_fq_name_str' in get_request().query:
        # Resolve the parent fq-name against every possible parent type
        # (or the one explicitly supplied); unresolvable types are skipped.
        parent_uuids = []
        parent_fq_name = get_request().query.parent_fq_name_str.split(':')
        parent_types = r_class.parent_types
        if 'parent_type' in get_request().query:
            parent_types = [get_request().query.parent_type]
        for parent_type in parent_types:
            _, p_class = self._validate_resource_type(parent_type)
            try:
                parent_uuids.append(
                    self._db_conn.fq_name_to_uuid(p_class.object_type,
                                                  parent_fq_name),
                )
            except cfgm_common.exceptions.NoIdError:
                pass
    elif 'parent_id' in get_request().query:
        parent_uuids = get_request().query.parent_id.split(',')
    if 'back_ref_id' in get_request().query:
        back_ref_uuids = get_request().query.back_ref_id.split(',')
    if 'obj_uuids' in get_request().query:
        obj_uuids = get_request().query.obj_uuids.split(',')
    if 'fq_names' in get_request().query:
        # Resolve each fq-name to a uuid; names that do not exist are
        # silently dropped.  If none resolve, the list is empty.
        obj_fqn_strs = get_request().query.fq_names.split(',')
        obj_uuid = None
        for obj_fqn_str in obj_fqn_strs:
            try:
                obj_fqn = obj_fqn_str.split(':')
                obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, obj_fqn)
                if obj_uuids is None:
                    obj_uuids = []
                obj_uuids.append(obj_uuid)
            except cfgm_common.exceptions.NoIdError as e:
                pass
        if obj_uuids is None:
            return {'%ss' %(resource_type): []}

    if 'page_marker' in get_request().query:
        pagination['marker'] = self._validate_page_marker(
            get_request().query['page_marker'])

    if 'page_limit' in get_request().query:
        pagination['limit'] = self._validate_page_limit(
            get_request().query['page_limit'])

    # common handling for all resource get: drop parents the caller is not
    # allowed to read (iterate over a copy since the list is mutated).
    for parent_uuid in list(parent_uuids or []):
        (ok, result) = self._get_common(get_request(), parent_uuid)
        if not ok:
            parent_uuids.remove(parent_uuid)

    # A parent filter was given but no readable parent survived.
    if obj_uuids is None and back_ref_uuids is None and parent_uuids == []:
        return {'%ss' %(resource_type): []}

    if 'count' in get_request().query:
        is_count = 'true' in get_request().query.count.lower()
    else:
        is_count = False

    if 'detail' in get_request().query:
        is_detail = 'true' in get_request().query.detail.lower()
    else:
        is_detail = False

    if 'fields' in get_request().query:
        req_fields = get_request().query.fields.split(',')
    else:
        req_fields = []

    if 'shared' in get_request().query:
        include_shared = 'true' in get_request().query.shared.lower()
    else:
        include_shared = False

    try:
        filters = utils.get_filters(get_request().query.filters)
    except Exception as e:
        raise cfgm_common.exceptions.HttpError(
            400, 'Invalid filter ' + get_request().query.filters)

    if 'exclude_hrefs' in get_request().query:
        exclude_hrefs = True
    else:
        exclude_hrefs = False

    return self._list_collection(obj_type, parent_uuids, back_ref_uuids,
                                 obj_uuids, is_count, is_detail, filters,
                                 req_fields, include_shared, exclude_hrefs,
                                 pagination)
# end http_resource_list
# internal_request_<oper> - handlers of internally generated requests
# that save-ctx, generate-ctx and restore-ctx
def internal_request_create(self, resource_type, obj_json):
    """Create a resource on behalf of the server itself (admin context).

    Builds a synthetic internal request carrying cloud-admin credentials,
    swaps it in as the current context, invokes the regular create
    handler, and always restores the caller's context.

    :returns: (True, <create response dict>)
    """
    object_type = self.get_resource_class(resource_type).object_type
    # Capture the caller's context *before* entering the try block: if the
    # capture happened inside try and failed, the finally clause would
    # reference an unbound orig_context and raise NameError, masking the
    # original error.
    orig_context = get_context()
    orig_request = get_request()
    try:
        b_req = bottle.BaseRequest(
            {'PATH_INFO': '/%ss' %(resource_type),
             'bottle.app': orig_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        json_as_dict = {'%s' %(resource_type): obj_json}
        i_req = context.ApiInternalRequest(
            b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
            json_as_dict, None)
        set_context(context.ApiContext(internal_req=i_req))
        resp = self.http_resource_create(object_type)
        return True, resp
    finally:
        set_context(orig_context)
# end internal_request_create
def internal_request_update(self, resource_type, obj_uuid, obj_json):
    """Update a resource on behalf of the server itself (admin context).

    Builds a synthetic internal request carrying cloud-admin credentials,
    swaps it in as the current context, invokes the regular update
    handler, and always restores the caller's context.

    :returns: (True, "")
    """
    object_type = self.get_resource_class(resource_type).object_type
    # Capture the caller's context *before* entering the try block: if the
    # capture happened inside try and failed, the finally clause would
    # reference an unbound orig_context and raise NameError, masking the
    # original error.
    orig_context = get_context()
    orig_request = get_request()
    try:
        b_req = bottle.BaseRequest(
            {'PATH_INFO': '/%ss' %(resource_type),
             'bottle.app': orig_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        json_as_dict = {'%s' %(resource_type): obj_json}
        i_req = context.ApiInternalRequest(
            b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
            json_as_dict, None)
        set_context(context.ApiContext(internal_req=i_req))
        self.http_resource_update(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(orig_context)
# end internal_request_update
def internal_request_delete(self, resource_type, obj_uuid):
    """Delete a resource on behalf of the server itself (admin context).

    Builds a synthetic internal request carrying cloud-admin credentials,
    swaps it in as the current context, invokes the regular delete
    handler, and always restores the caller's context.

    :returns: (True, "")
    """
    object_type = self.get_resource_class(resource_type).object_type
    # Capture the caller's context *before* entering the try block: if the
    # capture happened inside try and failed, the finally clause would
    # reference an unbound orig_context and raise NameError, masking the
    # original error.
    orig_context = get_context()
    orig_request = get_request()
    try:
        b_req = bottle.BaseRequest(
            {'PATH_INFO': '/%s/%s' %(resource_type, obj_uuid),
             'bottle.app': orig_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        i_req = context.ApiInternalRequest(
            b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
            None, None)
        set_context(context.ApiContext(internal_req=i_req))
        self.http_resource_delete(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(orig_context)
# end internal_request_delete
def internal_request_ref_update(self, res_type, obj_uuid, operation,
                                ref_res_type, ref_uuid=None,
                                ref_fq_name=None, attr=None,
                                relax_ref_for_delete=False):
    """Add/remove a reference on behalf of the server itself.

    Packs the ref-update payload, swaps in an internal admin context,
    invokes ref_update_http_post(), and always restores the caller's
    context.

    :returns: (True, "")
    """
    req_dict = {'type': res_type,
                'uuid': obj_uuid,
                'operation': operation,
                'ref-type': ref_res_type,
                'ref-uuid': ref_uuid,
                'ref-fq-name': ref_fq_name,
                'attr': attr,
                'relax_ref_for_delete': relax_ref_for_delete}
    # Capture the caller's context *before* entering the try block: if the
    # capture happened inside try and failed, the finally clause would
    # reference an unbound orig_context and raise NameError, masking the
    # original error.
    orig_context = get_context()
    orig_request = get_request()
    try:
        b_req = bottle.BaseRequest(
            {'PATH_INFO': '/ref-update',
             'bottle.app': orig_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        i_req = context.ApiInternalRequest(
            b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
            req_dict, None)
        set_context(context.ApiContext(internal_req=i_req))
        self.ref_update_http_post()
        return True, ""
    finally:
        set_context(orig_context)
# end internal_request_ref_update
def internal_request_prop_collection(self, obj_uuid, updates=None):
    """Apply property-collection updates on behalf of the server itself.

    Packs the updates payload, swaps in an internal admin context,
    invokes prop_collection_http_post(), and always restores the
    caller's context.

    :returns: (True, '')
    """
    req_dict = {
        'uuid': obj_uuid,
        'updates': updates or [],
    }
    # Capture the caller's context *before* entering the try block: if the
    # capture happened inside try and failed, the finally clause would
    # reference an unbound orig_context and raise NameError, masking the
    # original error.
    orig_context = get_context()
    orig_request = get_request()
    try:
        # NOTE(review): PATH_INFO is '/ref-update' although this drives
        # prop_collection_http_post() — looks copy-pasted; confirm whether
        # the path matters for internally generated requests before
        # changing it.
        b_req = bottle.BaseRequest(
            {'PATH_INFO': '/ref-update',
             'bottle.app': orig_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        i_req = context.ApiInternalRequest(
            b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
            req_dict, None)
        set_context(context.ApiContext(internal_req=i_req))
        self.prop_collection_http_post()
        return True, ''
    finally:
        set_context(orig_context)
# end internal_request_prop_collection
def alloc_vn_id(self, fq_name_str):
    """Allocate a virtual-network ID from ZooKeeper for *fq_name_str*."""
    zk_db = self._db_conn._zk_db
    return zk_db.alloc_vn_id(fq_name_str)
def alloc_security_group_id(self, fq_name_str):
    """Allocate a security-group ID from ZooKeeper for *fq_name_str*."""
    zk_db = self._db_conn._zk_db
    return zk_db.alloc_sg_id(fq_name_str)
def alloc_tag_value_id(self, tag_type, name):
    """Allocate a tag-value ID from ZooKeeper for (*tag_type*, *name*)."""
    zk_db = self._db_conn._zk_db
    return zk_db.alloc_tag_value_id(tag_type, name)
def create_default_children(self, object_type, parent_obj):
    """Recursively create provisioned default children under *parent_obj*.

    For every non-derived child type of *object_type* that is listed in
    _GENERATE_DEFAULT_INSTANCE, allocate and persist a default child,
    then recurse into that child's own default children.

    :returns: (True, '') on success, (False, error) on first failure.
    """
    field_types = self.get_resource_class(object_type).children_field_types
    # Create a default child only if provisioned for
    wanted_types = {ctype for _, (ctype, is_derived) in field_types.items()
                    if not is_derived and
                    ctype in self._GENERATE_DEFAULT_INSTANCE}
    if not wanted_types:
        return True, ''

    for child_type in wanted_types:
        child_cls = self.get_resource_class(child_type)
        child_obj_type = child_cls.object_type
        child_obj = child_cls(parent_obj=parent_obj)
        child_dict = child_obj.__dict__
        child_dict['id_perms'] = self._get_default_id_perms()
        child_dict['perms2'] = self._get_default_perms2()

        ok, result = self._db_conn.dbe_alloc(child_obj_type, child_dict)
        if not ok:
            return (ok, result)
        obj_id = result

        # For virtual networks, allocate an ID
        if child_obj_type == 'virtual_network':
            child_dict['virtual_network_network_id'] = self.alloc_vn_id(
                child_obj.get_fq_name_str())

        ok, result = self._db_conn.dbe_create(child_obj_type, obj_id,
                                              child_dict)
        if not ok:
            # DB Create failed, log and stop further child creation.
            err_msg = "DB Create failed creating %s" % child_type
            self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
            return (ok, result)

        # recurse down type hierarchy
        ok, result = self.create_default_children(child_obj_type,
                                                  child_obj)
        if not ok:
            return False, result
    return True, ''
# end create_default_children
def delete_default_children(self, resource_type, parent_dict):
    """Delete the provisioned default children listed in *parent_dict*.

    For each non-default-exempt child type in _GENERATE_DEFAULT_INSTANCE,
    locate the child named 'default-<type>' among the parent's children
    and delete it through the regular delete handler.
    """
    r_class = self.get_resource_class(resource_type)
    for child_field in r_class.children_fields:
        # Delete a default child only if provisioned for
        child_type, is_derived = r_class.children_field_types[child_field]
        if child_type not in self._GENERATE_DEFAULT_INSTANCE:
            continue
        child_cls = self.get_resource_class(child_type)
        # Locate the default child by its well-known name, then delete it.
        wanted_name = 'default-%s' % (child_type)
        for child_info in parent_dict.get(child_field, []):
            if child_info['to'][-1] == wanted_name:
                self.http_resource_delete(child_type, child_info['uuid'])
                break
# end delete_default_children
@classmethod
def _generate_resource_crud_methods(cls, obj):
    """Attach per-type REST handler methods to *obj*.

    For every object type, binds the five generic handlers
    (create/read/update/delete/list) to that type with functools.partial
    and installs them under the naming convention the route table expects
    (e.g. virtual_networks_http_post, virtual_network_http_get, ...).
    """
    # (generic handler, attribute-name format) pairs; the collection
    # handlers (create/list) use the pluralized '%ss' prefix.
    handler_specs = (
        (obj.http_resource_create, '%ss_http_post'),
        (obj.http_resource_read, '%s_http_get'),
        (obj.http_resource_update, '%s_http_put'),
        (obj.http_resource_delete, '%s_http_delete'),
        (obj.http_resource_list, '%ss_http_get'),
    )
    for object_type, _ in all_resource_type_tuples:
        for handler, name_fmt in handler_specs:
            bound = functools.partial(handler, object_type)
            # Preserve the generic handler's metadata on the bound method.
            functools.update_wrapper(bound, handler)
            setattr(obj, name_fmt % (object_type), bound)
# end _generate_resource_crud_methods
@classmethod
def _generate_resource_crud_uri(cls, obj):
    """Register REST routes for every resource type on *obj*.

    CRUD + list URIs of the form:
      obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get)
      obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put)
      obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete)
      obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post)
      obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get)
    """
    for object_type, resource_type in all_resource_type_tuples:
        leaf_path = '/%s/<id>' % (resource_type)
        collection_path = '/%ss' % (resource_type)
        # (path, HTTP verb, handler attribute-name format); handlers were
        # installed by _generate_resource_crud_methods.
        route_specs = (
            (leaf_path, 'GET', '%s_http_get'),
            (leaf_path, 'PUT', '%s_http_put'),
            (leaf_path, 'DELETE', '%s_http_delete'),
            (collection_path, 'POST', '%ss_http_post'),
            (collection_path, 'GET', '%ss_http_get'),
        )
        for path, verb, attr_fmt in route_specs:
            obj.route(path, verb, getattr(obj, attr_fmt % (object_type)))
# end _generate_resource_crud_uri
def __init__(self, args_str=None):
self._db_conn = None
self._resource_classes = initialize_all_server_resource_classes(self)
self._args = None
self._path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX
self.quota_counter = {}
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self.lock_path_prefix = '%s/%s' % (self._args.cluster_id,
_DEFAULT_ZK_LOCK_PATH_PREFIX)
self.security_lock_prefix = '%s/security' % self.lock_path_prefix
# set the max size of the api requests
bottle.BaseRequest.MEMFILE_MAX = self._args.max_request_size
# multi_tenancy is ignored if aaa_mode is configured by user
if self._args.aaa_mode is not None:
if self.aaa_mode not in AAA_MODE_VALID_VALUES:
self.aaa_mode = AAA_MODE_DEFAULT_VALUE
elif self._args.multi_tenancy is not None:
# MT configured by user - determine from aaa-mode
self.aaa_mode = "cloud-admin" if self._args.multi_tenancy else "no-auth"
else:
self.aaa_mode = "cloud-admin"
api_proto = 'https' if self._args.config_api_ssl_enable else 'http'
api_host_name = socket.getfqdn(self._args.listen_ip_addr)
self._base_url = "%s://%s:%s" % (api_proto, api_host_name,
self._args.listen_port)
# Generate LinkObjects for all entities
links = []
# Link for root
links.append(LinkObject('root', self._base_url , '/config-root',
'config-root'))
for _, resource_type in all_resource_type_tuples:
link = LinkObject('collection',
self._base_url , '/%ss' %(resource_type),
'%s' %(resource_type))
links.append(link)
for _, resource_type in all_resource_type_tuples:
link = LinkObject('resource-base',
self._base_url , '/%s' %(resource_type),
'%s' %(resource_type))
links.append(link)
self._homepage_links = links
self._pipe_start_app = None
#GreenletProfiler.set_clock_type('wall')
self._profile_info = None
for act_res in _ACTION_RESOURCES:
link = LinkObject('action', self._base_url, act_res['uri'],
act_res['link_name'], act_res['method'])
self._homepage_links.append(link)
# Register for VN delete request. Disallow delete of system default VN
self.route('/virtual-network/<id>', 'DELETE', self.virtual_network_http_delete)
self.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
self._homepage_links.insert(
0, LinkObject('documentation', self._base_url,
'/documentation/index.html',
'documentation', 'GET'))
# APIs to reserve/free block of IP address from a VN/Subnet
self.route('/virtual-network/<id>/ip-alloc',
'POST', self.vn_ip_alloc_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-alloc',
'virtual-network-ip-alloc', 'POST'))
self.route('/virtual-network/<id>/ip-free',
'POST', self.vn_ip_free_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-free',
'virtual-network-ip-free', 'POST'))
# APIs to find out number of ip instances from given VN subnet
self.route('/virtual-network/<id>/subnet-ip-count',
'POST', self.vn_subnet_ip_count_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/subnet-ip-count',
'virtual-network-subnet-ip-count', 'POST'))
# Enable/Disable aaa mode
self.route('/aaa-mode', 'GET', self.aaa_mode_http_get)
self.route('/aaa-mode', 'PUT', self.aaa_mode_http_put)
# Set Tag actions
self.route('/set-tag', 'POST', self.set_tag)
self._homepage_links.append(
LinkObject('action', self._base_url, '/set-tag', 'set-tag',
'POST'))
# Commit or discard draft security policy
self.route('/security-policy-draft', 'POST',
self.security_policy_draft)
self._homepage_links.append(
LinkObject('action', self._base_url, '/security-policy-draft',
'security-policy-draft', 'POST'))
# randomize the collector list
self._random_collectors = self._args.collectors
self._chksum = "";
if self._args.collectors:
self._chksum = hashlib.md5(''.join(self._args.collectors)).hexdigest()
self._random_collectors = random.sample(self._args.collectors, \
len(self._args.collectors))
# sandesh init
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit(
self._args.sandesh_send_rate_limit)
module = Module.API_SERVER
module_name = ModuleNames[Module.API_SERVER]
node_type = Module2NodeType[module]
node_type_name = NodeTypeNames[node_type]
self.table = "ObjectConfigNode"
if self._args.worker_id:
instance_id = self._args.worker_id
else:
instance_id = INSTANCE_ID_DEFAULT
hostname = socket.getfqdn(self._args.listen_ip_addr)
self._sandesh.init_generator(module_name, hostname,
node_type_name, instance_id,
self._random_collectors,
'vnc_api_server_context',
int(self._args.http_server_port),
['cfgm_common', 'vnc_cfg_api_server.sandesh'],
logger_class=self._args.logger_class,
logger_config_file=self._args.logging_conf,
config=self._args.sandesh_config)
self._sandesh.trace_buffer_create(name="VncCfgTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="RestApiTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBRequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBUVERequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
VncGreenlet.register_sandesh_handler()
self._sandesh.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(self._sandesh, hostname, module_name,
instance_id,
staticmethod(ConnectionState.get_conn_state_cb),
NodeStatusUVE, NodeStatus, self.table)
# Address Management interface
addr_mgmt = vnc_addr_mgmt.AddrMgmt(self)
self._addr_mgmt = addr_mgmt
# DB interface initialization
if self._args.wipe_config:
self._db_connect(True)
else:
self._db_connect(self._args.reset_config)
self._db_init_entries()
# ZK quota counter initialization
(ok, project_list, _) = self._db_conn.dbe_list('project',
field_names=['quota'])
if not ok:
(code, err_msg) = project_list # status
raise cfgm_common.exceptions.HttpError(code, err_msg)
for project in project_list or []:
if project.get('quota'):
path_prefix = self._path_prefix + project['uuid']
try:
QuotaHelper._zk_quota_counter_init(
path_prefix, project['quota'], project['uuid'],
self._db_conn, self.quota_counter)
except NoIdError:
err_msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# API/Permissions check
# after db init (uses db_conn)
self._rbac = vnc_rbac.VncRbac(self, self._db_conn)
self._permissions = vnc_perms.VncPermissions(self, self._args)
if self.is_rbac_enabled():
self._create_default_rbac_rule()
if self.is_auth_needed():
self._generate_obj_view_links()
if os.path.exists('/usr/bin/contrail-version'):
cfgm_cpu_uve = ModuleCpuState()
cfgm_cpu_uve.name = socket.getfqdn(self._args.listen_ip_addr)
cfgm_cpu_uve.config_node_ip = self.get_server_ip()
command = "contrail-version contrail-config | grep 'contrail-config'"
version = os.popen(command).read()
_, rpm_version, build_num = version.split()
cfgm_cpu_uve.build_info = build_info + '"build-id" : "' + \
rpm_version + '", "build-number" : "' + \
build_num + '"}]}'
cpu_info_trace = ModuleCpuStateTrace(data=cfgm_cpu_uve, sandesh=self._sandesh)
cpu_info_trace.send(sandesh=self._sandesh)
self.re_uuid = re.compile('^[0-9A-F]{8}-?[0-9A-F]{4}-?4[0-9A-F]{3}-?[89AB][0-9A-F]{3}-?[0-9A-F]{12}$',
re.IGNORECASE)
# Load extensions
self._extension_mgrs = {}
self._load_extensions()
# Authn/z interface
if self._args.auth == 'keystone':
auth_svc = vnc_auth_keystone.AuthServiceKeystone(self, self._args)
else:
auth_svc = vnc_auth.AuthService(self, self._args)
self._pipe_start_app = auth_svc.get_middleware_app()
self._auth_svc = auth_svc
if int(self._args.worker_id) == 0:
try:
self._extension_mgrs['resync'].map(
self._resync_domains_projects)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# following allowed without authentication
self.white_list = [
'^/documentation', # allow all documentation
'^/$', # allow discovery
]
self._global_asn = None
# api server list info
self._config_node_list = []
# create amqp handle
self._amqp_client = self.initialize_amqp_client()
# end __init__
def initialize_amqp_client(self):
amqp_client = None
use_ssl = None
try:
if self._args.rabbit_use_ssl is not None:
use_ssl = str(self._args.rabbit_use_ssl).lower() == 'true'
# prepare rabbitMQ params
rabbitmq_cfg = AttrDict(
servers=self._args.rabbit_server,
port=self._args.rabbit_port,
user=self._args.rabbit_user,
password=self._args.rabbit_password,
vhost=self._args.rabbit_vhost,
ha_mode=self._args.rabbit_ha_mode,
use_ssl=use_ssl,
ssl_version=self._args.kombu_ssl_version,
ssl_keyfile=self._args.kombu_ssl_keyfile,
ssl_certfile=self._args.kombu_ssl_certfile,
ssl_ca_certs=self._args.kombu_ssl_ca_certs
)
amqp_client = KombuAmqpClient(self.config_log, rabbitmq_cfg,
heartbeat=self.get_rabbit_health_check_interval())
amqp_client.add_exchange(self.JOB_REQUEST_EXCHANGE, type="direct")
amqp_client.run()
except Exception as e:
err_msg = "Error while initializing the AMQP client %s " % repr(e)
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
if amqp_client is not None:
amqp_client.stop()
return None
return amqp_client
@property
def global_autonomous_system(self):
if not self._global_asn:
gsc_class = self.get_resource_class(GlobalSystemConfig.object_type)
ok, result = gsc_class.locate(uuid=self._gsc_uuid, create_it=False,
fields=['autonomous_system'])
if not ok:
msg = ("Cannot fetch Global System Config to obtain "
"autonomous system")
raise cfgm_common.exceptions.VncError(msg)
self._global_asn = result['autonomous_system']
return self._global_asn
@global_autonomous_system.setter
def global_autonomous_system(self, asn):
self._global_asn = asn
def _extensions_transform_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_request', request)
# end _extensions_transform_request
def _extensions_validate_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'validate_request', request)
# end _extensions_validate_request
def _extensions_transform_response(self, request, response):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_response', request, response)
# end _extensions_transform_response
@ignore_exceptions
def _generate_rest_api_request_trace(self):
method = get_request().method.upper()
if method == 'GET':
return None
req_id = get_request().headers.get('X-Request-Id',
'req-%s' %(str(uuid.uuid4())))
gevent.getcurrent().trace_request_id = req_id
url = get_request().url
if method == 'DELETE':
req_data = ''
else:
try:
req_data = json.dumps(get_request().json)
except Exception as e:
req_data = '%s: Invalid request body' %(e)
rest_trace = RestApiTrace(request_id=req_id)
rest_trace.url = url
rest_trace.method = method
rest_trace.request_data = req_data
# Also log keystone response time against this request id,
# before returning the trace message.
if ((get_context().get_keystone_response_time()) is not None):
response_time = get_context().get_keystone_response_time()
response_time_in_usec = ((response_time.days*24*60*60) +
(response_time.seconds*1000000) +
response_time.microseconds)
stats = VncApiLatencyStats(
operation_type='VALIDATE',
application='KEYSTONE',
response_time_in_usec=response_time_in_usec,
response_size=0,
identifier=req_id,
)
stats_log = VncApiLatencyStatsLog(node_name="issu-vm6", api_latency_stats=stats, sandesh=self._sandesh)
x=stats_log.send(sandesh=self._sandesh)
return rest_trace
# end _generate_rest_api_request_trace
@ignore_exceptions
def _generate_rest_api_response_trace(self, rest_trace, response):
if not rest_trace:
return
rest_trace.status = bottle.response.status
rest_trace.response_body = json.dumps(response)
rest_trace.trace_msg(name='RestApiTraceBuf', sandesh=self._sandesh)
# end _generate_rest_api_response_trace
# Public Methods
    def route(self, uri, method, handler):
        """Register *handler* for (uri, method) on the bottle app, wrapped
        so that every request runs through extension hooks, request/response
        tracing and RBAC validation, and so that unexpected exceptions are
        logged with a full traceback before propagating.
        """
        @use_context
        def handler_trap_exception(*args, **kwargs):
            try:
                trace = None
                # let registered extensions rewrite/validate the request
                self._extensions_transform_request(get_request())
                self._extensions_validate_request(get_request())
                trace = self._generate_rest_api_request_trace()
                # RBAC check; failures surface as HTTP errors
                (ok, status) = self._rbac.validate_request(get_request())
                if not ok:
                    (code, err_msg) = status
                    raise cfgm_common.exceptions.HttpError(code, err_msg)
                response = handler(*args, **kwargs)
                self._generate_rest_api_response_trace(trace, response)
                self._extensions_transform_response(get_request(), response)
                return response
            except Exception as e:
                # flush any partial trace so failed requests stay visible
                if trace:
                    trace.trace_msg(name='RestApiTraceBuf',
                                    sandesh=self._sandesh)
                # don't log details of cfgm_common.exceptions.HttpError i.e handled error cases
                if isinstance(e, cfgm_common.exceptions.HttpError):
                    bottle.abort(e.status_code, e.content)
                else:
                    # unexpected failure: log a cgitb-formatted traceback
                    string_buf = StringIO()
                    cgitb_hook(file=string_buf, format="text")
                    err_msg = string_buf.getvalue()
                    self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
                    raise
        self.api_bottle.route(uri, method, handler_trap_exception)
    # end route
def get_args(self):
return self._args
# end get_args
def get_server_ip(self):
ip_list = []
for i in netifaces.interfaces():
try:
if netifaces.AF_INET in netifaces.ifaddresses(i):
addr = netifaces.ifaddresses(i)[netifaces.AF_INET][0][
'addr']
if addr != '127.0.0.1' and addr not in ip_list:
ip_list.append(addr)
except ValueError as e:
self.config_log("Skipping interface %s: %s" % (i, str(e)),
level=SandeshLevel.SYS_DEBUG)
return ip_list
# end get_server_ip
def get_listen_ip(self):
return self._args.listen_ip_addr
# end get_listen_ip
def get_server_port(self):
return self._args.listen_port
# end get_server_port
def get_enable_ssl(self):
return self._args.config_api_ssl_enable
# end get_enable_ssl
def get_keyfile(self):
return self._args.config_api_ssl_keyfile
# end get_keyfile
def get_certfile(self):
return self._args.config_api_ssl_certfile
# end get_certfile
def get_ca_cert(self):
return self._args.config_api_ssl_ca_cert
# end get_ca_cert
def get_worker_id(self):
return int(self._args.worker_id)
# end get_worker_id
def get_pipe_start_app(self):
return self._pipe_start_app
# end get_pipe_start_app
def get_rabbit_health_check_interval(self):
return float(self._args.rabbit_health_check_interval)
# end get_rabbit_health_check_interval
def is_auth_disabled(self):
return self._args.auth is None or self._args.auth.lower() != 'keystone'
def is_admin_request(self):
if not self.is_auth_needed():
return True
if is_internal_request():
return True
env = bottle.request.headers.environ
roles = []
for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
if field in env:
roles.extend(env[field].split(','))
return has_role(self.cloud_admin_role, roles)
def get_auth_headers_from_token(self, request, token):
if self.is_auth_disabled() or not self.is_auth_needed():
return {}
return self._auth_svc.get_auth_headers_from_token(request, token)
# end get_auth_headers_from_token
def _generate_obj_view_links(self):
for object_type, resource_type in all_resource_type_tuples:
r_class = self.get_resource_class(resource_type)
r_class.obj_links = (r_class.ref_fields | r_class.backref_fields | r_class.children_fields)
# Check for the system created VN. Disallow such VN delete
    def virtual_network_http_delete(self, id):
        """DELETE handler for virtual-network that refuses to delete the
        system-created default networks (ip-fabric and link-local).

        Raises HttpError 404 for unknown or non-virtual-network ids and
        409 for the protected system networks.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_network':
                raise cfgm_common.exceptions.HttpError(
                    404, 'No virtual-network object found for id %s' %(id))
            vn_name = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            raise cfgm_common.exceptions.HttpError(
                404, 'ID %s does not exist' %(id))
        # fabric and link-local VNs are system-created and must never be
        # deleted through the API
        if (vn_name == cfgm_common.IP_FABRIC_VN_FQ_NAME or
            vn_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME):
            raise cfgm_common.exceptions.HttpError(
                409,
                'Can not delete system created default virtual-network '+id)
        super(VncApiServer, self).virtual_network_http_delete(id)
    # end
@use_context
def homepage_http_get(self):
json_body = {}
json_links = []
# strip trailing '/' in url
url = get_request().url[:-1]
url = url.replace('<script>', '<!--script>')
url = url.replace('</script>', '</script-->')
for link in self._homepage_links:
# strip trailing '/' in url
json_links.append(
{'link': link.to_dict(with_url=url)}
)
json_body = {"href": url, "links": json_links}
return json_body
# end homepage_http_get
    def documentation_http_get(self, filename):
        """Serve a static documentation file from the packaged doc root."""
        # ubuntu packaged path
        doc_root = '/usr/share/doc/contrail-config/doc/contrail-config/html/'
        if not os.path.exists(doc_root):
            # centos packaged path
            # NOTE(review): 'contrial-config' looks like a typo for
            # 'contrail-config' — confirm against the actual installed
            # package layout before changing; the path on disk may carry
            # the same typo.
            doc_root='/usr/share/doc/python-vnc_cfg_api_server/contrial-config/html/'
        return bottle.static_file(
            filename,
            root=doc_root)
    # end documentation_http_get
def obj_perms_http_get(self):
if self.is_auth_disabled() or not self.is_auth_needed():
result = {
'token_info': None,
'is_cloud_admin_role': False,
'is_global_read_only_role': False,
'permissions': 'RWX'
}
return result
obj_uuid = None
if 'uuid' in get_request().query:
obj_uuid = get_request().query.uuid
ok, result = self._auth_svc.validate_user_token()
if not ok:
code, msg = result
self.config_object_error(obj_uuid, None, None,
'obj_perms_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
token_info = result
# roles in result['token_info']['access']['user']['roles']
result = {'token_info': token_info}
# Handle v2 and v3 responses
roles_list = []
if 'access' in token_info:
roles_list = [roles['name'] for roles in
token_info['access']['user']['roles']]
elif 'token' in token_info:
roles_list = [roles['name'] for roles in
token_info['token']['roles']]
result['is_cloud_admin_role'] = has_role(self.cloud_admin_role,
roles_list)
result['is_global_read_only_role'] = has_role(
self.global_read_only_role, roles_list)
if obj_uuid:
result['permissions'] = self._permissions.obj_perms(get_request(),
obj_uuid)
if 'token' in token_info.keys():
if 'project' in token_info['token'].keys():
domain = None
try:
domain = token_info['token']['project']['domain']['id']
domain = str(uuid.UUID(domain))
except ValueError, TypeError:
if domain == 'default':
domain = 'default-domain'
domain = self._db_conn.fq_name_to_uuid('domain', [domain])
if domain:
domain = domain.replace('-', '')
token_info['token']['project']['domain']['id'] = domain
return result
# end obj_perms_http_get
def invalid_uuid(self, uuid):
return self.re_uuid.match(uuid) is None
def invalid_access(self, access):
return type(access) is not int or access not in range(0, 8)
def invalid_share_type(self, share_type):
return share_type not in cfgm_common.PERMS2_VALID_SHARE_TYPES
# change ownership of an object
    def obj_chown_http_post(self):
        """POST handler changing the perms2 owner of an object.

        Request JSON: {'uuid': <object uuid>, 'owner': <new owner uuid>}.
        Requires the caller to hold RW permission on the object. Raises
        HttpError 400 on malformed input, 403 on permission denial and
        404 for unknown uuids. Returns an empty dict on success.
        """
        obj_uuid = get_request().json.get('uuid')
        owner = get_request().json.get('owner')
        if obj_uuid is None:
            msg = "Bad Request, no resource UUID provided to chown"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if owner is None:
            msg = "Bad Request, no owner UUID provided to chown"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if self.invalid_uuid(obj_uuid):
            msg = "Bad Request, invalid resource UUID"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if self.invalid_uuid(owner):
            msg = "Bad Request, invalid owner UUID"
            raise cfgm_common.exceptions.HttpError(400, msg)
        try:
            obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
        except NoIdError as e:
            # Not present in DB
            raise cfgm_common.exceptions.HttpError(404, str(e))
        self._ensure_services_conn('chown', obj_type, obj_uuid=obj_uuid)
        # ensure user has RW permissions to object
        perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if not 'RW' in perms:
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")
        try:
            (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                                    obj_fields=['perms2'])
        except NoIdError as e:
            raise cfgm_common.exceptions.HttpError(404, str(e))
        # rewrite only the owner field of the perms2 block
        obj_dict['perms2']['owner'] = owner
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
        msg = "chown: %s owner set to %s" % (obj_uuid, owner)
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
        return {}
    #end obj_chown_http_post
def dump_cache(self):
self._post_common(None, {})
req_dict = get_request().json or {}
obj_uuids = req_dict.get('uuids', [])
count = req_dict.get('count', 10)
return self._db_conn._object_db._obj_cache_mgr.dump_cache(
obj_uuids=obj_uuids, count=count)
# chmod for an object
    def obj_chmod_http_post(self):
        """POST handler updating the perms2 block of an object: owner,
        owner_access, share list and global_access.

        Requires the caller to hold RW permission on the object. Raises
        HttpError 400 on malformed input, 403 on permission denial and
        404 for unknown uuids. Returns an empty dict on success.
        """
        try:
            obj_uuid = get_request().json['uuid']
        except Exception as e:
            raise cfgm_common.exceptions.HttpError(400, str(e))
        if self.invalid_uuid(obj_uuid):
            raise cfgm_common.exceptions.HttpError(
                400, "Bad Request, invalid object id")
        try:
            obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
        except NoIdError as e:
            # Not present in DB
            raise cfgm_common.exceptions.HttpError(404, str(e))
        self._ensure_services_conn('chmod', obj_type, obj_uuid=obj_uuid)
        # ensure user has RW permissions to object
        perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if not 'RW' in perms:
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")
        request_params = get_request().json
        owner = request_params.get('owner')
        share = request_params.get('share')
        owner_access = request_params.get('owner_access')
        global_access = request_params.get('global_access')
        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                             obj_fields=['perms2', 'is_shared'])
        obj_perms = obj_dict['perms2']
        # capture the pre-change perms for the audit log emitted below
        old_perms = '%s/%d %d %s' % (obj_perms['owner'],
            obj_perms['owner_access'], obj_perms['global_access'],
            ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])
        if owner:
            if self.invalid_uuid(owner):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid owner")
            obj_perms['owner'] = owner.replace('-','')
        if owner_access is not None:
            if self.invalid_access(owner_access):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid owner_access value")
            obj_perms['owner_access'] = owner_access
        if share is not None:
            try:
                for item in share:
                    """
                    item['tenant'] := [<share_type>:] <uuid>
                    share_type := ['domain' | 'tenant']
                    """
                    (share_type, share_id) = cfgm_common.utils.shareinfo_from_perms2_tenant(item['tenant'])
                    if self.invalid_share_type(share_type) or self.invalid_uuid(share_id) or self.invalid_access(item['tenant_access']):
                        raise cfgm_common.exceptions.HttpError(
                            400, "Bad Request, invalid share list")
            except Exception as e:
                raise cfgm_common.exceptions.HttpError(400, str(e))
            obj_perms['share'] = share
        if global_access is not None:
            if self.invalid_access(global_access):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid global_access value")
            obj_perms['global_access'] = global_access
            # any non-zero global access marks the object as shared
            obj_dict['is_shared'] = (global_access != 0)
        new_perms = '%s/%d %d %s' % (obj_perms['owner'],
            obj_perms['owner_access'], obj_perms['global_access'],
            ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
        msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms)
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
        return {}
    # end obj_chmod_http_post
def prop_collection_http_get(self):
if 'uuid' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object uuid needed for property collection get')
obj_uuid = get_request().query.uuid
if 'fields' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object fields needed for property collection get')
obj_fields = get_request().query.fields.split(',')
if 'position' in get_request().query:
fields_position = get_request().query.position
else:
fields_position = None
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
resource_class = self.get_resource_class(obj_type)
for obj_field in obj_fields:
if ((obj_field not in resource_class.prop_list_fields) and
(obj_field not in resource_class.prop_map_fields)):
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# request validations over
# common handling for all resource get
(ok, result) = self._get_common(get_request(), obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
try:
ok, result = self._db_conn.prop_collection_get(
obj_type, obj_uuid, obj_fields, fields_position)
if not ok:
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(
id, None, None, 'prop_collection_http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
# Prepare response
del result['id_perms']
return result
# end prop_collection_http_get
def prop_collection_http_post(self):
request_params = get_request().json
# validate each requested operation
obj_uuid = request_params.get('uuid')
if not obj_uuid:
err_msg = 'Error: prop_collection_update needs obj_uuid'
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
r_class = self.get_resource_class(obj_type)
for req_param in request_params.get('updates') or []:
obj_field = req_param.get('field')
if obj_field in r_class.prop_list_fields:
prop_coll_type = 'list'
elif obj_field in r_class.prop_map_fields:
prop_coll_type = 'map'
else:
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
req_oper = req_param.get('operation').lower()
field_val = req_param.get('value')
field_pos = str(req_param.get('position'))
prop_type = r_class.prop_field_types[obj_field]['xsd_type']
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
prop_val_type = prop_cls.attr_field_type_vals[prop_cls.attr_fields[0]]['attr_type']
prop_val_cls = cfgm_common.utils.str_to_class(prop_val_type, __name__)
try:
self._validate_complex_type(prop_val_cls, field_val)
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if prop_coll_type == 'list':
if req_oper not in ('add', 'modify', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'add') and field_val is None):
err_msg = 'Add needs field value in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'modify') and
None in (field_val, field_pos)):
err_msg = 'Modify needs field value and position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'delete') and field_pos is None):
err_msg = 'Delete needs field position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif prop_coll_type == 'map':
if req_oper not in ('set', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'set') and field_val is None):
err_msg = 'Set needs field value in request %s' %(
req_oper, json.dumps(req_param))
elif ((req_oper == 'delete') and field_pos is None):
err_msg = 'Delete needs field position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Get actual resource from DB
fields = r_class.prop_fields | r_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(r_class, 'get_pending_resource'):
ok, result = r_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(obj_uuid, None, obj_type,
'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
self._put_common('prop-collection-update', obj_type, obj_uuid,
db_obj_dict,
req_prop_coll_updates=request_params.get('updates'))
# end prop_collection_http_post
def ref_update_http_post(self):
# grab fields
type = get_request().json.get('type')
res_type, res_class = self._validate_resource_type(type)
obj_uuid = get_request().json.get('uuid')
ref_type = get_request().json.get('ref-type')
ref_field = '%s_refs' %(ref_type.replace('-', '_'))
ref_res_type, ref_class = self._validate_resource_type(ref_type)
operation = get_request().json.get('operation')
ref_uuid = get_request().json.get('ref-uuid')
ref_fq_name = get_request().json.get('ref-fq-name')
attr = get_request().json.get('attr')
relax_ref_for_delete = get_request().json.get('relax_ref_for_delete', False)
# validate fields
if None in (res_type, obj_uuid, ref_res_type, operation):
err_msg = 'Bad Request: type/uuid/ref-type/operation is null: '
err_msg += '%s, %s, %s, %s.' \
%(res_type, obj_uuid, ref_res_type, operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
operation = operation.upper()
if operation not in ['ADD', 'DELETE']:
err_msg = 'Bad Request: operation should be add or delete: %s' \
%(operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
if not ref_uuid and not ref_fq_name:
err_msg = 'Bad Request: ref-uuid or ref-fq-name must be specified'
raise cfgm_common.exceptions.HttpError(400, err_msg)
obj_type = res_class.object_type
ref_obj_type = ref_class.object_type
if not ref_uuid:
try:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_obj_type, ref_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(ref_fq_name) + ' not found')
elif operation == 'ADD':
# if UUID provided verify existence of the reference being added
try:
ref_fq_name = self._db_conn.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# To invoke type specific hook and extension manager
fields = res_class.prop_fields | res_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid, fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(obj_uuid, None, obj_type, 'ref_update',
result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(res_class, 'get_pending_resource'):
ok, result = res_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(
obj_uuid, None, obj_type, 'ref_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
obj_dict = {'uuid': obj_uuid}
if ref_field in db_obj_dict:
obj_dict[ref_field] = copy.deepcopy(db_obj_dict[ref_field])
if operation == 'ADD':
if ref_obj_type+'_refs' not in obj_dict:
obj_dict[ref_obj_type+'_refs'] = []
existing_ref = [ref for ref in obj_dict[ref_obj_type+'_refs']
if ref['uuid'] == ref_uuid]
if existing_ref:
ref['attr'] = attr
else:
obj_dict[ref_obj_type+'_refs'].append(
{'to':ref_fq_name, 'uuid': ref_uuid, 'attr':attr})
elif operation == 'DELETE':
for old_ref in obj_dict.get(ref_obj_type+'_refs', []):
if old_ref['to'] == ref_fq_name or old_ref['uuid'] == ref_uuid:
obj_dict[ref_obj_type+'_refs'].remove(old_ref)
break
ref_args = {'ref_obj_type':ref_obj_type, 'ref_uuid': ref_uuid,
'operation': operation, 'data': {'attr': attr},
'relax_ref_for_delete': relax_ref_for_delete}
self._put_common('ref-update', obj_type, obj_uuid, db_obj_dict,
req_obj_dict=obj_dict, ref_args=ref_args)
return {'uuid': obj_uuid}
# end ref_update_http_post
def ref_relax_for_delete_http_post(self):
self._post_common(None, {})
# grab fields
obj_uuid = get_request().json.get('uuid')
ref_uuid = get_request().json.get('ref-uuid')
# validate fields
if None in (obj_uuid, ref_uuid):
err_msg = 'Bad Request: Both uuid and ref-uuid should be specified: '
err_msg += '%s, %s.' %(obj_uuid, ref_uuid)
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
self._db_conn.ref_relax_for_delete(obj_uuid, ref_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'ref-relax-for-delete'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return {'uuid': obj_uuid}
# end ref_relax_for_delete_http_post
    def fq_name_to_id_http_post(self):
        """POST handler resolving a (type, fq_name) pair to an object uuid.

        For projects, a failed lookup triggers the 'pre_project_read_fqname'
        extension hook (which may create/sync the project) before retrying.
        Raises HttpError 404 when the name cannot be resolved and permission
        errors when the caller lacks read access.
        """
        self._post_common(None, {})
        # NOTE: locals 'type' and 'id' shadow builtins; preserved as-is
        type = get_request().json.get('type')
        res_type, r_class = self._validate_resource_type(type)
        obj_type = r_class.object_type
        fq_name = get_request().json['fq_name']
        try:
            id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        except NoIdError:
            if obj_type == 'project':
                resource_type, r_class = self._validate_resource_type(obj_type)
                try:
                    # give extensions a chance to materialize the project,
                    # then retry the lookup
                    self._extension_mgrs['resourceApi'].map_method(
                        'pre_%s_read_fqname' %(obj_type), fq_name)
                    id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
                except Exception as e:
                    self.config_log("fq_name_to_id_http_post error: " + str(e),
                                    level=SandeshLevel.SYS_DEBUG)
                    raise cfgm_common.exceptions.HttpError(
                        404, 'Name ' + pformat(fq_name) + ' not found')
            else:
                raise cfgm_common.exceptions.HttpError(
                    404, 'Name ' + pformat(fq_name) + ' not found')
        # ensure user has access to this id
        ok, result = self._permissions.check_perms_read(bottle.request, id)
        if not ok:
            err_code, err_msg = result
            raise cfgm_common.exceptions.HttpError(err_code, err_msg)
        return {'uuid': id}
    # end fq_name_to_id_http_post
def id_to_fq_name_http_post(self):
self._post_common(None, {})
obj_uuid = get_request().json['uuid']
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(get_request(), obj_uuid)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'UUID ' + obj_uuid + ' not found')
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
res_type = self.get_resource_class(obj_type).resource_type
return {'fq_name': fq_name, 'type': res_type}
# end id_to_fq_name_http_post
# Enables a user-agent to store and retrieve key-val pair
# TODO this should be done only for special/quantum plugin
def useragent_kv_http_post(self):
self._post_common(None, {})
request_params = get_request().json
oper = request_params.get('operation')
if oper is None:
err_msg = ("Error: Key/value store API needs 'operation' "
"parameter")
raise cfgm_common.exceptions.HttpError(400, err_msg)
if 'key' not in request_params:
err_msg = ("Error: Key/value store API needs 'key' parameter")
raise cfgm_common.exceptions.HttpError(400, err_msg)
key = request_params.get('key')
val = request_params.get('value', '')
# TODO move values to common
if oper == 'STORE':
self._db_conn.useragent_kv_store(key, val)
elif oper == 'RETRIEVE':
try:
result = self._db_conn.useragent_kv_retrieve(key)
return {'value': result}
except NoUserAgentKey:
raise cfgm_common.exceptions.HttpError(
404, "Unknown User-Agent key " + key)
elif oper == 'DELETE':
result = self._db_conn.useragent_kv_delete(key)
else:
raise cfgm_common.exceptions.HttpError(
404, "Invalid Operation " + oper)
# end useragent_kv_http_post
def db_check(self):
""" Check database for inconsistencies. No update to database """
check_result = self._db_conn.db_check()
return {'results': check_result}
# end db_check
def fetch_records(self):
""" Retrieve and return all records """
result = self._db_conn.db_read()
return {'results': result}
# end fetch_records
def start_profile(self):
#GreenletProfiler.start()
pass
# end start_profile
def stop_profile(self):
pass
#GreenletProfiler.stop()
#stats = GreenletProfiler.get_func_stats()
#self._profile_info = stats.print_all()
#return self._profile_info
# end stop_profile
def get_profile_info(self):
return self._profile_info
# end get_profile_info
def get_resource_class(self, type_str):
try:
return self._resource_classes[type_str]
except KeyError:
raise TypeError('Invalid Contrail resource type: %s' % type_str)
def list_bulk_collection_http_post(self):
""" List collection when requested ids don't fit in query params."""
type = get_request().json.get('type') # e.g. virtual-network
resource_type, r_class = self._validate_resource_type(type)
try:
parent_uuids = get_request().json['parent_id'].split(',')
except KeyError:
parent_uuids = None
try:
back_ref_uuids = get_request().json['back_ref_id'].split(',')
except KeyError:
back_ref_uuids = None
try:
obj_uuids = get_request().json['obj_uuids'].split(',')
except KeyError:
obj_uuids = None
is_count = get_request().json.get('count', False)
is_detail = get_request().json.get('detail', False)
include_shared = get_request().json.get('shared', False)
try:
filters = utils.get_filters(get_request().json.get('filters'))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid filter ' + get_request().json.get('filters'))
req_fields = get_request().json.get('fields', [])
if req_fields:
req_fields = req_fields.split(',')
exclude_hrefs = get_request().json.get('exclude_hrefs', False)
pagination = {}
if 'page_marker' in get_request().json:
pagination['marker'] = self._validate_page_marker(
get_request().json['page_marker'])
if 'page_limit' in get_request().json:
pagination['limit'] = self._validate_page_limit(
get_request().json['page_limit'])
return self._list_collection(r_class.object_type, parent_uuids,
back_ref_uuids, obj_uuids, is_count,
is_detail, filters, req_fields,
include_shared, exclude_hrefs,
pagination)
# end list_bulk_collection_http_post
# Private Methods
    def _parse_args(self, args_str):
        '''
        Parse command-line / ini-file options into self._args.

        Eg. python vnc_cfg_api_server.py --cassandra_server_list
                                         10.1.2.3:9160 10.1.2.4:9160
                                         --redis_server_ip 127.0.0.1
                                         --redis_server_port 6382
                                         --collectors 127.0.0.1:8086
                                         --http_server_port 8090
                                         --listen_ip_addr 127.0.0.1
                                         --listen_port 8082
                                         --admin_port 8095
                                         --region_name RegionOne
                                         --log_local
                                         --log_level SYS_DEBUG
                                         --logging_conf <logger-conf-file>
                                         --log_category test
                                         --log_file <stdout>
                                         --trace_file /var/log/contrail/vnc_openstack.err
                                         --use_syslog
                                         --syslog_facility LOG_USER
                                         --worker_id 1
                                         --rabbit_max_pending_updates 4096
                                         --rabbit_health_check_interval 120.0
                                         --cluster_id <testbed-name>
                                         [--auth keystone]
                                         [--default_encoding ascii ]
                                         --object_cache_size 10000
                                         --object_cache_exclude_types ''
                                         --max_request_size 1024000
        '''
        # utils.parse_args returns (args, remaining); only args is kept
        self._args, _ = utils.parse_args(args_str)
    # end _parse_args
# sigchld handler is currently not engaged. See comment @sigchld
    def sigchld_handler(self):
        """Reinitialize the DB connection and default entries.

        NOTE: this handler is currently not registered (see the comment at
        the sigchld registration site); kept for reference.
        """
        # DB interface initialization
        self._db_connect(reset_config=False)
        self._db_init_entries()
    # end sigchld_handler
def sigterm_handler(self):
exit()
# sighup handler for applying new configs
    def sighup_handler(self):
        """On SIGHUP, re-read the conf file and re-point sandesh collectors.

        Only the 'collectors' option of the [DEFAULTS] section is
        re-applied; a changed list triggers a reconnect with the list
        shuffled for load-balancing.  All other options need a restart.
        """
        if self._args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read(self._args.conf_file)
            if 'DEFAULTS' in config.sections():
                try:
                    collectors = config.get('DEFAULTS', 'collectors')
                    if type(collectors) is str:
                        collectors = collectors.split()
                        # md5 of the joined list is a cheap change detector
                        new_chksum = hashlib.md5("".join(collectors)).hexdigest()
                        if new_chksum != self._chksum:
                            self._chksum = new_chksum
                            self._random_collectors = random.sample(collectors, len(collectors))
                            # Reconnect to achieve load-balance irrespective of list
                            self._sandesh.reconfig_collectors(self._random_collectors)
                except ConfigParser.NoOptionError as e:
                    # no 'collectors' option configured; nothing to re-apply
                    pass
    # end sighup_handler
    def _load_extensions(self):
        """Load the stevedore extension managers.

        Instantiates the 'resourceApi', 'resync' (only when an auth backend
        is configured) and 'neutronApi' extension managers.  Any failure is
        logged but deliberately non-fatal: the API server can run without
        optional extensions.
        """
        try:
            conf_sections = self._args.config_sections
            hostname = socket.getfqdn(self._args.listen_ip_addr)
            self._extension_mgrs['resourceApi'] = ExtensionManager(
                'vnc_cfg_api.resourceApi',
                propagate_map_exceptions=True,
                api_server_ip=hostname,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh)
            # resync only makes sense when an auth backend is configured
            if self._args.auth != 'no-auth':
                self._extension_mgrs['resync'] = ExtensionManager(
                    'vnc_cfg_api.resync', api_server_ip=hostname,
                    api_server_port=self._args.listen_port,
                    conf_sections=conf_sections, sandesh=self._sandesh)
                # let resourceApi extensions know about the resync manager
                self._extension_mgrs['resourceApi'].map_method(
                    'set_resync_extension_manager', self._extension_mgrs['resync'])
            self._extension_mgrs['neutronApi'] = ExtensionManager(
                'vnc_cfg_api.neutronApi',
                api_server_ip=hostname,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh,
                api_server_obj=self)
        except Exception as e:
            # best-effort: log the traceback and continue without extensions
            err_msg = cfgm_common.utils.detailed_traceback()
            self.config_log("Exception in extension load: %s" %(err_msg),
                level=SandeshLevel.SYS_ERR)
    # end _load_extensions
def _db_connect(self, reset_config):
cass_server_list = self._args.cassandra_server_list
redis_server_ip = self._args.redis_server_ip
redis_server_port = self._args.redis_server_port
zk_server = self._args.zk_server_ip
rabbit_servers = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
cassandra_user = self._args.cassandra_user
cassandra_password = self._args.cassandra_password
cassandra_use_ssl = self._args.cassandra_use_ssl
cassandra_ca_certs = self._args.cassandra_ca_certs
obj_cache_entries = int(self._args.object_cache_entries)
obj_cache_exclude_types = \
[t.replace('-', '_').strip() for t in
self._args.object_cache_exclude_types.split(',')]
debug_obj_cache_types = \
[t.replace('-', '_').strip() for t in
self._args.debug_object_cache_types.split(',')]
db_engine = self._args.db_engine
self._db_engine = db_engine
cred = None
db_server_list = None
if db_engine == 'cassandra':
if cassandra_user is not None and cassandra_password is not None:
cred = {'username':cassandra_user,'password':cassandra_password}
db_server_list = cass_server_list
self._db_conn = VncDbClient(
self, db_server_list, rabbit_servers, rabbit_port, rabbit_user,
rabbit_password, rabbit_vhost, rabbit_ha_mode, self._args.listen_ip_addr,
reset_config, zk_server, self._args.cluster_id, db_credential=cred,
db_engine=db_engine, rabbit_use_ssl=self._args.rabbit_use_ssl,
kombu_ssl_version=self._args.kombu_ssl_version,
kombu_ssl_keyfile= self._args.kombu_ssl_keyfile,
kombu_ssl_certfile=self._args.kombu_ssl_certfile,
kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs,
obj_cache_entries=obj_cache_entries,
obj_cache_exclude_types=obj_cache_exclude_types,
debug_obj_cache_types=debug_obj_cache_types,
cassandra_use_ssl=self._args.cassandra_use_ssl,
cassandra_ca_certs=self._args.cassandra_ca_certs)
#TODO refacter db connection management.
self._addr_mgmt._get_db_conn()
# end _db_connect
def _ensure_id_perms_present(self, obj_uuid, obj_dict):
"""
Called at resource creation to ensure that id_perms is present in obj
"""
# retrieve object and permissions
id_perms = self._get_default_id_perms()
if (('id_perms' not in obj_dict) or
(obj_dict['id_perms'] is None)):
# Resource creation
if obj_uuid is None:
obj_dict['id_perms'] = id_perms
return
return
# retrieve the previous version of the id_perms
# from the database and update the id_perms with
# them.
if obj_uuid is not None:
try:
old_id_perms = self._db_conn.uuid_to_obj_perms(obj_uuid)
for field, value in old_id_perms.items():
if value is not None:
id_perms[field] = value
except NoIdError:
pass
# not all fields can be updated
if obj_uuid:
field_list = ['enable', 'description']
else:
field_list = ['enable', 'description', 'user_visible', 'creator']
# Start from default and update from obj_dict
req_id_perms = obj_dict['id_perms']
for key in field_list:
if key in req_id_perms:
id_perms[key] = req_id_perms[key]
# TODO handle perms present in req_id_perms
obj_dict['id_perms'] = id_perms
# end _ensure_id_perms_present
def _get_default_id_perms(self, **kwargs):
id_perms = copy.deepcopy(Provision.defaults.perms)
id_perms_json = json.dumps(id_perms, default=lambda o: dict((k, v)
for k, v in o.__dict__.iteritems()))
id_perms_dict = json.loads(id_perms_json)
id_perms_dict.update(kwargs)
return id_perms_dict
# end _get_default_id_perms
def _ensure_perms2_present(self, obj_type, obj_uuid, obj_dict,
project_id=None):
"""
Called at resource creation to ensure that id_perms is present in obj
"""
# retrieve object and permissions
perms2 = self._get_default_perms2()
# set ownership of object to creator tenant
if obj_type == 'project' and 'uuid' in obj_dict:
perms2['owner'] = str(obj_dict['uuid']).replace('-', '')
elif obj_dict.get('perms2') and obj_dict['perms2'].get('owner'):
perms2['owner'] = obj_dict['perms2']['owner']
elif 'fq_name' in obj_dict and obj_dict['fq_name'][:-1]:
if 'parent_type' in obj_dict:
parent_type = obj_dict['parent_type'].replace('-', '_')
else:
r_class = self.get_resource_class(obj_type)
if (len(r_class.parent_types) != 1):
msg = ("Ambiguous parent to ensure permissiosn of %s, "
"please choose one parent type: %s" %
(obj_type, pformat(r_class.parent_types)))
raise cfgm_common.exceptions.HttpError(400, msg)
parent_type = r_class.parent_types[0].replace('-', '_')
if parent_type == 'domain':
if project_id:
perms2['owner'] = project_id
else:
perms2['owner'] = 'cloud-admin'
else:
parent_fq_name = obj_dict['fq_name'][:-1]
parent_uuid = obj_dict.get('parent_uuid')
try:
if parent_uuid is None:
try:
parent_uuid = self._db_conn.fq_name_to_uuid(
parent_type, parent_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name' + pformat(parent_fq_name) + ' not found')
ok, parent_obj_dict = self._db_conn.dbe_read(
parent_type, parent_uuid, obj_fields=['perms2'])
except NoIdError as e:
msg = "Parent %s cannot be found: %s" % (parent_type, str(e))
raise cfgm_common.exceptions.HttpError(404, msg)
perms2['owner'] = parent_obj_dict['perms2']['owner']
elif project_id:
perms2['owner'] = project_id
else:
perms2['owner'] = 'cloud-admin'
if obj_dict.get('perms2') is None:
# Resource creation
if obj_uuid is None:
obj_dict['perms2'] = perms2
return
# Resource already exists
try:
obj_dict['perms2'] = self._db_conn.uuid_to_obj_perms2(obj_uuid)
except NoIdError:
obj_dict['perms2'] = perms2
return
# retrieve the previous version of the perms2
# from the database and update the perms2 with
# them.
if obj_uuid is not None:
try:
old_perms2 = self._db_conn.uuid_to_obj_perms2(obj_uuid)
for field, value in old_perms2.items():
if value is not None:
perms2[field] = value
except NoIdError:
pass
# Start from default and update from obj_dict
req_perms2 = obj_dict['perms2']
for key in req_perms2:
perms2[key] = req_perms2[key]
# TODO handle perms2 present in req_perms2
obj_dict['perms2'] = perms2
# ensure is_shared and global_access are consistent
shared = obj_dict.get('is_shared', None)
gaccess = obj_dict['perms2'].get('global_access', None)
if (gaccess is not None and shared is not None and
shared != (gaccess != 0)):
msg = ("Inconsistent is_shared (%s a) and global_access (%s)" %
(shared, gaccess))
# NOTE(ethuleau): ignore exception for the moment as it breaks the
# Neutron use case where external network have global access but
# is property 'is_shared' is False https://review.opencontrail.org/#/q/Id6a0c1a509d7663da8e5bc86f2c7c91c73d420a2
# Before patch https://review.opencontrail.org/#q,I9f53c0f21983bf191b4c51318745eb348d48dd86,n,z
# error was also ignored as all retruned errors of that method were
# not took in account
# raise cfgm_common.exceptions.HttpError(400, msg)
def _get_default_perms2(self):
perms2 = copy.deepcopy(Provision.defaults.perms2)
perms2_json = json.dumps(perms2, default=lambda o: dict((k, v)
for k, v in o.__dict__.iteritems()))
perms2_dict = json.loads(perms2_json)
return perms2_dict
# end _get_default_perms2
    def _db_init_entries(self):
        """Create the singleton/default config objects on first start.

        Idempotent: create_singleton_entry() only creates what is missing.
        Ordering matters - parents (global-system-config,
        policy-management) are created before children referencing them.
        """
        # create singleton defaults if they don't exist already in db
        gsc = self.create_singleton_entry(GlobalSystemConfig(
            autonomous_system=64512, config_version=CONFIG_VERSION))
        self._gsc_uuid = gsc.uuid
        gvc = self.create_singleton_entry(GlobalVrouterConfig(
            parent_obj=gsc))
        self.create_singleton_entry(Domain())
        self.create_singleton_entry(Fabric())

        # Global and default policy resources
        pm = self.create_singleton_entry(PolicyManagement())
        self._global_pm_uuid = pm.uuid
        aps = self.create_singleton_entry(ApplicationPolicySet(
            parent_obj=pm, all_applications=True))
        # reference the default global APS to the global vrouter config
        ok, result = self._db_conn.ref_update(
            ApplicationPolicySet.object_type,
            aps.uuid,
            GlobalVrouterConfig.object_type,
            gvc.uuid,
            {'attr': None},
            'ADD',
            None,
        )
        if not ok:
            msg = ("Error while referencing global vrouter config %s with the "
                   "default global application policy set %s: %s" %
                   (gvc.uuid, aps.uuid, result[1]))
            self.config_log(msg, level=SandeshLevel.SYS_ERR)

        # fabric and link-local networks plus their default routing instances
        ip_fab_vn = self.create_singleton_entry(
            VirtualNetwork(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1],
                           is_provider_network=True))
        self.create_singleton_entry(
            RoutingInstance(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1], ip_fab_vn,
                            routing_instance_is_default=True))
        self.create_singleton_entry(
            RoutingInstance('__default__', ip_fab_vn))
        link_local_vn = self.create_singleton_entry(
            VirtualNetwork(cfgm_common.LINK_LOCAL_VN_FQ_NAME[-1]))
        self.create_singleton_entry(
            RoutingInstance('__link_local__', link_local_vn,
                            routing_instance_is_default=True))

        # dc network
        dci_vn = self.create_singleton_entry(
            VirtualNetwork(cfgm_common.DCI_VN_FQ_NAME[-1]))
        self.create_singleton_entry(
            RoutingInstance(cfgm_common.DCI_VN_FQ_NAME[-1], dci_vn,
                            routing_instance_is_default=True))
        self.create_singleton_entry(
            RoutingInstance('__default__', dci_vn))

        # specifying alarm kwargs like contrail_alarm.py
        alarm_kwargs = {"alarm_rules":
                        {"or_list" : [
                         {"and_list": [
                           { "operand1": "UveConfigReq.err_info.*.",
                            "operation": "==",
                            "operand2": {"json_value": "True"}
                          } ]
                         } ]
                        },
                        "alarm_severity": 1,
                        "fq_name": [
                            "default-global-system-config",
                            "system-defined-bottle-request-size-limit"
                        ],
                        "id_perms": {
                            "description": "Bottle request size limit exceeded."
                        },
                        "parent_type": "global-system-config",
                        "uve_keys": {
                            "uve_key": [
                                "config-node"
                            ]
                        }
                       }
        self.create_singleton_entry(Alarm(**alarm_kwargs))
        try:
            self.create_singleton_entry(
                RoutingInstance('default-virtual-network',
                                routing_instance_is_default=True))
        except Exception as e:
            self.config_log('error while creating primary routing instance for'
                            'default-virtual-network: ' + str(e),
                            level=SandeshLevel.SYS_NOTICE)

        # Create singleton SG __no_rule__ object for openstack
        domain_obj = Domain(SG_NO_RULE_FQ_NAME[0])
        proj_obj = Project(SG_NO_RULE_FQ_NAME[1], domain_obj)
        sg_rules = PolicyEntriesType()
        id_perms = IdPermsType(enable=True,
                               description="Security group with no rules",
                               user_visible=True)
        perms2 = PermType2(owner='cloud-admin')
        perms2.set_global_access(PERMS_RX)
        sg_obj = SecurityGroup(
            name=SG_NO_RULE_FQ_NAME[-1],
            parent_obj=proj_obj,
            security_group_entries=sg_rules.exportDict(''),
            id_perms=id_perms.exportDict(''),
            perms2=perms2.exportDict(''),
            security_group_id=self.alloc_security_group_id(
                ':'.join(SG_NO_RULE_FQ_NAME)))
        self.create_singleton_entry(sg_obj)

        self.create_singleton_entry(DiscoveryServiceAssignment())
        self.create_singleton_entry(GlobalQosConfig())

        # flat-subnet ipam used by service chaining
        sc_ipam_subnet_v4 = IpamSubnetType(subnet=SubnetType('0.0.0.0', 8))
        sc_ipam_subnet_v6 = IpamSubnetType(subnet=SubnetType('::ffff', 104))
        sc_ipam_subnets = IpamSubnets([sc_ipam_subnet_v4, sc_ipam_subnet_v6])
        sc_ipam_obj = NetworkIpam('service-chain-flat-ipam',
                ipam_subnet_method="flat-subnet", ipam_subnets=sc_ipam_subnets)
        self.create_singleton_entry(sc_ipam_obj)

        # Create pre-defined tag-type
        for type_str, type_id in TagTypeNameToId.items():
            type_id_hex = "0x{:04x}".format(type_id)
            tag = TagType(name=type_str, tag_type_id=type_id_hex)
            tag.display_name = type_str
            self.create_singleton_entry(tag, user_visible=False)

        # only the first worker resyncs the DB and loads init data
        if int(self._args.worker_id) == 0:
            self._db_conn.db_resync()

        #Load init data for job playbooks like JobTemplates, Tags, etc
        if self._args.enable_fabric_ansible:
            self._load_init_data()

        # make default ipam available across tenants for backward compatability
        obj_type = 'network_ipam'
        fq_name = ['default-domain', 'default-project', 'default-network-ipam']
        obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                                obj_fields=['perms2'])
        obj_dict['perms2']['global_access'] = PERMS_RX
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
    # end _db_init_entries
# Load init data for job playbooks like JobTemplates, Tags, etc
def _load_init_data(self):
"""
This function loads init data from a data file specified by the
argument '--fabric_ansible_dir' to the database. The data file
must be in JSON format and follow the format below:
{
"data": [
{
"object_type": "<vnc object type name>",
"objects": [
{
<vnc object payload>
},
...
]
},
...
]
}
Here is an example:
{
"data": [
{
"object_type": "tag",
"objects": [
{
"fq_name": [
"fabric=management_ip"
],
"name": "fabric=management_ip",
"tag_type_name": "fabric",
"tag_value": "management_ip"
}
]
}
]
}
"""
try:
json_data = self._load_json_data()
if json_data is None:
msg = 'unable to load init data'
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return
for item in json_data.get("data"):
object_type = item.get("object_type")
# Get the class name from object type
cls_name = cfgm_common.utils.CamelCase(object_type)
# Get the class object
cls_ob = cfgm_common.utils.str_to_class(cls_name, __name__)
# saving the objects to the database
for obj in item.get("objects"):
instance_obj = cls_ob(**obj)
self.create_singleton_entry(instance_obj)
# update the objects if it already exists
fq_name = instance_obj.get_fq_name()
uuid = self._db_conn.fq_name_to_uuid(
object_type.replace('-', '_'), fq_name)
self._db_conn.dbe_update(object_type, uuid, obj)
for item in json_data.get("refs"):
from_type = item.get("from_type")
from_fq_name = item.get("from_fq_name")
from_uuid = self._db_conn._object_db.fq_name_to_uuid(
from_type, from_fq_name
)
to_type = item.get("to_type")
to_fq_name = item.get("to_fq_name")
to_uuid = self._db_conn._object_db.fq_name_to_uuid(
to_type, to_fq_name
)
ok, result = self._db_conn.ref_update(
from_type,
from_uuid,
to_type,
to_uuid,
{ 'attr': None },
'ADD',
None,
)
except Exception as e:
err_msg = 'error while loading init data: %s\n' % str(e)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end Load init data
# Load json data from fabric_ansible_playbooks/conf directory
def _load_json_data(self):
json_file = self._args.fabric_ansible_dir + '/conf/predef_payloads.json'
if not os.path.exists(json_file):
msg = 'predef payloads file does not exist: %s' % json_file
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return None
# open the json file
with open(json_file) as data_file:
input_json = json.load(data_file)
# Loop through the json
for item in input_json.get("data"):
if item.get("object_type") == "job-template":
for object in item.get("objects"):
fq_name = object.get("fq_name")[-1]
schema_name = fq_name.replace('template', 'schema.json')
with open(os.path.join(self._args.fabric_ansible_dir +
'/schema/', schema_name), 'r+') as schema_file:
schema_json = json.load(schema_file)
object["job_template_input_schema"] = schema_json.get(
"input_schema")
object["job_template_output_schema"] = schema_json.get(
"output_schema")
object["job_template_input_ui_schema"] = schema_json.get(
"input_ui_schema")
object["job_template_output_ui_schema"] = schema_json.get(
"output_ui_schema")
return input_json
# end load json data
# generate default rbac group rule
def _create_default_rbac_rule(self):
# allow full access to cloud admin
rbac_rules = [
{
'rule_object':'fqname-to-id',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'id-to-fqname',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'useragent-kv',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'documentation',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
{
'rule_object':'/',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
]
obj_type = 'api_access_list'
fq_name = ['default-global-system-config', 'default-api-access-list']
try:
# ensure global list is not missing any default rules (bug 1642464)
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, id)
update_obj = False
cur_rbac_rules = copy.deepcopy(obj_dict['api_access_list_entries']['rbac_rule'])
for rule in rbac_rules:
present = False
for existing_rule in cur_rbac_rules:
if rule == existing_rule:
present = True
cur_rbac_rules.remove(existing_rule)
break
if not present:
obj_dict['api_access_list_entries']['rbac_rule'].append(rule)
update_obj = True
if update_obj:
self._db_conn.dbe_update(obj_type, id, obj_dict)
return
except NoIdError:
pass
rge = RbacRuleEntriesType([])
for rule in rbac_rules:
rule_perms = [RbacPermType(role_name=p['role_name'], role_crud=p['role_crud']) for p in rule['rule_perms']]
rbac_rule = RbacRuleType(rule_object=rule['rule_object'],
rule_field=rule['rule_field'], rule_perms=rule_perms)
rge.add_rbac_rule(rbac_rule)
rge_dict = rge.exportDict('')
glb_rbac_cfg = ApiAccessList(parent_type='global-system-config',
fq_name=fq_name, api_access_list_entries = rge_dict)
try:
self.create_singleton_entry(glb_rbac_cfg)
except Exception as e:
err_msg = 'Error creating default api access list object'
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# end _create_default_rbac_rule
def _resync_domains_projects(self, ext):
if hasattr(ext.obj, 'resync_domains_projects'):
ext.obj.resync_domains_projects()
# end _resync_domains_projects
    def create_singleton_entry(self, singleton_obj, user_visible=True):
        """Create singleton_obj in the DB if it does not exist yet.

        Returns the object with its uuid populated (the existing uuid when
        the object was already present).  Also creates default children
        and, for legacy DBs, migrates the fq-name->uuid mapping from
        cassandra into zookeeper.
        """
        s_obj = singleton_obj
        obj_type = s_obj.object_type
        fq_name = s_obj.get_fq_name()

        # TODO remove backward compat create mapping in zk
        # for singleton START
        try:
            cass_uuid = self._db_conn._object_db.fq_name_to_uuid(obj_type, fq_name)
            try:
                zk_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
            except NoIdError:
                # doesn't exist in zookeeper but does so in cassandra,
                # migrate this info to zookeeper
                self._db_conn._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(cass_uuid))
        except NoIdError:
            # doesn't exist in cassandra as well as zookeeper, proceed normal
            pass
        # TODO backward compat END

        # create if it doesn't exist yet
        try:
            s_obj.uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        except NoIdError:
            # serialize the object into a plain dict and fill in perms
            obj_json = json.dumps(s_obj, default=_obj_serializer_all)
            obj_dict = json.loads(obj_json)
            if s_obj.get_id_perms():
                obj_dict['id_perms'] = s_obj.get_id_perms()
            else:
                obj_dict['id_perms'] = self._get_default_id_perms(
                    user_visible=user_visible)
            if s_obj.get_perms2():
                obj_dict['perms2'] = s_obj.get_perms2()
            else:
                obj_dict['perms2'] = self._get_default_perms2()
            # NOTE(review): 'ok' from dbe_alloc is not checked — TODO confirm
            (ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict)
            obj_id = result
            s_obj.uuid = obj_id
            # For virtual networks, allocate an ID
            if obj_type == 'virtual_network':
                vn_id = self.alloc_vn_id(s_obj.get_fq_name_str())
                obj_dict['virtual_network_network_id'] = vn_id
            if obj_type == 'tag':
                obj_dict = self._allocate_tag_id(obj_dict)
            self._db_conn.dbe_create(obj_type, obj_id, obj_dict)
            self.create_default_children(obj_type, s_obj)
        return s_obj
    # end create_singleton_entry
# allocate tag id for tag object
    def _allocate_tag_id(self, obj_dict):
        """Allocate and record the composed tag ID for a tag object dict.

        Ensures the tag-type object exists, records a ref to it, allocates
        a per-type value ID in zookeeper and writes the composed
        '<type-id><value-id:04x>' string into obj_dict['tag_id'].
        Returns the mutated obj_dict.
        """
        type_str = obj_dict['tag_type_name']
        value_str = obj_dict['tag_value']

        # NOTE(review): 'ok' is not checked; a failed locate() would make
        # the subscripting below fail with an unrelated error — TODO confirm
        ok, result = self.get_resource_class('tag_type').locate(
            [type_str], id_perms=IdPermsType(user_visible=False))
        tag_type = result
        obj_dict['tag_type_refs'] = [
            {
                'uuid': tag_type['uuid'],
                'to': tag_type['fq_name'],
            },
        ]

        # Allocate ID for tag value. Use the all fq_name to distinguish same
        # tag values between global and scoped
        value_id = self.get_resource_class(
            'tag').vnc_zk_client.alloc_tag_value_id(
                type_str, ':'.join(obj_dict['fq_name']))

        # Compose Tag ID with the type ID and value ID
        obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
                                               value_id)
        return obj_dict
    # end allocate tag id
def _validate_page_marker(self, req_page_marker):
# query params always appears as string
if req_page_marker and req_page_marker.lower() != 'none':
try:
req_page_marker_uuid = req_page_marker.split(':')[-1]
_ = str(uuid.UUID(req_page_marker_uuid))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_marker %s: %s' %(
req_page_marker, e))
else:
req_page_marker = None
return req_page_marker
# end _validate_page_marker
def _validate_page_limit(self, req_page_limit):
try:
val = int(req_page_limit)
if val <= 0:
raise Exception("page_limit has to be greater than zero")
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_limit %s: %s' %(
req_page_limit, e))
return int(req_page_limit)
# end _validate_page_limit
    def _list_collection(self, obj_type, parent_uuids=None,
                         back_ref_uuids=None, obj_uuids=None,
                         is_count=False, is_detail=False, filters=None,
                         req_fields=None, include_shared=False,
                         exclude_hrefs=False, pagination=None):
        """Core list implementation shared by GET list and bulk POST list.

        Loops over dbe_list pages until the requested page is filled (or
        no pagination was requested), filters out entries non-admin
        callers may not see, optionally decorates results with hrefs and
        names, and returns the response payload dict (with a 'marker' key
        when paginating).
        """
        resource_type, r_class = self._validate_resource_type(obj_type)

        is_admin = self.is_admin_request()
        if is_admin:
            field_names = req_fields
        else:
            # id_perms is always needed below for the user_visible check
            field_names = [u'id_perms'] + (req_fields or [])

        if is_count and is_admin:
            ret_result = 0
        else:
            ret_result = []

        page_filled = False
        if 'marker' in pagination:
            # if marker is None, start scanning from uuid 0
            page_start = pagination['marker'] or '0'
            if 'limit' in pagination:
                page_count = pagination['limit']
            else:
                page_count = self._args.paginate_count
        else:
            page_start = None  # cookie to start next search
            page_count = None  # remainder count to finish page

        (ok, result) = r_class.pre_dbe_list(obj_uuids, self._db_conn)
        if not ok:
            (code, msg) = result
            raise cfgm_common.exceptions.HttpError(code, msg)

        while not page_filled:
            (ok, result, ret_marker) = self._db_conn.dbe_list(obj_type,
                parent_uuids, back_ref_uuids, obj_uuids, is_count and is_admin,
                filters, is_detail=is_detail, field_names=field_names,
                include_shared=include_shared,
                paginate_start=page_start,
                paginate_count=page_count)
            if not ok:
                self.config_object_error(None, None, '%ss' %(obj_type),
                                         'dbe_list', result)
                raise cfgm_common.exceptions.HttpError(404, result)

            # If only counting, return early
            if is_count and is_admin:
                ret_result += result
                return {'%ss' %(resource_type): {'count': ret_result}}

            # non-admin, non-detail listings are trimmed to these fields
            allowed_fields = set(['uuid', 'href', 'fq_name'])
            allowed_fields |= set(req_fields or [])
            obj_dicts = []
            if is_admin:
                for obj_result in result:
                    if not exclude_hrefs:
                        obj_result['href'] = self.generate_url(
                            resource_type, obj_result['uuid'])
                    if is_detail:
                        obj_result['name'] = obj_result['fq_name'][-1]
                        obj_dicts.append({resource_type: obj_result})
                    else:
                        obj_dicts.append(obj_result)
            else:
                # fetch all perms of child/ref/back_ref of listed resources in
                # one DB call for performance reason
                if is_detail:
                    ref_uuids = {ref['uuid'] for link in r_class.obj_links
                                 for o in result for ref in o.get(link, [])}
                    if self.is_rbac_enabled():
                        fields = ['perms2']
                    else:
                        fields = ['id_perms']
                    ref_dicts = self._db_conn._object_db.object_raw_read(
                        resource_type, list(ref_uuids), fields)
                    ref_perms = {obj_dict['uuid']: obj_dict
                                 for obj_dict in ref_dicts}

                for obj_result in result:
                    id_perms = obj_result.get('id_perms')

                    if not id_perms:
                        # It is possible that the object was deleted, but received
                        # an update after that. We need to ignore it for now. In
                        # future, we should clean up such stale objects
                        continue

                    if not id_perms.get('user_visible', True):
                        # skip items not authorized
                        continue

                    ok, status = self._permissions.check_perms_read(
                        get_request(), obj_result['uuid'], obj_result)
                    if not ok and status[0] == 403:
                        continue

                    obj_result['name'] = obj_result['fq_name'][-1]
                    if is_detail:
                        self.obj_view(resource_type, obj_result, ref_perms)
                        obj_dicts.append({resource_type: obj_result})
                    else:
                        for field in set(obj_result.keys()) - allowed_fields:
                            del obj_result[field]
                        obj_dicts.append(obj_result)

                    if not exclude_hrefs:
                        obj_result['href'] = self.generate_url(
                            resource_type, obj_result['uuid'])
                # end obj_result in result
            # end not admin req

            ret_result.extend(obj_dicts)

            # decide whether another dbe_list page is needed
            if 'marker' not in pagination:
                page_filled = True
            elif ret_marker is None:  # pagination request and done
                page_filled = True
            else:  # pagination request and partially filled
                page_start = ret_marker
                page_count -= len(result)
                if page_count <= 0:
                    page_filled = True
        # end while not page_filled

        (ok, err_msg) = r_class.post_dbe_list(ret_result, self._db_conn)
        if not ok:
            (code, msg) = err_msg
            raise cfgm_common.exceptions.HttpError(code, msg)

        if 'marker' in pagination:  # send next marker along with results
            if is_count:
                return {'%ss' %(resource_type): {'count': len(ret_result)},
                        'marker': ret_marker}
            else:
                return {'%ss' %(resource_type): ret_result,
                        'marker': ret_marker}

        if is_count:
            return {'%ss' %(resource_type): {'count': len(ret_result)}}
        else:
            return {'%ss' %(resource_type): ret_result}
    # end _list_collection
def get_db_connection(self):
return self._db_conn
# end get_db_connection
def generate_url(self, resource_type, obj_uuid):
try:
url_parts = get_request().urlparts
netloc = url_parts.netloc.replace('<script>', '<!--script>')
netloc = netloc.replace('</script>', '</script-->')
return '%s://%s/%s/%s'\
% (url_parts.scheme, netloc, resource_type, obj_uuid)
except Exception as e:
return '%s/%s/%s' % (self._base_url, resource_type, obj_uuid)
# end generate_url
def generate_hrefs(self, resource_type, obj_dict):
# return a copy of obj_dict with href keys for:
# self, parent, children, refs, backrefs
r_class = self.get_resource_class(resource_type)
obj_dict['href'] = self.generate_url(resource_type, obj_dict['uuid'])
try:
obj_dict['parent_href'] = self.generate_url(
obj_dict['parent_type'], obj_dict['parent_uuid'])
except KeyError:
# No parent
pass
for field, field_info in itertools.chain(
r_class.children_field_types.items(),
r_class.ref_field_types.items(),
r_class.backref_field_types.items(),
):
try:
type = field_info[0]
for link in obj_dict[field]:
link['href'] = self.generate_url(type, link['uuid'])
except KeyError:
pass
    def config_object_error(self, id, fq_name_str, obj_type,
                            operation, err_str):
        """Emit a config-audit sandesh log for a failed object operation.

        Args:
            id: object uuid (may be None).
            fq_name_str: colon-joined fq_name string (may be None).
            obj_type: resource type string, or None.
            operation: CRUD operation name being audited.
            err_str: error description; recorded as '<obj_type>:<err_str>'.
        """
        apiConfig = VncApiCommon()
        if obj_type is not None:
            apiConfig.object_type = obj_type
        apiConfig.identifier_name = fq_name_str
        apiConfig.identifier_uuid = id
        apiConfig.operation = operation
        if err_str:
            apiConfig.error = "%s:%s" % (obj_type, err_str)
        # fill url/remote-ip/user fields from the current request
        self._set_api_audit_info(apiConfig)

        log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
        log.send(sandesh=self._sandesh)
    # end config_object_error
def config_log(self, msg_str, level=SandeshLevel.SYS_INFO):
errcls = {
SandeshLevel.SYS_DEBUG: VncApiDebug,
SandeshLevel.SYS_INFO: VncApiInfo,
SandeshLevel.SYS_NOTICE: VncApiNotice,
SandeshLevel.SYS_ERR: VncApiError,
}
errcls.get(level, VncApiError)(
api_msg=msg_str, level=level, sandesh=self._sandesh).send(
sandesh=self._sandesh)
# end config_log
def _set_api_audit_info(self, apiConfig):
apiConfig.url = get_request().url
apiConfig.remote_ip = get_request().headers.get('X-Requestor-IP')
if not apiConfig.remote_ip:
# If the X-Requestor-IP was not sent, it's likely that the request
# did not come from node.js. In this case, try to get the remote IP as:
# 1. If present, the first IP address of HTTP_X_FORWARDED_FOR.
# 2. Else, If present, from the REMOTE_ADDR.
# 3. HTTP_X_Host
if 'HTTP_X_FORWARDED_FOR' in get_request().environ:
addr = get_request().environ.get('HTTP_X_FORWARDED_FOR').split(',')
apiConfig.remote_ip = addr[0]
elif 'REMOTE_ADDR' in get_request().environ:
apiConfig.remote_ip = get_request().environ.get('REMOTE_ADDR')
else:
apiConfig.remote_ip = get_request().headers.get('Host')
useragent = get_request().headers.get('X-Contrail-Useragent')
if not useragent:
useragent = get_request().headers.get('User-Agent')
apiConfig.useragent = useragent
apiConfig.user = get_request().headers.get('X-User-Name')
apiConfig.project = get_request().headers.get('X-Project-Name')
apiConfig.domain = get_request().headers.get('X-Domain-Name', 'None')
if apiConfig.domain.lower() == 'none':
apiConfig.domain = 'default-domain'
if int(get_request().headers.get('Content-Length', 0)) > 0:
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
# end _set_api_audit_info
# uuid is parent's for collections
def _get_common(self, request, uuid=None):
# TODO check api + resource perms etc.
if self.is_auth_needed() and uuid:
if isinstance(uuid, list):
for u_id in uuid:
ok, result = self._permissions.check_perms_read(request,
u_id)
if not ok:
return ok, result
else:
return self._permissions.check_perms_read(request, uuid)
return (True, '')
# end _get_common
def _put_common(
self, api_name, obj_type, obj_uuid, db_obj_dict, req_obj_dict=None,
req_prop_coll_updates=None, ref_args=None, quota_dict=None):
obj_fq_name = db_obj_dict.get('fq_name', 'missing-fq-name')
# ZK and rabbitmq should be functional
self._ensure_services_conn(
api_name, obj_type, obj_uuid, obj_fq_name)
resource_type, r_class = self._validate_resource_type(obj_type)
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_update' %(obj_type), obj_uuid, req_obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_update an extension had error for %s' \
%(obj_type, req_obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
db_conn = self._db_conn
# check visibility
if (not db_obj_dict['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % obj_uuid
self.config_object_error(obj_uuid, None, obj_type, api_name, result)
raise cfgm_common.exceptions.HttpError(404, result)
# properties validator (for collections validation in caller)
if req_obj_dict is not None:
ok, result = self._validate_props_in_request(r_class,
req_obj_dict, operation='UPDATE')
if not ok:
result = 'Bad property in %s: %s' %(api_name, result)
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
if req_obj_dict is not None:
ok, result = self._validate_refs_in_request(r_class, req_obj_dict)
if not ok:
result = 'Bad reference in %s: %s' %(api_name, result)
raise cfgm_common.exceptions.HttpError(400, result)
# common handling for all resource put
request = get_request()
fq_name_str = ":".join(obj_fq_name or [])
if req_obj_dict:
if ('id_perms' in req_obj_dict and
req_obj_dict['id_perms'].get('uuid')):
if not self._db_conn.match_uuid(req_obj_dict, obj_uuid):
msg = (
"UUID mismatch from %s:%s" %
(request.environ.get('REMOTE_ADDR',
"Remote address not found"),
request.environ.get('HTTP_USER_AGENT',
"User agent not found"))
)
self.config_object_error(
obj_uuid, fq_name_str, obj_type, 'put', msg)
self._db_conn.set_uuid(obj_type, req_obj_dict,
uuid.UUID(obj_uuid),
do_lock=False)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(obj_uuid, req_obj_dict)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = api_name
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig,
sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
if self.is_auth_needed():
ok, result = self._permissions.check_perms_write(request, obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, fq_name_str, obj_type, api_name, msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# no ref to a pending deleted resource
ok, result = r_class.no_pending_deleted_resource_in_refs(req_obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Validate perms on references
if req_obj_dict is not None:
try:
self._validate_perms_in_request(
r_class, obj_type, req_obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(400,
'Unknown reference in resource update %s %s.'
%(obj_type, req_obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
if req_obj_dict is not None:
req_obj_dict['uuid'] = obj_uuid
# Permit abort resource update and retrun 202 status code
get_context().set_state('PENDING_DBE_UPDATE')
ok, result = r_class.pending_dbe_update(db_obj_dict, req_obj_dict,
req_prop_coll_updates)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
if ok and isinstance(result, tuple) and result[0] == 202:
# Modifications accepted but not applied, pending update
# returns 202 HTTP OK code to aware clients
bottle.response.status = 202
return True, ''
def stateful_update():
get_context().set_state('PRE_DBE_UPDATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_update(
obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
prop_collection_updates=req_prop_coll_updates)
if not ok:
return (ok, result)
attr_to_publish = None
if isinstance(result, dict):
attr_to_publish = result
get_context().set_state('DBE_UPDATE')
if api_name == 'ref-update':
# read ref_update args
ref_obj_type = ref_args.get('ref_obj_type')
ref_uuid = ref_args.get('ref_uuid')
ref_data = ref_args.get('data')
operation = ref_args.get('operation')
relax_ref_for_delete = ref_args.get('relax_ref_for_delete', False)
(ok, result) = db_conn.ref_update(
obj_type,
obj_uuid,
ref_obj_type,
ref_uuid,
ref_data,
operation,
db_obj_dict['id_perms'],
attr_to_publish=attr_to_publish,
relax_ref_for_delete=relax_ref_for_delete
)
elif req_obj_dict:
(ok, result) = db_conn.dbe_update(
obj_type,
obj_uuid,
req_obj_dict,
attr_to_publish=attr_to_publish,
)
# Update quota counter
if resource_type == 'project' and 'quota' in req_obj_dict:
proj_id = req_obj_dict['uuid']
quota_dict = req_obj_dict['quota']
path_prefix = self._path_prefix + proj_id
try:
QuotaHelper._zk_quota_counter_update(
path_prefix, quota_dict, proj_id, db_conn,
self.quota_counter)
except NoIdError:
msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(msg, level=SandeshLevel.SYS_ERR)
elif req_prop_coll_updates:
(ok, result) = db_conn.prop_collection_update(
obj_type,
obj_uuid,
req_prop_coll_updates,
attr_to_publish=attr_to_publish,
)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_UPDATE')
# type-specific hook
(ok, result) = r_class.post_dbe_update(
obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
prop_collection_updates=req_prop_coll_updates)
if not ok:
return (ok, result)
return (ok, result)
# end stateful_update
try:
ok, result = stateful_update()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
self.undo(result, obj_type, id=obj_uuid)
# Revert changes made to quota counter by using DB quota dict
if resource_type == 'project' and 'quota' in req_obj_dict:
proj_id = db_obj_dict['uuid']
quota_dict = db_obj_dict.get('quota') or None
path_prefix = self._path_prefix + proj_id
try:
QuotaHelper._zk_quota_counter_update(
path_prefix, quota_dict, proj_id, self._db_conn,
self.quota_counter)
except NoIdError:
err_msg = "Error in rolling back quota count on undo "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_update' %(obj_type), obj_uuid,
req_obj_dict, db_obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_update an extension had error for %s' \
%(obj_type, req_obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end _put_common
# parent_type needed for perms check. None for derived objects (eg.
# routing-instance)
def _delete_common(self, request, obj_type, uuid, parent_uuid):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
fq_name = self._db_conn.uuid_to_fq_name(uuid)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = uuid
apiConfig.operation = 'delete'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if not self.is_auth_needed() or not parent_uuid:
return (True, '')
"""
Validate parent allows write access. Implicitly trust
parent info in the object since coming from our DB.
"""
return self._permissions.check_perms_delete(request, obj_type, uuid,
parent_uuid)
# end _http_delete_common
def _post_validate(self, obj_type=None, obj_dict=None):
if not obj_dict:
return
def _check_field_present(fname):
fval = obj_dict.get(fname)
if not fval:
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, no %s in POST body" %(fname))
return fval
fq_name = _check_field_present('fq_name')
# well-formed name checks
if illegal_xml_chars_RE.search(fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has illegal xml characters")
if obj_type == 'route_target':
invalid_chars = self._INVALID_NAME_CHARS - set(':')
else:
invalid_chars = self._INVALID_NAME_CHARS
if any((c in invalid_chars) for c in fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has one of invalid chars %s"
%(invalid_chars))
# end _post_validate
def validate_parent_type(self, obj_type, obj_dict):
parent_type = obj_dict.get('parent_type')
r_class = self.get_resource_class(obj_type)
allowed_parent_types = r_class.parent_types
if parent_type:
if parent_type not in allowed_parent_types:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif (len(allowed_parent_types) > 1 and
'config-root' not in allowed_parent_types):
raise cfgm_common.exceptions.HttpError(
400, 'Missing parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif len(allowed_parent_types) == 1:
parent_type = allowed_parent_types[0]
if parent_type in ('config-root', None):
if len(obj_dict['fq_name']) != 1:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name of an object with no parent: %s' % (
obj_dict['fq_name']))
elif len(obj_dict['fq_name']) < 2:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name for object with parent_type %s: %s' % (
parent_type, obj_dict['fq_name']))
# end validate_parent_type
def _post_common(self, obj_type, obj_dict):
self._ensure_services_conn(
'http_post', obj_type, obj_fq_name=obj_dict.get('fq_name'))
if not obj_dict:
# TODO check api + resource perms etc.
return (True, None)
# Fail if object exists already
try:
obj_uuid = self._db_conn.fq_name_to_uuid(
obj_type, obj_dict['fq_name'])
raise cfgm_common.exceptions.HttpError(
409, '' + pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_uuid)
except NoIdError:
pass
self.validate_parent_type(obj_type, obj_dict)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(None, obj_dict)
self._ensure_perms2_present(obj_type, None, obj_dict,
get_request().headers.environ.get('HTTP_X_PROJECT_ID', None))
# TODO check api + resource perms etc.
uuid_in_req = obj_dict.get('uuid', None)
# Set the display name
if (('display_name' not in obj_dict) or
(obj_dict['display_name'] is None)):
obj_dict['display_name'] = obj_dict['fq_name'][-1]
fq_name_str = ":".join(obj_dict['fq_name'])
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=fq_name_str
apiConfig.identifier_uuid = uuid_in_req
apiConfig.operation = 'post'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
if uuid_in_req:
if uuid_in_req != str(uuid.UUID(uuid_in_req)):
bottle.abort(400, 'Invalid UUID format: ' + uuid_in_req)
try:
fq_name = self._db_conn.uuid_to_fq_name(uuid_in_req)
raise cfgm_common.exceptions.HttpError(
409, uuid_in_req + ' already exists with fq_name: ' +
pformat(fq_name))
except NoIdError:
pass
apiConfig.identifier_uuid = uuid_in_req
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return (True, uuid_in_req)
# end _post_common
    def reset(self):
        """Cleanup internal state/in-flight operations on shutdown."""
        # stop the AMQP client first so no new events arrive while the
        # DB connection is being torn down
        if self._amqp_client is not None:
            self._amqp_client.stop()
        if self._db_conn:
            self._db_conn.reset()
    # end reset
# allocate block of IP addresses from VN. Subnet info expected in request
# body
def vn_ip_alloc_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : "2.1.1.0/24", "count" : 4}
req_dict = get_request().json
count = req_dict.get('count', 1)
subnet = req_dict.get('subnet')
family = req_dict.get('family')
try:
result = self.get_resource_class('virtual_network').ip_alloc(
vn_fq_name, subnet, count, family)
except vnc_addr_mgmt.AddrMgmtSubnetUndefined as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except vnc_addr_mgmt.AddrMgmtSubnetExhausted as e:
raise cfgm_common.exceptions.HttpError(409, str(e))
return result
# end vn_ip_alloc_http_post
# free block of ip addresses to subnet
def vn_ip_free_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
"""
{
"subnet" : "2.1.1.0/24",
"ip_addr": [ "2.1.1.239", "2.1.1.238", "2.1.1.237", "2.1.1.236" ]
}
"""
req_dict = get_request().json
ip_list = req_dict['ip_addr'] if 'ip_addr' in req_dict else []
result = self.get_resource_class('virtual_network').ip_free(
vn_fq_name, ip_list)
return result
# end vn_ip_free_http_post
# return no. of IP addresses from VN/Subnet
def vn_subnet_ip_count_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"]
req_dict = get_request().json
try:
(ok, result) = self._db_conn.dbe_read('virtual_network', id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception as e:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
obj_dict = result
subnet_list = req_dict[
'subnet_list'] if 'subnet_list' in req_dict else []
result = self.get_resource_class('virtual_network').subnet_ip_count(
vn_fq_name, subnet_list)
return result
# end vn_subnet_ip_count_http_post
    # check if token validation needed
    def is_auth_needed(self):
        """Return True unless the server runs in 'no-auth' mode."""
        return self.aaa_mode != 'no-auth'
    def is_rbac_enabled(self):
        """Return True when aaa-mode is 'rbac'."""
        return self.aaa_mode == 'rbac'
    @property
    def aaa_mode(self):
        # current authentication/authorization mode, backed by server args
        # (valid values listed in AAA_MODE_VALID_VALUES)
        return self._args.aaa_mode
    @aaa_mode.setter
    def aaa_mode(self, mode):
        # no validation here; callers (e.g. aaa_mode_http_put) validate
        # the mode before assigning
        self._args.aaa_mode = mode
    # indication if multi tenancy with rbac is enabled or disabled
    def aaa_mode_http_get(self):
        """REST handler: report the current aaa-mode."""
        return {'aaa-mode': self.aaa_mode}
    def aaa_mode_http_put(self):
        """REST handler: change the server aaa-mode.

        Requires a valid admin token; seeds the default RBAC rules when
        switching into rbac mode. Raises ValueError for an unknown mode
        (NOTE(review): surfaces as a 500 rather than an HttpError 400 —
        left as-is for caller compatibility).
        """
        aaa_mode = get_request().json['aaa-mode']
        if aaa_mode not in AAA_MODE_VALID_VALUES:
            raise ValueError('Invalid aaa-mode %s' % aaa_mode)
        ok, result = self._auth_svc.validate_user_token()
        if not ok:
            code, msg = result
            self.config_object_error(None, None, None, 'aaa_mode_http_put',
                                     msg)
            raise cfgm_common.exceptions.HttpError(code, msg)
        if not self.is_admin_request():
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")
        self.aaa_mode = aaa_mode
        if self.is_rbac_enabled():
            # (re-)create the default RBAC rule set
            self._create_default_rbac_rule()
        return {'aaa-mode': self.aaa_mode}
    # end
    @property
    def cloud_admin_role(self):
        # configured cloud-admin role name (taken from server args)
        return self._args.cloud_admin_role
    @property
    def global_read_only_role(self):
        # configured global read-only role name (taken from server args)
        return self._args.global_read_only_role
def set_tag(self):
self._post_common(None, {})
req_dict = get_request().json
obj_type = req_dict.pop('obj_type')
obj_uuid = req_dict.pop('obj_uuid')
need_update = False
if obj_type is None or obj_uuid is None:
msg = "Object type and UUID must be specified"
raise cfgm_common.exceptions.HttpError(400, msg)
ok, result = self._db_conn.dbe_read(
obj_type,
obj_uuid,
obj_fields=['parent_type', 'perms2', 'tag_refs'],
)
if not ok:
raise cfgm_common.exceptions.HttpError(*result)
obj_dict = result
def _locate_tag(type, value, is_global=False):
name = type + "=" + value
# unless global, inherit project id from caller
if is_global:
fq_name = [name]
else:
fq_name = copy.deepcopy(obj_dict['fq_name'])
if obj_type == 'project':
fq_name.append(name)
elif ('parent_type' in obj_dict and
obj_dict['parent_type'] == 'project'):
fq_name[-1] = name
elif ('perms2' in obj_dict and
is_uuid_like(obj_dict['perms2']['owner'])):
parent_uuid = str(uuid.UUID(obj_dict['perms2']['owner']))
try:
fq_name = self._db_conn.uuid_to_fq_name(parent_uuid)
except NoIdError:
msg = ("Cannot find %s %s owner" %
(obj_type, obj_dict['uuid']))
raise cfgm_common.exceptions.HttpError(404, msg)
fq_name.append(name)
else:
msg = ("Not able to determine the scope of the tag '%s'" %
name)
raise cfgm_common.exceptions.HttpError(404, msg)
# lookup (validate) tag
try:
tag_uuid = self._db_conn.fq_name_to_uuid('tag', fq_name)
except NoIdError:
msg = "Tag with FQName %s not found" % pformat(fq_name)
raise cfgm_common.exceptions.HttpError(404, msg)
return fq_name, tag_uuid
refs_per_type = {}
for ref in obj_dict.get('tag_refs', []):
ref_type = ref['to'][-1].partition('=')[0]
refs_per_type.setdefault(ref_type, []).append(ref)
for tag_type, attrs in req_dict.items():
tag_type = tag_type.lower()
# If the body of a Tag type is None, all references to that Tag
# type are remove on the resource
if attrs is None:
for ref in refs_per_type.get(tag_type, []):
need_update = True
obj_dict['tag_refs'].remove(ref)
refs_per_type[tag_type] = []
continue
# Else get defined values and update Tag references on the resource
is_global = attrs.get('is_global', False)
value = attrs.get('value')
add_values = set(attrs.get('add_values', []))
delete_values = set(attrs.get('delete_values', []))
# Tag type is unique per object, unless
# TAG_TYPE_NOT_UNIQUE_PER_OBJECT type
if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
if add_values or delete_values:
msg = ("Tag type %s cannot be set multiple times on a "
"same object." % tag_type)
raise cfgm_common.exceptions.HttpError(400, msg)
# address-group object can only be associated with label
if (obj_type == 'address_group' and
tag_type not in TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP):
msg = ("Invalid tag type %s for object type %s" %
(tag_type, obj_type))
raise cfgm_common.exceptions.HttpError(400, msg)
refs_per_values = {}
if tag_type in refs_per_type:
refs_per_values = {ref['to'][-1].partition('=')[2]: ref for ref
in refs_per_type[tag_type]}
if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
if value is None or isinstance(value, list):
msg = "No valid value provided for tag type %s" % tag_type
raise cfgm_common.exceptions.HttpError(400, msg)
# don't need to update if tag type with same value already
# referenced
if value in refs_per_values:
continue
for ref in refs_per_values.values():
need_update = True
# object already have a reference to that tag type with a
# different value, remove it
obj_dict['tag_refs'].remove(ref)
# finally, reference the tag type with the new value
tag_fq_name, tag_uuid = _locate_tag(tag_type, value, is_global)
obj_dict.setdefault('tag_refs', []).append({
'uuid': tag_uuid,
'to': tag_fq_name,
'attr': None,
})
need_update = True
else:
# Add 'value' attribut to 'add_values' list if not null
if value is not None:
add_values.add(value)
for add_value in add_values - set(refs_per_values.keys()):
need_update = True
tag_fq_name, tag_uuid = _locate_tag(tag_type, add_value,
is_global)
obj_dict.setdefault('tag_refs', []).append({
'uuid': tag_uuid,
'to': tag_fq_name,
'attr': None,
})
for del_value in delete_values & set(refs_per_values.keys()):
need_update = True
obj_dict['tag_refs'].remove(refs_per_values[del_value])
if need_update:
self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
return {}
def security_policy_draft(self):
self._post_common(None, {})
req_dict = get_request().json
scope_uuid = req_dict.pop('scope_uuid')
action = req_dict.pop('action')
pm_class = self.get_resource_class('policy-management')
try:
scope_type = self._db_conn.uuid_to_obj_type(scope_uuid)
except NoIdError as e:
msg = ("Cannot find scope where pending security resource are "
"own: %s" % str(e))
scope_class = self.get_resource_class(scope_type)
scope_fq_name = self._db_conn.uuid_to_fq_name(scope_uuid)
pm_fq_name = [POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]
if (scope_type == GlobalSystemConfig.object_type and
scope_fq_name == GlobalSystemConfig().fq_name):
parent_type = PolicyManagement.resource_type
parent_fq_name = PolicyManagement().fq_name
parent_uuid = self._global_pm_uuid
else:
pm_fq_name = scope_fq_name + pm_fq_name
parent_type = scope_class.resource_type
parent_fq_name = scope_fq_name
parent_uuid = scope_uuid
ok, result = pm_class.locate(
fq_name=pm_fq_name,
create_it=False,
fields=['%ss' % type for type in SECURITY_OBJECT_TYPES],
)
if not ok and result[0] == 404:
# Draft dedicated policy management does not exists, the draft mode
# is not enabled on the scope
msg = ("Security draft mode is not enabled on the %s %s (%s)" %
(scope_type.replace('_', ' ').title(), scope_fq_name,
scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
if not ok:
raise cfgm_common.exceptions.HttpError(result[0], result[1])
pm = result
scope_lock = self._db_conn._zk_db._zk_client.write_lock(
'%s/%s/%s' % (
self.security_lock_prefix, scope_type,
':'.join(scope_fq_name)
),
'api-server-%s %s' % (socket.getfqdn(self._args.listen_ip_addr), action),
)
try:
acquired_lock = scope_lock.acquire(timeout=1)
except LockTimeout:
acquired_lock = False
if acquired_lock:
try:
if action == 'commit':
self._security_commit_resources(scope_type, parent_type,
parent_fq_name,
parent_uuid, pm)
elif action == 'discard':
self._security_discard_resources(pm)
else:
msg = "Only 'commit' or 'discard' actions are supported"
raise cfgm_common.exceptions.HttpError(400, msg)
finally:
scope_lock.release()
else:
contenders = scope_lock.contenders()
action_in_progress = '<unknown action>'
if len(contenders) > 0 and contenders[0]:
_, _, action_in_progress = contenders[0].partition(' ')
msg = ("Security resource modifications or commit/discard action "
"on %s '%s' (%s) scope is under progress. Try again later."
% (scope_type.replace('_', ' ').title(),
':'.join(scope_fq_name), scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
# TODO(ethuleau): we could return some stats or type/uuid resources
# actions which were done during commit or discard?
return {}
    def _security_commit_resources(self, scope_type, parent_type,
                                   parent_fq_name, parent_uuid, pm):
        """Apply all pending (draft) security resources owned by the
        draft policy-management object 'pm' to the target scope.

        Each draft is classified by its 'draft_mode_state'
        ('created'/'updated'/'deleted'), the pending copy is purged, and
        the corresponding create/update/delete is replayed on the real
        resource under parent_fq_name. Ordering matters: updates are
        applied leaf-first (reversed), deletes last, and held back-refs
        are restored at the end.
        """
        updates = []
        deletes = []
        held_refs = []
        for type_name in SECURITY_OBJECT_TYPES:
            r_class = self.get_resource_class(type_name)
            for child in pm.get('%ss' % r_class.object_type, []):
                ok, result = r_class.locate(child['to'], child['uuid'],
                                            create_it=False)
                if not ok:
                    continue
                draft = result
                fq_name = parent_fq_name + [child['to'][-1]]
                try:
                    uuid = self._db_conn.fq_name_to_uuid(r_class.object_type,
                                                         fq_name)
                except NoIdError:
                    # No original version found, new resource created
                    uuid = None
                self._holding_backrefs(updates, held_refs, scope_type,
                                       r_class.object_type, fq_name, draft)
                # Purge pending resource as we re-use the same UUID
                self.internal_request_delete(r_class.object_type,
                                             child['uuid'])
                if uuid and draft['draft_mode_state'] == 'deleted':
                    # The resource is removed, we can purge original resource
                    deletes.append((r_class.object_type, uuid))
                elif uuid and draft['draft_mode_state'] == 'updated':
                    # Update orginal resource with pending resource
                    draft.pop('fq_name', None)
                    draft.pop('uuid', None)
                    draft.pop('draft_mode_state', None)
                    if 'id_perms' in draft:
                        draft['id_perms'].pop('uuid', None)
                    draft['parent_type'] = parent_type
                    draft['parent_uuid'] = parent_uuid
                    # if a ref type was purge when the draft mode is enabled,
                    # set the ref to an empty list to ensure all refs will be
                    # removed when resource will be updated/committed
                    for ref_type in r_class.ref_fields:
                        if ref_type not in draft:
                            draft[ref_type] = []
                    self._update_fq_name_security_refs(
                        parent_fq_name, pm['fq_name'], type_name, draft)
                    updates.append(('update', (r_class.resource_type, uuid,
                                               copy.deepcopy(draft))))
                elif not uuid and draft['draft_mode_state'] == 'created':
                    # Create new resource with pending values (re-use UUID)
                    draft.pop('id_perms', None)
                    draft.pop('perms2', None)
                    draft.pop('draft_mode_state', None)
                    draft['fq_name'] = fq_name
                    draft['parent_type'] = parent_type
                    draft['parent_uuid'] = parent_uuid
                    self._update_fq_name_security_refs(
                        parent_fq_name, pm['fq_name'], type_name, draft)
                    updates.append(('create', (r_class.resource_type,
                                               copy.deepcopy(draft))))
                else:
                    # inconsistent state (e.g. 'created' draft but original
                    # already exists): log and skip
                    msg = (
                        "Try to commit a security resource %s (%s) with "
                        "invalid state '%s'. Ignore it." %
                        (':'.join(draft.get('fq_name', ['FQ name unknown'])),
                         draft.get('uuid', 'UUID unknown'),
                         draft.get('draft_mode_state', 'No draft mode state'))
                    )
                    self.config_log(msg, level=SandeshLevel.SYS_WARN)
        # Need to create/update leaf resources first as they could be
        # referenced by another create/updated resource (e.g.: FP -> FP)
        updates.reverse()  # order is: AG, SG, FR, FP and APS
        for action, args in updates:
            getattr(self, 'internal_request_%s' % action)(*args)
        # Postpone delete to be sure deleted resource not anymore
        # referenced and delete resource with ref before resource with backref
        for args in deletes:  # order is: APS, FP, FR, SG and AG
            self.internal_request_delete(*args)
        for args, kwargs in held_refs:
            self.internal_request_ref_update(*args, **kwargs)
    @staticmethod
    def _update_fq_name_security_refs(parent_fq_name, pm_fq_name, res_type,
                                      draft):
        """Rewrite refs of a draft security resource from the draft
        policy-management scope (pm_fq_name) to the commit target scope
        (parent_fq_name). Mutates 'draft' in place.

        For firewall rules, also rewrites the colon-joined address-group
        FQ names embedded in the endpoint_1/endpoint_2 properties.
        """
        for ref_type in SECURITY_OBJECT_TYPES:
            for ref in draft.get('%s_refs' % ref_type, []):
                if ref['to'][:-1] == pm_fq_name:
                    ref['to'] = parent_fq_name + [ref['to'][-1]]
        if res_type == 'firewall_rule':
            for ep in [draft.get('endpoint_1', {}),
                       draft.get('endpoint_2', {})]:
                # 'address_group' is a ':'-joined FQ name string here
                ag_fq_name = ep.get('address_group', [])
                if ag_fq_name and ag_fq_name.split(':')[:-1] == pm_fq_name:
                    ep['address_group'] = ':'.join(parent_fq_name + [
                        ag_fq_name.split(':')[-1]])
    def _holding_backrefs(self, updates, held_refs, scope_type, obj_type,
                          fq_name, obj_dict):
        """Temporarily detach security back-references of a draft
        resource so it can be committed under its final FQ name.

        Only acts for global-scope, non-deleted drafts. Each non-global
        backref is removed now (internal_request_ref_update DELETE) and
        queued for re-creation after commit: address-group backrefs from
        firewall rules are re-established through the rule's endpoint
        properties (appended to 'updates'), all others are appended to
        'held_refs'. Mutates 'updates', 'held_refs' and 'obj_dict' in
        place.
        """
        backref_fields = {'%s_back_refs' % t for t in SECURITY_OBJECT_TYPES}
        if (scope_type == GlobalSystemConfig().object_type and
                obj_dict['draft_mode_state'] != 'deleted'):
            for backref_field in set(obj_dict.keys()) & backref_fields:
                # strip the '_back_refs' suffix to get the referring type
                backref_type = backref_field[:-10]
                for backref in copy.deepcopy(obj_dict.get(backref_field, [])):
                    # if it's a backref to global resource let it
                    if backref['to'][0] in [PolicyManagement().name,
                            POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                        continue
                    self.internal_request_ref_update(
                        backref_type,
                        backref['uuid'],
                        'DELETE',
                        obj_type,
                        ref_uuid=obj_dict['uuid'],
                    )
                    if obj_type == AddressGroup.object_type:
                        # Is not allowed to directly create Address Group
                        # reference to a Firewall Rule, use its endpoints
                        # address-group property
                        backref_class = self.get_resource_class(backref_type)
                        ok, result = backref_class.locate(
                            backref['to'],
                            backref['uuid'],
                            create_it=False,
                            fields=['endpoint_1', 'endpoint_2'])
                        if not ok:
                            msg = ("Cannot read Firewall Rule %s (%s)" %
                                   (backref['to'], backref['uuid']))
                            raise cfgm_common.exceptions.HttpError(400, msg)
                        fr = result
                        for ep_type in ['endpoint_1', 'endpoint_2']:
                            if (ep_type in fr and
                                    fr[ep_type].get('address_group', '') ==\
                                    ':'.join(obj_dict['fq_name'])):
                                ept = FirewallRuleEndpointType(
                                    address_group=':'.join(fq_name))
                                updates.append(
                                    ('update',
                                     (FirewallRule.resource_type, fr['uuid'],
                                      {ep_type: vars(ept)})))
                    else:
                        # re-add the ref (by FQ name) after the commit
                        held_refs.append(
                            ((backref_type, backref['uuid'], 'ADD', obj_type),
                             {
                                 'ref_fq_name': fq_name,
                                 'attr': backref.get('attr')
                             }
                            )
                        )
                    obj_dict[backref_field].remove(backref)
def _security_discard_resources(self, pm):
for type_name in SECURITY_OBJECT_TYPES:
r_class = self.get_resource_class(type_name)
for child in pm.get('%ss' % r_class.object_type, []):
self.internal_request_delete(r_class.object_type,
child['uuid'])
# end VncApiServer
def main(args_str=None, server=None):
    """Run an already constructed VncApiServer under a bottle WSGI
    server; blocks until interrupted and always resets server state on
    exit.

    When SSL is enabled, all three of cert/key/CA files must be
    configured or a VncError is raised.
    """
    vnc_api_server = server
    pipe_start_app = vnc_api_server.get_pipe_start_app()
    server_ip = vnc_api_server.get_listen_ip()
    server_port = vnc_api_server.get_server_port()
    enable_ssl = vnc_api_server.get_enable_ssl()
    if enable_ssl:
        certfile=vnc_api_server.get_certfile()
        keyfile=vnc_api_server.get_keyfile()
        ca_cert=vnc_api_server.get_ca_cert()
    """ @sigchld
    Disable handling of SIG_CHLD for now as every keystone request to validate
    token sends SIG_CHLD signal to API server.
    """
    #hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler)
    hub.signal(signal.SIGTERM, vnc_api_server.sigterm_handler)
    hub.signal(signal.SIGHUP, vnc_api_server.sighup_handler)
    if pipe_start_app is None:
        pipe_start_app = vnc_api_server.api_bottle
    try:
        if enable_ssl:
            if not (certfile and keyfile and ca_cert):
                msg = "SSL is enabled but one or more of these options " \
                      "config_api_ssl_keyfile, config_api_ssl_certfile, " \
                      "config_api_ssl_ca_cert not specified"
                raise cfgm_common.exceptions.VncError(msg)
            bottle.run(app=pipe_start_app, host=server_ip, port=server_port,
                       ca_certs=ca_cert, keyfile=keyfile, certfile=certfile,
                       server=get_bottle_server(server._args.max_requests))
        else:
            bottle.run(app=pipe_start_app, host=server_ip, port=server_port,
                       server=get_bottle_server(server._args.max_requests))
    except KeyboardInterrupt:
        # quietly handle Ctrl-C
        pass
    finally:
        # always cleanup gracefully
        vnc_api_server.reset()
# end main
def server_main(args_str=None):
    """Entry point: enable plain-text cgitb tracebacks, construct the
    VncApiServer from args_str and run it (see main)."""
    vnc_cgitb.enable(format='text')
    main(args_str, VncApiServer(args_str))
#server_main
if __name__ == "__main__":
server_main()
| 42.826704 | 136 | 0.569308 |
80bbbb2a4425b6b72a69d6fe8cf0dc0ec3398b1c | 111 | py | Python | argotools/forecastlib/__init__.py | LeonardoClemente/argotools-pkg | cf261bd823dfc7eb808f1561b2052b5f000cac10 | [
"MIT"
] | null | null | null | argotools/forecastlib/__init__.py | LeonardoClemente/argotools-pkg | cf261bd823dfc7eb808f1561b2052b5f000cac10 | [
"MIT"
] | null | null | null | argotools/forecastlib/__init__.py | LeonardoClemente/argotools-pkg | cf261bd823dfc7eb808f1561b2052b5f000cac10 | [
"MIT"
] | null | null | null | # from models import *
# required file in processing directory for regional
CDC_REGIONS = './Regions.csv'
| 22.2 | 53 | 0.72973 |
e185c1a3e01c46e1fde00d46d7be19cebe9bfc07 | 6,792 | py | Python | training/training.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | [
"MIT"
] | 1 | 2021-11-07T22:52:14.000Z | 2021-11-07T22:52:14.000Z | training/training.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | [
"MIT"
] | null | null | null | training/training.py | SimonBartels/Variations_of_VAE | 89eec430eb3ec4483a47f345cc83b86051a81be7 | [
"MIT"
] | 1 | 2021-08-05T13:32:29.000Z | 2021-08-05T13:32:29.000Z | import math
import time
from collections import defaultdict
from collections.abc import Iterable
import torch
from torch import Tensor
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
def train_epoch(epoch, model, optimizer, scheduler, train_loader):
    """Run one full training epoch over *train_loader*.

    epoch: epoch index (kept for the caller's interface; unused here)
    model: module whose forward returns (loss, metrics_dict, px_z)
    optimizer: optimizer stepped once per batch (inside train_batch)
    scheduler: optional per-batch LR scheduler; when given, the LR trace
        is returned in metrics_dict['learning_rates']
    train_loader: iterable yielding a Tensor or a tuple of Tensors

    Returns (average_loss, metrics_dict, px_z) where px_z comes from the
    last batch and metrics_dict maps metric name -> weighted average.

    Bug fix vs. the original: the ``else`` of the ``labelled_loss`` check
    executed for *every* key that was not a labelled loss, inflating each
    metric's ``_count`` denominator by one per batch.  The accumulation
    is now an if/elif chain so each key is counted exactly once, and the
    raw 'labelled seqs'/'unlabelled seqs' tensors are no longer averaged
    as if they were metrics.
    """
    train_loss = 0
    train_count = 0
    if scheduler is not None:
        learning_rates = []
    # Keys needing semi-supervised weighting (or carrying raw tensors).
    semisup_metrics = ["seq2y_loss",
                       "z2y_loss",
                       "labelled seqs",
                       "unlabelled seqs",
                       "unlabelled_loss",
                       "labelled_loss"]
    acc_metrics_dict = defaultdict(lambda: 0)
    for batch_idx, xb in enumerate(train_loader):
        batch_size = xb.size(0) if isinstance(xb, torch.Tensor) else xb[0].size(0)
        loss, batch_metrics_dict, px_z = train_batch(model, optimizer, xb, scheduler)
        labelled = batch_metrics_dict.get('labelled seqs')
        for key, value in batch_metrics_dict.items():
            if key not in semisup_metrics:
                # Plain metric: weight by the full batch size.
                acc_metrics_dict[key] += value * batch_size
                acc_metrics_dict[key + "_count"] += batch_size
            elif 'seq2y' in key or 'z2y' in key:
                # Supervised-head losses only exist for labelled samples.
                if labelled is not None:
                    acc_metrics_dict[key] += value * labelled.size(0)
                    acc_metrics_dict[key + "_count"] += labelled.size(0)
                else:
                    acc_metrics_dict[key] += 0
                    acc_metrics_dict[key + "_count"] += 1
            elif key == "unlabelled_loss":
                n_unlabelled = batch_metrics_dict['unlabelled seqs'].size(0)
                acc_metrics_dict[key] += value * n_unlabelled
                acc_metrics_dict[key + "_count"] += n_unlabelled
            elif key == "labelled_loss":
                if labelled is not None:
                    acc_metrics_dict[key] += value * labelled.size(0)
                    acc_metrics_dict[key + "_count"] += labelled.size(0)
                else:
                    acc_metrics_dict[key] += 0
                    acc_metrics_dict[key + "_count"] += 1
            # 'labelled seqs'/'unlabelled seqs' themselves are not metrics.
        train_loss += loss.item() * batch_size
        train_count += batch_size
        if scheduler is not None:
            learning_rates.append(scheduler.get_last_lr())
    # Weighted averages; computed once after the loop (the original
    # recomputed this dict on every batch and discarded it).
    metrics_dict = {k: acc_metrics_dict[k] / acc_metrics_dict[k + "_count"]
                    for k in acc_metrics_dict.keys() if not k.endswith("_count")}
    average_loss = train_loss / train_count
    if scheduler is not None:
        metrics_dict['learning_rates'] = learning_rates
    return average_loss, metrics_dict, px_z
def train_batch(model, optimizer, xb, scheduler=None, clip_grad_value=200):
    """Run a single optimization step on one batch.

    model: module whose forward returns (loss, batch_metrics_dict, px_z)
    optimizer: optimizer to zero before and step after the backward pass
    xb: a Tensor, or a tuple/list of Tensors unpacked into model(*xb)
        (protein data loaders push more than a bare tensor)
    scheduler: optional LR scheduler stepped once after the optimizer
    clip_grad_value: clip each gradient element to [-v, v]; ``None``
        disables clipping.  Defaults to 200, the value previously
        hard-coded inside the function.

    Returns (loss, batch_metrics_dict, px_z) as produced by the model.
    """
    model.train()
    # Reset gradients accumulated from the previous batch.
    optimizer.zero_grad()
    if isinstance(xb, Tensor):
        loss, batch_metrics_dict, px_z = model(xb)
    else:
        loss, batch_metrics_dict, px_z = model(*xb)
    # Backpropagate, clip exploding gradients element-wise, then step.
    loss.backward()
    if clip_grad_value is not None:
        clip_grad_value_(model.parameters(), clip_grad_value)
    optimizer.step()
    # Per-batch learning-rate schedule, if configured.
    if scheduler is not None:
        scheduler.step()
    return loss, batch_metrics_dict, px_z
def validate(epoch, model, validation_loader):
    """Evaluate *model* over *validation_loader* with gradients disabled.

    epoch: epoch index (kept for the caller's interface; unused here)
    model: module whose forward returns (loss, metrics_dict, px_z)
    validation_loader: iterable yielding a Tensor or a tuple of Tensors

    Returns (average_loss, metrics_dict, px_z); px_z comes from the last
    batch and metrics_dict maps metric name -> weighted average.

    Bug fix vs. the original: the ``else`` of the ``labelled_loss`` check
    executed for *every* key that was not a labelled loss, inflating each
    metric's ``_count`` denominator by one per batch.  The accumulation
    is now an if/elif chain so each key is counted exactly once, and the
    raw 'labelled seqs'/'unlabelled seqs' tensors are not averaged.
    """
    model.eval()
    validation_loss = 0
    validation_count = 0
    # Keys needing semi-supervised weighting (or carrying raw tensors).
    semisup_metrics = ["seq2y_loss",
                       "z2y_loss",
                       "labelled seqs",
                       "unlabelled seqs",
                       "unlabelled_loss",
                       "labelled_loss"]
    with torch.no_grad():
        acc_metrics_dict = defaultdict(lambda: 0)
        for i, xb in enumerate(validation_loader):
            batch_size = xb.size(0) if isinstance(xb, torch.Tensor) else xb[0].size(0)
            if isinstance(xb, Tensor):
                loss, batch_metrics_dict, px_z = model(xb)
            else:
                loss, batch_metrics_dict, px_z = model(*xb)
            labelled = batch_metrics_dict.get('labelled seqs')
            for key, value in batch_metrics_dict.items():
                if key not in semisup_metrics:
                    # Plain metric: weight by the full batch size.
                    acc_metrics_dict[key] += value * batch_size
                    acc_metrics_dict[key + "_count"] += batch_size
                elif 'seq2y' in key or 'z2y' in key:
                    if labelled is not None:
                        acc_metrics_dict[key] += value * labelled.size(0)
                        acc_metrics_dict[key + "_count"] += labelled.size(0)
                    else:
                        acc_metrics_dict[key] += 0
                        acc_metrics_dict[key + "_count"] += 1
                elif key == "unlabelled_loss":
                    n_unlabelled = batch_metrics_dict['unlabelled seqs'].size(0)
                    acc_metrics_dict[key] += value * n_unlabelled
                    acc_metrics_dict[key + "_count"] += n_unlabelled
                elif key == "labelled_loss":
                    if labelled is not None:
                        acc_metrics_dict[key] += value * labelled.size(0)
                        acc_metrics_dict[key + "_count"] += labelled.size(0)
                    else:
                        acc_metrics_dict[key] += 0
                        acc_metrics_dict[key + "_count"] += 1
                # 'labelled seqs'/'unlabelled seqs' are not metrics.
            validation_loss += loss.item() * batch_size
            validation_count += batch_size
    metrics_dict = {k: acc_metrics_dict[k] / acc_metrics_dict[k + "_count"]
                    for k in acc_metrics_dict.keys() if not k.endswith("_count")}
    average_loss = validation_loss / validation_count
    return average_loss, metrics_dict, px_z
a8cbef8bdb4de5c1216b138f0cb9fc99e2051dec | 130 | py | Python | bslparloursite/videolibrary/admin.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | 1 | 2016-01-06T23:13:11.000Z | 2016-01-06T23:13:11.000Z | bslparloursite/videolibrary/admin.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | 4 | 2021-03-18T20:15:04.000Z | 2021-06-10T17:52:31.000Z | bslparloursite/videolibrary/admin.py | natfarleydev/thebslparlour | ebb2588282cdb2a977ec6c5f8d82cec4e8fd1f99 | [
"CC0-1.0"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import SourceVideo
admin.site.register(SourceVideo)
| 16.25 | 32 | 0.807692 |
9db5d94997baad62af729aa8850efbcddfeeaad8 | 7,712 | py | Python | openml/lit_regressor.py | cmonserr/Why_Difficulty | 7b34cc3556a1b99ac67cb155fba8d0837c9b7b10 | [
"MIT"
] | 1 | 2022-02-04T11:33:41.000Z | 2022-02-04T11:33:41.000Z | openml/lit_regressor.py | dcastf01/creating_adversarial_images | 01564f7b4ff9f19021986e57f5bfad827213c8a6 | [
"MIT"
] | null | null | null | openml/lit_regressor.py | dcastf01/creating_adversarial_images | 01564f7b4ff9f19021986e57f5bfad827213c8a6 | [
"MIT"
] | null | null | null |
from typing import Optional
import timm
import torch
import torch.nn as nn
from timm.models.factory import create_model
from torch.nn import functional as F
from torch.nn.modules import linear
from torchvision import models
from openml.config import CONFIG, ModelsAvailable
from openml.custom_models import AlexNet, GoogleNet
from openml.lit_system import LitSystem
class LitRegressor(LitSystem):
    """Lightning regressor: CNN backbone + linear (or Mlp) regression head.

    The backbone's output is concatenated with a learnable ``token_mean``
    parameter (2 extra features) before the regressor; predictions are
    clamped to [-6, 6].  Training/validation/test steps share the same
    smooth-L1 criterion and differ only in loss key and metric set.
    """

    def __init__(self,
                 experiment_name:str,
                 lr:float,
                 optim: str,
                 in_chans:int,
                 features_out_layer1:Optional[int]=None,
                 features_out_layer2:Optional[int]=None,
                 features_out_layer3:Optional[int]=None,
                 tanh1:Optional[bool]=None,
                 tanh2:Optional[bool]=None,
                 dropout1:Optional[float]=None,
                 dropout2:Optional[float]=None,
                 is_mlp_preconfig:Optional[bool]=None,
                 num_fold:Optional[int]=None,
                 num_repeat:Optional[int]=None
                 ):
        super().__init__( lr, optim=optim,is_regresor=True)
        # Build self.model (backbone), self.token_mean and self.regressor.
        self.generate_model(experiment_name,
                            in_chans,
                            features_out_layer1,
                            features_out_layer2,
                            features_out_layer3,
                            tanh1,
                            tanh2,
                            dropout1,
                            dropout2,
                            is_mlp_preconfig
                            )
        self.criterion=F.smooth_l1_loss  # changed the loss function to smooth L1
        # Cross-validation bookkeeping (not used inside this class).
        self.num_fold=num_fold
        self.num_repeat=num_repeat

    def forward(self,x):
        """Alias for :meth:`step` so the module is directly callable."""
        return self.step(x)

    def step(self,x):
        """Backbone -> concat token_mean -> regressor -> clamp to [-6, 6]."""
        x=self.model(x)
        # Broadcast the learnable token over the batch dimension.
        token_mean=self.token_mean.expand(x.shape[0],-1)
        x=torch.cat((x,token_mean),dim=1)
        y=self.regressor(x)
        y=torch.clamp(y,min=-6,max=+6)
        return y

    def training_step(self, batch,batch_idx):
        # Batches may carry (x, targets, index) or (x, targets, index, labels).
        # NOTE(review): any other batch arity leaves x/targets unbound.
        if len(batch)==3:
            x,targets,index=batch
        elif len(batch)==4:
            x,targets,index,labels=batch
        preds=self.step(x)
        loss=self.criterion(preds,targets)
        # Drop the trailing singleton dim before metric computation.
        preds=torch.squeeze(preds,1)
        targets=torch.squeeze(targets,1)
        metric_value=self.train_metrics_base(preds,targets)
        data_dict={"loss":loss,**metric_value}
        self.insert_each_metric_value_into_dict(data_dict,prefix="")
        return loss

    def validation_step(self, batch,batch_idx):
        # Same unpacking convention as training_step.
        if len(batch)==3:
            x,targets,index=batch
        elif len(batch)==4:
            x,targets,index,labels=batch
        preds=self.step(x)
        loss=self.criterion(preds,targets)
        preds=torch.squeeze(preds,1)
        targets=torch.squeeze(targets,1)
        metric_value=self.valid_metrics_base(preds,targets)
        data_dict={"val_loss":loss,**metric_value}
        self.insert_each_metric_value_into_dict(data_dict,prefix="")

    def test_step(self, batch,batch_idx):
        # Same unpacking convention as training_step.
        if len(batch)==3:
            x,targets,index=batch
        elif len(batch)==4:
            x,targets,index,labels=batch
        preds=self.step(x)
        loss=self.criterion(preds,targets)
        preds=torch.squeeze(preds,1)
        targets=torch.squeeze(targets,1)
        metric_value=self.test_metrics_base(preds,targets)
        data_dict={"test_loss":loss,**metric_value}
        self.insert_each_metric_value_into_dict(data_dict,prefix="")

    def generate_model(self,
                       experiment_name:str,
                       in_chans:int,
                       features_out_layer1:Optional[int]=None,
                       features_out_layer2:Optional[int]=None,
                       features_out_layer3:Optional[int]=None,
                       tanh1:Optional[bool]=None,
                       tanh2:Optional[bool]=None,
                       dropout1:Optional[float]=None,
                       dropout2:Optional[float]=None,
                       is_mlp_preconfig:Optional[bool]=None
                       ):
        """Instantiate the backbone and build the regression head.

        The head's input width is backbone output width + 2 (token_mean);
        features_out_layer3/2/1 append successive hidden sizes, and
        tanh/dropout flags insert activations into the Sequential head.
        """
        if isinstance(experiment_name,str):
            model_enum=ModelsAvailable[experiment_name.lower()]
            # Prefer a pretrained timm backbone when one exists.
            if model_enum.value in timm.list_models(pretrained=True) :
                extras=dict(in_chans=in_chans)
                self.model=timm.create_model(
                    model_enum.value,
                    pretrained=True,
                    **extras
                )
            elif model_enum==ModelsAvailable.alexnet:
                self.model=AlexNet(in_chans=in_chans)
            elif model_enum==ModelsAvailable.googlenet:
                self.model=GoogleNet(in_chans=in_chans)
            #
            # Two extra features are concatenated from token_mean in step().
            dim_parameter_token=2
            if CONFIG.only_train_head:
                # Freeze the backbone; only the head (and token) train.
                for param in self.model.parameters():
                    param.requires_grad=False
            self.token_mean=nn.Parameter(torch.zeros(dim_parameter_token))
            # Head input width depends on the backbone's output layer.
            if model_enum==ModelsAvailable.resnet50:
                linear_sizes = [self.model.fc.out_features+dim_parameter_token]
                # self.aditional_token=nn.Parameter(torch.zeros())
            elif model_enum==ModelsAvailable.densenet121:
                linear_sizes=[self.model.classifier.out_features+dim_parameter_token]
                # self.aditional_token=nn.Parameter(torch.zeros())
            elif model_enum==ModelsAvailable.vgg16:
                linear_sizes=[self.model.head.fc.out_features+dim_parameter_token]
            elif model_enum==ModelsAvailable.alexnet:
                linear_sizes=[256*3*3+dim_parameter_token]
            elif model_enum==ModelsAvailable.googlenet:
                linear_sizes=[1024+dim_parameter_token]
            if features_out_layer3:
                linear_sizes.append(features_out_layer3)
            if features_out_layer2:
                linear_sizes.append(features_out_layer2)
            if features_out_layer1:
                linear_sizes.append(features_out_layer1)
            if is_mlp_preconfig:
                # Preconfigured two-layer head (Linear+BN+ReLU, Linear).
                self.regressor=Mlp(linear_sizes[0],linear_sizes[1])
            else:
                linear_layers = [nn.Linear(in_f, out_f,)
                                 for in_f, out_f in zip(linear_sizes, linear_sizes[1:])]
                # Optional activations/dropout at the head's front and back.
                if tanh1:
                    linear_layers.insert(0,nn.Tanh())
                if dropout1:
                    linear_layers.insert(0,nn.Dropout(0.25))
                if tanh2:
                    linear_layers.insert(-2,nn.Tanh())
                if dropout2:
                    linear_layers.insert(-2,nn.Dropout(0.25))
                self.regressor=nn.Sequential(*linear_layers)
            # if model_enum==ModelsAvailable.resnet50:
            #     self.model.fc=self.regressor
            #     pass
            # elif model_enum==ModelsAvailable.densenet121:
            #     self.model.classifier=self.regressor
            #     pass
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    gate = torch.sigmoid(x)
    return gate * x
ACT2FN={"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Mlp(nn.Module):
    """Two-stage MLP head: Linear + BatchNorm + ReLU, then a Linear output.

    Attribute names (layer1/layer2) and the Sequential layout are kept so
    that state_dict keys match existing checkpoints.
    """

    def __init__(self, in_dim, hidden_dim, out_dim=1):
        super(Mlp, self).__init__()
        hidden_block = [
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
        ]
        self.layer1 = nn.Sequential(*hidden_block)
        self.layer2 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        hidden = self.layer1(x)
        return self.layer2(hidden)
| 37.803922 | 91 | 0.561981 |
46daeee910ed3f8a26e74078712afe3d0ecf557b | 17,156 | py | Python | Source/Analytics/TransmissionLineImpedanceCalculator/Source/Step_1_VI Acquisition/PowerSystemPsseLibrary.py | GridProtectionAlliance/openECA | d91f435844a6c3eb9f22a593aabeffef98d2525c | [
"MIT"
] | 13 | 2016-11-07T10:53:52.000Z | 2021-05-30T22:05:48.000Z | Source/Analytics/TransmissionLineImpedanceCalculator/Source/Step_1_VI Acquisition/PowerSystemPsseLibrary.py | GridProtectionAlliance/openECA | d91f435844a6c3eb9f22a593aabeffef98d2525c | [
"MIT"
] | 48 | 2016-06-11T16:36:03.000Z | 2021-01-15T15:33:57.000Z | Source/Analytics/TransmissionLineImpedanceCalculator/Source/Step_1_VI Acquisition/PowerSystemPsseLibrary.py | GridProtectionAlliance/openECA | d91f435844a6c3eb9f22a593aabeffef98d2525c | [
"MIT"
] | 6 | 2016-05-23T17:06:17.000Z | 2018-12-21T02:59:02.000Z | # File:"C:\Users\Duotong\Documents\DuotongYang\PSSE_simulation\ICSEG Power Case 1 - IEEE 14 Bus Systems\20150917_simulation.py", generated on THU, SEP 17 2015 10:10, release 32.00.03
from __future__ import with_statement
from __future__ import division
from contextlib import contextmanager
import os,sys
import csv
PSSE_LOCATION = r"C:\Program Files (x86)\PTI\PSSE32\PSSBIN"
sys.path.append(PSSE_LOCATION)
os.environ['PATH'] = os.environ['PATH'] + ';' + PSSE_LOCATION
import psspy # importing python
from psspy import _i,_f # importing the default integer and float values used by PSS\E(every API uses them)
import redirect, random, pdb, time
redirect.psse2py() # redirecting PSS\E output to python)
import numpy
import difflib
import pdb
import scipy
import heapq
import itertools
from scipy import special,optimize
from scipy.sparse import bsr_matrix
from numpy import genfromtxt,max
@contextmanager
def silence(file_object=None):
    """
    Discard stdout (i.e. write to null device) or
    optionally write to given file-like object.

    Fix: when this context manager opens os.devnull itself, it now also
    closes it on exit; the original leaked one file handle per use.
    """
    opened_here = file_object is None
    if opened_here:
        file_object = open(os.devnull, 'w')
    old_stdout = sys.stdout
    try:
        sys.stdout = file_object
        yield
    finally:
        sys.stdout = old_stdout
        if opened_here:
            file_object.close()
#### Update on OC changing: the region changes becomes owner changes
def change_load(load_bus,percentage):
    """Scale the load at the given buses by *percentage* via PSS/E SCAL.

    NOTE(review): SCAL status-code semantics (status1=2 etc.) taken from
    the call site only -- confirm against the PSS/E API manual.
    """
    # Select *load_bus* into bus subsystem 0, then apply the scaling.
    psspy.bsys(0,0,[0.0,0.0],0,[],len(load_bus),load_bus,0,[],0,[])
    psspy.scal(sid = 0,all = 0, apiopt = 0,status1 = 2, status3 = 1, status4 = 1, scalval1 = percentage)
def change_gen(gen_bus,increment):
    """Re-dispatch generation at the given buses by *increment* (presumably
    MW -- confirm against the PSS/E SCAL documentation)."""
    # Select *gen_bus* into bus subsystem 0, then apply the increment.
    psspy.bsys(0,0,[0.0,0.0],0,[],len(gen_bus),gen_bus,0,[],0,[])
    psspy.scal(sid = 0,all = 0, apiopt = 0,status1 = 3, scalval2 = increment)
def LoadIncreaseMW(load_bus,percentage):
    """Return the MW increment equal to *percentage* of the real load at
    the given buses."""
    # Select the buses and read their complex MVA loads from the case.
    psspy.bsys(0,0,[0.0,0.0],0,[],len(load_bus),load_bus,0,[],0,[])
    ierr,allBusLoad = psspy.aloadcplx(0,1,['MVAACT'])
    allBusLoad = allBusLoad[0]
    # Real part = MW component of the complex load.
    BusLoadReal = numpy.real(allBusLoad)
    return numpy.sum(BusLoadReal)*percentage/100
def changeOperatingCondition(numberofRegions,index_OC,loadIncrease,load_bus_region,gen_bus_region):
    """Apply operating condition *index_OC*: per region, scale the load by
    loadIncrease[index_OC, region] and re-dispatch generation to match."""
    #change load operating points
    for region in range(0,numberofRegions):
        # Compute load increment in MW
        loadIncrementMW = LoadIncreaseMW(load_bus_region[region],loadIncrease[index_OC,region])
        # change region load
        change_load(load_bus_region[region],loadIncrease[index_OC,region])
        # re-dispatch Pgen
        change_gen(gen_bus_region[region],loadIncrementMW)
##########################################################################
def product(*args, **kwds):
    """Cartesian product of input iterables (itertools.product recipe).

    product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111

    Fix: the original used ``map(tuple, args) * repeat``, which breaks on
    Python 3 where map() returns an iterator that cannot be multiplied by
    an int; a list of tuples works on both Python 2 and 3.
    """
    pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [x+[y] for x in result for y in pool]
    for prod in result:
        yield tuple(prod)
def zerolistmaker(n):
    """Return a list containing *n* zeros."""
    return [0 for _ in range(n)]
def computeJacobianMatrix(linedata,bus_voltage,bus_angle,nbus,pq,npq,response_buses,nonSlackBus):
    """Build the power-flow Jacobian sub-blocks J1..J4.

    linedata: rows of (from_bus, to_bus, G, B) describing the admittances;
    bus_voltage/bus_angle: per-bus magnitude (pu) and angle (rad), indexed
    like response_buses; pq/npq: PQ (load) buses and their count;
    nonSlackBus: all buses except the slack.  Returns (J1, J2, J3, J4):
    dP/dtheta, dP/dV, dQ/dtheta, dQ/dV respectively.
    """
    ## 20160212: update angle calculation
    ## 20160327: compared with all other methods already
    fb = linedata[:,0]
    tb = linedata[:,1]
    # NOTE(review): G_y and B_y are extracted but never used below.
    G_y = linedata[:,2]
    B_y = linedata[:,3]
    nb = len(response_buses)
    nl= len(fb)
    # Dense zero matrices sized to the response-bus set.
    G = bsr_matrix((nb,nb)).todense()
    B = bsr_matrix((nb,nb)).todense()
    ### computes G and B matrix based on Y
    for k in range(0,nl):
        i = response_buses.index(fb[k])
        j = response_buses.index(tb[k])
        G[i,j] = linedata[k,2]
        G[j,i] = G[i,j]
    for k in range(0,nl):
        i = response_buses.index(fb[k])
        j = response_buses.index(tb[k])
        B[i,j] = linedata[k,3]
        B[j,i] = B[i,j]
    ### compute the jacobian matrix
    from numpy import sin
    from numpy import cos
    # J1 = dP/dtheta over non-slack buses.
    J1 = bsr_matrix((len(nonSlackBus),len(nonSlackBus))).todense() # -1 is to remove the slack bus
    for i in range(0,len(nonSlackBus)):
        m = response_buses.index(nonSlackBus[i])
        for k in range(0,len(nonSlackBus)):
            n = response_buses.index(nonSlackBus[k])
            if n == m:
                # Diagonal term: sum over all buses, minus V^2 * B_mm.
                for n in range(0,nbus):
                    J1[i,k] = J1[i,k] + bus_voltage[m]*bus_voltage[n]*(-1*G[m,n]*sin(bus_angle[m]-bus_angle[n]) + B[m,n]*cos(bus_angle[m] - bus_angle[n]))
                J1[i,k] = J1[i,k] - numpy.square(bus_voltage[m])*B[m,m]
            else:
                J1[i,k] = bus_voltage[m]*bus_voltage[n]*(G[m,n]*sin(bus_angle[m]-bus_angle[n]) - B[m,n]*cos(bus_angle[m] - bus_angle[n]))
    # J2 = dP/dV over (non-slack x PQ) buses.
    J2 = bsr_matrix((len(nonSlackBus),npq)).todense() # -1 is to remove the slack bus
    for i in range(0,len(nonSlackBus)):
        m = response_buses.index(nonSlackBus[i])
        for k in range(0,npq):
            n = response_buses.index(pq[k])
            if n == m:
                for n in range(0,nbus):
                    J2[i,k] = J2[i,k] + bus_voltage[n]*(G[m,n]*cos(bus_angle[m]-bus_angle[n]) + B[m,n]*sin(bus_angle[m] - bus_angle[n]))
                J2[i,k] = J2[i,k] + bus_voltage[m]*G[m,m]
            else:
                J2[i,k] = bus_voltage[m]*(G[m,n]*cos(bus_angle[m]-bus_angle[n]) + B[m,n]*sin(bus_angle[m] - bus_angle[n]))
    # J3 = dQ/dtheta over (PQ x non-slack) buses.
    J3 = bsr_matrix((npq,len(nonSlackBus))).todense() # -1 is to remove the slack bus
    for i in range(0,npq):
        m = response_buses.index(pq[i])
        for k in range(0,len(nonSlackBus)):
            n = response_buses.index(nonSlackBus[k])
            if n == m:
                for n in range(0,nbus):
                    J3[i,k] = J3[i,k] + bus_voltage[m]*bus_voltage[n]*(G[m,n]*cos(bus_angle[m]-bus_angle[n]) + B[m,n]*sin(bus_angle[m] - bus_angle[n]))
                J3[i,k] = J3[i,k] - numpy.square(bus_voltage[m])*G[m,m]
            else:
                J3[i,k] = bus_voltage[m]*bus_voltage[n]*(-1*G[m,n]*cos(bus_angle[m]-bus_angle[n]) - B[m,n]*sin(bus_angle[m] - bus_angle[n]))
    # J4 = dQ/dV over PQ buses (used downstream for modal analysis).
    J4 = bsr_matrix((npq,npq)).todense() # load_bus is the PQ bus
    for i in range(0,npq):
        m = response_buses.index(pq[i])
        for k in range(0,npq):
            n = response_buses.index(pq[k])
            if n == m:
                for n in range(0,nbus):
                    J4[i,k] = J4[i,k] + bus_voltage[n]*((G[m,n]*sin(bus_angle[m]-bus_angle[n]) - B[m,n]*cos(bus_angle[m] - bus_angle[n])))
                J4[i,k] = J4[i,k] - bus_voltage[m]*B[m,m]
            else:
                J4[i,k] = bus_voltage[m]*(G[m,n]*sin(bus_angle[m]-bus_angle[n]) - B[m,n]*cos(bus_angle[m] - bus_angle[n]))
    return J1,J2,J3,J4
def computeCriticalBus(J4,pq,thresholdParticipationFactor):
    """Identify the PQ buses most involved in J4's weakest (smallest-
    eigenvalue) mode.

    Returns (CriticalBus, LargestParticipationFactor,
    NormalizedParticipationFactor, ranks, NormalizedRank).

    Fix: the original referenced ``npq`` as an undefined free variable
    (it only worked when a global of that name happened to exist); it is
    now derived locally as ``len(pq)``.
    """
    #compute the eigen values and left/right eigenvector
    from scipy.linalg import eig
    eigenvalue,leftEigVector,rightEigVector = eig(J4,left = True)
    #compute the critical mode (smallest eigenvalue)
    min_eig_index = numpy.argmin(eigenvalue)
    npq = len(pq)
    #compute the participation factor for each PQ bus
    ParticipationFactor = []
    for k in range(0,npq):
        ParticipationFactor.append(rightEigVector[k][min_eig_index]*leftEigVector[min_eig_index][k])
    #compute the critical buses based on threshold value
    LargestParticipationFactor = []
    CriticalBus = []
    NormalizedParticipationFactor = numpy.true_divide(ParticipationFactor,max(ParticipationFactor))
    for k in range(0,len(NormalizedParticipationFactor)):
        if NormalizedParticipationFactor[k] >= thresholdParticipationFactor:
            LargestParticipationFactor.append(NormalizedParticipationFactor[k])
            CriticalBus.append(pq[k])
    #rank the buses by normalized participation factor
    order = NormalizedParticipationFactor.argsort()
    ranks = order.argsort()
    NormalizedRank = numpy.true_divide(ranks,max(ranks))
    return CriticalBus,LargestParticipationFactor,NormalizedParticipationFactor,ranks,NormalizedRank
def performModalAnalysis(Jr,pq,CriticalEigenValueNumber,CriticalBusesNumber):
    """Compute critical eigenvalues of *Jr* and, per critical mode, the
    PQ buses with the largest participation factors."""
    #############################################
    #This function is able to compute critical bus and critical eigenvalues
    #20160211 update the eigenvalue computation: minimum eigenvalue is determined based on their magnitude only
    #         update the PF computation: PF is determined by its eigenvector element's magnitude only, since the eigenvector could be complex.
    #20160212 update the critical eigenvalue selection method: negative eigenvalues could be voltage unstable so they will be considered as critical buses
    #20160325 can update more critical eigenvalues and more PF.
    #20160327 functionalized
    #############################################
    #compute the eigenvalues and left/right eigenvector
    import operator
    from scipy.linalg import eig
    eigenvalue,leftEigVector,rightEigVector = eig(Jr,left = True)
    # compute the magnitude of eigenvalue
    negative_EigenvalueRealPart = []
    negative_EigenvalueRealPart_index = []
    #if all eigenvalues have non-negative real part...
    if all(numpy.real(value) >= 0 for value in eigenvalue) == True:
        eigenvalue_magnitude = numpy.abs(eigenvalue)
        # critical modes = the CriticalEigenValueNumber smallest magnitudes
        min_eig_index = sorted(range(len(eigenvalue_magnitude)), key=lambda x: eigenvalue_magnitude[x])
        critical_eig_index = min_eig_index[0:CriticalEigenValueNumber]
        critical_eig = [eigenvalue[i] for i in critical_eig_index]
    else:
        #output the eigenvalues with real part smaller than 0
        for index_eigvalue in range(0,len(eigenvalue)):
            if numpy.real(eigenvalue[index_eigvalue]) < 0:
                negative_EigenvalueRealPart.append(eigenvalue[index_eigvalue])
                negative_EigenvalueRealPart_index.append(index_eigvalue)
        #output minimum critical eigenvalue & index
        # NOTE(review): this branch leaves critical_eig_index as a scalar
        # int, so len(critical_eig_index) and critical_eig_index[eig_index]
        # below will raise TypeError -- confirm the intended behavior.
        min_critical_eig_index, min_critical_eigvalue = min(enumerate(negative_EigenvalueRealPart), key=operator.itemgetter(1))
        critical_eig_index = negative_EigenvalueRealPart_index[min_critical_eig_index]
        critical_eig = min_critical_eigvalue
    #initialize the participation factor accumulators
    npq = len(pq)
    CriticalBus_CriticalMode = []
    LargestNormalizedParticipationFactor_CriticalMode = []
    LargestParticipationFactor_CriticalMode = []
    indexofLargestNormalizedParticipationFactor_CriticalMode = []
    #for each critical mode compute its PFs
    for eig_index in range(0,len(critical_eig_index)):
        ParticipationFactor = []
        for k in range(0,npq):
            # magnitude-only PF: |right_k| * |left_k| for this mode
            ParticipationFactor.append(numpy.abs(rightEigVector[k][critical_eig_index[eig_index]])*numpy.abs(leftEigVector[k][critical_eig_index[eig_index]]))
        #Find the indices of the largest PFs and their critical buses
        NormalizedParticipationFactor = numpy.true_divide(ParticipationFactor,max(ParticipationFactor))
        indexNormalizedParticipationFactor = sorted(range(len(NormalizedParticipationFactor)), key=lambda x: NormalizedParticipationFactor[x])
        indexofLargestNormalizedParticipationFactor = indexNormalizedParticipationFactor[::-1]
        indexofLargestNormalizedParticipationFactor = indexofLargestNormalizedParticipationFactor[0:CriticalBusesNumber]
        #Find the largest PF and its associated critical buses
        LargestNormalizedParticipationFactor = [NormalizedParticipationFactor[i] for i in indexofLargestNormalizedParticipationFactor]
        LargestParticipationFactor = [ParticipationFactor[i] for i in indexofLargestNormalizedParticipationFactor]
        CriticalBus = [pq[i] for i in indexofLargestNormalizedParticipationFactor]
        #save the critical buses, largest normalized PF, largest PF for this mode
        CriticalBus_CriticalMode.append(CriticalBus)
        LargestNormalizedParticipationFactor_CriticalMode.append(LargestNormalizedParticipationFactor)
        LargestParticipationFactor_CriticalMode.append(LargestParticipationFactor)
        indexofLargestNormalizedParticipationFactor_CriticalMode.append(indexofLargestNormalizedParticipationFactor)
    return CriticalBus_CriticalMode,LargestNormalizedParticipationFactor_CriticalMode,LargestParticipationFactor_CriticalMode,indexofLargestNormalizedParticipationFactor_CriticalMode,critical_eig, critical_eig_index, eigenvalue
def inverseMatrix(Matrix):
    """Return the inverse of *Matrix*, or None when it is singular."""
    try:
        return numpy.linalg.inv(Matrix)
    except numpy.linalg.LinAlgError:
        # Not invertible: signal it with None, as before.
        return None
def most_common_N_items(lst, N, candidate_items=None):
    """Return the *N* candidate items occurring most often in *lst*.

    Returns (frequencies, items), most frequent first.  candidate_items
    defaults to the module-level ``pq`` bus list for backward
    compatibility with the original call sites.

    Fix: the original immediately overwrote its *lst* parameter with the
    global ``CriticalBus_InsecuredOC`` (ignoring the argument) and relied
    on the globals ``npq``/``pq``.
    """
    if candidate_items is None:
        candidate_items = pq
    freq = [lst.count(item) for item in candidate_items]
    # Indices of the N largest frequencies, in decreasing order.
    freq_ind = heapq.nlargest(N, range(len(freq)), freq.__getitem__)
    mostcomm_freq = [freq[i] for i in freq_ind]
    mostcomm_bus = [candidate_items[i] for i in freq_ind]
    return mostcomm_freq, mostcomm_bus
def control_cap(combination):
    """Switch in the fixed shunt capacitor at each candidate bus whose
    flag in *combination* (a 6-element 0/1 sequence) is set.

    Fix: the original wrapped these six independent checks in
    ``for m in range(0, len(combination))``, re-issuing every selected
    shunt_data call once per element (up to 6 times); a single pass is
    sufficient.
    """
    if combination[0] == 1:
        psspy.shunt_data(117,r""" 1""",1,[_f, 345])
    if combination[1] == 1:
        psspy.shunt_data(120,r""" 1""",1,[_f, 65]) # this bus cannot be higher than 65
    if combination[2] == 1:
        psspy.shunt_data(154,r""" 1""",1,[_f, 54.5]) # this bus cannot be higher than 65
    if combination[3] == 1:
        psspy.shunt_data(173,r""" 1""",1,[_f, 63]) # this bus cannot be higher than 64
    if combination[4] == 1:
        psspy.shunt_data(179,r""" 1""",1,[_f, 55]) # this bus cannot be higher than 65
    if combination[5] == 1:
        psspy.shunt_data(248,r""" 1""",1,[_f, 55.6]) # this bus cannot be too high
def writefiletotxt(filepath, filename):
    """Serialize *filename* (any picklable object) to *filepath*."""
    import pickle
    out = open(filepath, 'wb')
    try:
        pickle.dump(filename, out)
    finally:
        out.close()
def readfilefromtxt(filepath):
    """Load and return the pickled object stored at *filepath*."""
    import pickle
    source = open(filepath, 'rb')
    try:
        return pickle.load(source)
    finally:
        source.close()
def getbus(region):
    """Return the bus numbers belonging to *region* (1-based caller index)."""
    # PSS/E areas are addressed 0-based here, hence the shift.
    region = region - 1
    psspy.bsys(0,0,[ 0.6, 345.],1,[region],0,[],0,[],0,[])
    ierr,busInterestedRegion = psspy.abusint(1,1,['number'])
    # abusint returns a list of column lists; take the 'number' column.
    busInterestedRegion = busInterestedRegion[0]
    return busInterestedRegion
def clusterCriticalBuses(CriticalBus_InsecuredOC,similarityThreshold):
    """Merge per-operating-condition critical-bus lists into clusters of
    similar lists (difflib ratio >= *similarityThreshold*).

    WARNING: this function mutates *CriticalBus_InsecuredOC* in place,
    emptying every entry that gets merged into a cluster.  Returns
    (clusters, sizes) where sizes[i] is the number of additional OCs
    merged into cluster i.  The loop is capped at 1000 clusters.
    """
    ## Group the buses as one cluster if they are similar
    cluster_index = 0
    resulting_cluster = []
    basecase_index = 0
    resulting_cluster_allOC = []
    basecase_index_allOC = []
    index_OC_similartobasecase_allOC = []
    length_index_OC_similartobasecase_allOC = []
    for cluster_index in range(0,1000):
        #check if all operating conditions are merged
        if len(numpy.nonzero(CriticalBus_InsecuredOC)[0]) == 0:
            break
        else:
            # find the next non-empty/non-zero operating condition
            basecase_index = numpy.nonzero(CriticalBus_InsecuredOC)[0][0]
            basecase = CriticalBus_InsecuredOC[basecase_index]
            # empty the basecase (marks it as consumed)
            CriticalBus_InsecuredOC[basecase_index] = []
            # find the OCs with bus lists similar to the basecase
            similarity_allOC = []
            OC_similartobasecase = []
            index_OC_similartobasecase = []
            for OC in range(0,len(CriticalBus_InsecuredOC)):
                sm=difflib.SequenceMatcher(None,basecase,CriticalBus_InsecuredOC[OC])
                similarity = sm.ratio()
                similarity_allOC.append(similarity)
                if similarity >= similarityThreshold:
                    OC_similartobasecase.append(CriticalBus_InsecuredOC[OC])
                    index_OC_similartobasecase.append(OC)
            # initialize the first resulting cluster (basecase + first match)
            if len(OC_similartobasecase) == 0:
                resulting_cluster = basecase
            else:
                resulting_cluster = basecase + [i for i in OC_similartobasecase[0] if i not in basecase]
            # union in the remaining similar OCs (duplicates dropped)
            for OC_similar in range(0,len(OC_similartobasecase)):
                #compute the resulting cluster
                resulting_cluster = resulting_cluster + [i for i in OC_similartobasecase[OC_similar] if i not in resulting_cluster]
                #empty the similar OCs (marks them as consumed)
                CriticalBus_InsecuredOC[index_OC_similartobasecase[OC_similar]] = []
            CriticalBus_InsecuredOC_nonSimilar = CriticalBus_InsecuredOC
            resulting_cluster_allOC.append(resulting_cluster)
            index_OC_similartobasecase_allOC.append(index_OC_similartobasecase)
            length_index_OC_similartobasecase_allOC.append(len(index_OC_similartobasecase))
            basecase_index_allOC.append(basecase_index)
    return resulting_cluster_allOC, length_index_OC_similartobasecase_allOC
def getMeasurements(response_buses):
    """Return (voltage magnitudes in pu, angles) for *response_buses*."""
    # Select the buses into subsystem 1, then read V and angle columns.
    psspy.bsys(sid = 1,numbus = len(response_buses), buses = response_buses)
    ierr,bus_voltage = psspy.abusreal(1,1,['PU'])
    bus_voltage = bus_voltage[0]
    ierr,bus_angle = psspy.abusreal(1,1,['ANGLE'])
    bus_angle = bus_angle[0]
    return bus_voltage,bus_angle
4d0127e36c4a6538ebc978b15c4b330ae05ef3cc | 2,816 | py | Python | src/python/datasets/train_valid_test_split.py | PeterJackNaylor/NucSeg | 9eaa72d536e61e1ea2e396a47bb5d0a224134790 | [
"MIT"
] | null | null | null | src/python/datasets/train_valid_test_split.py | PeterJackNaylor/NucSeg | 9eaa72d536e61e1ea2e396a47bb5d0a224134790 | [
"MIT"
] | null | null | null | src/python/datasets/train_valid_test_split.py | PeterJackNaylor/NucSeg | 9eaa72d536e61e1ea2e396a47bb5d0a224134790 | [
"MIT"
] | 1 | 2022-02-14T18:45:33.000Z | 2022-02-14T18:45:33.000Z | import numpy as np
import sys
from glob import glob
from sklearn.model_selection import train_test_split
from scipy.ndimage import distance_transform_edt
from multiprocessing import Pool
def distance_transform_array(bin_image):
res = np.zeros_like(bin_image, dtype=np.float64)
for j in range(1, int(bin_image.max()) + 1):
one_cell = np.zeros_like(bin_image)
one_cell[bin_image == j] = 1
one_cell = distance_transform_edt(one_cell)
res[bin_image == j] = one_cell[bin_image == j]
return res
def distance_transform_tensor(bin_image):
pool = Pool()
result = pool.map(distance_transform_array, bin_image)
pool.close()
pool.join()
result = np.array(result)
return result
def load_data():
files = glob("*.npz")
a_x, a_y, a_o, a_d = [], [], [], []
for f in files:
name = f.split("_")[-1].split(".")[0]
data = np.load(f)
x = data["x"]
y = data["y"]
organs = data["organs"]
a_x.append(x)
a_y.append(y)
a_o.append(organs)
data_set = np.zeros_like(organs)
data_set[:] = name
a_d.append(data_set)
a_x = np.concatenate(a_x, axis=0)
a_y = np.concatenate(a_y, axis=0)
a_o = np.concatenate(a_o, axis=0)
a_d = np.concatenate(a_d, axis=0)
return a_x, a_y, a_o, a_d
def create_train_val_test(ptrain, pval, ptest, indice_max, array_to_stratefy):
assert ptrain + pval + ptest == 1.0
indices = np.arange(indice_max)
train_ind, val_test_ind = train_test_split(
indices, train_size=ptrain, stratify=array_to_stratefy
)
ratio_val_test = pval / (pval + ptest)
val_ind, test_ind = train_test_split(
indices[val_test_ind],
train_size=ratio_val_test,
stratify=array_to_stratefy[val_test_ind],
)
return train_ind, val_ind, test_ind
def main():
x, labeled_y, o, d = load_data()
# we stratefy with respect to the dataset and the organ type
strat_array = np.char.add(o, d)
n = len(o)
train, val, test = create_train_val_test(0.6, 0.2, 0.2, n, strat_array)
y = labeled_y.copy()
if sys.argv[1] == "binary":
y[y > 0] = 1
y = y.astype("uint8")
elif sys.argv[1] == "distance":
y = distance_transform_tensor(y)
else:
raise Exception("Unknown method")
np.savez(
f"Xy_train.npz",
x=x[train],
y=y[train],
organs=o[train],
labeled_y=labeled_y[train],
)
np.savez(
f"Xy_validation.npz",
x=x[val],
y=y[val],
organs=o[val],
labeled_y=labeled_y[val],
)
np.savez(
f"Xy_test.npz",
x=x[test],
y=y[test],
organs=o[test],
labeled_y=labeled_y[test]
)
if __name__ == "__main__":
main()
| 25.6 | 78 | 0.604403 |
ad859f2a559454a18b8b882cc6f9c10c24d8cc2a | 27,630 | py | Python | glycan_profiling/database/builder/glycopeptide/common.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | glycan_profiling/database/builder/glycopeptide/common.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | glycan_profiling/database/builder/glycopeptide/common.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | import itertools
from uuid import uuid4
from collections import defaultdict, Counter
from multiprocessing import Process, Queue, Event, RLock
from threading import Thread
from itertools import product
try:
from Queue import Empty as QueueEmptyException
except ImportError:
from queue import Empty as QueueEmptyException
from glypy import Composition
from glypy.composition import formula
from glypy.structure.glycan_composition import FrozenGlycanComposition
from glycan_profiling.serialize import DatabaseBoundOperation, func
from glycan_profiling.serialize.hypothesis import GlycopeptideHypothesis
from glycan_profiling.serialize.hypothesis.peptide import Glycopeptide, Peptide, Protein
from glycan_profiling.serialize.hypothesis.glycan import (
GlycanCombination, GlycanClass, GlycanComposition,
GlycanTypes, GlycanCombinationGlycanComposition)
from glycan_profiling.serialize.utils import toggle_indices
from glycan_profiling.task import TaskBase
from glycan_profiling.database.builder.glycan import glycan_combinator
from glycan_profiling.database.builder.base import HypothesisSerializerBase
from glycopeptidepy.structure.sequence import (
_n_glycosylation, _o_glycosylation, _gag_linker_glycosylation)
_DEFAULT_GLYCAN_STEP_LIMIT = 15000
def slurp(session, model, ids, flatten=True):
    """Load ``model`` rows for the given ids in batches of 100.

    When ``flatten`` is True, ``ids`` is a list of id lists and is flattened
    before querying. Returns the loaded objects as a single list.
    """
    if flatten:
        ids = [member for group in ids for member in group]
    batch_size = 100
    fetched = []
    for start in range(0, len(ids), batch_size):
        id_batch = ids[start:start + batch_size]
        fetched.extend(session.query(model).filter(model.id.in_(id_batch)))
    return fetched
class GlycopeptideHypothesisSerializerBase(DatabaseBoundOperation, HypothesisSerializerBase):
    """Common machinery for Glycopeptide Hypothesis construction.

    Attributes
    ----------
    uuid : str
        The uuid of the hypothesis to be constructed
    """
    def __init__(self, database_connection, hypothesis_name=None, glycan_hypothesis_id=None, full_cross_product=True):
        DatabaseBoundOperation.__init__(self, database_connection)
        self._hypothesis_name = hypothesis_name
        self._hypothesis_id = None
        self._hypothesis = None
        self._glycan_hypothesis_id = glycan_hypothesis_id
        self.uuid = str(uuid4().hex)
        self.total_glycan_combination_count = -1
        self.full_cross_product = full_cross_product
    def _construct_hypothesis(self):
        """Create and persist the GlycopeptideHypothesis row, generating a
        name when none was supplied."""
        if self._hypothesis_name is None or self._hypothesis_name.strip() == "":
            self._hypothesis_name = self._make_name()
        # Check the backing attribute directly. Going through the
        # ``glycan_hypothesis_id`` property would call back into this method
        # and recurse forever instead of raising the intended error.
        if self._glycan_hypothesis_id is None:
            raise ValueError("glycan_hypothesis_id must not be None")
        self._hypothesis = GlycopeptideHypothesis(
            name=self._hypothesis_name, glycan_hypothesis_id=self._glycan_hypothesis_id,
            uuid=self.uuid)
        self.session.add(self._hypothesis)
        self.session.commit()
        # Refresh cached fields from the persisted row.
        self._hypothesis_id = self._hypothesis.id
        self._hypothesis_name = self._hypothesis.name
        self._glycan_hypothesis_id = self._hypothesis.glycan_hypothesis_id
    def _make_name(self):
        """Default hypothesis name derived from this run's uuid."""
        return "GlycopeptideHypothesis-" + self.uuid
    @property
    def glycan_hypothesis_id(self):
        """Id of the glycan hypothesis this one is built from, constructing
        the hypothesis row on first access when unset."""
        if self._glycan_hypothesis_id is None:
            self._construct_hypothesis()
        return self._glycan_hypothesis_id
    def combinate_glycans(self, n):
        """Build combinations of up to ``n`` glycans for this hypothesis and
        record how many were produced; raises ValueError if none were."""
        combinator = glycan_combinator.GlycanCombinationSerializer(
            self.engine, self.glycan_hypothesis_id,
            self.hypothesis_id, n)
        combinator.run()
        self.total_glycan_combination_count = combinator.total_count
        if not (self.total_glycan_combination_count > 0):
            raise ValueError("No glycan combinations were generated. No glycopeptides can be produced!")
    def _count_produced_glycopeptides(self):
        """Count the generated glycopeptides, log the total, and store it in
        the hypothesis parameters under ``database_size``."""
        count = self.query(
            func.count(Glycopeptide.id)).filter(
            Glycopeptide.hypothesis_id == self.hypothesis_id).scalar()
        self.log("Generated %d glycopeptides" % count)
        self.set_parameters({
            "database_size": count
        })
        return count
    def _sql_analyze_database(self):
        """Refresh the database engine's index statistics after bulk loading."""
        self.log("Analyzing Indices")
        self._analyze_database()
        if self.is_sqlite():
            self._sqlite_reload_analysis_plan()
        self.log("Done Analyzing Indices")
class GlycopeptideHypothesisDestroyer(DatabaseBoundOperation, TaskBase):
    """Deletes a glycopeptide hypothesis and all of its dependent rows
    (glycopeptides, then peptides, then proteins, then the hypothesis row
    itself) in child-before-parent order."""
    def __init__(self, database_connection, hypothesis_id):
        DatabaseBoundOperation.__init__(self, database_connection)
        self.hypothesis_id = hypothesis_id
    def delete_glycopeptides(self):
        """Remove all Glycopeptide rows belonging to the hypothesis."""
        self.log("Delete Glycopeptides")
        self.session.query(Glycopeptide).filter(
            Glycopeptide.hypothesis_id == self.hypothesis_id).delete(
            synchronize_session=False)
        self.session.commit()
    def delete_peptides(self):
        """Remove Peptide rows, one protein at a time."""
        self.log("Delete Peptides")
        q = self.session.query(Protein.id).filter(Protein.hypothesis_id == self.hypothesis_id)
        for protein_id, in q:
            self.session.query(Peptide).filter(
                Peptide.protein_id == protein_id).delete(
                synchronize_session=False)
        self.session.commit()
    def delete_protein(self):
        """Remove Protein rows belonging to the hypothesis."""
        self.log("Delete Protein")
        self.session.query(Protein).filter(Protein.hypothesis_id == self.hypothesis_id).delete(
            synchronize_session=False)
        self.session.commit()
    def delete_hypothesis(self):
        """Remove the GlycopeptideHypothesis row itself."""
        self.log("Delete Hypothesis")
        self.session.query(GlycopeptideHypothesis).filter(
            GlycopeptideHypothesis.id == self.hypothesis_id).delete()
        self.session.commit()
    def run(self):
        # Deletion order matters: children before parents.
        self.delete_glycopeptides()
        self.delete_peptides()
        self.delete_protein()
        self.delete_hypothesis()
        self.session.commit()
def distinct_glycan_classes(session, hypothesis_id):
    """Return the distinct glycan structure-class names referenced by the
    glycan combinations of the given glycopeptide hypothesis."""
    rows = (
        session.query(GlycanClass.name.distinct())
        .join(GlycanComposition.structure_classes)
        .join(GlycanCombinationGlycanComposition)
        .join(GlycanCombination)
        .filter(GlycanCombination.hypothesis_id == hypothesis_id)
        .all())
    return [name for (name,) in rows]
def composition_to_structure_class_map(session, glycan_hypothesis_id):
    """Map each GlycanComposition id in the glycan hypothesis to the list of
    structure-class names attached to it."""
    class_names_by_id = defaultdict(list)
    rows = session.query(GlycanComposition.id, GlycanClass.name).join(
        GlycanComposition.structure_classes).filter(
        GlycanComposition.hypothesis_id == glycan_hypothesis_id).all()
    for composition_id, class_name in rows:
        class_names_by_id[composition_id].append(class_name)
    return class_names_by_id
def combination_structure_class_map(session, hypothesis_id, composition_class_map):
    """For every GlycanCombination in the hypothesis, collect the
    structure-class lists of its member compositions, one entry per
    occurrence of each composition."""
    classes_per_combination = defaultdict(list)
    rows = session.query(
        GlycanCombinationGlycanComposition).join(GlycanCombination).filter(
        GlycanCombination.hypothesis_id == hypothesis_id).order_by(GlycanCombination.id)
    for glycan_id, combination_id, count in rows:
        # Repeating the same class-list object `count` times matches the
        # original append loop, which appended the same object each pass.
        classes_per_combination[combination_id].extend(
            [composition_class_map[glycan_id]] * count)
    return classes_per_combination
class GlycanCombinationPartitionTable(TaskBase):
    """Index of GlycanCombination records keyed by size and per-class counts.

    ``tables[size][key]`` holds the combinations of ``size`` members whose
    structure-class assignment matches ``key``, where ``key`` is a tuple of
    per-class counts ordered like ``self.glycan_classes``.
    """
    def __init__(self, session, glycan_combinations, glycan_classes, hypothesis):
        self.session = session
        # size -> class-count key tuple -> [GlycanCombination, ...]
        self.tables = defaultdict(lambda: defaultdict(list))
        self.hypothesis_id = hypothesis.id
        self.glycan_hypothesis_id = hypothesis.glycan_hypothesis_id
        self.glycan_classes = glycan_classes
        self.build_table(glycan_combinations)
    def build_table(self, glycan_combinations):
        """Populate the partition table.

        A composition may belong to several structure classes, so every
        possible class assignment of a combination's members is enumerated
        (cartesian product) and the combination is filed under each
        resulting per-class count signature.
        """
        composition_class_map = composition_to_structure_class_map(
            self.session, self.glycan_hypothesis_id)
        combination_class_map = combination_structure_class_map(
            self.session, self.hypothesis_id, composition_class_map)
        for entry in glycan_combinations:
            size_table = self.tables[entry.count]
            component_classes = combination_class_map[entry.id]
            class_assignment_generator = product(*component_classes)
            for classes in class_assignment_generator:
                counts = Counter(c for c in classes)
                key = tuple(counts[c] for c in self.glycan_classes)
                class_table = size_table[key]
                class_table.append(entry)
    def build_key(self, mapping):
        """Convert a {class name: count} mapping into the canonical key tuple."""
        return tuple(mapping.get(c, 0) for c in self.glycan_classes)
    def get_entries(self, size, mapping):
        """Look up combinations of ``size`` members matching the class counts."""
        key = self.build_key(mapping)
        return self.tables[size][key]
    def __getitem__(self, key):
        # Supports ``table[size, {class_name: count}]`` indexing.
        size, mapping = key
        return self.get_entries(size, mapping)
def limiting_combinations(iterable, n, limit=100):
    """Yield at most ``limit`` combinations of size ``n`` from ``iterable``.

    Caps the combinatorial explosion when a peptide has many open
    glycosylation sites. Uses :func:`itertools.islice` to enforce the bound
    exactly; the previous hand-rolled counter checked the limit only after
    yielding and therefore produced ``limit + 1`` items.
    """
    return itertools.islice(itertools.combinations(iterable, n), limit)
class GlycanCombinationRecord(object):
    """Lightweight, slotted snapshot of a GlycanCombination row that lazily
    computes and caches its total and dehydrated elemental compositions."""
    __slots__ = [
        'id', 'calculated_mass', 'formula', 'count', 'glycan_composition_string',
        '_composition', '_dehydrated_composition']
    def __init__(self, combination):
        self.id = combination.id
        self.calculated_mass = combination.calculated_mass
        self.formula = combination.formula
        self.count = combination.count
        self.glycan_composition_string = combination.composition
        self._composition = None
        self._dehydrated_composition = None
    def total_composition(self):
        """Elemental composition of the whole combination (computed once)."""
        cached = self._composition
        if cached is None:
            cached = self._composition = self.convert().total_composition()
        return cached
    def dehydrated_composition(self):
        """Total composition minus one water per component glycan."""
        cached = self._dehydrated_composition
        if cached is None:
            cached = self._dehydrated_composition = (
                self.total_composition() - (Composition("H2O") * self.count))
        return cached
    def convert(self):
        """Parse the stored composition string into a FrozenGlycanComposition
        annotated with this record's id and count."""
        parsed = FrozenGlycanComposition.parse(self.glycan_composition_string)
        parsed.id = self.id
        parsed.count = self.count
        return parsed
    def __repr__(self):
        return "GlycanCombinationRecord(%d, %s)" % (
            self.id, self.glycan_composition_string)
class PeptideGlycosylator(object):
    """Generates glycopeptide records by attaching glycan combinations from
    a hypothesis to the open glycosylation sites of peptides.

    The glycan combinations can be paged in windows of ``glycan_limit`` rows
    starting at ``glycan_offset`` to bound memory usage.
    """
    def __init__(self, session, hypothesis_id, glycan_offset=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.session = session
        self.glycan_offset = glycan_offset
        self.glycan_limit = glycan_limit
        self.hypothesis_id = hypothesis_id
        self.hypothesis = self.session.query(GlycopeptideHypothesis).get(hypothesis_id)
        self.total_combinations = self._get_total_combination_count()
        self.build_glycan_table(self.glycan_offset)
    def _get_total_combination_count(self):
        """Total number of glycan combinations attached to this hypothesis."""
        count = self.session.query(
            GlycanCombination).filter(
            GlycanCombination.hypothesis_id == self.hypothesis_id).count()
        return count
    def _load_glycan_records(self):
        """Load the current window of glycan combinations (or all of them
        when no offset is set) as lightweight records."""
        if self.glycan_offset is None:
            glycan_combinations = self.session.query(
                GlycanCombination).filter(
                GlycanCombination.hypothesis_id == self.hypothesis_id).all()
            glycan_combinations = [GlycanCombinationRecord(gc) for gc in glycan_combinations]
        else:
            glycan_combinations = self.session.query(
                GlycanCombination).filter(
                GlycanCombination.hypothesis_id == self.hypothesis_id).offset(
                self.glycan_offset).limit(self.glycan_limit).all()
        return glycan_combinations
    def _build_size_table(self, glycan_combinations):
        """Index the loaded combinations by size and structure-class counts."""
        self.glycan_combination_partitions = GlycanCombinationPartitionTable(
            self.session, glycan_combinations, distinct_glycan_classes(
                self.session, self.hypothesis_id), self.hypothesis)
    def build_glycan_table(self, offset=None):
        """(Re)load the glycan combination window starting at ``offset``."""
        self.glycan_offset = offset
        glycan_combinations = self._load_glycan_records()
        self._build_size_table(glycan_combinations)
    def handle_peptide(self, peptide):
        """Yield glycopeptide record dicts for every way of decorating
        ``peptide``'s open N-linked, O-linked, and GAG-linker sites with the
        currently loaded glycan combinations.

        Yields dicts shaped for ``bulk_insert_mappings(Glycopeptide, ...)``.
        """
        water = Composition("H2O")
        peptide_composition = Composition(str(peptide.formula))
        obj = peptide.convert()
        reference = obj.clone()
        # One spec per glycosylation class:
        # (candidate sites, glycan class key, modification rule to apply)
        site_specs = [
            (peptide.n_glycosylation_sites, GlycanTypes.n_glycan, _n_glycosylation),
            (peptide.o_glycosylation_sites, GlycanTypes.o_glycan, _o_glycosylation),
            (peptide.gagylation_sites, GlycanTypes.gag_linker, _gag_linker_glycosylation),
        ]
        for sites, glycan_class, modification_rule in site_specs:
            unoccupied_sites = set(sites)
            # Discard sites already carrying a modification in the base peptide.
            for site in list(unoccupied_sites):
                if obj[site][1]:
                    unoccupied_sites.remove(site)
            for record in self._glycosylate_sites(
                    peptide, reference, peptide_composition, water,
                    unoccupied_sites, glycan_class, modification_rule):
                yield record
    def _glycosylate_sites(self, peptide, reference, peptide_composition, water,
                           unoccupied_sites, glycan_class, modification_rule):
        """Yield glycopeptide dicts for all occupancy counts, site choices,
        and matching glycan combinations of one glycosylation class.

        This replaces three previously duplicated copies of the same loop
        (one each for N-linked, O-linked, and GAG-linker sites).
        """
        for occupancy in range(1, len(unoccupied_sites) + 1):
            for gc in self.glycan_combination_partitions[occupancy, {glycan_class: occupancy}]:
                # Each glycan attachment loses one water per component glycan.
                total_mass = peptide.calculated_mass + gc.calculated_mass - (gc.count * water.mass)
                formula_string = formula(peptide_composition + gc.dehydrated_composition())
                for site_set in limiting_combinations(unoccupied_sites, occupancy):
                    sequence = reference.clone()
                    for site in site_set:
                        sequence.add_modification(site, modification_rule.name)
                    sequence.glycan = gc.convert()
                    glycopeptide_sequence = str(sequence)
                    yield dict(
                        calculated_mass=total_mass,
                        formula=formula_string,
                        glycopeptide_sequence=glycopeptide_sequence,
                        peptide_id=peptide.id,
                        protein_id=peptide.protein_id,
                        hypothesis_id=peptide.hypothesis_id,
                        glycan_combination_id=gc.id)
def null_log_handler(msg):
    """Default log sink for worker processes: print the message to stdout."""
    print(msg)
class PeptideGlycosylatingProcess(Process):
    """Worker process that pulls batches of peptide id lists from
    ``input_queue``, generates their glycopeptides, and writes the results
    to the database in chunks.

    The worker terminates when it receives a ``None`` sentinel, or when the
    queue is empty after ``done_event`` has been set by the producer.
    """
    process_name = "glycopeptide-build-worker"
    def __init__(self, connection, hypothesis_id, input_queue, chunk_size=5000, done_event=None,
                 log_handler=null_log_handler, glycan_offset=None,
                 glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        Process.__init__(self)
        self.daemon = True
        self.connection = connection
        self.input_queue = input_queue
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.done_event = done_event
        self.log_handler = log_handler
        self.glycan_offset = glycan_offset
        self.glycan_limit = glycan_limit
        self.session = None
        self.work_done_event = Event()
    def is_work_done(self):
        """True once this worker has drained its queue and flushed results."""
        return self.work_done_event.is_set()
    def process_result(self, collection):
        """Persist a batch of generated glycopeptide mappings."""
        self.session.bulk_insert_mappings(Glycopeptide, collection, render_nulls=True)
        self.session.commit()
    def load_peptides(self, work_items):
        """Load Peptide objects for a batch of id lists."""
        peptides = slurp(self.session, Peptide, work_items, flatten=False)
        return peptides
    def task(self):
        """Main worker loop: consume peptide id batches until done."""
        database = DatabaseBoundOperation(self.connection)
        self.session = database.session
        has_work = True
        glycosylator = PeptideGlycosylator(
            database.session, self.hypothesis_id,
            glycan_offset=self.glycan_offset,
            glycan_limit=self.glycan_limit)
        result_accumulator = []
        n = 0
        n_gps = 0
        while has_work:
            try:
                work_items = self.input_queue.get(timeout=5)
                if work_items is None:
                    # Sentinel: the producer will enqueue no more work.
                    has_work = False
                    continue
            except QueueEmptyException:
                # Narrowed from a bare ``except Exception`` so that real
                # errors propagate (and get logged by run()) instead of being
                # silently retried. A timeout only ends the loop once the
                # producer has signalled completion.
                # NOTE(review): assumes done_event is always supplied when
                # running as a worker -- its default is None.
                if self.done_event.is_set():
                    has_work = False
                continue
            peptides = self.load_peptides(work_items)
            n += len(peptides)
            for peptide in peptides:
                for gp in glycosylator.handle_peptide(peptide):
                    result_accumulator.append(gp)
                    if len(result_accumulator) > self.chunk_size:
                        n_gps += len(result_accumulator)
                        self.process_result(result_accumulator)
                        result_accumulator = []
        if len(result_accumulator) > 0:
            n_gps += len(result_accumulator)
            self.process_result(result_accumulator)
            result_accumulator = []
        self.work_done_event.set()
        # It seems there is no public API to force the process to check if it is done
        # but the internal method is invoked when creating a Process `repr` on Python 2.
        # This problem supposedly doesn't exist in Python 3.
        repr(self)
        self.log_handler("Process %r completed. (%d peptides, %d glycopeptides)" % (self.pid, n, n_gps))
    def run(self):
        # Rename the OS-level process for easier identification, then run the
        # task, logging (rather than losing) any uncaught exception.
        new_name = getattr(self, 'process_name', None)
        if new_name is not None:
            TaskBase().try_set_process_name(new_name)
        try:
            self.task()
        except Exception as e:
            import traceback
            self.log_handler(
                "An exception has occurred for %r.\n%r\n%s" % (
                    self, e, traceback.format_exc()))
class NonSavingPeptideGlycosylatingProcess(PeptideGlycosylatingProcess):
    """Worker variant whose ``process_result`` is a no-op, so generated
    glycopeptides are discarded instead of written to the database."""
    def process_result(self, collection):
        # Intentionally a no-op: results are thrown away.
        pass
class QueuePushingPeptideGlycosylatingProcess(PeptideGlycosylatingProcess):
    """Worker variant that ships generated glycopeptides back to the parent
    over ``output_queue`` (the parent performs the database writes) and
    serializes its peptide reads through a shared ``database_mutex``."""
    def __init__(self, connection, hypothesis_id, input_queue, output_queue, chunk_size=5000,
                 done_event=None, log_handler=null_log_handler, database_mutex=None,
                 glycan_offset=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        super(QueuePushingPeptideGlycosylatingProcess, self).__init__(
            connection, hypothesis_id, input_queue, chunk_size, done_event, log_handler,
            glycan_offset=glycan_offset, glycan_limit=glycan_limit)
        self.output_queue = output_queue
        self.database_mutex = database_mutex
    def load_peptides(self, work_items):
        # Hold the shared mutex so reads do not overlap the parent's writes.
        with self.database_mutex:
            result = super(QueuePushingPeptideGlycosylatingProcess, self).load_peptides(work_items)
        return result
    def process_result(self, collection):
        # Hand the batch to the parent process instead of writing it here.
        self.output_queue.put(collection)
class MultipleProcessPeptideGlycosylator(TaskBase):
    """Coordinates a pool of :class:`QueuePushingPeptideGlycosylatingProcess`
    workers.

    A feeder thread deals peptide id batches onto ``input_queue``; workers
    push generated glycopeptide mappings onto ``output_queue``; this object
    drains that queue and bulk-inserts the results. When the glycan
    combination space exceeds ``glycan_limit``, the peptide set is
    re-processed once per glycan window.
    """
    def __init__(self, connection_specification, hypothesis_id, chunk_size=6500, n_processes=4,
                 glycan_combination_count=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.n_processes = n_processes
        self.connection_specification = connection_specification
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.glycan_combination_count = glycan_combination_count
        self.current_glycan_offset = 0
        self.glycan_limit = glycan_limit
        self.input_queue = Queue(10)
        self.output_queue = Queue(1000)
        self.workers = []
        self.dealt_done_event = Event()
        self.ipc_controller = self.ipc_logger()
        # Guards workers' database reads against the parent's bulk writes.
        self.database_mutex = RLock()
    def spawn_worker(self):
        """Create (but do not start) one glycosylation worker process."""
        worker = QueuePushingPeptideGlycosylatingProcess(
            self.connection_specification, self.hypothesis_id, self.input_queue,
            self.output_queue, self.chunk_size, self.dealt_done_event,
            self.ipc_controller.sender(), self.database_mutex,
            glycan_offset=self.current_glycan_offset,
            glycan_limit=self.glycan_limit)
        return worker
    def push_work_batches(self, peptide_ids):
        """Deal peptide ids onto the input queue in batches, then signal
        completion. Runs on the feeder thread."""
        n = len(peptide_ids)
        i = 0
        # Guard against a zero batch size: int(n * 0.05) == 0 for n < 20,
        # which previously made this loop spin forever without advancing.
        chunk_size = max(1, min(int(n * 0.05), 1000))
        while i < n:
            self.input_queue.put(peptide_ids[i:(i + chunk_size)])
            i += chunk_size
            self.log("... Dealt Peptides %d-%d %0.2f%%" % (i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
        self.log("... All Peptides Dealt")
        self.dealt_done_event.set()
    def create_barrier(self):
        """Acquire the database mutex, pausing workers' reads."""
        self.database_mutex.__enter__()
    def teardown_barrier(self):
        """Release the database mutex, letting workers read again."""
        self.database_mutex.__exit__(None, None, None)
    def create_queue_feeder_thread(self, peptide_ids):
        """Start the daemon thread that deals peptide id batches."""
        queue_feeder = Thread(target=self.push_work_batches, args=(peptide_ids,))
        queue_feeder.daemon = True
        queue_feeder.start()
        return queue_feeder
    def spawn_all_workers(self):
        """Spawn and start the full worker pool."""
        self.workers = []
        for i in range(self.n_processes):
            worker = self.spawn_worker()
            worker.start()
            self.workers.append(worker)
    def process(self, peptide_ids):
        """Generate and store glycopeptides for ``peptide_ids`` across all
        glycan combination windows, dropping indices for the bulk load and
        rebuilding them at the end."""
        connection = DatabaseBoundOperation(self.connection_specification)
        session = connection.session
        self.log("Begin Creation. Dropping Indices")
        index_controller = toggle_indices(session, Glycopeptide)
        index_controller.drop()
        while self.current_glycan_offset < self.glycan_combination_count:
            _current_progress = float(self.current_glycan_offset + self.glycan_limit)
            _current_percent_complete = _current_progress / self.glycan_combination_count * 100.0
            _current_percent_complete = min(_current_percent_complete, 100.0)
            self.log("... Processing Glycan Combinations %d-%d (%0.2f%%)" % (
                self.current_glycan_offset, min(self.current_glycan_offset + self.glycan_limit,
                                                self.glycan_combination_count),
                _current_percent_complete))
            queue_feeder = self.create_queue_feeder_thread(peptide_ids)
            self.spawn_all_workers()
            has_work = True
            last = 0
            i = 0
            while has_work:
                try:
                    batch = self.output_queue.get(True, 5)
                    try:
                        waiting_batches = self.output_queue.qsize()
                        if waiting_batches > 10:
                            # A backlog has built up: raise the barrier so
                            # workers pause while we drain and bulk-write.
                            self.create_barrier()
                            self.log("... %d waiting sets." % (waiting_batches,))
                            try:
                                for _ in range(waiting_batches):
                                    batch.extend(self.output_queue.get(True, 1))
                                # check to see if any new work items have arrived while
                                # we've been draining the queue
                                waiting_batches = self.output_queue.qsize()
                                if waiting_batches != 0:
                                    # if so, while the barrier is up, let's write the batch
                                    # to disk and then try to drain the queue again
                                    i += len(batch)
                                    try:
                                        session.bulk_insert_mappings(Glycopeptide, batch, render_nulls=True)
                                        session.commit()
                                    except Exception:
                                        session.rollback()
                                        raise
                                    batch = []
                                    for _ in range(waiting_batches):
                                        batch.extend(self.output_queue.get_nowait())
                            except QueueEmptyException:
                                pass
                            self.teardown_barrier()
                    except NotImplementedError:
                        # platform does not support qsize()
                        pass
                    self.create_barrier()
                    i += len(batch)
                    try:
                        session.bulk_insert_mappings(Glycopeptide, batch, render_nulls=True)
                        session.commit()
                    except Exception:
                        session.rollback()
                        raise
                    finally:
                        self.teardown_barrier()
                    if (i - last) > self.chunk_size * 20:
                        self.log("... %d Glycopeptides Created" % (i,))
                        last = i
                except QueueEmptyException:
                    # No output for 5s: finish once every worker reports done.
                    if all(w.is_work_done() for w in self.workers):
                        has_work = False
                    continue
            queue_feeder.join()
            self.ipc_controller.stop()
            for worker in self.workers:
                self.log("Joining Process %r (%s)" % (worker.pid, worker.is_alive()))
                worker.join(10)
                if worker.is_alive():
                    self.log("Failed to join %r" % worker.pid)
            self.current_glycan_offset += self.glycan_limit
        self.log("All Work Done. Rebuilding Indices")
        index_controller.create()
| 41.177347 | 118 | 0.632067 |
34b2fe971b149908d1363bfe2b6ee4b2c3f78dfc | 1,778 | py | Python | test/python/robotlib_tests/test_robot.py | austin-bowen/robotlib | 44bfdaad493235dc8385ce1a0c80789e5c65f62d | [
"MIT"
] | null | null | null | test/python/robotlib_tests/test_robot.py | austin-bowen/robotlib | 44bfdaad493235dc8385ce1a0c80789e5c65f62d | [
"MIT"
] | null | null | null | test/python/robotlib_tests/test_robot.py | austin-bowen/robotlib | 44bfdaad493235dc8385ce1a0c80789e5c65f62d | [
"MIT"
] | null | null | null | import unittest
from typing import Dict
from unittest import TestCase
from robotlib.robot import Robot, InputSample, Input, Device, Output
class TestRobot(TestCase):
    """Placeholder tests for Robot enable/disable; both currently just call
    self.fail() and are yet to be written."""
    def test_enable(self):
        self.fail()
    def test_disable(self):
        self.fail()
class UltrasonicInput(Input):
    """Fake ultrasonic input producing a random reading in [0, 100)."""
    def sample(self):
        """Return one random reading."""
        import random
        return random.random() * 100
    def sample_sim(self, world):
        """Simulated sampling ignores the world and delegates to sample()."""
        return self.sample()
class DriveMotorOutput(Output):
    """Output channel that pushes its device's commanded velocities into the
    simulated world; real-hardware application is not implemented."""
    def apply(self):
        # TODO: apply to real hardware; currently a no-op.
        pass
    def apply_sim(self, world):
        # TODO
        # Mirror the owning device's commanded velocities onto the simulated robot.
        world.set_robot_linear_velocity(self.device.linear_velocity)
        world.set_robot_angular_velocity(self.device.angular_velocity)
class DriveMotors(Device):
    """Device holding commanded linear/angular velocities, exposed through a
    DriveMotorOutput named 'velocity'."""
    def __init__(self, name: str):
        super().__init__(name)
        # Commanded velocities; units are not specified here.
        self.linear_velocity = 0
        self.angular_velocity = 0
        self.add_output(DriveMotorOutput('velocity'))
class BasicRobot(Robot):
    """Toy robot with one ultrasonic distance input and drive motors that
    stops itself once run_time reaches 1 ms."""
    def __init__(self, name: str):
        super().__init__(name)
        ultrasonic_sensor = Device('ultrasonic_sensor')
        ultrasonic_sensor.add_input(UltrasonicInput('distance'))
        self.add_device(ultrasonic_sensor)
        self.drive_motors: DriveMotors = self.add_device(
            DriveMotors('drive_motors'))
    def process_observation(self, observation: Dict[str, InputSample],
                            start_time_s: float, stop_time_s: float):
        """Print the observation window, then stop after 1 ms of run time."""
        print(observation)
        print(start_time_s)
        print(stop_time_s)
        if self.run_time >= 0.001:
            self.stop()
class TestBasicRobot(TestCase):
    """Smoke test: a BasicRobot should run and stop itself without error."""
    def setUp(self) -> None:
        self.robot = BasicRobot('My Robot')
    def test_run(self):
        # process_observation() stops the robot after ~1 ms of run time.
        self.robot.run()
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 23.090909 | 70 | 0.650169 |
9f1e9c87408113143209090467c56a53955d7f11 | 6,711 | py | Python | randommer.py | GauravKK08/python-randommer-api-wrapper | 967bf2811ba1f6a077bc8ca1f6c6d0857f8b1157 | [
"Apache-2.0"
] | 1 | 2021-05-26T13:07:00.000Z | 2021-05-26T13:07:00.000Z | randommer.py | GauravKK08/python-randommer-api-wrapper | 967bf2811ba1f6a077bc8ca1f6c6d0857f8b1157 | [
"Apache-2.0"
] | null | null | null | randommer.py | GauravKK08/python-randommer-api-wrapper | 967bf2811ba1f6a077bc8ca1f6c6d0857f8b1157 | [
"Apache-2.0"
] | null | null | null | import requests
import time
import json
class Randommer():
    """Thin client for the randommer.io REST API.

    Loads the API key from ``config.json`` unless one is passed explicitly,
    and (optionally) prefetches the available cultures. Note that the
    constructor performs network requests (cultures and phone country codes).
    """
    def __init__(self, api_key=None, json_response=True, prefetch_cultures=True):
        with open('config.json') as fp:
            self.config = json.load(fp)
        if api_key is None:
            api_key = self.config.get('api_key')
            if api_key is None:
                raise Exception('API key could not be loaded from config/not passed via param.')
        # Fix: previously the attribute was only assigned when the key came
        # from the config file; an explicitly passed api_key was dropped,
        # causing an AttributeError on the first request.
        self.api_key = api_key
        self.api_url = self.config['api_url']
        self.request_headers = {'X-Api-Key': self.api_key, 'accept': '*/*'}
        self.json_response = json_response
        self.cultures = None
        if prefetch_cultures:
            self.cultures = self.get_misc_cultures()
        self.nameTypes = ['firstname', 'surname', 'fullname']
        self.country_codes = []
        # Populates self.country_codes as a side effect.
        self.get_phone_countries()
        self.loremTypes = ['normal', 'business']
        self.text_types = ['paragraph', 'words']
    def make_request(self, api_url, params=None, method='GET', contentType=None):
        """Issue a request and return the decoded (JSON or text) response.

        Raises on non-200 responses. NOTE: setting ``contentType`` mutates
        the shared header dict, so it persists for subsequent requests.
        """
        params = params or {}
        start_time = time.time()
        print('Hitting URL: %s' % api_url)
        if contentType:
            self.request_headers['Content-Type'] = contentType
        if method == 'GET':
            response = requests.get(url=api_url, headers=self.request_headers, params=params)
        elif method == 'POST':
            # Fix: POST requests were previously issued with requests.get().
            response = requests.post(url=api_url, headers=self.request_headers, params=params)
        else:
            # Fix: an unknown method previously fell through and raised a
            # confusing NameError on the unbound `response` variable.
            raise Exception('Unsupported HTTP method: %s' % method)
        end_time = time.time()
        print("Execution took {} seconds".format(end_time - start_time))
        if response.status_code != 200:
            raise Exception('Non OK status code. Response text: %s' % response.text)
        if self.json_response:
            result = response.json()
        else:
            result = response.text
        return result
    def get_random_card_numbers(self):
        """Fetch randomly generated payment card numbers."""
        return self.make_request(api_url=self.api_url + 'Card')
    def get_available_card_types(self):
        """Fetch the card types the API can generate."""
        return self.make_request(api_url=self.api_url + 'Card/Types')
    def get_misc_cultures(self):
        """Fetch the list of supported cultures."""
        return self.make_request(api_url=self.api_url + 'Misc/Cultures')
    def get_random_address(self, number=1, culture='en'):
        """Fetch 1-1000 random addresses for a supported culture."""
        if number < 1 or number > 1000:
            raise Exception('You can only ask for address(es) 1 to 1000.')
        is_valid_culture = False
        if not self.cultures:
            self.cultures = self.get_misc_cultures()
        for _culture in self.cultures:
            if _culture['code'] == culture.lower():
                is_valid_culture = True
        if not is_valid_culture:
            raise Exception('Provided culture: %s does not seem valid.' % culture)
        params = {'number': number, 'culture': culture}
        return self.make_request(api_url=self.api_url + 'Misc/Random-Address', params=params)
    def get_random_name(self, quantity=1, nameType='firstname'):
        """Fetch 1-5000 random names of the given type."""
        if nameType not in self.nameTypes:
            raise Exception('Invalid nameType:%s can only be one of %s' % (nameType, self.nameTypes))
        if quantity < 1 or quantity > 5000:
            raise Exception('Can only ask for 1 to 5000 random names at a time.')
        params = {'nameType': nameType, 'quantity': quantity}
        return self.make_request(api_url=self.api_url + 'Name', params=params)
    def get_business_suggestions(self, startingWords='Lorem Ipsum'):
        """Fetch business-name suggestions based on the starting words."""
        if len(startingWords) > 100:
            # Fix: corrected the "chaaracters" typo in the error message.
            raise Exception('starting words can only be less than 100 characters.')
        params = {'startingWords': startingWords}
        return self.make_request(api_url=self.api_url + 'Name/Suggestions', params=params)
    def get_phone_countries(self):
        """Fetch supported phone countries, caching their codes on self."""
        country_codes = self.make_request(api_url=self.api_url + 'Phone/Countries')
        for country_code in country_codes:
            self.country_codes.append(country_code['countryCode'])
        return country_codes
    def validate_phone_number(self, telephone, countryCode):
        """Check whether ``telephone`` is valid for ``countryCode``."""
        if countryCode not in self.country_codes:
            # Fix: the message previously interpolated the undefined name
            # `countryCodes`, raising NameError instead of this Exception.
            raise Exception('Invalid country code: %s' % countryCode)
        if len(telephone) > 25:
            raise Exception('Invalid telephone number: %s' % telephone)
        params = {'telephone': telephone, 'countryCode': countryCode}
        return self.make_request(api_url=self.api_url + 'Phone/Validate', params=params)
    def get_bulk_telephone_numbers(self, countryCode='IN', quantity=10):
        """Fetch 1-1000 random phone numbers for a country."""
        if countryCode not in self.country_codes:
            # Fix: same undefined-name bug as in validate_phone_number.
            raise Exception('Invalid country code: %s' % countryCode)
        if quantity < 1 or quantity > 1000:
            raise Exception('Can only ask for 1 to 1000 random nos at a time.')
        params = {'countryCode': countryCode, 'quantity': quantity}
        return self.make_request(api_url=self.api_url + 'Phone/Generate', params=params)
    def generate_ssn(self):
        """Fetch a randomly generated social security number."""
        return self.make_request(api_url=self.api_url + 'SocialNumber', params={})
    def generate_lorem_ipsum(self, loremType='normal', text_type='words', number=10):
        """Fetch lorem-ipsum text of the given flavor, unit, and length."""
        if loremType not in self.loremTypes:
            raise Exception('Unknown lorem type: %s' % loremType)
        if text_type not in self.text_types:
            raise Exception('Unknown text type: %s' % text_type)
        params = {'loremType': loremType, 'type': text_type, 'number': number}
        return self.make_request(api_url=self.api_url + 'Text/LoremIpsum', params=params)
    def generate_password(self, length=16, hasDigits=True, hasUppercase=True, hasSpecial=True):
        """Fetch a random password of 3-250 characters."""
        if length < 3 or length > 250:
            raise Exception('Password length can only be 3 to 250 chars max.')
        params = {'length': length, 'hasDigits': hasDigits, 'hasUppercase': hasUppercase, 'hasSpecial': hasSpecial}
        return self.make_request(api_url=self.api_url + 'Text/Password', params=params)
    def humanize_text(self, text='Lorem Ipsum Dolor Sit Amet.'):
        """Humanize the given text via a POST request.

        NOTE(review): the payload is still sent as query parameters even
        though the endpoint is a POST with a JSON content type -- confirm
        against the randommer.io API whether the text belongs in the body.
        """
        params = {'text': text}
        return self.make_request(api_url=self.api_url + 'Text/Humanize', params=params,
                                 method='POST', contentType='application/json-patch+json')
# Demo: exercises each endpoint. Guarded so importing this module does not
# trigger network requests; requires network access and a valid API key.
if __name__ == '__main__':
    rm = Randommer()
    print(rm.get_random_card_numbers())
    print(rm.get_available_card_types())
    print(rm.get_misc_cultures())
    print(rm.get_random_address(culture='cz'))
    print(rm.get_random_name(nameType='fullname'))
    print(rm.get_business_suggestions())
    print(rm.get_phone_countries())
    print('Is valid?', rm.validate_phone_number(telephone='+919545667788', countryCode='IN'))
    print(rm.get_bulk_telephone_numbers())
    print(rm.generate_ssn())
    print(rm.generate_lorem_ipsum())
    print(rm.generate_password())
    ## Seems to have some issues.
    # print(rm.humanize_text())
3204f393fd435e711eee2a6e947ab43dcb47525e | 15,686 | py | Python | venv/lib/python3.7/site-packages/ccxt/async_support/__init__.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/ccxt/async_support/__init__.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/ccxt/async_support/__init__.py | balibou/ccxt-ohlcv-fetcher | a64cd43cbfd3fe09de34d8a66416ecc6c10d3b2f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.18.1270'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.async_support._1btcxe import _1btcxe # noqa: F401
from ccxt.async_support.acx import acx # noqa: F401
from ccxt.async_support.adara import adara # noqa: F401
from ccxt.async_support.allcoin import allcoin # noqa: F401
from ccxt.async_support.anxpro import anxpro # noqa: F401
from ccxt.async_support.bcex import bcex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binanceje import binanceje # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitkk import bitkk # noqa: F401
from ccxt.async_support.bitlish import bitlish # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmax import bitmax # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.bleutrade import bleutrade # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcchina import btcchina # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeim import btctradeim # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.chilebit import chilebit # noqa: F401
from ccxt.async_support.cobinhood import cobinhood # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinexchange import coinexchange # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coingi import coingi # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.cointiger import cointiger # noqa: F401
from ccxt.async_support.coolcoin import coolcoin # noqa: F401
from ccxt.async_support.coss import coss # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.dsx import dsx # noqa: F401
from ccxt.async_support.dx import dx # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.fcoin import fcoin # noqa: F401
from ccxt.async_support.fcoinjp import fcoinjp # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.foxbit import foxbit # noqa: F401
from ccxt.async_support.fybse import fybse # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.huobiru import huobiru # noqa: F401
from ccxt.async_support.ice3x import ice3x # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kkex import kkex # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kucoin2 import kucoin2 # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.lakebtc import lakebtc # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.livecoin import livecoin # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mandala import mandala # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.negociecoins import negociecoins # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoincny import okcoincny # noqa: F401
from ccxt.async_support.okcoinusd import okcoinusd # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.okex3 import okex3 # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stronghold import stronghold # noqa: F401
from ccxt.async_support.surbitcoin import surbitcoin # noqa: F401
from ccxt.async_support.theocean import theocean # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vaultoro import vaultoro # noqa: F401
from ccxt.async_support.vbtc import vbtc # noqa: F401
from ccxt.async_support.virwox import virwox # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xbtce import xbtce # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'_1btcxe',
'acx',
'adara',
'allcoin',
'anxpro',
'bcex',
'bequant',
'bibox',
'bigone',
'binance',
'binanceje',
'binanceus',
'bit2c',
'bitbank',
'bitbay',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bithumb',
'bitkk',
'bitlish',
'bitmart',
'bitmax',
'bitmex',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcchina',
'btcmarkets',
'btctradeim',
'btctradeua',
'btcturk',
'buda',
'cex',
'chilebit',
'cobinhood',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinexchange',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'cointiger',
'coolcoin',
'coss',
'crex24',
'deribit',
'digifinex',
'dsx',
'dx',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'fybse',
'gateio',
'gemini',
'hitbtc',
'hitbtc2',
'huobipro',
'huobiru',
'ice3x',
'idex',
'independentreserve',
'indodax',
'itbit',
'kkex',
'kraken',
'kucoin',
'kucoin2',
'kuna',
'lakebtc',
'latoken',
'lbank',
'liquid',
'livecoin',
'luno',
'lykke',
'mandala',
'mercado',
'mixcoins',
'negociecoins',
'oceanex',
'okcoincny',
'okcoinusd',
'okex',
'okex3',
'paymium',
'poloniex',
'rightbtc',
'southxchange',
'stronghold',
'surbitcoin',
'theocean',
'therock',
'tidebit',
'tidex',
'upbit',
'vaultoro',
'vbtc',
'virwox',
'whitebit',
'xbtce',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
| 50.928571 | 86 | 0.556547 |
8197cf053906f5eda02c9b8ebff8d7767be65c49 | 252 | py | Python | tempdir/login.py | lamendoza-tip/design_app | a8db05985954618fdd73077452cf844c32954694 | [
"MIT"
] | null | null | null | tempdir/login.py | lamendoza-tip/design_app | a8db05985954618fdd73077452cf844c32954694 | [
"MIT"
] | null | null | null | tempdir/login.py | lamendoza-tip/design_app | a8db05985954618fdd73077452cf844c32954694 | [
"MIT"
from flask import Flask
from flask import request
from flask import render_template
sample = Flask(__name__)
@sample.route("/")
def main():
return render_template("login.html")
if __name__ == "__main__":
sample.run(host="0.0.0.0", port=5050) | 21 | 41 | 0.722222 |
2628003524fd59cf7ac932202dec9c597d0f275f | 3,975 | py | Python | tensorflow_examples/lite/model_customization/core/data_util/dataloader.py | windmaple/examples | 09d7cd780226679b133fc7cb93b5a8342e9d5c2f | [
"Apache-2.0"
] | 1 | 2021-09-22T12:03:20.000Z | 2021-09-22T12:03:20.000Z | tensorflow_examples/lite/model_maker/core/data_util/dataloader.py | godofecht/examples | 5875c06c3cc76af5419986ab9d2f3d51bea43425 | [
"Apache-2.0"
] | 5 | 2021-06-08T20:55:10.000Z | 2022-02-10T00:45:10.000Z | tensorflow_examples/lite/model_maker/core/data_util/dataloader.py | godofecht/examples | 5875c06c3cc76af5419986ab9d2f3d51bea43425 | [
"Apache-2.0"
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Dataset used for tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import json
import os
import tensorflow as tf # TF2
from official.nlp.bert import input_pipeline
def load(tfrecord_file, meta_data_file, model_spec):
"""Loads data from tfrecord file and metada file."""
dataset = input_pipeline.single_file_dataset(
tfrecord_file, model_spec.get_name_to_features())
dataset = dataset.map(
model_spec.select_data_from_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
with tf.io.gfile.GFile(meta_data_file, 'rb') as reader:
meta_data = json.load(reader)
return dataset, meta_data
def get_cache_filenames(cache_dir, model_spec, data_name):
"""Gets cache tfrecord filename, metada filename and prefix of filenames."""
hasher = hashlib.md5()
hasher.update(data_name.encode('utf-8'))
hasher.update(str(model_spec.get_config()).encode('utf-8'))
cache_prefix = os.path.join(cache_dir, hasher.hexdigest())
cache_tfrecord_file = cache_prefix + '.tfrecord'
cache_meta_data_file = cache_prefix + '_meta_data'
return cache_tfrecord_file, cache_meta_data_file, cache_prefix
def write_meta_data(meta_data_file, meta_data):
  """Serialize *meta_data* as JSON to *meta_data_file* (GFile-compatible path)."""
  with tf.io.gfile.GFile(meta_data_file, 'w') as meta_file:
    json.dump(meta_data, meta_file)
class DataLoader(object):
  """This class provides generic utilities for loading customized domain data that will be used later in model retraining.

  For different ML problems or tasks, such as image classification, text
  classification etc., a subclass is provided to handle task-specific data
  loading requirements.
  """

  def __init__(self, dataset, size):
    """Init function for class `DataLoader`.

    In most cases, one should use helper functions like `from_folder` to create
    an instance of this class.

    Args:
      dataset: A tf.data.Dataset object that contains a potentially large set of
        elements, where each element is a pair of (input_data, target). The
        `input_data` means the raw input data, like an image, a text etc., while
        the `target` means some ground truth of the raw input data, such as the
        classification label of the image etc.
      size: The size of the dataset. tf.data.Dataset doesn't support a function
        to get the length directly since it's lazy-loaded and may be infinite.
    """
    self.dataset = dataset
    self.size = size

  def split(self, fraction, shuffle=True):
    """Splits dataset into two sub-datasets with the given fraction.

    Primarily used for splitting the data set into training and testing sets.

    Args:
      fraction: float, fraction of the data that goes into the first returned
        sub-dataset; the remainder goes into the second.
      shuffle: boolean, whether to randomly shuffle the data before splitting.

    Returns:
      The two split sub-datasets, as (train, test) DataLoader instances.
    """
    if shuffle:
      ds = self.dataset.shuffle(
          buffer_size=self.size, reshuffle_each_iteration=False)
    else:
      ds = self.dataset

    train_size = int(self.size * fraction)
    trainset = DataLoader(ds.take(train_size), train_size)

    test_size = self.size - train_size
    # BUG FIX: skip past the *training* examples to get the remainder.  The
    # original code did `ds.skip(test_size)`, which overlaps with (or drops)
    # training examples whenever fraction != 0.5.
    testset = DataLoader(ds.skip(train_size), test_size)

    return trainset, testset
| 35.176991 | 122 | 0.736101 |
d1dd9116e913d2b42a2b1a3342af04f188a0645d | 2,669 | py | Python | grammaranalyzer/core.py | mas-student/web-python-2018-04-ht03 | d853dee86e6271e132f8d79d24b52aafbe7d3779 | [
"MIT"
] | null | null | null | grammaranalyzer/core.py | mas-student/web-python-2018-04-ht03 | d853dee86e6271e132f8d79d24b52aafbe7d3779 | [
"MIT"
] | null | null | null | grammaranalyzer/core.py | mas-student/web-python-2018-04-ht03 | d853dee86e6271e132f8d79d24b52aafbe7d3779 | [
"MIT"
import os
import git
import collections
from operator import itemgetter
from .helpers import is_python, get_filenames_from_path
from .parse import get_nodes_from_filenames, get_node_name, get_words_from_names, is_function_node, is_name_node
from .analysis import is_verb
from .output import to_fuzzy, to_csv, to_json, to_output
def get_words_from_path(path, language, node, pos):
    """Yield words mined from source files under *path*.

    Filters by source language, AST node kind and part of speech; a value of
    None for any filter (i.e. an unrecognized option) disables that filter.
    """
    language_filter = is_python if language == 'python' else None
    pos_filter = is_verb if pos == 'verb' else None
    if node == 'function':
        node_filter = is_function_node
    elif node == 'variable':
        node_filter = is_name_node
    else:
        node_filter = None

    # Lazy pipeline: files -> AST nodes -> identifiers -> filtered words.
    filenames = filter(language_filter, get_filenames_from_path(path))
    nodes = filter(node_filter, get_nodes_from_filenames(filenames))
    names = map(get_node_name, nodes)
    return filter(pos_filter, get_words_from_names(names))
def get_occurrences_from_path(path, language, node, pos, limit):
    """Return the *limit* most common extracted words as (word, count) pairs."""
    words = list(get_words_from_path(path, language, node, pos))
    return collections.Counter(words).most_common(limit)
def fetch_project(projects_path, project_uri):
    """Ensure the project is available locally and return its path.

    GitHub SSH URIs are cloned into *projects_path* on first use; anything
    else is treated as a directory name under *projects_path*.
    """
    if not project_uri.startswith('git@github.com'):
        return os.path.join(projects_path, project_uri)

    # 'git@github.com:user/repo.git' -> 'repo'
    repo_name = project_uri.split('/')[1].split('.')[0]
    repo_path = os.path.join(projects_path, repo_name)
    if not os.path.exists(repo_path):
        git.Git(projects_path).clone(project_uri)
    return repo_path
def to_format(format, occurrences):
    """Render *occurrences* in the requested output format (fuzzy by default)."""
    formatters = {'csv': to_csv, 'json': to_json, 'fuzzy': to_fuzzy}
    return formatters.get(format, to_fuzzy)(occurrences)
def normalize_format(output, format):
    """Infer the output format from the filename extension when none is given."""
    if format is not None:
        return output, format
    for extension, inferred in (('.csv', 'csv'), ('.json', 'json')):
        if output.endswith(extension):
            return output, inferred
    return output, 'fuzzy'
def explore_projects(projects, projects_path, language, node, pos, limit, output, format):
    """Fetch each project, write its word-occurrence report, and accumulate totals.

    NOTE(review): `total_counter` and `total_top_verbs` are built up but never
    returned or used after the loop -- confirm whether an aggregate report was
    intended here.
    """
    total_counter = collections.Counter()
    total_top_verbs = []
    for project in projects:
        path = fetch_project(projects_path, project)
        occurrences = list(get_occurrences_from_path(path, language, node, pos, limit))
        # Render and emit the per-project report.
        buffer = to_format(format, occurrences)
        to_output(output, buffer)
        total_counter.update(dict(occurrences))
        total_top_verbs += map(itemgetter(0), occurrences)
| 32.156627 | 112 | 0.698764 |
2dbd4bcec6881b72b6a35c5cef28f61c6bf6bb4e | 950 | py | Python | api/src/opentrons/protocols/__init__.py | SebastianEggert/opentrons | c377d6a4c10c742f9ca4dba746af903e4ea4f079 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocols/__init__.py | SebastianEggert/opentrons | c377d6a4c10c742f9ca4dba746af903e4ea4f079 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocols/__init__.py | SebastianEggert/opentrons | c377d6a4c10c742f9ca4dba746af903e4ea4f079 | [
"Apache-2.0"
] | null | null | null | from . import execute_v1, execute_v3
from opentrons.protocol_api.execute import (
get_protocol_schema_version, validate_protocol)
def execute_protocol(protocol_json):
    """Validate a JSON protocol, run its commands, and return the hardware used.

    Returns a dict with the instantiated 'pipettes' and 'labware'.
    Raises RuntimeError for schema versions newer than 3.
    """
    protocol_version = get_protocol_schema_version(protocol_json)
    if protocol_version > 3:
        raise RuntimeError(
            f'JSON Protocol version {protocol_version} is not yet ' +
            'supported in this version of the API')
    validate_protocol(protocol_json)

    # Both executor modules expose the same load/dispatch API.
    executor = execute_v3 if protocol_version == 3 else execute_v1
    pipettes = executor.load_pipettes(protocol_json)
    labware = executor.load_labware(protocol_json)
    executor.dispatch_commands(protocol_json, pipettes, labware)
    return {'pipettes': pipettes, 'labware': labware}
| 29.6875 | 69 | 0.664211 |
7814b03c29b830f5191eb2b227943f042c98d159 | 4,052 | py | Python | sensorflow/sensorflow.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
] | null | null | null | sensorflow/sensorflow.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
] | null | null | null | sensorflow/sensorflow.py | maxpowel/sensorflow-python | 7c6f23087fbff085c43dd6d3bc00ce8dae884484 | [
"MIT"
import serial
import json
import struct
from abc import ABCMeta, abstractmethod
class SensorflowSource(metaclass=ABCMeta):
    """Abstract transport for Sensorflow messages.

    Subclasses implement receive/send/close.  An instance is an infinite
    iterator: each iteration step returns the next received raw message.
    """
    def __iter__(self):
        return self
    def __next__(self):
        # BUG FIX: the original body was ``while True: yield self.receive()``.
        # A ``yield`` anywhere in __next__ makes it a generator function, so
        # ``next(source)`` returned a fresh generator object instead of a
        # message, breaking ``for msg in source`` entirely.
        return self.receive()
    @abstractmethod
    def receive(self):
        """Return the next raw message from the transport."""
    @abstractmethod
    def send(self, data):
        """Write raw bytes to the transport."""
    @abstractmethod
    def close(self):
        """Release the underlying transport resource."""
class Serializer(metaclass=ABCMeta):
    """Abstract codec converting between raw bytes and Python values."""
    @abstractmethod
    def loads(self, data):
        # Decode raw bytes into a Python value.
        pass
    @abstractmethod
    def dumps(self, data):
        # Encode a Python value into raw bytes.
        pass
class JsonSerializer(Serializer):
    """Serializer that encodes values as UTF-8 JSON."""
    def loads(self, data):
        text = data.decode('utf-8')
        return json.loads(text)
    def dumps(self, data):
        text = json.dumps(data)
        return text.encode('utf-8')
class SerialSource(SensorflowSource):
    """SensorflowSource backed by a pyserial port; messages are line-oriented."""
    def __init__(self, port="/dev/ttyUSB0", baudrate=115200, timeout=2):
        # timeout (seconds) bounds readline() so receive() cannot block forever.
        self.serial = serial.Serial(
            port=port,
            baudrate=baudrate,
            timeout=timeout
        )
    def receive(self):
        # One raw line (bytes); may be a partial/empty read if the timeout
        # elapses first (pyserial timeout semantics).
        return self.serial.readline()
    def send(self, data):
        self.serial.write(data)
    def close(self):
        self.serial.close()
class Sensorflow(object):
    """High-level client: frames serialized commands over a SensorflowSource."""
    def __init__(self, source, serializer):
        # source: transport (e.g. SerialSource); serializer: codec (e.g. JsonSerializer).
        self.source = source
        self.serializer = serializer
    def ping(self):
        # Handshake: send increasing single-byte values until the device answers.
        # The reply's first byte is taken as the echoed value and the bytes in
        # between (first and last stripped) as an ASCII message.
        # NOTE(review): `val` is packed with "B" and never wraps, so more than
        # 255 unanswered attempts would raise struct.error -- confirm the
        # device always responds sooner.
        run = True
        val = 0
        message = None
        while run:
            val += 1
            self.source.send(struct.pack("B", val))
            response = self.source.receive()
            if len(response):
                run = False
                message = response[1:len(response)-1].decode('ascii')
                val = response[0]
        return val, message
    def send(self, data):
        # Serialize and transmit one message.
        self.source.send(self.serializer.dumps(data))
    def receive(self):
        # Receive and deserialize one message.
        return self.serializer.loads(self.source.receive())
    def send_receive(self, data):
        # One request/response round trip.
        self.send(data)
        return self.receive()
    def close(self):
        self.source.close()
    # Commands here
    def status(self):
        # Ask the device for its status report.
        return self.send_receive({"command": "status"})
    def sensor_read(self):
        # Ask the device for a reading from all configured sensors.
        return self.send_receive({"command": "read"})
    def configure(self, configs):
        """Upload sensor configuration records (a single config or a list).

        Sends a serialized header describing the batch, then the raw
        concatenated config payload, and returns the device's reply.
        """
        if not isinstance(configs, list):
            configs = [configs]
        data = bytes()
        for config in configs:
            data += config.build_config()
        self.send({"command": "writeConfig", "totalSensors": len(configs), "dataSize": len(data)})
        # Raw payload follows the header outside the serializer framing.
        self.source.send(data)
        return self.receive()
# Sensor configurations
class DS18B20Sensor(object):
    """Builds the wire config record for a DS18B20 one-wire temperature sensor."""
    sensor_type = "DS18B20"
    def __init__(self, address):
        """address: sequence of 8 bytes, the sensor's one-wire ROM address."""
        if len(address) != 8:
            raise Exception("DS18B20 address should have a length of 8, not {size}".format(size=len(address)))
        self.address = address
    def build_config(self):
        # Record layout: ascii type name, a zero pin byte, payload length,
        # then the 8-byte ROM address.
        type_bytes = struct.pack("{size}s".format(size=len(self.sensor_type)), bytes(self.sensor_type, 'ascii'))
        rom_bytes = struct.pack("8B", *self.address)
        return type_bytes + struct.pack("BB", 0, len(self.address)) + rom_bytes
class DHTSensor(object):
    """Builds the wire config record for the DHT humidity/temperature family."""
    # Supported chip variants (the AM2301 is wire-compatible with the DHT21).
    DHT11 = 11
    DHT22 = 22
    DHT21 = 21
    AM2301 = 21
    sensor_type = "DHT"
    def __init__(self, dht_type, pin):
        """dht_type: one of the class constants above; pin: data pin number."""
        self.dht_type = dht_type
        self.pin = pin
    def build_config(self):
        # Record layout: ascii type name, a zero pin byte, payload length,
        # then (pin, chip variant).
        type_bytes = struct.pack("{size}s".format(size=len(self.sensor_type)), bytes(self.sensor_type, 'ascii'))
        pin_bytes = struct.pack("BB", self.pin, self.dht_type)
        return type_bytes + struct.pack("BB", 0, len(pin_bytes)) + pin_bytes
class INA219Sensor(object):
    """Builds the wire config record for an INA219 current/power sensor.

    The INA219 is an I2C device, so the record carries no pin and no payload.
    """
    sensor_type = "INA219"
    def build_config(self):
        type_bytes = struct.pack("{size}s".format(size=len(self.sensor_type)), bytes(self.sensor_type, 'ascii'))
        return type_bytes + struct.pack("BB", 0, 0)
6fa92b7eb700d354e8dad2be544af30b36f3a89b | 8,135 | py | Python | configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:04:34.000Z | 2021-01-14T12:04:34.000Z | configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
] | null | null | null | configs/cascade_mask_rcnn_x101_64x4d_fpn_1x.py | marinarierav-uab/foveabox | 1f313fd14aaf018aadb0c6b3de163eb0a3b1fbd5 | [
"Apache-2.0"
# model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss',
beta=1.0,
loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5),
keep_all_stages=False)
# dataset settings
dataset_type = 'Polyp'
data_root = 'data/CVC-VideoClinicDBtrain_valid/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/renamed-train.json',
img_prefix=data_root + 'images/train/',
#img_scale=[(384, 288), (384*0.9, 288*0.9), (384*0.8, 288*0.8), (384*0.7, 288*0.7)], # escalado de imagen --> adds grey padding pocho, need to fix
#img_scale=[(384*0.9, 288*0.9),(384*1.1, 288*1.1)], # escalado de imagen --> adds grey padding pocho, need to fix
img_scale=[(384, 288)],
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/renamed-valid.json',
img_prefix=data_root + 'images/train/',
img_scale=(384, 288),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/test.json',
img_prefix=data_root + 'images/test/',
img_scale=(384, 288),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_mask_rcnn_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1),('val', 1)]
b784921e52c677af8bb48dc0ed200af95fa93262 | 638 | py | Python | manage.py | Johnnyevans32/Spentech-Sales-Management | 6ed077bd8a23b332ea1a893228a695a0ec039d8e | [
"MIT"
] | null | null | null | manage.py | Johnnyevans32/Spentech-Sales-Management | 6ed077bd8a23b332ea1a893228a695a0ec039d8e | [
"MIT"
] | null | null | null | manage.py | Johnnyevans32/Spentech-Sales-Management | 6ed077bd8a23b332ea1a893228a695a0ec039d8e | [
"MIT"
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure Django settings and hand control to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spentech_dashboard.settings')
    try:
        # Imported lazily so a missing Django can be reported with a hint.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 29 | 82 | 0.688088 |
c23d6791bdfa1f545a03a5fec0a8d1715c4c95e5 | 556 | py | Python | tests/frontends/test_frontend.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null | tests/frontends/test_frontend.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null | tests/frontends/test_frontend.py | fosterrath-mila/myia | b4382bd6eb53c709136a6142ee517e0b92d7baed | [
"MIT"
] | null | null | null |
import pytest
from myia.frontends import (
FrontendLoadingError,
UnknownFrontend,
activate_frontend,
)
def test_load_frontend_unknown():
with pytest.raises(UnknownFrontend):
activate_frontend('_made_up_frontend')
def test_frontend_error():
from myia.frontends import _frontends
name = '__testing_name000_'
def f():
raise ValueError('test')
assert name not in _frontends
_frontends[name] = f
with pytest.raises(FrontendLoadingError):
activate_frontend(name)
del _frontends[name]
| 18.533333 | 46 | 0.710432 |
e2da90df87cbd5e8f93ecf2141731a3fc5b48443 | 474 | py | Python | src/mayaToCorona/mtco_devmodule/scripts/Corona/passes.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
] | 42 | 2015-01-03T15:07:25.000Z | 2021-12-09T03:56:59.000Z | src/mayaToCorona/mtco_devmodule/scripts/Corona/passes.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
] | 66 | 2015-01-02T13:28:44.000Z | 2022-03-16T14:00:57.000Z | src/mayaToCorona/mtco_devmodule/scripts/Corona/passes.py | haggi/OpenMaya | 746e0740f480d9ef8d2173f31b3c99b9b0ea0d24 | [
"MIT"
def getAOVs():
    """Return the Corona render-element (AOV) names supported by the exporter."""
    aov_names = (
        "Alpha", "SourceColor", "Components", "Shadows",
        "Albedo", "RawComponent", "Normals", "NormalsDotProduct",
        "NormalsDiscrepancy", "PrimitiveCoords", "MapCoords", "Velocity",
        "ZDepth", "WorldPosition", "Id", "Texmap",
    )
    return list(aov_names)
930a15cbda1de39b378f6dab278074311c782310 | 1,218 | py | Python | tests/integration/network/convex/test_convex_network.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | null | null | null | tests/integration/network/convex/test_convex_network.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | null | null | null | tests/integration/network/convex/test_convex_network.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
"""
Test Convex Network class
"""
import pytest
import secrets
from starfish.network.did import did_generate_random
from tests.integration.network.convex.helpers import auto_topup_account
TEST_AMOUNT = 5
def test_convex_network_setup(convex_network, config):
    # The network fixture must point at the URL configured for convex.
    assert convex_network.url == config['convex']['network']['url']
def test_convex_network_did(convex_network, convex_accounts):
    """Register a random DID and make sure it resolves to the stored DDO."""
    account = convex_accounts[0]
    auto_topup_account(convex_network, account)

    did = did_generate_random()
    ddo = f'test - ddo - {did}'

    # Registration should echo back the DID that was written.
    registered = convex_network.register_did(account, did, ddo)
    assert registered
    assert registered == did

    # Resolving the same DID must return the stored DDO text.
    resolved = convex_network.resolve_did(did, account.address)
    assert resolved
    assert resolved == ddo
def test_convex_network_provenance(convex_network, convex_accounts):
    """Register a provenance record and read its event list back.

    Cleanup: removed leftover debug ``print(result)`` calls that cluttered
    the pytest output.
    """
    account = convex_accounts[0]
    auto_topup_account(convex_network, account)

    asset_id = secrets.token_hex(32)
    result = convex_network.register_provenance(account, asset_id)
    assert result

    event_list = convex_network.get_provenance_event_list(asset_id)
    assert event_list
| 27.681818 | 75 | 0.762726 |
c1ce926145b6f359e6d89541236208353e53b8ab | 1,604 | py | Python | tests/conftest.py | nicfit/Parcyl | 2c5bdd472f570b2e6e25e2b687b8a2a9f006a225 | [
"MIT"
] | 2 | 2019-01-29T20:59:26.000Z | 2020-07-16T07:12:37.000Z | tests/conftest.py | nicfit/Parcyl | 2c5bdd472f570b2e6e25e2b687b8a2a9f006a225 | [
"MIT"
] | 6 | 2019-02-09T18:48:22.000Z | 2020-03-20T04:41:58.000Z | tests/conftest.py | nicfit/Parcyl | 2c5bdd472f570b2e6e25e2b687b8a2a9f006a225 | [
"MIT"
] | 1 | 2019-09-21T03:38:05.000Z | 2019-09-21T03:38:05.000Z | import sys
import textwrap
import subprocess
from pathlib import Path
import pytest
@pytest.fixture()
def parcyl_d(tmpdir):
    """Per-test scratch project directory wrapping pytest's tmpdir."""
    return ParcylDir(tmpdir)
class ParcylDir:
    """Scratch project directory seeded with a unit-test copy of parcyl.py."""

    _PARCYL_PY = Path(__file__).parent.parent.absolute() / "parcyl.py"
    _SETUP_PY_FORMAT = textwrap.dedent("""\
        import parcyl
        assert parcyl._UNIT_TESTING == True
        setup = parcyl.Setup({setup_kwargs})
        setup()
    """)

    def __init__(self, tmp_d):
        self._tmpdir = tmp_d
        self.path = Path(str(tmp_d))
        # Copy the project's parcyl.py into the sandbox with the testing
        # flag appended, so generated setup.py files use this exact copy.
        target = self.path / "parcyl.py"
        target.write_text(self._PARCYL_PY.read_text() + "\n_UNIT_TESTING = True")

    def _withFile(self, fname, contents):
        # Write a file into the sandbox directory.
        (self.path / fname).write_text(contents)

    def withSetupPy(self, setup_kwargs: dict = None, contents: str = None):
        """Write a setup.py; built from *setup_kwargs* unless *contents* given."""
        if contents is None:
            if setup_kwargs:
                kwarg_s = "\n".join(
                    f"{name}=\"{value}\"," for name, value in setup_kwargs.items()
                )
            else:
                kwarg_s = ""
            contents = self._SETUP_PY_FORMAT.format(setup_kwargs=kwarg_s)
        self._withFile("setup.py", contents)
        return self

    def withSetupCfg(self, contents):
        """Write a setup.cfg with *contents*; returns self for chaining."""
        self._withFile("setup.cfg", contents)
        return self

    def setup(self, cmd, check=True):
        """Run ``python setup.py <cmd>`` inside the sandbox directory."""
        proc = subprocess.run(f"{sys.executable} setup.py {cmd}",
                              cwd=str(self.path), shell=True, check=check)
        return proc
| 28.140351 | 79 | 0.587905 |
07f6c1da8c2edae137f308089eeb016f66d5656f | 1,640 | py | Python | Recursion2/Conclusion/theskyline.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | Recursion2/Conclusion/theskyline.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | Recursion2/Conclusion/theskyline.py | mamoudmatook/Leetcode | 59fb1612ee648a9b99ff7cc779ada5656c01ecd2 | [
"MIT"
] | null | null | null | import builtins
class Solution:
def getSkyline(self, buildings):
n = len(buildings)
if n == 0:
return []
if n == 1:
xs, xe, y = buildings[0]
return [[xs, y], [xe, 0]]
lef_skyline = self.getSkyline(buildings[:n//2])
right_skyline = self.getSkyline(buildings[n//2:])
return self.merge_skyline(lef_skyline, right_skyline)
def merge_skyline(self, left, right):
def update_output(x, y):
if not output or x != output[-1][0]:
output.append([x, y])
else:
output[-1][1] = y
def append_skyline(p, lst, n, curry):
while p < n:
x, y = lst[p]
p += 1
if curry != y:
update_output(x, y)
curry = y
nl, nr = len(left), len(right)
pl = pr = 0
curry = ly = ry = 0
output = []
while pl < nl and pr < nr:
point_l, point_r = left[pl], right[pr]
if point_l[0] < point_r[0]:
x, ly = point_l
pl += 1
else:
x, ry = point_r
pr +=1
max_y = max(ly, ry)
if curry != max_y:
update_output(x, max_y)
curry = max_y
append_skyline(pl, left, nl, curry)
append_skyline(pr, right, nr, curry)
return output
def _demo():
    """Manual smoke test: print the merged skyline for a sample input."""
    solver = Solution()
    # NOTE(review): the original overwrote the 5-building sample with this
    # 2-building one, so only the second input was ever exercised.
    buildings = [[2, 9, 10], [3, 7, 15]]
    print(solver.getSkyline(buildings))


if __name__ == "__main__":
    # BUGFIX: previously ran unconditionally at import time.
    _demo()
6761bcff3c703235b22cc7461d1fe3a13f8278a7 | 258 | py | Python | reckonanalytics/keywordGen/views.py | 01dkg/googleAdsAutomations | 036acaa7d3146b0132a767085c37653d8e75712d | [
"Apache-2.0"
] | 9 | 2018-04-19T02:53:52.000Z | 2022-03-14T08:49:59.000Z | reckonanalytics/keywordGen/views.py | joaquinmorenoa/Marketing-Tech-Ads-Automation | 1f99d7b3b71ee546b70ddfe14b1b03d08d2ea877 | [
"Apache-2.0"
] | null | null | null | reckonanalytics/keywordGen/views.py | joaquinmorenoa/Marketing-Tech-Ads-Automation | 1f99d7b3b71ee546b70ddfe14b1b03d08d2ea877 | [
"Apache-2.0"
] | 8 | 2018-03-06T21:33:22.000Z | 2022-01-27T14:49:02.000Z | # howdy/views.py
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class HomePageView(TemplateView):
    """Render the static keyword-generator landing page."""

    def get(self, request, **kwargs):
        # The template is fully static, so no context is supplied.
        return render(request, 'index.html', context=None)
| 28.666667 | 58 | 0.748062 |
a6ff2d42d31f829a9e21197ef6de2a70448c61ac | 6,021 | py | Python | common/gen.py | lisiynos/lisiynos.github.io | 5900ee6ab0055a8047dd2b54b8667d05f3a145c2 | [
"MIT"
] | 1 | 2019-03-02T17:20:25.000Z | 2019-03-02T17:20:25.000Z | common/gen.py | lisiynos/lisiynos.github.io | 5900ee6ab0055a8047dd2b54b8667d05f3a145c2 | [
"MIT"
] | null | null | null | common/gen.py | lisiynos/lisiynos.github.io | 5900ee6ab0055a8047dd2b54b8667d05f3a145c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from string import Template
from os import listdir
from os.path import isfile, join, dirname, realpath
# from BeautifulSoup import BeautifulSoup
from bs4 import BeautifulSoup
import sys
if sys.version < '3':
    # Python 2 only: re-expose setdefaultencoding and force UTF-8 output.
    reload(sys)  # noqa: F821 -- `reload` is a builtin on Python 2
    sys.setdefaultencoding('UTF-8')
# Filter *.html files
def check_files(curFile):
thisDir = dirname(realpath(curFile)) # __file__
allFiles = set(f for f in listdir(thisDir) if isfile(join(thisDir, f)) and f.endswith(".html"))
# allFiles.remove('index_template.html')
allFiles.remove('index.html')
print(allFiles)
def ReadTemplate(template_fn):
    """Read a shared template from ``../common`` and return its text (UTF-8).

    BUGFIX: the path is now built with ``os.path.join`` (identical result on
    Windows, but also works on POSIX where ``'..\\common\\'`` was a literal
    file name), and the handle is closed via a context manager instead of
    being leaked.
    """
    template_path = os.path.join('..', 'common', template_fn)
    with open(template_path, 'r', encoding='utf8') as fh:
        return fh.read()
def GenFile(template, params, fn, overwrite=False):
    """Render *template* with *params* and write the result to *fn*.

    :param template: template text (``string.Template`` syntax)
    :param params: substitution values (dict)
    :param fn: output file name
    :param overwrite: rewrite the file even if it already exists

    BUGFIX: the output handle is now closed in a ``finally`` block, so it is
    not leaked when ``write`` raises.
    """
    if not overwrite and os.path.isfile(fn):
        return
    print('Generating "%s"...' % fn)
    if sys.version < '3':
        out = open(fn, 'w')
    else:
        out = open(fn, 'w', encoding='UTF-8')
    try:
        out.write(Template(template).safe_substitute(params))
    finally:
        out.close()
# Template for one table row in the generated index
line_template = ReadTemplate("line_template.html")
# Template for a chapter heading row
chapter_template = ReadTemplate("chapter_template.html")
# Accumulated HTML body; go() appends one row per processed entry
body = ""
# Statistics for a single session
class Session:
    """Running totals of theory and practice hours across themes."""

    all_theory = 0
    all_practice = 0

    def theme(self, theory, practice):
        """Add one theme's hours to the totals."""
        self.all_theory += theory
        self.all_practice += practice

    def all_time(self):
        """Total hours: theory plus practice."""
        return self.all_practice + self.all_theory
# Column totals: on-site session and distance session
session = Session()
session_dist = Session()
# Distinct theme tags collected from all processed pages
tags = set()
# A chapter marker for the generated table
class Theme():
    """Marker object: a chapter with an anchor name and a display title."""

    def __init__(self, theme, title):
        self.theme = theme
        self.title = title
# Is the argument a string?  Works on both Python 2 and Python 3.
def isString(arg):
    if sys.version < '3':
        return isinstance(arg, basestring)  # noqa: F821 -- py2 only
    return isinstance(arg, str)
# Read from file
def go(arg):
    """Process one entry of the session plan.

    *arg* is one of:
      - a filename: hour counts are scraped from the page itself;
      - a ``Theme``: a chapter heading row is emitted and nothing counted;
      - a tuple ``(filename, theory, practic, theory_dist, practic_dist)``.

    Updates the module-level ``body``, ``session``, ``session_dist`` and
    ``tags`` accumulators.
    """
    global body, session, session_dist, tags
    if isString(arg):
        filename = arg
    elif isinstance(arg, Theme):
        d = {
            'theme': arg.theme,
            'title': arg.title,
        }
        res = Template(chapter_template).safe_substitute(d)
        body += "\n" + res
        return
    else:
        filename, theory, practic, theory_dist, practic_dist = arg
    if sys.version < '3':
        f = open(filename, "r")
    else:
        f = open(filename, "r", encoding='UTF-8')
    html = f.read()
    f.close()
    parsed_html = BeautifulSoup(html, "lxml")
    # Collect the theme tags of this page.
    ul_tags = parsed_html.body.findAll(True, {'class': 'tags'})
    if len(ul_tags) == 0:
        print(filename)
    for item in ul_tags[0].findAll('a'):
        tags.add(item.text)
    if isString(arg):
        # Hour counts are embedded in the page as <span> elements.
        theory = int(parsed_html.body.find('span', attrs={'class': 'theory'}).text)
        practic = int(parsed_html.body.find('span', attrs={'class': 'practic'}).text)
        theory_dist = int(parsed_html.body.find('span', attrs={'class': 'theory_dist'}).text)
        practic_dist = int(parsed_html.body.find('span', attrs={'class': 'practic_dist'}).text)
        # BUGFIX: these were `str + int` concatenations which raise
        # TypeError on Python 3.
        print('theory = %s %s' % (theory, theory_dist))
        print('practic = %s %s' % (practic, practic_dist))
    d = {
        'theory': theory, 'practic': practic,
        'theory_dist': theory_dist, 'practic_dist': practic_dist,
        'sum': theory + practic,
        'dist': theory_dist + practic_dist,
        'filename': filename,
        'theme': parsed_html.body.find('h1').text,
    }
    session.theme(theory, practic)
    session_dist.theme(theory_dist, practic_dist)
    # Replace every zero with a blank cell.
    if sys.version < '3':
        for k, v in d.iteritems():
            if v == 0:
                d[k] = ' '
    else:
        for k, v in d.items():
            if v == 0:
                d[k] = ' '
    res = Template(line_template).safe_substitute(d)
    body += "\n" + res
# print parsed_html.body.find('div', attrs={'class':'container'}).text
import color_console as cons
def Check(expected, actual, message):
    """Print a red ERROR line when *actual* differs from *expected*.

    *message* must contain two ``%d`` placeholders (expected, actual).
    """
    if actual == expected:
        return
    default_colors = cons.get_text_attr()
    default_bg = default_colors & 0x0070
    cons.set_text_attr(cons.FOREGROUND_RED | default_bg | cons.FOREGROUND_INTENSITY)
    print('ERROR:', )
    cons.set_text_attr(default_colors)
    print(message % (expected, actual))
# Generate index.html for one specific session
def GenerateIndex(title):
    """Validate the hour totals and render ``index.html`` for the session."""
    # Sanity checks against the curriculum requirements.
    Check(18, session.all_theory, u'Теории на очной сессии должно быть %d часов, сейчас: %d')
    Check(36, session.all_time(), u'Всего на очной сессии должно быть %d часов, сейчас: %d')
    Check(18, session_dist.all_time(), u'Сумма по дистанционной сессии должна быть %d часов, сейчас: %d')

    themes = sorted(tags)
    print('Themes: %d' % len(themes))

    GenFile(ReadTemplate("index_template.html"),
            {
                'title': title,
                'body': body,
                'all_theory': session.all_theory,
                'all_practic': session.all_practice,
                'all_session': session.all_time(),
                'all_theory_dist': session_dist.all_theory,
                'all_practic_dist': session_dist.all_practice,
                'all_session_dist': session_dist.all_time(),
                'themes': (', '.join(themes)),
            },
            "index.html", True)
# Показываем неиспользованные файлы
| 28.947115 | 105 | 0.612523 |
ac9a4386100322b1ea27092936f369aedbffcbbb | 12,697 | py | Python | main/auth/auth.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | [
"MIT"
] | null | null | null | main/auth/auth.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | [
"MIT"
] | null | null | null | main/auth/auth.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
import functools
import re
from flask_oauthlib import client as oauth
from google.appengine.ext import ndb
import flask
import flask_login
import flask_wtf
import unidecode
import wtforms
import cache
import config
import model
import task
import util
from main import app
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
login_manager = flask_login.LoginManager()
class AnonymousUser(flask_login.AnonymousUserMixin):
    """Stand-in user object for requests with no signed-in account."""

    id = 0
    admin = False
    name = 'Anonymous'
    user_db = None

    def key(self):
        # Anonymous visitors have no datastore entity.
        return None

    def has_permission(self, permission):
        # Anonymous visitors hold no permissions at all.
        return False
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
    """flask-login adapter wrapping a datastore ``model.User`` entity."""

    def __init__(self, user_db):
        self.user_db = user_db
        self.id = user_db.key.id()
        self.name = user_db.name
        self.admin = user_db.admin

    def key(self):
        return self.user_db.key.urlsafe()

    def get_id(self):
        # flask-login identifies the session by the urlsafe entity key.
        return self.user_db.key.urlsafe()

    def is_authenticated(self):
        return True

    def is_active(self):
        # Deactivated accounts are treated as signed out.
        return self.user_db.active

    def is_anonymous(self):
        return False

    def has_permission(self, permission):
        return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
    """Rehydrate the signed-in user from the urlsafe key in the session."""
    user_db = ndb.Key(urlsafe=key).get()
    return FlaskUser(user_db) if user_db else None
login_manager.init_app(app)
def current_user_id():
    """Numeric id of the signed-in user (0 for anonymous)."""
    return flask_login.current_user.id


def current_user_key():
    """Datastore key of the signed-in user, or None for anonymous."""
    current = flask_login.current_user
    return current.user_db.key if current.user_db else None


def current_user_db():
    """The ``model.User`` entity of the signed-in user (None if anonymous)."""
    return flask_login.current_user.user_db


def is_logged_in():
    """True when the request carries a real (non-anonymous) user."""
    return flask_login.current_user.id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
    """View decorator: redirect anonymous users to the sign-in page.

    API paths receive a bare 401 instead of a redirect.
    """
    decorator_order_guard(f, 'auth.login_required')

    @functools.wraps(f)
    def decorated_function(*args, **kwargs):
        if is_logged_in():
            return f(*args, **kwargs)
        if flask.request.path.startswith('/api/'):
            return flask.abort(401)
        return flask.redirect(flask.url_for('signin', next=flask.request.url))
    return decorated_function
def admin_required(f):
    """View decorator: only admins may pass.

    Anonymous API callers get 401, anonymous browser users are redirected
    to sign-in, and signed-in non-admins get 403.
    """
    decorator_order_guard(f, 'auth.admin_required')

    @functools.wraps(f)
    def decorated_function(*args, **kwargs):
        if is_logged_in():
            if current_user_db().admin:
                return f(*args, **kwargs)
            return flask.abort(403)
        # Not signed in at all.
        if flask.request.path.startswith('/api/'):
            return flask.abort(401)
        return flask.redirect(flask.url_for('signin', next=flask.request.url))
    return decorated_function
def cron_required(f):
    """Allow App Engine cron requests and admins; refuse everyone else."""
    decorator_order_guard(f, 'auth.cron_required')

    @functools.wraps(f)
    def decorated_function(*args, **kwargs):
        # App Engine strips this header from external requests, so its
        # presence proves the request came from the cron service.
        if 'X-Appengine-Cron' in flask.request.headers:
            return f(*args, **kwargs)
        if is_logged_in() and current_user_db().admin:
            return f(*args, **kwargs)
        if not is_logged_in():
            return flask.redirect(flask.url_for('signin', next=flask.request.url))
        return flask.abort(403)
    return decorated_function
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
    """Decorator factory guarding a view behind a named permission.

    ``permission`` defaults to the decorated function's name; ``methods``
    optionally restricts the check to specific HTTP verbs.
    """
    def permission_decorator(f):
        decorator_order_guard(f, 'auth.permission_required')
        # Default to the decorated function's name as the permission name.
        # NOTE(review): `func_name` exists on Python 2 only -- confirm the
        # target runtime before porting to py3 (`__name__` there).
        perm = permission or f.func_name
        meths = [m.upper() for m in methods] if methods else None
        permission_registered.send(f, permission=perm)

        @functools.wraps(f)
        def decorated_function(*args, **kwargs):
            # Verbs outside `methods` bypass the permission check entirely.
            if meths and flask.request.method.upper() not in meths:
                return f(*args, **kwargs)
            if is_logged_in() and current_user_db().has_permission(perm):
                return f(*args, **kwargs)
            if not is_logged_in():
                if flask.request.path.startswith('/api/'):
                    return flask.abort(401)
                return flask.redirect(flask.url_for('signin', next=flask.request.url))
            return flask.abort(403)
        return decorated_function
    return permission_decorator
###############################################################################
# Sign in stuff
###############################################################################
class SignInForm(flask_wtf.FlaskForm):
    """Email/password sign-in form with optional reCAPTCHA."""

    email = wtforms.StringField(
        'Email',
        [wtforms.validators.required()],
        filters=[util.email_filter],
    )
    password = wtforms.StringField(
        'Password',
        [wtforms.validators.required()],
    )
    remember = wtforms.BooleanField(
        'Keep me signed in',
        [wtforms.validators.optional()],
    )
    # Removed by form_with_recaptcha() until enough failed attempts pile up.
    recaptcha = flask_wtf.RecaptchaField()
    next_url = wtforms.HiddenField()
@app.route('/signin/', methods=['GET', 'POST'])
def signin():
    """Render and handle the email+password sign-in page."""
    next_url = util.get_next_url()
    form = form_with_recaptcha(SignInForm())
    save_request_params()
    if form.validate_on_submit():
        result = get_user_db_from_email(form.email.data, form.password.data)
        if result:
            cache.reset_auth_attempt()
            return signin_user_db(result)
        if result is None:
            form.email.errors.append('Email or Password do not match')
        if result is False:
            # Conflicting accounts; get_user_db_from_email already flashed.
            return flask.redirect(flask.url_for('welcome'))
    if not form.errors:
        form.next_url.data = next_url

    if form and form.errors:
        # Failed attempts count towards showing a reCAPTCHA next time.
        cache.bump_auth_attempt()

    return flask.render_template(
        'auth/auth.html',
        title='Sign in',
        html_class='auth',
        next_url=next_url,
        form=form,
        form_type='signin',
        **urls_for_oauth(next_url)
    )
###############################################################################
# Sign up stuff
###############################################################################
class SignUpForm(flask_wtf.FlaskForm):
    """Email-only sign-up form."""

    email = wtforms.StringField(
        'Email',
        [wtforms.validators.required(), wtforms.validators.email()],
        filters=[util.email_filter],
    )
    recaptcha = flask_wtf.RecaptchaField()
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
    """Render and handle the email sign-up page."""
    next_url = None
    form = form_with_recaptcha(SignUpForm())
    save_request_params()
    if form.validate_on_submit():
        user_db = model.User.get_by('email', form.email.data)
        if user_db:
            form.email.errors.append('This email is already taken.')
        if not form.errors:
            user_db = create_user_db(
                None,
                util.create_name_from_email(form.email.data),
                form.email.data,
                form.email.data,
            )
            user_db.put()
            task.activate_user_notification(user_db)
            cache.bump_auth_attempt()
            return flask.redirect(flask.url_for('signin'))

    if form and form.errors:
        cache.bump_auth_attempt()

    return flask.render_template(
        'auth/auth.html',
        title='Sign up',
        html_class='auth',
        next_url=next_url,
        form=form,
        **urls_for_oauth(next_url)
    )
###############################################################################
# Sign out stuff
###############################################################################
@app.route('/signout/')
def signout():
    """Clear the session and bounce to ``next`` or the sign-in page."""
    flask_login.logout_user()
    return flask.redirect(util.param('next') or flask.url_for('signin'))
###############################################################################
# Helpers
###############################################################################
def url_for_signin(service_name, next_url):
    """URL of the OAuth sign-in endpoint for *service_name*."""
    return flask.url_for('signin_%s' % service_name, next=next_url)
def urls_for_oauth(next_url):
    """Template kwargs mapping every OAuth provider to its sign-in URL."""
    services = (
        'azure_ad', 'bitbucket', 'dropbox', 'facebook', 'github', 'google',
        'gae', 'instagram', 'linkedin', 'mailru', 'microsoft', 'reddit',
        'twitter', 'vk', 'yahoo',
    )
    return {
        '%s_signin_url' % name: url_for_signin(name, next_url)
        for name in services
    }
def create_oauth_app(service_config, name):
    """Register an OAuth remote app under *name* and wire it to the Flask app."""
    config_key = name.upper()
    app.config[config_key] = service_config
    service_oauth = oauth.OAuth()
    remote = service_oauth.remote_app(name, app_key=config_key)
    service_oauth.init_app(app)
    return remote
def decorator_order_guard(f, decorator_name):
    """Fail fast when an auth decorator is placed above ``app.route``.

    A view function already registered with Flask means the route decorator
    ran first, so the auth check would never execute.
    """
    if f in app.view_functions.values():
        raise SyntaxError(
            'Do not use %s above app.route decorators as it would not be checked. '
            'Instead move the line below the app.route lines.' % decorator_name
        )
def save_request_params():
    """Stash `next`/`remember` in the session across the OAuth round-trip."""
    flask.session['auth-params'] = {
        'next': util.get_next_url(),
        'remember': util.param('remember'),
    }
def signin_oauth(oauth_app, scheme=None):
    """Start the OAuth dance for *oauth_app*; fall back to /signin on error."""
    try:
        # Drop any stale token from a previous, aborted attempt.
        flask.session.pop('oauth_token', None)
        save_request_params()
        return oauth_app.authorize(callback=flask.url_for(
            '%s_authorized' % oauth_app.name, _external=True, _scheme=scheme
        ))
    except oauth.OAuthException:
        flask.flash(
            'Something went wrong with sign in. Please try again.',
            category='danger',
        )
        return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
def form_with_recaptcha(form):
    """Strip the reCAPTCHA field unless enough failed attempts piled up."""
    should_have_recaptcha = cache.get_auth_attempt() >= config.RECAPTCHA_LIMIT
    if not (should_have_recaptcha and config.CONFIG_DB.has_recaptcha):
        del form.recaptcha
    return form
###############################################################################
# User related stuff
###############################################################################
def create_user_db(auth_id, name, username, email='', verified=False, **props):
    """Create (or merge into) a ``model.User`` for a new auth identity.

    If exactly one verified user with the same verified email already
    exists, the new auth_id is attached to that account instead of
    creating a duplicate.
    """
    email = email.lower() if email else ''
    if verified and email:
        user_dbs, cursors = model.User.get_dbs(email=email, verified=True, limit=2)
        if len(user_dbs) == 1:
            user_db = user_dbs[0]
            user_db.auth_ids.append(auth_id)
            user_db.put()
            task.new_user_notification(user_db)
            return user_db

    # NOTE(review): str.decode exists on Python 2 only; this module appears
    # to target the py2 GAE runtime -- confirm before porting to py3.
    if isinstance(username, str):
        username = username.decode('utf-8')
    # Derive a username from the local part of the email, ASCII-folded and
    # with non-word runs collapsed to dots.
    username = unidecode.unidecode(username.split('@')[0].lower()).strip()
    username = re.sub(r'[\W_]+', '.', username)

    # Find a free username by appending a counter if needed.
    new_username = username
    suffix = 1
    while not model.User.is_username_available(new_username):
        new_username = '%s%d' % (username, suffix)
        suffix += 1

    user_db = model.User(
        name=name,
        email=email,
        username=new_username,
        auth_ids=[auth_id] if auth_id else [],
        verified=verified,
        token=util.uuid(),
        **props
    )
    user_db.put()
    task.new_user_notification(user_db)
    return user_db
@ndb.toplevel
def signin_user_db(user_db):
    """Log *user_db* in with flask-login and redirect to the saved next URL."""
    if not user_db:
        return flask.redirect(flask.url_for('signin'))
    flask_user_db = FlaskUser(user_db)
    # Recover the params stashed by save_request_params(), falling back to
    # sensible defaults, then clear them from the session.
    auth_params = flask.session.get('auth-params', {
        'next': flask.url_for('welcome'),
        'remember': False,
    })
    flask.session.pop('auth-params', None)
    if flask_login.login_user(flask_user_db, remember=auth_params['remember']):
        user_db.put_async()
        return flask.redirect(util.get_next_url(auth_params['next']))
    flask.flash('Sorry, but you could not sign in.', category='danger')
    return flask.redirect(flask.url_for('signin'))
def get_user_db_from_email(email, password):
    """Look up an active user by email and verify the password.

    Returns the user on success, ``None`` on bad credentials, and ``False``
    when several active accounts share the email (conflict).
    """
    user_dbs, cursors = model.User.get_dbs(email=email, active=True, limit=2)
    if not user_dbs:
        return None
    if len(user_dbs) > 1:
        flask.flash('''We are sorry but it looks like there is a conflict with
your account. Our support team has been informed and we will get
back to you as soon as possible.''', category='danger')
        task.email_conflict_notification(email)
        return False
    user_db = user_dbs[0]
    if user_db.password_hash == util.password_hash(user_db, password):
        return user_db
    return None
| 28.791383 | 91 | 0.646058 |
f82019600db734a8c0670042e2c3da8eddfa4729 | 23,245 | py | Python | test/functional/test_runner.py | buntheun/kh69coin-core | f92fd594ab1493292b5dd12ef4d3c9e9597cfee9 | [
"MIT"
] | null | null | null | test/functional/test_runner.py | buntheun/kh69coin-core | f92fd594ab1493292b5dd12ef4d3c9e9597cfee9 | [
"MIT"
] | null | null | null | test/functional/test_runner.py | buntheun/kh69coin-core | f92fd594ab1493292b5dd12ef4d3c9e9597cfee9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# Scripts run by default (and by travis). Order matters: longest-running
# first, so the parallel job queue stays saturated.
BASE_SCRIPTS= [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'wallet_hd.py',
    'wallet_backup.py',
    # vv Tests less than 5m vv
    'feature_block.py',
    'rpc_fundrawtransaction.py',
    'p2p_compactblocks.py',
    'feature_segwit.py',
    # vv Tests less than 2m vv
    'wallet_basic.py',
    'wallet_accounts.py',
    'p2p_segwit.py',
    'wallet_dump.py',
    'rpc_listtransactions.py',
    # vv Tests less than 60s vv
    'p2p_sendheaders.py',
    'wallet_zapwallettxes.py',
    'wallet_importmulti.py',
    'mempool_limit.py',
    'rpc_txoutproof.py',
    'wallet_listreceivedby.py',
    'wallet_abandonconflict.py',
    'feature_csv_activation.py',
    'rpc_rawtransaction.py',
    'wallet_address_types.py',
    'feature_reindex.py',
    # vv Tests less than 30s vv
    'wallet_keypool_topup.py',
    'interface_zmq.py',
    'interface_bitcoin_cli.py',
    'mempool_resurrect.py',
    'wallet_txn_doublespend.py --mineblock',
    'wallet_txn_clone.py',
    'wallet_txn_clone.py --segwit',
    'rpc_getchaintips.py',
    'interface_rest.py',
    'mempool_spend_coinbase.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'wallet_multiwallet.py',
    'wallet_multiwallet.py --usecli',
    'interface_http.py',
    'rpc_users.py',
    'feature_proxy.py',
    'rpc_signrawtransaction.py',
    'p2p_disconnect_ban.py',
    'rpc_decodescript.py',
    'rpc_blockchain.py',
    'rpc_deprecated.py',
    'wallet_disable.py',
    'rpc_net.py',
    'wallet_keypool.py',
    'p2p_mempool.py',
    'mining_prioritisetransaction.py',
    'p2p_invalid_block.py',
    'p2p_invalid_tx.py',
    'feature_versionbits_warning.py',
    'rpc_preciousblock.py',
    'wallet_importprunedfunds.py',
    'rpc_signmessage.py',
    'feature_nulldummy.py',
    'wallet_import_rescan.py',
    'mining_basic.py',
    'wallet_bumpfee.py',
    'rpc_named_arguments.py',
    'wallet_listsinceblock.py',
    'p2p_leak.py',
    'wallet_encryption.py',
    'wallet_scriptaddress2.py',
    'feature_dersig.py',
    'feature_cltv.py',
    'rpc_uptime.py',
    'wallet_resendwallettransactions.py',
    'feature_minchainwork.py',
    'p2p_fingerprint.py',
    'feature_uacomment.py',
    'p2p_unrequested_blocks.py',
    'feature_logging.py',
    'p2p_node_network_limited.py',
    'feature_config_args.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]

# Long-running tests that only run with --extended.
EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'feature_pruning.py',
    # vv Tests less than 20m vv
    'feature_fee_estimation.py',
    # vv Tests less than 5m vv
    'feature_maxuploadtarget.py',
    'mempool_packages.py',
    'feature_dbcrash.py',
    # vv Tests less than 2m vv
    'feature_bip68_sequence.py',
    'mining_getblocktemplate_longpoll.py',
    'p2p_timeouts.py',
    # vv Tests less than 60s vv
    'feature_bip9_softforks.py',
    'p2p_feefilter.py',
    'rpc_bind.py',
    # vv Tests less than 30s vv
    'feature_assumevalid.py',
    'example_test.py',
    'wallet_txn_doublespend.py',
    'wallet_txn_clone.py --mineblock',
    'feature_notifications.py',
    'rpc_invalidateblock.py',
    'feature_rbf.py',
]

# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Entry point: parse options, select tests and dispatch to run_tests()."""
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(
        add_help=False,
        usage='%(prog)s [test_runner.py options] [script options] [scripts]',
        description=__doc__,
        epilog='''
Help text and arguments for individual test script:''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    args, unknown_args = parser.parse_known_args()

    # Args to be passed on always start with two dashes; tests are the
    # remaining unknown args.
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile))
    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/kh69coin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and kh69coind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
        test_list = []
        for t in tests:
            if t in ALL_SCRIPTS:
                test_list.append(t)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
    else:
        # No individual tests have been specified.
        # Run all base tests, and optionally run extended tests.
        test_list = BASE_SCRIPTS
        if args.extended:
            # place the EXTENDED_SCRIPTS first since the three longest ones
            # are there and the list is shorter
            test_list = EXTENDED_SCRIPTS + test_list

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
        for exclude_test in tests_excl:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script
        # (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0):
    """Run the given functional test scripts (optionally in parallel) and report results.

    :param test_list: test script names (with optional per-test arguments)
    :param src_dir: source directory containing test/functional/
    :param build_dir: build directory containing the src/ binaries
    :param exeext: executable suffix ('' on unix, '.exe' on Windows)
    :param tmpdir: directory for per-test temporary data
    :param jobs: number of tests to run concurrently
    :param enable_coverage: collect and report RPC coverage when True
    :param args: extra command-line flags passed through to every test script
    :param combined_logs_len: when non-zero, print that many trailing lines of
        the combined logs for each failing test
    Exits the process with a non-zero status if any test failed.
    """
    # A literal 'args=[]' default would be shared across calls (mutable
    # default argument pitfall); normalize None to a fresh list instead.
    if args is None:
        args = []
    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "kh69coind"]) is not None:
            print("%sWARNING!%s There is already a kh69coind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        pass
    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
    # Set env vars so test scripts can locate the binaries
    if "KH69COIND" not in os.environ:
        os.environ["KH69COIND"] = build_dir + '/src/kh69coind' + exeext
        os.environ["KH69COINCLI"] = build_dir + '/src/kh69coin-cli' + exeext
    tests_dir = src_dir + '/test/functional/'
    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)
    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None
    if len(test_list) > 1 and jobs > 1:
        # Populate cache up-front so parallel tests don't race to create it
        try:
            subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise
    # Run Tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []
    max_len_name = len(max(test_list, key=len))
    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
    print_results(test_results, max_len_name, (int(time.time() - time0)))
    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)
    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print one formatted row per test result plus an aggregate summary."""
    parts = ["\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]]
    test_results.sort(key=lambda result: result.name.lower())
    # Compute the aggregates first, then render each row with its padding set.
    passed_all = all(result.was_successful for result in test_results)
    total_time = sum(result.time for result in test_results)
    for result in test_results:
        result.padding = max_len_name
        parts.append(str(result))
    overall = TICK + "Passed" if passed_all else CROSS + "Failed"
    parts.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), overall.ljust(9), total_time) + BOLD[0])
    parts.append("Runtime: %s s\n" % (runtime))
    print("".join(parts))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """
    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        # Number of subprocesses currently running.
        self.num_running = 0
        # In case there is a graveyard of zombie bitcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each entry: (name, start_time, Popen, testdir, stdout_file, stderr_file).
        self.jobs = []
    def get_next(self):
        # Top up the worker pool, then block until the first running job
        # finishes; returns (TestResult, testdir, stdout text, stderr text).
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            t = self.test_list.pop(0)
            # Derive the port seed from the remaining list length so each
            # test gets a distinct seed within this run.
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = t.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((t,
                              time.time(),
                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for j in self.jobs:
                (name, time0, proc, testdir, log_out, log_err) = j
                if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output.
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Non-empty stderr counts as a failure even on a passing
                    # exit code.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(j)
                    return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
            print('.', end='', flush=True)
class TestResult():
    """Outcome of a single test script run: name, status and duration.

    ``padding`` is set by print_results() so names line up in the report.
    """
    def __init__(self, name, status, time):
        self.name = name
        self.status = status  # "Passed", "Skipped" or "Failed"
        self.time = time
        self.padding = 0
    def __repr__(self):
        if self.status == "Passed":
            color = BLUE
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE
        else:
            # Previously an unknown status fell through with 'color' and
            # 'glyph' unbound, raising NameError inside __repr__. Render
            # it neutrally instead.
            color = GREY
            glyph = " "
        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
    @property
    def was_successful(self):
        # Skipped tests count as successful; only "Failed" is a failure.
        return self.status != "Failed"
def check_script_prefixes():
    """Check that at most a handful of the
    test scripts don't start with one of the allowed name prefixes."""
    # LEEWAY is provided as a transition measure, so that pull-requests
    # that introduce new tests that don't conform with the naming
    # convention don't immediately cause the tests to fail.
    LEEWAY = 10
    allowed_prefixes = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    offenders = [name for name in ALL_SCRIPTS if not allowed_prefixes.match(name)]
    if offenders:
        print("INFO: %d tests not meeting naming conventions:" % (len(offenders)))
        print(" %s" % ("\n ".join(sorted(offenders))))
    assert len(offenders) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(offenders), LEEWAY)
def check_script_list(src_dir):
    """Check scripts directory.
    Check that there are no scripts in the functional tests directory which are
    not being run by pull-tester.py."""
    script_dir = src_dir + '/test/functional/'
    python_files = {name for name in os.listdir(script_dir) if name.endswith(".py")}
    known_scripts = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(python_files - known_scripts)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if os.getenv('TRAVIS') == 'true':
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Each test script subprocess writes coverage files into a shared temp
    directory. The files record the RPC commands invoked during the run;
    `rpc_interface.txt` holds the complete command listing taken from
    `kh69coin-cli help`. After all tests complete, the invoked commands
    are merged and diffed against the full listing to find RPC commands
    that no test exercised.

    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir
    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % i) for i in sorted(uncovered)))
    def cleanup(self):
        return shutil.rmtree(self.dir)
    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")
        with open(reference_path, 'r') as f:
            all_cmds = {line.strip() for line in f.readlines()}
        covered_cmds = set()
        # Read every per-test coverage file as we find it instead of
        # collecting the paths first.
        for root, dirs, files in os.walk(self.dir):
            for filename in files:
                if not filename.startswith(coverage_file_prefix):
                    continue
                with open(os.path.join(root, filename), 'r') as f:
                    covered_cmds.update(line.strip() for line in f.readlines())
        return all_cmds - covered_cmds
# Script entry point: parse arguments and run the selected test scripts.
if __name__ == '__main__':
    main()
| 40.355903 | 195 | 0.634588 |
431674f95c4ced0b4eb87d741f4a939e7631b653 | 2,750 | py | Python | generic_cache/decorator.py | pantuza/generic_cache | 6db5d178b81a85031ebb3c0d093cc3e3aba0d08b | [
"MIT"
] | 9 | 2018-08-22T18:52:49.000Z | 2020-04-21T23:38:43.000Z | generic_cache/decorator.py | pantuza/generic_cache | 6db5d178b81a85031ebb3c0d093cc3e3aba0d08b | [
"MIT"
] | null | null | null | generic_cache/decorator.py | pantuza/generic_cache | 6db5d178b81a85031ebb3c0d093cc3e3aba0d08b | [
"MIT"
] | 2 | 2018-08-23T14:44:52.000Z | 2018-09-05T12:43:16.000Z | # Copyright (c) 2018, Globo.com (https://github.com/globocom)
#
# License: MIT
class CacheDecorator(object):
    """Factory of caching decorators bound to one backend/key-builder pair.

    An instance is callable: calling it with a key type (and optional
    timeout/version) returns a decorator that caches the wrapped
    function's result under a key built from the function and its
    arguments. The decorated function gains a ``cache`` attribute
    (a CacheHandler) for direct get/flush operations.
    """
    def __init__(self, key_prefix, cache_backend, key_builder, default_timeout=None):
        self._key_prefix = key_prefix
        self._cache_backend = cache_backend
        self._key_builder = key_builder
        self._default_timeout = default_timeout
        self._build_generic_cache()
    def __call__(self, key_type, key_timeout=None, key_version=""):
        # Fix: compare against None with 'is', not '=='; '==' invokes
        # arbitrary __eq__ implementations and is non-idiomatic.
        if key_timeout is None:
            key_timeout = self._default_timeout
        return self._build_decorator(key_type, key_timeout, key_version)
    def _build_decorator(self, key_type, key_timeout, key_version):
        from functools import wraps
        def decorator(func):
            @wraps(func)
            def decorated(*args, **kwargs):
                # Per-call cache controls are stripped from kwargs so the
                # wrapped function never sees them.
                disable_cache = kwargs.pop('disable_cache', False)
                disable_cache_overwrite = kwargs.pop(
                    'disable_cache_overwrite', False
                )
                def call_original():
                    return func(*args, **kwargs)
                key = self._build_key(key_type, func, *args, key_version=key_version, **kwargs)
                key.timeout = key_timeout
                return self._generic_cache.get(
                    key, call_original, disable_cache=disable_cache,
                    disable_cache_overwrite=disable_cache_overwrite
                )
            decorated.cache = CacheHandler(func, self, key_type, key_version)
            return decorated
        return decorator
    def _build_generic_cache(self):
        # Imported lazily to avoid a hard import cycle with .cache.
        from .cache import GenericCache
        self._generic_cache = GenericCache(
            self._cache_backend,
            self._default_timeout,
            key_prefix=self._key_prefix
        )
    def _build_key(self, key_type, original_func, *func_args, **func_kwargs):
        key_prefix = self._key_prefix + key_type
        return self._key_builder.build_key(key_prefix, original_func, *func_args, **func_kwargs)
class CacheHandler(object):
    """Per-function handle exposing cache operations (get/flush) for one
    decorated callable."""
    def __init__(self, func, decorator_factory, key_type, key_version):
        self.func = func
        self.decorator_factory = decorator_factory
        self.key_version = key_version
        self.key_type = key_type
    def _call_cache(self, method, *args, **kwargs):
        # Rebuild the cache key exactly as the decorator would, then invoke
        # the requested operation on the shared GenericCache instance.
        factory = self.decorator_factory
        key = factory._build_key(self.key_type, self.func, *args, **kwargs)
        cache_operation = getattr(factory._generic_cache, method)
        return cache_operation(key)
    def get(self, *args, **kwargs):
        return self._call_cache("get_from_cache", *args, **kwargs)
    def flush(self, *args, **kwargs):
        return self._call_cache("flush", *args, **kwargs)
2f1d202afacb6d19329611afcd48d3234c3a89aa | 438 | py | Python | backend/app/api/data_routes.py | shaunmulligan/hypecycle | aa368193934cbd9c73ab1cc541c6cc193469849c | [
"MIT"
] | 1 | 2022-03-18T01:14:09.000Z | 2022-03-18T01:14:09.000Z | backend/app/api/data_routes.py | shaunmulligan/hypecycle | aa368193934cbd9c73ab1cc541c6cc193469849c | [
"MIT"
] | 25 | 2022-01-16T15:30:06.000Z | 2022-01-19T18:41:50.000Z | backend/app/api/data_routes.py | shaunmulligan/hypecycle | aa368193934cbd9c73ab1cc541c6cc193469849c | [
"MIT"
] | null | null | null | from typing import List
from fastapi import APIRouter, HTTPException, Path
from app.api import gps_crud
from app.api.models import GpsReadingDB
router = APIRouter()
@router.get("/location/")
async def get_location():
"""
Get the last location from DB
"""
location = await gps_crud.get_last_location()
if not location:
raise HTTPException(status_code=404, detail="no location not found")
return location | 24.333333 | 76 | 0.726027 |
866d1d82859f0707ea87617d522768095d8d9417 | 9,262 | py | Python | tools/test_check_kconfigs.py | thomasonw/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2021-04-18T15:44:42.000Z | 2021-04-18T15:44:42.000Z | tools/test_check_kconfigs.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-02-15T06:43:13.000Z | 2019-02-15T06:43:13.000Z | tools/test_check_kconfigs.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-02-14T22:29:30.000Z | 2019-02-14T22:29:30.000Z | #!/usr/bin/env python
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from check_kconfigs import LineRuleChecker
from check_kconfigs import InputError
from check_kconfigs import IndentAndNameChecker
from check_kconfigs import CONFIG_NAME_MAX_LENGTH
class ApplyLine(object):
    """Mixin with helpers that feed single lines into ``self.checker``
    (created by the concrete TestCase's setUp) and assert on the outcome."""
    def apply_line(self, string):
        # Checkers consume complete lines; append the newline they expect.
        self.checker.process_line(string + '\n', 0)
    def expect_error(self, string, expect, cleanup=None):
        # Processing ``string`` must raise InputError; when ``expect`` is
        # truthy, the checker's suggested replacement line must match it.
        try:
            with self.assertRaises(InputError) as cm:
                self.apply_line(string)
            if expect:
                self.assertEqual(cm.exception.suggested_line, expect + '\n')
        finally:
            if cleanup:
                # cleanup of the previous failure
                self.apply_line(cleanup)
    def expt_success(self, string):
        # The line must be accepted without raising.
        self.apply_line(string)
class TestLineRuleChecker(unittest.TestCase, ApplyLine):
    """Per-line rules: tab characters, trailing whitespace and line length."""
    def setUp(self):
        self.checker = LineRuleChecker('Kconfig')
    def tearDown(self):
        pass
    def test_tabulators(self):
        self.expect_error('\ttest', expect=' test')
        self.expect_error('\t test', expect=' test')
        self.expect_error(' \ttest', expect=' test')
        self.expect_error(' \t test', expect=' test')
        self.expt_success(' test')
        self.expt_success('test')
    def test_trailing_whitespaces(self):
        self.expect_error(' ', expect='')
        self.expect_error(' ', expect='')
        self.expect_error('test ', expect='test')
        self.expt_success('test')
        self.expt_success('')
    def test_line_length(self):
        # 119 characters is the maximum accepted length.
        self.expect_error('x' * 120, expect=None)
        self.expt_success('x' * 119)
        self.expt_success('')
class TestIndentAndNameChecker(unittest.TestCase, ApplyLine):
    """Base fixture wiring an IndentAndNameChecker into the ApplyLine mixin."""
    def setUp(self):
        self.checker = IndentAndNameChecker('Kconfig')
        # Require config-name prefixes of at least 4 characters by default.
        self.checker.min_prefix_length = 4
    def tearDown(self):
        # The checker validates its remaining state on exit.
        self.checker.__exit__('Kconfig', None, None)
class TestIndent(TestIndentAndNameChecker):
    """Indentation rules for menus, configs, help blocks, if/endif and
    backslash continuations. Each test feeds an ordered line sequence to a
    stateful checker, so line order within a test is significant."""
    def setUp(self):
        super(TestIndent, self).setUp()
        self.checker.min_prefix_length = 0 # prefixes are ignored in this test case
    def test_indent_characters(self):
        self.expt_success('menu "test"')
        self.expect_error(' test', expect=' test')
        self.expect_error(' test', expect=' test')
        self.expect_error(' test', expect=' test')
        self.expect_error(' test', expect=' test')
        self.expt_success(' test')
        self.expt_success(' test2')
        self.expt_success(' config')
        self.expect_error(' default', expect=' default')
        self.expt_success(' help')
        self.expect_error(' text', expect=' text')
        self.expt_success(' help text')
        self.expt_success(' menu')
        self.expt_success(' endmenu')
        self.expect_error(' choice', expect=' choice', cleanup=' endchoice')
        self.expect_error(' choice', expect=' choice', cleanup=' endchoice')
        self.expt_success(' choice')
        self.expt_success(' endchoice')
        self.expt_success(' config')
        self.expt_success('endmenu')
    def test_help_content(self):
        self.expt_success('menu "test"')
        self.expt_success(' config')
        self.expt_success(' help')
        self.expt_success(' description')
        self.expt_success(' config keyword in the help')
        self.expt_success(' menu keyword in the help')
        self.expt_success(' menuconfig keyword in the help')
        self.expt_success(' endmenu keyword in the help')
        self.expt_success(' endmenu keyword in the help')
        self.expt_success('') # newline in help
        self.expt_success(' endmenu keyword in the help')
        self.expect_error(' menu "real menu with wrong indent"',
                          expect=' menu "real menu with wrong indent"', cleanup=' endmenu')
        self.expt_success('endmenu')
    def test_mainmenu(self):
        self.expt_success('mainmenu "test"')
        self.expect_error('test', expect=' test')
        self.expt_success(' not_a_keyword')
        self.expt_success(' config')
        self.expt_success(' menuconfig')
        self.expect_error('test', expect=' test')
        self.expect_error(' test', expect=' test')
        self.expt_success(' menu')
        self.expt_success(' endmenu')
    def test_ifendif(self):
        self.expt_success('menu "test"')
        self.expt_success(' config')
        self.expt_success(' help')
        self.expect_error(' if', expect=' if', cleanup=' endif')
        self.expt_success(' if')
        self.expect_error(' config', expect=' config')
        self.expt_success(' config')
        self.expt_success(' help')
        self.expt_success(' endif')
        self.expt_success(' config')
        self.expt_success('endmenu')
    def test_config_without_menu(self):
        self.expt_success('menuconfig')
        self.expt_success(' help')
        self.expt_success(' text')
        self.expt_success('')
        self.expt_success(' text')
        self.expt_success('config')
        self.expt_success(' help')
    def test_backslashes(self):
        self.expt_success('default \\')
        self.expect_error('help', expect=None)
        self.expt_success(' CONFIG')
        self.expt_success('default \\')
        self.expt_success(' LINE1\\')
        self.expt_success(' LINE2')
        self.expt_success('help')
class TestName(TestIndentAndNameChecker):
    """Config/choice name checks (length limit and trailing backslashes);
    prefix checking is disabled for these tests."""
    def setUp(self):
        super(TestName, self).setUp()
        self.checker.min_prefix_length = 0 # prefixes are ignored in this test case
    def test_name_length(self):
        max_length = CONFIG_NAME_MAX_LENGTH
        too_long = max_length + 1
        self.expt_success('menu "test"')
        self.expt_success(' config ABC')
        self.expt_success(' config ' + ('X' * max_length))
        self.expect_error(' config ' + ('X' * too_long), expect=None)
        self.expt_success(' menuconfig ' + ('X' * max_length))
        self.expect_error(' menuconfig ' + ('X' * too_long), expect=None)
        self.expt_success(' choice ' + ('X' * max_length))
        self.expect_error(' choice ' + ('X' * too_long), expect=None)
        self.expt_success('endmenu')
    def test_config_backslash(self):
        # A name must not be split across lines with a continuation.
        self.expect_error('config\\', expect=None)
        self.expect_error('menuconfig\\', expect=None)
        self.expect_error('choice\\', expect=None)
class TestPrefix(TestIndentAndNameChecker):
    """Common-prefix rules: configs within a menu/choice must share a prefix
    of at least min_prefix_length characters (4 in this fixture)."""
    def test_prefix_len(self):
        self.expt_success('menu "test"')
        self.expt_success(' config ABC_1')
        self.expt_success(' config ABC_2')
        self.expt_success(' config ABC_DEBUG')
        self.expt_success(' config ABC_ANOTHER')
        self.expt_success('endmenu')
        self.expt_success('menu "test2"')
        self.expt_success(' config A')
        self.expt_success(' config B')
        # Closing the menu triggers the too-short-prefix error.
        self.expect_error('endmenu', expect=None)
    def test_choices(self):
        self.expt_success('menu "test"')
        self.expt_success(' choice ASSERTION_LEVEL')
        self.expt_success(' config ASSERTION_DEBUG')
        self.expt_success(' config ASSERTION_RELEASE')
        self.expt_success(' menuconfig ASSERTION_XY')
        self.expt_success(' endchoice')
        self.expt_success(' choice DEBUG')
        self.expt_success(' config DE_1')
        self.expt_success(' config DE_2')
        self.expect_error(' endchoice', expect=None)
        self.expect_error('endmenu', expect=None)
    def test_nested_menu(self):
        self.expt_success('menu "test"')
        self.expt_success(' config DOESNT_MATTER')
        self.expt_success(' menu "inner menu"')
        self.expt_success(' config MENUOP_1')
        self.expt_success(' config MENUOP_2')
        self.expt_success(' config MENUOP_3')
        self.expt_success(' endmenu')
        self.expt_success('endmenu')
    def test_nested_ifendif(self):
        self.expt_success('menu "test"')
        self.expt_success(' config MENUOP_1')
        self.expt_success(' if MENUOP_1')
        self.expt_success(' config MENUOP_2')
        self.expt_success(' endif')
        self.expt_success('endmenu')
# Run the unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| 39.080169 | 97 | 0.604297 |
5c9cfbde3b7090ad3536985b2d3cc87d91476e41 | 10,861 | py | Python | simapi.py | RichieBrady/SimAPI-Python | 9bd07e9fd0b3b4f37c968b061791ef9c1c6d213c | [
"MIT"
] | 1 | 2020-04-08T19:40:10.000Z | 2020-04-08T19:40:10.000Z | simapi.py | RichieBrady/SimAPI-Python | 9bd07e9fd0b3b4f37c968b061791ef9c1c6d213c | [
"MIT"
] | 8 | 2020-04-12T08:52:21.000Z | 2021-09-22T18:34:19.000Z | simapi.py | RichieBrady/SimAPI-Python | 9bd07e9fd0b3b4f37c968b061791ef9c1c6d213c | [
"MIT"
] | 2 | 2020-04-08T08:52:15.000Z | 2021-04-24T12:44:39.000Z | import csv
import json
import logging
import polling2
import requests
import pandas as pd
# Endpoints of the (locally hosted) SimApi REST/GraphQL server.
user_url = 'http://127.0.0.1:8000/user/'
login_url = 'http://127.0.0.1:8000/login/'
init_url = 'http://127.0.0.1:8000/init_model/'
input_url = 'http://127.0.0.1:8000/input/'
output_url = 'http://127.0.0.1:8000/output/'
graphql_url = 'http://127.0.0.1:8000/graphql/'
send_fmu = 'http://127.0.0.1:8000/send_fmu/'
# TODO add utility method to prepare user csv e.g. add time step column etc.
class SimApi:

    def __init__(self, model_name, model_count, step_size, final_time, idf_path, epw_path, csv):
        """
        Class represents the programming interface exposed to a user of the SimApi system.
        :param model_name: (string) name of model must be unique
        :param model_count: (int) number of models to instantiate
        :param step_size: (int) size of each step per hour, value in seconds e.g. 4 steps per hour = 900 step size
        (15 minutes in seconds)
        :param final_time: (int) final runtime of model, value in hours. Will be changed to accommodate run times
        over a few days
        :param idf_path: (string) absolute path to .idf
        :param epw_path: (string) absolute path to .epw
        :param csv: (list) absolute path(s) to csv file(s), number of files must equal model count
        """
        self.logger = logging.getLogger('simapi')
        # getLogger returns a shared singleton; only attach a handler the
        # first time so repeated SimApi instantiations do not duplicate
        # every log line.
        if not self.logger.handlers:
            handler = logging.FileHandler('./simapi.log')
            formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        self.logger.setLevel(logging.INFO)

        self._header = None
        self._model_name = model_name
        self._model_count = model_count
        self._step_size = step_size
        self._final_time = final_time
        self._idf_path = idf_path
        self._epw_path = epw_path
        self._csv = csv  # note: parameter shadows the imported csv module inside __init__

        # model initialization parameters
        self._init_data = {
            'model_name': self._model_name,  # change name each time script is run!
            'container_id': None,  # TODO change container_id from hostname to src_simulator_*
            'model_count': self._model_count,
            'step_size': self._step_size,  # step size in seconds. 600 secs = 10 mins
            'final_time': self._final_time  # 24 hours = 86400 secs
        }
        self.sim_names = []

    @staticmethod
    def create_user(user_email='user@user.com', user_name='user', user_password='user user88'):
        """
        Creates new user
        :param user_email: (string) user email
        :param user_name: (string) user name
        :param user_password: (string) user password
        :return: the server response of the creation request
        """
        # TODO add check for existing user
        json_data = {
            "name": user_name,
            "email": user_email,
            "password": user_password
        }
        return requests.post(user_url, data=json_data)

    def login(self, username="user@user.com", password="user user88"):
        """
        Login as current user and store user token as a header dictionary to be used in requests
        :param username: (string) user name
        :param password: (string) user password
        :return: (int) 200 on success, None otherwise (header left unset)
        """
        data = {"username": username,  # username = email
                "password": password}
        print(login_url)  # debug output of the endpoint being hit
        resp = requests.post(login_url, data=data)
        if resp.status_code == 200:
            json_resp = resp.json()
            token = json_resp['token']  # get validation token
            self._header = {'Authorization': 'Token ' + token}  # set request header
            return resp.status_code

    def send_and_generate(self):
        """
        Send files needed to generate an fmu. return when fmu has finished generating.
        :return: (int) status code of request, 201 if success
        """
        # Use context managers so both files are closed even when the POST
        # raises (the previous explicit close() calls leaked file handles
        # on request errors).
        with open(self._idf_path, 'rb') as idf_file, open(self._epw_path, 'rb') as epw_file:
            file = {'idf_file': ('update.idf', idf_file),
                    'epw_file': ('update.epw', epw_file)}
            resp = requests.post(init_url, headers=self._header, data=self._init_data, files=file)
        return resp.status_code

    def send_and_init(self):
        """
        send data and initialize model as a simulation object, returns when simulation object has finished initializing
        :return: (int) status code of request, 200 if success
        """
        resp = requests.post(send_fmu, headers=self._header, json=self._init_data)
        # graphql query for all models in db related to initial_model_name.
        model_query = """
        {{
            fmuModels(modelN: "{0}"){{
                modelName
            }}
        }}
        """.format(self._model_name)
        r = requests.get(url=graphql_url, json={'query': model_query}).json()['data']['fmuModels']
        # TODO check if model count = initialized_model_count and relay to user,
        #  account for case when initialized_model_count < model count
        # initialized_model_count = len(r)
        # prints init_data on successful post
        return resp.status_code

    # TODO split into multiple methods giving the user more control over simulations
    def simulate_models(self):
        """
        Starts communication with simulation model and returns when model has reached its final time
        :return: (int) 200 for success
        """
        def test_method(query, url):
            # Poll helper: number of output records currently stored server-side.
            resp = requests.get(url=url, json={'query': query})
            json_data = resp.json()['data']['outputs']
            # self.logger.info("Output current length: {}".format(len(json_data)))
            return len(json_data)

        # TODO needs rework asap
        # query for all models in db related to initial_model_name.
        model_query = """
        {{
            fmuModels(modelN: "{0}"){{
                modelName
            }}
        }}
        """.format(self._model_name)
        r = requests.get(url=graphql_url, json={'query': model_query})
        i = 0
        while i < self._model_count:
            name = r.json()['data']['fmuModels'][i]['modelName']  # extract model name from graphql query response
            print(name)
            self.sim_names.append(name)  # store extracted model names.
            i += 1
        f_time = 60 * 60 * self._final_time
        data_frames = []
        for file in self._csv:
            data_frames.append(pd.read_csv(file))
        i = 0  # first step
        while i < f_time:
            j = 0
            # TODO process models async client side!
            while j < self._model_count:
                # TODO store dataframe in generator method and call next each iter
                # One csv per model when several were supplied; otherwise all
                # models share the single csv.
                if len(data_frames) > 1:
                    df = data_frames[j]
                else:
                    df = data_frames[0]
                row = df.loc[df['time_step'] == i]
                input_dict = row.to_dict('records')
                input_dict = input_dict[0]
                input_data = {
                    'fmu_model': self.sim_names[j],
                    'time_step': i,
                    'input_json': json.dumps(input_dict)
                }
                r = requests.post(input_url, headers=self._header, data=input_data)
                print(r.text + ' ' + str(r.status_code))
                j += 1
            output_query = """
            {{
                outputs(modelN: "{0}", tStep: {1}) {{
                    outputJson
                }}
            }}
            """.format(self._model_name, i)
            # Wait until every model has produced an output for this step.
            try:
                polling2.poll(
                    lambda: test_method(query=output_query, url=graphql_url) == self._model_count,
                    step=0.1,
                    timeout=60)
            except polling2.TimeoutException:
                print("Timeout error occurred\nLength of results is: {}".format(
                    test_method(query=output_query, url=graphql_url)))
            i += self._step_size
        # send empty input to kill and restart process in sim container(s)
        k = 0
        while k < self._model_count:
            input_data = {
                'fmu_model': self.sim_names[k],
                'time_step': 0,
                'input_json': json.dumps({"end_proc": -1})
            }
            r = requests.post(input_url, headers=self._header, data=input_data)
            k += 1
        print("\nAll data sent to simulation\n")
        return 200

    def request_model_outputs(self, sim_name):
        """Wait for all outputs of ``sim_name`` and write them to
        output_csv/<sim_name>.csv."""
        f_time = 60 * 60 * self._final_time
        num_of_steps = f_time / self._step_size
        self.logger.info("Expected number of steps: {}".format(num_of_steps))

        def test_method(query, url):
            resp = requests.get(url=url, json={'query': query})
            json_data = resp.json()['data']['outputs']
            self.logger.info("Output current length: {}".format(len(json_data)))
            return len(json_data)

        output_query = """
        {{
            outputs(modelN: "{0}") {{
                timeStep
                outputJson
            }}
        }}
        """.format(sim_name)
        print("Processing remaining inputs...")
        try:
            polling2.poll(
                lambda: test_method(query=output_query, url=graphql_url) == num_of_steps,
                step=0.1,
                poll_forever=True)
        except polling2.TimeoutException:
            print("Timeout error occurred\nLength of results is: {}".format(test_method(query=output_query, url=graphql_url)))
        json_output = requests.get(url=graphql_url, json={'query': output_query}).json()['data']['outputs']
        # TODO store list of output names and use as csv column
        print("Retrieving outputs...")
        try:
            csv_columns = ['time_step', 'output']
            with open(f'output_csv/{sim_name}.csv', 'w') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
                writer.writeheader()
                for out in json_output:
                    writer.writerow({'time_step': out['timeStep'], 'output': json.loads(out['outputJson'])})
        except IOError:
            print("I/O error")

    @staticmethod
    def multi_thread_client(self):
        """
        Let user make multi-threaded requests, simulations per thread = (number of sims / available threads).
        Avoid sequential processing of container requests client side.
        :return:
        """
        # Not implemented yet. Raise instead of returning the exception
        # class itself, so callers fail loudly rather than receiving the
        # NotImplementedError type as a "result".
        raise NotImplementedError
| 38.514184 | 126 | 0.564129 |
deed3ce45bb05f06e51243af3cb0be75e5da73f8 | 247 | py | Python | tests/data/expected/main/main_nested_directory/definitions/drink/coffee.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 891 | 2019-07-23T04:23:32.000Z | 2022-03-31T13:36:33.000Z | tests/data/expected/main/main_nested_directory/definitions/drink/coffee.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 663 | 2019-07-23T09:50:26.000Z | 2022-03-29T01:56:55.000Z | tests/data/expected/main/main_nested_directory/definitions/drink/coffee.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 108 | 2019-07-23T08:50:37.000Z | 2022-03-09T10:50:22.000Z | # generated by datamodel-codegen:
# filename: definitions/drink/coffee.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from enum import Enum
# Auto-generated enum of coffee choices; member values mirror the JSON
# schema's string constants.
class Coffee(Enum):
    Black = 'Black'
    Espresso = 'Espresso'
| 19 | 44 | 0.724696 |
21a00a4ace3376c64a4370e05fd7b9812b08b62f | 5,483 | py | Python | tools/sync_mediawiki.py | CHChang810716/libsimdpp | 669bbffb9a94660f83c1c2e46af6098a8e993caa | [
"BSL-1.0"
] | 975 | 2015-01-08T08:30:17.000Z | 2022-03-30T02:57:58.000Z | tools/sync_mediawiki.py | abique/libsimdpp | a42deb7e3ca75c5ebaca0a334f18f1ab9822db02 | [
"BSL-1.0"
] | 125 | 2015-05-06T09:25:14.000Z | 2022-03-23T01:07:07.000Z | tools/sync_mediawiki.py | abique/libsimdpp | a42deb7e3ca75c5ebaca0a334f18f1ab9822db02 | [
"BSL-1.0"
] | 140 | 2015-05-03T18:56:00.000Z | 2022-03-22T07:14:46.000Z | #!/usr/bin/env python3
# Copyright (C) 2016-2017 Povilas Kanapickas <povilas@radix.lt>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# This script depends on pywikibot framework. Install using
# pip install pywikibot --pre
import os
# pywikibot loads 'user-config.py' file in current directory by default
# disable this behavior. We want the same information to be supplied via
# command line.
os.environ['PYWIKIBOT2_NO_USER_CONFIG']='1'
import pywikibot
import pywikibot.config2
import pywikibot.pagegenerators
import pywikibot.data.api
import argparse
import itertools
import shutil
import sys
SYNC_DIRECTION_UPLOAD = 1
SYNC_DIRECTION_DOWNLOAD = 2
def get_path_from_title(title):
    """Map a wiki page title to a repository-relative ``.mwiki`` path.

    Spaces become underscores; namespace (':') and subpage ('/')
    separators become directory separators; empty components are dropped.
    """
    normalized = title.replace(' ', '_').replace(':', '/')
    components = [part for part in normalized.split('/') if part]
    return '/'.join(components) + '.mwiki'
def fix_whitespace(text):
    """Return *text* with trailing whitespace stripped from every line and
    exactly one trailing newline appended.

    MediaWiki strips the final newline on save, so both sides are
    normalized before comparison to avoid spurious diffs.
    """
    stripped_lines = (line.rstrip() for line in text.splitlines())
    return '\n'.join(stripped_lines) + '\n'
def sync_single_page(page, direction, dest_root):
    """Synchronize one wiki page with its ``.mwiki`` file under *dest_root*.

    For SYNC_DIRECTION_UPLOAD: push the local file (if one exists) to the
    wiki when the whitespace-normalized contents differ.
    For SYNC_DIRECTION_DOWNLOAD: write the wiki text to the local file,
    creating parent directories as needed.
    """
    title = page.title()
    # get_redirect=True: also fetch redirect pages instead of raising.
    text = page.get(get_redirect=True)
    dest_path = os.path.join(dest_root, get_path_from_title(title))
    if direction == SYNC_DIRECTION_UPLOAD:
        # A page with no local counterpart is left untouched on the wiki.
        if not os.path.exists(dest_path):
            return
        with open(dest_path, 'r') as file:
            new_text = file.read()
        # Compare normalized text so trailing-whitespace-only differences
        # do not cause a wiki edit.
        if fix_whitespace(text) != fix_whitespace(new_text):
            page.put(new_text, 'sync with git')
            print('Uploaded {0}'.format(title))
    elif direction == SYNC_DIRECTION_DOWNLOAD:
        dest_dir = os.path.dirname(dest_path)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        with open(dest_path, 'w') as file:
            file.write(fix_whitespace(text))
        print('Downloaded {0}'.format(dest_path))
def remove_no_longer_existing_pages(pages, dest_root):
    """Delete local ``.mwiki`` files whose wiki page no longer exists.

    *pages* is the full list of current wiki pages; any file under
    *dest_root* that does not correspond to one of them is removed.
    """
    local_paths = set()
    for cur_dir, _dirnames, filenames in os.walk(dest_root):
        rel_dir = os.path.relpath(cur_dir, dest_root)
        for filename in filenames:
            rel_path = os.path.join(rel_dir, filename)
            # Files directly under dest_root come back as './name'.
            if rel_path.startswith('./'):
                rel_path = rel_path[2:]
            local_paths.add(rel_path)
    remote_paths = {get_path_from_title(page.title()) for page in pages}
    for stale_path in local_paths - remote_paths:
        os.remove(os.path.join(dest_root, stale_path))
def perform_sync(url, direction, dest_root, user, password):
    """Synchronize all main (ns 0) and template (ns 10) pages of the wiki
    at *url* with the local directory *dest_root*.

    On download the destination directory is deleted and re-created so it
    mirrors the wiki exactly.
    """
    if direction == SYNC_DIRECTION_DOWNLOAD:
        if os.path.exists(dest_root):
            shutil.rmtree(dest_root)
        os.makedirs(dest_root)
    # Supply information to config that would otherwise be defined in
    # user-config.py
    pywikibot.config2.family = 'libsimdpp'
    pywikibot.config2.mylang = 'en'
    pywikibot.config2.family_files['libsimdpp'] = url
    pywikibot.config2.step = 100
    pywikibot.config2.put_throttle = 0
    site = pywikibot.Site(user=user, fam='libsimdpp')
    # pywikibot.login.LoginManager seems to be not fully implemented and broken
    # Comments in the source suggest that data.api module contains full
    # implementation. Use it instead.
    login_manager = pywikibot.data.api.LoginManager(password=password,
                                                    site=site, user=user)
    login_manager.login()
    # Namespace 0 = articles, namespace 10 = templates.
    pages = itertools.chain(
        pywikibot.pagegenerators.AllpagesPageGenerator(namespace=0, site=site),
        pywikibot.pagegenerators.AllpagesPageGenerator(namespace=10, site=site)
    )
    # Batch-fetch page contents (100 at a time) instead of one HTTP
    # request per page.
    pages = pywikibot.pagegenerators.PreloadingGenerator(pages, groupsize=100)
    pages = list(pages)
    for page in pages:
        sync_single_page(page, direction, dest_root)
    if direction == SYNC_DIRECTION_DOWNLOAD:
        remove_no_longer_existing_pages(pages, dest_root)
def main():
    """Parse command-line arguments and run the wiki<->git sync."""
    parser = argparse.ArgumentParser(prog='sync_mediawiki')
    parser.add_argument('url', type=str,
                        help='URL to root of a MediaWiki instance')
    parser.add_argument('direction', type=str,
                        help='"upload" or "download"')
    parser.add_argument('destination_root', type=str,
                        help='Destination directory to place results to')
    parser.add_argument('user', type=str,
                        help='Username to perform bot operations under')
    parser.add_argument('password', type=str,
                        help='User password to authenticate with')
    args = parser.parse_args()

    # Map the direction keyword onto the module-level constants.
    direction_by_name = {
        'upload': SYNC_DIRECTION_UPLOAD,
        'download': SYNC_DIRECTION_DOWNLOAD,
    }
    direction = direction_by_name.get(args.direction)
    if direction is None:
        print('Incorrect direction option. Expected "upload" or "download"')
        sys.exit(1)

    # Downloading wipes the destination, so refuse the current directory.
    if args.destination_root in ('', '.'):
        print("The output directory can not be the current directory")
        sys.exit(1)

    perform_sync(args.url, direction, args.destination_root,
                 args.user, args.password)
if __name__ == '__main__':
main()
| 34.055901 | 79 | 0.668794 |
ae57a5fe6833a418c20a7352671eb54b5849065e | 10,911 | py | Python | manila_tempest_tests/tests/api/test_shares_negative.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | 1 | 2015-05-28T22:28:08.000Z | 2015-05-28T22:28:08.000Z | manila_tempest_tests/tests/api/test_shares_negative.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | 5 | 2015-08-13T15:17:28.000Z | 2016-08-02T02:55:01.000Z | manila_tempest_tests/tests/api/test_shares_negative.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | 2 | 2015-08-29T08:19:58.000Z | 2016-08-02T02:46:10.000Z | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config # noqa
from tempest import test # noqa
from tempest_lib import exceptions as lib_exc # noqa
import testtools # noqa
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
CONF = config.CONF
class SharesNegativeTest(base.BaseSharesTest):
    """Negative (error-path) API tests for manila shares.

    Every test asserts that an invalid or forbidden request raises the
    expected tempest-lib exception (BadRequest, NotFound or Forbidden).
    """

    @classmethod
    def resource_setup(cls):
        # Shared public share reused by the *_other_tenants_* tests below.
        super(SharesNegativeTest, cls).resource_setup()
        cls.share = cls.create_share(
            name='public_share',
            description='public_share_desc',
            size=1,
            is_public=True,
            metadata={'key': 'value'}
        )

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_invalid_protocol(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share,
                          share_protocol="nonexistent_protocol")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_wrong_public_value(self):
        # is_public must be a boolean-like value, not an arbitrary string.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, is_public='truebar')

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_update_share_with_wrong_public_value(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.update_share, self.share["id"],
                          is_public="truebar")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_get_share_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound, self.shares_client.get_share,
                          "wrong_share_id")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_get_share_without_passing_share_id(self):
        # Should not be able to get share when empty ID is passed
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.get_share, '')

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_list_shares_nonadmin_with_nonexistent_share_server_filter(self):
        # filtering by share server allowed only for admins by default
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.list_shares_with_detail,
                          {'share_server_id': 'fake_share_server_id'})

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_delete_share_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share,
                          "wrong_share_id")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_delete_share_without_passing_share_id(self):
        # Should not be able to delete share when empty ID is passed
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_share, '')

    @test.attr(type=["negative", "smoke", "gate", ])
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_snapshot_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_snapshot,
                          "wrong_share_id")

    @test.attr(type=["negative", "smoke", "gate", ])
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_delete_snapshot_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_snapshot,
                          "wrong_share_id")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_invalid_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size="#$%")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_out_passing_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size="")

    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_zero_size(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share, size=0)

    @test.attr(type=["negative", "gate", ])
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_try_delete_share_with_existing_snapshot(self):
        # share can not be deleted while snapshot exists
        # create share
        share = self.create_share()
        # create snapshot
        self.create_snapshot_wait_for_active(share["id"])
        # try delete share
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.delete_share, share["id"])

    @test.attr(type=["negative", "gate", ])
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_share_from_snap_with_less_size(self):
        # requires minimum 5Gb available space
        skip_msg = "Check disc space for this test"
        try: # create share
            share = self.create_share(size=2, cleanup_in_class=False)
        except share_exceptions.ShareBuildErrorException:
            self.skip(skip_msg)
        try: # create snapshot
            snap = self.create_snapshot_wait_for_active(
                share["id"], cleanup_in_class=False)
        except share_exceptions.SnapshotBuildErrorException:
            self.skip(skip_msg)
        # try create share from snapshot with less size
        self.assertRaises(lib_exc.BadRequest,
                          self.create_share,
                          size=1, snapshot_id=snap["id"],
                          cleanup_in_class=False)

    @test.attr(type=["negative", "smoke", "gate", ])
    @testtools.skipIf(not CONF.share.multitenancy_enabled,
                      "Only for multitenancy.")
    def test_create_share_with_nonexistant_share_network(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_share,
                          share_network_id="wrong_sn_id")

    @test.attr(type=["negative", "smoke", "gate", ])
    @testtools.skipIf(not CONF.share.multitenancy_enabled,
                      "Only for multitenancy.")
    @testtools.skipUnless(CONF.share.run_snapshot_tests,
                          "Snapshot tests are disabled.")
    def test_create_share_from_snap_with_different_share_network(self):
        # A share created from a snapshot must reuse the parent's share
        # network; supplying a different one is rejected.
        # create share
        share = self.create_share(cleanup_in_class=False)
        # get parent's share network
        parent_share = self.shares_client.get_share(share["id"])
        parent_sn = self.shares_client.get_share_network(
            parent_share["share_network_id"])
        # create new share-network - net duplicate of parent's share
        new_duplicated_sn = self.create_share_network(
            cleanup_in_class=False,
            neutron_net_id=parent_sn["neutron_net_id"],
            neutron_subnet_id=parent_sn["neutron_subnet_id"],
        )
        # create snapshot of parent share
        snap = self.create_snapshot_wait_for_active(
            share["id"], cleanup_in_class=False)
        # try create share with snapshot using another share-network
        # 400 bad request is expected
        self.assertRaises(
            lib_exc.BadRequest,
            self.create_share,
            cleanup_in_class=False,
            share_network_id=new_duplicated_sn["id"],
            snapshot_id=snap["id"],
        )

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_update_other_tenants_public_share(self):
        # A public share is readable but not writable by other tenants.
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden, isolated_client.update_share,
                          self.share["id"], name="new_name")

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.delete_share,
                          self.share['id'])

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_set_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.set_metadata,
                          self.share['id'],
                          {'key': 'value'})

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_update_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.update_all_metadata,
                          self.share['id'],
                          {'key': 'value'})

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_metadata_of_other_tenants_public_share(self):
        isolated_client = self.get_client_with_isolated_creds(
            type_of_creds='alt')
        self.assertRaises(lib_exc.Forbidden,
                          isolated_client.delete_metadata,
                          self.share['id'],
                          'key')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_by_share_server_by_user(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.list_shares,
                          params={'share_server_id': 12345})

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_manage_share_by_user(self):
        # manage/unmanage are admin-only operations.
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.manage_share,
                          'fake-host', 'nfs', '/export/path',
                          'fake-type')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_unmanage_share_by_user(self):
        self.assertRaises(lib_exc.Forbidden,
                          self.shares_client.unmanage_share,
                          'fake-id')
| 41.965385 | 79 | 0.620841 |
2787105eab5a75f5ab728a66c204ceedeaf4f3ea | 2,041 | py | Python | src/libraries/enet.py | kirirowy/mai-bot | f6b50083d5fa335d01b831f23c5b2d20bb2a8446 | [
"MIT"
] | 1 | 2022-02-27T06:56:39.000Z | 2022-02-27T06:56:39.000Z | src/libraries/enet.py | kiritowy/mai-bot | f6b50083d5fa335d01b831f23c5b2d20bb2a8446 | [
"MIT"
] | null | null | null | src/libraries/enet.py | kiritowy/mai-bot | f6b50083d5fa335d01b831f23c5b2d20bb2a8446 | [
"MIT"
] | null | null | null | # 列举文件
from asyncio.log import logger
import asyncio
from fileinput import filename
from sqlite3 import Timestamp
from unittest import result
import oss2
import time
from itertools import islice
AccessKeyId = ""
AccessKeySecret = ""
oss_url="oss-cn-guangzhou.aliyuncs.com"
BuckerName="mai2photo"
auth = oss2.Auth(AccessKeyId,AccessKeySecret)
bucket = oss2.Bucket(auth,oss_url,BuckerName)
folder=""
def allphoto(userid: str):
    """Return upload times (as ``YYYYmmddHHMMSS`` strings) of all photos in
    the OSS bucket that belong to *userid*.

    Object keys are expected to look like ``<userid>-<unix_timestamp>.jpg``;
    keys that do not match this shape are skipped.  Uses the module-level
    ``bucket``/``folder`` configuration.
    """
    photolist = []
    for obj in oss2.ObjectIterator(bucket, prefix=folder):
        key = str(obj.key)
        # partition is safe when the key contains no '-' (the original
        # split('-')[1] indexing would raise IndexError in that case).
        owner, sep, remainder = key.partition('-')
        if not sep or owner != str(userid):
            continue
        # "<timestamp>.jpg" -> timestamp
        timestamp = int(remainder.split('.')[0])
        time_array = time.localtime(timestamp)
        photolist.append(time.strftime("%Y%m%d%H%M%S", time_array))
    return photolist
# allphoto(userid='45601466')
def downloadPhoto(userid: str, photo: str):
    """Download the photo taken at *photo* (``YYYYmmddHHMMSS``) for
    *userid* from OSS into the bot's static directory.

    Returns the OSS object name ``<userid>-<unix_timestamp>.jpg``.
    """
    time_array = time.strptime(str(photo), "%Y%m%d%H%M%S")
    timestamp = int(time.mktime(time_array))
    photoname = f"{str(userid)}-{str(timestamp)}.jpg"
    # Reuse the module-level authenticated bucket instead of re-creating
    # auth + bucket objects on every call.
    # Downloads the object to the local path; an existing file is
    # overwritten, a missing one is created.
    bucket.get_object_to_file(photoname, f"/home/pi/mai-bot/src/static/{photoname}")
    return photoname
493818e2c58b7a73cfe18d4f8e9b968fc4f3c5f1 | 3,286 | py | Python | dataset.py | m-zayan/ml_utils | c93dcca6362b22d3b9e4652593695b38a8e73763 | [
"MIT"
] | null | null | null | dataset.py | m-zayan/ml_utils | c93dcca6362b22d3b9e4652593695b38a8e73763 | [
"MIT"
] | null | null | null | dataset.py | m-zayan/ml_utils | c93dcca6362b22d3b9e4652593695b38a8e73763 | [
"MIT"
] | null | null | null | from typing import Union
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston, load_diabetes, load_breast_cancer, load_iris, load_digits
from sklearn.model_selection import train_test_split
class Load:
    """Convenience loader for a handful of scikit-learn toy datasets.

    Parameters
    --------
    dtype (str): 'pandas' or 'numpy'.  With 'numpy' the loaders return a
        tuple ``(X, Y)``; with 'pandas' a single DataFrame whose last
        column is named ``'target'``.
    """

    def __init__(self, dtype: str = 'numpy'):
        self.type = {'pandas': 0, 'numpy': 1}
        # Raises KeyError for unsupported dtype values.
        self.is_numpy = self.type[dtype]
        # Registry mapping dataset name -> sklearn loader function.
        self.d = {'boston': load_boston,
                  'iris': load_iris,
                  'breast_cancer': load_breast_cancer,
                  'diabetes': load_diabetes,
                  'digits': load_digits}

    def __load__(self, dname: str):
        """Load dataset *dname* and print a short summary.

        Parameters
        --------
        dname (str): dataset name (available: boston, iris, breast_cancer,
            diabetes, digits)

        Returns
        --------
        Union[pd.DataFrame, tuple]: ``(X, y)`` arrays when ``is_numpy`` is
            truthy, otherwise a DataFrame with a ``'target'`` column.
        """
        if self.is_numpy:
            # return_X_y=True: explicit keyword instead of a bare
            # positional True.
            x, y = self.d[dname](return_X_y=True)
            print({'Name': dname.upper(), 'n_samples': x.shape[0],
                   'n_features': x.shape[1]})
            return x, y
        data_dict = self.d[dname]()
        data = pd.DataFrame(data=np.c_[data_dict['data'], data_dict['target']],
                            columns=list(data_dict['feature_names']) + ['target'])
        print({'Name': dname.upper(), 'n_samples': data.shape[0],
               'n_features': data.shape[1] - 1})
        return data

    def load_data(self, dname: str):
        """Public wrapper around :meth:`__load__`."""
        return self.__load__(dname)

    def load_train_test(self, dname: str, test_size: float, ignore_type: bool = False, random_state: int = None):
        """Split dataset *dname* into train and test parts.

        Parameters
        ----------
        dname (str): dataset name
        test_size (float): test set fraction/size
        ignore_type (bool): allow splitting a pandas dataframe as well
        random_state (int): random_state of train, test split

        Returns
        -------
        dict: keys = ['train', 'test'], values are (x, y) pairs where y
            is a column vector of shape (n, 1)

        Raises
        ------
        TypeError: If ignore_type is False and this loader was created
            with dtype='pandas'
        """
        if not self.is_numpy and ignore_type is False:
            raise TypeError('is_numpy != True, data should be a numpy.array, '
                            'or set ignore_type=True')
        if self.is_numpy:
            x, y = self.__load__(dname)
        else:
            df = self.__load__(dname)
            x, y = df.iloc[:, :-1], df['target']
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=test_size, random_state=random_state)
        # np.asarray makes the reshape work for pandas Series too: the
        # original `series[:, np.newaxis]` indexing raised on Series.
        y_train = np.asarray(y_train)[:, np.newaxis]
        y_test = np.asarray(y_test)[:, np.newaxis]
        return {'train': (x_train, y_train), 'test': (x_test, y_test)}
| 30.425926 | 117 | 0.557212 |
e4d562b130891e09c88c8758ebf2092ba37e498e | 2,964 | py | Python | 2017/day_22/solution_p2.py | rvaughan/AdventOfCode2017 | fb9199282c0083cd0b3072c27e63ea83d866efc2 | [
"MIT"
] | null | null | null | 2017/day_22/solution_p2.py | rvaughan/AdventOfCode2017 | fb9199282c0083cd0b3072c27e63ea83d866efc2 | [
"MIT"
] | null | null | null | 2017/day_22/solution_p2.py | rvaughan/AdventOfCode2017 | fb9199282c0083cd0b3072c27e63ea83d866efc2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
This code holds the solution for part 2 of day 22 of the Advent of Code for 2017.
"""
from collections import defaultdict
import sys
UP = (-1, 0)
DOWN = (1, 0)
LEFT = (0, -1)
RIGHT = (0, 1)
def default_factory():
    """Default cell state for the sparse grid: "." (clean)."""
    return "."
def turn_left(direction):
    """Return *direction* rotated 90 degrees counter-clockwise.

    Directions are (row_delta, col_delta) pairs with rows growing
    downward, so CCW maps (r, c) -> (-c, r).  Unlike the original
    four-way lookup, this works for any vector and never silently
    returns None.
    """
    row_d, col_d = direction
    return (-col_d, row_d)
def turn_right(direction):
    """Return *direction* rotated 90 degrees clockwise.

    Directions are (row_delta, col_delta) pairs with rows growing
    downward, so CW maps (r, c) -> (c, -r).  Works for any vector,
    never silently returns None.
    """
    row_d, col_d = direction
    return (col_d, -row_d)
def turn_around(direction):
    """Return the opposite of *direction* by negating both components.

    Works for any (row_delta, col_delta) vector instead of enumerating
    the four compass constants (which silently returned None otherwise).
    """
    row_d, col_d = direction
    return (-row_d, -col_d)
def do_moves(map, cur_pos, num_moves):
    """Run *num_moves* bursts of the part-2 (evolved) virus.

    map       -- defaultdict grid: "." clean, "W" weakened, "#" infected,
                 "F" flagged; mutated in place
    cur_pos   -- (row, col) starting position of the virus carrier
    num_moves -- number of bursts to simulate

    Returns the number of bursts that caused a node to become infected.
    Uses range() and inline rotation arithmetic instead of the Py2-only
    xrange/constant-lookup helpers, and reads each cell only once.
    """
    cur_direction = (-1, 0)  # start facing UP, as (row_delta, col_delta)
    infected = 0
    for _ in range(num_moves):
        row_d, col_d = cur_direction
        state = map[cur_pos]
        if state == ".":
            # Clean node: weaken it and turn left (CCW).
            map[cur_pos] = "W"
            cur_direction = (-col_d, row_d)
        elif state == "W":
            # Weakened node: infect it, keep going straight.
            map[cur_pos] = "#"
            infected += 1
        elif state == "#":
            # Infected node: flag it and turn right (CW).
            map[cur_pos] = "F"
            cur_direction = (col_d, -row_d)
        elif state == "F":
            # Flagged node: clean it and reverse direction.
            map[cur_pos] = "."
            cur_direction = (-row_d, -col_d)
        # Advance one step in the (possibly new) direction.
        cur_pos = (cur_pos[0] + cur_direction[0], cur_pos[1] + cur_direction[1])
    return infected
def load_map(filename):
    """Read the initial grid from *filename*.

    Returns ``(map, cur_pos)`` where ``map`` is a defaultdict mapping
    (row, col) -> cell character (default "." for clean) and ``cur_pos``
    is the centre of the input square, where the virus carrier starts.

    Fixes vs. the original: the file handle is closed via ``with``;
    ``enumerate`` replaces ``zip(xrange(len(line)), line)``; integer
    division (``//``) keeps the centre calculation correct on Python 3.
    """
    map = defaultdict(lambda: ".")
    last_pos = (0, 0)
    with open(filename, "r") as f:
        for row_idx, line in enumerate(f):
            line = line.strip()
            last_pos = (row_idx, 0)
            for cell_idx, cell in enumerate(line):
                map[(row_idx, cell_idx)] = cell
                last_pos = (row_idx, cell_idx)
    # The carrier starts at the middle of the (odd-sized) input square.
    return map, (last_pos[0] // 2, last_pos[1] // 2)
def dump_map(map):
    """Print a fixed 15x15 window of the grid (rows/cols -5..9) with a
    zero-padded row label, for debugging failed test runs.

    Uses ``print(...)``/``range`` so the helper is valid on both Python 2
    and 3 (the original Py2-only ``print row`` statement is a Py3 syntax
    error).
    """
    for row_idx in range(-5, 10):
        cells = "".join(map[(row_idx, cell_idx)] for cell_idx in range(-5, 10))
        print("{:03} {}".format(row_idx, cells))
# Run some tests on the code.
# Sanity-check do_moves against the AoC day 22 part 2 example before
# running on the real puzzle input.
MAP, CUR_POS = load_map("test_map.txt")
INFECTIONS = do_moves(MAP, CUR_POS, 100)
if INFECTIONS != 26:
    print "First map moves failed. [{0}]".format(INFECTIONS)
    dump_map(MAP)
    sys.exit(-1)
print "First moves ok"
# Reload the map: do_moves mutates the grid in place.
MAP, CUR_POS = load_map("test_map.txt")
INFECTIONS = do_moves(MAP, CUR_POS, 10000000)
if INFECTIONS != 2511944:
    print "Second map moves failed. [{0}]".format(INFECTIONS)
    dump_map(MAP)
    sys.exit(-1)
print "Second moves ok"
print "All tests passed."
# All tests passing...
MAP, CUR_POS = load_map("input.txt")
print "Solution is: {0}".format(do_moves(MAP, CUR_POS, 10000000))
| 21.955556 | 81 | 0.58502 |
e40ea824239535adecf3f32b2f927f7beeb1c86f | 243 | py | Python | examples/delivery/market/basis.py | AlfonsoAgAr/binance-futures-connector-python | f0bd2c7b0576503bf526ce6be329ca2dae90fefe | [
"MIT"
] | 58 | 2021-11-22T11:46:27.000Z | 2022-03-30T06:58:53.000Z | examples/delivery/market/basis.py | sanjeevan121/binance-futures-connector-python | d820b73a15e9f64c80891a13694ca0c5d1693b90 | [
"MIT"
] | 15 | 2021-12-15T22:40:52.000Z | 2022-03-29T22:08:31.000Z | examples/delivery/market/basis.py | sanjeevan121/binance-futures-connector-python | d820b73a15e9f64c80891a13694ca0c5d1693b90 | [
"MIT"
] | 28 | 2021-12-10T03:56:13.000Z | 2022-03-25T22:23:44.000Z | #!/usr/bin/env python
import logging
from binance.delivery import Delivery as Client
from binance.lib.utils import config_logging
# Log all HTTP traffic so the raw request/response is visible.
config_logging(logging, logging.DEBUG)

# Unauthenticated client: the basis endpoint is public market data.
client = Client()
# Daily basis for the BTCUSD perpetual coin-margined futures contract.
logging.info(client.basis("BTCUSD","PERPETUAL","1d"))
0b2c2f48f8e80fe98b69f07277955aa582859635 | 1,233 | py | Python | candlestick/patterns/piercing_pattern.py | michalk21/candlestick-patterns | 8b44f405491b9f128a5e42a9c363893c2f93a12a | [
"MIT"
] | 212 | 2020-07-02T01:54:09.000Z | 2022-03-22T20:22:07.000Z | candlestick/patterns/piercing_pattern.py | dewmal/candlestick-patterns | 29cda62f42869def811a19ceb57118d2a4b5d68f | [
"MIT"
] | 2 | 2021-06-02T00:50:58.000Z | 2021-12-13T20:30:18.000Z | candlestick/patterns/piercing_pattern.py | dewmal/candlestick-patterns | 29cda62f42869def811a19ceb57118d2a4b5d68f | [
"MIT"
] | 74 | 2020-07-01T02:27:19.000Z | 2022-03-30T05:37:16.000Z | from candlestick.patterns.candlestick_finder import CandlestickFinder
class PiercingPattern(CandlestickFinder):
    """Detects the bullish "piercing pattern": a bearish candle followed by
    a candle that opens below the prior low and closes above the midpoint
    of the prior real body (but still below the prior open)."""

    def __init__(self, target=None):
        # The pattern spans 2 candles.
        super().__init__(self.get_class_name(), 2, target=target)

    def logic(self, idx):
        """Return True when the candle at ``idx`` completes a piercing
        pattern together with the preceding candle."""
        candle = self.data.iloc[idx]
        prev_candle = self.data.iloc[idx + 1 * self.multi_coeff]

        cur_close = candle[self.close_column]
        cur_open = candle[self.open_column]

        prev_close = prev_candle[self.close_column]
        prev_open = prev_candle[self.open_column]
        prev_low = prev_candle[self.low_column]

        # Midpoint of the previous candle's real body.
        prev_midpoint = prev_close + ((prev_open - prev_close) / 2)

        return (prev_close < prev_open and          # previous candle bearish
                cur_open < prev_low and             # opens below previous low
                prev_open > cur_close > prev_midpoint)  # closes in upper half of prior body
| 37.363636 | 81 | 0.601784 |
b619d4d7f9264bac16318a6e76942433ceb1ebe4 | 1,447 | py | Python | scantron/models/paper.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null | scantron/models/paper.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null | scantron/models/paper.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null |
from django.db import models
from django.core.validators import MinValueValidator
class Paper(models.Model):
    """An answer sheet definition: a named paper holding PQuestion entries."""

    # Human-readable paper name (optional; placeholder default).
    name = models.CharField(max_length=200,
                            null=True,
                            blank=True,
                            default="Add name to paper here")
    # Numeric identifier of the paper (default 0).
    id_number = models.PositiveSmallIntegerField(default=0)
    # Number of questions on the paper.
    num_questions = models.PositiveSmallIntegerField(default=50)
    # Single-letter paper type (A-E) or empty for untyped papers.
    p_type = models.CharField(
        max_length=1,
        choices=[("A", "A"), ("B", "B"), ("C", "C"),("D", "D"), ("E", "E"), ("", "")],
        null=True,
        default="",
        blank=True)

    def __str__(self):
        return f'{self.name}, {self.id_number}'
class PQuestion(models.Model):
    """One answer-key entry (question number, correct answer, mark) of a Paper."""

    # Constants: valid answer letters; "" means no answer.
    A_CHOICES = [("A", "A"), ("B", "B"), ("C", "C"), ("D", "D"), ("E", "E"), ("", "")]

    # Question number range 1..200.  Must be a real (re-iterable) sequence:
    # the original ``zip(range(...), range(...))`` produced a one-shot
    # iterator on Python 3, so the choices were silently empty after
    # Django consumed them once.
    Q_NUMBERS = [(i, i) for i in range(1, 201)]

    paper = models.ForeignKey(Paper,
                              null=True,
                              blank=True,
                              on_delete=models.CASCADE)
    q_number = models.PositiveSmallIntegerField(
        choices=Q_NUMBERS,
        default=1)
    mark = models.DecimalField(
        decimal_places=4,
        max_digits=6,
        default=1)
    answer = models.CharField(
        max_length=1,
        choices=A_CHOICES,
        default="D")

    class Meta:
        verbose_name_plural = "questions"
        ordering = ['q_number']

    def __str__(self):
        return f"{self.q_number}, {self.answer}, mark:{self.mark}"
| 26.796296 | 86 | 0.569454 |
3b835eda7f7a420283851b07426b0b14e84f2b8c | 3,344 | py | Python | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy283.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy283.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy283.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=19
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f over n control qubits and one target:
    for every n-bit input string where f(bits) == "1", flip the target.

    ``f`` maps an n-character bit string (e.g. "01") to "0" or "1".
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X gates before/after the multi-controlled X make the control
            # condition match this specific bit pattern ``rep``.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the Deutsch-Jozsa-style circuit for the n-qubit oracle of f,
    followed by an auto-generated tail of extra gates.

    The ``# number=N`` comments are markers emitted by the generator that
    produced this file; the gate order must be preserved exactly.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)

    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.h(target)
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])

    # Generated gate tail (mutations of the base circuit).
    prog.y(input_qubit[1]) # number=2
    prog.y(input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=3
    prog.h(input_qubit[0]) # number=13
    prog.cz(input_qubit[1],input_qubit[0]) # number=14
    prog.h(input_qubit[0]) # number=15
    prog.x(input_qubit[0]) # number=8
    prog.h(input_qubit[0]) # number=16
    prog.cz(input_qubit[1],input_qubit[0]) # number=17
    prog.h(input_qubit[0]) # number=18
    prog.cx(input_qubit[1],input_qubit[0]) # number=10
    prog.x(input_qubit[0]) # number=11
    prog.cx(input_qubit[1],input_qubit[0]) # number=12
    # circuit end
    return prog
if __name__ == '__main__':
    n = 2
    # Balanced oracle: f(x) = last bit of x.
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    # Noisy simulation on a mocked IBM Vigo backend.
    backend = FakeVigo()
    circuit1 = transpile(prog,FakeVigo())
    # Two X gates on the same qubit cancel out (generated no-op).
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()

    # Write the measurement histogram, circuit depth and circuit drawing.
    writefile = open("../data/startQiskit_noisy283.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
b84f7936d2c19a51356cf7cdd17b99e96117feea | 1,708 | py | Python | aiida/cmdline/commands/__init__.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/cmdline/commands/__init__.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | aiida/cmdline/commands/__init__.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-arguments, wrong-import-position
"""The `verdi` command line interface."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import click_completion
# Activate the completion of parameter types provided by the click_completion package
click_completion.init()
# Import to populate the `verdi` sub commands
from aiida.cmdline.commands import (cmd_calcjob, cmd_code, cmd_comment, cmd_completioncommand, cmd_computer, cmd_config,
cmd_data, cmd_database, cmd_daemon, cmd_devel, cmd_export, cmd_graph, cmd_group,
cmd_import, cmd_node, cmd_plugin, cmd_process, cmd_profile, cmd_rehash, cmd_restapi,
cmd_run, cmd_setup, cmd_shell, cmd_status, cmd_user)
# Import to populate the `verdi data` sub commands
from aiida.cmdline.commands.cmd_data import (cmd_array, cmd_bands, cmd_cif, cmd_dict, cmd_remote, cmd_structure,
cmd_trajectory, cmd_upf)
| 58.896552 | 120 | 0.582553 |
bfc3968306d1a5636e7709232678ffa62ef23a9e | 30,866 | py | Python | python/payloads_pb2.py | mrrabbitte/pooly | 7f3c8b0b3deda18d0dc8fb9284e5164abe9b3b1a | [
"Unlicense"
] | 1 | 2022-03-18T20:06:48.000Z | 2022-03-18T20:06:48.000Z | python/payloads_pb2.py | mrrabbitte/pooly | 7f3c8b0b3deda18d0dc8fb9284e5164abe9b3b1a | [
"Unlicense"
] | null | null | null | python/payloads_pb2.py | mrrabbitte/pooly | 7f3c8b0b3deda18d0dc8fb9284e5164abe9b3b1a | [
"Unlicense"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: src/payloads.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='src/payloads.proto',
package='payloads.proto',
syntax='proto3',
serialized_pb=_b('\n\x12src/payloads.proto\x12\x0epayloads.proto\"Z\n\x0cValueWrapper\x12\x10\n\x06string\x18\x01 \x01(\tH\x00\x12\x0e\n\x04int8\x18\x02 \x01(\x03H\x00\x12\x0e\n\x04int4\x18\x03 \x01(\x05H\x00\x12\x0f\n\x05\x62ytes\x18\x04 \x01(\x0cH\x00\x42\x07\n\x05value\"\x99\x01\n\x11JsonObjectWrapper\x12\x39\n\x04\x62ody\x18\x01 \x03(\x0b\x32+.payloads.proto.JsonObjectWrapper.BodyEntry\x1aI\n\tBodyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.payloads.proto.ValueWrapper:\x02\x38\x01\";\n\x0bRowResponse\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.payloads.proto.ValueWrapper\"W\n\x14QuerySuccessResponse\x12\x14\n\x0c\x63olumn_names\x18\x02 \x03(\t\x12)\n\x04rows\x18\x03 \x03(\x0b\x32\x1b.payloads.proto.RowResponse\"\xd7\x02\n\rErrorResponse\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x16\n\x0e\x63orrelation_id\x18\x02 \x01(\t\x12;\n\nerror_type\x18\x03 \x01(\x0e\x32\'.payloads.proto.ErrorResponse.ErrorType\"\xdf\x01\n\tErrorType\x12\x1b\n\x17\x43ONNECTION_CONFIG_ERROR\x10\x00\x12\x15\n\x11\x43REATE_POOL_ERROR\x10\x01\x12\x1b\n\x17\x46ORBIDDEN_CONNECTION_ID\x10\x02\x12\x16\n\x12MISSING_CONNECTION\x10\x03\x12\x0e\n\nPOOL_ERROR\x10\x04\x12\x12\n\x0ePOSTGRES_ERROR\x10\x05\x12\x11\n\rSTORAGE_ERROR\x10\x06\x12\x17\n\x13WRONG_NUM_OF_PARAMS\x10\x07\x12\x19\n\x15UNKNOWN_PG_VALUE_TYPE\x10\x08\"\x83\x01\n\rQueryResponse\x12\x37\n\x07success\x18\x01 \x01(\x0b\x32$.payloads.proto.QuerySuccessResponseH\x00\x12.\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1d.payloads.proto.ErrorResponseH\x00\x42\t\n\x07payload\"b\n\x0cQueryRequest\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\r\n\x05query\x18\x02 \x01(\t\x12,\n\x06params\x18\x03 \x03(\x0b\x32\x1c.payloads.proto.ValueWrapper\"A\n\x11TxBulkQueryParams\x12,\n\x06values\x18\x01 \x03(\x0b\x32\x1c.payloads.proto.ValueWrapper\"Z\n\x16TxBulkQueryRequestBody\x12\r\n\x05query\x18\x01 \x01(\t\x12\x31\n\x06params\x18\x02 
\x03(\x0b\x32!.payloads.proto.TxBulkQueryParams\"d\n\x12TxBulkQueryRequest\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x37\n\x07queries\x18\x02 \x03(\x0b\x32&.payloads.proto.TxBulkQueryRequestBody\"=\n\x10RowResponseGroup\x12)\n\x04rows\x18\x01 \x03(\x0b\x32\x1b.payloads.proto.RowResponse\"u\n\x16TxQuerySuccessResponse\x12\x0f\n\x07ord_num\x18\x01 \x01(\x05\x12\x14\n\x0c\x63olumn_names\x18\x02 \x03(\t\x12\x34\n\nrow_groups\x18\x03 \x03(\x0b\x32 .payloads.proto.RowResponseGroup\"W\n\x1aTxBulkQuerySuccessResponse\x12\x39\n\tresponses\x18\x01 \x03(\x0b\x32&.payloads.proto.TxQuerySuccessResponse\"\x8f\x01\n\x13TxBulkQueryResponse\x12=\n\x07success\x18\x01 \x01(\x0b\x32*.payloads.proto.TxBulkQuerySuccessResponseH\x00\x12.\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1d.payloads.proto.ErrorResponseH\x00\x42\t\n\x07payloadb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ERRORRESPONSE_ERRORTYPE = _descriptor.EnumDescriptor(
name='ErrorType',
full_name='payloads.proto.ErrorResponse.ErrorType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CONNECTION_CONFIG_ERROR', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_POOL_ERROR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORBIDDEN_CONNECTION_ID', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MISSING_CONNECTION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POOL_ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSTGRES_ERROR', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STORAGE_ERROR', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRONG_NUM_OF_PARAMS', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN_PG_VALUE_TYPE', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=557,
serialized_end=780,
)
_sym_db.RegisterEnumDescriptor(_ERRORRESPONSE_ERRORTYPE)
_VALUEWRAPPER = _descriptor.Descriptor(
name='ValueWrapper',
full_name='payloads.proto.ValueWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='string', full_name='payloads.proto.ValueWrapper.string', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='int8', full_name='payloads.proto.ValueWrapper.int8', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='int4', full_name='payloads.proto.ValueWrapper.int4', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bytes', full_name='payloads.proto.ValueWrapper.bytes', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='payloads.proto.ValueWrapper.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=38,
serialized_end=128,
)
_JSONOBJECTWRAPPER_BODYENTRY = _descriptor.Descriptor(
name='BodyEntry',
full_name='payloads.proto.JsonObjectWrapper.BodyEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='payloads.proto.JsonObjectWrapper.BodyEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='payloads.proto.JsonObjectWrapper.BodyEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=284,
)
_JSONOBJECTWRAPPER = _descriptor.Descriptor(
name='JsonObjectWrapper',
full_name='payloads.proto.JsonObjectWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='body', full_name='payloads.proto.JsonObjectWrapper.body', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_JSONOBJECTWRAPPER_BODYENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=284,
)
_ROWRESPONSE = _descriptor.Descriptor(
name='RowResponse',
full_name='payloads.proto.RowResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='values', full_name='payloads.proto.RowResponse.values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=286,
serialized_end=345,
)
_QUERYSUCCESSRESPONSE = _descriptor.Descriptor(
name='QuerySuccessResponse',
full_name='payloads.proto.QuerySuccessResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='column_names', full_name='payloads.proto.QuerySuccessResponse.column_names', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rows', full_name='payloads.proto.QuerySuccessResponse.rows', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=347,
serialized_end=434,
)
_ERRORRESPONSE = _descriptor.Descriptor(
name='ErrorResponse',
full_name='payloads.proto.ErrorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='payloads.proto.ErrorResponse.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='correlation_id', full_name='payloads.proto.ErrorResponse.correlation_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_type', full_name='payloads.proto.ErrorResponse.error_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ERRORRESPONSE_ERRORTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=437,
serialized_end=780,
)
_QUERYRESPONSE = _descriptor.Descriptor(
name='QueryResponse',
full_name='payloads.proto.QueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='payloads.proto.QueryResponse.success', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='payloads.proto.QueryResponse.error', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='payloads.proto.QueryResponse.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=783,
serialized_end=914,
)
_QUERYREQUEST = _descriptor.Descriptor(
name='QueryRequest',
full_name='payloads.proto.QueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='connection_id', full_name='payloads.proto.QueryRequest.connection_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='payloads.proto.QueryRequest.query', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='params', full_name='payloads.proto.QueryRequest.params', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=916,
serialized_end=1014,
)
_TXBULKQUERYPARAMS = _descriptor.Descriptor(
name='TxBulkQueryParams',
full_name='payloads.proto.TxBulkQueryParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='values', full_name='payloads.proto.TxBulkQueryParams.values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1016,
serialized_end=1081,
)
_TXBULKQUERYREQUESTBODY = _descriptor.Descriptor(
name='TxBulkQueryRequestBody',
full_name='payloads.proto.TxBulkQueryRequestBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='payloads.proto.TxBulkQueryRequestBody.query', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='params', full_name='payloads.proto.TxBulkQueryRequestBody.params', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1083,
serialized_end=1173,
)
_TXBULKQUERYREQUEST = _descriptor.Descriptor(
name='TxBulkQueryRequest',
full_name='payloads.proto.TxBulkQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='connection_id', full_name='payloads.proto.TxBulkQueryRequest.connection_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queries', full_name='payloads.proto.TxBulkQueryRequest.queries', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1175,
serialized_end=1275,
)
_ROWRESPONSEGROUP = _descriptor.Descriptor(
name='RowResponseGroup',
full_name='payloads.proto.RowResponseGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rows', full_name='payloads.proto.RowResponseGroup.rows', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1277,
serialized_end=1338,
)
_TXQUERYSUCCESSRESPONSE = _descriptor.Descriptor(
name='TxQuerySuccessResponse',
full_name='payloads.proto.TxQuerySuccessResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ord_num', full_name='payloads.proto.TxQuerySuccessResponse.ord_num', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_names', full_name='payloads.proto.TxQuerySuccessResponse.column_names', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_groups', full_name='payloads.proto.TxQuerySuccessResponse.row_groups', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1340,
serialized_end=1457,
)
_TXBULKQUERYSUCCESSRESPONSE = _descriptor.Descriptor(
name='TxBulkQuerySuccessResponse',
full_name='payloads.proto.TxBulkQuerySuccessResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='responses', full_name='payloads.proto.TxBulkQuerySuccessResponse.responses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1459,
serialized_end=1546,
)
_TXBULKQUERYRESPONSE = _descriptor.Descriptor(
name='TxBulkQueryResponse',
full_name='payloads.proto.TxBulkQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='success', full_name='payloads.proto.TxBulkQueryResponse.success', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='payloads.proto.TxBulkQueryResponse.error', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='payloads.proto.TxBulkQueryResponse.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=1549,
serialized_end=1692,
)
_VALUEWRAPPER.oneofs_by_name['value'].fields.append(
_VALUEWRAPPER.fields_by_name['string'])
_VALUEWRAPPER.fields_by_name['string'].containing_oneof = _VALUEWRAPPER.oneofs_by_name['value']
_VALUEWRAPPER.oneofs_by_name['value'].fields.append(
_VALUEWRAPPER.fields_by_name['int8'])
_VALUEWRAPPER.fields_by_name['int8'].containing_oneof = _VALUEWRAPPER.oneofs_by_name['value']
_VALUEWRAPPER.oneofs_by_name['value'].fields.append(
_VALUEWRAPPER.fields_by_name['int4'])
_VALUEWRAPPER.fields_by_name['int4'].containing_oneof = _VALUEWRAPPER.oneofs_by_name['value']
_VALUEWRAPPER.oneofs_by_name['value'].fields.append(
_VALUEWRAPPER.fields_by_name['bytes'])
_VALUEWRAPPER.fields_by_name['bytes'].containing_oneof = _VALUEWRAPPER.oneofs_by_name['value']
_JSONOBJECTWRAPPER_BODYENTRY.fields_by_name['value'].message_type = _VALUEWRAPPER
_JSONOBJECTWRAPPER_BODYENTRY.containing_type = _JSONOBJECTWRAPPER
_JSONOBJECTWRAPPER.fields_by_name['body'].message_type = _JSONOBJECTWRAPPER_BODYENTRY
_ROWRESPONSE.fields_by_name['values'].message_type = _VALUEWRAPPER
_QUERYSUCCESSRESPONSE.fields_by_name['rows'].message_type = _ROWRESPONSE
_ERRORRESPONSE.fields_by_name['error_type'].enum_type = _ERRORRESPONSE_ERRORTYPE
_ERRORRESPONSE_ERRORTYPE.containing_type = _ERRORRESPONSE
_QUERYRESPONSE.fields_by_name['success'].message_type = _QUERYSUCCESSRESPONSE
_QUERYRESPONSE.fields_by_name['error'].message_type = _ERRORRESPONSE
_QUERYRESPONSE.oneofs_by_name['payload'].fields.append(
_QUERYRESPONSE.fields_by_name['success'])
_QUERYRESPONSE.fields_by_name['success'].containing_oneof = _QUERYRESPONSE.oneofs_by_name['payload']
_QUERYRESPONSE.oneofs_by_name['payload'].fields.append(
_QUERYRESPONSE.fields_by_name['error'])
_QUERYRESPONSE.fields_by_name['error'].containing_oneof = _QUERYRESPONSE.oneofs_by_name['payload']
_QUERYREQUEST.fields_by_name['params'].message_type = _VALUEWRAPPER
_TXBULKQUERYPARAMS.fields_by_name['values'].message_type = _VALUEWRAPPER
_TXBULKQUERYREQUESTBODY.fields_by_name['params'].message_type = _TXBULKQUERYPARAMS
_TXBULKQUERYREQUEST.fields_by_name['queries'].message_type = _TXBULKQUERYREQUESTBODY
_ROWRESPONSEGROUP.fields_by_name['rows'].message_type = _ROWRESPONSE
_TXQUERYSUCCESSRESPONSE.fields_by_name['row_groups'].message_type = _ROWRESPONSEGROUP
_TXBULKQUERYSUCCESSRESPONSE.fields_by_name['responses'].message_type = _TXQUERYSUCCESSRESPONSE
_TXBULKQUERYRESPONSE.fields_by_name['success'].message_type = _TXBULKQUERYSUCCESSRESPONSE
_TXBULKQUERYRESPONSE.fields_by_name['error'].message_type = _ERRORRESPONSE
_TXBULKQUERYRESPONSE.oneofs_by_name['payload'].fields.append(
_TXBULKQUERYRESPONSE.fields_by_name['success'])
_TXBULKQUERYRESPONSE.fields_by_name['success'].containing_oneof = _TXBULKQUERYRESPONSE.oneofs_by_name['payload']
_TXBULKQUERYRESPONSE.oneofs_by_name['payload'].fields.append(
_TXBULKQUERYRESPONSE.fields_by_name['error'])
_TXBULKQUERYRESPONSE.fields_by_name['error'].containing_oneof = _TXBULKQUERYRESPONSE.oneofs_by_name['payload']
DESCRIPTOR.message_types_by_name['ValueWrapper'] = _VALUEWRAPPER
DESCRIPTOR.message_types_by_name['JsonObjectWrapper'] = _JSONOBJECTWRAPPER
DESCRIPTOR.message_types_by_name['RowResponse'] = _ROWRESPONSE
DESCRIPTOR.message_types_by_name['QuerySuccessResponse'] = _QUERYSUCCESSRESPONSE
DESCRIPTOR.message_types_by_name['ErrorResponse'] = _ERRORRESPONSE
DESCRIPTOR.message_types_by_name['QueryResponse'] = _QUERYRESPONSE
DESCRIPTOR.message_types_by_name['QueryRequest'] = _QUERYREQUEST
DESCRIPTOR.message_types_by_name['TxBulkQueryParams'] = _TXBULKQUERYPARAMS
DESCRIPTOR.message_types_by_name['TxBulkQueryRequestBody'] = _TXBULKQUERYREQUESTBODY
DESCRIPTOR.message_types_by_name['TxBulkQueryRequest'] = _TXBULKQUERYREQUEST
DESCRIPTOR.message_types_by_name['RowResponseGroup'] = _ROWRESPONSEGROUP
DESCRIPTOR.message_types_by_name['TxQuerySuccessResponse'] = _TXQUERYSUCCESSRESPONSE
DESCRIPTOR.message_types_by_name['TxBulkQuerySuccessResponse'] = _TXBULKQUERYSUCCESSRESPONSE
DESCRIPTOR.message_types_by_name['TxBulkQueryResponse'] = _TXBULKQUERYRESPONSE
ValueWrapper = _reflection.GeneratedProtocolMessageType('ValueWrapper', (_message.Message,), dict(
DESCRIPTOR = _VALUEWRAPPER,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.ValueWrapper)
))
_sym_db.RegisterMessage(ValueWrapper)
JsonObjectWrapper = _reflection.GeneratedProtocolMessageType('JsonObjectWrapper', (_message.Message,), dict(
BodyEntry = _reflection.GeneratedProtocolMessageType('BodyEntry', (_message.Message,), dict(
DESCRIPTOR = _JSONOBJECTWRAPPER_BODYENTRY,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.JsonObjectWrapper.BodyEntry)
))
,
DESCRIPTOR = _JSONOBJECTWRAPPER,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.JsonObjectWrapper)
))
_sym_db.RegisterMessage(JsonObjectWrapper)
_sym_db.RegisterMessage(JsonObjectWrapper.BodyEntry)
RowResponse = _reflection.GeneratedProtocolMessageType('RowResponse', (_message.Message,), dict(
DESCRIPTOR = _ROWRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.RowResponse)
))
_sym_db.RegisterMessage(RowResponse)
QuerySuccessResponse = _reflection.GeneratedProtocolMessageType('QuerySuccessResponse', (_message.Message,), dict(
DESCRIPTOR = _QUERYSUCCESSRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.QuerySuccessResponse)
))
_sym_db.RegisterMessage(QuerySuccessResponse)
ErrorResponse = _reflection.GeneratedProtocolMessageType('ErrorResponse', (_message.Message,), dict(
DESCRIPTOR = _ERRORRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.ErrorResponse)
))
_sym_db.RegisterMessage(ErrorResponse)
QueryResponse = _reflection.GeneratedProtocolMessageType('QueryResponse', (_message.Message,), dict(
DESCRIPTOR = _QUERYRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.QueryResponse)
))
_sym_db.RegisterMessage(QueryResponse)
QueryRequest = _reflection.GeneratedProtocolMessageType('QueryRequest', (_message.Message,), dict(
DESCRIPTOR = _QUERYREQUEST,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.QueryRequest)
))
_sym_db.RegisterMessage(QueryRequest)
TxBulkQueryParams = _reflection.GeneratedProtocolMessageType('TxBulkQueryParams', (_message.Message,), dict(
DESCRIPTOR = _TXBULKQUERYPARAMS,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxBulkQueryParams)
))
_sym_db.RegisterMessage(TxBulkQueryParams)
TxBulkQueryRequestBody = _reflection.GeneratedProtocolMessageType('TxBulkQueryRequestBody', (_message.Message,), dict(
DESCRIPTOR = _TXBULKQUERYREQUESTBODY,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxBulkQueryRequestBody)
))
_sym_db.RegisterMessage(TxBulkQueryRequestBody)
TxBulkQueryRequest = _reflection.GeneratedProtocolMessageType('TxBulkQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _TXBULKQUERYREQUEST,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxBulkQueryRequest)
))
_sym_db.RegisterMessage(TxBulkQueryRequest)
RowResponseGroup = _reflection.GeneratedProtocolMessageType('RowResponseGroup', (_message.Message,), dict(
DESCRIPTOR = _ROWRESPONSEGROUP,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.RowResponseGroup)
))
_sym_db.RegisterMessage(RowResponseGroup)
TxQuerySuccessResponse = _reflection.GeneratedProtocolMessageType('TxQuerySuccessResponse', (_message.Message,), dict(
DESCRIPTOR = _TXQUERYSUCCESSRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxQuerySuccessResponse)
))
_sym_db.RegisterMessage(TxQuerySuccessResponse)
TxBulkQuerySuccessResponse = _reflection.GeneratedProtocolMessageType('TxBulkQuerySuccessResponse', (_message.Message,), dict(
DESCRIPTOR = _TXBULKQUERYSUCCESSRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxBulkQuerySuccessResponse)
))
_sym_db.RegisterMessage(TxBulkQuerySuccessResponse)
TxBulkQueryResponse = _reflection.GeneratedProtocolMessageType('TxBulkQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _TXBULKQUERYRESPONSE,
__module__ = 'src.payloads_pb2'
# @@protoc_insertion_point(class_scope:payloads.proto.TxBulkQueryResponse)
))
_sym_db.RegisterMessage(TxBulkQueryResponse)
_JSONOBJECTWRAPPER_BODYENTRY.has_options = True
_JSONOBJECTWRAPPER_BODYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| 37.458738 | 2,737 | 0.754973 |
e1244482855b66e4407148ea3a69a1d97f797d4c | 1,312 | py | Python | waitlist/blueprints/settings/inserts.py | kimnnmadsen/eve-inc-waitlist | c3e4853c5563a95edbf105c11e73d481595fb3ab | [
"MIT"
] | null | null | null | waitlist/blueprints/settings/inserts.py | kimnnmadsen/eve-inc-waitlist | c3e4853c5563a95edbf105c11e73d481595fb3ab | [
"MIT"
] | 1 | 2020-02-18T05:11:20.000Z | 2020-02-18T05:29:10.000Z | waitlist/blueprints/settings/inserts.py | kimnnmadsen/eve-inc-waitlist | c3e4853c5563a95edbf105c11e73d481595fb3ab | [
"MIT"
] | null | null | null | from flask import Response
from flask_login import login_required
from flask.templating import render_template
from waitlist.blueprints.settings import add_menu_entry
from waitlist.utility.settings import sget_insert, sset_insert
from flask.blueprints import Blueprint
import logging
from flask.globals import request
from flask.helpers import flash, url_for
from werkzeug.utils import redirect
from waitlist.permissions import perm_manager
from flask_babel import gettext, lazy_gettext
# Blueprint for the "page inserts" settings screens; mounted by the settings
# package under its own URL prefix.
bp = Blueprint('settings_inserts', __name__)

logger = logging.getLogger(__name__)

# Permission gating every view in this blueprint (see @perm_manager.require
# decorators below).
perm_manager.define_permission('inserts_edit')
@bp.route("/")
@login_required
@perm_manager.require('inserts_edit')
def index() -> Response:
data = {'header': sget_insert('header')}
return render_template("settings/inserts.html", inserts=data)
@bp.route("/change/<string:type_>", methods=["POST"])
@login_required
@perm_manager.require('inserts_edit')
def change(type_) -> Response:
if type_ == "header":
content = request.form.get('content')
sset_insert('header', content)
flash(gettext("Header Insert Saved"), 'success')
return redirect(url_for('settings_inserts.index'))
add_menu_entry('settings_inserts.index', lazy_gettext('Page Inserts'), perm_manager.get_permission('inserts_edit').can)
| 32 | 119 | 0.778201 |
11990ceec5931b40947115186bac88fcb5da9ecb | 12,299 | py | Python | corehq/apps/app_manager/tests/test_form_workflow.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/tests/test_form_workflow.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/app_manager/tests/test_form_workflow.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from django.test import SimpleTestCase
from corehq.apps.app_manager.models import (
FormLink,
WORKFLOW_FORM,
WORKFLOW_MODULE,
WORKFLOW_PREVIOUS,
WORKFLOW_ROOT,
WORKFLOW_PARENT_MODULE,
FormDatum)
from corehq.apps.app_manager.const import AUTO_SELECT_RAW
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.feature_previews import MODULE_FILTER
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, clear_toggle_cache
class TestFormWorkflow(SimpleTestCase, TestXmlMixin):
    # Path components of the directory holding the expected suite-XML
    # fixtures loaded via self.get_xml(...) in the tests below
    # (presumably consumed by TestXmlMixin — confirm against the mixin).
    file_path = ('data', 'form_workflow')
    def setUp(self):
        # Enable the MODULE_FILTER feature preview for the test domain so
        # suite generation runs with module filtering available.
        update_toggle_cache(MODULE_FILTER.slug, 'domain', True, NAMESPACE_DOMAIN)
    def tearDown(self):
        # Undo setUp(): clear the cached MODULE_FILTER toggle so state does
        # not leak between tests.
        clear_toggle_cache(MODULE_FILTER.slug, 'domain', NAMESPACE_DOMAIN)
def test_basic(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('m0', 'frog')
m1, m1f0 = factory.new_basic_module('m1', 'frog')
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath="(today() - dob) < 7", form_id=m1f0.unique_id)
]
self.assertXmlPartialEqual(self.get_xml('form_link_basic'), factory.app.create_suite(), "./entry[1]")
def test_with_case_management_both_update(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('m0', 'frog')
factory.form_requires_case(m0f0)
m1, m1f0 = factory.new_basic_module('m1', 'frog')
factory.form_requires_case(m1f0)
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath="(today() - dob) > 7", form_id=m1f0.unique_id)
]
self.assertXmlPartialEqual(self.get_xml('form_link_update_case'), factory.app.create_suite(), "./entry[1]")
def test_with_case_management_create_update(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('m0', 'frog')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('m1', 'frog')
factory.form_requires_case(m1f0)
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath='true()', form_id=m1f0.unique_id)
]
self.assertXmlPartialEqual(self.get_xml('form_link_create_update_case'), factory.app.create_suite(), "./entry[1]")
def test_with_case_management_multiple_links(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('m0', 'frog')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('m1', 'frog')
factory.form_requires_case(m1f0)
m1f1 = factory.new_form(m1)
factory.form_opens_case(m1f1)
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath="a = 1", form_id=m1f0.unique_id),
FormLink(xpath="a = 2", form_id=m1f1.unique_id)
]
self.assertXmlPartialEqual(self.get_xml('form_link_multiple'), factory.app.create_suite(), "./entry[1]")
def test_link_to_child_module(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('enroll child', 'child')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m1f0)
factory.form_opens_case(m1f0, case_type='visit', is_subcase=True)
m2, m2f0 = factory.new_advanced_module('visit history', 'visit', parent_module=m1)
factory.form_requires_case(m2f0, 'child')
factory.form_requires_case(m2f0, 'visit', parent_case_type='child')
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath="true()", form_id=m1f0.unique_id),
]
m1f0.post_form_workflow = WORKFLOW_FORM
m1f0.form_links = [
FormLink(xpath="true()", form_id=m2f0.unique_id),
]
self.assertXmlPartialEqual(self.get_xml('form_link_tdh'), factory.app.create_suite(), "./entry")
def test_manual_form_link(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('enroll child', 'child')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m1f0)
factory.form_opens_case(m1f0, case_type='visit', is_subcase=True)
m2, m2f0 = factory.new_advanced_module('visit history', 'visit', parent_module=m1)
factory.form_requires_case(m2f0, 'child')
factory.form_requires_case(m2f0, 'visit', parent_case_type='child')
m0f0.post_form_workflow = WORKFLOW_FORM
m0f0.form_links = [
FormLink(xpath="true()", form_id=m1f0.unique_id, datums=[
FormDatum(name='case_id', xpath="instance('commcaresession')/session/data/case_id_new_child_0")
]),
]
m1f0.post_form_workflow = WORKFLOW_FORM
m1f0.form_links = [
FormLink(xpath="true()", form_id=m2f0.unique_id, datums=[
FormDatum(name='case_id', xpath="instance('commcaresession')/session/data/case_id"),
FormDatum(name='case_id_load_visit_0', xpath="instance('commcaresession')/session/data/case_id_new_visit_0"),
]),
]
self.assertXmlPartialEqual(self.get_xml('form_link_tdh'), factory.app.create_suite(), "./entry")
def test_return_to_parent_module(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('enroll child', 'child')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m1f0)
factory.form_opens_case(m1f0, case_type='visit', is_subcase=True)
m2, m2f0 = factory.new_advanced_module('visit history', 'visit', parent_module=m1)
factory.form_requires_case(m2f0, 'child')
factory.form_requires_case(m2f0, 'visit', parent_case_type='child')
m2f0.post_form_workflow = WORKFLOW_PARENT_MODULE
expected = """
<partial>
<stack>
<create>
<command value="'m1'"/>
<datum id="case_id" value="instance('commcaresession')/session/data/case_id"/>
<datum id="case_id_new_visit_0" value="uuid()"/>
</create>
</stack>
</partial>
"""
self.assertXmlPartialEqual(expected, factory.app.create_suite(), "./entry[3]/stack")
def test_return_to_child_module(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('enroll child', 'child')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m1f0)
factory.form_opens_case(m1f0, case_type='visit', is_subcase=True)
m2, m2f0 = factory.new_advanced_module('visit history', 'visit', parent_module=m1)
factory.form_requires_case(m2f0, 'child')
factory.form_requires_case(m2f0, 'visit', parent_case_type='child')
m2f0.post_form_workflow = WORKFLOW_MODULE
expected = """
<partial>
<stack>
<create>
<command value="'m1'"/>
<datum id="case_id" value="instance('commcaresession')/session/data/case_id"/>
<datum id="case_id_new_visit_0" value="uuid()"/>
<command value="'m2'"/>
</create>
</stack>
</partial>
"""
self.assertXmlPartialEqual(expected, factory.app.create_suite(), "./entry[3]/stack")
def test_link_to_form_in_parent_module(self):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('enroll child', 'child')
factory.form_opens_case(m0f0)
m1, m1f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m1f0)
m2, m2f0 = factory.new_advanced_module('visit history', 'visit', parent_module=m1)
factory.form_requires_case(m2f0, 'child')
# link to child -> edit child
m2f0.post_form_workflow = WORKFLOW_FORM
m2f0.form_links = [
FormLink(xpath="true()", form_id=m1f0.unique_id),
]
self.assertXmlPartialEqual(self.get_xml('form_link_child_modules'), factory.app.create_suite(), "./entry[3]")
def test_form_links_submodule(self):
"""
Test that when linking between two forms in a submodule we match up the session variables between the source
and target form correctly
:return:
"""
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('child visit', 'child')
factory.form_requires_case(m0f0)
factory.form_opens_case(m0f0, 'visit', is_subcase=True)
m1, m1f0 = factory.new_advanced_module('visit histroy', 'visit', parent_module=m0)
factory.form_requires_case(m1f0, 'child')
factory.form_requires_case(m1f0, 'visit', parent_case_type='child')
m1f1 = factory.new_form(m1)
factory.form_requires_case(m1f1, 'child')
factory.form_requires_case(m1f1, 'visit', parent_case_type='child')
m1f0.post_form_workflow = WORKFLOW_FORM
m1f0.form_links = [
FormLink(xpath="true()", form_id=m1f1.unique_id),
]
self.assertXmlPartialEqual(self.get_xml('form_link_submodule'), factory.app.create_suite(), "./entry")
def _build_workflow_app(self, mode):
factory = AppFactory(build_version='2.9.0/latest')
m0, m0f0 = factory.new_basic_module('m0', '')
factory.new_form(m0)
m1, m1f0 = factory.new_basic_module('m1', 'patient')
m1f1 = factory.new_form(m1)
factory.form_opens_case(m1f0)
factory.form_requires_case(m1f1)
m2, m2f0 = factory.new_basic_module('m2', 'patient')
m2f1 = factory.new_form(m2)
factory.form_requires_case(m2f0)
factory.form_requires_case(m2f1)
m3, m3f0 = factory.new_basic_module('m3', 'child')
m3f1 = factory.new_form(m3)
factory.form_requires_case(m3f0, parent_case_type='patient')
factory.form_requires_case(m3f1)
m4, m4f0 = factory.new_advanced_module('m4', 'patient')
factory.form_requires_case(m4f0, case_type='patient')
factory.form_requires_case(m4f0, case_type='patient')
m4f1 = factory.new_form(m4)
factory.form_requires_case(m4f1, case_type='patient')
factory.form_requires_case(m4f1, case_type='patient')
factory.form_requires_case(m4f1, case_type='patient')
m4f2 = factory.new_form(m4)
factory.form_requires_case(m4f2, case_type='patient')
factory.form_requires_case(m4f2, case_type='patient')
factory.advanced_form_autoloads(m4f2, AUTO_SELECT_RAW, 'case_id')
for module in factory.app.get_modules():
for form in module.get_forms():
form.post_form_workflow = mode
return factory.app
def test_form_workflow_previous(self):
app = self._build_workflow_app(WORKFLOW_PREVIOUS)
self.assertXmlPartialEqual(self.get_xml('suite-workflow-previous'), app.create_suite(), "./entry")
def test_form_workflow_module(self):
app = self._build_workflow_app(WORKFLOW_MODULE)
self.assertXmlPartialEqual(self.get_xml('suite-workflow-module'), app.create_suite(), "./entry")
def test_form_workflow_module_in_root(self):
app = self._build_workflow_app(WORKFLOW_PREVIOUS)
for m in [1, 2]:
module = app.get_module(m)
module.put_in_root = True
self.assertXmlPartialEqual(self.get_xml('suite-workflow-module-in-root'), app.create_suite(), "./entry")
def test_form_workflow_root(self):
app = self._build_workflow_app(WORKFLOW_ROOT)
self.assertXmlPartialEqual(self.get_xml('suite-workflow-root'), app.create_suite(), "./entry")
| 40.860465 | 125 | 0.66168 |
36091adbd8c5d3a8daf9ebe56234161778b43267 | 86 | py | Python | redis_h3_client/__init__.py | clarkcb/redis-h3-client-py | a338f345cd660674542f7ee438ba36cadbd2126b | [
"MIT"
] | null | null | null | redis_h3_client/__init__.py | clarkcb/redis-h3-client-py | a338f345cd660674542f7ee438ba36cadbd2126b | [
"MIT"
] | null | null | null | redis_h3_client/__init__.py | clarkcb/redis-h3-client-py | a338f345cd660674542f7ee438ba36cadbd2126b | [
"MIT"
] | null | null | null | from .redis_h3_client import RedisH3
__version__ = '0.1.0'
__author__ = 'Cary Clark'
| 17.2 | 36 | 0.755814 |
a943c5a526b0270b3c6d4ccb840e6c437c257558 | 13,465 | py | Python | aimsbackend/StaffViews.py | AhsanJoyia/Academic-Institute-Management-System | 51c43e8914abd193fe848b8b7550c5d75d97c922 | [
"MIT"
] | 1 | 2021-08-25T07:28:59.000Z | 2021-08-25T07:28:59.000Z | aimsbackend/StaffViews.py | AhsanJoyia/Academic-Institute-Management-System | 51c43e8914abd193fe848b8b7550c5d75d97c922 | [
"MIT"
] | null | null | null | aimsbackend/StaffViews.py | AhsanJoyia/Academic-Institute-Management-System | 51c43e8914abd193fe848b8b7550c5d75d97c922 | [
"MIT"
] | null | null | null | import json
from datetime import datetime
from uuid import uuid4
from django.contrib import messages
from django.core import serializers
from django.forms import model_to_dict
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from aimsbackend.models import Subjects, SessionYearModel, Students, Attendance, AttendanceReport, \
StaffLeaveReport, Staffs, StaffsFeedBack, CustomUser, Courses, StaffsNotifications, StudentResult
def staff_home(request):
    """Dashboard view for a staff member.

    Gathers headline counts (students, attendance sessions, approved leaves,
    subjects) plus per-subject and per-student attendance series used by the
    charts on the dashboard template.
    """
    # Subjects taught by this staff member, and the distinct courses they span.
    subjects = Subjects.objects.filter(staff_id=request.user.id)
    # dict.fromkeys de-duplicates while preserving first-seen order, replacing
    # the original append-if-not-present loop; course_id_id reads the FK value
    # directly instead of issuing a Courses.objects.get() per subject.
    final_course = list(dict.fromkeys(subject.course_id_id for subject in subjects))
    students_count = Students.objects.filter(
        course_id__in=final_course).count()
    # Total attendance sessions across this staff member's subjects.
    attendance_count = Attendance.objects.filter(
        subject_id__in=subjects).count()
    # Approved leave requests (leave_status == 1).
    staff = Staffs.objects.get(admin=request.user.id)
    leave_count = StaffLeaveReport.objects.filter(
        staff_id=staff.id, leave_status=1).count()
    subject_count = subjects.count()
    # Per-subject attendance-session counts for the subject chart.
    subject_list = []
    attendance_list = []
    for subject in subjects:
        subject_list.append(subject.subject_name)
        attendance_list.append(
            Attendance.objects.filter(subject_id=subject.id).count())
    # Per-student present/absent counts for the student chart.
    students_attendance = Students.objects.filter(course_id__in=final_course)
    student_list = []
    student_list_attendance_present = []
    student_list_attendance_absent = []
    for student in students_attendance:
        student_list.append(student.admin.username)
        student_list_attendance_present.append(
            AttendanceReport.objects.filter(status=True, student_id=student.id).count())
        student_list_attendance_absent.append(
            AttendanceReport.objects.filter(status=False, student_id=student.id).count())
    return render(request, "staff_template/staff_home_template.html", {"students_count": students_count, "attendance_count": attendance_count, "leave_count": leave_count, "subject_count": subject_count, "subject_list": subject_list, "attendance_list": attendance_list, "student_list": student_list, "present_list": student_list_attendance_present, "absent_list": student_list_attendance_absent})
def staff_take_attendance(request):
    """Render the attendance-taking page for the logged-in staff member."""
    context = {
        "subjects": Subjects.objects.filter(staff_id=request.user.id),
        "session_years": SessionYearModel.objects.all(),
    }
    return render(request, "staff_template/staff_take_attendance.html", context)
@csrf_exempt
def get_students(request):
    """Return the students in the selected subject's course and session year.

    NOTE: the payload is pre-serialised with json.dumps and then handed to
    JsonResponse (safe=False), so the response body is a JSON *string* that
    the client parses a second time. Preserved as-is for frontend
    compatibility.
    """
    subject = Subjects.objects.get(id=request.POST.get("subject"))
    session_model = SessionYearModel.objects.get(id=request.POST.get("session_year"))
    students = Students.objects.filter(
        course_id=subject.course_id, session_year_id=session_model)
    list_data = [
        {"id": student.admin.id,
         "name": student.admin.first_name + " " + student.admin.last_name}
        for student in students
    ]
    return JsonResponse(json.dumps(list_data), content_type="application/json", safe=False)
@csrf_exempt
def save_attendance_data(request):
    """Create an Attendance session plus one AttendanceReport per student.

    Expects POST fields: ``student_ids`` (JSON list of ``{id, status}``
    dicts), ``subject_id``, ``attendance_date`` and ``session_year_id``.
    Returns the plain-text body "OK" on success or "ERR" on failure.
    """
    student_ids = request.POST.get("student_ids")
    subject_id = request.POST.get("subject_id")
    attendance_date = request.POST.get("attendance_date")
    session_year_id = request.POST.get("session_year_id")
    subject_model = Subjects.objects.get(id=subject_id)
    session_model = SessionYearModel.objects.get(id=session_year_id)
    student_entries = json.loads(student_ids)
    try:
        attendance = Attendance(
            subject_id=subject_model, attendance_date=attendance_date, session_year_id=session_model)
        attendance.save()
        for stud in student_entries:
            student = Students.objects.get(admin=stud['id'])
            attendance_report = AttendanceReport(
                student_id=student, attendance_id=attendance, status=stud['status'])
            attendance_report.save()
        return HttpResponse("OK")
    except Exception:  # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate
        return HttpResponse("ERR")
def staff_update_attendance(request):
    """Render the page for editing previously-recorded attendance."""
    context = {
        "subjects": Subjects.objects.filter(staff_id=request.user.id),
        "session_year_id": SessionYearModel.objects.all(),
    }
    return render(request, "staff_template/staff_update_attendance.html", context)
@csrf_exempt
def get_attendance_dates(request):
    """Return the attendance sessions recorded for a subject / session year.

    Response body is a json.dumps string passed through JsonResponse
    (safe=False) — double-encoded on purpose to match the existing frontend.
    """
    subject_obj = Subjects.objects.get(id=request.POST.get("subject"))
    session_year_obj = SessionYearModel.objects.get(id=request.POST.get("session_year_id"))
    attendance = Attendance.objects.filter(
        subject_id=subject_obj, session_year_id=session_year_obj)
    attendance_obj = [
        {"id": attendance_single.id,
         "attendance_date": str(attendance_single.attendance_date),
         "session_year_id": attendance_single.session_year_id.id}
        for attendance_single in attendance
    ]
    return JsonResponse(json.dumps(attendance_obj), safe=False)
@csrf_exempt
def get_attendance_student(request):
    """Return each student's recorded status for one attendance session.

    ``attendance_date`` in the POST body actually carries the Attendance pk.
    """
    attendance = Attendance.objects.get(id=request.POST.get("attendance_date"))
    reports = AttendanceReport.objects.filter(attendance_id=attendance)
    list_data = [
        {"id": report.student_id.admin.id,
         "name": report.student_id.admin.first_name + " " + report.student_id.admin.last_name,
         "status": report.status}
        for report in reports
    ]
    return JsonResponse(json.dumps(list_data), content_type="application/json", safe=False)
@csrf_exempt
def save_updateattendance_data(request):
    """Overwrite the per-student statuses of an existing attendance session.

    Expects POST fields: ``attendance_date`` (the Attendance pk) and
    ``student_ids`` (JSON list of ``{id, status}`` dicts). Returns "OK" on
    success, "ERR" on failure.
    """
    student_ids = request.POST.get("student_ids")
    attendance_date = request.POST.get("attendance_date")
    attendance = Attendance.objects.get(id=attendance_date)
    student_entries = json.loads(student_ids)
    try:
        for stud in student_entries:
            student = Students.objects.get(admin=stud['id'])
            attendance_report = AttendanceReport.objects.get(
                student_id=student, attendance_id=attendance)
            attendance_report.status = stud['status']
            attendance_report.save()
        return HttpResponse("OK")
    except Exception:  # narrowed from a bare ``except:``
        return HttpResponse("ERR")
def staff_apply_leave(request):
    """Show the leave-application page with this staff member's past requests."""
    staff_record = Staffs.objects.get(admin=request.user.id)
    context = {"leave_data": StaffLeaveReport.objects.filter(staff_id=staff_record)}
    return render(request, "staff_template/staff_apply_leave.html", context)
def staff_apply_leave_save(request):
    """Persist a new leave request (leave_status=0, i.e. pending) for the
    logged-in staff member, then redirect back to the leave page with a
    flash message either way.
    """
    if request.method != "POST":
        return HttpResponseRedirect(reverse("staff_apply_leave"))
    leave_date = request.POST.get("leave_date")
    leave_msg = request.POST.get("leave_msg")
    staff_obj = Staffs.objects.get(admin=request.user.id)
    try:
        leave_report = StaffLeaveReport(
            staff_id=staff_obj, leave_date=leave_date, leave_message=leave_msg, leave_status=0)
        leave_report.save()
        messages.success(request, "Successfully Applied for Leave")
    except Exception:  # narrowed from a bare ``except:``
        messages.error(request, "Failed To Apply for Leave")
    return HttpResponseRedirect(reverse("staff_apply_leave"))
def staff_feedback(request):
    """Show the feedback page with this staff member's past feedback entries."""
    staff_record = Staffs.objects.get(admin=request.user.id)
    context = {"feedback_data": StaffsFeedBack.objects.filter(staff_id=staff_record)}
    return render(request, "staff_template/staff_feedback.html", context)
def staff_feedback_save(request):
    """Persist a new feedback message from the logged-in staff member."""
    if request.method != "POST":
        # Bug fix: previously redirected to "staff_feedback_save" (this same
        # POST-only view), which loops forever on a GET request. Redirect to
        # the feedback page instead, matching staff_apply_leave_save.
        return HttpResponseRedirect(reverse("staff_feedback"))
    feedback_msg = request.POST.get("feedback_msg")
    staff_obj = Staffs.objects.get(admin=request.user.id)
    try:
        feedback = StaffsFeedBack(
            staff_id=staff_obj, feedback=feedback_msg, feedback_reply="")
        feedback.save()
        messages.success(request, "Successfully Sent Feedback")
    except Exception:  # narrowed from a bare ``except:``
        messages.error(request, "Failed To Send Feedback")
    return HttpResponseRedirect(reverse("staff_feedback"))
def staff_profile(request):
    """Display the profile page for the logged-in staff member."""
    account = CustomUser.objects.get(id=request.user.id)
    staff_record = Staffs.objects.get(admin=account)
    context = {"user": account, "staff": staff_record}
    return render(request, "staff_template/staff_profile.html", context)
def staff_profile_save(request):
    """Update the staff member's name, address and (optionally) password."""
    if request.method != "POST":
        return HttpResponseRedirect(reverse("staff_profile"))
    first_name = request.POST.get("first_name")
    last_name = request.POST.get("last_name")
    address = request.POST.get("address")
    password = request.POST.get("password")
    try:
        customuser = CustomUser.objects.get(id=request.user.id)
        customuser.first_name = first_name
        customuser.last_name = last_name
        # Only reset the password when one was supplied; truthiness replaces
        # the original ``password != None and password != ""`` check.
        if password:
            customuser.set_password(password)
        customuser.save()
        staff = Staffs.objects.get(admin=customuser.id)
        staff.address = address
        staff.save()
        messages.success(request, "Successfully Updated Profile")
    except Exception:  # narrowed from a bare ``except:``
        messages.error(request, "Failed to Update Profile")
    return HttpResponseRedirect(reverse("staff_profile"))
@csrf_exempt
def staff_fcmtoken_save(request):
    """Store the staff member's FCM push-notification token.

    Returns the plain-text body "True" on success, "False" on failure.
    """
    token = request.POST.get("token")
    try:
        staff = Staffs.objects.get(admin=request.user.id)
        staff.fcm_token = token
        staff.save()
        return HttpResponse("True")
    except Exception:  # narrowed from a bare ``except:``
        return HttpResponse("False")
def staff_all_notification(request):
    """List every notification sent to the logged-in staff member."""
    staff_record = Staffs.objects.get(admin=request.user.id)
    context = {"notifications": StaffsNotifications.objects.filter(staff_id=staff_record.id)}
    return render(request, "staff_template/all_notification.html", context)
def staff_add_result(request):
    """Render the page for entering student exam/assignment results."""
    context = {
        "subjects": Subjects.objects.filter(staff_id=request.user.id),
        "session_years": SessionYearModel.objects.all(),
    }
    return render(request, "staff_template/staff_add_result.html", context)
def save_student_result(request):
    """Create or update a StudentResult for the selected student/subject.

    Expects POST fields: ``student_list`` (student admin id), ``subject``,
    ``exam_marks`` and ``assignment_marks``. Always redirects back to the
    add-result page with a flash message describing the outcome.
    """
    if request.method != 'POST':
        # Bug fix: was HttpResponseRedirect('staff_add_result') — a relative
        # path rather than a resolved URL name, unlike every sibling view.
        return HttpResponseRedirect(reverse("staff_add_result"))
    student_admin_id = request.POST.get('student_list')
    assignment_marks = request.POST.get('assignment_marks')
    exam_marks = request.POST.get('exam_marks')
    subject_id = request.POST.get('subject')
    student_obj = Students.objects.get(admin=student_admin_id)
    subject_obj = Subjects.objects.get(id=subject_id)
    try:
        # first() replaces the original exists() + get() pair (one query).
        result = StudentResult.objects.filter(
            subject_id=subject_obj, student_id=student_obj).first()
        if result is not None:
            result.subject_assignment_marks = assignment_marks
            result.subject_exam_marks = exam_marks
            result.save()
            messages.success(request, "Successfully Updated Result")
        else:
            result = StudentResult(student_id=student_obj, subject_id=subject_obj,
                                   subject_exam_marks=exam_marks, subject_assignment_marks=assignment_marks)
            result.save()
            messages.success(request, "Successfully Added Result")
    except Exception:  # narrowed from a bare ``except:``
        messages.error(request, "Failed to Add Result")
    return HttpResponseRedirect(reverse("staff_add_result"))
@csrf_exempt
def fetch_result_student(request):
    """Return a student's saved marks for a subject as JSON, or the body
    "False" when no result has been recorded yet.
    """
    subject_id = request.POST.get('subject_id')
    student_id = request.POST.get('student_id')
    student_obj = Students.objects.get(admin=student_id)
    # first() replaces the original exists() + get() pair, which ran two
    # queries and reused the ``result`` name for a bool and then a model.
    result = StudentResult.objects.filter(
        student_id=student_obj.id, subject_id=subject_id).first()
    if result is None:
        return HttpResponse("False")
    result_data = {"exam_marks": result.subject_exam_marks,
                   "assign_marks": result.subject_assignment_marks}
    return HttpResponse(json.dumps(result_data))
def returnHtmlWidget(request):
    """Render the standalone widget.html template."""
    return render(request, "widget.html")
| 40.19403 | 395 | 0.709543 |
9c0d64e056599b531f932429c611bf51c65b4412 | 1,511 | py | Python | Library_Management/library_management/requestbook/views.py | pankesh18/web_dev_for_info_system | e46faf4113e219eb3caad5fa494f2c5506df2689 | [
"Unlicense"
] | null | null | null | Library_Management/library_management/requestbook/views.py | pankesh18/web_dev_for_info_system | e46faf4113e219eb3caad5fa494f2c5506df2689 | [
"Unlicense"
] | null | null | null | Library_Management/library_management/requestbook/views.py | pankesh18/web_dev_for_info_system | e46faf4113e219eb3caad5fa494f2c5506df2689 | [
"Unlicense"
] | null | null | null | import json
from telnetlib import STATUS
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
import requests
from django.http import HttpResponse
from Library.models import Book, MyBook
from library_management.settings import MEDIA_URL,MEDIA_ROOT
@login_required
def reqbook(request):
    """Render the book-request form page (login required)."""
    return render(request, 'requestbook/requestbook.html')
def addreqbook(request):
    """Create a Book from the "request a book" form fields and register it
    under the requesting user's MyBook list. Returns a bare HTTP 200.

    Removed: commented-out cover-image download code and the unused
    ``imgurl`` local it supported; redundant ``, None`` defaults on
    ``POST.get`` (None is already the default).
    """
    BookTitle = request.POST.get("book[BookTitle]")
    Author = request.POST.get("book[Author]")
    Publisher = request.POST.get("book[Publisher]")
    Genre = 'General'  # requested books are filed under a fixed genre
    Year = request.POST.get("book[Year]")
    Language = request.POST.get("book[Language]")
    Description = request.POST.get("book[Description]")
    Tags = 'N/A'
    ISBN = request.POST.get("book[ISBN]")
    book = Book(BookTitle=BookTitle, Author=Author, Publisher=Publisher, Genre=Genre, Year=Year,
                Language=Language, Description=Description, Tags=Tags, ISBN=ISBN, IssuedBy=request.user)
    book.save()
    # NOTE(review): this filter targets the Book row created just above, so it
    # normally matches nothing — presumably intended to clear stale MyBook
    # rows; confirm the intent before simplifying further.
    oldbooks = MyBook.objects.filter(book=book)
    oldbooks.delete()
    mybk = MyBook(book=book, user=request.user)
    mybk.save()
    return HttpResponse(status=200)
05fe2bd262d5349ea149357bad05abc7f18634ff | 3,663 | py | Python | backend/app/bug_killer_app/api/project.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | backend/app/bug_killer_app/api/project.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | backend/app/bug_killer_app/api/project.py | SeanFitzpatrick0/BugKiller | c7dd328ac539aa75e8a1d908dd35722df4e78ab4 | [
"Apache-2.0"
] | null | null | null | from aws_lambda_powertools.utilities.typing import LambdaContext
from bug_killer_api_interface.domain.endpoint.endpoint import EndpointDetails
from bug_killer_api_interface.interface.endpoint.project import DELETE_PROJECT, UPDATE_PROJECT, CREATE_PROJECT, \
GET_PROJECT, GET_USER_PROJECTS
from bug_killer_app.access.entities.permission import assert_user_has_project_member_access
from bug_killer_app.access.entities.project import create_project, delete_project, get_users_projects, update_project, \
get_project
from bug_killer_app.domain.api_handler import lambda_api_handler
from bug_killer_app.domain.request import get_path_param, get_auth_user, parse_dto, get_event_body
from bug_killer_app.domain.response import HttpResponse
from bug_killer_app.domain.types import ApiGatewayEvt
@lambda_api_handler(GET_USER_PROJECTS)
async def get_user_projects_handler(
        evt: ApiGatewayEvt,
        _: LambdaContext,
        endpoint_details: EndpointDetails
) -> HttpResponse:
    """List the projects the authenticated caller manages or belongs to."""
    caller = get_auth_user(evt)
    managed, joined = await get_users_projects(caller)
    payload = endpoint_details.response_model(
        manager_projects=managed, member_projects=joined)
    return HttpResponse(status_code=endpoint_details.status, body=payload.api_dict())
@lambda_api_handler(GET_PROJECT)
async def get_project_handler(
        evt: ApiGatewayEvt,
        _: LambdaContext,
        endpoint_details: EndpointDetails
) -> HttpResponse:
    """Fetch one project, asserting the caller has member access to it."""
    caller = get_auth_user(evt)
    project = await get_project(get_path_param(evt, 'projectId'))
    assert_user_has_project_member_access(caller, project)
    payload = endpoint_details.response_model(project=project)
    return HttpResponse(status_code=endpoint_details.status, body=payload.api_dict())
@lambda_api_handler(CREATE_PROJECT)
async def create_project_handler(
        evt: ApiGatewayEvt,
        _: LambdaContext,
        endpoint_details: EndpointDetails
) -> HttpResponse:
    """Create a new project from the request body on the caller's behalf."""
    caller = get_auth_user(evt)
    request_dto = parse_dto(get_event_body(evt), endpoint_details.payload_model)
    project = await create_project(caller, request_dto)
    payload = endpoint_details.response_model(project=project)
    return HttpResponse(status_code=endpoint_details.status, body=payload.api_dict())
@lambda_api_handler(UPDATE_PROJECT)
async def update_project_handler(
        evt: ApiGatewayEvt,
        _: LambdaContext,
        endpoint_details: EndpointDetails
) -> HttpResponse:
    """Apply the update payload to the project identified in the path."""
    caller = get_auth_user(evt)
    target_id = get_path_param(evt, 'projectId')
    request_dto = parse_dto(get_event_body(evt), endpoint_details.payload_model)
    project = await update_project(caller, target_id, request_dto)
    payload = endpoint_details.response_model(project=project)
    return HttpResponse(status_code=endpoint_details.status, body=payload.api_dict())
@lambda_api_handler(DELETE_PROJECT)
async def delete_project_handler(
        evt: ApiGatewayEvt,
        _: LambdaContext,
        endpoint_details: EndpointDetails
) -> HttpResponse:
    """Delete the project identified in the path on the caller's behalf."""
    caller = get_auth_user(evt)
    target_id = get_path_param(evt, 'projectId')
    project = await delete_project(caller, target_id)
    payload = endpoint_details.response_model(project=project)
    return HttpResponse(status_code=endpoint_details.status, body=payload.api_dict())
| 33.916667 | 120 | 0.780235 |
0d0fc4ff56dc9a9111f30055e879bc72999d6a0e | 6,436 | py | Python | tests/workflows/test_skymodel_serial.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | tests/workflows/test_skymodel_serial.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | tests/workflows/test_skymodel_serial.py | cnwangfeng/algorithm-reference-library | 9605eb01652fbfcb9ff003cc12b44c84093b7fb1 | [
"Apache-2.0"
] | null | null | null | """ Unit tests for pipelines expressed via arlexecute
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from workflows.serial.skymodel.skymodel_serial import predict_skymodel_list_serial_workflow
from data_models.memory_data_models import Image
from data_models.memory_data_models import Skycomponent
from data_models.polarisation import PolarisationFrame
from wrappers.serial.simulation.testing_support import create_named_configuration, ingest_unittest_visibility, \
create_low_test_skymodel_from_gleam
# Module-level logger at DEBUG level, echoed to both stdout and stderr.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestSkyModel(unittest.TestCase):
    """Tests for the serial skymodel prediction workflow.

    Each test builds a LOWBD2 visibility set plus GLEAM-based sky models
    via actualSetUp, then checks predict_skymodel_list_serial_workflow
    produces non-empty visibilities for the image/component combinations.
    """
    def setUp(self):
        """Record the directory used for any test output files."""
        from data_models.parameters import arl_path
        self.dir = arl_path('test_results')
    def tearDown(self):
        pass
    def actualSetUp(self, freqwin=1, block=False, dopol=False, zerow=False):
        """Build the shared fixtures: frequencies, times, polarisation,
        phase centre and one unit-test visibility per frequency window.

        freqwin: number of frequency channels; block: use BlockVisibility;
        dopol: full linear polarisation instead of stokesI; zerow: zero the
        w coordinate.
        """
        self.npixel = 1024
        self.low = create_named_configuration('LOWBD2', rmax=750.0)
        self.freqwin = freqwin
        self.vis_list = list()
        self.ntimes = 5
        self.cellsize = 0.0005
        # Choose the interval so that the maximum change in w is smallish
        integration_time = numpy.pi * (24 / (12 * 60))
        self.times = numpy.linspace(-integration_time * (self.ntimes // 2), integration_time * (self.ntimes // 2),
                                    self.ntimes)
        if freqwin > 1:
            self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
            self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
        else:
            self.frequency = numpy.array([1.0e8])
            self.channelwidth = numpy.array([4e7])
        if dopol:
            self.vis_pol = PolarisationFrame('linear')
            self.image_pol = PolarisationFrame('stokesIQUV')
            f = numpy.array([100.0, 20.0, -10.0, 1.0])
        else:
            self.vis_pol = PolarisationFrame('stokesI')
            self.image_pol = PolarisationFrame('stokesI')
            f = numpy.array([100.0])
        self.phasecentre = SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
        self.vis_list = [ingest_unittest_visibility(self.low,
                                                    [self.frequency[freqwin]],
                                                    [self.channelwidth[freqwin]],
                                                    self.times,
                                                    self.vis_pol,
                                                    self.phasecentre, block=block,
                                                    zerow=zerow)
                         for freqwin, _ in enumerate(self.frequency)]
    def test_time_setup(self):
        """Smoke test: the fixture construction itself runs cleanly."""
        self.actualSetUp()
    def test_predict(self):
        """Predicting from a skymodel with both image and components yields
        non-zero visibilities."""
        self.actualSetUp(zerow=True)
        self.skymodel_list = [create_low_test_skymodel_from_gleam
                              (npixel=self.npixel, cellsize=self.cellsize, frequency=[self.frequency[f]],
                               phasecentre=self.phasecentre,
                               polarisation_frame=PolarisationFrame("stokesI"),
                               flux_limit=0.3,
                               flux_threshold=1.0,
                               flux_max=5.0) for f, freq in enumerate(self.frequency)]
        assert isinstance(self.skymodel_list[0].image, Image), self.skymodel_list[0].image
        assert isinstance(self.skymodel_list[0].components[0], Skycomponent), self.skymodel_list[0].components[0]
        assert len(self.skymodel_list[0].components) == 13, len(self.skymodel_list[0].components)
        assert numpy.max(numpy.abs(self.skymodel_list[0].image.data)) > 0.0, "Image is empty"
        skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0],
                                                                 self.skymodel_list, context='2d')
        assert numpy.max(numpy.abs(skymodel_vislist[0].vis)) > 0.0
    def test_predict_nocomponents(self):
        """Prediction still works when the skymodels have no components."""
        self.actualSetUp(zerow=True)
        self.skymodel_list = [create_low_test_skymodel_from_gleam
                              (npixel=self.npixel, cellsize=self.cellsize, frequency=[self.frequency[f]],
                               phasecentre=self.phasecentre,
                               polarisation_frame=PolarisationFrame("stokesI"),
                               flux_limit=0.3,
                               flux_threshold=1.0,
                               flux_max=5.0) for f, freq in enumerate(self.frequency)]
        for i, sm in enumerate(self.skymodel_list):
            sm.components = []
        assert isinstance(self.skymodel_list[0].image, Image), self.skymodel_list[0].image
        assert numpy.max(numpy.abs(self.skymodel_list[0].image.data)) > 0.0, "Image is empty"
        skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0], self.skymodel_list, context='2d')
        assert numpy.max(numpy.abs(skymodel_vislist[0].vis)) > 0.0
    def test_predict_noimage(self):
        """Prediction still works when the skymodels have no image."""
        self.actualSetUp(zerow=True)
        self.skymodel_list = [create_low_test_skymodel_from_gleam
                              (npixel=self.npixel, cellsize=self.cellsize, frequency=[self.frequency[f]],
                               phasecentre=self.phasecentre,
                               polarisation_frame=PolarisationFrame("stokesI"),
                               flux_limit=0.3,
                               flux_threshold=1.0,
                               flux_max=5.0) for f, freq in enumerate(self.frequency)]
        for i, sm in enumerate(self.skymodel_list):
            sm.image= None
        assert isinstance(self.skymodel_list[0].components[0], Skycomponent), self.skymodel_list[0].components[0]
        assert len(self.skymodel_list[0].components) == 13, len(self.skymodel_list[0].components)
        skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0], self.skymodel_list, context='2d')
        assert numpy.max(numpy.abs(skymodel_vislist[0].vis)) > 0.0
if __name__ == '__main__':
    # Allow the test module to be executed directly as a script.
    unittest.main()
| 45.006993 | 116 | 0.586389 |
e6190d839bbf80c64dd850df1c6ef8368ca0d356 | 938 | py | Python | setup.py | Mouse-Imaging-Centre/fastCell | 1d1c39d9953ee83e2c56b284f5b1f57052e490a8 | [
"MIT"
] | 1 | 2021-02-03T14:41:02.000Z | 2021-02-03T14:41:02.000Z | setup.py | Mouse-Imaging-Centre/fastCell | 1d1c39d9953ee83e2c56b284f5b1f57052e490a8 | [
"MIT"
] | 2 | 2019-11-11T19:09:59.000Z | 2021-03-12T17:36:55.000Z | setup.py | Mouse-Imaging-Centre/fastCell | 1d1c39d9953ee83e2c56b284f5b1f57052e490a8 | [
"MIT"
] | 1 | 2020-01-16T21:09:41.000Z | 2020-01-16T21:09:41.000Z | import setuptools
# Read the long description from the README. The encoding is specified
# explicitly so the build does not depend on the platform's locale default
# (PEP 597); README.md is UTF-8 markdown.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="fastCell",
    version="1.0.0",
    author="Nick Wang",
    author_email="nick.wang@mail.mcgill.ca",
    description="Fast Deep Neural Networks for Cell Image Analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Mouse-Imaging-Centre/fastCell",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    # Command-line entry scripts installed verbatim.
    scripts=[
        'fastCell/deep_segment.py',
        'fastCell/pixelize_segment.py'
    ],
    # NOTE(review): versions are hard-pinned; assumed intentional for
    # reproducibility — confirm before relaxing.
    install_requires=[
        'opencv-python==4.2.0.32',
        'pandas>=0.25',
        'fastai==1.0.59',
        'torch==1.2.0',
        'torchvision==0.4.0'
    ]
)
d8ec70bb6ba855abcd44e4c50f90952219f2a9a8 | 2,385 | py | Python | scripts/devSetup/which.py | flauta/codecombat | 8b0a0c4e516fb3d01815082f7c7650312b7cd8e5 | [
"MIT"
] | 1 | 2021-01-09T23:57:54.000Z | 2021-01-09T23:57:54.000Z | scripts/devSetup/which.py | flauta/codecombat | 8b0a0c4e516fb3d01815082f7c7650312b7cd8e5 | [
"MIT"
] | 4 | 2016-04-19T00:06:28.000Z | 2016-04-19T02:25:22.000Z | scripts/devSetup/which.py | flauta/codecombat | 8b0a0c4e516fb3d01815082f7c7650312b7cd8e5 | [
"MIT"
] | 2 | 2020-10-28T14:50:54.000Z | 2020-11-05T12:39:19.000Z | __author__ = 'root'
#copied from python3
import os
import sys
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `fn` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to
    # the current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    # `directory` (not `dir`) to avoid shadowing the builtin.
    for directory in path:
        normdir = os.path.normcase(directory)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(directory, thefile)
                if _access_check(name, mode):
                    return name
    return None
db9fb536f9fdfc4c02cd4b31a45246f17c3d6175 | 184 | py | Python | app/gws/ext/layer/wms/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/ext/layer/wms/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/ext/layer/wms/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | import gws.ext.ows.provider.wms.layer
class Config(gws.ext.ows.provider.wms.layer.Config):
    """WMS layer"""
    # Inherits every option from the generic WMS provider layer config;
    # exists so the layer type can be registered under this module path.
    pass
class Object(gws.ext.ows.provider.wms.layer.Object):
    # Concrete layer object; all behavior comes from the WMS provider base class.
    pass
| 20.444444 | 52 | 0.711957 |
48dc21938399fe0d175e81d93c789f9b283372fd | 1,022 | py | Python | samples/secure_file_share/coreservices/get_list_of_files.py | ssethumavisa/cybersource-rest-samples-python | 1da9dfb943df0e6be3c4fd591a69e2f8f20ac18e | [
"MIT"
] | null | null | null | samples/secure_file_share/coreservices/get_list_of_files.py | ssethumavisa/cybersource-rest-samples-python | 1da9dfb943df0e6be3c4fd591a69e2f8f20ac18e | [
"MIT"
] | null | null | null | samples/secure_file_share/coreservices/get_list_of_files.py | ssethumavisa/cybersource-rest-samples-python | 1da9dfb943df0e6be3c4fd591a69e2f8f20ac18e | [
"MIT"
] | null | null | null | from CyberSource import *
import os
from importlib.machinery import SourceFileLoader
# Load the sample's Configuration.py from the ./data directory at import time.
# NOTE(review): SourceFileLoader.load_module() is deprecated in modern Python;
# assumed kept for sample compatibility — confirm before modernizing.
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
def get_list_of_files():
    """List files available via the Secure File Share API for a fixed date range.

    Prints the API response code and body; errors are caught and printed
    (sample-style best-effort handling).
    """
    query_start = "2018-10-20"
    query_end = "2018-10-30"
    try:
        # Merchant credentials come from the sample Configuration file.
        merchant_details = configuration.Configuration().get_configuration()
        api = SecureFileShareApi(merchant_details)
        _, status, body = api.get_file_details(query_start, query_end,
                                               organization_id="testrest")
        print("API RESPONSE CODE : ", status)
        print("API RESPONSE BODY : ", body)
    except Exception as e:
        print("Exception when calling SecureFileShareApi->get_file_details: %s\n" % e)
if __name__ == "__main__":
    # Run the sample directly as a script.
    get_list_of_files()
| 37.851852 | 103 | 0.661448 |
5fe41b9ce3ada1b3f39a4b2de935be0240807d90 | 1,043 | py | Python | tests/test_deepred_pytorch.py | dileep-kishore/DEEPred-pytorch | 17f5189ebce41b9f0b959a2c7e797545b93f762c | [
"MIT"
] | null | null | null | tests/test_deepred_pytorch.py | dileep-kishore/DEEPred-pytorch | 17f5189ebce41b9f0b959a2c7e797545b93f762c | [
"MIT"
] | null | null | null | tests/test_deepred_pytorch.py | dileep-kishore/DEEPred-pytorch | 17f5189ebce41b9f0b959a2c7e797545b93f762c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `deepred_pytorch` package."""
import pytest
from click.testing import CliRunner
from deepred_pytorch import deepred_pytorch
from deepred_pytorch import cli
@pytest.fixture
def response():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Cookiecutter template placeholder — yields None until a real fixture
    # is implemented, e.g.:
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # Template placeholder: no assertions yet; the `response` fixture is
    # requested but unused, e.g.:
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
    """Invoke the CLI with no arguments and with --help; verify exit codes and output."""
    cli_runner = CliRunner()

    default_run = cli_runner.invoke(cli.main)
    assert default_run.exit_code == 0
    assert 'deepred_pytorch.cli.main' in default_run.output

    help_run = cli_runner.invoke(cli.main, ['--help'])
    assert help_run.exit_code == 0
    assert '--help Show this message and exit.' in help_run.output
| 26.74359 | 78 | 0.709492 |
026e151d40fe1039f8a2e468d7fd7a54e7d0a1de | 13,918 | py | Python | mindspore/parallel/_utils.py | mindspore-ai/mindspore | a9fbb25530a2874166ff0045ddcdfc73207bf5eb | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | mindspore/parallel/_utils.py | Ascend/mindspore | 1509d3f848e6685660194d9f58646fc73ae0f0f0 | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | mindspore/parallel/_utils.py | mindspore-ai/mindspore | a9fbb25530a2874166ff0045ddcdfc73207bf5eb | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils of auto parallel"""
import numpy as np
from mindspore import context, log as logger
from mindspore.context import ParallelMode
from mindspore._c_expression import reset_op_id
from mindspore.common.tensor import Tensor
from mindspore.common.dtype import dtype_to_nptype
from mindspore.common import dtype as mstype
from mindspore.communication.management import get_group_size, get_rank
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.common.seed import get_seed
def _get_parallel_mode():
    """Get parallel mode."""
    # Thin wrapper over the global auto-parallel context singleton.
    return auto_parallel_context().get_parallel_mode()
def _is_in_auto_parallel_mode():
    """Return True when the current parallel mode is semi-auto or auto parallel."""
    # Membership test against a tuple (constant container) rather than a list.
    return _get_parallel_mode() in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
def _get_full_batch():
    """Get whether to use full_batch."""
    # Thin wrapper over the global auto-parallel context singleton.
    return auto_parallel_context().get_full_batch()
def _get_pipeline_stages():
    """Get pipeline stages."""
    # Thin wrapper over the global auto-parallel context singleton.
    return auto_parallel_context().get_pipeline_stages()
def _check_task_sink_envs():
"""
Check whether task_sink environment variables have been exported or not.
return True if task_sink environment variables have been exported, False otherwise.
"""
import os
task_sink = os.getenv("GRAPH_OP_RUN")
if task_sink and task_sink.isdigit() and int(task_sink) == 1:
return False
return True
def _check_full_batch():
    """
    full_batch could only be used under semi_auto_parallel or auto_parallel; check it.

    Raises:
        RuntimeError: Using full_batch under neither semi_auto_parallel nor auto_parallel.
    """
    mode = _get_parallel_mode()
    uses_full_batch = _get_full_batch()
    if uses_full_batch and mode not in ("semi_auto_parallel", "auto_parallel"):
        raise RuntimeError("full_batch could only be used under semi_auto_parallel or auto_parallel.")
def _need_to_full():
    """Check whether to convert input to full shape or tensor."""
    # Only relevant under (semi-)auto parallel.
    if _get_parallel_mode() not in ("semi_auto_parallel", "auto_parallel"):
        return False
    dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
    # A custom (sharded) dataset strategy always requires expansion to full.
    if dataset_strategy and dataset_strategy not in ("data_parallel", "full_batch"):
        return True
    # Otherwise expand only when full_batch is NOT already provided.
    return not _get_full_batch()
def _to_full_shapes(shapes, device_num):
    """Expand batch dimension according to device_num, adapt to mindspore minddata graph solution.

    Args:
        shapes (list): Per-input shapes (sequences of ints).
        device_num (int): Number of devices in the data-parallel dimension.

    Returns:
        list: Expanded shapes as tuples.

    Raises:
        ValueError: When a custom dataset strategy is configured and its
            outer/inner sizes do not match ``shapes``.
    """
    dataset_strategy = ()
    if context.get_auto_parallel_context("dataset_strategy") not in ("data_parallel", "full_batch"):
        dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
    if dataset_strategy:
        if len(shapes) != len(dataset_strategy):
            raise ValueError("The input shapes size {} is not equal to "
                             "dataset strategy size {}".format(len(shapes), len(dataset_strategy)))
        new_shapes = []
        for index, shape in enumerate(shapes):
            if len(shape) != len(dataset_strategy[index]):
                raise ValueError("The input shapes item size {} is not equal to "
                                 "dataset strategy item size {}".format(len(shape), len(dataset_strategy[index])))
            # Each dimension is multiplied by its shard count from the strategy.
            new_shapes.append(tuple(dim * dataset_strategy[index][i] for i, dim in enumerate(shape)))
        return new_shapes
    # Default: only the batch (first) dimension is expanded by device_num.
    return [tuple(dim * device_num if i == 0 else dim for i, dim in enumerate(shape))
            for shape in shapes]
def _to_full_tensor(elem, global_device_num, global_rank, scaling_sens=None):
    """Convert numpy to tensor, expanding batch dimension according to device_num, adapt to feed the data
       from host solution.

    Each local data item is embedded into a zero-filled full-shape tensor at
    the slice owned by this rank; the remaining entries stay zero.
    """
    lst = []
    # Devices and rank are interpreted within the current pipeline stage.
    device_num = global_device_num // _get_pipeline_stages()
    stage_rank = global_rank % device_num
    if not isinstance(elem, (tuple, list)):
        elem = [elem]
    # NOTE(review): stage_rank = global_rank % device_num can never be
    # >= device_num, so this guard looks unreachable — confirm intent.
    if stage_rank >= device_num:
        raise ValueError("The global rank must be smaller than device number, the global rank is {}, "
                         "the device num is {}".format(stage_rank, device_num))
    dataset_strategy = ()
    if context.get_auto_parallel_context("dataset_strategy") not in ("data_parallel", "full_batch"):
        dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
    if elem and dataset_strategy:
        if len(elem) != len(dataset_strategy):
            raise ValueError("The input size {} is not equal to "
                             "dataset strategy size {}".format(len(elem), len(dataset_strategy)))
    for index, data in enumerate(elem):
        if isinstance(data, np.ndarray):
            data = Tensor(data)
        if not isinstance(data, Tensor):
            raise ValueError("elements in tensors must be Tensor")
        shape_ = data.shape
        type_ = data.dtype
        new_shape = ()
        if not dataset_strategy:
            # Data-parallel case: expand only the batch dimension and place
            # this rank's batch slice at its offset.
            batchsize_per_device = 1
            for i, item in enumerate(shape_):
                if i == 0:
                    new_shape += (item * device_num,)
                    batchsize_per_device = item
                else:
                    new_shape += (item,)
            new_tensor_numpy = np.zeros(new_shape, dtype_to_nptype(type_))
            start = stage_rank * batchsize_per_device
            new_tensor_numpy[start: start + batchsize_per_device] = data.asnumpy()
        else:
            # Sharded case: expand every dimension by its shard count and
            # compute this rank's slice per dimension.
            if len(shape_) != len(dataset_strategy[index]):
                raise ValueError("The input shapes item size {} is not equal to "
                                 "dataset strategy item size {}".format(len(shape_), len(dataset_strategy[index])))
            slice_index = ()
            for i, item in enumerate(shape_):
                new_shape += (item * dataset_strategy[index][i],)
                start = (stage_rank % dataset_strategy[index][i]) * item
                end = (stage_rank % dataset_strategy[index][i] + 1) * item
                s = slice(start, end, 1)
                slice_index += (s,)
            new_tensor_numpy = np.zeros(new_shape, dtype_to_nptype(type_))
            new_tensor_numpy[slice_index] = data.asnumpy()
        new_tensor = Tensor(new_tensor_numpy)
        lst.append(new_tensor)
    # Optionally append the loss-scaling sensitivity as a trailing tensor.
    if scaling_sens:
        lst.append(Tensor(scaling_sens, mstype.float32))
    return tuple(lst)
def _get_gradients_mean():
    """Get if using gradients_mean."""
    # Thin wrapper over the global auto-parallel context singleton.
    return auto_parallel_context().get_gradients_mean()
def _get_device_num():
    """Get the device num."""
    # Stand-alone execution always means a single device.
    if auto_parallel_context().get_parallel_mode() == "stand_alone":
        return 1
    # Fall back to the communication group size when no explicit device
    # number was configured on the auto-parallel context.
    if auto_parallel_context().get_device_num_is_set() is False:
        return get_group_size()
    return auto_parallel_context().get_device_num()
def _get_global_rank():
    """Get the global rank."""
    # Stand-alone execution always means rank 0.
    if auto_parallel_context().get_parallel_mode() == "stand_alone":
        return 0
    # Fall back to the communication layer when no explicit rank was
    # configured on the auto-parallel context.
    if auto_parallel_context().get_global_rank_is_set() is False:
        return get_rank()
    return auto_parallel_context().get_global_rank()
def _get_parameter_broadcast():
    """Get the parameter broadcast."""
    parallel_mode = auto_parallel_context().get_parallel_mode()
    parameter_broadcast = auto_parallel_context().get_parameter_broadcast()
    # Without broadcast or a shared seed, data/hybrid parallel replicas may
    # start from different parameter values — warn the user.
    if parallel_mode in ("data_parallel", "hybrid_parallel") and parameter_broadcast is False and get_seed() is None:
        logger.warning("You are suggested to use mindspore.context.set_auto_parallel_context(parameter_broadcast=True)"
                       " or mindspore.common.set_seed() to share parameters among multi-devices.")
    return parameter_broadcast
def _get_enable_parallel_optimizer():
    """Get if using parallel optimizer."""
    # Thin wrapper over the global auto-parallel context singleton.
    return auto_parallel_context().get_enable_parallel_optimizer()
def _device_number_check(parallel_mode, device_number):
"""
Check device num.
Args:
parallel_mode (str): The parallel mode.
device_number (int): The device number.
"""
if parallel_mode == "stand_alone" and device_number != 1:
raise ValueError("If parallel_mode is stand_alone, device_number must be 1, "
"device_number: {0}, parallel_mode:{1}".format(device_number, parallel_mode))
def _parameter_broadcast_check(parallel_mode, parameter_broadcast):
"""
Check parameter broadcast.
Note:
If parallel mode is semi_auto_parallel or auto_parallel, parameter broadcast is not supported. Using the same
random seed to make sure parameters on multiple devices are the same.
Args:
parallel_mode (str): The parallel mode.
parameter_broadcast (bool): The parameter broadcast.
Raises:
ValueError: If parameter is broadcasted
but the parallel mode is "stand_alone" or "semi_auto_parallel" or "auto_parallel").
"""
if parameter_broadcast is True and parallel_mode in ("stand_alone", "semi_auto_parallel", "auto_parallel"):
raise ValueError("stand_alone, semi_auto_parallel and auto_parallel "
"do not support parameter broadcast, parallel_mode: {0}, parameter_broadcast:{1}"
.format(parallel_mode, parameter_broadcast))
def _get_python_op(op_name, op_path, instance_name, arglist):
    """Get python operator."""
    # Dynamically import the module that defines the operator class.
    module = __import__(op_path, fromlist=["None"])
    cls = getattr(module, op_name)
    # Functional ops are plain callables and are used as-is; everything else
    # is a primitive class instantiated with the saved argument list.
    if op_path != "mindspore.ops.functional":
        op = cls(*arglist)
    else:
        op = cls
    op.set_prim_instance_name(instance_name)
    return op
def _reset_op_id():
    """Reset op id."""
    # Delegates to the C++ binding imported from mindspore._c_expression.
    reset_op_id()
def _parallel_predict_check():
    """validate parallel model prediction"""
    if _get_parallel_mode() in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
        dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
        # A custom (non data_parallel/full_batch) strategy counts as a sharded
        # model-parallel dataset and is also acceptable for prediction.
        is_shard_dataset_mp = (dataset_strategy and dataset_strategy not in ("data_parallel", "full_batch"))
        if not context.get_auto_parallel_context("full_batch") and not is_shard_dataset_mp:
            raise RuntimeError('Model prediction only supports full batch dataset. Please set "full_batch" with True.')
def _check_similar_layout(tensor_layout1, tensor_layout2):
"""check if two tensor layouts are same"""
if tensor_layout1[1] != tensor_layout2[1]:
return False
for i in tensor_layout1[1]:
if i == -1:
continue
if tensor_layout1[0][-1-i] != tensor_layout2[0][-1-i]:
return False
return True
def _check_same_layout(tensor_layout1, tensor_layout2):
"""check if two tensor layouts are same"""
return tensor_layout1[0] == tensor_layout2[0] and tensor_layout1[1] == tensor_layout2[1]
def _remove_repeated_slices(tensor_layout):
"""generate unrepeated tensor layout"""
import copy
new_tensor_layout = copy.deepcopy(tensor_layout)
dev_mat = tensor_layout[0][:]
tensor_map = tensor_layout[1]
for dim in range(len(dev_mat)):
if dim not in tensor_map:
dev_mat[-1-dim] = 1
new_tensor_layout[0] = dev_mat
return new_tensor_layout
def _infer_rank_list(train_map, predict_map=None):
    """infer checkpoint slices to be loaded

    For every parameter in ``train_map`` (name -> training tensor layout),
    compute which training ranks hold unique slices; when ``predict_map`` is
    given, also flag whether those slices can be loaded directly for the
    prediction layout.  Returns ``{param_name: (rank_list, direct_load)}``.
    """
    ret = {}
    # Local rank within this pipeline stage.
    if _get_pipeline_stages() > 1:
        local_rank = int(_get_global_rank() % (_get_device_num() / _get_pipeline_stages()))
    else:
        local_rank = _get_global_rank()
    for param_name in train_map:
        train_layout = train_map[param_name]
        train_dev_mat = train_layout[0]
        dev_num = np.array(train_dev_mat).prod()
        # Layout with replicated device dims collapsed to 1: index 0 there
        # picks a single representative rank per replicated dimension.
        new_train_layout = _remove_repeated_slices(train_layout)
        array = np.arange(dev_num).reshape(train_dev_mat)
        index = ()
        for i in new_train_layout[0]:
            if i == 1:
                index = index + (0,)
            else:
                index = index + (slice(None),)
        rank_list = array[index].flatten()
        if not predict_map:
            ret[param_name] = (rank_list, False)
            continue
        if param_name not in predict_map:
            logger.warning("predict_map does not contain %s", param_name)
            continue
        predict_layout = predict_map[param_name]
        dev_num = np.array(predict_layout[0]).prod()
        # optimization pass
        if _check_same_layout(train_layout, predict_layout):
            # Identical layouts: this rank only needs its own slice.
            ret[param_name] = ([local_rank], True)
            continue
        if _check_similar_layout(train_layout, predict_layout):
            if len(rank_list) == 1:
                ret[param_name] = (rank_list, True)
            elif len(rank_list) == dev_num:
                ret[param_name] = ([rank_list[local_rank]], True)
            else:
                ret[param_name] = (rank_list, False)
        else:
            ret[param_name] = (rank_list, False)
    return ret
| 39.205634 | 119 | 0.663457 |
949308df3ba02e18d367a526f74f367e13a9ca66 | 3,174 | py | Python | var/spack/repos/builtin/packages/siesta/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/siesta/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/siesta/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Siesta(Package):
    """SIESTA performs electronic structure calculations and ab initio molecular
    dynamics simulations of molecules and solids."""

    homepage = "https://departments.icmab.es/leem/siesta/"

    version('4.0.1', '5cb60ce068f2f6e84fa9184ffca94c08', url='https://launchpad.net/siesta/4.0/4.0.1/+download/siesta-4.0.1.tar.gz')
    version('3.2-pl-5', '27a300c65eb2a25d107d910d26aaf81a', url='http://departments.icmab.es/leem/siesta/CodeAccess/Code/siesta-3.2-pl-5.tgz')

    # Fixes the configure script for releases up to 4.0.
    patch('configure.patch', when='@:4.0')

    depends_on('mpi')
    depends_on('blas')
    depends_on('lapack')
    depends_on('scalapack')
    depends_on('netcdf')
    depends_on('netcdf-fortran')

    # Custom (non-autotools) install flow with an explicit phase list.
    phases = ['configure', 'build', 'install']

    def configure(self, spec, prefix):
        """Run SIESTA's configure in Obj (siesta) and Obj_trans (transiesta)."""
        sh = which('sh')
        configure_args = ['--enable-mpi',
                          '--with-blas=%s' % spec['blas'].libs,
                          '--with-lapack=%s' % spec['lapack'].libs,
                          # need to include BLAS below because Intel MKL's
                          # BLACS depends on BLAS, otherwise the compiler
                          # test fails
                          '--with-blacs=%s' % (spec['scalapack'].libs +
                                               spec['blas'].libs),
                          '--with-scalapack=%s' % spec['scalapack'].libs,
                          '--with-netcdf=%s' % (spec['netcdf-fortran'].libs +
                                                spec['netcdf'].libs),
                          # need to specify MPIFC explicitly below, otherwise
                          # Intel's mpiifort is not found
                          'MPIFC=%s' % spec['mpi'].mpifc
                          ]
        for d in ['Obj', 'Obj_trans']:
            with working_dir(d, create=True):
                sh('../Src/configure', *configure_args)
                if spec.satisfies('@:4.0%intel'):
                    # Workaround: compile atom.F at -O1 under Intel compilers.
                    with open('arch.make', 'a') as f:
                        f.write('\natom.o: atom.F\n')
                        f.write('\t$(FC) -c $(FFLAGS) -O1')
                        f.write('$(INCFLAGS) $(FPPFLAGS) $<')
                sh('../Src/obj_setup.sh')

    def build(self, spec, prefix):
        """Build siesta, transiesta, and the Util helper programs."""
        with working_dir('Obj'):
            make(parallel=False)
        with working_dir('Obj_trans'):
            make('transiesta', parallel=False)
        with working_dir('Util'):
            sh = which('sh')
            sh('build_all.sh')

    def install(self, spec, prefix):
        """Install the main binaries plus every executable built under Util."""
        mkdir(prefix.bin)
        with working_dir('Obj'):
            install('siesta', prefix.bin)
        with working_dir('Obj_trans'):
            install('transiesta', prefix.bin)
        for root, _, files in os.walk('Util'):
            for fname in files:
                fname = join_path(root, fname)
                if os.access(fname, os.X_OK):
                    install(fname, prefix.bin)
| 40.692308 | 142 | 0.521109 |
aaf60553093ad8ac04cb5b60198381daabe3138a | 410 | py | Python | examples/commandsbot.py | spacedev-official/disformers | 31800466741be5ddcdfb531e021638f6ee112e23 | [
"Apache-2.0"
] | null | null | null | examples/commandsbot.py | spacedev-official/disformers | 31800466741be5ddcdfb531e021638f6ee112e23 | [
"Apache-2.0"
] | 14 | 2021-11-01T08:23:06.000Z | 2022-03-31T08:32:24.000Z | examples/commandsbot.py | spacedev-official/disformers | 31800466741be5ddcdfb531e021638f6ee112e23 | [
"Apache-2.0"
] | null | null | null | import discord
from discord.ext import commands
from DisFormers import DisFormersBot
class MyBot(commands.Bot):
    # Minimal bot subclass: forwards construction to commands.Bot and logs readiness.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    async def on_ready(self):
        # Fired by discord.py once the gateway connection is established.
        print("Bot is ready.")
# All intents are requested so the DisFormers wrapper can observe gateway events.
my_bot = MyBot(command_prefix="!", intents=discord.Intents.all())
DisFormersBot(my_bot,prefix="!")

if __name__ == "__main__":
    # NOTE(review): "token" is a placeholder — supply a real bot token
    # (e.g. from an environment variable) before running.
    my_bot.run("token")
e54873121d886c5f8072ba1cebd840d16c8e65f7 | 10,842 | py | Python | DASH/DASH/DashBoard/App.py | changsoooo/kisra-1 | dce2132df97d2b20d9019fa3421540895a097183 | [
"FTL"
] | null | null | null | DASH/DASH/DashBoard/App.py | changsoooo/kisra-1 | dce2132df97d2b20d9019fa3421540895a097183 | [
"FTL"
] | null | null | null | DASH/DASH/DashBoard/App.py | changsoooo/kisra-1 | dce2132df97d2b20d9019fa3421540895a097183 | [
"FTL"
] | null | null | null | import dash
import dash_html_components as html
import Character
from dash.dependencies import Input, Output, State
import plotly.express as px
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import layout
import User
import numpy as np
from datetime import timedelta
import plotly.graph_objects as go
class App:
def __init__(self):
self.sheet = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
self.app = dash.Dash(__name__, external_stylesheets=self.sheet, suppress_callback_exceptions=True)
self.layout = layout
self.user = User.User()
def show_content(self):
app = self.app
style = self.layout.style
tab = self.layout.tab
origin = tab
user = self.user
app.layout = origin
@app.callback(
Output(self.layout.output_id, 'children'),
Input(self.layout.input_id, "value"),
)
def show_page(tab_input):
if tab_input == 'signup':
app.layout.children[-1] = html.Div(self.layout.signup)
return html.Div(self.layout.signup)
if tab_input == 'analysis':
app.layout.children[-1] = html.Div(self.layout.analysis)
self.layout.analysis[0].children[1].value, self.layout.analysis[0].children[3].value = \
user.name, user.date
return html.Div(self.layout.analysis)
if tab_input == 'info':
app.layout.children[-1] = html.Div(self.layout.info)
userList = user.userList()
self.layout.info[0].children[1].options = userList
self.layout.info[0].children[1].value = user.name
return html.Div(self.layout.info)
@app.callback(
Output('output-div', 'children'),
Input('submit-val', 'n_clicks'),
State('self-understand-degree', 'value'),
State('age-check', 'value'),
State('invest-terms', 'value'),
State('finance-ratio', 'value'),
State('annual-income', 'value'),
State('character-risk', 'value'),
State('invest-purpose', 'value'),
State('invest-experience', 'value'),
State('invest-scale', 'value'),
State('datetime', 'value'),
State('name', 'value')
)
def page1_result(n_clicks, input_1, input_2, input_3, input_4,
input_5, input_6, input_7, input_8, input_9, input_10,
input_11):
if 0 < n_clicks:
tags_id = [input_1, input_2, input_3, input_4, input_5, input_6, input_7, input_8, input_9,
input_10, input_11]
character = Character.Character(tags_id)
output = app.layout.children[-1].children[-1]
if character.empty_check():
answer, df = character.predict()
result = '당신은 {0}형 투자자입니다. 당신에게 맞는 포트폴리오를 확인해 보세요'.format(answer)
pie = px.pie(df, names=df.columns[-1], values=df.columns[0])
output.children[0].children = result
if len(output.children) < 3:
fig = dcc.Graph(id='pie-chart')
fig.figure = pie
fig.figure.layout.paper_bgcolor = style['pie_chart_style']['backgroundColor']
output.children.append(fig)
output.style = style['pie_chart_style']
return output
warning = '비어있는 항목이 있습니다! 전부 체크해 주세요'
if 2 < len(output.children):
output.children = output.children[:-1]
output.children[0].children = warning
output.style = style['pie_chart_style']
return output
def page2_result(content):
print('-=-=-=-= is this every called??-=-=-=')
if type(content) == str:
return dcc.ConfirmDialog(
id='confirm',
message=content
)
before, after = content
table_header = [
html.Thead(html.Tr([html.Th("시점"), html.Th("현금성"), html.Th("주식"), html.Th("채권"), html.Th("대체"), html.Th('상세정보')]))
]
row1 = html.Tr([html.Td("현재"), html.Td(before[before['asset_class'] == '현금성']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '주식']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '채권']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '대체']['value'].iloc[0]),
html.Td(html.Div([html.Button('상세정보', id='detail-info-button'),
dbc.Modal(
[
dbc.ModalHeader("상세정보"),
dbc.ModalBody("A small modal.", id='record'),
dbc.ModalFooter(
dbc.Button("Close", id="close-detail-info", className="ml-auto")
),
],
id="modal-detail-info",
size="sm"
)]))])
row2 = html.Tr([html.Td("미래"), html.Td(before[before['asset_class'] == '현금성']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '주식']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '채권']['value'].iloc[0]),
html.Td(before[before['asset_class'] == '대체']['value'].iloc[0]),
html.Td('')],
style={'background-color': '#FFA500'})
# if not content[-1]:
# row2.style['background-color'] = '#ddd'
# return html.Div(dbc.Table(table_header, html.Tbody([row1, row2]), bordered=True))
return html.Div(dbc.Table(table_header + [html.Tbody([row1, row2])], bordered=True))
def changePeriod(select):
for idx, sel in enumerate(select):
if select[idx] < 12:
select[idx] = (12-select[idx])*30
continue
if select[idx] < 14:
select[idx] = (14-select[idx])*7
continue
select[idx] = 17-select[idx]
return select
def page3Layout(result, from_date, allowable):
chart, table = result
pie = px.pie(chart, names=chart['asset_class'].tolist(), values=chart['wt'].tolist())
fig = dcc.Graph(id='pie-chart-page3')
fig.figure = pie
table_header = [
html.Thead(html.Tr([html.Th("종목명"), html.Th("평가액"), html.Th("비중"), html.Th("비고")]))
]
informations = table.loc[:, ['itemname', 'value', 'wt', 'asset_class']]
informations.loc[:, 'wt'] = informations.loc[:, 'wt']*100
sumOfInfo = [html.Td('계'), html.Td(sum(informations['value'])), html.Td(round(sum(informations['wt']))), html.Td('')]
informations = informations.values.tolist()
table_row = list()
for row in informations:
temp = [html.Td(data) for data in row]
table_row.extend([html.Tr(temp)])
table_row.extend([html.Tr(sumOfInfo)])
table_result = html.Div(dbc.Table(table_header + [html.Tbody(table_row)], bordered=True))
x_axis = [from_date]
now = from_date
while now<allowable:
now += timedelta(days=30)
x_axis.append(now)
y_axis = np.random.randn(2, len(x_axis)).tolist()
y_axis[0].sort()
y_axis[1].sort()
fig_2 = dcc.Graph(id='line-chart')
fig_line = go.Figure()
fig_line.add_trace(go.Scatter(x=x_axis, y=y_axis[0], mode='lines+markers', name='before'))
fig_line.add_trace(go.Scatter(x=x_axis, y=y_axis[1], mode='lines+markers', name='after'))
fig_2.figure = fig_line
return html.Div([fig,
table_result,
fig_2])
@app.callback(
Output('output-pos', 'children'),
Input('predict-slider', 'value'),
State('analysis-name', 'value'),
State('analysis-datetime', 'value')
)
def show_prediction(select, name, date):
if not date:
date = user.getStartDate(name)
user.name, user.date = name, date
select = changePeriod(select)
result = user.closeData(select, choice=True)
return page2_result(result)
@app.callback(
    Output('modal-detail-info', 'is_open'),
    Output('record', 'children'),
    [Input('detail-info-button', 'n_clicks'),
    Input('close-detail-info', 'n_clicks')],
    State('modal-detail-info', 'is_open'),
    State('predict-slider', 'value')
)
def detailInfo(open, close, is_open, select):
    """Toggle the detail-info modal and rebuild its table of close data.

    :param open: n_clicks of the 'detail-info-button' (open) button.
    :param close: n_clicks of the 'close-detail-info' button.
    :param is_open: current modal visibility.
    :param select: slider value, mapped to a period via changePeriod().
    :return: (new is_open state, table component for the 'record' output)

    Fix: removed a leftover debug print that fired on every callback.
    NOTE(review): ``open``/``close`` shadow builtins; Dash passes arguments
    positionally, so the names are cosmetic only and kept for compatibility.
    """
    select = changePeriod(select)
    result = user.closeData(select, choice=False)
    # Header row mirrors the DataFrame's column names.
    table_header = [
        html.Thead(html.Tr([html.Th(col) for col in list(result.columns)]))
    ]
    # One <tr> per DataFrame row.
    table_row = [html.Tr([html.Td(data) for data in row])
                 for row in result.values.tolist()]
    result = html.Div(dbc.Table(table_header + [html.Tbody(table_row)], bordered=True))
    if open or not close:
        return not is_open, result
    return is_open, result
@app.callback(
    [
        Output('info-datetime', 'value'),
        Output('default-predict-date', 'min_date_allowed')],
    Input({'type': 'filter-dropdown'}, 'value')
)
def page3DateResult(name):
    """Record the selected user and propagate their start date to both date widgets."""
    user.name = name
    first_available = user.getStartDate(name)
    return first_available, first_available
@app.callback(
    Output('detail-info-output', 'children'),
    Input('default-predict-date', 'date')
)
def page3OutputResult(pDate):
    """Render the page-3 detail output for the picked prediction date."""
    # The date picker yields a bare date string; append midnight so that
    # downstream code receives a full datetime.
    pDate += ' 0:0:0'
    result = user.page3Data(pDate)
    return page3Layout(result, user.changedUserData(user.date), user.changedUserData(pDate))
| 42.685039 | 130 | 0.501752 |
41a0032a19d24a072da7c168b458a1fd7d4d07a1 | 983 | py | Python | core/urls.py | Wanderer2436/django_pharmacy | 2e12c41e30f2f2e2c0f3abdaded98a917420f5b8 | [
"MIT"
] | null | null | null | core/urls.py | Wanderer2436/django_pharmacy | 2e12c41e30f2f2e2c0f3abdaded98a917420f5b8 | [
"MIT"
] | 2 | 2022-03-31T14:34:44.000Z | 2022-03-31T14:35:17.000Z | core/urls.py | Wanderer2436/django_pharmacy | 2e12c41e30f2f2e2c0f3abdaded98a917420f5b8 | [
"MIT"
] | null | null | null | from django.urls import path
import core.views
# URL namespace: reverse with e.g. 'core:catalog'.
app_name = 'core'
urlpatterns = [
    # Landing page.
    path('', core.views.IndexView.as_view(), name='home'),
    # Product catalog: full list, category filter, and product CRUD views.
    path('catalog/', core.views.ProductList.as_view(), name='catalog'),
    path('catalog/category/<int:category_id>/', core.views.ProductList.as_view(), name='category'),
    path('catalog/create', core.views.ProductCreate.as_view(), name='product_create'),
    path('catalog/product/<int:pk>/delete/', core.views.ProductDelete.as_view(), name='product_delete'),
    path('catalog/product/<int:pk>/update/', core.views.ProductUpdate.as_view(), name='product_update'),
    path('catalog/product/<int:pk>/', core.views.ProductDetail.as_view(), name='products'),
    path('catalog/product/add_review/<int:pk>/', core.views.Review.as_view(), name='review'),
    # Pharmacies and the products stocked in each one.
    path('pharmacy/', core.views.PharmacyList.as_view(), name='pharmacy'),
    path('pharmacy/<int:pharmacy_id>/', core.views.ProductInPharmacy.as_view(), name='product_in_pharmacy'),
]
ae2a59502bc1362b9bc6e318befc7dbc54d3d5cb | 4,085 | py | Python | python_module/megengine/functional/graph.py | WestCityInstitute/MegEngine | f91881ffdc051ab49314b1bd12c4a07a862dc9c6 | [
"Apache-2.0"
] | 2 | 2020-03-26T08:26:29.000Z | 2020-06-01T14:41:38.000Z | python_module/megengine/functional/graph.py | ted51/MegEngine | f91881ffdc051ab49314b1bd12c4a07a862dc9c6 | [
"Apache-2.0"
] | null | null | null | python_module/megengine/functional/graph.py | ted51/MegEngine | f91881ffdc051ab49314b1bd12c4a07a862dc9c6 | [
"Apache-2.0"
] | 1 | 2020-11-09T06:29:51.000Z | 2020-11-09T06:29:51.000Z | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
from typing import Iterable, Optional, Union
import megengine._internal as mgb
from ..core.graph import get_default_graph
from ..core.tensor import Tensor, wrap_io_tensor
from ..jit import barrier, mark_impure
@wrap_io_tensor
def grad(
    target: Tensor,
    wrt: Union[Tensor, Iterable[Tensor]],
    warn_mid_wrt: bool = True,
    use_virtual_grad: bool = None,
    return_zero_for_nodep: bool = True,
) -> Union[Tensor, Iterable[Optional[Tensor]], None]:
    r"""compute symbolic grad

    :param target: grad target var
    :param wrt: with respect to which to compute the grad
    :param warn_mid_wrt: whether to give warning if ``wrt`` is not endpoint
    :param use_virtual_grad: whether to use virtual grad opr, so fwd graph can
        be optimized before applying grad; if ``None`` is given, then virtual
        grad would be used if ``graph_opt_level >= 2``
    :param return_zero_for_nodep: if ``target`` does not depend on ``wrt``, set to True to return
        a zero-valued :class:`~.Tensor` rather than ``None``; can't be set to False when using
        virtual grad opr.
    :return: :math:`\partial\text{target} / \partial\text{wrt}`
    """
    if not isinstance(wrt, mgb.SymbolVar):
        # Fix: ``collections.Iterable`` was deprecated since Python 3.3 and
        # removed in 3.10; the ABC lives in ``collections.abc``.
        from collections.abc import Iterable as _AbcIterable
        assert isinstance(wrt, _AbcIterable)
        wrt = [w._symvar for w in wrt]

    return mgb.grad(target, wrt, warn_mid_wrt, use_virtual_grad, return_zero_for_nodep)
# Cache mapping (graph id, dest var id, delta var id) -> (alpha, beta, bias,
# config) so that repeated add_update calls on the same vars reuse one set of
# SharedScalars (their values are overwritten in place on reuse).
_add_update_cache = {}  # type: dict
# Presumably a placeholder SharedScalar input required by the add_update opr
# signature — TODO confirm against mgb.mgb._Opr.add_update.
_dummy = mgb.SharedScalar(0)
def add_update(
    dest: Tensor,
    delta: Tensor,
    *,
    alpha: Union[Tensor, float, int] = 1.0,
    beta: Union[Tensor, float, int] = 1.0,
    bias: Union[Tensor, float, int] = 0.0
):
    r"""Inplace modify ``dest`` as follows:

    .. math::

        dest = alpha * dest + beta * delta + bias

    :param dest: input data that will be inplace modified.
    :param delta: update value that will be added to ``dest``.
    :param alpha: weight ratio of ``dest``. Default: 1.0
    :param beta: weight ratio of ``delta``. Default: 1.0
    :param bias: bias value appended to the result. Default: 0.0
    :return: a :class:`Tensor` wrapping the add_update opr output.
    """
    # The opr below receives alpha/beta/bias as SharedScalar values, so any
    # Tensor-valued coefficient is first folded algebraically into ``delta``
    # (leaving the scalar at its neutral value 1.0/0.0).
    if isinstance(beta, Tensor) or isinstance(alpha, Tensor):
        delta *= beta
        beta = 1.0
    if isinstance(alpha, Tensor):
        delta += (alpha - 1.0) * dest
        alpha = 1.0
    if isinstance(bias, Tensor):
        delta += bias
        bias = 0.0
    comp_graph = dest._comp_graph or get_default_graph()
    comp_node = dest._comp_node
    if not isinstance(delta, Tensor):
        # Plain numbers are wrapped as an immutable var on dest's graph/node.
        _delta = mgb.make_immutable(
            value=delta, comp_node=comp_node, comp_graph=comp_graph
        )
    else:
        _delta = delta._attach(comp_graph)
    _dest = dest._attach(comp_graph)
    # use (dest, delta) as the key, so we could not add the same delta to dest in static graph
    key = (comp_graph._id(), _dest.id, _delta.id)
    if key in _add_update_cache:
        # Cache hit: reuse the SharedScalars, only overwrite their values.
        _alpha, _beta, _bias, config = _add_update_cache[key]
        mgb.mgb._mgb.SharedScalar__set(_alpha, alpha)
        mgb.mgb._mgb.SharedScalar__set(_beta, beta)
        mgb.mgb._mgb.SharedScalar__set(_bias, bias)
    else:
        _alpha = mgb.SharedScalar(alpha)
        _beta = mgb.SharedScalar(beta)
        _bias = mgb.SharedScalar(bias)
        config = mgb.helper.gen_config(None, comp_node, None)
        _add_update_cache[key] = (_alpha, _beta, _bias, config)
    u = mgb.mgb._Opr.add_update(
        _dest, barrier(_delta), _alpha, _beta, _bias, _dummy, config
    )
    # The opr mutates state; mark it impure so it is not pruned/optimized away.
    mark_impure(u)
    return Tensor(u)
@wrap_io_tensor
def add_extra_vardep(oup: Tensor, dep: Tensor):
    r"""Explicitly set the dependency that tensor ``oup`` depends on tensor ``dep``.

    :param oup: tensor whose dependency set is extended.
    :param dep: tensor that must be computed before ``oup``.
    :return: result of ``mgb.config.add_extra_vardep`` (wrapped by the decorator).
    """
    return mgb.config.add_extra_vardep(oup, dep)
| 33.760331 | 97 | 0.667564 |
920c890613ee43ed59316e8a2b2afa228a993883 | 3,077 | py | Python | setup.py | pedromanrique/tensorflow-tts | 62796295284c7cc19f3224df1116a82ce3b1c1d7 | [
"Apache-2.0"
] | 2 | 2020-07-03T05:47:47.000Z | 2020-07-03T19:59:09.000Z | setup.py | RakaMaru/TensorflowTTS | 473eb717503454fa2eabadefd7bcd2459673f6f2 | [
"Apache-2.0"
] | null | null | null | setup.py | RakaMaru/TensorflowTTS | 473eb717503454fa2eabadefd7bcd2459673f6f2 | [
"Apache-2.0"
] | 4 | 2021-02-23T13:05:59.000Z | 2021-04-23T05:15:32.000Z | """Setup Tensorflow TTS libarary."""
import os
import pip
import sys
from distutils.version import LooseVersion
from setuptools import find_packages
from setuptools import setup
# Guard against unsupported interpreter / packaging tool versions up front.
# NOTE(review): distutils.version.LooseVersion is deprecated (distutils removed
# in Python 3.12) — consider packaging.version at some point.
if LooseVersion(sys.version) < LooseVersion("3.6"):
    raise RuntimeError(
        "Tensorflow TTS requires python >= 3.6, "
        "but your Python version is {}".format(sys.version)
    )

if LooseVersion(pip.__version__) < LooseVersion("19"):
    raise RuntimeError(
        "pip>=19.0.0 is required, but your pip version is {}. "
        'Try again after "pip install -U pip"'.format(pip.__version__)
    )

# TODO(@dathudeptrai) update requirement if needed.
# Dependency groups: "install" is runtime, "setup" for build time, "test" for CI.
requirements = {
    "install": [
        "tensorflow-gpu>=2.2.0",
        "tensorflow-addons>=0.9.1",
        "setuptools>=38.5.1",
        "librosa>=0.7.0",
        "soundfile>=0.10.2",
        "matplotlib>=3.1.0",
        "PyYAML>=3.12",
        "tqdm>=4.26.1",
        "h5py>=2.10.0",
        "pathos>=0.2.5",
        "unidecode>=1.1.1",
        "inflect>=4.1.0",
        "scikit-learn>=0.22.0",
        "pyworld>=0.2.10",
        "numba<=0.48",  # Fix No module named "numba.decorators"
    ],
    "setup": ["numpy", "pytest-runner",],
    "test": [
        "pytest>=3.3.0",
        "hacking>=1.1.0",
        "flake8<=3.8.3",
        "flake8-docstrings<=1.5.0",
    ],
}

# TODO(@dathudeptrai) update console_scripts.
# Command-line entry points exposed after installation.
entry_points = {
    "console_scripts": [
        "tensorflow-tts-preprocess=tensorflow_tts.bin.preprocess:main",
        "tensorflow-tts-compute-statistics=tensorflow_tts.bin.compute_statistics:main",
        "tensorflow-tts-normalize=tensorflow_tts.bin.normalize:main",
    ]
}

install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
# Every group except install/setup becomes an extras_require target.
extras_require = {
    k: v for k, v in requirements.items() if k not in ["install", "setup"]
}

dirname = os.path.dirname(__file__)
setup(
    name="TensorflowTTS",
    version="0.0",
    url="https://github.com/dathudeptrai/TensorflowTTS",
    author="Minh Nguyen Quan Anh, Eren Gölge, Kuan Chen, Takuya Ebata, Trinh Le Quang",
    author_email="nguyenquananhminh@gmail.com, erengolge@gmail.com, azraelkuan@gmail.com, meguru.mokke@gmail.com, trinhle.cse@gmail.com",
    description="TensorflowTTS: Real-Time State-of-the-art Speech Synthesis for Tensorflow 2",
    long_description=open(os.path.join(dirname, "README.md"), encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    license="Apache-2.0",
    packages=find_packages(include=["tensorflow_tts*"]),
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    entry_points=entry_points,
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        # Fix: "License :: OSI Approved :: Apache-2.0 License" is not a valid
        # trove classifier; PyPI's registered name for Apache 2.0 is below.
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 33.086022 | 137 | 0.646409 |
06fe407f1f6f723cec43df0d963083e621f45f62 | 861 | py | Python | DQM/SiStripMonitorClient/python/RecoForDQM_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQM/SiStripMonitorClient/python/RecoForDQM_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQM/SiStripMonitorClient/python/RecoForDQM_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
# Digitiser ####
# Raw-to-digi unpacking; both unpackers read from the 'source' input label.
# SiStrip
from EventFilter.SiStripRawToDigi.SiStripDigis_cfi import *
siStripDigis.ProductLabel = 'source'
# SiPixel
from EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi import *
siPixelDigis.InputLabel = 'source'
# Local Reco ####
from RecoLocalTracker.Configuration.RecoLocalTracker_cff import *
#DefaultClusterizer.ConditionsLabel = '' #not needed to specify it is used as default
# Track Reconstruction ########
from RecoTracker.Configuration.RecoTracker_cff import *
# Beam Spot ########
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
# Pixel Vertex
from RecoPixelVertexing.Configuration.RecoPixelVertexing_cff import *
# Reconstruction Sequence
# Full chain for collision data: digis -> local reco -> beam spot ->
# pixel vertexing -> CKF track reconstruction.
RecoForDQMCollision = cms.Sequence(siPixelDigis*siStripDigis*trackerlocalreco*offlineBeamSpot*recopixelvertexing*ckftracks)
a591ab7b8d29ee727f7c71954a20eaf617bf9155 | 5,585 | py | Python | notifications.py | labscript-suite-temp-archive/blacs-fork--mearnshaw-blacs--forked-from--labscript_suite-blacs | c3243ad3c9d846138172a08ece58c67ea9e263d6 | [
"BSD-2-Clause"
] | null | null | null | notifications.py | labscript-suite-temp-archive/blacs-fork--mearnshaw-blacs--forked-from--labscript_suite-blacs | c3243ad3c9d846138172a08ece58c67ea9e263d6 | [
"BSD-2-Clause"
] | null | null | null | notifications.py | labscript-suite-temp-archive/blacs-fork--mearnshaw-blacs--forked-from--labscript_suite-blacs | c3243ad3c9d846138172a08ece58c67ea9e263d6 | [
"BSD-2-Clause"
] | null | null | null | #####################################################################
# #
# /notifications.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import QUiLoader
class Notifications(object):
    """Registry of notification panels shown in the BLACS main window.

    Notifications are keyed by their *class*.  For each registered class we
    keep three parallel mappings: the instantiated notification object, its
    full-size widget and its one-line minimized widget.  A notification is
    'shown' (full widget visible), 'hidden' (minimized widget visible) or
    'closed' (neither visible).
    """

    def __init__(self, BLACS):
        self._BLACS = BLACS
        self._notifications = {}        # notification_class -> instance
        self._widgets = {}              # notification_class -> full-size widget
        self._minimized_widgets = {}    # notification_class -> minimized widget

    def add_notification(self, notification_class):
        """Instantiate and register ``notification_class``.

        Returns True on success, False if the class is already registered or
        its setup raised.  Both widgets start hidden ('closed').
        """
        if notification_class in self._notifications:
            return False

        try:
            # instantiate the notification class
            # TODO: Do we need to pass anything in here?
            self._notifications[notification_class] = notification_class(self._BLACS)

            # get the widget
            widget = self._notifications[notification_class].get_widget()

            # get details on whether the widget can be closed or hidden
            properties = self._notifications[notification_class].get_properties()

            # Function shortcuts, wired both to the buttons below and handed
            # to the notification instance via set_functions().
            show_func = lambda: self.show_notification(notification_class)
            hide_func = lambda: self.minimize_notification(notification_class)
            close_func = lambda: self.close_notification(notification_class)
            get_state = lambda: self.get_state(notification_class)

            # Full-size widget: hide/close buttons plus the notification's own widget.
            ui = QUiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'notification_widget.ui'))
            ui.hide_button.setVisible(bool(properties['can_hide']))
            ui.hide_button.clicked.connect(hide_func)
            ui.close_button.setVisible(bool(properties['can_close']))
            ui.close_button.clicked.connect(close_func)
            ui.widget_layout.addWidget(widget)

            # Minimized widget: notification name plus show/close buttons.
            ui2 = QUiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'notification_minimized_widget.ui'))
            if not hasattr(self._notifications[notification_class], 'name'):
                self._notifications[notification_class].name = notification_class.__name__
            ui2.name.setText(self._notifications[notification_class].name)
            ui2.show_button.setVisible(bool(properties['can_hide'])) #If you can hide, you can also show
            ui2.show_button.clicked.connect(show_func)
            ui2.close_button.setVisible(bool(properties['can_close']))
            ui2.close_button.clicked.connect(close_func)

            # pass the show/hide/close functions to the notfication class
            self._widgets[notification_class] = ui
            self._minimized_widgets[notification_class] = ui2
            self._notifications[notification_class].set_functions(show_func,hide_func,close_func,get_state)
        except Exception:
            # Setup failed part-way: unregister so a later retry is possible.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
            # TODO: cleanup a little more
            if notification_class in self._notifications:
                del self._notifications[notification_class]
            return False

        # add the widgets, initially hidden
        ui.setVisible(False)
        ui2.setVisible(False)
        self._BLACS['ui'].notifications.addWidget(ui)
        self._BLACS['ui'].notifications_minimized.addWidget(ui2)

        return True

    def get_instance(self, notification_class):
        """Return the registered instance for ``notification_class``, or None."""
        if notification_class in self._notifications:
            return self._notifications[notification_class]
        return None

    def show_notification(self, notification_class):
        """Show the full-size widget and hide the minimized one."""
        self._widgets[notification_class].setVisible(True)
        self._minimized_widgets[notification_class].setVisible(False)

    def close_notification(self, notification_class):
        """Hide both the full-size and minimized widgets ('closed' state)."""
        self._widgets[notification_class].setVisible(False)
        self._minimized_widgets[notification_class].setVisible(False)

    def minimize_notification(self,notification_class):
        """Collapse to the minimized widget ('hidden' state)."""
        self._widgets[notification_class].setVisible(False)
        self._minimized_widgets[notification_class].setVisible(True)

    def get_state(self,notification_class):
        """Return 'shown', 'hidden' or 'closed' for ``notification_class``."""
        if self._widgets[notification_class].isVisible():
            return 'shown'
        elif self._minimized_widgets[notification_class].isVisible():
            return 'hidden'
        else:
            return 'closed'

    def close_all(self):
        """Close every registered notification.

        Fix: the original iterated the dict's keys (the classes) and called
        ``.close()`` on the class object itself, which raised and was silently
        swallowed — so nothing was ever closed.  Close through this manager
        instead.
        """
        for notification_class in self._notifications:
            try:
                self.close_notification(notification_class)
            except Exception:
                pass
| 46.932773 | 130 | 0.570636 |
3c37006f6562bfc236f1574fe153291362470c05 | 2,146 | py | Python | src/handlers.py | lareira-digital/octodon | 1131bf4ae3b215f1d8021e3ffd5769c5a9a5e9e4 | [
"MIT"
] | null | null | null | src/handlers.py | lareira-digital/octodon | 1131bf4ae3b215f1d8021e3ffd5769c5a9a5e9e4 | [
"MIT"
] | null | null | null | src/handlers.py | lareira-digital/octodon | 1131bf4ae3b215f1d8021e3ffd5769c5a9a5e9e4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Oscar Carballal Prego
# @Date: 2017-04-18 23:25:07
# @Last Modified by: Oscar Carballal Prego
# @Last Modified time: 2017-04-20 22:54:12
import gi
from gi.repository import Gtk
from .parser import SSHConfigParser
class Handler():
    """GTK signal handlers for the SSH-config UI built with Gtk.Builder."""

    def __init__(self, builder):
        # Parsed ~/.ssh/config access object.
        self.ssh_config = SSHConfigParser()
        self.builder = builder
        # Grid that displays the key/value pairs of the selected host.
        self.grid = self.builder.get_object("config_grid")

    def quit_application(self, *args):
        """Terminate the GTK main loop."""
        Gtk.main_quit(*args)

    def show_host_config(self, treeview, treepath, column):
        """Populate the config grid with the host selected in the tree view.

        One row per config key: a right-justified label and an editable entry.
        """
        # TODO: This is a piece of crap code, needs to be replaced
        # whether someone likes it or not.
        # Clear the grid before repopulating
        if self.grid.get_children():
            for element in self.grid.get_children():
                self.grid.remove(element)
        selection = treeview.get_selection()
        model, treeiter = selection.get_selected()
        # Column 0 of the selected row is assumed to be the host name —
        # TODO confirm against the tree model definition.
        hostconf = self.ssh_config.get_host_detail(model[treeiter][0])
        row_count = 0
        for key in hostconf.keys():
            # Define label
            label = Gtk.Label(key)
            label.set_justify(Gtk.Justification.RIGHT)
            # Define input
            gtkinput = Gtk.Entry()
            # List-valued options: only the first value is shown.
            if type(hostconf[key]) == list:
                gtkinput.set_text(hostconf[key][0])
            else:
                gtkinput.set_text(hostconf[key])
            self.grid.attach(label, 0, row_count, 1, 1)
            self.grid.attach(gtkinput, 1, row_count, 2, 1)
            row_count += 1
        self.grid.show_all()
        # NOTE(review): this reset is dead code — row_count is a local about
        # to go out of scope.
        row_count = 0

    def add_new_environment(self, *args):
        """Open the 'add environment' dialog."""
        dialog = self.builder.get_object('dialog_add_environment')
        # NOTE(review): debug print left in — consider removing.
        print(dir(dialog))
        dialog.run()
        # On ok button save details to the file

    def cancel_new_environment(self, dialog):
        """Dismiss the 'add environment' dialog."""
        dialog.destroy()

    def create_new_environment(self, *args):
        # TODO
        pass

    def load_environment(self, combobox):
        """Handler for the environment combobox (work in progress)."""
        # NOTE(review): debug prints left in — consider removing.
        print(dir(combobox))
        print("loading environment")
        print(combobox.get_entry_text_column())
        pass
c6702b3ca275ec56af022a44f608dc739744132e | 4,719 | py | Python | chapter_07/grid_world_sarsa.py | linklab/link_rl_book_codes | b272b46d5ecd2802f34648440ff53641c68cbbf0 | [
"MIT"
] | null | null | null | chapter_07/grid_world_sarsa.py | linklab/link_rl_book_codes | b272b46d5ecd2802f34648440ff53641c68cbbf0 | [
"MIT"
] | 7 | 2020-11-13T18:57:32.000Z | 2022-02-10T01:52:44.000Z | chapter_07/grid_world_sarsa.py | linklab/link_rl_book_codes | b272b46d5ecd2802f34648440ff53641c68cbbf0 | [
"MIT"
] | 1 | 2021-09-07T12:41:33.000Z | 2021-09-07T12:41:33.000Z | import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
import pickle
from environments.gridworld import GridWorld
plt.rcParams["font.family"] = 'AppleGothic'
plt.rcParams["font.size"] = 12
mpl.rcParams['axes.unicode_minus'] = False
GRID_HEIGHT = 4
GRID_WIDTH = 4
TERMINAL_STATES = [(0, 0), (GRID_HEIGHT-1, GRID_WIDTH-1)]
# 초기 하이퍼파라미터 설정: ε=0.2, α=0.5, γ=0.98, n-스텝 = 3, 에피소드 수행 횟수 = 100
EPSILON = 0.2
ALPHA = 0.5
GAMMA = 0.98
MAX_EPISODES = 100
# Build the action-value (Q) table.
def state_action_value(env):
    """Return a Q-table mapping every (state, action) pair to a random initial value."""
    return {
        (state, action): np.random.normal()
        for state in env.observation_space.STATES
        for action in env.action_space.ACTIONS
    }
# Compute the action probabilities of an ε-greedy policy.
def e_greedy(env, e, q, state):
    """Return (actions, probabilities) for an epsilon-greedy policy at ``state``.

    The greedy action gets probability 1 - e + e/|A|; every other action gets e/|A|.
    """
    actions = env.action_space.ACTIONS
    action_values = [q[(state, a)] for a in actions]
    greedy_idx = np.argmax(action_values)
    n_actions = len(action_values)
    prob = [
        (1 - e + e/n_actions) if i == greedy_idx else (e/n_actions)
        for i in range(n_actions)
    ]
    return actions, prob
# Build the ε-greedy policy table over all states.
def generate_e_greedy_policy(env, e, Q):
    """Return {state: (actions, probabilities)} for every state in the environment."""
    return {
        state: e_greedy(env, e, Q, state)
        for state in env.observation_space.STATES
    }
# n-step SARSA (on-policy TD control); see Sutton & Barto, chapter 7.
def n_step_sarsa(env, Q, policy, n):
    """Run n-step SARSA for MAX_EPISODES episodes, updating Q and policy in place.

    :param env: grid-world environment (reset/step interface).
    :param Q: dict mapping (state, action) -> value, updated in place.
    :param policy: dict mapping state -> (actions, probabilities), updated in place.
    :param n: number of bootstrap steps.
    :return: (policy, array of per-episode total rewards)
    """
    episode_reward_list = []
    for episode in range(MAX_EPISODES):
        state = env.reset() # exploring start
        action = np.random.choice(policy[state][0], p=policy[state][1])
        state_trace, action_trace, reward_trace = [state], [action], []
        # current time step
        time_step = 0
        # length of this episode (updated once the terminal state is reached)
        T = float('inf')
        # SARSA == STATE ACTION REWARD STATE ACTION
        while True:
            if time_step < T:
                next_state, reward, done, _ = env.step(action)
                reward_trace.append(reward)
                if done:
                    T = time_step + 1
                else:
                    state_trace.append(next_state)
                    next_action = np.random.choice(policy[next_state][0], p=policy[next_state][1])
                    action_trace.append(next_action)
                    action = next_action
            # tau is the time step whose estimate is updated this iteration
            tau = time_step - n + 1
            if tau >= 0: # need n rewards in reward_trace[] starting from the update state
                # print(len(state_trace), len(action_trace), len(reward_trace)
                # G = discounted n-step return, bootstrapped with Q when the
                # horizon tau+n has not reached the episode end T.
                G = 0
                for i in range(tau + 1, min([tau + n, T]) + 1):
                    G += pow(GAMMA, (i - tau - 1)) * reward_trace[i - 1]
                if tau + n < T:
                    G += pow(GAMMA, n) * Q[state_trace[tau + n], action_trace[tau + n]]
                Q[state_trace[tau], action_trace[tau]] += ALPHA * (G - Q[state_trace[tau], action_trace[tau]])
                # Make the policy greedy (ε-greedy) w.r.t. the updated Q.
                policy[state_trace[tau]] = e_greedy(env, EPSILON, Q, state_trace[tau])
            if tau == T - 1:
                break
            time_step += 1
            state = next_state
        episode_reward = sum(reward_trace)
        episode_reward_list.append(episode_reward)
    return policy, np.asarray(episode_reward_list)
def main():
    """Run n-step SARSA for several n, average over runs and plot the learning curves."""
    # Create the grid-world environment object.
    env = GridWorld(
        height=GRID_HEIGHT,
        width=GRID_WIDTH,
        start_state=None, # exploring start
        terminal_states=TERMINAL_STATES,
        transition_reward=-1.0,
        terminal_reward=-1.0,
        outward_reward=-1.0
    )

    runs = 10
    step_n = [1, 2, 4, 8, 16]
    # data[i, e] accumulates the smoothed reward of step_n[i] at episode e.
    data = np.zeros(shape=(len(step_n), MAX_EPISODES))

    for run in range(runs):
        print("RUNS: {0}".format(run))
        for idx_n, n in enumerate(step_n):
            Q = state_action_value(env)
            policy = generate_e_greedy_policy(env, EPSILON, Q)
            print("n={0} ".format(n), end=" ")
            _, episode_reward_list = n_step_sarsa(env, Q, policy, n)
            # Smooth each curve with a trailing 10-episode moving average.
            avg_episode_reward_list = []
            for episode in range(MAX_EPISODES):
                avg_episode_reward_list.append(episode_reward_list[max(0, episode - 10):(episode + 1)].mean())
            for idx in range(MAX_EPISODES):
                data[idx_n, idx] += avg_episode_reward_list[idx]
            print()

    # Average over runs and plot every 5th episode for readability.
    data[:, :] /= runs
    marker = ['o', 'x', '.', 's', '*', '+', '|', '^', 'D', ' ']
    for idx_n, n in enumerate(step_n):
        plt.plot(range(0, MAX_EPISODES, 5), data[idx_n, ::5], marker=marker[idx_n], label='n = {0}'.format(step_n[idx_n]))
    plt.xlabel('에피소드')
    plt.ylabel('에피소드별 평균 리워드')
    plt.legend()
    plt.savefig('images/n_step_sarsa.png')
    plt.close()

if __name__ == '__main__':
    main()
7fde633e76b92579f73735cbf88f93b628787a41 | 297 | py | Python | src/model_training.py | nworb95/clean-code-ml | 59812fb507ae82212f119ea4a8ab831cae09ef0d | [
"Apache-2.0"
] | 1 | 2020-11-06T21:43:59.000Z | 2020-11-06T21:43:59.000Z | src/model_training.py | nworb95/clean-code-ml | 59812fb507ae82212f119ea4a8ab831cae09ef0d | [
"Apache-2.0"
] | null | null | null | src/model_training.py | nworb95/clean-code-ml | 59812fb507ae82212f119ea4a8ab831cae09ef0d | [
"Apache-2.0"
def train_model(ModelClass, X_train, Y_train, **kwargs):
    """Fit ModelClass(**kwargs) on the training data and report training accuracy.

    :param ModelClass: estimator class exposing fit() and score().
    :param X_train: training features.
    :param Y_train: training labels.
    :param kwargs: forwarded to the ModelClass constructor.
    :return: (fitted model, training accuracy as a percentage rounded to 2 dp)
    """
    fitted = ModelClass(**kwargs)
    fitted.fit(X_train, Y_train)
    pct = round(fitted.score(X_train, Y_train) * 100, 2)
    print(f'accuracy ({ModelClass.__name__}): {pct}')
    return fitted, pct
| 33 | 66 | 0.690236 |
cb07cce486ca8c4aa8d8c3cf706b163d06ec3bcb | 777 | py | Python | mycroft/utils.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/utils.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/utils.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | import daphne_context
import random
def generate_unique_mycroft_session():
    """Return a 4-digit session code not currently assigned to any user.

    Gives up after 100 collisions and returns the last candidate (which may
    collide) rather than looping forever when the code space is exhausted.
    """
    current_sessions = get_current_mycroft_sessions()
    session = generate_mycroft_session()
    tries = 0
    while session in current_sessions:
        session = generate_mycroft_session()
        tries += 1
        if tries > 100:
            # Fix: the original had a bare ``session`` expression here (a
            # no-op), so the loop could spin forever; bail out instead.
            break
    return session
def get_current_mycroft_sessions():
    """Return the mycroft session codes currently assigned to any user."""
    user_infos = daphne_context.models.UserInformation.objects.all()
    return [
        info.mycroft_session
        for info in user_infos
        if info.mycroft_session is not None
    ]
def generate_mycroft_session():
    """Return a random session code: four decimal digits, zero-padded."""
    # Idiom fix: zero-pad via a format spec instead of the manual prepend
    # loop; randint(0, 9999) is already non-negative, so abs() was redundant.
    return '{:04d}'.format(random.randint(0, 9999))
| 22.2 | 65 | 0.679537 |
b28f38d9699bfd534d57eb70be43900c2c335772 | 7,098 | py | Python | h/routes.py | rickyhan/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | [
"MIT"
] | 2 | 2021-11-07T23:14:54.000Z | 2021-11-17T10:11:55.000Z | h/routes.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | [
"MIT"
] | null | null | null | h/routes.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | [
"MIT"
] | 1 | 2017-03-12T00:18:33.000Z | 2017-03-12T00:18:33.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def includeme(config):
    """Register all of the application's URL routes on the Pyramid configurator.

    Routes using ``factory``/``traverse`` resolve their context object via
    Pyramid traversal; routes marked ``static=True`` are external URLs used
    only for URL generation, never matched against incoming requests.
    """
    # Core
    config.add_route('index', '/')
    config.add_route('robots', '/robots.txt')
    config.add_route('via_redirect', '/via')

    # Accounts
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('signup', '/signup')
    config.add_route('activate', '/activate/{id}/{code}')
    config.add_route('forgot_password', '/forgot-password')
    config.add_route('account_reset', '/account/reset')
    config.add_route('account_reset_with_code', '/account/reset/{code}')
    config.add_route('account', '/account/settings')
    config.add_route('account_profile', '/account/profile')
    config.add_route('account_notifications', '/account/settings/notifications')
    config.add_route('account_developer', '/account/developer')
    config.add_route('claim_account_legacy', '/claim_account/{token}')
    config.add_route('dismiss_sidebar_tutorial', '/app/dismiss_sidebar_tutorial')

    # Activity
    config.add_route('activity.search', '/search')
    config.add_route('activity.user_search',
                     '/users/{username}',
                     factory='h.models.user:UserFactory',
                     traverse='/{username}')

    # Admin
    config.add_route('admin_index', '/admin/')
    config.add_route('admin_admins', '/admin/admins')
    config.add_route('admin_badge', '/admin/badge')
    config.add_route('admin_features', '/admin/features')
    config.add_route('admin_cohorts', '/admin/features/cohorts')
    config.add_route('admin_cohorts_edit', '/admin/features/cohorts/{id}')
    config.add_route('admin_groups', '/admin/groups')
    config.add_route('admin_groups_csv', '/admin/groups.csv')
    config.add_route('admin_mailer', '/admin/mailer')
    config.add_route('admin_mailer_test', '/admin/mailer/test')
    config.add_route('admin_nipsa', '/admin/nipsa')
    config.add_route('admin_staff', '/admin/staff')
    config.add_route('admin_users', '/admin/users')
    config.add_route('admin_users_activate', '/admin/users/activate')
    config.add_route('admin_users_delete', '/admin/users/delete')
    config.add_route('admin_users_rename', '/admin/users/rename')

    # Annotations & stream
    config.add_route('annotation',
                     '/a/{id}',
                     factory='memex.resources:AnnotationResourceFactory',
                     traverse='/{id}')
    config.add_route('stream', '/stream')
    config.add_route('stream.user_query', '/u/{user}')
    config.add_route('stream.tag_query', '/t/{tag}')

    # Assets
    config.add_route('assets', '/assets/*subpath')

    # API
    # For historical reasons, the `api` route ends with a trailing slash. This
    # is not (or should not) be necessary, but for now the client will
    # construct URLs incorrectly if its `apiUrl` setting does not end in a
    # trailing slash.
    config.add_route('api.index', '/api/')
    config.add_route('api.annotations', '/api/annotations')
    # Annotation IDs are 20-22 URL-safe base64 characters.
    config.add_route('api.annotation',
                     '/api/annotations/{id:[A-Za-z0-9_-]{20,22}}',
                     factory='memex.resources:AnnotationResourceFactory',
                     traverse='/{id}')
    config.add_route('api.annotation_flag',
                     '/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/flag',
                     factory='memex.resources:AnnotationResourceFactory',
                     traverse='/{id}')
    config.add_route('api.annotation_hide',
                     '/api/annotations/{id:[A-Za-z0-9_-]{20,22}}/hide',
                     factory='memex.resources:AnnotationResourceFactory',
                     traverse='/{id}')
    config.add_route('api.annotation.jsonld',
                     '/api/annotations/{id:[A-Za-z0-9_-]{20,22}}.jsonld',
                     factory='memex.resources:AnnotationResourceFactory',
                     traverse='/{id}')
    config.add_route('api.profile', '/api/profile')
    config.add_route('api.debug_token', '/api/debug-token')
    config.add_route('api.search', '/api/search')
    config.add_route('api.users', '/api/users')
    config.add_route('badge', '/api/badge')
    config.add_route('token', '/api/token')

    # Client
    config.add_route('session', '/app')
    config.add_route('sidebar_app', '/app.html')
    config.add_route('embed', '/embed.js')

    # Feeds
    config.add_route('stream_atom', '/stream.atom')
    config.add_route('stream_rss', '/stream.rss')

    # Groups
    config.add_route('group_create', '/groups/new')
    config.add_route('group_edit',
                     '/groups/{pubid}/edit',
                     factory='h.models.group:GroupFactory',
                     traverse='/{pubid}')
    config.add_route('group_leave',
                     '/groups/{pubid}/leave',
                     factory='h.models.group:GroupFactory',
                     traverse='/{pubid}')

    # Match "/<pubid>/": we redirect to the version with the slug.
    config.add_route('group_read',
                     '/groups/{pubid}/{slug:[^/]*}',
                     factory='h.models.group:GroupFactory',
                     traverse='/{pubid}')
    config.add_route('group_read_noslug',
                     '/groups/{pubid}',
                     factory='h.models.group:GroupFactory',
                     traverse='/{pubid}')

    # Help
    config.add_route('help', '/docs/help')
    config.add_route('onboarding', '/welcome/')
    config.add_route('custom_onboarding', '/welcome/{slug}')

    # Notification
    config.add_route('unsubscribe', '/notification/unsubscribe/{token}')

    # Health check
    config.add_route('status', '/_status')

    # Static
    config.add_route('about', '/about/', static=True)
    config.add_route('bioscience', '/bioscience/', static=True)
    config.add_route('blog', '/blog/', static=True)
    config.add_route(
        'chrome-extension',
        'https://chrome.google.com/webstore/detail/bjfhmglciegochdpefhhlphglcehbmek',
        static=True)
    config.add_route('contact', '/contact/', static=True)
    config.add_route('contribute', '/contribute/', static=True)
    config.add_route('education', '/education/', static=True)
    config.add_route('for-publishers', '/for-publishers/', static=True)
    config.add_route('fund', '/fund/', static=True)
    config.add_route(
        'help-center', 'https://hypothesis.zendesk.com/hc/en-us', static=True)
    config.add_route(
        'hypothesis-github', 'https://github.com/hypothesis', static=True)
    config.add_route(
        'hypothesis-twitter', 'https://twitter.com/hypothes_is', static=True)
    config.add_route('jobs', '/jobs/', static=True)
    config.add_route('press', '/press/', static=True)
    config.add_route('privacy', '/privacy/', static=True)
    config.add_route('roadmap', '/roadmap/', static=True)
    config.add_route('team', '/team/', static=True)
    config.add_route('terms-of-service', '/terms-of-service/', static=True)
    config.add_route(
        'wordpress-plugin', 'https://wordpress.org/plugins/hypothesis/',
        static=True)
2cbd5e920eadb7e396362d39cb3b4ac5f4a09ecc | 4,557 | py | Python | gem_metrics/local_recall.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 30 | 2021-02-06T04:58:14.000Z | 2022-03-04T11:26:14.000Z | gem_metrics/local_recall.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 70 | 2021-01-12T17:55:15.000Z | 2022-03-30T17:37:02.000Z | gem_metrics/local_recall.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 14 | 2021-01-30T20:55:17.000Z | 2022-03-24T02:31:21.000Z | from .metric import ReferencedMetric
from collections import Counter, defaultdict
class LocalRecall(ReferencedMetric):
    """
    LocalRecall checks the extent to which a model produces the same tokens as the reference data.
    For each item, tokens receive an importance score. If all N annotators use a particular word,
    that word gets an importance score of N.
    The output of this metric is a dictionary with {1:score, ..., N: score}.
    The local recall metric is based on Van Miltenburg et al. (2018).
    Paper: https://www.aclweb.org/anthology/C18-1147/
    Repository: https://github.com/evanmiltenburg/MeasureDiversity/blob/master/local_recall.py
    The main difference is that Van Miltenburg et al. only include content words,
    while the code below just counts ALL tokens, including determiners (a, the) etc.
    This means that the scores produced by this code will be higher than the ones produced by the original code.
    The advantage is that we don't have to rely on a part-of-speech tagger.
    """

    def support_caching(self):
        # LocalRecall is corpus-level, so individual examples can't be aggregated.
        return False

    def compute(self, cache, predictions, references):
        # `cache` is unused here (see support_caching); predictions/references
        # are project objects exposing tokenized, lowercased, punctuation-free text.
        results = LocalRecall.local_recall_scores(
            predictions.list_tokenized_lower_nopunct,
            references.list_tokenized_lower_nopunct,
        )
        return {"local_recall": results}

    @staticmethod
    def build_reference_index(refs):
        """
        Build reference index for a given item.
        Input: list of lists (list of sentences, where each sentence is a list of string tokens).
        Output: dictionary with key: int (1-number of references), value: set of words.
        """
        # Count in how many references each word occurs (set() deduplicates
        # repeated words within a single reference).
        counts = Counter()
        for ref in refs:
            counts.update(set(ref))
        # Invert: importance level N -> set of words used by exactly N annotators.
        importance_index = defaultdict(set)
        for word, count in counts.items():
            importance_index[count].add(word)
        return importance_index

    @staticmethod
    def check_item(prediction, refs):
        """
        Check whether the predictions capture words that are frequently mentioned.
        This function produces more info than strictly needed.
        Use the detailed results to analyze system performance.
        """
        reference_index = LocalRecall.build_reference_index(refs)
        pred_tokens = set(prediction)
        results = dict()
        for n in range(1, len(refs) + 1):
            # Words of importance n that the prediction actually produced.
            overlap = pred_tokens & reference_index[n]
            results[f"overlap-{n}"] = overlap
            results[f"size-overlap-{n}"] = len(overlap)
            results[f"refs-{n}"] = reference_index[n]
            results[f"size-refs-{n}"] = len(reference_index[n])
            # Just in case there are no words at all that occur in all references,
            # Make score equal to None to avoid divide by zero error.
            # This also avoids ambiguity between "no items recalled" and "no items to recall".
            if len(reference_index[n]) > 0:
                results[f"item-score-{n}"] = len(overlap) / len(reference_index[n])
            else:
                results[f"item-score-{n}"] = None
        return results

    @staticmethod
    def replace(a_list, to_replace, replacement):
        """
        Returns a_list with all occurrences of to_replace replaced with replacement.
        """
        return [replacement if x == to_replace else x for x in a_list]

    @staticmethod
    def aggregate_score(outcomes):
        """
        Produce an aggregate score based on a list of tuples: [(size_overlap, size_refs)]
        """
        overlaps, ref_numbers = zip(*outcomes)
        # Defensive: map any None ref counts to 0 before summing.
        ref_numbers = LocalRecall.replace(ref_numbers, None, 0)
        score = (sum(overlaps) / sum(ref_numbers)) if sum(ref_numbers) > 0 else 0
        return score

    @staticmethod
    def local_recall_scores(predictions, full_references):
        """
        Compute local recall scores.

        Returns {n: aggregate score} for n = 1 .. max number of references
        seen for any item in the corpus.
        """
        num_refs = set()
        outcomes = defaultdict(list)
        for pred, refs in zip(predictions, full_references):
            results = LocalRecall.check_item(pred, refs)
            total_refs = len(refs)
            num_refs.add(total_refs)
            for n in range(1, total_refs + 1):
                pair = (results[f"size-overlap-{n}"], results[f"size-refs-{n}"])
                outcomes[n].append(pair)
        # NOTE(review): items with fewer references than max(num_refs) simply
        # contribute nothing to the higher-n buckets.
        scores = {
            n: LocalRecall.aggregate_score(outcomes[n])
            for n in range(1, max(num_refs) + 1)
        }
        return scores
| 39.973684 | 112 | 0.639456 |
aaae183ca161b218fd96145d3845cbef47321f95 | 4,602 | py | Python | Scweet/user.py | pydxflwb/Scweet | 4d6892edf3b76ff9ec50cda5ee570b80b064b46a | [
"MIT"
] | null | null | null | Scweet/user.py | pydxflwb/Scweet | 4d6892edf3b76ff9ec50cda5ee570b80b064b46a | [
"MIT"
] | null | null | null | Scweet/user.py | pydxflwb/Scweet | 4d6892edf3b76ff9ec50cda5ee570b80b064b46a | [
"MIT"
] | null | null | null | import csv
import os
import datetime
import argparse
import pandas as pd
import utils
from time import sleep
import random
def get_user_information(users, driver=None, headless=True):
    """ get user information if the "from_account" argument is specified """
    # NOTE(review): the `driver` argument is ignored — a fresh driver is
    # always created here; confirm whether passing a driver should be honored.
    driver = utils.init_driver(headless=headless)
    users_info = {}

    for i, user in enumerate(users):
        # Open the user's profile page before scraping.
        log_user_page(user, driver)
        if user is not None:
            try:
                # Follower/following counts; if these fail the profile is
                # assumed unavailable and the user is skipped entirely.
                following = driver.find_element_by_xpath(
                    '//a[contains(@href,"/following")]/span[1]/span[1]').text
                followers = driver.find_element_by_xpath(
                    '//a[contains(@href,"/followers")]/span[1]/span[1]').text
                # nickname = driver.find_element_by_xpath(
                #     '//a[contains(@href,"/followers")]/../../../div[2]/div[1]/div[1]/div[1]/div[1]/span[1]/span[1]').text
            except Exception as e:
                #print(e)
                continue
            try:
                # Optional website link from the profile header.
                element = driver.find_element_by_xpath('//div[contains(@data-testid,"UserProfileHeader_Items")]//a[1]')
                website = element.get_attribute("href")
            except Exception as e:
                #print(e)
                website = ""
            try:
                # Profile bio / description.
                desc = driver.find_element_by_xpath('//div[contains(@data-testid,"UserDescription")]').text
            except Exception as e:
                #print(e)
                desc = ""
            # NOTE(review): `a` is assigned but never used — candidate for removal.
            a=0
            try:
                # Best case: header has location + birthday + join date (3 spans).
                join_date = driver.find_element_by_xpath(
                    '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[3]').text
                birthday = driver.find_element_by_xpath(
                    '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[2]').text
                location = driver.find_element_by_xpath(
                    '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[1]').text
            except Exception as e:
                #print(e)
                try :
                    # Two spans: join date + one of (birthday | location).
                    # A digit in the first span is taken to mean it's a birthday.
                    join_date = driver.find_element_by_xpath(
                        '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[2]').text
                    span1 = driver.find_element_by_xpath(
                        '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[1]').text
                    if hasNumbers(span1):
                        birthday = span1
                        location = ""
                    else :
                        location = span1
                        birthday = ""
                except Exception as e:
                    #print(e)
                    try :
                        # One span: only the join date is present.
                        join_date = driver.find_element_by_xpath(
                            '//div[contains(@data-testid,"UserProfileHeader_Items")]/span[1]').text
                        birthday = ""
                        location = ""
                    except Exception as e:
                        #print(e)
                        join_date = ""
                        birthday = ""
                        location = ""

            print("--------------- " + user + " information : ---------------")
            # print("Nickname : ", nickname)
            print("Following : ", following)
            print("Followers : ", followers)
            print("Location : ", location)
            print("Join date : ", join_date)
            print("Birth date : ", birthday)
            print("Description : ", desc)
            print("Website : ", website)
            users_info[user] = [following, followers, join_date, birthday, location, website, desc]

            # NOTE(review): the driver is closed and the dict returned only on
            # the final iteration; if the last user is None, nothing is returned.
            if i == len(users)-1 :
                driver.close()
                return users_info
        else:
            print("You must specify the user")
            continue
def log_user_page(user, driver, headless=True):
    """Navigate the driver to the Twitter profile page of *user*.

    Random 1-2 second pauses before and after loading make the traffic
    look less bot-like.  `headless` is accepted for API symmetry but unused.
    """
    profile_url = 'https://twitter.com/' + user
    sleep(random.uniform(1, 2))
    driver.get(profile_url)
    sleep(random.uniform(1, 2))
def get_users_followers(users, verbose=1, headless=True, wait=2):
    """Scrape the followers of each account in *users* via the shared helper."""
    return utils.get_users_follow(users, headless, "followers", verbose, wait=wait)
def get_users_following(users, verbose=1, headless=True, wait=2):
    """Scrape the accounts each user in *users* is following via the shared helper."""
    return utils.get_users_follow(users, headless, "following", verbose, wait=wait)
def hasNumbers(inputString):
    """Return True when *inputString* contains at least one decimal digit."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
if __name__ == '__main__':
    # Example/manual run: scrape profile information for two accounts.
    users = ['@YourAnonCentral', '@YourAnonNews']
    result = get_user_information(users, headless=True)
| 35.674419 | 123 | 0.524989 |
c9fcb4b818587b93cc9dc9d2d41947bbac4169a7 | 3,018 | py | Python | ngraph_onnx/onnx_importer/utils/types.py | cliveseldon/ngraph-onnx | a2d20afdc7acd5064e4717612ad372d864d03d3d | [
"Apache-2.0"
] | null | null | null | ngraph_onnx/onnx_importer/utils/types.py | cliveseldon/ngraph-onnx | a2d20afdc7acd5064e4717612ad372d864d03d3d | [
"Apache-2.0"
] | null | null | null | ngraph_onnx/onnx_importer/utils/types.py | cliveseldon/ngraph-onnx | a2d20afdc7acd5064e4717612ad372d864d03d3d | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE, TENSOR_TYPE_TO_NP_TYPE
from onnx import TensorProto
import ngraph as ng
from ngraph.impl import Node as NgraphNode
from ngraph.impl import Type as NgraphType
from ngraph.utils.types import get_dtype
from typing import Any, Tuple
logger = logging.getLogger(__name__)
def onnx_tensor_type_to_numpy_type(data_type):  # type: (Any) -> np.dtype
    """Return ONNX TensorProto type mapped into numpy dtype.

    :param data_type: The type we want to convert from. Either an int
        (TensorProto.DataType enum value) or the enum value's string name.
    :return: Converted numpy dtype.
    :raises ValueError: if data_type is neither an int nor a string.
    """
    # isinstance is preferred over `type(x) is ...` — it also accepts
    # subclasses such as IntEnum-derived values.
    if isinstance(data_type, int):
        return TENSOR_TYPE_TO_NP_TYPE[data_type]
    elif isinstance(data_type, str):
        return TENSOR_TYPE_TO_NP_TYPE[TensorProto.DataType.Value(data_type)]
    else:
        # Interpolate eagerly: the original passed the argument separately to
        # ValueError, so '%s' was never substituted into the message.
        raise ValueError('Unsupported data type representation (%s).' % str(type(data_type)))
def np_dtype_to_tensor_type_name(data_type):  # type: (np.dtype) -> str
    """Return TensorProto type name respective to provided numpy dtype.

    :param data_type: Numpy dtype we want to convert.
    :return: String representation of TensorProto type name.
    """
    tensor_type = NP_TYPE_TO_TENSOR_TYPE[data_type]
    return TensorProto.DataType.Name(tensor_type)
def np_dtype_to_tensor_type(data_type):  # type: (np.type) -> int
    """Return TensorProto type for provided numpy dtype.

    :param data_type: Numpy data type object.
    :return: TensorProto.DataType enum value for corresponding type.
    """
    # Direct lookup; raises KeyError for dtypes without an ONNX counterpart.
    return NP_TYPE_TO_TENSOR_TYPE[data_type]
def get_bool_nodes(nodes):  # type: (Tuple[NgraphNode, ...]) -> Tuple[NgraphNode, ...]
    """Convert each input node to bool data type if necessary.

    :param nodes: Input nodes to be converted.
    :return: Converted nodes.
    """
    converted = []
    for candidate in nodes:
        if candidate.get_element_type() == NgraphType.boolean:
            # Already boolean — keep as-is.
            converted.append(candidate)
        else:
            converted.append(ng.convert(candidate, bool))
            logger.warning('Converting node of type: <{}> to bool.'.format(get_dtype(
                candidate.get_element_type())))
    return tuple(converted)
0b520b3e583daba06e0646d1725c1194802bc87c | 1,202 | py | Python | Day 7/HangMan.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 72 | 2021-02-20T06:00:46.000Z | 2022-03-29T21:54:01.000Z | Day 7/HangMan.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 2 | 2021-06-05T17:39:16.000Z | 2022-01-30T08:58:14.000Z | Day 7/HangMan.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 21 | 2021-04-03T09:59:48.000Z | 2022-01-30T20:24:43.000Z | import random
from replit import clear
from hangman_words import word_list
from hangman_art import stages, logo
print(logo)
display = []
end_of_game = False
lives = 6
chosen_word = random.choice(word_list)
#testing code
#print(f"The choosen word is: {chosen_word}")
length = len(chosen_word)
for _ in range(length):
display += "_"
while not end_of_game:
guess = input("Guess a letter: ").lower()
clear() #Clear the screen after each guess
#Formating
print(f"{' '.join(display)}")
if guess not in chosen_word:
print(f"\nYou guessed {guess}, that's no in the word. You loose a life!")
if guess in display:
print(f"\nYou have already guessed {guess}.")
#Check if the letter the user guessed is one of the letters in the chosen_word.
for position in range(length):
letter = chosen_word[position]
if letter in guess:
display[position] = letter
#Win
if "_" not in display:
end_of_game = True
print("\nYou Win!")
#loose
if guess not in chosen_word:
lives -= 1
if lives == 0:
end_of_game = True
print("\nYou loose!")
print(stages[lives]) | 22.679245 | 83 | 0.634775 |
1058761793e05fec48d546880e2a626054cb0e05 | 39,080 | py | Python | build/plugins/ytest.py | dmitrySorokin/catboost | 7a12febbc181eb977bca1f0dceba8f046795be11 | [
"Apache-2.0"
] | null | null | null | build/plugins/ytest.py | dmitrySorokin/catboost | 7a12febbc181eb977bca1f0dceba8f046795be11 | [
"Apache-2.0"
] | null | null | null | build/plugins/ytest.py | dmitrySorokin/catboost | 7a12febbc181eb977bca1f0dceba8f046795be11 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import json
import copy
import base64
import shlex
import _common
import _metric_resolvers as mr
import _test_const as consts
import _requirements as reqs
import StringIO
import subprocess
import collections
import ymake
# Canonization storage locations and resource-reference patterns.
MDS_URI_PREFIX = 'https://storage.yandex-team.ru/get-devtools/'
MDS_SHEME = 'mds'
CANON_DATA_DIR_NAME = 'canondata'
CANON_OUTPUT_STORAGE = 'canondata_storage'
CANON_RESULT_FILE_NAME = 'result.json'
CANON_MDS_RESOURCE_REGEX = re.compile(re.escape(MDS_URI_PREFIX) + r'(.*?)($|#)')
# sb_vault requirement format: <ENV_NAME>=<value|file>:<owner>:<vault key>
CANON_SB_VAULT_REGEX = re.compile(r"\w+=(value|file):[-\w]+:\w+")
CANON_SBR_RESOURCE_REGEX = re.compile(r'(sbr:/?/?(\d+))')

# Allowed values for the network/dns test requirements.
VALID_NETWORK_REQUIREMENTS = ("full", "restricted")
VALID_DNS_REQUIREMENTS = ("default", "local", "dns64")

# Delimiter between test records in the dart output file.
BLOCK_SEPARATOR = '============================================================='
SPLIT_FACTOR_MAX_VALUE = 1000

# Supported test partitioning strategies.
PARTITION_MODS = ('SEQUENTIAL', 'MODULO')
def ontest_data(unit, *args):
    """Handler for the removed TEST_DATA macro: always report a configure error."""
    ymake.report_configure_error("TEST_DATA is removed in favour of DATA")
def save_in_file(filepath, data):
    # Append the dart record `data` to `filepath`, emitting BLOCK_SEPARATOR
    # first when the file is empty so records stay delimited. No-op when
    # `filepath` is falsy.
    if filepath:
        with open(filepath, 'a') as file_handler:
            if os.stat(filepath).st_size == 0:
                print >>file_handler, BLOCK_SEPARATOR
            print >> file_handler, data
def prepare_recipes(data):
    # Recipes are joined with the literal '"USE_RECIPE_DELIM"' marker; turn it
    # back into newlines and drop the unexpanded $TEST_RECIPES_VALUE
    # placeholder, then base64-encode for the dart record (Python 2 str).
    data = data.replace('"USE_RECIPE_DELIM"', "\n")
    data = data.replace("$TEST_RECIPES_VALUE", "")
    return base64.b64encode(data or "")
def prepare_env(data):
    """Drop the unexpanded $TEST_ENV_VALUE placeholder and serialize the env entries."""
    cleaned = data.replace("$TEST_ENV_VALUE", "")
    return serialize_list(shlex.split(cleaned))
def validate_sb_vault(name, value):
    """Return an error message when a sb_vault value does not match the expected pattern."""
    if CANON_SB_VAULT_REGEX.match(value):
        return None
    return "sb_vault value '{}' should follow pattern <ENV_NAME>=:<value|file>:<owner>:<vault key>".format(value)
def validate_numerical_requirement(name, value):
    """Return an error message when *value* cannot be resolved to a number, else None."""
    if mr.resolve_value(value) is not None:
        return None
    return "Cannot convert [[imp]]{}[[rst]] to the proper [[imp]]{}[[rst]] requirement value".format(value, name)
def validate_choice_requirement(name, val, valid):
    """Return an error message when *val* is not one of *valid*, else None."""
    if val in valid:
        return None
    return "Unknown [[imp]]{}[[rst]] requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(name, val, ", ".join(valid))
def validate_force_sandbox_requirement(name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, check_func):
    """Validate requirements whose limits are relaxed under ya:force_sandbox/fuzzing.

    Outside autocheck, or for force_sandbox/fuzzing runs, the literal 'all'
    or any numeric value is accepted; otherwise the size-dependent
    *check_func* enforces the limit.
    """
    if is_force_sandbox or not in_autocheck or is_fuzzing:
        if value == 'all':
            return None
        return validate_numerical_requirement(name, value)
    numeric_error = validate_numerical_requirement(name, value)
    if numeric_error:
        return numeric_error
    return check_func(mr.resolve_value(value), test_size, is_kvm)
# TODO: Remove is_kvm param when there will be guarantees on RAM
def validate_requirement(req_name, value, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm):
    """Validate a single REQUIREMENTS entry; return an error message or None."""
    # Dispatch table: requirement name -> validator (None means "no check").
    req_checks = {
        'container': validate_numerical_requirement,
        'cpu': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, reqs.check_cpu),
        'disk_usage': validate_numerical_requirement,
        'dns': lambda n, v: validate_choice_requirement(n, v, VALID_DNS_REQUIREMENTS),
        'network': lambda n, v: validate_choice_requirement(n, v, VALID_NETWORK_REQUIREMENTS),
        'ram': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, reqs.check_ram),
        'ram_disk': lambda n, v: validate_force_sandbox_requirement(n, v, test_size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm, reqs.check_ram_disk),
        'sb': None,
        'sb_vault': validate_sb_vault,
    }

    if req_name not in req_checks:
        return "Unknown requirement: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(req_name, ", ".join(sorted(req_checks)))

    # 'container'/'disk' are sandbox-only resources.
    if req_name in ('container', 'disk') and not is_force_sandbox:
        return "Only [[imp]]LARGE[[rst]] tests with [[imp]]ya:force_sandbox[[rst]] tag can have [[imp]]{}[[rst]] requirement".format(req_name)

    check_func = req_checks[req_name]
    if check_func:
        return check_func(req_name, value)
def validate_test(unit, kw):
    """Validate a test record dict; return (valid_kw_or_None, warnings, errors)."""
    def get_list(key):
        return deserialize_list(kw.get(key, ""))

    valid_kw = copy.deepcopy(kw)
    errors = []
    warnings = []

    # Framework whitelists: certain test frameworks are restricted to a few
    # project subtrees.
    if valid_kw.get('SCRIPT-REL-PATH') == 'boost.test':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(("contrib", "mail", "maps", "tools/idl", "metrika", "devtools", "mds")):
            errors.append("BOOSTTEST is not allowed here")
    elif valid_kw.get('SCRIPT-REL-PATH') == 'ytest.py':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith("yweb/antispam") and not project_path.startswith("devtools"):
            errors.append("FLEUR test is not allowed here")
    elif valid_kw.get('SCRIPT-REL-PATH') == 'gtest':
        project_path = valid_kw.get('BUILD-FOLDER-PATH', "")
        if not project_path.startswith(("adfox", "contrib", "devtools", "mail", "mds")):
            errors.append("GTEST is not allowed here")

    # Sizes ordered by their default timeout (ascending) for suggestions below.
    size_timeout = collections.OrderedDict(sorted(consts.TestSize.DefaultTimeouts.items(), key=lambda t: t[1]))

    size = valid_kw.get('SIZE', consts.TestSize.Small).lower()
    # TODO: use set instead list
    tags = get_list("TAG")
    requirements_set = set(get_list("REQUIREMENTS"))
    in_autocheck = "ya:not_autocheck" not in tags and 'ya:manual' not in tags
    is_fat = 'ya:fat' in tags
    is_force_sandbox = 'ya:force_sandbox' in tags
    is_fuzzing = valid_kw.get("FUZZING", False)
    is_kvm = 'kvm' in requirements_set
    requirements = {}
    # NOTE(review): this is a string, not a 1-tuple, so the `in` check below
    # is a substring test — e.g. 'sb' also matches. Confirm intent.
    list_requirements = ('sb_vault')
    for req in requirements_set:
        if req in ('kvm', ):
            requirements[req] = str(True)
            continue

        if ":" in req:
            req_name, req_value = req.split(":", 1)
            error_msg = validate_requirement(req_name, req_value, size, is_force_sandbox, in_autocheck, is_fuzzing, is_kvm)
            if error_msg:
                errors += [error_msg]
            else:
                if req_name in list_requirements:
                    # List-valued requirements accumulate comma-separated.
                    requirements[req_name] = ",".join(filter(None, [requirements.get(req_name), req_value]))
                else:
                    if req_name in requirements:
                        warnings.append("Requirement [[imp]]{}[[rst]] is redefined [[imp]]{}[[rst]] -> [[imp]]{}[[rst]]".format(req_name, requirements[req_name], req_value))
                    requirements[req_name] = req_value
        else:
            errors.append("Invalid requirement syntax [[imp]]{}[[rst]]: expect <requirement>:<value>".format(req))

    # Only a few requirements are honored on distbuild; the rest need sandbox.
    invalid_requirements_for_distbuild = [requirement for requirement in requirements.keys() if requirement not in ('ram', 'ram_disk', 'cpu', 'network')]
    sb_tags = [tag for tag in tags if tag.startswith('sb:')]

    if is_fat:
        if in_autocheck and not is_force_sandbox:
            if invalid_requirements_for_distbuild:
                errors.append("'{}' REQUIREMENTS options can be used only for FAT tests with ya:force_sandbox tag. Add TAG(ya:force_sandbox) or remove option.".format(invalid_requirements_for_distbuild))
            if sb_tags:
                errors.append("You can set sandbox tags '{}' only for FAT tests with ya:force_sandbox. Add TAG(ya:force_sandbox) or remove sandbox tags.".format(sb_tags))
            if 'ya:sandbox_coverage' in tags:
                errors.append("You can set 'ya:sandbox_coverage' tag only for FAT tests with ya:force_sandbox.")
    else:
        if is_force_sandbox:
            errors.append('ya:force_sandbox can be used with LARGE tests only')
        if 'ya:privileged' in tags and 'container' not in requirements:
            errors.append("Only tests with 'container' requirement can have 'ya:privileged' tag")
        if 'ya:privileged' in tags and not is_fat:
            errors.append("Only fat tests can have 'ya:privileged' tag")

    # Timeout must fit the declared size; suggest the smallest fitting size.
    if size not in size_timeout:
        errors.append("Unknown test size: [[imp]]{}[[rst]], choose from [[imp]]{}[[rst]]".format(size.upper(), ", ".join([sz.upper() for sz in size_timeout.keys()])))
    else:
        try:
            timeout = int(valid_kw.get('TEST-TIMEOUT', size_timeout[size]) or size_timeout[size])
            script_rel_path = valid_kw.get('SCRIPT-REL-PATH')
            if timeout < 0:
                raise Exception("Timeout must be > 0")
            if size_timeout[size] < timeout and in_autocheck and script_rel_path != 'java.style':
                suggested_size = None
                for s, t in size_timeout.items():
                    if timeout <= t:
                        suggested_size = s
                        break

                if suggested_size:
                    suggested_size = ", suggested size: [[imp]]{}[[rst]]".format(suggested_size.upper())
                else:
                    suggested_size = ""
                errors.append("Max allowed timeout for test size [[imp]]{}[[rst]] is [[imp]]{} sec[[rst]]{}".format(size.upper(), size_timeout[size], suggested_size))
        except Exception as e:
            errors.append("Error when parsing test timeout: [[bad]]{}[[rst]]".format(e))

    if in_autocheck and size == consts.TestSize.Large and not is_fat:
        errors.append("LARGE test must have ya:fat tag")
    if is_fat and size != consts.TestSize.Large:
        errors.append("Only LARGE test may have ya:fat tag")

    # Re-serialize the (possibly merged) requirements back into the record.
    requiremtens_list = []
    for req_name, req_value in requirements.iteritems():
        requiremtens_list.append(req_name + ":" + req_value)
    valid_kw['REQUIREMENTS'] = serialize_list(requiremtens_list)

    # Fuzzer options sanity checks; reserved options are managed by the runner.
    if valid_kw.get("FUZZ-OPTS"):
        for option in get_list("FUZZ-OPTS"):
            if not option.startswith("-"):
                errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should start with '-'".format(option))
                break
            eqpos = option.find("=")
            if eqpos == -1 or len(option) == eqpos + 1:
                errors.append("Unrecognized fuzzer option '[[imp]]{}[[rst]]'. All fuzzer options should obtain value specified after '='".format(option))
                break
            if option[eqpos - 1] == " " or option[eqpos + 1] == " ":
                errors.append("Spaces are not allowed: '[[imp]]{}[[rst]]'".format(option))
                break
            if option[:eqpos] in ("-runs", "-dict", "-jobs", "-workers", "-artifact_prefix", "-print_final_stats"):
                errors.append("You can't use '[[imp]]{}[[rst]]' - it will be automatically calculated or configured during run".format(option))
                break

    # YT_SPEC files must exist and contain only the known top-level keys.
    if valid_kw.get("YT-SPEC"):
        if 'ya:yt' not in tags:
            errors.append("You can use YT_SPEC macro only tests marked with ya:yt tag")
        else:
            for filename in get_list("YT-SPEC"):
                filename = unit.resolve('$S/' + filename)
                if not os.path.exists(filename):
                    errors.append("File '{}' specified in the YT_SPEC macro doesn't exist".format(filename))
                    continue
                try:
                    with open(filename) as afile:
                        data = json.load(afile)
                except Exception as e:
                    errors.append("Malformed data in {}: {} ({})".format(unit.path(), e, filename))
                    continue
                known = {'operation_spec', 'task_spec'}
                unknown = set(data.keys()) - known
                if unknown:
                    errors.append("Don't know what to do with {} field(s) in {}. You can use only: {}".format(unknown, unit.path(), known))
                    continue

    if valid_kw.get("USE_ARCADIA_PYTHON") == "yes" and valid_kw.get("SCRIPT-REL-PATH") == "py.test":
        errors.append("PYTEST_SCRIPT is deprecated")

    partition = valid_kw.get('TEST_PARTITION', 'SEQUENTIAL')
    if partition not in PARTITION_MODS:
        raise ValueError('partition mode should be one of {}, detected: {}'.format(PARTITION_MODS, partition))

    if valid_kw.get('SPLIT-FACTOR'):
        if valid_kw.get('FORK-MODE') == 'none':
            errors.append('SPLIT_FACTOR must be use with FORK_TESTS() or FORK_SUBTESTS() macro')
        try:
            value = int(valid_kw.get('SPLIT-FACTOR'))
            if value <= 0:
                raise ValueError("must be > 0")
            if value > SPLIT_FACTOR_MAX_VALUE:
                raise ValueError("the maximum allowed value is {}".format(SPLIT_FACTOR_MAX_VALUE))
        except ValueError as e:
            errors.append('Incorrect SPLIT_FACTOR value: {}'.format(e))

    if errors:
        return None, warnings, errors

    return valid_kw, warnings, errors
def dump_test(unit, kw):
    """Validate the test record and render it as a 'KEY: value' dart block.

    Warnings are reported on the unit, errors go to ymake; returns None when
    validation failed, otherwise the rendered text.
    """
    valid_kw, warnings, errors = validate_test(unit, kw)
    for w in warnings:
        unit.message(['warn', w])
    for e in errors:
        ymake.report_configure_error(e)
    if valid_kw is None:
        return None
    string_handler = StringIO.StringIO()
    for k, v in valid_kw.iteritems():
        print >>string_handler, k + ': ' + v
    print >>string_handler, BLOCK_SEPARATOR
    data = string_handler.getvalue()
    string_handler.close()
    return data
def serialize_list(lst):
    """Serialize a list of strings into the dart format: '"a;b;c"'.

    Falsy items are dropped; an empty result serializes to ''.
    """
    # Materialize explicitly: under Python 3 `filter(None, lst)` returns a
    # lazy iterator which is always truthy, which would break the emptiness
    # check below (and the original py2 code relied on filter returning a list).
    lst = [item for item in lst if item]
    return '\"' + ';'.join(lst) + '\"' if lst else ''
def deserialize_list(val):
    """Parse a dart-serialized list (e.g. '"a;b"') back into a list of strings.

    Returns a real list (not a lazy `filter` object) so callers can
    concatenate and index the result under Python 3 as well as Python 2.
    """
    return [item for item in val.replace('"', "").split(";") if item]
def get_values_list(unit, key):
    """Read a space-separated unit variable, dropping the '$KEY' placeholder and empty-quote tokens."""
    raw = (unit.get(key) or '').replace('$' + key, '').strip()
    tokens = [tok.strip() for tok in raw.split()]
    return [tok for tok in tokens if tok and tok not in ['""', "''"]]
def get_unit_list_variable(unit, name):
    """Return the values of a unit list variable, verifying its leading '$NAME' marker."""
    raw = unit.get(name)
    if not raw:
        return []
    parts = raw.split(' ')
    # The first token must be the variable's own name marker.
    assert parts[0] == "${}".format(name), (parts, name)
    return parts[1:]
def implies(a, b):
    """Logical implication a -> b, returned as a bool."""
    if a:
        return bool(b)
    return True
def match_coverage_extractor_requirements(unit):
    """Return True when a coverage.extractor test should be added for this unit."""
    # tests must be requested
    if unit.get("TESTS_REQUESTED") != "yes":
        return False
    # build must imply clang coverage, which supports segment extraction from the binaries
    if unit.get("CLANG_COVERAGE") != "yes":
        return False
    # contrib is only covered when explicitly requested
    in_contrib = _common.strip_roots(unit.path()).startswith("contrib/")
    return implies(in_contrib, unit.get("ENABLE_CONTRIB_COVERAGE") == "yes")
def onadd_ytest(unit, *args):
    """Handler for ADD_YTEST: build a test record dict and store it as dart data."""
    keywords = {"DEPENDS": -1, "DATA": -1, "TIMEOUT": 1, "FORK_MODE": 1, "SPLIT_FACTOR": 1,
                "FORK_SUBTESTS": 0, "FORK_TESTS": 0}
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)

    if flat_args[1] == "fuzz.test":
        # Fuzz tests implicitly depend on their corpus data.
        unit.ondata("arcadia/fuzzing/{}/corpus.json".format(_common.strip_roots(unit.path())))
    elif flat_args[1] == "coverage.extractor" and not match_coverage_extractor_requirements(unit):
        # XXX
        # Current ymake implementation doesn't allow to call macro inside the 'when' body
        # that's why we add ADD_YTEST(coverage.extractor) to every PROGRAM entry and check requirements later
        return

    # Fork mode: explicit FORK_SUBTESTS/FORK_TESTS flags win over FORK_MODE,
    # which wins over the unit-level TEST_FORK_MODE variable.
    fork_mode = []
    if 'FORK_SUBTESTS' in spec_args:
        fork_mode.append('subtests')
    if 'FORK_TESTS' in spec_args:
        fork_mode.append('tests')
    fork_mode = fork_mode or spec_args.get('FORK_MODE', []) or unit.get('TEST_FORK_MODE').split()
    fork_mode = ' '.join(fork_mode) if fork_mode else ''

    test_record = {
        'TEST-NAME': flat_args[0],
        'SCRIPT-REL-PATH': flat_args[1],
        'TESTED-PROJECT-NAME': unit.name(),
        'TESTED-PROJECT-FILENAME': unit.filename(),
        'SOURCE-FOLDER-PATH': unit.resolve(unit.path()),
        'BUILD-FOLDER-PATH': _common.strip_roots(unit.path()),
        'BINARY-PATH': _common.strip_roots(os.path.join(unit.path(), unit.filename())),
        'CUSTOM-DEPENDENCIES': ' '.join(spec_args.get('DEPENDS', []) + get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        #  'TEST-PRESERVE-ENV': 'da',
        'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(spec_args.get('DATA', []) + get_values_list(unit, 'TEST_DATA_VALUE'), 'AUTOUPDATED'))),
        'TEST-TIMEOUT': ''.join(spec_args.get('TIMEOUT', [])) or unit.get('TEST_TIMEOUT') or '',
        'FORK-MODE': fork_mode,
        'SPLIT-FACTOR': ''.join(spec_args.get('SPLIT_FACTOR', [])) or unit.get('TEST_SPLIT_FACTOR') or '',
        'SIZE': ''.join(spec_args.get('SIZE', [])) or unit.get('TEST_SIZE_NAME') or '',
        'TAG': serialize_list(spec_args.get('TAG', []) + get_values_list(unit, 'TEST_TAGS_VALUE')),
        'REQUIREMENTS': serialize_list(spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')),
        'TEST-CWD': unit.get('TEST_CWD_VALUE') or '',
        'FUZZ-DICTS': serialize_list(spec_args.get('FUZZ_DICTS', []) + get_unit_list_variable(unit, 'FUZZ_DICTS_VALUE')),
        'FUZZ-OPTS': serialize_list(spec_args.get('FUZZ_OPTS', []) + get_unit_list_variable(unit, 'FUZZ_OPTS_VALUE')),
        'YT-SPEC': serialize_list(spec_args.get('YT_SPEC', []) + get_unit_list_variable(unit, 'TEST_YT_SPEC_VALUE')),
        'BLOB': unit.get('TEST_BLOB_DATA') or '',
        'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
        'TEST_IOS_DEVICE_TYPE': unit.get('TEST_IOS_DEVICE_TYPE_VALUE') or '',
        'TEST_IOS_RUNTIME_TYPE': unit.get('TEST_IOS_RUNTIME_TYPE_VALUE') or '',
        'ANDROID_APK_TEST_ACTIVITY': unit.get('ANDROID_APK_TEST_ACTIVITY_VALUE') or '',
        'TEST_PARTITION': unit.get("TEST_PARTITION") or 'SEQUENTIAL',
    }

    if flat_args[1] == 'fuzz.test' and unit.get('FUZZING') == 'yes':
        test_record['FUZZING'] = '1'
        # use all cores if fuzzing requested
        test_record['REQUIREMENTS'] = serialize_list(filter(None, deserialize_list(test_record['REQUIREMENTS']) + ["cpu:all", "ram:all"]))

    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
def onadd_test(unit, *args):
    """Handler for ADD_TEST: collect macro arguments and delegate to _dump_test."""
    flat_args, spec_args = _common.sort_by_keywords({"DEPENDS": -1, "TIMEOUT": 1, "DATA": -1, "TAG": -1, "REQUIREMENTS": -1, "FORK_MODE": 1,
                                                     "SPLIT_FACTOR": 1, "FORK_SUBTESTS": 0, "FORK_TESTS": 0, "SIZE": 1}, args)

    test_type = flat_args[0]
    test_files = flat_args[1:]
    # PEP8/PY_FLAKES are handled by the dedicated check machinery instead.
    if test_type in ["PEP8", "PY_FLAKES"]:
        return
        # unit_path = unit.path()
        # paths = []
        # for test_file in test_files:
        #     if test_file == ".":
        #         path_to_check = unit_path
        #     else:
        #         path_to_check = os.path.join(unit_path, test_file)
        #     paths.append(path_to_check)
        # return onadd_check(unit, *tuple([test_type] + sorted(paths)))
    custom_deps = spec_args.get('DEPENDS', [])
    timeout = spec_args.get("TIMEOUT", [])
    if timeout:
        timeout = timeout[0]
    else:
        timeout = '0'
    fork_mode = []
    if 'FORK_SUBTESTS' in spec_args:
        fork_mode.append('subtests')
    if 'FORK_TESTS' in spec_args:
        fork_mode.append('tests')
    fork_mode = fork_mode or spec_args.get('FORK_MODE', [])
    split_factor = ''.join(spec_args.get('SPLIT_FACTOR', [])) or ''
    test_size = ''.join(spec_args.get('SIZE', [])) or 'SMALL'
    test_dir = unit.resolve(os.path.join(unit.path()))
    tags = spec_args.get('TAG', []) + get_values_list(unit, 'TEST_TAGS_VALUE')
    requirements = spec_args.get('REQUIREMENTS', []) + get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
    test_data = spec_args.get("DATA", []) + get_values_list(unit, 'TEST_DATA_VALUE')
    python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
    # Legacy pytest suites are flagged so the runner uses the old protocol.
    if test_type == "PY_TEST":
        old_pytest = True
    else:
        old_pytest = False

    # NOTE(review): _dump_test is defined elsewhere in this module.
    _dump_test(unit, test_type, test_files, timeout, test_dir, custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, None, old_pytest)
def onadd_check(unit, *args):
    """Handler for ADD_CHECK: register a style/lint check as a dart test record."""
    flat_args, spec_args = _common.sort_by_keywords({"DEPENDS": -1, "TIMEOUT": 1, "DATA": -1, "TAG": -1, "REQUIREMENTS": -1, "FORK_MODE": 1,
                                                    "SPLIT_FACTOR": 1, "FORK_SUBTESTS": 0, "FORK_TESTS": 0, "SIZE": 1}, args)
    check_type = flat_args[0]
    test_dir = unit.resolve(os.path.join(unit.path()))

    test_timeout = ''
    # Map the macro's check type onto the runner script for that check.
    if check_type in ["PEP8", "PYFLAKES", "PY_FLAKES", "PEP8_2", "PYFLAKES_2"]:
        script_rel_path = "py.lint.pylint"
    elif check_type in ["PEP8_3", "PYFLAKES_3"]:
        script_rel_path = "py.lint.pylint.3"
    elif check_type == "JAVA_STYLE":
        if len(flat_args) < 2:
            raise Exception("Not enough arguments for JAVA_STYLE check")
        check_level = flat_args[1]
        # Strictness level -> checkstyle config resource path.
        allowed_levels = {
            'base': '/yandex_checks.xml',
            'strict': '/yandex_checks_strict.xml',
            'extended': '/yandex_checks_extended.xml',
        }
        if check_level not in allowed_levels:
            raise Exception('{} is not allowed in LINT(), use one of {}'.format(check_level, allowed_levels.keys()))
        # Replace the level name with its config path for the runner.
        flat_args[1] = allowed_levels[check_level]
        script_rel_path = "java.style"
        test_timeout = '120'
    elif check_type == "gofmt":
        script_rel_path = check_type
        go_files = flat_args[1:]
        if go_files:
            test_dir = os.path.dirname(go_files[0]).lstrip("$S/")
    else:
        script_rel_path = check_type

    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    test_record = {
        'TEST-NAME': check_type.lower(),
        'TEST-TIMEOUT': test_timeout,
        'SCRIPT-REL-PATH': script_rel_path,
        'TESTED-PROJECT-NAME': os.path.basename(test_dir),
        'SOURCE-FOLDER-PATH': test_dir,
        'CUSTOM-DEPENDENCIES': " ".join(spec_args.get('DEPENDS', [])),
        'TEST-DATA': '',
        'SPLIT-FACTOR': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': '',
        'FORK-TEST-FILES': '',
        'SIZE': 'SMALL',
        'TAG': '',
        'REQUIREMENTS': '',
        'USE_ARCADIA_PYTHON': use_arcadia_python or '',
        'OLD_PYTEST': 'no',
        'PYTHON-PATHS': '',
        'FILES': serialize_list(flat_args[1:])
    }
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
def on_register_no_check_imports(unit):
    """Record NO_CHECK_IMPORTS patterns as a py/no_check_imports resource on the unit."""
    patterns = unit.get('NO_CHECK_IMPORTS_FOR_VALUE')
    if patterns in ('', 'None'):
        return
    unit.onresource(['-', 'py/no_check_imports/{}="{}"'.format(_common.pathid(patterns), patterns)])
def onadd_check_py_imports(unit, *args):
    """Register the py.imports check, which verifies every module in the built
    binary is importable.  A completely empty NO_CHECK_IMPORTS value disables
    the check for this unit."""
    if unit.get('NO_CHECK_IMPORTS_FOR_VALUE').strip() == "":
        return
    unit.onpeerdir(['library/python/testing/import_test'])
    check_type = "py.imports"
    test_dir = unit.resolve(os.path.join(unit.path()))
    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    test_record = {
        'TEST-NAME': "pyimports",
        'TEST-TIMEOUT': '',
        'SCRIPT-REL-PATH': check_type,
        'TESTED-PROJECT-NAME': os.path.basename(test_dir),
        'SOURCE-FOLDER-PATH': test_dir,
        'CUSTOM-DEPENDENCIES': '',
        'TEST-DATA': '',
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        'SPLIT-FACTOR': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': '',
        'FORK-TEST-FILES': '',
        'SIZE': 'SMALL',
        'TAG': '',
        'USE_ARCADIA_PYTHON': use_arcadia_python or '',
        'OLD_PYTEST': 'no',
        'PYTHON-PATHS': '',
        # The check runs against the unit's own output binary.
        'FILES': serialize_list(["{}/{}".format(_common.strip_roots(unit.path()), unit.filename())])
    }
    # A literal "None" value disables exclusion patterns; anything else narrows
    # the check (defaulting to "*" when patterns are present but empty).
    if unit.get('NO_CHECK_IMPORTS_FOR_VALUE') != "None":
        test_record["NO-CHECK"] = serialize_list(get_values_list(unit, 'NO_CHECK_IMPORTS_FOR_VALUE') or ["*"])
    else:
        test_record["NO-CHECK"] = ''
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(["DART_DATA", data])
        save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
def onadd_pytest_script(unit, *args):
    """Register a script-based (non-binary) pytest suite in the test DART.

    args[0] is the test type (e.g. "PY_TEST"); everything else is read from
    unit variables populated by the TEST_SRCS/TAG/DATA/... macros.
    """
    unit.set(["PYTEST_BIN", "no"])
    custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
    # '0' means "no explicit timeout".  The previous
    # `filter(None, [unit.get(...)])` + truthiness idiom was both verbose and
    # Python-2-only (a py3 filter object is always truthy); `or '0'` is the
    # behaviorally identical replacement.
    timeout = unit.get(["TEST_TIMEOUT"]) or '0'
    test_type = args[0]
    fork_mode = unit.get('TEST_FORK_MODE').split() or ''
    split_factor = unit.get('TEST_SPLIT_FACTOR') or ''
    test_size = unit.get('TEST_SIZE_NAME') or ''
    unit_path = unit.path()
    test_dir = unit.resolve(unit_path)
    test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
    tags = get_values_list(unit, 'TEST_TAGS_VALUE')
    requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
    test_data = get_values_list(unit, 'TEST_DATA_VALUE')
    # Canonical-data resources are appended to the ordinary test data.
    data, data_files = get_canonical_test_resources(test_dir, unit_path)
    test_data += data
    python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
    binary_path = None
    test_cwd = unit.get('TEST_CWD_VALUE') or ''
    _dump_test(unit, test_type, test_files, timeout, test_dir, custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, data_files=data_files)
def onadd_pytest_bin(unit, *args):
    """Register a binary pytest suite; RUNNER_BIN is the only accepted keyword."""
    positional, keywords = _common.sort_by_keywords({'RUNNER_BIN': 1}, args)
    if positional:
        ymake.report_configure_error(
            'Unknown arguments found while processing add_pytest_bin macro: {!r}'
            .format(positional)
        )
    runner_bin = keywords.get('RUNNER_BIN', [None])[0]
    add_test_to_dart(unit, "pytest.bin", runner_bin=runner_bin)
def add_test_to_dart(unit, test_type, binary_path=None, runner_bin=None):
    """Collect unit test variables and dump a binary test suite into the DART.

    binary_path defaults to the unit's own output binary; runner_bin, when
    given, overrides the test runner executable.
    """
    custom_deps = get_values_list(unit, 'TEST_DEPENDS_VALUE')
    # '0' means "no explicit timeout".  The previous
    # `filter(None, [unit.get(...)])` + truthiness idiom was Python-2-only
    # (a py3 filter object is always truthy); `or '0'` behaves identically.
    timeout = unit.get(["TEST_TIMEOUT"]) or '0'
    fork_mode = unit.get('TEST_FORK_MODE').split() or ''
    split_factor = unit.get('TEST_SPLIT_FACTOR') or ''
    test_size = unit.get('TEST_SIZE_NAME') or ''
    test_cwd = unit.get('TEST_CWD_VALUE') or ''
    unit_path = unit.path()
    test_dir = unit.resolve(unit_path)
    test_files = get_values_list(unit, 'TEST_SRCS_VALUE')
    tags = get_values_list(unit, 'TEST_TAGS_VALUE')
    requirements = get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')
    test_data = get_values_list(unit, 'TEST_DATA_VALUE')
    # Canonical-data resources are appended to the ordinary test data.
    data, data_files = get_canonical_test_resources(test_dir, unit_path)
    test_data += data
    python_paths = get_values_list(unit, 'TEST_PYTHON_PATH_VALUE')
    yt_spec = get_values_list(unit, 'TEST_YT_SPEC_VALUE')
    if not binary_path:
        binary_path = os.path.join(unit_path, unit.filename())
    _dump_test(unit, test_type, test_files, timeout, test_dir, custom_deps, test_data, python_paths, split_factor, fork_mode, test_size, tags, requirements, binary_path, test_cwd=test_cwd, runner_bin=runner_bin, yt_spec=yt_spec, data_files=data_files)
def extract_java_system_properties(unit, args):
    """Parse SYSTEM_PROPERTIES macro arguments into a list of property dicts.

    Arguments come in key/value pairs; a 'FILE' key references a source-tree
    properties file.  Returns (props, None) on success, or ([], error_message)
    on misuse.
    """
    if len(args) % 2:
        return [], 'Wrong use of SYSTEM_PROPERTIES in {}: odd number of arguments'.format(unit.path())

    props = []
    for key, value in zip(args[::2], args[1::2]):
        if key != 'FILE':
            props.append({'type': 'inline', 'key': key, 'value': value})
            continue
        # FILE entries must be plain source-tree paths.
        if value.startswith('${BINDIR}') or value.startswith('${ARCADIA_BUILD_ROOT}') or value.startswith('/'):
            return [], 'Wrong use of SYSTEM_PROPERTIES in {}: absolute/build file path {}'.format(unit.path(), value)
        rel = _common.rootrel_arc_src(value, unit)
        if not os.path.exists(unit.resolve('$S/' + rel)):
            return [], "Wrong use of SYSTEM_PROPERTIES in {}: can't resolve {}".format(unit.path(), rel)
        props.append({'type': 'file', 'path': '${ARCADIA_ROOT}/' + rel})
    return props, None
def onjava_test(unit, *args):
    """Register a Java test module (JTEST/JTEST_FOR/TESTNG/JUNIT5) in the test DART."""
    assert unit.get('MODULE_TYPE') is not None

    if unit.get('MODULE_TYPE') == 'JTEST_FOR':
        if not unit.get('UNITTEST_DIR'):
            ymake.report_configure_error('skip JTEST_FOR in {}: no args provided'.format(unit.path()))
            return

    unit_path = unit.path()
    path = _common.strip_roots(unit_path)

    test_dir = unit.resolve(unit_path)
    test_data = get_values_list(unit, 'TEST_DATA_VALUE')
    # Runner scripts are shipped to the execution node as test data.
    test_data.append('arcadia/build/scripts/unpacking_jtest_runner.py')
    test_data.append('arcadia/build/scripts/run_testng.py')
    data, data_files = get_canonical_test_resources(test_dir, unit_path)
    test_data += data

    props, error_mgs = extract_java_system_properties(unit, get_values_list(unit, 'SYSTEM_PROPERTIES_VALUE'))
    if error_mgs:
        ymake.report_configure_error(error_mgs)
        return
    for prop in props:
        if prop['type'] == 'file':
            # File-based properties must also be delivered as test data.
            test_data.append(prop['path'].replace('${ARCADIA_ROOT}', 'arcadia'))

    # Properties are passed to the runner as base64-encoded JSON.
    # NOTE(review): json.dumps(..., encoding=...) and b64encode on str are
    # Python-2-only; this whole plugin appears to target Python 2.
    props = base64.b64encode(json.dumps(props, encoding='utf-8'))

    test_cwd = unit.get('TEST_CWD_VALUE') or ''  # TODO: validate test_cwd value

    if unit.get('MODULE_TYPE') == 'TESTNG':
        script_rel_path = 'testng.test'
    elif unit.get('MODULE_TYPE') == 'JUNIT5':
        script_rel_path = 'junit5.test'
    else:
        script_rel_path = 'junit.test'

    test_record = {
        'SOURCE-FOLDER-PATH': path,
        'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path)]),
        'SCRIPT-REL-PATH': script_rel_path,
        'TEST-TIMEOUT': unit.get('TEST_TIMEOUT') or '',
        'TESTED-PROJECT-NAME': path,
        'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
        # 'TEST-PRESERVE-ENV': 'da',
        'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
        'FORK-MODE': unit.get('TEST_FORK_MODE') or '',
        'SPLIT-FACTOR': unit.get('TEST_SPLIT_FACTOR') or '',
        'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TAG': serialize_list(get_values_list(unit, 'TEST_TAGS_VALUE')),
        'SIZE': unit.get('TEST_SIZE_NAME') or '',
        'REQUIREMENTS': serialize_list(get_values_list(unit, 'TEST_REQUIREMENTS_VALUE')),
        'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),

        # JTEST/JTEST_FOR only
        'MODULE_TYPE': unit.get('MODULE_TYPE'),
        'UNITTEST_DIR': unit.get('UNITTEST_DIR') or '',
        'JVM_ARGS': serialize_list(get_values_list(unit, 'JVM_ARGS_VALUE')),
        'SYSTEM_PROPERTIES': props,
        'TEST-CWD': test_cwd,
        'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
    }
    data = dump_test(unit, test_record)
    if data:
        unit.set_property(['DART_DATA', data])
    _add_data_files(unit, data_files, test_dir)
def onjava_test_deps(unit, *args):
    """Register the java dependency/classpath-clash check for a Java module."""
    assert unit.get('MODULE_TYPE') is not None

    path = _common.strip_roots(unit.path())

    test_record = {
        'SOURCE-FOLDER-PATH': path,
        # "<parent>-<dir>-dependencies", with empty components stripped.
        'TEST-NAME': '-'.join([os.path.basename(os.path.dirname(path)), os.path.basename(path), 'dependencies']).strip('-'),
        'SCRIPT-REL-PATH': 'java.dependency.test',
        'TEST-TIMEOUT': '',
        'TESTED-PROJECT-NAME': path,
        'TEST-DATA': '',
        'TEST_PARTITION': 'SEQUENTIAL',
        'FORK-MODE': '',
        'SPLIT-FACTOR': '',
        'CUSTOM-DEPENDENCIES': ' '.join(get_values_list(unit, 'TEST_DEPENDS_VALUE')),
        'TAG': '',
        'SIZE': 'SMALL',
        'IGNORE_CLASSPATH_CLASH': ' '.join(get_values_list(unit, 'JAVA_IGNORE_CLASSPATH_CLASH_VALUE')),

        # JTEST/JTEST_FOR only
        'MODULE_TYPE': unit.get('MODULE_TYPE'),
        'UNITTEST_DIR': '',
        'SYSTEM_PROPERTIES': '',
        'TEST-CWD': '',
    }
    data = dump_test(unit, test_record)
    unit.set_property(['DART_DATA', data])
def _dump_test(
        unit,
        test_type,
        test_files,
        timeout,
        test_dir,
        custom_deps,
        test_data,
        python_paths,
        split_factor,
        fork_mode,
        test_size,
        tags,
        requirements,
        binary_path='',
        old_pytest=False,
        test_cwd=None,
        runner_bin=None,
        yt_spec=None,
        data_files=None
):
    """Serialize one test suite (one record per test entry) into the DART.

    With a binary_path the suite is a single entry named after the binary,
    unless FORK_TEST_FILES is on, in which case each test file gets its own
    record; script suites always emit one record per test file.
    """
    if test_type == "PY_TEST":
        script_rel_path = "py.test"
    elif test_type == "FLEUR":
        script_rel_path = "ytest.py"
    elif test_type == "PEP8":
        script_rel_path = "py.test.pep8"
    elif test_type == "PY_FLAKES":
        script_rel_path = "py.test.flakes"
    else:
        script_rel_path = test_type

    unit_path = unit.path()
    fork_test_files = unit.get('FORK_TEST_FILES_MODE')
    fork_mode = ' '.join(fork_mode) if fork_mode else ''
    use_arcadia_python = unit.get('USE_ARCADIA_PYTHON')
    if test_cwd:
        # Strip macro plumbing that may leak into the raw TEST_CWD value.
        test_cwd = test_cwd.replace("$TEST_CWD_VALUE", "").replace('"MACRO_CALLS_DELIM"', "").strip()
    if binary_path:
        if fork_test_files == 'on':
            tests = test_files
        else:
            tests = [os.path.basename(binary_path)]
    else:
        tests = test_files
    for test_name in tests:
        test_record = {
            'TEST-NAME': os.path.splitext(test_name)[0],
            'TEST-TIMEOUT': timeout,
            'SCRIPT-REL-PATH': script_rel_path,
            'TESTED-PROJECT-NAME': test_name,
            'SOURCE-FOLDER-PATH': test_dir,
            'CUSTOM-DEPENDENCIES': " ".join(custom_deps),
            'TEST-ENV': prepare_env(unit.get("TEST_ENV_VALUE")),
            # 'TEST-PRESERVE-ENV': 'da',
            'TEST-DATA': serialize_list(sorted(_common.filter_out_by_keyword(test_data, 'AUTOUPDATED'))),
            'TEST-RECIPES': prepare_recipes(unit.get("TEST_RECIPES_VALUE")),
            'SPLIT-FACTOR': split_factor,
            'TEST_PARTITION': unit.get('TEST_PARTITION') or 'SEQUENTIAL',
            'FORK-MODE': fork_mode,
            'FORK-TEST-FILES': fork_test_files,
            'TEST-FILES': serialize_list(tests),
            'SIZE': test_size,
            'TAG': serialize_list(tags),
            'REQUIREMENTS': serialize_list(requirements),
            'USE_ARCADIA_PYTHON': use_arcadia_python or '',
            'OLD_PYTEST': 'yes' if old_pytest else 'no',
            'PYTHON-PATHS': serialize_list(python_paths),
            'TEST-CWD': test_cwd or '',
            'SKIP_TEST': unit.get('SKIP_TEST_VALUE') or '',
            'BUILD-FOLDER-PATH': _common.strip_roots(unit_path),
            'BLOB': unit.get('TEST_BLOB_DATA') or '',
        }
        # Optional fields are only emitted when present.
        if binary_path:
            test_record['BINARY-PATH'] = _common.strip_roots(binary_path)
        if runner_bin:
            test_record['TEST-RUNNER-BIN'] = runner_bin
        if yt_spec:
            test_record['YT-SPEC'] = serialize_list(yt_spec)
        data = dump_test(unit, test_record)
        if data:
            unit.set_property(["DART_DATA", data])
            save_in_file(unit.get('TEST_DART_OUT_FILE'), data)
    _add_data_files(unit, data_files, test_dir)
def _add_data_files(unit, data_files, test_dir):
    """Record canonical data files, rebased onto the unit path, in DART_DATA_FILES."""
    if not data_files:
        return
    unit_path = unit.path()
    rebased = [os.path.join(unit_path, os.path.relpath(f, test_dir)) for f in data_files]
    unit.set_property(['DART_DATA_FILES', serialize_list(sorted(rebased))])
def onsetup_pytest_bin(unit, *args):
    """Configure a pytest target: binary form under Arcadia Python, script form otherwise."""
    if unit.get('USE_ARCADIA_PYTHON') == "yes":
        unit.onresource(['-', 'PY_MAIN={}'.format("library.python.pytest.main:main")])  # XXX
        unit.onadd_pytest_bin(list(args))
    else:
        unit.onno_platform()
        unit.onadd_pytest_script(["PY_TEST"])
def onrun(unit, *args):
    """Append one RUN(...) invocation, as a command line, to EXECTEST_COMMAND_VALUE."""
    existing = unit.get(["EXECTEST_COMMAND_VALUE"]) or ''
    appended = subprocess.list2cmdline(args)
    unit.set(["EXECTEST_COMMAND_VALUE", existing + "\n" + appended])
def onsetup_exectest(unit, *args):
    """Finalize an EXECTEST module: pack accumulated RUN() commands into the DART."""
    command = unit.get(["EXECTEST_COMMAND_VALUE"])
    if command is None:
        ymake.report_configure_error("EXECTEST must have at least one RUN macro")
        return
    command = command.replace("$EXECTEST_COMMAND_VALUE", "")
    if "PYTHON_BIN" in command:
        # The command references the bundled interpreter; pull it in.
        unit.ondepends('contrib/tools/python')
    # Commands travel to the runner as a base64 blob.
    # NOTE(review): b64encode on a str is Python-2-only; py3 would need bytes.
    unit.set(["TEST_BLOB_DATA", base64.b64encode(command)])
    # ".pkg" is stripped so the DART points at the underlying binary.
    add_test_to_dart(unit, "exectest", binary_path=os.path.join(unit.path(), unit.filename()).replace(".pkg", ""))
def onsetup_run_python(unit):
    """Pull in the Arcadia-bundled Python interpreter when it is in use."""
    if unit.get("USE_ARCADIA_PYTHON") != "yes":
        return
    unit.ondepends('contrib/tools/python')
def get_canonical_test_resources(test_dir, unit_path):
    """Collect external canonical-data resources for a test directory.

    Returns (resources, data_files); both empty when no canonical data
    directory or no v2 result file exists.
    """
    canon_data_dir = os.path.join(test_dir, CANON_DATA_DIR_NAME)
    try:
        _, _, files = next(os.walk(canon_data_dir))
    except StopIteration:
        # canonical data directory is absent
        return ([], [])
    if CANON_RESULT_FILE_NAME not in files:
        return ([], [])
    result_file = os.path.join(canon_data_dir, CANON_RESULT_FILE_NAME)
    return _get_canonical_data_resources_v2(result_file, unit_path)
def _load_canonical_file(filename, unit_path):
try:
with open(filename) as results_file:
return json.load(results_file)
except Exception as e:
print>>sys.stderr, "malformed canonical data in {}: {} ({})".format(unit_path, e, filename)
return {}
def _get_resource_from_uri(uri):
m = CANON_MDS_RESOURCE_REGEX.match(uri)
if m:
res_id = m.group(1)
return "{}:{}".format(MDS_SHEME, res_id)
m = CANON_SBR_RESOURCE_REGEX.match(uri)
if m:
# There might be conflict between resources, because all resources in sandbox have 'resource.tar.gz' name
# That's why we use notation with '=' to specify specific path for resource
uri = m.group(1)
res_id = m.group(2)
return "{}={}".format(uri, '/'.join([CANON_OUTPUT_STORAGE, res_id]))
def _get_external_resources_from_canon_data(data):
# Method should work with both canonization versions:
# result.json: {'uri':X 'checksum':Y}
# result.json: {'testname': {'uri':X 'checksum':Y}}
# result.json: {'testname': [{'uri':X 'checksum':Y}]}
# Also there is a bug - if user returns {'uri': 1} from test - machinery will fail
# That's why we check 'uri' and 'checksum' fields presence
# (it's still a bug - user can return {'uri':X, 'checksum': Y}, we need to unify canonization format)
res = set()
if isinstance(data, dict):
if 'uri' in data and 'checksum' in data:
resource = _get_resource_from_uri(data['uri'])
if resource:
res.add(resource)
else:
for k, v in data.iteritems():
res.update(_get_external_resources_from_canon_data(v))
elif isinstance(data, list):
for e in data:
res.update(_get_external_resources_from_canon_data(e))
return res
def _get_canonical_data_resources_v2(filename, unit_path):
    """Extract resources from a v2 result.json; returns (resources, [filename])."""
    canon_data = _load_canonical_file(filename, unit_path)
    resources = _get_external_resources_from_canon_data(canon_data)
    return (resources, [filename])
| 41.886388 | 251 | 0.630194 |
c387f4c87437ea80642e635aea154bef93230d48 | 993 | py | Python | ML_Lecture5_SelfStudy5/gradient_example.py | Jbarata98/ML_AAU1920 | 090b8cbae9d6adba4ab30e7d4fd68eb24e04c5f4 | [
"MIT"
] | 2 | 2021-05-16T11:21:23.000Z | 2021-05-16T11:21:23.000Z | ML_Lecture5_SelfStudy5/gradient_example.py | Jbarata98/ML_AAU1920 | 090b8cbae9d6adba4ab30e7d4fd68eb24e04c5f4 | [
"MIT"
] | null | null | null | ML_Lecture5_SelfStudy5/gradient_example.py | Jbarata98/ML_AAU1920 | 090b8cbae9d6adba4ab30e7d4fd68eb24e04c5f4 | [
"MIT"
] | null | null | null | import torch
sigmoid = torch.nn.Sigmoid()
relu = torch.nn.ReLU()
x1 = torch.tensor(1.0, requires_grad=True)
w1 = torch.tensor(2.0, requires_grad=True)
x2 = torch.tensor(3.0, requires_grad=True)
w2 = torch.tensor(0.5, requires_grad=True)
y1 = x1*w1
y1.register_hook(lambda grad: print("Grad y1 = {}".format(grad)))
y2 = x2*w2
y2.register_hook(lambda grad: print("Grad y2 = {}".format(grad)))
y3 = y1+y2
y3.register_hook(lambda grad: print("Grad y3 = {}".format(grad)))
y4 = sigmoid(y3)
y4.register_hook(lambda grad: print("Grad y4 = {}".format(grad)))
y5 = relu(y3)
y5.register_hook(lambda grad: print("Grad y5 = {}".format(grad)))
y6 = y4 * y5
y6.register_hook(lambda grad: print("Grad y6 = {}".format(grad)))
e = (1.0 - y6)**2
print(e)
e.backward()
print("Grad x1 = {}".format(x1.grad))
print("Grad x2 = {}".format(x2.grad))
print("Grad w1 = {}".format(w1.grad))
print("Grad w2 = {}".format(w2.grad))
print("Done")
Grad w5 = -0.10102789849042892
Grad w4 = -0.02277098223567009
| 23.093023 | 65 | 0.668681 |
c82ace0bb5321d8a7f051f7f5d47d20833ab9d15 | 2,466 | py | Python | pythonCore/ch10/myexc.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | pythonCore/ch10/myexc.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | pythonCore/ch10/myexc.py | Furzoom/learnpython | a3034584e481d4e7c55912d9da06439688aa67ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import errno
import os
import socket
import tempfile
import types
class NetworkError(IOError):
    """Socket-level connection failure, re-raised by myconnect()."""
    pass
class FileError(IOError):
    """File open failure with enriched arguments, re-raised by myopen()."""
    pass
def updArgs(args, newarg=None):
    """Copy exception/tuple args into a fresh tuple, optionally appending newarg."""
    if isinstance(args, IOError):
        combined = [item for item in args]
    else:
        combined = list(args)
    if newarg:
        combined.append(newarg)
    return tuple(combined)
def fileArgs(file, mode, args):
    """Enrich an open() error's args with mode, file permissions, and filename.

    For EACCES errors the message gets an "'<mode>' <msg> (perms: 'rwx')"
    annotation and the filename is appended; other errors pass through
    unchanged (as a tuple).
    """
    if args[0] == errno.EACCES and \
            'access' in dir(os):
        # Build an ls-style 'rwx'/'-' permission summary for the file.
        perms = ''
        permd = {'r': os.R_OK, 'w': os.W_OK,
                 'x': os.X_OK}
        # (Removed dead code: a sorted/reversed copy of permd.keys() was
        # computed here but never used — and dict.keys().sort() would also
        # crash under Python 3.)
        for eachPerm in 'rwx':
            if os.access(file, permd[eachPerm]):
                perms += eachPerm
            else:
                perms += '-'
        if isinstance(args, IOError):
            myargs = []
            myargs.extend([arg for arg in args])
        else:
            myargs = list(args)
        myargs[1] = "'%s' %s (perms: '%s')" % \
            (mode, myargs[1], perms)
        # NOTE(review): assumes args carries a .filename attribute (an IOError).
        myargs.append(args.filename)
    else:
        myargs = args
    return tuple(myargs)
def myconnect(sock, host, port):
    """Connect sock to (host, port), re-raising socket.error as NetworkError.

    The NetworkError's args are the original error args annotated with
    "host: port".
    """
    try:
        sock.connect((host, port))
    except socket.error as args:
        myargs = updArgs(args)
        if len(myargs) == 1:
            # Some socket errors carry only a message; synthesize an errno.
            myargs = (errno.ENXIO, myargs[0])
        # `raise Cls(value)` is equivalent to the original Python-2-only
        # `raise Cls, value` form and is valid under Python 3 as well.
        raise NetworkError(updArgs(myargs, host + ': ' + str(port)))
def myopen(file, mode='r'):
    """Open a file, re-raising IOError as FileError with enriched args.

    Returns the open file object on success.
    """
    try:
        fo = open(file, mode)
    except IOError as args:
        # `raise Cls(value)` replaces the Python-2-only `raise Cls, value`
        # syntax; identical behavior, valid under both Python 2 and 3.
        raise FileError(fileArgs(file, mode, args))
    return fo
def testfile():
    """Exercise myopen() against a temp file under several permission masks.

    0o100/0o500 deny reading, 0o400/0o500 deny writing, so each attempt is
    expected to raise FileError (printed); otherwise the permission was
    ignored (e.g. running as root).
    """
    file = tempfile.mktemp()
    f = open(file, 'w')
    f.close()
    # `0o100`-style octals replace the legacy `0100` form, which is a syntax
    # error under Python 3 (and `0oNNN` is valid from Python 2.6 on).
    for eachTest in ((0, 'r'), (0o100, 'r'), (0o400, 'w'), (0o500, 'w')):
        try:
            os.chmod(file, eachTest[0])
            f = myopen(file, eachTest[1])
        except FileError as args:
            # Single-argument print(...) works under both Python 2 and 3.
            print('%s: %s' % (args.__class__.__name__, args))
        else:
            print('%s opened ok... perm ignored' % file)
            f.close()
    os.chmod(file, 0o777)
    os.unlink(file)
def testnet():
    """Attempt TCP connections to two hosts; print the NetworkError raised for each."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    for eachHost in ('deli', 'www'):
        try:
            myconnect(s, eachHost, 8080)
        except NetworkError as args:
            # Single-argument print(...) replaces the Python-2-only print
            # statement; output is byte-identical.
            print('%s: %s' % (args.__class__.__name__, args))
if __name__ == '__main__':
    # Run both demo suites (file-permission and network error handling)
    # when executed as a script.
    testfile()
    testnet()
| 21.631579 | 70 | 0.528792 |
37daf05f40d93c7f491b6b78cd4613f5dfeeb157 | 21,305 | py | Python | es_distributed/policies.py | TrevorCMorton/deep-neuroevolution | d6e5c5b41202e9c2e50c986fc822809b507ede64 | [
"MIT"
] | null | null | null | es_distributed/policies.py | TrevorCMorton/deep-neuroevolution | d6e5c5b41202e9c2e50c986fc822809b507ede64 | [
"MIT"
] | null | null | null | es_distributed/policies.py | TrevorCMorton/deep-neuroevolution | d6e5c5b41202e9c2e50c986fc822809b507ede64 | [
"MIT"
] | null | null | null | import logging
import pickle
import time
import h5py
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from . import tf_util as U
logger = logging.getLogger(__name__)
class Policy:
    """Base class for TF1 graph-mode policies used by the ES/GA workers.

    Subclasses implement _initialize (build the network, return its variable
    scope) and act; this base class provides flat get/set of trainable
    parameters, HDF5 save/load, and a generic environment rollout.
    """

    def __init__(self, *args, **kwargs):
        # Stored so save() can pickle the constructor arguments for Load().
        self.args, self.kwargs = args, kwargs
        self.scope = self._initialize(*args, **kwargs)
        self.all_variables = tf.get_collection(tf.GraphKeys.VARIABLES, self.scope.name)

        self.trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name)
        self.num_params = sum(int(np.prod(v.get_shape().as_list())) for v in self.trainable_variables)
        # Flat (1-D vector) views over the trainable parameters.
        self._setfromflat = U.SetFromFlat(self.trainable_variables)
        self._getflat = U.GetFlat(self.trainable_variables)

        logger.info('Trainable variables ({} parameters)'.format(self.num_params))
        for v in self.trainable_variables:
            shp = v.get_shape().as_list()
            logger.info('- {} shape:{} size:{}'.format(v.name, shp, np.prod(shp)))
        logger.info('All variables')
        for v in self.all_variables:
            shp = v.get_shape().as_list()
            logger.info('- {} shape:{} size:{}'.format(v.name, shp, np.prod(shp)))

        # One feed placeholder per variable; set_all_vars assigns them all.
        placeholders = [tf.placeholder(v.value().dtype, v.get_shape().as_list()) for v in self.all_variables]
        self.set_all_vars = U.function(
            inputs=placeholders,
            outputs=[],
            updates=[tf.group(*[v.assign(p) for v, p in zip(self.all_variables, placeholders)])]
        )

    def reinitialize(self):
        """Re-run each trainable variable's reinitialize op."""
        for v in self.trainable_variables:
            v.reinitialize.eval()

    def _initialize(self, *args, **kwargs):
        raise NotImplementedError

    def save(self, filename):
        """Serialize all variables plus constructor args into an HDF5 file."""
        assert filename.endswith('.h5')
        with h5py.File(filename, 'w', libver='latest') as f:
            for v in self.all_variables:
                f[v.name] = v.eval()
            # TODO: it would be nice to avoid pickle, but it's convenient to pass Python objects to _initialize
            # (like Gym spaces or numpy arrays)
            f.attrs['name'] = type(self).__name__
            f.attrs['args_and_kwargs'] = np.void(pickle.dumps((self.args, self.kwargs), protocol=-1))

    @classmethod
    def Load(cls, filename, extra_kwargs=None):
        """Reconstruct a policy saved by save(): rebuild from pickled args, then restore weights."""
        with h5py.File(filename, 'r') as f:
            args, kwargs = pickle.loads(f.attrs['args_and_kwargs'].tostring())
            if extra_kwargs:
                kwargs.update(extra_kwargs)
            policy = cls(*args, **kwargs)
            policy.set_all_vars(*[f[v.name][...] for v in policy.all_variables])
        return policy

    # === Rollouts/training ===

    def rollout(self, env, *, render=False, timestep_limit=None, save_obs=False, random_stream=None):
        """
        If random_stream is provided, the rollout will take noisy actions with noise drawn from that stream.
        Otherwise, no action noise will be added.
        """
        env_timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')
        timestep_limit = env_timestep_limit if timestep_limit is None else min(timestep_limit, env_timestep_limit)
        rews = []
        t = 0
        if save_obs:
            obs = []
        ob = env.reset()
        for _ in range(timestep_limit):
            # ob[None] adds a batch dimension; [0] strips it from the action.
            ac = self.act(ob[None], random_stream=random_stream)[0]
            if save_obs:
                obs.append(ob)
            ob, rew, done, _ = env.step(ac)
            rews.append(rew)
            t += 1
            if render:
                env.render()
            if done:
                break
        rews = np.array(rews, dtype=np.float32)
        if save_obs:
            return rews, t, np.array(obs)
        return rews, t

    def act(self, ob, random_stream=None):
        raise NotImplementedError

    def set_trainable_flat(self, x):
        # x: flat 1-D parameter vector of length num_params.
        self._setfromflat(x)

    def get_trainable_flat(self):
        return self._getflat()

    @property
    def needs_ob_stat(self):
        raise NotImplementedError

    def set_ob_stat(self, ob_mean, ob_std):
        raise NotImplementedError
def bins(x, dim, num_bins, name):
    """Score `num_bins` bins per output dimension and return the argmax bin indices."""
    flat_scores = U.dense(x, dim * num_bins, name, U.normc_initializer(0.01))
    per_dim_scores = tf.reshape(flat_scores, [-1, dim, num_bins])
    return tf.argmax(per_dim_scores, 2)  # indices in 0 ... num_bins-1
class MujocoPolicy(Policy):
    """Feed-forward policy for MuJoCo continuous-control tasks.

    Observations are normalized with externally-supplied running statistics;
    actions are produced either continuously or via discretized bins.
    The rollout additionally returns a novelty vector (final x/y position or
    full x/y trajectory) for novelty-search variants.
    """

    def _initialize(self, ob_space, ac_space, ac_bins, ac_noise_std, nonlin_type, hidden_dims, connection_type):
        self.ac_space = ac_space
        self.ac_bins = ac_bins
        self.ac_noise_std = ac_noise_std
        self.hidden_dims = hidden_dims
        self.connection_type = connection_type

        assert len(ob_space.shape) == len(self.ac_space.shape) == 1
        assert np.all(np.isfinite(self.ac_space.low)) and np.all(np.isfinite(self.ac_space.high)), \
            'Action bounds required'

        self.nonlin = {'tanh': tf.tanh, 'relu': tf.nn.relu, 'lrelu': U.lrelu, 'elu': tf.nn.elu}[nonlin_type]

        with tf.variable_scope(type(self).__name__) as scope:
            # Observation normalization: mean/std are non-trainable variables
            # filled in later via set_ob_stat (NaN-initialized until then).
            ob_mean = tf.get_variable(
                'ob_mean', ob_space.shape, tf.float32, tf.constant_initializer(np.nan), trainable=False)
            ob_std = tf.get_variable(
                'ob_std', ob_space.shape, tf.float32, tf.constant_initializer(np.nan), trainable=False)
            in_mean = tf.placeholder(tf.float32, ob_space.shape)
            in_std = tf.placeholder(tf.float32, ob_space.shape)
            self._set_ob_mean_std = U.function([in_mean, in_std], [], updates=[
                tf.assign(ob_mean, in_mean),
                tf.assign(ob_std, in_std),
            ])

            # Policy network: normalized observations clipped to [-5, 5].
            o = tf.placeholder(tf.float32, [None] + list(ob_space.shape))
            a = self._make_net(tf.clip_by_value((o - ob_mean) / ob_std, -5.0, 5.0))
            self._act = U.function([o], a)
        return scope

    def _make_net(self, o):
        # Process observation
        if self.connection_type == 'ff':
            x = o
            for ilayer, hd in enumerate(self.hidden_dims):
                x = self.nonlin(U.dense(x, hd, 'l{}'.format(ilayer), U.normc_initializer(1.0)))
        else:
            raise NotImplementedError(self.connection_type)

        # Map to action
        adim, ahigh, alow = self.ac_space.shape[0], self.ac_space.high, self.ac_space.low
        assert isinstance(self.ac_bins, str)
        ac_bin_mode, ac_bin_arg = self.ac_bins.split(':')

        if ac_bin_mode == 'uniform':
            # Uniformly spaced bins, from ac_space.low to ac_space.high
            num_ac_bins = int(ac_bin_arg)
            aidx_na = bins(x, adim, num_ac_bins, 'out')  # 0 ... num_ac_bins-1
            ac_range_1a = (ahigh - alow)[None, :]
            a = 1. / (num_ac_bins - 1.) * tf.to_float(aidx_na) * ac_range_1a + alow[None, :]

        elif ac_bin_mode == 'custom':
            # Custom bins specified as a list of values from -1 to 1
            # The bins are rescaled to ac_space.low to ac_space.high
            acvals_k = np.array(list(map(float, ac_bin_arg.split(','))), dtype=np.float32)
            logger.info('Custom action values: ' + ' '.join('{:.3f}'.format(x) for x in acvals_k))
            assert acvals_k.ndim == 1 and acvals_k[0] == -1 and acvals_k[-1] == 1
            acvals_ak = (
                (ahigh - alow)[:, None] / (acvals_k[-1] - acvals_k[0]) * (acvals_k - acvals_k[0])[None, :]
                + alow[:, None]
            )
            aidx_na = bins(x, adim, len(acvals_k), 'out')  # values in [0, k-1]
            a = tf.gather_nd(
                acvals_ak,
                tf.concat(2, [
                    tf.tile(np.arange(adim)[None, :, None], [tf.shape(aidx_na)[0], 1, 1]),
                    tf.expand_dims(aidx_na, -1)
                ])  # (n,a,2)
            )  # (n,a)

        elif ac_bin_mode == 'continuous':
            a = U.dense(x, adim, 'out', U.normc_initializer(0.01))
        else:
            raise NotImplementedError(ac_bin_mode)

        return a

    def act(self, ob, random_stream=None):
        a = self._act(ob)
        # Gaussian exploration noise is only added when a stream is supplied.
        if random_stream is not None and self.ac_noise_std != 0:
            a += random_stream.randn(*a.shape) * self.ac_noise_std
        return a

    @property
    def needs_ob_stat(self):
        return True

    @property
    def needs_ref_batch(self):
        return False

    def set_ob_stat(self, ob_mean, ob_std):
        self._set_ob_mean_std(ob_mean, ob_std)

    def initialize_from(self, filename, ob_stat=None):
        """
        Initializes weights from another policy, which must have the same architecture (variable names),
        but the weight arrays can be smaller than the current policy.
        """
        with h5py.File(filename, 'r') as f:
            f_var_names = []
            f.visititems(lambda name, obj: f_var_names.append(name) if isinstance(obj, h5py.Dataset) else None)
            assert set(v.name for v in self.all_variables) == set(f_var_names), 'Variable names do not match'

            init_vals = []
            for v in self.all_variables:
                shp = v.get_shape().as_list()
                f_shp = f[v.name].shape
                assert len(shp) == len(f_shp) and all(a >= b for a, b in zip(shp, f_shp)), \
                    'This policy must have more weights than the policy to load'
                init_val = v.eval()
                # ob_mean and ob_std are initialized with nan, so set them manually
                if 'ob_mean' in v.name:
                    init_val[:] = 0
                    init_mean = init_val
                elif 'ob_std' in v.name:
                    init_val[:] = 0.001
                    init_std = init_val
                # Fill in subarray from the loaded policy
                init_val[tuple([np.s_[:s] for s in f_shp])] = f[v.name]
                init_vals.append(init_val)
            self.set_all_vars(*init_vals)

        if ob_stat is not None:
            ob_stat.set_from_init(init_mean, init_std, init_count=1e5)

    def _get_pos(self, model):
        # Mass-weighted center of the MuJoCo bodies -> (x, y, z).
        mass = model.body_mass
        xpos = model.data.xipos
        center = (np.sum(mass * xpos, 0) / np.sum(mass))
        return center[0], center[1], center[2]

    def rollout(self, env, *, render=False, timestep_limit=None, save_obs=False, random_stream=None, policy_seed=None, bc_choice=None):
        """
        If random_stream is provided, the rollout will take noisy actions with noise drawn from that stream.
        Otherwise, no action noise will be added.
        """
        env_timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')
        timestep_limit = env_timestep_limit if timestep_limit is None else min(timestep_limit, env_timestep_limit)
        rews = []
        # Per-step x/y center-of-mass trajectory, used as the behavior
        # characterization when bc_choice == "traj".
        x_traj, y_traj = np.zeros(timestep_limit), np.zeros(timestep_limit)
        t = 0
        if save_obs:
            obs = []

        if policy_seed:
            env.seed(policy_seed)
            np.random.seed(policy_seed)
            if random_stream:
                random_stream.seed(policy_seed)

        ob = env.reset()
        for _ in range(timestep_limit):
            ac = self.act(ob[None], random_stream=random_stream)[0]
            if save_obs:
                obs.append(ob)
            ob, rew, done, _ = env.step(ac)
            x_traj[t], y_traj[t], _ = self._get_pos(env.unwrapped.model)
            rews.append(rew)
            t += 1
            if render:
                env.render()
            if done:
                break

        x_pos, y_pos, _ = self._get_pos(env.unwrapped.model)
        rews = np.array(rews, dtype=np.float32)
        # Pad the trajectory with the final position for early terminations.
        x_traj[t:] = x_traj[t-1]
        y_traj[t:] = y_traj[t-1]
        if bc_choice and bc_choice == "traj":
            novelty_vector = np.concatenate((x_traj, y_traj), axis=0)
        else:
            novelty_vector = np.array([x_pos, y_pos])
        if save_obs:
            return rews, t, np.array(obs), novelty_vector
        return rews, t, novelty_vector
class ESAtariPolicy(Policy):
    """Convolutional policy for Atari under evolution strategies.

    Uses "virtual batch normalization": a fixed reference batch is passed
    through the network (is_ref=True) before each rollout to set the batch
    norm statistics.  Rollouts also return the per-step emulator RAM as the
    novelty vector.
    """

    def _initialize(self, ob_space, ac_space):
        self.ob_space_shape = ob_space.shape
        self.ac_space = ac_space
        self.num_actions = ac_space.n

        with tf.variable_scope(type(self).__name__) as scope:
            o = tf.placeholder(tf.float32, [None] + list(self.ob_space_shape))
            # True only when feeding the reference batch (updates BN stats).
            is_ref_ph = tf.placeholder(tf.bool, shape=[])

            a = self._make_net(o, is_ref_ph)
            self._act = U.function([o, is_ref_ph], a)
        return scope

    def _make_net(self, o, is_ref):
        x = o
        x = layers.convolution2d(x, num_outputs=16, kernel_size=8, stride=4, activation_fn=None, scope='conv1')
        x = layers.batch_norm(x, scale=True, is_training=is_ref, decay=0., updates_collections=None, activation_fn=tf.nn.relu, epsilon=1e-3)
        x = layers.convolution2d(x, num_outputs=32, kernel_size=4, stride=2, activation_fn=None, scope='conv2')
        x = layers.batch_norm(x, scale=True, is_training=is_ref, decay=0., updates_collections=None, activation_fn=tf.nn.relu, epsilon=1e-3)

        x = layers.flatten(x)
        x = layers.fully_connected(x, num_outputs=256, activation_fn=None, scope='fc')
        x = layers.batch_norm(x, scale=True, is_training=is_ref, decay=0., updates_collections=None, activation_fn=tf.nn.relu, epsilon=1e-3)
        a = layers.fully_connected(x, num_outputs=self.num_actions, activation_fn=None, scope='out')
        # Deterministic greedy action over the output logits.
        return tf.argmax(a, 1)

    def set_ref_batch(self, ref_batch):
        # Stored as [observations, is_ref=True] to match self._act's inputs.
        self.ref_list = []
        self.ref_list.append(ref_batch)
        self.ref_list.append(True)

    @property
    def needs_ob_stat(self):
        return False

    @property
    def needs_ref_batch(self):
        return True

    def initialize_from(self, filename):
        """
        Initializes weights from another policy, which must have the same architecture (variable names),
        but the weight arrays can be smaller than the current policy.
        """
        with h5py.File(filename, 'r') as f:
            f_var_names = []
            f.visititems(lambda name, obj: f_var_names.append(name) if isinstance(obj, h5py.Dataset) else None)
            assert set(v.name for v in self.all_variables) == set(f_var_names), 'Variable names do not match'

            init_vals = []
            for v in self.all_variables:
                shp = v.get_shape().as_list()
                f_shp = f[v.name].shape
                assert len(shp) == len(f_shp) and all(a >= b for a, b in zip(shp, f_shp)), \
                    'This policy must have more weights than the policy to load'
                init_val = v.eval()
                # ob_mean and ob_std are initialized with nan, so set them manually
                if 'ob_mean' in v.name:
                    init_val[:] = 0
                    init_mean = init_val
                elif 'ob_std' in v.name:
                    init_val[:] = 0.001
                    init_std = init_val
                # Fill in subarray from the loaded policy
                init_val[tuple([np.s_[:s] for s in f_shp])] = f[v.name]
                init_vals.append(init_val)
            self.set_all_vars(*init_vals)

    def act(self, train_vars, random_stream=None):
        # train_vars is [observation_batch, is_ref_flag].
        return self._act(*train_vars)

    def rollout(self, env, *, render=False, timestep_limit=None, save_obs=False, random_stream=None, worker_stats=None, policy_seed=None):
        """
        If random_stream is provided, the rollout will take noisy actions with noise drawn from that stream.
        Otherwise, no action noise will be added.
        """
        env_timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')
        timestep_limit = env_timestep_limit if timestep_limit is None else min(timestep_limit, env_timestep_limit)
        rews = []; novelty_vector = []
        t = 0

        if save_obs:
            obs = []

        if policy_seed:
            env.seed(policy_seed)
            np.random.seed(policy_seed)
            if random_stream:
                random_stream.seed(policy_seed)

        ob = env.reset()
        self.act(self.ref_list, random_stream=random_stream)  # passing ref batch through network

        for _ in range(timestep_limit):
            start_time = time.time()
            ac = self.act([ob[None], False], random_stream=random_stream)[0]

            if worker_stats:
                worker_stats.time_comp_act += time.time() - start_time

            start_time = time.time()
            ob, rew, done, info = env.step(ac)
            ram = env.unwrapped._get_ram()  # extracts RAM state information

            if save_obs:
                obs.append(ob)
            if worker_stats:
                worker_stats.time_comp_step += time.time() - start_time

            rews.append(rew)
            # The RAM snapshot per step forms the novelty characterization.
            novelty_vector.append(ram)

            t += 1
            if render:
                env.render()
            if done:
                break

        rews = np.array(rews, dtype=np.float32)
        if save_obs:
            return rews, t, np.array(obs), np.array(novelty_vector)
        return rews, t, np.array(novelty_vector)
class GAAtariPolicy(Policy):
    """Discrete-action Atari policy (conv net -> argmax) for GA-style evolution.

    `rollout` here takes a *list* of environments and runs one episode in each,
    unlike the single-env rollout of the base ES policy.
    """

    def _initialize(self, ob_space, ac_space, nonlin_type, ac_init_std=0.1):
        """Build the TF graph and compile the action function.

        :param ob_space: observation space (its .shape sizes the input placeholder)
        :param ac_space: discrete action space (its .n sizes the output layer)
        :param nonlin_type: one of 'tanh', 'relu', 'lrelu', 'elu'
        :param ac_init_std: std of the output-layer weight initializer
        :return: the tf variable scope containing the policy's variables
        """
        self.ob_space_shape = ob_space.shape
        self.ac_space = ac_space
        self.ac_init_std = ac_init_std
        self.num_actions = self.ac_space.n
        self.nonlin = {'tanh': tf.tanh, 'relu': tf.nn.relu, 'lrelu': U.lrelu, 'elu': tf.nn.elu}[nonlin_type]

        with tf.variable_scope(type(self).__name__) as scope:
            o = tf.placeholder(tf.float32, [None] + list(self.ob_space_shape))
            a = self._make_net(o)
            self._act = U.function([o], a)
        return scope

    def _make_net(self, o):
        # Small conv net (2 conv layers + one fc layer) -> per-action logits,
        # reduced to the greedy action via argmax.
        x = o
        x = self.nonlin(U.conv(x, name='conv1', num_outputs=16, kernel_size=8, stride=4, std=1.0))
        x = self.nonlin(U.conv(x, name='conv2', num_outputs=32, kernel_size=4, stride=2, std=1.0))

        x = U.flattenallbut0(x)
        x = self.nonlin(U.dense(x, 256, 'fc', U.normc_initializer(1.0), std=1.0))

        a = U.dense(x, self.num_actions, 'out', U.normc_initializer(self.ac_init_std), std=self.ac_init_std)
        return tf.argmax(a, 1)

    @property
    def needs_ob_stat(self):
        return False

    @property
    def needs_ref_batch(self):
        return False

    # Don't add random noise since action space is discrete
    def act(self, train_vars, random_stream=None):
        return self._act(train_vars)

    def rollout(self, env, *, render=False, timestep_limit=None, save_obs=False, random_stream=None, worker_stats=None, policy_seed=None):
        """
        Run one greedy episode in each environment of the list `env`.

        If random_stream is provided, the rollout will take noisy actions with noise drawn from that stream.
        Otherwise, no action noise will be added.

        :return: (rewards, t, novelty_vectors) — or with np.array(obs) inserted
                 before novelty_vectors when save_obs is True.
        """
        env_timestep_limit = env[0].spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')
        timestep_limit = env_timestep_limit if timestep_limit is None else min(timestep_limit, env_timestep_limit)
        rewards = []
        rews = []; novelty_vectors = []
        t = 0  # total steps taken across all episodes
        obs = []
        actions = []
        if policy_seed:
            # BUG FIX: `env` is a list here; the original called env.seed(...)
            # on the list itself, which raises AttributeError. Seed each env.
            for e in env:
                e.seed(policy_seed)
            np.random.seed(policy_seed)
            if random_stream:
                random_stream.seed(policy_seed)

        # All episodes share one action-histogram width: the largest action space.
        max_actions = 0
        for i in range(0, len(env)):
            max_actions = max(max_actions, env[i].action_space.n)

        for i in range(len(env)):
            actions_chosen = np.zeros((max_actions,))
            repetitions = np.zeros((max_actions,))
            reward = 0
            prev_action = None
            steps = 0  # steps taken in this episode
            ob = env[i].reset()
            for j in range(timestep_limit):
                ac = self.act(ob[None], random_stream=random_stream)[0]
                # Clamp actions that are invalid for this particular env.
                if ac >= env[i].action_space.n:
                    ac = 0
                actions_chosen[ac] += 1
                if ac != prev_action:
                    repetitions[ac] += 1
                prev_action = ac
                obs.append(ob)
                actions.append(ac)
                ob, rew, done, info = env[i].step(ac)
                rews.append(rew)
                reward += rew
                t += 1
                steps += 1
                if render:
                    env[i].render(mode='human')
                if done:
                    break
            # Avoid division by zero for never-repeated actions.
            repetitions += repetitions == 0
            # BUG FIX: normalize by the number of steps taken (steps == j + 1),
            # not by the last loop index j, which was off by one and divided by
            # zero whenever an episode ended on its very first step.
            novelty_vectors.append(np.concatenate([actions_chosen / steps, actions_chosen / repetitions, [reward / steps], [reward]]))
            rews = np.array(rews, dtype=np.float32)
            print('return={:.4f} len={}'.format(rews.sum(), t))
            rewards.append(rews)
            rews = []
        if save_obs:
            return rewards, t, np.array(obs), novelty_vectors
        return rewards, t, novelty_vectors
| 39.163603 | 140 | 0.588266 |
abe10bb02948764540930e6f8d51e6608042a259 | 3,390 | py | Python | neptune/new/internal/backends/offline_neptune_backend.py | janbolle/neptune-client | 33b1876b361d9a7184f557d7bd6e016cb08bd59f | [
"Apache-2.0"
] | null | null | null | neptune/new/internal/backends/offline_neptune_backend.py | janbolle/neptune-client | 33b1876b361d9a7184f557d7bd6e016cb08bd59f | [
"Apache-2.0"
] | null | null | null | neptune/new/internal/backends/offline_neptune_backend.py | janbolle/neptune-client | 33b1876b361d9a7184f557d7bd6e016cb08bd59f | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from typing import List
from neptune.new.exceptions import NeptuneOfflineModeFetchException
from neptune.new.internal.backends.api_model import (
Attribute,
BoolAttribute,
FileAttribute,
FloatAttribute,
IntAttribute,
StringAttribute,
DatetimeAttribute,
FloatSeriesAttribute,
StringSeriesAttribute,
StringSetAttribute,
StringSeriesValues,
FloatSeriesValues,
ImageSeriesValues,
)
from neptune.new.internal.backends.neptune_backend_mock import NeptuneBackendMock
class OfflineNeptuneBackend(NeptuneBackendMock):
    """Backend used when the client runs in offline mode.

    Write operations are inherited from NeptuneBackendMock; every read/fetch
    operation below raises NeptuneOfflineModeFetchException, because an offline
    run has no server-side state that could be queried.
    """

    def get_attributes(self, run_uuid: uuid.UUID) -> List[Attribute]:
        raise NeptuneOfflineModeFetchException

    def get_float_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> FloatAttribute:
        raise NeptuneOfflineModeFetchException

    def get_int_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> IntAttribute:
        raise NeptuneOfflineModeFetchException

    def get_bool_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> BoolAttribute:
        raise NeptuneOfflineModeFetchException

    def get_file_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> FileAttribute:
        raise NeptuneOfflineModeFetchException

    def get_string_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> StringAttribute:
        raise NeptuneOfflineModeFetchException

    def get_datetime_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> DatetimeAttribute:
        raise NeptuneOfflineModeFetchException

    def get_float_series_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> FloatSeriesAttribute:
        raise NeptuneOfflineModeFetchException

    def get_string_series_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> StringSeriesAttribute:
        raise NeptuneOfflineModeFetchException

    def get_string_set_attribute(self, run_uuid: uuid.UUID, path: List[str]) -> StringSetAttribute:
        raise NeptuneOfflineModeFetchException

    def get_string_series_values(self, run_uuid: uuid.UUID, path: List[str],
                                 offset: int, limit: int) -> StringSeriesValues:
        raise NeptuneOfflineModeFetchException

    def get_float_series_values(self, run_uuid: uuid.UUID, path: List[str],
                                offset: int, limit: int) -> FloatSeriesValues:
        raise NeptuneOfflineModeFetchException

    def get_image_series_values(self, run_uuid: uuid.UUID, path: List[str],
                                offset: int, limit: int) -> ImageSeriesValues:
        raise NeptuneOfflineModeFetchException

    def download_file_series_by_index(self, run_uuid: uuid.UUID, path: List[str],
                                      index: int, destination: str):
        raise NeptuneOfflineModeFetchException
| 39.418605 | 105 | 0.729499 |
825459f00b9a264b51f473fde15599f86f6419ea | 1,016 | py | Python | lib/utils/save_model.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 1,378 | 2017-05-11T15:23:44.000Z | 2022-03-30T01:50:34.000Z | lib/utils/save_model.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 91 | 2017-05-12T11:44:43.000Z | 2022-02-01T07:33:52.000Z | lib/utils/save_model.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 348 | 2017-05-11T15:27:37.000Z | 2022-01-19T13:33:01.000Z | # --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong
# --------------------------------------------------------
import mxnet as mx
def save_checkpoint(prefix, epoch, arg_params, aux_params):
    """Checkpoint the model data into a file.

    Writes prefix-epoch.params containing both the network weights and its
    auxiliary states, with keys namespaced as 'arg:<name>' / 'aux:<name>'.

    :param prefix: Prefix of model name.
    :param epoch: The epoch number of the model.
    :param arg_params: dict of str to NDArray of the net's weights.
    :param aux_params: dict of str to NDArray of the net's auxiliary states.
    :return: None
    """
    save_dict = {}
    for name, weight in arg_params.items():
        save_dict['arg:%s' % name] = weight
    for name, state in aux_params.items():
        save_dict['aux:%s' % name] = state
    param_name = '%s-%04d.params' % (prefix, epoch)
    mx.nd.save(param_name, save_dict)
b8737528e4704852ec4efda75b8b8d983885f78c | 91 | py | Python | wire/typings.py | cheetahbyte/wire | f34a41804770968313a8a16a1d9794b7a0abc7dc | [
"MIT"
] | 5 | 2021-09-01T17:34:03.000Z | 2021-12-26T09:41:18.000Z | wire/typings.py | cheetahbyte/wire | f34a41804770968313a8a16a1d9794b7a0abc7dc | [
"MIT"
] | 4 | 2021-12-26T14:53:09.000Z | 2022-01-02T12:50:43.000Z | wire/typings.py | cheetahbyte/wire | f34a41804770968313a8a16a1d9794b7a0abc7dc | [
"MIT"
] | null | null | null | from typing import Callable, Any, Awaitable
CoroutineFunction = Callable[[Any], Awaitable] | 30.333333 | 46 | 0.802198 |
1c3a0fc8179469d205422ebdf25455570f2a5fae | 18,384 | py | Python | Coloring/mayuyu.py | MuAuan/llightning-pytorch | 38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6 | [
"MIT"
] | null | null | null | Coloring/mayuyu.py | MuAuan/llightning-pytorch | 38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6 | [
"MIT"
] | null | null | null | Coloring/mayuyu.py | MuAuan/llightning-pytorch | 38dc9ed75dd8e6f4a2a05e5a10072a549dcbf4d6 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torchvision
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import cv2
import matplotlib.pyplot as plt
import glob
import os
from PIL import Image
# Module-level transform shortcuts reused throughout the script.
ts = transforms.ToPILImage()        # tensor -> PIL image
ts2 = transforms.ToTensor()         # PIL/ndarray -> tensor in [0, 1]
ts3 = transforms.Grayscale()        # RGB -> single-channel grayscale
mean, std = [0.5,0.5,0.5],[0.25,0.25,0.25]
ts4 = transforms.Normalize(mean, std)
# Alternative input images, kept for experimentation:
#image0 = cv2.imread('YCC.jpg')
#image0 = cv2.imread('Lenna_(test_image).png')
#image0 = cv2.imread('mayuyu.jpg')
#autoencode_preds_cifar10_Gray2ClolarizationNormalizeResize3LYCC_100.png
#image0 = cv2.imread('autoencode_preds_cifar10_Gray2ClolarizationNormalizeResize3LYCC_100.png')
#Lenna_(test_image).png
image1=cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)
npimg =ts(ts4(ts2(image1/255.)))
image_ = np.transpose(npimg, (0,1, 2))
plt.title('normalize')
plt.imshow(image_)
plt.pause(3)
plt.savefig('./YCC/normalize.png')
plt.clf()
image_=ts(image0).convert('L')
plt.title('gray')
plt.imshow(image_)
plt.pause(3)
plt.savefig('./YCC/image_gray.png')
plt.clf()
plt.title('gray_gray')
plt.imshow(image_, cmap='gray')
plt.pause(3)
plt.savefig('./YCC/image_gray_gray.png')
plt.clf()
image_g=ts3(ts(image0))
plt.title('gray_ts')
plt.imshow(image_g)
plt.pause(3)
plt.savefig('./YCC/image_g_gray.png')
plt.clf()
plt.title('gray_ts')
plt.imshow(image_g, cmap = 'gray')
plt.pause(3)
plt.savefig('./YCC/image_g_gray_gray.png')
plt.clf()
image1=cv2.cvtColor(image0, cv2.COLOR_BGR2RGB)
plt.title('image1')
plt.imshow(image1)
plt.pause(3)
plt.savefig('./YCC/original.png')
plt.clf()
orgYCrCb = cv2.cvtColor(image1, cv2.COLOR_BGR2YCR_CB)
plt.title('orgYCrCb')
plt.imshow(orgYCrCb)
plt.savefig('./YCC/orgYCrCb.png')
plt.pause(1)
plt.clf()
orgYCrCb_ = cv2.cvtColor(orgYCrCb, cv2.COLOR_YCR_CB2BGR)
plt.title('orgYCrCb_')
plt.imshow(orgYCrCb_)
plt.savefig('./YCC/orgYCrCb_.png')
plt.pause(3)
plt.clf()
Y, Cr,Cb = cv2.split(orgYCrCb)
plt.title('Y')
plt.imshow(Y) #, cmap = 'gray')
plt.savefig('./YCC/Y.png')
plt.pause(1)
plt.clf()
plt.title('Y_gray')
plt.imshow(Y, cmap = 'gray')
plt.savefig('./YCC/Y_gray.png')
plt.pause(1)
plt.clf()
plt.title('Cr')
plt.imshow(Cr) #, cmap = 'gray')
plt.savefig('./YCC/Cr.png') # _gray.png')
plt.pause(1)
plt.clf()
plt.title('Cr_gray')
plt.imshow(Cr, cmap = 'gray')
plt.savefig('./YCC/Cr_gray.png') # _gray.png')
plt.pause(1)
plt.clf()
plt.title('Cb')
plt.imshow(Cb) #, cmap = 'gray')
plt.savefig('./YCC/Cb.png') #_gray.png')
plt.pause(1)
plt.clf()
plt.title('Cb_gray')
plt.imshow(Cb, cmap = 'gray')
plt.savefig('./YCC/Cb_gray.png') #_gray.png')
plt.pause(1)
plt.clf()
Cr_=ts(Cr).convert("RGB")
Cb_ = ts(Cb).convert("RGB")
Y_ = ts(Y).convert('RGB')
#CC = cv2.merge((Y,Cr_,Cb_))
plt.title('Cr_RGB')
plt.imshow(Cr_)
plt.savefig('./YCC/Cr_RGB.png')
plt.pause(3)
plt.clf()
plt.title('Cb_RGB')
plt.imshow(Cb_)
plt.savefig('./YCC/Cb_RGB.png')
plt.pause(3)
plt.clf()
plt.title('Y_RGB')
plt.imshow(Y_)
plt.savefig('./YCC/Y_RGB.png')
plt.pause(3)
plt.clf()
YCC = cv2.merge((Y,Cr,Cb))
orgYCrCb_2 = cv2.cvtColor(YCC, cv2.COLOR_YCR_CB2BGR)
plt.title('YCrCb_merge')
plt.imshow(orgYCrCb_2)
plt.savefig('./YCC/YCC_RGB_merge.png')
plt.pause(3)
plt.clf()
orgLAB = cv2.cvtColor(image1, cv2.COLOR_BGR2LAB)
plt.title('orgLAB')
plt.imshow(orgLAB)
plt.savefig('./YCC/orgLAB.png')
plt.pause(1)
plt.clf()
L, A,B = cv2.split(orgLAB)
plt.title('L')
plt.imshow(L) #, cmap = 'gray')
plt.savefig('./YCC/L.png')
plt.pause(1)
plt.clf()
plt.title('L_gray')
plt.imshow(L, cmap = 'gray')
plt.savefig('./YCC/L_gray.png')
plt.pause(1)
plt.clf()
print(L.shape,A.shape,B.shape)
plt.title('A')
plt.imshow(A)
plt.savefig('./YCC/A.png')
plt.pause(1)
plt.clf()
plt.title('A_gray')
plt.imshow(A, cmap ='gray')
plt.savefig('./YCC/A_gray.png')
plt.pause(1)
plt.clf()
plt.title('B')
plt.imshow(B)
plt.savefig('./YCC/B.png')
plt.pause(1)
plt.clf()
plt.title('B_gray')
plt.imshow(B, cmap = 'gray')
plt.savefig('./YCC/B_gray.png')
plt.pause(1)
plt.clf()
LAB = cv2.merge((L,A,B))
orgLAB_2 = cv2.cvtColor(LAB, cv2.COLOR_LAB2BGR)
plt.title('LAB_merge')
plt.imshow(orgLAB_2)
plt.savefig('./YCC/LAB_merge.png')
plt.pause(3)
plt.clf()
X = np.zeros(L.shape,np.uint8)
print(X.shape)
plt.title('X')
plt.imshow(X)
plt.pause(1)
plt.clf()
XAB = cv2.merge((X,A,B))
orgXAB_2 = cv2.cvtColor(XAB, cv2.COLOR_LAB2BGR)
plt.title('orgXAB_2')
plt.imshow(orgXAB_2)
plt.pause(3)
plt.clf()
trans = torchvision.transforms.Compose([
#torchvision.transforms.Normalize(self.mean, self.std),
#torchvision.transforms.Resize(self.dims),
#MyAddGaussianNoise(0., 0.1),
torchvision.transforms.Grayscale()
])
x = ts3(ts(image1))
plt.title('grayscale')
plt.imshow(x, cmap = 'gray')
plt.pause(3)
plt.clf()
orgYCrCb = cv2.cvtColor(image1, cv2.COLOR_BGR2YCR_CB)
Y, Cr,Cb = cv2.split(orgYCrCb)
plt.title('Y')
plt.imshow(Y, cmap = 'gray')
plt.pause(3)
plt.clf()
xCC = cv2.merge((np.uint8(x),Cr,Cb))
orgxCrCb_2 = cv2.cvtColor(xCC, cv2.COLOR_YCR_CB2BGR)
plt.title('orgxCrCb_2')
plt.imshow(orgxCrCb_2)
plt.savefig('./YCC/orgxCrCb_2.png')
plt.pause(3)
plt.clf()
orgLAB = cv2.cvtColor(image1, cv2.COLOR_BGR2LAB)
L, A,B = cv2.split(orgLAB)
plt.title('L')
plt.imshow(L, cmap = 'gray')
plt.savefig('./YCC/L_gray.png')
plt.pause(3)
plt.clf()
plt.title('A')
plt.imshow(A, cmap = 'gray')
plt.savefig('./YCC/A_gray.png')
plt.pause(3)
plt.clf()
plt.title('B')
plt.imshow(B, cmap = 'gray')
plt.savefig('./YCC/B_gray.png')
plt.pause(3)
plt.clf()
CC = cv2.merge((Cr,Cb))
#xAB = cv2.merge((np.uint8(x),Cr,Cb))
xAB = cv2.merge((np.uint8(x),CC))
orgxAB_2 = cv2.cvtColor(xAB, cv2.COLOR_YCR_CB2BGR)
plt.title('xAB_2')
plt.imshow(orgxAB_2)
plt.savefig('./YCC/orgxAB_2.png')
plt.pause(3)
plt.clf()
"""
import os
import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision.datasets import CIFAR10 #MNIST
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from torchsummary import summary
import cv2
#from net_encoder_decoder2D import Encoder, Decoder
#from net_encoder_decoder1D2DResize import Encoder, Decoder
from net_encoder_decoder_vgg16 import Encoder, Decoder
def imshow(img, file='', text_=''):
    """Display a normalized (C, H, W) tensor with matplotlib, optionally
    annotating it with `text_` and saving it to `file`.png."""
    unnormalized = img / 2 + 0.5  # undo the Normalize(0.5, 0.25-ish) scaling
    pixels = unnormalized.detach().numpy()
    plt.imshow(np.transpose(pixels, (1, 2, 0)))  # CHW -> HWC for matplotlib
    plt.text(x=3, y=2, s=text_, c="red")
    plt.pause(3)
    if file != '':
        plt.savefig(file + '.png')
    plt.close()
from pytorch_lightning.callbacks import Callback
class MyPrintingCallback(Callback):
    """Lightning callback that prints a newline at the end of every epoch,
    presumably so consecutive progress-bar lines do not run together."""

    def on_epoch_end(self, trainer, pl_module):
        print('')
class MyAddGaussianNoise(object):
    """Transform that adds i.i.d. Gaussian noise (scaled by `std`, shifted by
    `mean`) to every element of a tensor."""

    def __init__(self, mean=0., std=0.1):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        # Draw noise matching the input's shape, then scale and shift it.
        noise = torch.randn(tensor.size())
        return tensor + noise * self.std + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class rgb2YCrCb(object):
    """Transform: image tensor -> 2-channel (Cr, Cb) chroma ndarray via OpenCV.

    Used as the target transform so the autoencoder learns to predict chroma
    from luminance.
    """

    def __init__(self):
        self.ts = torchvision.transforms.ToPILImage()
        # NOTE(review): chained assignment also binds a throwaway local
        # `transform`; looks like a copy-paste slip but is harmless.
        self.ts2 = transform=transforms.ToTensor()
        self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]
        self.ts3 = torchvision.transforms.Compose([
            #torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.ToPILImage()
            #transforms.ToTensor()
        ])
        pass

    def __call__(self, tensor):
        # Convert to PIL, then split off the two chroma channels.
        tensor = self.ts3(tensor)
        orgYCrCb = cv2.cvtColor(np.float32(tensor), cv2.COLOR_BGR2YCR_CB)
        Y, Cr,Cb = cv2.split(orgYCrCb)
        CC = cv2.merge((Cr,Cb))
        CC = np.array(CC)
        #print(CC.shape)
        return CC  # (H, W, 2) chroma array

    def __repr__(self):
        return self.__class__.__name__
class rgb2YCrCb_(object):
    """Transform: image tensor -> luminance (Y) channel ndarray via OpenCV.

    Counterpart of rgb2YCrCb; produces the model input (Y) rather than the
    target chroma channels.
    """

    def __init__(self):
        self.ts = torchvision.transforms.ToPILImage()
        # NOTE(review): chained assignment also binds a throwaway local
        # `transform` — same copy-paste slip as in rgb2YCrCb.
        self.ts2 = transform=transforms.ToTensor()
        self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]
        self.ts3 = torchvision.transforms.Compose([
            #torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.ToPILImage()
            #transforms.ToTensor()
        ])
        pass

    def __call__(self, tensor):
        tensor = self.ts3(tensor)
        orgYCrCb = cv2.cvtColor(np.float32(tensor), cv2.COLOR_BGR2YCR_CB)
        Y, Cr,Cb = cv2.split(orgYCrCb)
        CC = cv2.merge((Cr,Cb))
        # NOTE(review): output size is hard-coded to 64x64 (32*2), so this
        # transform only works after a Resize((64, 64)) — confirm callers.
        Y = np.array(Y).reshape(32*2,32*2)
        #print(Y.shape)
        return Y
class ImageDataset(torch.utils.data.Dataset):
    """CIFAR-10 wrapper yielding (original, Y-channel, CrCb-channels, label).

    transform1 is expected to produce the 64x64 luminance input and transform2
    the 64x64 two-channel chroma target for the colorization autoencoder.
    """

    def __init__(self, data_num,train_=True, transform1 = None, transform2 = None,train = True):
        # NOTE(review): both `train_` and `train` parameters exist; only
        # `train_` is used (stored as self.train) — confirm `train` is vestigial.
        self.transform1 = transform1
        self.transform2 = transform2
        self.ts = torchvision.transforms.ToPILImage()
        self.ts2 = transforms.ToTensor()
        self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]
        self.ts3 = torchvision.transforms.Compose([
            torchvision.transforms.Normalize(self.mean, self.std),
            transforms.ToTensor()
        ])
        self.train = train_
        self.data_dir = './'
        self.data_num = data_num
        self.data = []
        self.label = []

        # download
        CIFAR10(self.data_dir, train=True, download=True)
        #CIFAR10(self.data_dir, train=False, download=True)

        self.data =CIFAR10(self.data_dir, train=self.train, transform=self.ts2)

    def __len__(self):
        # Dataset length is capped by the caller-supplied data_num, not by the
        # underlying CIFAR-10 split size.
        return self.data_num

    def __getitem__(self, idx):
        out_data = self.data[idx][0]
        out_label_ = self.data[idx][1]
        out_label = torch.from_numpy(np.array(out_label_)).long()
        # NOTE(review): if either transform is None, out_data1/out_data2 are
        # unbound below and this raises UnboundLocalError — both transforms
        # are effectively required.
        if self.transform1:
            out_data1 = self.transform1(out_data)
        if self.transform2:
            out_data2 = self.transform2(out_data)
        #print( out_data1.shape, out_data2.shape)
        #ts(np.array(Y).reshape(64,64))
        # Returns: original RGB tensor, (1, 64, 64) luminance, (2, 64, 64) chroma, label.
        return out_data, np.array(out_data1).reshape(1,64,64), np.array(out_data2.reshape(2,64,64)), out_label
class LitAutoEncoder(pl.LightningModule):
    """Lightning autoencoder that predicts CrCb chroma from the Y channel
    (grayscale-to-color) on upscaled 64x64 CIFAR-10 images."""

    def __init__(self, data_dir='./'):
        super().__init__()
        self.data_dir = data_dir
        self.data_num =50000 #50000

        # Hardcode some dataset specific attributes
        self.num_classes = 10
        self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
        self.dims = (32*2, 32*2)
        self.dims2 = (32*4, 32*4)

        self.mean, self.std =[0.5,0.5,0.5], [0.25,0.25,0.25]
        # NOTE(review): trans1/trans2 are each assigned twice; only the second
        # (YCrCb-based) definitions below take effect. The first pair looks
        # like leftovers from an earlier grayscale/noise experiment.
        self.trans2 = torchvision.transforms.Compose([
            torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.Resize(self.dims)
        ])
        self.trans1 = torchvision.transforms.Compose([
            torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.Resize(self.dims),
            MyAddGaussianNoise(0., 0.1),
            torchvision.transforms.Grayscale()
        ])
        self.trans2 = torchvision.transforms.Compose([
            #torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.Resize(self.dims),
            rgb2YCrCb(), #CC
            transforms.ToTensor()
        ])
        self.trans1 = torchvision.transforms.Compose([
            #torchvision.transforms.Normalize(self.mean, self.std),
            torchvision.transforms.Resize(self.dims),
            rgb2YCrCb_(), #Y
            transforms.ToTensor(),
            #torchvision.transforms.Grayscale()
        ])

        self.encoder = Encoder()
        self.decoder = Decoder()
        self.train_acc = pl.metrics.Accuracy()
        self.val_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()

    def forward(self, x):
        # in lightning, forward defines the prediction/inference actions
        x, _ = self.encoder(x)
        x = self.decoder(x)
        return x

    def training_step(self, batch, batch_idx):
        # training_step defined the train loop. It is independent of forward
        # Batch is (original, Y-input, CrCb-target, label); the loss is MSE
        # between the decoded chroma and the true chroma.
        _,x,x_ , y = batch
        #print(x.shape, x_.shape)
        z, _ = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x_)
        self.log('train_loss', loss, prog_bar = True)
        return loss

    def validation_step(self, batch, batch_idx):
        _,x, x_, y = batch
        z, _ = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x_)
        # NOTE(review): the validation loss is logged under 'test_loss' —
        # confirm this key is intended (it is reused by test_step below).
        self.log('test_loss', loss, prog_bar = True)
        return loss

    def test_step(self, batch, batch_idx):
        # Here we just reuse the validation_step for testing
        return self.validation_step(batch, batch_idx)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer
# NOTE(review): this redefines (shadows) the identical module-level imshow
# defined earlier in the file — one of the two copies should be removed.
def imshow(img,file='', text_=''):
    """Display a normalized (C, H, W) tensor; optionally annotate with
    `text_` and save as `file`.png."""
    img = img / 2 + 0.5     # unnormalize
    npimg = img.detach().numpy()  #img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.text(x = 3, y = 2, s = text_, c = "red")
    plt.pause(3)
    if file != '':
        plt.savefig(file+'.png')
    plt.close()
# ---- Inference script: load the trained checkpoint and colorize a test batch.
ts1 = transforms.Resize((64,64))
ts = transforms.ToPILImage()
ts2 = transforms.ToTensor()
trans2 = transforms.Compose([
    transforms.Resize((64,64)),
    rgb2YCrCb(), #CrCb
])
trans1 = transforms.Compose([
    transforms.Resize((64,64)),
    rgb2YCrCb_(), #Y
])
mean, std =[0.5,0.5,0.5], [0.25,0.25,0.25]
ts3 = transforms.Compose([
    transforms.Normalize(mean, std),
    #transforms.ToTensor()
])

# Restore the trained autoencoder weights and freeze them for inference.
autoencoder = LitAutoEncoder()
PATH = 'example_cifar4L100.ckpt'
pretrained_model = autoencoder.load_from_checkpoint(PATH)
pretrained_model.freeze()
pretrained_model.eval()

# 10/10/80 split of CIFAR-10 into train/val/test subsets.
data_num = 50000
cifar10_full =ImageDataset(data_num, train=True, transform1=trans1, transform2=trans2)
n_train = int(len(cifar10_full)*0.1)
n_val = int(len(cifar10_full)*0.1)
n_test = len(cifar10_full)-n_train -n_val
cifar10_train, cifar10_val, cifar10_test = torch.utils.data.random_split(cifar10_full, [n_train, n_val, n_test])
trainloader = DataLoader(cifar10_train, shuffle=True, drop_last = True, batch_size=32, num_workers=0)
valloader = DataLoader(cifar10_val, shuffle=False, batch_size=32, num_workers=0)
testloader = DataLoader(cifar10_test, shuffle=False, batch_size=32, num_workers=0)

latent_dim,ver = "simpleGray2Clolarization", "color_plate1" #####save condition

# Colorize one test batch: encode the Y channel, decode predicted CrCb,
# then merge Y + predicted CrCb back to BGR for display.
dataiter = iter(testloader)
images0,images, images1, labels = dataiter.next()
encode_img,_ = pretrained_model.encoder(images[0:32].to('cpu').reshape(32,1,32*2,32*2)) #3
decode_img = pretrained_model.decoder(encode_img)
decode_img_cpu = decode_img.cpu()
images2 = []
for i in range(32): #32
    print(i, images[i].shape, decode_img_cpu[i].shape)
    YCC_ = cv2.merge((np.array(images[i].reshape(64,64)),np.array(decode_img_cpu[i].reshape(64,64,2))))
    images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)
    images2.append(ts3(ts2(images2_/255.)))

imshow(torchvision.utils.make_grid(images2), 'autoencode_preds_cifar10_{}_{}'.format(latent_dim,ver),text_ =' '.join('%5s' % autoencoder.classes[labels[j]] for j in range(4)))
"""
for i in range(32):
plt.title('image2_preds')
img = images2[i] / 2 + 0.5 # unnormalize
npimg = img.detach().numpy() #img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.savefig('./YCC/5piece/image_preds{}.png'.format(i))
plt.pause(3)
plt.clf()
plt.title('image_gray')
img = images[i] / 2 + 0.5 # unnormalize
#npimg = img.detach().numpy() #img.numpy()
plt.imshow(img.reshape(64,64), cmap = 'gray')
plt.savefig('./YCC/5piece/image_gray{}.png'.format(i))
plt.pause(3)
plt.clf()
plt.title('image_original_norm')
img = ts3(ts2(ts(images0[i]))) / 2 + 0.5 # unnormalize
npimg = img.detach().numpy() #img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.savefig('./YCC/5piece/image_original{}_.png'.format(i))
plt.pause(3)
plt.clf()
plt.title('image_original_')
img = images0[i] / 2 + 0.5 # unnormalize
npimg = img.detach().numpy() #img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.savefig('./YCC/5piece/image_original{}.png'.format(i))
plt.pause(3)
plt.clf()
plt.title('image_originalx2_')
img = ts3(ts1(ts2(ts(images0[i])))) / 2 + 0.5 # unnormalize
npimg = img.detach().numpy() #img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.savefig('./YCC/5piece/image_originalx2_{}.png'.format(i))
plt.pause(3)
plt.clf()
"""
# ---- Single-image demo: colorize the Y channel of an arbitrary photo.
path_= 'YCC'
YCC = cv2.imread('YCC.jpg')
#YCC = cv2.imread('color_plate1.jpg')
image1=cv2.cvtColor(YCC, cv2.COLOR_BGR2RGB)
plt.title('image1')
plt.imshow(image1)
plt.pause(3)
plt.savefig('./YCC/'+path_+'.png')
plt.clf()

# Extract the luminance channel of the input photo.
orgYCrCb = cv2.cvtColor(YCC, cv2.COLOR_BGR2YCR_CB)
Y, Cr,Cb = cv2.split(orgYCrCb)

# For comparison: the grayscale input the model saw during training.
plt.title('images[0]_')
img = images[0] / 2 + 0.5 # unnormalize
#npimg = img.detach().numpy() #img.numpy()
print('images[0]',img)
plt.imshow(img.reshape(64,64), cmap = 'gray')
plt.savefig('./YCC/'+path_+'_gray.png')
plt.pause(3)
plt.clf()

#images[0] = ts2(Y_)
# Downscale the photo's Y channel to the network input size (64x64).
Y_=ts(Y).resize((32*2,32*2))
plt.title('Y_')
img = ts2(Y_)*255. / 2 + 0.5 # unnormalize
print('Y_',img)
#npimg = img.detach().numpy() #img.numpy()
plt.imshow(img.reshape(64,64), cmap = 'gray')
plt.savefig('./YCC/'+path_+'original_gray.png')
plt.pause(3)
plt.clf()

Y_=ts(Y).resize((32*2,32*2))
plt.title('Y_')
plt.imshow(Y_, cmap = 'gray')
plt.savefig('./YCC/'+path_+'_mt_original.png')
plt.pause(3)
plt.clf()

#Y_ = np.array(Y_).reshape(1,64,64)
# Run the frozen model: predict CrCb from Y, merge, and convert back to BGR.
Y_2 = ts2(Y_)*255
encode_img,_ = pretrained_model.encoder(Y_2.to('cpu').reshape(1,1,32*2,32*2)) #3
decode_img = pretrained_model.decoder(encode_img)
decode_img_cpu = decode_img.cpu()
print(Y_2.shape, decode_img_cpu.shape)
YCC_ = cv2.merge((np.array(Y_2.reshape(64,64)),np.array(decode_img_cpu.reshape(64,64,2))))
images2_ = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)
images2 = ts3(ts2(images2_/255.))
print(images2)
plt.title('preds')
# NOTE(review): divided by 5 (not 2) before the +0.5 shift — presumably a
# manual brightness tweak for this image; confirm intentional.
img = images2 / 5 + 0.5 # unnormalize
npimg = img.detach().numpy() #img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.savefig('./YCC/'+path_+'_preds_5.png')
plt.pause(3)
plt.clf()
fee1148c4467d8fd9ac79e6e27f7c4340b300485 | 363 | py | Python | auth_service/src/AuthService/AuthService/database.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | auth_service/src/AuthService/AuthService/database.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | auth_service/src/AuthService/AuthService/database.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from AuthService import settings
# Engine backing all sessions; SQL echo is enabled only in debug mode.
# Idiom fix: `True if x else False` is a redundant conditional — bool(x)
# expresses the same thing directly.
engine = create_engine(
    settings.SQLALCHEMY_DB_URL, echo=bool(settings.DEBUG)
)

# Session factory: explicit commits and flushes, bound to the shared engine.
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Declarative base class that all ORM models should inherit from.
Base = declarative_base()
| 27.923077 | 70 | 0.829201 |
fb431788154c4944b6f205727d4b06363962c896 | 4,211 | py | Python | tests/terraform/checks/resource/aws/test_ELBUsesSSL.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | tests/terraform/checks/resource/aws/test_ELBUsesSSL.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | tests/terraform/checks/resource/aws/test_ELBUsesSSL.py | kylelaker/checkov | 6eada26030a87f397a6bf1831827b3dc6c5dad2d | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | import unittest
import hcl2
from checkov.terraform.checks.resource.aws.ELBUsesSSL import check
from checkov.common.models.enums import CheckResult
class TestELBUsesSSL(unittest.TestCase):
def test_failure_elb_one_listener(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_elb_multi_listener(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_elb(self):
hcl_res = hcl2.loads("""
resource "aws_elb" "test" {
name = "foobar-terraform-elb"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:8000/"
interval = 30
}
instances = [aws_instance.foo.id]
cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400
}
""")
resource_conf = hcl_res['resource'][0]['aws_elb']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 35.991453 | 92 | 0.478509 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.